diff --git "a/1915.jsonl" "b/1915.jsonl" new file mode 100644--- /dev/null +++ "b/1915.jsonl" @@ -0,0 +1,733 @@ +{"seq_id":"392571985","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport sys\nsys.path.append(\"../python_lib/\")\nimport codecs\nfrom gensim.models import word2vec\nimport vital\nimport mecabutil\nimport feature\n\n# 日本語を標準出力できるように\nsys.stdout = codecs.getwriter(\"utf_8\")(sys.stdout)\n\n# 入力ファイル設定\n# model_file = \"./model/csj_50.model\"\n# ndim = 50\n\nmodel_file = \"./model/web_600.model\"\nndim = 600\n\n# モデルの読み込み\nmodel = word2vec.Word2Vec.load_word2vec_format(model_file, binary=False)\n\n# クエリリスト生成\nquery_file = \"/home/hara/Develop/NTCIR11/Data/formalrun_query/formalrun_query_text.txt\"\nquery_list = vital.file_read(query_file)\\\n .split(\"\\n\")\ndel query_list[len(query_list) - 1]\n\n# クエリ毎にベクトル生成し,出力させる\nquery_feature_dict = []\nfor query in query_list:\n words = mecabutil.get_words(query)\n content_words = [word.surface for word in words]\n\n vital.pp(content_words)\n\n try:\n query_vector = feature.doc_convolute(content_words, model, ndim)\n vital.pp(query_vector)\n except:\n print(\"error occured in query\")\n print(query)\n\n print(\"\\n\")\n\n# for word in query:\n# try:\n# print model[word]\n# except:\n# print(\"no value for key\"),\n# print(word)\n","sub_path":"sample_impl/usemodel.py","file_name":"usemodel.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"363105328","text":"import os, json\nimport csv\nimport pandas as pd\nimport nltk\n\n# path_to_json = '/Users/weiding/Google Drive/Application Project/100_files'\npath_to_json = '/Users/weiding/Desktop/vc/sum_all'\n# read all json files from the folder\njson_files = [pos_json for pos_json in os.listdir(path_to_json) if pos_json.endswith('.json')]\nnltk.download('punkt')\n\n# enumerate each file\ncount_empty_file = 0\ncompany_tab_list = []\ncompany_tab_content_list = []\ncount_total_tabs = 0\ncount_empty_tabs = 0\ncount_valid_tabs = 0\ncheck_file_empty = False\nfor index, js in enumerate(json_files):\n with open(os.path.join(path_to_json, js)) as json_file:\n json_text = json.load(json_file)\n if len(json_text) == 0: # find and ignore empty files\n count_empty_file += 1\n else:\n js = js.split('_20', 1)[0] # only keep the website name or company name\n\n # processing the tab\n\n if check_file_empty:\n count_empty_file += 1\n check_file_empty = True\n for key, value in json_text.items():\n count_total_tabs += 1\n if len(value) == 0: # remove the empty tab content and tab\n count_empty_tabs += 1\n elif \"The Wayback Machine\" in value:\n continue\n else:\n check_file_empty = False # if a tab has useful information, this tab is not empty\n count_valid_tabs += 1\n key = key.split('_', 1)[-1] # remove the date\n key = key.split('.html', 1)[0] # remove the .html\n sub_tabs = key.split('_')\n for sub_tab in sub_tabs:\n if len(sub_tab) != 0:\n company_tab_list.append([js, sub_tab])\n sentenceList = nltk.sent_tokenize(value) # split paragraph as sentences\n for sentence in sentenceList:\n company_tab_content_list.append([js, key, sentence])\n\nprint(\"Number of empty file: \" + str(count_empty_file))\nprint(\"Number of total tabs:\" + str(count_total_tabs))\nprint(\"Number of empty tabs:\" + str(count_empty_tabs))\nprint(\"Number of valid tabs:\" + str(count_valid_tabs))\n\nwith open('tabs.csv', 'w') as writeFile:\n writer = csv.writer(writeFile)\n for row in company_tab_list:\n 
writer.writerow(row)\n\nwriteFile.close()\n\n# generate a dataframe: each row with 3 columns - company, tab and content. Each sentence of content as a single row.\ncompany_tab_content_df = pd.DataFrame(company_tab_content_list)\ncompany_tab_content_df.columns = ['company', 'tab', 'content']\ncompany_tab_content_df.drop_duplicates(subset=['company', 'content'], inplace=True) # remove duplicate sentences\ncompany_tab_unique_content_df = company_tab_content_df.groupby(['company', 'tab'])['content'].\\\n apply(lambda x: '.'.join(x)).reset_index() # gather sentences together with same company and tab\n\n# add industry category to the data\ndf_industry = pd.read_stata('/Users/weiding/Desktop/industry.dta')\ncompany_tab_unique_content_df = company_tab_unique_content_df.rename(columns = {'company':'Web'})\ndf_sum_all_with_label = pd.merge(company_tab_unique_content_df, df_industry, on='Web', how='left')\ndf_sum_all_with_label = df_sum_all_with_label.dropna(subset=['IndustrySegment'])\n\ndf_sum_all_with_label.to_csv(\"company_tab_content_sum_all_with_label.csv\", encoding='utf-8', index=False)\n","sub_path":"Preproceesing.py","file_name":"Preproceesing.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"302348178","text":"#coding=utf-8\n\nimport unittest\n\n\"\"\"\nBackpack III\n\nGiven n kind of items with size Ai and value Vi( each item has an infinite number available) and a backpack with size m. \nWhat's the maximum value can you put into the backpack?\n\n Notice\n\nYou cannot divide item into small pieces and the total size of items you choose should smaller or equal to m.\n\nHave you met this question in a real interview? Yes\nExample\nGiven 4 items with size [2, 3, 5, 7] and value [1, 5, 2, 4], and a backpack with size 10. 
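(For example, taking the size-3 item three times uses total size 9 <= 10 and gives value 3 * 5 = 15.)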
The maximum value is 15.\n\nTags \nDynamic Programming\nRelated Problems \nMedium Partition Equal Subset Sum 29 %\nMedium Backpack VI 30 %\nMedium Backpack V 45 %\nMedium Backpack IV 41 %\nMedium Backpack II 39 %\nMedium Backpack\n\"\"\"\n\n\n\nclass Solution:\n # @param {int[]} A an integer array\n # @param {int[]} V an integer array\n # @param {int} m an integer\n # @return {int} an array\n def backPackIII(self, nums, vals, m): #1105ms, 758ms\n # Write your code here\n dp = [0 for _ in range(m+1)]\n self.fill(nums, vals, m, dp)\n return dp[m]\n\n def fill(self, nums, vals, bag, dp):\n if bag == 0 or bag>0 and dp[bag] != 0:\n return dp[bag]\n for i in range(len(nums)):\n weight = nums[i]\n val = vals[i]\n if bag >= weight:\n dp[bag] = max(self.fill(nums, vals, bag-weight, dp) + val, dp[bag])\n return dp[bag] # don't forget to return value here\n\n\n\n\n\n\nclass SolutionTester(unittest.TestCase):\n def setUp(self):\n self.sol = Solution()\n\n def test_case1(self):\n nums = [2, 3, 5, 7]\n vals = [1, 5, 2, 4]\n m = 10\n answer = 15\n result = self.sol.backPackIII(nums, vals, m)\n self.assertEqual(answer, result)\n\n\n\ndef main():\n suite = unittest.TestLoader().loadTestsFromTestCase(SolutionTester)\n unittest.TextTestRunner(verbosity=2).run(suite)\n\n\nif __name__ == \"__main__\":\n main()\n\n\"\"\"\n\nFollowing is a very simple and smart way\n\nclass Solution:\n # @param {int[]} A an integer array\n # @param {int[]} V an integer array\n # @param {int} m an integer\n # @return {int} an array\n def backPackIII(self, A, V, m): #590ms\n # Write your code here\n f = [0 for i in xrange(m+1)]\n for (a, v) in zip(A, V):\n for j in xrange(a, m+1):\n if f[j - a] + v > f[j]:\n f[j] = f[j - a] + v\n return f[m]\n \n\n===================================================================================================================\n\nhttps://zhengyang2015.gitbooks.io/lintcode/backpack_iii_440.html\n\n这道题和II的思想一样,f[j]表示容量为j的背包对前i件物品能取的最大值,其中物品可以重复选取。对物品从0遍历到n-1,每次只有比A[i]大的背包容量\n才有可能被更新。\n和II不同的是,这道题物品可以重复选择,所以内层遍历j的时候从小到大遍历,这样物品可以重复选取。比如一开始在j的时候取了i,然后随着j的增大,在\nj'的时候又取了i,而恰好j = j' - A[i],在这种情况下i就被重复选取。如果从大往小遍历则所有物品只能取一次,所以II中是从大往小遍历。\n因此可以重复取元素则背包容量从小到大遍历,反之从大到小遍历。\n\n \n\"\"\"\n\n#-*- coding:utf-8 -*-\n","sub_path":"mjbeto/backpackIII.py","file_name":"backpackIII.py","file_ext":"py","file_size_in_byte":3354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"619518666","text":"from kivy.app import App\nfrom kivy.uix.image import Image\nfrom kivy.clock import Clock\nfrom kivy.graphics.texture import Texture\nfrom kivy.core.window import Window\nfrom kivy.uix.screenmanager import Screen\nfrom kivy.lang import Builder\nfrom kivy.properties import NumericProperty\nimport numpy as np\nimport cv2\nfrom kivy.uix.effectwidget import EffectWidget, EffectBase\n\n# import matlab.engine\n\nfrom pathlib import Path\n\n# from PIL import Image as ImagePillow\n\nWINDOW_MIN_WIDTH = 800\nWINDOW_MIN_HEIGHT = 600\n\n# The effect string is glsl code defining an effect function.\neffect_string = '''\nvec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)\n{\n // Note that time is a uniform variable that is automatically\n // provided to all effects.\n float red = color.x * abs(sin(time*2.0));\n float green = color.y; // No change\n float blue = color.z * (1.0 - abs(sin(time*2.0)));\n return vec4(red, green, blue, color.w);\n}\n'''\n\nclass DemoEffect(EffectWidget):\n def __init__(self, *args, **kwargs):\n self.effect_reference = 
EffectBase(glsl=effect_string)\n super(DemoEffect, self).__init__(*args, **kwargs)\n\nclass KivyCamera(Image):\n\n def __init__(self, capture = None, **kwargs):\n super(KivyCamera, self).__init__(**kwargs)\n\n # self.eng = App.get_running_app().future.result()\n # self.eng.addpath('m:/files/files/phd/functions/messRopeFunctions', nargout=0)\n # self.eng.addpath('e:/百度云同步盘/files/phd/functions/messRopeFunctions', nargout=0)\n\n # self.rectFilePathName = 'm:/files/files/phd/functions/messRopeFunctions/rect_anno.txt'\n # self.rotateFilePathName = 'm:/files/files/phd/functions/messRopeFunctions/angle_rotate.txt'\n\n # self.rectFilePathName = 'rect_anno.txt'\n # self.rotateFilePathName = 'angle_rotate.txt'\n\n video_files_path = './test2.mp4'\n self.capture = cv2.VideoCapture(video_files_path)\n\n self.timeStampPythonFilePathName = './timeStampPython.txt';\n self.timeStampMatlabFilePathName = './timeStampMatlab.txt';\n\n return_value, frame = self.capture.read()\n if return_value:\n self.w, self.h = frame.shape[1], frame.shape[0]\n frameTimeStamp = self.capture.get(0)\n self.roundFrameTimeStamp = round(frameTimeStamp/1000, 1)\n with open(self.timeStampPythonFilePathName, 'w') as fpPython:\n fpPython.write(str(self.roundFrameTimeStamp))\n #\n # # bwRef = matlab.double([[1,2,3,4,5], [6,7,8,9,10]])\n # # self.bwRef = np.zeros((self.h, self.w))\n # # print(self.bwRef.shape)\n # # self.eng.load('bestPara.mat', nargout=0)\n # #\n # # bestPara = self.eng.workspace['bestPara']\n # # dataMLOutput = self.eng.workspace['dataMLOutput']\n # # GMModelOutput = self.eng.workspace['GMModelOutput']\n # # epsilonOutput = self.eng.workspace['epsilonOutput']\n #\n # print(frame.shape)\n # # t1 = time.time()\n # # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # # gray = cv2.resize(gray,(10,20))\n # # # data_list_matlab = matlab.double(gray)\n # # # print(type(data_list_matlab))\n # # # print(data_list_matlab.size)\n # # # print(data_list_matlab)\n # # A = matlab.double([[1,2,3,4,5], [6,7,8,9,10]])\n # # print(A)\n # # # data_list = gray.tolist()\n # # # flat_list = [item for sublist in data_list for item in sublist]\n # # # self.eng.imshow(data_list_matlab, nargout=0)\n # # # newimg = cv2.resize(gray,(10,20))\n # # # print(newimg.shape)\n # # # print(type(newimg))\n # # # print(newimg)\n # # # data_list = newimg.tolist()\n # # # print(len(data_list))\n # # # print(type(data_list))\n # # # print(data_list)\n # # # flat_list = [item for sublist in data_list for item in sublist]\n # # # print(len(flat_list))\n # # # print(type(flat_list))\n # # # print(flat_list)\n # # # self.eng.fun_imshowPython(data_list, frame.shape[1], frame.shape[0], nargout=0)\n # # # data_list_matlab = matlab.uint8(data_list)\n # # # messTagMatlab, messPosMatlab = self.eng.fun_autoRecognizeByVideoPython1(data_list_matlab,self.rectFilePathName,\\\n # # # self.rotateFilePathName,bestPara,dataMLOutput,GMModelOutput,epsilonOutput,\\\n # # # frame.shape[1],frame.shape[0],nargout=2)\n # # # print(messTagMatlab)\n # # # print(messPosMatlab)\n # # elapsed1 = time.time() - t1\n # # print(elapsed1)\n #\n # # eps = self.eng.workspace['epsilonOutput']\n # # print(eps)\n # # GMModelOutput = self.eng.workspace['GMModelOutput']\n # # GMModelOutputType = type(GMModelOutput)\n # # print(GMModelOutputType)\n # # self.eng.workspace['epsilonOutput'] = 9\n # # a = self.eng.eval('epsilonOutput+1')\n # # print(a)\n #\n # # t1 = time.time()\n # # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # # # cv2.imshow('image', gray)\n # # data_list = gray.tolist()\n # # 
self.eng.fun_imshowPython(data_list, frame.shape[1], frame.shape[0], nargout=0)\n # # elapsed1 = time.time() - t1\n # # print(elapsed1)\n # # print(frame.shape)\n #\n #\n #\n # # t2 = time.time()\n # # bChannel,gChannel,rChannel = cv2.split(frame)\n # # data_listR = rChannel.tolist()\n # # data_listG = gChannel.tolist()\n # # data_listB = bChannel.tolist()\n # # self.eng.fun_imshowColor(data_listR, data_listG, data_listB, \\\n # # frame.shape[1], frame.shape[0], nargout=0)\n # # elapsed2 = time.time() - t2\n # # print(elapsed2)\n #\n # # imPillow = ImagePillow.fromarray(frame)\n # # image_mat = matlab.uint8(list(imPillow.getdata()))\n # # image_mat.reshape((imPillow.size[0], imPillow.size[1], 3))\n # # self.eng.fun_imshowPillow(image_mat, nargout=0)\n #\n #\n # # vidFrame = matlab.double(list(frame))\n # # self.eng.imshow(mat, nargout=0)\n #\n # # data1 = np.random.uniform(low = 0.0, high = 30000.0, size = (10,))\n # # data1m = matlab.double(list(data1))\n # # print(data1m)\n #\n # # frameType = type(frame)\n # # print(frameType)\n #\n # # self.eng.imshow(frame, nargout=0)\n # # GMModelOutput = matlab.object\n # # bestPara, dataMLOutput, GMModelOutput, epsilonOutput = self.eng.fun_loadMatFile('bestPara.mat', nargout=4)\n # # print(epsilonOutput)\n\n self.clockEvent = Clock.schedule_interval(self.update, 1.0 / 15)\n self.readFrequency = 30\n self.readCount = 0\n self.polygonLineThickness = 3\n self.messTag1 = 0\n self.messTag2 = 0\n\n # def start(self, capture, fps=30):\n # self.capture = capture\n # Clock.schedule_interval(self.update, 1.0 / fps)\n #\n # def stop(self):\n # Clock.unschedule_interval(self.update)\n # self.capture = None\n\n def update(self, dt):\n matlab_file = Path(self.timeStampMatlabFilePathName)\n if matlab_file.is_file():\n with open(self.timeStampMatlabFilePathName) as fpMatlab:\n strTimeStampMatlab = fpMatlab.readline().rstrip('\\n')\n if self.roundFrameTimeStamp <= float(strTimeStampMatlab):\n self.readCount += 1\n return_value, frame = self.capture.read()\n if return_value:\n # self.eng.fun_autoRecognizeByVideo(frame,self.rectFilePathName,\\\n # self.rotateFilePathName,bestParaMats,self.bwRef)\n\n frameTimeStamp = self.capture.get(0)\n self.roundFrameTimeStamp = round(frameTimeStamp/1000, 1)\n with open(self.timeStampPythonFilePathName, 'w') as fpPython:\n fpPython.write(str(self.roundFrameTimeStamp))\n\n if (self.readCount % self.readFrequency) == 0:\n with open('./data_1.txt') as fp1:\n tagFirst = fp1.readline().rstrip('\\n')\n line1 = '[color=ffff00]' + tagFirst + '[/color]'\n App.get_running_app().root.ids.holyLabel1.text = line1\n\n self.messTag1 = int(tagFirst)\n if self.messTag1 == 1:\n strPosFirst = fp1.readline().rstrip('\\n')\n strPosFirst = strPosFirst.split('\\t')\n self.pts1 = np.array([[int(strPosFirst[0]),int(strPosFirst[1])],\\\n [int(strPosFirst[2]),int(strPosFirst[3])],\\\n [int(strPosFirst[4]),int(strPosFirst[5])],\\\n [int(strPosFirst[6]),int(strPosFirst[7])]], np.int32)\n with open('./data_2.txt') as fp2:\n tagSecond = fp2.readline().rstrip('\\n')\n line2 = '[color=ffff00]' + tagSecond + '[/color]'\n App.get_running_app().root.ids.holyLabel2.text = line2\n\n self.messTag2 = int(tagSecond)\n if self.messTag2 == 1:\n strPosSecond = fp2.readline().rstrip('\\n')\n strPosSecond = strPosSecond.split('\\t')\n self.pts2 = np.array([[int(strPosSecond[0]),int(strPosSecond[1])],\\\n [int(strPosSecond[2]),int(strPosSecond[3])],\\\n [int(strPosSecond[4]),int(strPosSecond[5])],\\\n [int(strPosSecond[6]),int(strPosSecond[7])]], np.int32)\n\n if self.messTag1 
== 1:\n cv2.polylines(frame,[self.pts1],True,(0,0,255),self.polygonLineThickness)\n elif self.messTag2 == 1:\n cv2.polylines(frame,[self.pts2],True,(0,0,255),self.polygonLineThickness)\n\n if self.messTag1 == 1 or self.messTag2 == 1:\n App.get_running_app().root.ids.holyLabelMess.text = \\\n '[b][color=ff0000]乱绳[/color][/b]'\n App.get_running_app().root.ids.holyEffect.effects = \\\n [App.get_running_app().root.ids.holyEffect.effect_reference]\n # App.get_running_app().root.ids.holyLabelMess.font_size = \\\n # App.get_running_app().root.font_scaling*60\n else:\n App.get_running_app().root.ids.holyLabelMess.text = \\\n '[b][color=00ff00]正常[/color][/b]'\n\n texture = self.texture\n # w, h = frame.shape[1], frame.shape[0]\n if not texture or texture.width != self.w or texture.height != self.h:\n self.texture = texture = Texture.create(size=(self.w, self.h))\n texture.flip_vertical()\n texture.blit_buffer(frame.tobytes(), colorfmt='bgr')\n self.canvas.ask_update()\n else:\n self.capture.set(0, 0)\n # Clock.unschedule(self.clockEvent)\n print(self.capture)\n # self.eng.simple(nargout=0)\n # tf = self.eng.isprime(37)\n # print(tf)\n # self.capture = None\n\nclass MessRopeRoot(Screen):\n\n font_scaling = NumericProperty()\n\n def on_size(self, *args):\n self.font_scaling = min(Window.width/WINDOW_MIN_WIDTH, Window.height/WINDOW_MIN_HEIGHT)\n # self.ids.holyLabel.text = '[color=ffff00]changed[/color]'\n\n def showcase_boxlayout(self, layout):\n pass\n\n # def dostart(self, *largs):\n # global capture\n # video_files_path = './data/test1.mp4'\n # capture = cv2.VideoCapture(video_files_path)\n # self.ids.qrcam.start(capture)\n #\n # def doexit(self):\n # global capture\n # if capture != None:\n # capture.release()\n # capture = None\n # EventLoop.close()\n\n\nclass MessRopeApp(App):\n\n # future = matlab.engine.start_matlab(async=True)\n\n def build(self):\n Window.minimum_width = WINDOW_MIN_WIDTH\n Window.minimum_height = WINDOW_MIN_HEIGHT\n with open('./messropewin.kv', encoding='utf8') as f:\n self.messropeWin = Builder.load_string(f.read())\n return self.messropeWin\n\n def on_stop(self):\n if self.messropeWin.ids.qrcam.capture:\n print(self.messropeWin.ids.qrcam.capture)\n Clock.unschedule(self.messropeWin.ids.qrcam.clockEvent)\n self.messropeWin.ids.qrcam.capture.release()\n self.messropeWin.ids.qrcam.capture = None\n\nif __name__ == '__main__':\n MessRopeApp().run()\n","sub_path":"main_cv_simple.py","file_name":"main_cv_simple.py","file_ext":"py","file_size_in_byte":13544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"211993063","text":"def person1():\r\n name = input(\"1st Person name : \")\r\n print(name + \" : Hello!How can i help you ? 
\")\r\n\r\n\r\n# Function 2\r\n\r\ndef person2():\r\n name = input(\"What is your name : \")\r\n food = input(\"What would you like to eat : \")\r\n drink = input(\"What will you drink : \")\r\n dessert = input(\"What would you have in dessert :\")\r\n print(\"\\tName \\t: \", name)\r\n print(\"\\tFood \\t: \", food)\r\n print(\"\\tDrink \\t: \", drink)\r\n print(\"\\tDessert : \", dessert)\r\n\r\n\r\nperson1()\r\nperson2()\r\n","sub_path":"Week2_Functions.py","file_name":"Week2_Functions.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"638956615","text":"# pylint: disable=line-too-long, no-member\n\nimport calendar\nimport csv\nimport os\nimport tempfile\nimport time\n\nfrom zipfile import ZipFile\n\nimport arrow\n\nfrom django.conf import settings\nfrom django.contrib.gis.geos import GEOSGeometry\nfrom django.template.loader import render_to_string\nfrom django.utils.text import slugify\n\nfrom ..models import DataPoint, DataSourceReference, DataGeneratorDefinition\n\ndef extract_secondary_identifier(properties):\n if 'status' in properties:\n return properties['status']\n\n return None\n\ndef generator_name(identifier): # pylint: disable=unused-argument\n return 'Device Location'\n\ndef visualization(source, generator):\n context = {}\n context['source'] = source\n context['generator_identifier'] = generator\n\n try:\n context['google_api_key'] = settings.PDK_GOOGLE_MAPS_API_KEY\n except AttributeError:\n pass\n\n values = []\n\n min_latitude = 90\n max_latitude = -90\n\n min_longitude = 180\n max_longitude = -180\n\n start = None\n end = None\n\n for point in DataPoint.objects.filter(source=source.identifier, generator_identifier=generator).order_by('-created')[:500]:\n if end is None:\n end = point.created\n\n start = point.created\n\n properties = point.fetch_properties()\n\n values.append(properties)\n\n latitude = properties['latitude']\n longitude = properties['longitude']\n\n if latitude < min_latitude:\n min_latitude = latitude\n\n if latitude > max_latitude:\n max_latitude = latitude\n\n if longitude < min_longitude:\n min_longitude = longitude\n\n if longitude > max_longitude:\n max_longitude = longitude\n\n context['values'] = values\n\n context['center_latitude'] = (min_latitude + max_latitude) / 2\n context['center_longitude'] = (min_longitude + max_longitude) / 2\n\n context['start'] = time.mktime(start.timetuple())\n context['end'] = time.mktime(end.timetuple())\n\n return render_to_string('generators/pdk_device_location_template.html', context)\n\ndef data_table(source, generator):\n context = {}\n context['source'] = source\n context['generator_identifier'] = generator\n\n context['values'] = DataPoint.objects.filter(source=source.identifier, generator_identifier=generator).order_by('-created')[:500]\n\n return render_to_string('generators/pdk_device_location_table_template.html', context)\n\ndef compile_report(generator, sources, data_start=None, data_end=None, date_type='created'): # pylint: disable=too-many-locals, too-many-branches, too-many-statements\n now = arrow.get()\n filename = tempfile.gettempdir() + '/pdk_export_' + str(now.timestamp) + str(now.microsecond / 1e6) + '.zip'\n\n with ZipFile(filename, 'w', allowZip64=True) as export_file:\n seen_sources = []\n\n for source in sources:\n export_source = source\n\n seen_index = 1\n\n while slugify(export_source) in seen_sources:\n export_source = source + '__' + str(seen_index)\n\n seen_index += 1\n\n 
seen_sources.append(slugify(export_source))\n\n source_reference = DataSourceReference.reference_for_source(source)\n generator_definition = DataGeneratorDefinition.definition_for_identifier(generator)\n\n points = DataPoint.objects.filter(source_reference=source_reference, generator_definition=generator_definition)\n\n if data_start is not None:\n if date_type == 'recorded':\n points = points.filter(recorded__gte=data_start)\n else:\n points = points.filter(created__gte=data_start)\n\n if data_end is not None:\n if date_type == 'recorded':\n points = points.filter(recorded__lte=data_end)\n else:\n points = points.filter(created__lte=data_end)\n\n points = points.order_by('source', 'created')\n\n identifier = slugify(generator + '__' + source)\n\n secondary_filename = tempfile.gettempdir() + '/' + identifier + '.txt'\n\n with open(secondary_filename, 'w') as outfile:\n writer = csv.writer(outfile, delimiter='\\t')\n\n columns = [\n 'Source',\n 'Created Timestamp',\n 'Created Date',\n 'Recorded Timestamp',\n 'Recorded Date',\n 'Observed',\n 'Raw Timestamp',\n 'Provider',\n 'Latitude',\n 'Longitude',\n 'Accuracy',\n 'Altitude',\n 'Speed',\n 'Bearing',\n ]\n\n writer.writerow(columns)\n\n for point in points:\n properties = point.fetch_properties()\n\n row = []\n\n row.append(point.source)\n row.append(calendar.timegm(point.created.utctimetuple()))\n row.append(point.created.isoformat())\n\n row.append(calendar.timegm(point.recorded.utctimetuple()))\n row.append(point.recorded.isoformat())\n\n try:\n row.append(properties['observed'])\n except KeyError:\n row.append('')\n\n try:\n row.append(properties['location_timestamp'])\n except KeyError:\n row.append('')\n\n try:\n row.append(properties['provider'])\n except KeyError:\n row.append('')\n\n try:\n row.append(properties['latitude'])\n except KeyError:\n row.append('')\n\n try:\n row.append(properties['longitude'])\n except KeyError:\n row.append('')\n\n try:\n row.append(properties['accuracy'])\n except KeyError:\n row.append('')\n\n try:\n row.append(properties['altitude'])\n except KeyError:\n row.append('')\n\n try:\n row.append(properties['speed'])\n except KeyError:\n row.append('')\n\n try:\n row.append(properties['bearing'])\n except KeyError:\n row.append('')\n\n writer.writerow(row)\n\n export_file.write(secondary_filename, slugify(generator) + '/' + slugify(export_source) + '.txt')\n\n os.remove(secondary_filename)\n\n return filename\n\ndef extract_location(point):\n properties = point.fetch_properties()\n\n latitude = properties['latitude']\n longitude = properties['longitude']\n\n if latitude is not None and longitude is not None:\n point.generated_at = GEOSGeometry('POINT(' + str(longitude) + ' ' + str(latitude) + ')')\n point.save()\n","sub_path":"generators/pdk_location.py","file_name":"pdk_location.py","file_ext":"py","file_size_in_byte":7255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"648731622","text":"# hybrid score function April 30 complete procedure\n\ntotalcomparisons = 0\n\nc1 = 1\nc2 = 12\n\n# new params\ntotresults = []\ntotclusterids=[]\n\n#diff. 
category of accuracy index\nmissedmatching = 0\nwrongmatching =0 # matched to a different cluster\ncontinuedmatching = 0 # continues erroneously \n\nclustering_error = 0 \n\n#####\ninitialframe = 410\nendframe = 500\n\nunew = map10[initialframe]\n\nca = list(range(c1, c2+1))\n#unew = [1]\ncarray = np.setdiff1d(ca, unew)\n\nprint(\"carray :\", carray)\n\nfor initialcluster in ca:#range(c1, c2+1):\n \n \n arrayx = []\n arrayy = []\n pavex = []\n pavey = []\n\n phx = [] # previous high x values\n phy =[] \n\n\n obnum = 1\n\n totalmap ={}\n\n prevmap = {}\n\n mf = defaultdict(list)\n # maximum object/cluster id in any frame\n mx = 20\n\n for j in range(0, mx):\n mf[j]=0 \n\n matchfreq= mf\n \n # April 30\n # array for counts of pts in clusters\n counts = {} #[]\n # normalized matchfreq scores\n normalmatchfreq = mf\n\n thres= 10\n\n finalarray=[]\n\n finalarray.append(initialcluster)\n\n finalx =[]\n finaly =[]\n\n avex =[]\n avey =[]\n\n xvalues =[]\n yvalues =[]\n\n # edited method\n angles = []\n \n for i in range(initialframe, endframe+1):\n name = \"file_out\"\n name = name+str(i)\n name = name+\".csv\"\n firstrow=0\n # clear matchfreq\n for j in range(0, mx):\n matchfreq[j] =0\n normalmatchfreq[j]=0\n\n currentmap3 = {} # average coordinates\n\n print(\"curr frame is:\", i)\n\n\n with open(name) as csv_file:\n f =0 \n # reset hxvalues , hyvalues\n hxvalues = []\n hyvalues=[]\n\n obnum=1 \n\n currentmap= {}\n\n csv_reader = csv.reader(csv_file, delimiter=\",\")\n\n if i==initialframe:\n for row in csv_reader:\n # no need for first row skip \n clusterid = float(row[0])\n if clusterid==initialcluster:\n xpoint = float(row[1])\n ypoint = float(row[2])\n #print(\"xpt\", xpoint)\n #print(\"ypt\", ypoint)\n arrayx.append(xpoint)\n arrayy.append(ypoint)\n xr = round(xpoint)\n yr = round(ypoint)\n fromi = dinvlookupdict[(xr,yr)]\n h1, i1 = highest80(fromi)\n ilist = np.unique(i1)\n for ind1 in ilist:\n prevmap[ind1] = 1\n #i1 = newhighestfreq(fromi)\n #prevmap[i1] = 1\n\n plt.scatter(arrayx, arrayy)\n\n avx = np.mean(arrayx)\n avy = np.mean(arrayy)\n\n finalx.append(avx)\n finaly.append(avy)\n\n # save angle\n\n plt.annotate(i, (avx, avy), textcoords=\"offset points\", xytext=(0,10), ha='center')\n\n #append avx and avy\n avex.append(avx)\n avey.append(avy)\n\n # reset ky \n ky = initialcluster\n continue\n # counter of number of points in cluster: unique points?\n ccount = 1\n for row in csv_reader:\n clusterid = float(row[0])\n\n if clusterid != obnum:\n numo1 = float(obnum)\n # append into dictionary of maps\n totalmap[numo1] = currentmap\n counts[numo1] = ccount\n normalmatchfreq[numo1] = matchfreq[numo1]/ccount\n \n if normalmatchfreq[numo1] > f:#matchfreq[numo1] > f:\n f = normalmatchfreq[numo1] #matchfreq[numo1]\n print(\"f is\", f)\n ky = numo1\n\n hxvalues = xvalues\n hyvalues = yvalues\n obnum= clusterid\n currentmap={}\n # take the average\n avecurrentx = np.mean(xvalues)\n avecurrenty = np.mean(yvalues)\n currentmap3[numo1] = [avecurrentx, avecurrenty]\n\n xvalues =[]\n yvalues =[]\n \n ccount=1\n continue\n\n xpoint = float(row[1])\n ypoint = float(row[2])\n xr = round(xpoint)\n yr = round(ypoint)\n ccount=ccount+1\n \n xvalues.append(xpoint) # save to array \n yvalues.append(ypoint)\n fromi = dinvlookupdict[(xr,yr)]\n # function to find highest freq \n h1, i1 = highest80(fromi)\n # save to map\n #currentmap[i1] = 1\n for ind1 in i1:\n currentmap[ind1] = 1\n # check prev map\n val = prevmap.get(fromi)\n if val == None:\n pass\n else:\n numo = float(obnum)\n matchfreq[numo] = 
matchfreq[numo]+1\n # check at end of file\n numo2 = float(clusterid)\n counts[numo2] = ccount\n normalmatchfreq[numo2] = matchfreq[numo2]/ccount\n \n avecurrentx = np.mean(xvalues) ##USE AS CURRENT X\n avecurrenty = np.mean(yvalues)\n currentmap3[numo2] = [avecurrentx, avecurrenty]\n totalmap[numo2] = currentmap\n \n if normalmatchfreq[numo2] > f:\n ky = numo2\n hxvalues = xvalues\n hyvalues = yvalues\n totalmap[ky]= currentmap\n\n if len(hxvalues) ==0:\n foundmin=0\n mindist = thres\n c_first=0\n for c in currentmap3:\n cvalue = currentmap3[c]\n cx = cvalue[0]\n cy = cvalue[1]\n ax = finalx[-1]\n ay = finaly[-1]\n dist1 = pow(cx - ax,2) + pow(cy - ay,2)\n dist = math.sqrt(dist1)\n if dist < thres:\n if c_first==0:\n c_first=1\n minclust=c\n mcx = cx\n mcy= cy\n mindist = dist\n foundmin=1\n if dist< mindist:\n mindist = dist\n minclust=c\n mcx = cx\n mcy = cy\n if foundmin==1:\n print(\"found min\")\n print(\"dist is\", mindist)\n print(\"minclust is\", minclust)\n if len(angles)==0:\n print(\"angles array is of length 0 and minclust:\", minclust)\n prevmap = totalmap[minclust]\n avx = mcx\n avy = mcy \n ## move to end\n ## finalarray.append(minclust) \n # add to angles\n xdiff = avx - finalx[-1]\n ydiff = avy - finaly[-1]\n ## check initial direction 2/28/21 \n if avy >20: # +y, -x\n # break if both directions are unsatisfied / one direction unsat.\n if xdiff >0 and ydiff <0:\n print(\"both dir. unsat.\")\n # plot\n plt.scatter(mcx, mcy)\n plt.annotate(\"wrong dir.\", (mcx, mcy))\n break\n if avy <20: # -y, +x\n if xdiff<0 and ydiff>0:\n print(\"both dir. unsat.\")\n plt.scatter(mcx, mcy)\n #plot \n plt.annotate(\"wrong dir.\", (mcx, mcy))\n break\n rad = math.atan2(ydiff, xdiff)\n ang = math.degrees(rad)\n # moved here \n finalarray.append(minclust) \n \n if ang<0:\n ang = 360+ang\n angles.append(ang)\n\n finalx.append(mcx)\n finaly.append(mcy)\n\n\n\n #plt.scatter(mcx, mcy)\n #plt.annotate(i, (avx, avy), textcoords=\"offset points\", xytext=(0,10), ha='center')\n continue\n prev_avex = finalx[-1]\n prev_avey = finaly[-1]\n\n xdiff_curr = mcx - prev_avex #how is avx set?\n ydiff_curr = mcy - prev_avey \n \n if mcy >20: # +y, -x\n # break if both directions are unsatisfied / one direction unsat.\n if xdiff_curr >0 and ydiff_curr <0:\n print(\"both dir. unsat.; angles not empty\")\n # plot\n plt.scatter(mcx, mcy)\n plt.annotate(\"wrong dir.\", (mcx, mcy))\n break\n if mcy <20:\n if xdiff_curr<0 and ydiff_curr>0:\n print(\"both dir. 
unsat.; angles not empty\")\n # plot\n plt.scatter(mcx, mcy)\n plt.annotate(\"wrong dir.\", (mcx, mcy))\n break\n \n # calc angle\n rad = math.atan2(ydiff_curr, xdiff_curr)\n\n ang = math.degrees(rad)\n if ang<0:\n ang = 360+ang\n\n prev_ang = angles[-1]\n\n ang_diff = abs(ang - prev_ang)\n\n print(\"prev ang :\", prev_ang)\n print(\"curr ang:\", ang)\n\n print(\"ang_diff is:\", ang_diff)\n if ang_diff <= 30: # change from 45 to 30\n print(\"angle holds\")\n prevmap= totalmap[minclust]\n avx = mcx\n avy =mcy\n finalarray.append(minclust)\n finalx.append(mcx)\n finaly.append(mcy)\n # append to slopes / diffs\n #xdiff.append(avx - avex[-1])\n #ydiff.append(avy - avey[-1])\n #avex.append(avx)\n #avey.append(avy)\n #append to angles\n angles.append(ang)\n else:\n print(\"angles too large, stop\")\n break\n else:\n print(\"not found and end, after last frame\", i)\n break\n if len(hxvalues) !=0:\n finalarray.append(ky)\n prevmap = totalmap[ky]\n\n avx = np.mean(hxvalues)\n avy = np.mean(hyvalues)\n\n prev_avx = finalx[-1]\n prev_avy = finaly[-1]\n\n # update angles\n xdiff = avx - prev_avx\n ydiff = avy - prev_avy \n rad = math.atan2(ydiff, xdiff)\n ang = math.degrees(rad)\n if ang <0:\n ang = 360+ang\n angles.append(ang)\n\n finalx.append(avx)\n finaly.append(avy)\n \n if i % 10==0:\n print(\"(map10) i is\", i)\n print(\"(map10) key is\", ky)\n m10 = map10[i]\n m10.append(ky)\n # check sim clusts\n v1 = errorclusters.get(i)\n if v1!=None:\n errorarray= errorclusters[i]\n for el in errorarray:\n for e in el:\n if e == ky:\n m10.extend(el)\n map10[i] = m10\n\n #print(\"final x\", finalx)\n #print(\"final y\", finaly)\n\n\n print(\"initial cluster\", initialcluster)\n print(\"initial frame\", initialframe)\n\n print(\"final array is\", finalarray)\n \n listclusterids= finalarray\n \n print(\"listclusterids is\", listclusterids)\n \n # plot\n \n \n alen = len(finalarray)\n print(\"length of array \", alen)\n \n xv1=[]\n yv1=[]\n\n ax =[]\n ay =[]\n\n acounter=0\n for i in range(initialframe, initialframe+alen):\n name = \"file_out\"\n name = name+str(i)\n name = name+\".csv\"\n\n\n a = finalarray[acounter]\n xarray = []\n yarray = []\n with open(name) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=\",\")\n for row in csv_reader:\n clusterid = float(row[0])\n xpoint = float(row[1])\n ypoint = float(row[2])\n\n if clusterid == a:\n xarray.append(xpoint)\n yarray.append(ypoint)\n\n\n # plot\n plt.scatter(xarray,yarray)\n avx = np.mean(xarray)\n avy = np.mean(yarray)\n plt.annotate(i, (avx, avy), textcoords=\"offset points\", xytext=(0,10), ha='center')\n\n ax.append(avx)\n ay.append(avy)\n\n acounter =acounter +1\n if acounter > alen:\n break\n\n plt.show()\n \n # app result\n result = []\n result.append(initialcluster)\n \n iframe = initialframe\n currentcluster = initialcluster\n t = True\n while t:\n nextres = findnextclusterapp(iframe, currentcluster)\n if str(nextres) == \"nan\":\n break\n if iframe >= endframe:\n break\n result.append(nextres)\n iframe = iframe+1\n currentcluster = nextres\n \n len1 = len(result) #datastore result\n len2 = len(listclusterids) # our result \n\n setlen = min(len1, len2)\n od1 = {}\n od2 = {}\n booleanwrong=[]\n \n for j1 in range(0,setlen):\n nolongerwrong=0\n nolongerwrongoriginal=0\n frameno=initialframe+j1\n od1[frameno]=[result[j1]]\n od2[frameno]=[listclusterids[j1]]\n if result[j1]==listclusterids[j1]:\n nolongerwrong=1\n nolongerwrongoriginal=1\n if result[j1]!= listclusterids[j1]:\n v1 = errorclusters.get(frameno)\n if v1!=None:\n errorarray= 
errorclusters[frameno]\n for el in errorarray:\n if result[j1] in el:\n if listclusterids[j1] in el:\n nolongerwrong=1\n if nolongerwrong!=1:\n booleanwrong.append(1) # wrong\n else:\n booleanwrong.append(0) # not wrong\n if nolongerwrongoriginal==0:\n clustering_error = clustering_error+1\n maxlen= max(len1, len2)\n last1 = result[setlen-1]\n last2 = listclusterids[setlen-1]\n \n if len1 != len2:\n# iterate over setlen to max len\n for j2 in range(setlen, maxlen):\n nolongerwrong=0\n nolongerwrongoriginal = 0 \n frameno2 = initialframe+j2\n if len1 > len2:\n next2 = findnextcluster(frameno2, last2)\n if next2 == result[j2]:\n nolongerwrong=1\n nolongerwrongoriginal=1\n v2 = errorclusters.get(frameno2)\n if v2 != None:\n errorarray = errorclusters[frameno2]\n for el in errorarray:\n if result[j2] in el:\n if next2 in el:\n nolongerwrong=1\n if nolongerwrong==1:\n booleanwrong.append(0)\n if nolongerwrongoriginal ==0:\n clustering_error= clustering_error+1\n else:\n booleanwrong.append(1)\n last2 = next2\n if len2 > len1:\n next2 = findnextclusterapp(frameno2, last2)\n if next2 == listclusterids[j2]:\n nolongerwrong=1\n nolongerwrongoriginal=1\n v2 = errorclusters.get(frameno2)\n if v2 != None:\n errorarray = errorclusters[frameno2]\n for el in errorarray:\n if listclusterids[j2] in el:\n if next2 in el:\n nolongerwrong=1\n if nolongerwrong==1:\n booleanwrong.append(0)\n if nolongerwrongoriginal==0:\n clustering_error=clustering_error+1\n else:\n booleanwrong.append(1) # wrong\n last2 = next2\n d1 = {}\n d2 = {}\n \n for j1 in range(0, setlen):\n frameno = initialframe+j1\n # check similar clusters\n clusters1 = od1[frameno]\n clusters2 = od2[frameno]\n\n simclusters = errorclusters.get(frameno)\n if simclusters != None:\n simarray = errorclusters[frameno]\n # check if matches\n for el in simarray:\n if od1[frameno] in el:\n clusters1.extend(el)\n if od2[frameno] in el:\n clusters2.extend(el)\n d1[frameno] = clusters1\n d2[frameno] = clusters2\n \n maxlen= max(len1, len2)\n # set original clusters for up to maxlen\n for j2 in range(setlen, maxlen):\n frameno = initialframe+j2\n d1[frameno] = []\n d2[frameno] = []\n if len1>len2:\n d1[frameno]= [result[j2]]\n if len2>len1:\n d2[frameno]=[listclusterids[j2]]\n\n d1[initialframe+maxlen] = []\n d2[initialframe+maxlen] = []\n setlen = min(len1, len2)\n maxlen= max(len1,len2)\n \n for j in range(0, maxlen):\n\n frameno = initialframe+j \n #print(\"j is\", j)\n #print(\"frame num is\", frameno)\n\n # all clusters within current frame\n clust1 = d1[frameno]\n clust2 = d2[frameno]\n\n\n # check clusters to next step\n for c1 in clust1:\n nc1 = findnextclusterapp(frameno, c1)\n if str(nc1) == \"nan\":\n #print(\"nc1 is nan\")\n continue\n #print(\"frameno plus one\", frameno+1)\n #print(\"nc1\", nc1)\n nclust1 = d1[frameno+1]\n # only append if not already there\n if nc1 not in nclust1:\n nclust1.append(nc1)\n #nclust1.append(nc1)\n #nclust1 = np.unique(nclust1)\n d1[frameno+1] = nclust1\n for c2 in clust2:\n nc2 = findnextcluster(frameno, c2)\n if nc2 == -1:\n continue\n nclust2 = d2[frameno+1]\n if nc2 not in nclust2:\n nclust2.append(nc2)\n d2[frameno+1] = nclust2\n if booleanwrong[j]==1:\n set1 = set(clust1)\n intersect = set1.intersection(clust2)\n if len(intersect)>0:\n print(\"intersect\")\n booleanwrong[j]=0\n clustering_error=clustering_error+1\n\n bindex = 1\n firstframewrong = -1\n for b in booleanwrong:\n if b == 1:\n firstframewrong = bindex\n break\n bindex= bindex+1\n\n if firstframewrong == -1:\n totalcomparisons= 
totalcomparisons+maxlen\n else:\n totalcomparisons= totalcomparisons+firstframewrong\n \n contflag = 0\n missedflag= 0\n \n # print length of array found\n print(\"len of listclusterids\", len(listclusterids))\n \n if np.sum(booleanwrong) >0:\n if firstframewrong>len1:\n print(\"continuedly\")\n print(\"array1\", result) # array1 is app result\n print(\"array2\", listclusterids)\n contflag= 1\n continuedmatching= continuedmatching+1\n if firstframewrong > len2: \n print(\"missedly\")\n print(\"array1\", result)\n print(\"array2\", listclusterids)\n missedmatching = missedmatching+1\n missedflag=1\n if contflag==0 and missedflag==0:\n print(\"wrongly\")\n print(\"array1\", result)\n print(\"array2\", listclusterids)\n wrongmatching= wrongmatching+1\n \n if contflag==1 and missedflag==1:\n print(\"BOTH CONTINUED AND MISSED\")\n \nprint(\"missed matchings:\", missedmatching)\nprint(\"cont matchings:\", continuedmatching)\nprint(\"wrong matchings:\", wrongmatching)\nprint(\"total :\", totalcomparisons)\nprint(\"clust. errors:\", clustering_error)\n","sub_path":"may2020/score_functions/score_hybrid_method1_complete.py","file_name":"score_hybrid_method1_complete.py","file_ext":"py","file_size_in_byte":21198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"508337680","text":"import tkinter as tk\nimport logic.clientType\nimport view.tableFrame\n\n\nclass StatisticWindow(tk.Frame):\n def __init__(self, master, previous_frame, window_list, case_list, stats_view, **kw):\n super().__init__(master, **kw)\n\n self._window_list = window_list\n self._case_list = case_list\n self._before = previous_frame\n self._stats_view = stats_view\n\n self._table = None\n self._button_dic = None\n\n frame = tk.Frame(self.master)\n\n table_section = tk.Frame(frame)\n buttons_section = tk.Frame(frame)\n\n self.init_table_section(table_section)\n self.init_buttons_section(buttons_section)\n\n table_section.pack(ipady=20)\n buttons_section.pack()\n frame.grid(row=0, column=0, sticky='nwes')\n frame.tkraise()\n\n self._button_dic[logic.clientType.ClientType.NORMAL].invoke()\n\n def init_table_section(self, master):\n self._table = view.tableFrame.TableFrame(master, len(self._case_list) + 2, len(self._window_list) + 2)\n for i, window in zip(range(self._table.column_number), self._window_list):\n self._table.change_value(0, i + 1, str(window))\n\n for i, case in zip(range(self._table.row_number), self._case_list):\n self._table.change_value(i + 1, 0, str(case))\n\n self._table.change_value(0, self._table.column_number - 1, 'Łacznie')\n self._table.change_value(self._table.row_number - 1, 0, 'Łacznie')\n\n def init_buttons_section(self, master):\n ct = logic.clientType.ClientType\n\n toolbox = tk.Frame(master)\n frame_stats_options = tk.Frame(master)\n\n button_style = {'width': 10, 'borderwidth': 3}\n\n self._button_dic = {\n ct.NORMAL: tk.Button(toolbox, button_style, text='Normalny',\n command=lambda: self.change_view(ct.NORMAL)),\n ct.VIP: tk.Button(toolbox, button_style, text='VIP', command=lambda: self.change_view(ct.VIP)),\n 'back': tk.Button(frame_stats_options, button_style, text=\"Powrót\", command=self.back_to_previous_frame)}\n\n toolbox.pack(side='left', ipadx=20)\n frame_stats_options.pack()\n\n for i in self._button_dic.values():\n i.pack(side='left')\n\n def change_view(self, client_type):\n ct = logic.clientType.ClientType\n if client_type == ct.VIP:\n self._button_dic[ct.VIP].configure(relief='sunken', state='disabled')\n 
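# sink/disable the selected button and raise/re-enable the other, so the
# two toolbox buttons behave as a mutually exclusive NORMAL/VIP toggle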
self._button_dic[ct.NORMAL].configure(relief='raised', state='active')\n else:\n self._button_dic[ct.NORMAL].configure(relief='sunken', state='disabled')\n self._button_dic[ct.VIP].configure(relief='raised', state='active')\n\n sum_row = [0] * len(self._case_list)\n sum_col = [0] * len(self._window_list)\n\n for i in range(len(self._window_list)):\n for j, case in enumerate(self._case_list):\n data = self._window_list[i].time[client_type][case.id]\n\n value = sum(data)\n sum_row[j] += value\n sum_col[i] += value\n if value > 0:\n value = round(value/len(data), 2)\n self._table.change_value(j + 1, i + 1, value)\n\n for i in range(len(sum_row)):\n self._table.change_value(i + 1, self._table.column_number - 1, round(sum_row[i], 2))\n for i in range(len(sum_col)):\n self._table.change_value(self._table.row_number - 1, i + 1, round(sum_col[i], 2))\n\n def back_to_previous_frame(self):\n self._stats_view = False\n self._before.tkraise()\n","sub_path":"view/statisticWindow.py","file_name":"statisticWindow.py","file_ext":"py","file_size_in_byte":3612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"550119880","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Game',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('start_time', models.DateTimeField(auto_now_add=True)),\n ('last_active', models.DateTimeField(auto_now=True)),\n ('status', models.CharField(default=b'A', max_length=1, choices=[(b'A', b'Active'), (b'F', b'First Player Wins'), (b'S', b'Second Player Wins'), (b'D', b'Draw')])),\n ('first_player', models.ForeignKey(related_name='games_first_player', to=settings.AUTH_USER_MODEL)),\n ('next_to_move', models.ForeignKey(related_name='games_to_move', to=settings.AUTH_USER_MODEL)),\n ('second_player', models.ForeignKey(related_name='games_second_player', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Move',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('x', models.IntegerField()),\n ('y', models.IntegerField()),\n ('comment', models.CharField(max_length=300)),\n ('game', models.ForeignKey(to='tictactoe.Game')),\n ],\n ),\n ]\n","sub_path":"tictactoe/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"280517003","text":"from telegram.ext import Updater, Filters, CommandHandler, ConversationHandler, RegexHandler, MessageHandler\nfrom telegram import (ReplyKeyboardMarkup, ReplyKeyboardRemove)\nfrom telegram.error import (TelegramError, Unauthorized, BadRequest, \n TimedOut, ChatMigrated, NetworkError)\nimport logging\n\nfrom bs4 import BeautifulSoup\nimport urllib.request\nimport requests\n\nimport json\n\nfrom webtoon import WebtoonCralwer\nimport weather\nimport secret\n\nlogging.basicConfig(\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', \n level=logging.INFO\n)\nlogger = logging.getLogger(__name__)\n\nSTART, SELECT, DOWNLOAD = range(3)\n\ndef get_daily_webtoon_list():\n webtoon_list = []\n res = 
urllib.request.urlopen('https://comic.naver.com/webtoon/weekday.nhn').read().decode('utf-8')\n bs_obj = BeautifulSoup(res, 'html.parser')\n for wt in bs_obj.select('.col_selected > .col_inner > ul > li'):\n webtoon_name = wt.find('a', recursive=False).text\n webtoon_list.append(webtoon_name)\n return webtoon_list\n\ndef start(bot, update, *args, **kargs):\n daily_webtoon = '\\n'.join(get_daily_webtoon_list())\n update.message.reply_text(\n '===오늘의 웹툰 목록===\\n' + daily_webtoon,\n reply_markup=ReplyKeyboardRemove()\n )\n return SELECT\n\ndef select(bot, update):\n global selected_wt \n selected_wt = WebtoonCralwer(update.message.text)\n reply_keyboard = [selected_wt.published_recently().keys()]\n update.message.reply_text(\n 'What u wanna download?',\n reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))\n return DOWNLOAD\n\ndef download(bot, update):\n zip_url = selected_wt.download(update.message.text)\n bot.send_document(chat_id=update.message.chat_id, document=open(zip_url,'rb'))\n return ConversationHandler.END\n\ndef cancel(bot, update):\n update.message.reply_text('Bye! I hope we can talk again some day',\n reply_markup=ReplyKeyboardRemove())\n return ConversationHandler.END\n\ndef weatherCommand(bot, update):\n update.message.reply_text(weather.getWeather())\n\ndef error_callback(bot, update, error):\n try:\n raise error\n except Unauthorized as e:\n print(e)\n except BadRequest as e:\n print(e)\n except TimedOut as e:\n print(e)\n except NetworkError as e:\n print(e)\n except ChatMigrated as e:\n print(e)\n except TelegramError as e:\n print(e)\n\ndef main():\n print('start telegram chat bot')\n my_token = secret.my_token\n updater = Updater(my_token)\n\n weather_handler = CommandHandler('weather', weatherCommand)\n conv_handler = ConversationHandler(\n entry_points = [CommandHandler('webtoon', start)],\n states={\n SELECT : [MessageHandler(Filters.text, select)],\n DOWNLOAD : [MessageHandler(Filters.text, download),\n CommandHandler('exit', cancel)]\n },\n fallbacks=[CommandHandler('cancel', cancel)]\n )\n updater.dispatcher.add_handler(conv_handler)\n updater.dispatcher.add_handler(weather_handler)\n updater.dispatcher.add_error_handler(error_callback)\n updater.start_polling()\n updater.idle()\n\nif __name__ == '__main__':\n main()","sub_path":"telegram_conv.py","file_name":"telegram_conv.py","file_ext":"py","file_size_in_byte":3263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"630668819","text":"from sklearn.metrics import cohen_kappa_score\nimport xlrd\n\nlabel_path_1 = r\"C:\\Users\\Yingbo\\Desktop\\Kappa\\With Jule\\Jule.xlsx\"\nlabel_path_2 = r\"C:\\Users\\Yingbo\\Desktop\\Kappa\\With Jule\\Yingbo.xlsx\"\n\ndef excel_data(file_path):\n data = xlrd.open_workbook(file_path)\n table = data.sheet_by_index(0)\n\n nrows = table.nrows\n ncols = table.ncols\n\n excel_list = []\n for row in range(0, nrows):\n for col in range(ncols):\n cell_value = int(table.cell(row, col).value)\n excel_list.append(cell_value)\n return excel_list\n\nlist1 = excel_data(label_path_1)\nlist2 = excel_data(label_path_2)\n\nprint(\"The cohen_kappa_score between two annotators is: \", cohen_kappa_score(list1, list2))\n\n","sub_path":"Basic Code/FinalTest/kappa.py","file_name":"kappa.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"444360161","text":"# 
https://www.linkedin.com/pulse/dimensionality-reduction-using-tsne-python-deepak-kumar\n# t-Distributed Stochastic Neighbor Embedding (t-SNE)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.decomposition import PCA\nfrom sklearn.datasets import fetch_mldata\n\nmnist = fetch_mldata(\"MNIST original\")\nX = mnist.data / 255.0\ny = mnist.target\n\nfeat_cols = [ 'pixel'+str(i) for i in range(X.shape[1]) ]\n\ndf = pd.DataFrame(X, columns=feat_cols)\ndf['label'] = y\ndf['label'] = df['label'].apply(lambda i: str(i))\n\nX, y = None, None\nprint('Size of the dataframe: {}'.format(df.shape))\nrndperm = np.random.permutation(df.shape[0])\n\n# Plot the graph\n'''plt.gray()\nfig = plt.figure( figsize=(16,7) )\nfor i in range(0,30):\n ax = fig.add_subplot(3,10,i+1, title='Digit: ' + str(df.loc[rndperm[i],'label']) )\n ax.matshow(df.loc[rndperm[i],feat_cols].values.reshape((28,28)).astype(float))\n\nplt.show()'''\n\n# tSNE\nfrom sklearn.manifold import TSNE\nn_sne = 7000\n\ntsne = TSNE(n_components=2, verbose=1, perplexity=40, n_iter=300)\ntsne_results = tsne.fit_transform(df.loc[rndperm[:n_sne], feat_cols].values)\n\ndf_tsne = df.loc[rndperm[:n_sne],:].copy()\ndf_tsne['x-tsne'] = tsne_results[:,0]\ndf_tsne['y-tsne'] = tsne_results[:,1]","sub_path":"dimension_reduction/mnist_tSNE.py","file_name":"mnist_tSNE.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"410827418","text":"import telebot\nfrom telebot import types\n\nbot = telebot.TeleBot('343937060:AAE4vSFxBvu8rNfOEHSremf3v9jMqvI7-2k')\n\n@bot.message_handler(commands=['start','help'])\ndef select_lang(message):\n lang = types.InlineKeyboardMarkup(row_width=2)\n eng = types.InlineKeyboardButton('English \\U0001f1f1\\U0001f1f7', callback_data='english')\n heb = types.InlineKeyboardButton('עברית \\U0001f1ee\\U0001f1f1', callback_data='hebrew')\n lang.add(eng, heb)\n bot.send_message(message.chat.id, 'ברוכים הבאים, בחרו את השפה שלכם:\\n\\nWelcome, please select your language:', reply_markup=lang)\n\n@bot.callback_query_handler(func=lambda call: True)\ndef callback_inline(call):\n eng = types.InlineKeyboardMarkup()\n e = types.InlineKeyboardButton('To select a language click here\\U0001f448', callback_data='lang')\n eng.add(e)\n heb = types.InlineKeyboardMarkup()\n h = types.InlineKeyboardButton('לבחירת שפה לחץ כאן\\U0001f449', callback_data='lang')\n heb.add(h)\n lang = types.InlineKeyboardMarkup(row_width=2)\n english = types.InlineKeyboardButton('English \\U0001f1f1\\U0001f1f7', callback_data='english')\n hebrew = types.InlineKeyboardButton('עברית \\U0001f1ee\\U0001f1f1', callback_data='hebrew')\n lang.add(english, hebrew)\n id = call.from_user.id\n name = call.from_user.first_name\n username = call.from_user.username\n if call.data == 'english':\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text='Hello ' +name +'!\\nYour username is: @' +username +'\\nYour ID is: '+str(id), reply_markup=eng)\n elif call.data == 'hebrew':\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text='שלום ' +name +'!\\nהיוזר שלך: @' +username +'\\nה-ID שלך: '+str(id), reply_markup=heb)\n elif call.data == 'lang':\n msg = bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text='ברוכים הבאים, בחרו את השפה שלכם:\\n\\nWelcome, please select your language:', reply_markup=lang)\n bot.register_next_step_handler(msg, 
callback_inline)\n else:\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text='Error')\n\ndef inline_id(inl):\n id = inl.from_user.id\n first_name = inl.from_user.first_name\n last_name = inl.from_user.last_name\n name = ' '.join([first_name, last_name])\n username = inl.from_user.username\n return 'השם שלי: ' +name +'.\\nה-ID שלי: ' +str(id) +'\\nהיוזר שלי: @' +username\n\n@bot.inline_handler(lambda query: len(query.query) is 0)\ndef default_query(inline_query):\n r = types.InlineQueryResultArticle('1', 'שתף את הפרטים שלך', types.InputTextMessageContent(inline_id(inline_query)))\n bot.answer_inline_query(inline_query.id, [r])\n\n\nprint('Listening...')\n\nbot.polling(none_stop=True)","sub_path":"showID_bot.py","file_name":"showID_bot.py","file_ext":"py","file_size_in_byte":2929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"253592753","text":"from CashChecks import aa_top_apps, aa_top_apps_data, top_apps_db_setup, db_list, unified_list, remove_dupes, dupes_output_list\n\ndaterange = ['2015-07-01', '2015-06-01']\ncountrylist = ['FR', 'WW']\n\nfor dater in daterange:\n for ctry in countrylist:\n\n #Calls App Annie Top Apps API, data output into aa_top_apps_data\n # def aa_top_apps(market, device, country, category, date, feeds, granularity, ranks)\n aa_top_apps(market='ios', date=dater, country=ctry, feeds='free grossing', granularity='daily', category='Overall > Health and Fitness', ranks=5)\n\n\n# print(aa_top_apps_data)\n\n# Prepares API output for Top Apps to import into apps DB (db_list) and unified DB (unified_list)\ntop_apps_db_setup(aa_top_apps_data)\n\nprint(db_list)\n\nprint('--------------')\n\nremove_dupes(unified_list)\n\nunified_db_list = dupes_output_list\nprint(unified_db_list)","sub_path":"GitCommit/Index.py","file_name":"Index.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"321817910","text":"#coding=UTF-8\r\nimport sys, re\r\n\r\ndef main(date, title, author, thumbnail):\r\n\r\n ''' \r\n Instruktioner:\r\n python blogtojson.py datum \"titel\" författare\r\n -> då bör texten finnas i filen datum.txt\r\n och då blir titeln på inlägget \"titel\"\r\n och inlägget skrivet av författare\r\n '''\r\n\r\n# For searching the textfile and seeing if its an image or vid, instead of a paragraph\r\n im_pattern = r'.jpg'\r\n im_pattern2 = r'.JPG'\r\n im_pattern3 = r'.PNG'\r\n im_pattern4 = r'.jpeg'\r\n video_pattern = r'.mp4'\r\n\r\n try:\r\n textfil = open(date +'.txt', 'r')\r\n except:\r\n print('Textfile to be read from was not found! 
Did you write your blogpost in .txt format and name it correctly in cmd?')\r\n quit()\r\n\r\n jsonfil = open(r'json/' + date +'.json', 'w')\r\n\r\n jsonfil.write(r'''{\r\n \"info\": {\r\n\r\n \"tripID\": \"kroatien\",\r\n ''')\r\n jsonfil.write(r'\"date\": \"' + date + r'\",' + '\\n')\r\n jsonfil.write(r'\"title\": \"' + title + r'\",' + '\\n')\r\n jsonfil.write(r'\"author\": \"' + author + r'\",' + '\\n')\r\n jsonfil.write(r'\"thumbnail\": \"' + thumbnail + r'\"' + '\\n')\r\n jsonfil.write(r'''\r\n },\r\n \"content\": [\r\n ''')\r\n\r\n while True:\r\n line = textfil.readline() # this reads through the lines of the file one at a time\r\n line = line.replace('\\r', '')\r\n if line == '':\r\n break\r\n elif line == '\\n' or line.startswith('\\r'):\r\n continue\r\n elif re.search(im_pattern, line) or re.search(im_pattern2, line) or re.search(im_pattern3, line) or re.search(im_pattern4, line):\r\n print(\"Found image! \" + line)\r\n jsonfil.write(r'{' + '\\n')\r\n jsonfil.write(r'\"image\": \"' + line[:-1] + r'\"' + '\\n')\r\n jsonfil.write(r'},' + '\\n')\r\n elif re.search(video_pattern, line):\r\n print(\"Found video! \" + line)\r\n jsonfil.write(r'{' + '\\n')\r\n jsonfil.write(r'\"video\": \"' + line[:-1] + r'\"' + '\\n')\r\n jsonfil.write(r'},' + '\\n')\r\n else:\r\n print(\"Found paragraph! \" + line)\r\n jsonfil.write(r'{' + '\\n')\r\n jsonfil.write(r'\"paragraph\": \"' + line[:-1] + r'\"' + ' \\n')\r\n jsonfil.write(r'},' + '\\n')\r\n\r\n textfil.close()\r\n\r\n# remove last , and \\n for valid json file\r\n jsonfil.seek(-2,1)\r\n jsonfil.truncate()\r\n\r\n jsonfil.write(r'''\t\t\t\t\t\t\t\t\t\r\n ]\r\n }''')\r\n\r\n jsonfil.close()\r\n\r\n print('''\r\n ..........................\r\n Done!\r\n Printed to ''' + date + '.json\\n..........................')\r\n return;\r\n\r\nif __name__ == \"__main__\": \r\n if len(sys.argv) < 3:\r\n print('Syntax is: python blogtohtml.py date \"title\" author')\r\n quit()\r\n\r\n main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])\r\n","sub_path":"res/kroatien/blog/blogtojson.py","file_name":"blogtojson.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"123560082","text":"\"\"\"\nGUI chat client that implements the Observer design pattern.\n\"\"\"\n\n__author__ = 'Mike Woinoski (michaelw@articulatedesign.us.com)'\n\nimport tkinter as tk\nfrom chat_client import Observer\nfrom chat_room import ChatRoom\n\nroot = tk.Tk()\n\n# TODO: ChatClientGui is a tkinter GUI chat client. 
Although the GUI code\n# makes it more complex than the simple command client you completed before,\n# you'll see that the Subject/Observer interaction is exactly the same as in\n# the simpler chat client.\n# (no code change required)\n\n\n# TODO: make ChatClientGui a subclass of Observer\nclass ChatClientGui(Observer):\n last_y = 50\n\n # TODO: note the parameters to the ChatClientGui __init__() method\n # (no code changes required)\n def __init__(self, client_name, window, chat_room):\n self.create_widgets(client_name, window)\n\n # TODO: copy the 3 lines of code from the body of the\n # ChatClient __init__() method here\n super().__init__(chat_room)\n self.client_name = client_name\n self.chat_room = chat_room\n\n def create_widgets(self, name, window):\n if window != root:\n window = tk.Toplevel(root)\n window.geometry(f'+200+{ChatClientGui.last_y}')\n ChatClientGui.last_y += 250\n window.wm_title(name)\n self.frame = tk.Frame(window)\n self.frame.grid()\n self.messaging_field = \\\n tk.Text(self.frame, width=69, height=10, wrap=tk.WORD)\n self.messaging_field.grid(row=0, column=0, columnspan=2, sticky=tk.W)\n\n self.entry_field = tk.Entry(self.frame, width=92)\n self.entry_field.grid(row=1, column=0, sticky=tk.W)\n self.entry_field.bind('', self.callback)\n\n self.frame.pack()\n\n def add_text(self, data):\n self.messaging_field.insert(tk.END, data)\n\n # TODO: copy the new_message() method from ChatClient here\n def new_message(self, message):\n self.chat_room.add_message(self.client_name, message)\n\n def callback(self, event):\n # TODO: note how we get the message text from the GUI's entry field.\n # (no code change required)\n message = self.entry_field.get()\n\n self.entry_field.delete(0, tk.END)\n print('got message ' + message)\n self.add_text('(me) ' + message + '\\n')\n\n # TODO: call the ChatClientGui's new_message() method to send the text\n # from the entry field to the chat room\n self.new_message(message)\n\n # TODO: note that the update() method parameter list and the first 3 lines\n # of code are exactly the same as in the plain ChatClient class.\n # (no code change required)\n def update(self, chat_msg):\n id = chat_msg.id\n value = chat_msg.value\n print(f'\\tMessage from {id}: \"{value}\"')\n\n # TODO: note the call to add_text(), which adds the new chat message\n # to the client's output window\n # (no code change required)\n if self.client_name != chat_msg.id:\n self.add_text(f'({chat_msg.id}) {chat_msg.value}\\n')\n\n\ndef main():\n # TODO: create a ChatRoom instance and assign it to a variable named\n # `chat_host`\n chat_host = ChatRoom()\n\n # TODO: note the chat_host argument to the ChatClientGui constructor for\n # all three ChatClientGui instances\n # (no code change required)\n chat_client1 = ChatClientGui('Client 1', root, chat_host)\n chat_client2 = ChatClientGui('Client 2', chat_client1, chat_host)\n chat_client3 = ChatClientGui('Client 3', chat_client1, chat_host)\n\n root.mainloop()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"exercises/solution_ex06_design_patterns/chat_gui_client.py","file_name":"chat_gui_client.py","file_ext":"py","file_size_in_byte":3663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"177643647","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 14 14:30:58 2021\n\n@author: dansh\n\"\"\"\n\nimport izhikevich_cells as izh\n\n\n\nclass CCell(izh.izhCell):\n def __init__(self, stimVal):\n super().__init__(stimVal)\n self.celltype = 'Chattering_Cell'\n self.C 
= 50\n        self.vr = -60\n        self.vt = -40\n        self.k = 1.5\n        self.a = 0.03\n        self.b = 1\n        self.c = -40\n        self.d = 150\n        self.vpeak = 25\n        self.stimVal = stimVal\n        \ndef createCell():\n    myCell = CCell(stimVal=4000) \n    myCell.simulate()\n    izh.plotMyData(myCell)\n    \nif __name__=='__main__':\n    createCell()\n","sub_path":"chattering_cell.py","file_name":"chattering_cell.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"527709305","text":"#!/usr/bin/env python3\nimport boto3\nimport time\nimport subprocess\n\nname_bucket='manzi2019'\nec2 = boto3.resource('ec2')\ninstance = ec2.create_instances(\n    ImageId='ami-047bb4163c506cd98',\n    MinCount=1,\n    MaxCount=1,\n    InstanceType='t2.micro',\n    KeyName='manziAssKey',\n    SecurityGroupIds=['sg-0d7a4dab022b37f9d'],\n    UserData='''\n\t\t#!/bin/bash\n\t\tyum update\n\t\tyum install httpd -y\n\t\tservice httpd start''',\n    TagSpecifications=[\n\t{\n\t\t'ResourceType': 'instance',\n\t\t'Tags':[\n\t\t\t{\n\t\t\t\t'Key': 'Name',\n\t\t\t\t'Value': 'Manzi'\n\t\t\t},\n\t\t\t]\n\t},\n\t])\nprint (instance[0].id)\n\ninstance[0].wait_until_running()\ninstance[0].reload()\nprint(instance[0].public_ip_address)\nip_add=instance[0].public_ip_address\n\ntime.sleep(10)\n\ncmd1=\"ssh -o StrictHostKeyChecking=no -i manziAssKey.pem ec2-user@\" + ip_add + \" 'pwd'\"\nprint(cmd1)\ncmd2='curl http://devops.witdemo.net/image.jpg > image.jpg'\ncmd4=\"curl https://s3-eu-west-1.amazonaws.com/\"+name_bucket+\"/image.jpg > bucket_image.jpg\"\ncmd5=\"ssh -o StrictHostKeyChecking=no -i manziAssKey.pem ec2-user@\" + ip_add + \" 'curl http://169.254.169.254/latest/meta-data/local-ipv4' > meta.json\"\n\ncmd_html1=\"ssh -o StrictHostKeyChecking=no -i manziAssKey.pem ec2-user@\" + ip_add + \" 'echo \\\"\\\" > index.html'\"\ncmd_html2=\"ssh -o StrictHostKeyChecking=no -i manziAssKey.pem ec2-user@\" + ip_add + \" 'echo \\'Private IP Address:\\' >>index.html'\"\ncmd_html3=\"ssh -o StrictHostKeyChecking=no -i manziAssKey.pem ec2-user@\" + ip_add + \" 'curl http://169.254.169.254/latest/meta-data/local-ipv4 >> index.html'\"\ncmd_html4=\"ssh -o StrictHostKeyChecking=no -i manziAssKey.pem ec2-user@\" + ip_add + \" 'echo \\\"Here is the image:\\\" >> index.html'\"\ncmd_html5=\"ssh -o StrictHostKeyChecking=no -i manziAssKey.pem ec2-user@\" + ip_add + \" 'echo \\\"\\\" >> index.html'\"\ncmd_html6=\"ssh -o StrictHostKeyChecking=no -i manziAssKey.pem ec2-user@\" + ip_add + \" 'sudo cp index.html /var/www/html'\"\n\n\nsubprocess.call(cmd2,shell=True)\nsubprocess.call(cmd4,shell=True)\nsubprocess.call(cmd5,shell=True)\nsubprocess.call(cmd_html1,shell=True)\nsubprocess.call(cmd_html2,shell=True)\nsubprocess.call(cmd_html3,shell=True)\nsubprocess.call(cmd_html4,shell=True)\nsubprocess.call(cmd_html5,shell=True)\n\ntime.sleep(10)\n\nsubprocess.call(cmd_html6,shell=True)\ninstance[0].reload()\n","sub_path":"create_instance.py","file_name":"create_instance.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"329299551","text":"''' print ('its a beautiful day in Keller')\n\nprint (\"It's a sunny day, \\n in Keller\") # \\n prints in a new line \nprint (\"\"\" This is another way to print \n over multiple lines\"\"\")\nprint ('Here is a double quote \" ' + \"Here is a single quote ' \" ) # print the double quote + single quote\n\nprint('Or we can put it like this \\\" to get the double quote to print')\n\nprint (\"It's a sunny day, \\\\news in Keller\") # \\\\ before the n to print the n letter \n\n# Collect the user's name\nname = input(\"What is your name? \")\n\n# Display the name\nprint (name)\n\n#Update\nname ='Removed first input name, adding something different'\nprint(name) '''\n\nfirstName = input(\"What is your First name?\")\nfirstName = firstName.swapcase() # swapcase, Lower, upper\nlastName = input(\"What is your Last name?\")\nprint (\"\\nHello \" + firstName +\" \"+ lastName)\n\n\n\n\n\n\n\n","sub_path":"From Zero to Hero Video 11 Hours/From_Zero_to_Hero_Video_11_Hours.py","file_name":"From_Zero_to_Hero_Video_11_Hours.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"386606955","text":"from concurrent.futures import Future\nfrom dataclasses import dataclass\nfrom typing import Optional, List, Dict, Any, Union, Iterable\nimport agate\n\nimport dbt\nimport dbt.exceptions\n\nfrom dbt.adapters.base import AdapterConfig\nfrom dbt.adapters.base.impl import catch_as_completed\nfrom dbt.adapters.sql import SQLAdapter\nfrom dbt.adapters.spark import SparkConnectionManager\nfrom dbt.adapters.spark import SparkRelation\nfrom dbt.adapters.spark import SparkColumn\nfrom dbt.adapters.base import BaseRelation\nfrom dbt.clients.agate_helper import DEFAULT_TYPE_TESTER\nfrom dbt.logger import GLOBAL_LOGGER as logger\nfrom dbt.utils import executor\n\nGET_COLUMNS_IN_RELATION_MACRO_NAME = 'get_columns_in_relation'\nLIST_SCHEMAS_MACRO_NAME = 'list_schemas'\nLIST_RELATIONS_MACRO_NAME = 'list_relations_without_caching'\nDROP_RELATION_MACRO_NAME = 'drop_relation'\nFETCH_TBL_PROPERTIES_MACRO_NAME = 'fetch_tbl_properties'\n\nKEY_TABLE_OWNER = 'Owner'\nKEY_TABLE_STATISTICS = 'Statistics'\n\n\n@dataclass\nclass SparkConfig(AdapterConfig):\n    file_format: str = 'parquet'\n    location_root: Optional[str] = None\n    partition_by: Optional[Union[List[str], str]] = None\n    clustered_by: Optional[Union[List[str], str]] = None\n    buckets: Optional[int] = None\n\n\nclass SparkAdapter(SQLAdapter):\n    COLUMN_NAMES = (\n        'table_database',\n        'table_schema',\n        'table_name',\n        'table_type',\n        'table_comment',\n        'table_owner',\n        'column_name',\n        'column_index',\n        'column_type',\n        
'column_comment',\n\n 'stats:bytes:label',\n 'stats:bytes:value',\n 'stats:bytes:description',\n 'stats:bytes:include',\n\n 'stats:rows:label',\n 'stats:rows:value',\n 'stats:rows:description',\n 'stats:rows:include',\n )\n\n Relation = SparkRelation\n Column = SparkColumn\n ConnectionManager = SparkConnectionManager\n AdapterSpecificConfigs = SparkConfig\n\n @classmethod\n def date_function(cls) -> str:\n return 'current_timestamp()'\n\n @classmethod\n def convert_text_type(cls, agate_table, col_idx):\n return \"string\"\n\n @classmethod\n def convert_number_type(cls, agate_table, col_idx):\n decimals = agate_table.aggregate(agate.MaxPrecision(col_idx))\n return \"double\" if decimals else \"bigint\"\n\n @classmethod\n def convert_date_type(cls, agate_table, col_idx):\n return \"date\"\n\n @classmethod\n def convert_time_type(cls, agate_table, col_idx):\n return \"time\"\n\n @classmethod\n def convert_datetime_type(cls, agate_table, col_idx):\n return \"timestamp\"\n\n def quote(self, identifier):\n return '`{}`'.format(identifier)\n\n def add_schema_to_cache(self, schema) -> str:\n \"\"\"Cache a new schema in dbt. It will show up in `list relations`.\"\"\"\n if schema is None:\n name = self.nice_connection_name()\n dbt.exceptions.raise_compiler_error(\n 'Attempted to cache a null schema for {}'.format(name)\n )\n if dbt.flags.USE_CACHE:\n self.cache.add_schema(None, schema)\n # so jinja doesn't render things\n return ''\n\n def list_relations_without_caching(\n self, schema_relation: SparkRelation\n ) -> List[SparkRelation]:\n kwargs = {'schema_relation': schema_relation}\n try:\n results = self.execute_macro(\n LIST_RELATIONS_MACRO_NAME,\n kwargs=kwargs,\n release=True\n )\n except dbt.exceptions.RuntimeException as e:\n errmsg = getattr(e, 'msg', '')\n if f\"Database '{schema_relation}' not found\" in errmsg:\n return []\n else:\n description = \"Error while retrieving information about\"\n logger.debug(f\"{description} {schema_relation}: {e.msg}\")\n return []\n\n relations = []\n for row in results:\n if len(row) != 4:\n raise dbt.exceptions.RuntimeException(\n f'Invalid value from \"show table extended ...\", '\n f'got {len(row)} values, expected 4'\n )\n _schema, name, _, information = row\n rel_type = ('view' if 'Type: VIEW' in information else 'table')\n relation = self.Relation.create(\n schema=_schema,\n identifier=name,\n type=rel_type\n )\n relations.append(relation)\n\n return relations\n\n def get_relation(\n self, database: str, schema: str, identifier: str\n ) -> Optional[BaseRelation]:\n if not self.Relation.include_policy.database:\n database = None\n\n return super().get_relation(database, schema, identifier)\n\n def parse_describe_extended(\n self,\n relation: Relation,\n raw_rows: List[agate.Row]\n ) -> List[SparkColumn]:\n # Convert the Row to a dict\n dict_rows = [dict(zip(row._keys, row._values)) for row in raw_rows]\n # Find the separator between the rows and the metadata provided\n # by the DESCRIBE TABLE EXTENDED statement\n pos = self.find_table_information_separator(dict_rows)\n\n # Remove rows that start with a hash, they are comments\n rows = [\n row for row in raw_rows[0:pos]\n if not row['col_name'].startswith('#')\n ]\n metadata = {\n col['col_name']: col['data_type'] for col in raw_rows[pos + 1:]\n }\n\n raw_table_stats = metadata.get(KEY_TABLE_STATISTICS)\n table_stats = SparkColumn.convert_table_stats(raw_table_stats)\n return [SparkColumn(\n table_database=None,\n table_schema=relation.schema,\n table_name=relation.name,\n table_type=relation.type,\n 
table_owner=metadata.get(KEY_TABLE_OWNER),\n table_stats=table_stats,\n column=column['col_name'],\n column_index=idx,\n dtype=column['data_type'],\n ) for idx, column in enumerate(rows)]\n\n @staticmethod\n def find_table_information_separator(rows: List[dict]) -> int:\n pos = 0\n for row in rows:\n if not row['col_name'] or row['col_name'].startswith('#'):\n break\n pos += 1\n return pos\n\n def get_columns_in_relation(self, relation: Relation) -> List[SparkColumn]:\n rows: List[agate.Row] = super().get_columns_in_relation(relation)\n return self.parse_describe_extended(relation, rows)\n\n def _get_columns_for_catalog(\n self, relation: SparkRelation\n ) -> Iterable[Dict[str, Any]]:\n properties = self.get_properties(relation)\n columns = self.get_columns_in_relation(relation)\n owner = properties.get(KEY_TABLE_OWNER)\n\n for column in columns:\n if owner:\n column.table_owner = owner\n # convert SparkColumns into catalog dicts\n as_dict = column.to_dict()\n as_dict['column_name'] = as_dict.pop('column', None)\n as_dict['column_type'] = as_dict.pop('dtype')\n as_dict['table_database'] = None\n yield as_dict\n\n def get_properties(self, relation: Relation) -> Dict[str, str]:\n properties = self.execute_macro(\n FETCH_TBL_PROPERTIES_MACRO_NAME,\n kwargs={'relation': relation}\n )\n return dict(properties)\n\n def get_catalog(self, manifest):\n schema_map = self._get_catalog_schemas(manifest)\n if len(schema_map) != 1:\n dbt.exceptions.raise_compiler_error(\n f'Expected only one database in get_catalog, found '\n f'{list(schema_map)}'\n )\n\n with executor(self.config) as tpe:\n futures: List[Future[agate.Table]] = []\n for info, schemas in schema_map.items():\n for schema in schemas:\n futures.append(tpe.submit(\n self._get_one_catalog, info, [schema], manifest\n ))\n catalogs, exceptions = catch_as_completed(futures)\n return catalogs, exceptions\n\n def _get_one_catalog(\n self, information_schema, schemas, manifest,\n ) -> agate.Table:\n name = f'{information_schema.database}.information_schema'\n\n if len(schemas) != 1:\n dbt.exceptions.raise_compiler_error(\n f'Expected only one schema in spark _get_one_catalog, found '\n f'{schemas}'\n )\n\n database = information_schema.database\n schema = list(schemas)[0]\n\n with self.connection_named(name):\n columns: List[Dict[str, Any]] = []\n for relation in self.list_relations(database, schema):\n logger.debug(\"Getting table schema for relation {}\", relation)\n columns.extend(self._get_columns_for_catalog(relation))\n return agate.Table.from_object(\n columns, column_types=DEFAULT_TYPE_TESTER\n )\n\n def check_schema_exists(self, database, schema):\n results = self.execute_macro(\n LIST_SCHEMAS_MACRO_NAME,\n kwargs={'database': database}\n )\n\n exists = True if schema in [row[0] for row in results] else False\n return exists\n","sub_path":"dbt/adapters/spark/impl.py","file_name":"impl.py","file_ext":"py","file_size_in_byte":9301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"557361181","text":"import os\nimport re\n\nfrom vcd.gtkw import GTKWSave\n\nfrom pygears import find, registry\nfrom pygears.common.decoupler import decoupler_din\nfrom pygears.core.port import OutPort as GearOutPort\nfrom pygears.rtl.port import InPort, OutPort\nfrom pygears.sim import sim_log\nfrom pygears.sim.extens.graphviz import graph\nfrom pygears.sim.extens.vcd import module_sav\nfrom pygears.sim.modules.sim_socket import SimSocket\n\n\ndef _get_end_consumer_rec(intf, consumers):\n for port in 
intf.consumers:\n cons_intf = port.consumer\n\n if isinstance(port, InPort) and (not cons_intf.consumers):\n consumers.append(port)\n else:\n _get_end_consumer_rec(cons_intf, consumers)\n\n\ndef get_end_consumer(intf):\n consumers = []\n _get_end_consumer_rec(intf, consumers)\n return consumers\n\n\ndef _get_producer_rec(intf, producers):\n if isinstance(intf.producer, OutPort) or isinstance(\n intf.producer, GearOutPort):\n producers.append(intf.producer)\n else:\n _get_producer_rec(intf.producer, producers)\n\n\ndef get_producer(intf):\n producers = []\n _get_producer_rec(intf, producers)\n return producers\n\n\ndef find_target_prod(intf):\n # Who is producer gear?\n # prod_rtl_port = intf.producer\n end_p = get_producer(intf)\n if len(end_p) != 1:\n return None\n if isinstance(end_p[0], OutPort):\n prod_rtl_port = end_p[0]\n prod_rtl_node = prod_rtl_port.node\n prod_gear = prod_rtl_node.gear\n else:\n prod_gear = end_p[0].gear\n if len(prod_gear.child):\n if len(prod_gear.child) > 1:\n sim_log().warning(\n f'ActivityCosim: prod has more than one child. Setting on first.'\n )\n return prod_gear.child[0]\n else:\n return prod_gear\n\n\ndef find_target_cons(intf):\n # Who is consumer port? Case when not broadcast!\n # cons_rtl_port = intf.consumers[0]\n end_c = get_end_consumer(intf)\n if len(end_c) != 1:\n if len(end_c) > 1:\n sim_log().debug(f'Find target cons: found broadcast')\n return None\n cons_rtl_port = end_c[0]\n cons_rtl_node, port_id = cons_rtl_port.node, cons_rtl_port.index\n cons_gear = cons_rtl_node.gear\n cons_port = cons_gear.in_ports[port_id]\n\n return cons_port\n\n\ndef find_target_intf(gear_name, intf_name):\n gear_mod = find(gear_name)\n rtl_node = registry('rtl/map/node')[gear_mod].node\n\n intf_name = intf_name[1:] # spy name always starts with _\n for i in rtl_node.local_interfaces():\n if registry('svgen/map')[i].basename == intf_name:\n return i\n\n\ndef set_waiting_edge(g, port):\n g.edge_map[port].set_color('blue')\n g.edge_map[port].set_penwidth(6)\n\n\ndef set_blocking_edge(g, port):\n g.edge_map[port].set_color('red')\n g.edge_map[port].set_penwidth(6)\n\n\ndef set_blocking_node(g, module):\n g.node_map[module].set_fillcolor('red')\n g.node_map[module].set_style('filled')\n\n\nclass ActivityReporter:\n def __init__(self, top, draw_graph=True, cosim_check=False):\n sim = registry('sim/simulator')\n sim.events['before_run'].append(self.before_run)\n sim.events['after_run'].append(self.after_run)\n self.blockers = {}\n self.draw_graph = draw_graph\n self.cosim_check = cosim_check\n\n def intf_pull_start(self, intf):\n consumer = intf.producer\n producer = intf.in_queue.intf.consumers[0]\n self.blockers[consumer] = producer\n return True\n\n def intf_pull_done(self, intf):\n consumer = intf.producer\n del self.blockers[consumer]\n return True\n\n def before_run(self, sim):\n sim_map = registry('sim/map')\n\n for module, sim_gear in sim_map.items():\n for p in module.in_ports:\n p.consumer.events['pull_start'].append(self.intf_pull_start)\n p.consumer.events['pull_done'].append(self.intf_pull_done)\n\n def after_run(self, sim):\n\n if self.draw_graph:\n g = graph(\n outdir=registry('sim/artifact_dir'),\n node_filter=lambda g: not g.child)\n else:\n g = None\n\n blocking_gears = set()\n cosim_name = None\n for sim_gear in sim.sim_gears:\n if isinstance(sim_gear, SimSocket):\n cosim_name = sim_gear.gear.name\n break\n if cosim_name and self.cosim_check:\n self.cosim_activity(g, cosim_name)\n self.sim_gears_activity(g, sim, blocking_gears)\n\n if self.draw_graph:\n 
outdir = registry('sim/artifact_dir')\n g.graph.write_svg(os.path.join(outdir, 'proba.svg'))\n\n try:\n vcd_writer = registry('VCD')\n except KeyError:\n return\n\n with open(os.path.join(outdir, 'issue.gtkw'), 'w') as f:\n gtkw = GTKWSave(f)\n for module in blocking_gears:\n module_sav(gtkw, module, vcd_writer.vcd_vars)\n\n def sim_gears_activity(self, g, sim, blocking_gears):\n for sim_gear in sim.sim_gears:\n if isinstance(sim_gear, SimSocket):\n continue\n\n module = sim_gear.gear\n\n if self.draw_graph:\n g.node_map[module].set_style('filled')\n if sim_gear not in sim.done:\n g.node_map[module].set_fillcolor('yellow')\n\n if module.definition == decoupler_din:\n if not module.queue.empty():\n if self.draw_graph:\n set_blocking_node(g, module)\n blocking_gears.add(module)\n sim_log().error(f'Data left in decoupler: {module.name}')\n\n for p in module.in_ports:\n q = p.get_queue()\n # print(f'{module.name}.{p.basename} queue empty: {q.empty()}')\n if q._unfinished_tasks:\n src_port = q.intf.consumers[0]\n if self.draw_graph:\n set_blocking_edge(g, p)\n blocking_gears.add(module)\n sim_log().error(\n f'{src_port.gear.name}.{src_port.basename} -> {module.name}.{p.basename} was not acknowledged'\n )\n\n if p in self.blockers:\n if self.draw_graph:\n set_waiting_edge(g, p)\n src_port = self.blockers[p]\n sim_log().info(\n f'{p.gear.name}.{p.basename} waiting on {src_port.gear.name}.{src_port.basename}'\n )\n\n def cosim_activity(self, g, top_name):\n outdir = registry('sim/artifact_dir')\n activity_path = os.path.join(outdir, 'activity.log')\n\n if not os.path.isfile(activity_path):\n return\n\n with open(activity_path, 'r') as log:\n for line in log:\n activity_name = line.rpartition(': ')[0]\n activity_name = activity_name.replace('top.dut.',\n f'{top_name}/')\n activity_name = activity_name.rpartition('.')[0]\n activity_name = activity_name.replace('_i.', '/')\n gear_name, _, intf_name = activity_name.rpartition('/')\n\n # Const always has valid high\n const_regex = r'.*_const(?P\\d+)_s'\n const_regex_one = r'.*_const_s'\n if not (re.match(const_regex, intf_name)\n or re.match(const_regex_one, intf_name)):\n sim_log().error(\n f'Cosim spy not acknowledged: {activity_name}')\n\n if self.draw_graph:\n bc_regex = r'.*_bc_(?P\\d+).*'\n if re.match(bc_regex, intf_name):\n sim_log().debug(\n f'Activity monitor cosim: bc not supported {activity_name}'\n )\n continue\n\n intf = find_target_intf(gear_name, intf_name)\n if intf is None:\n sim_log().error(\n f'Cannot find matching interface for {activity_name}'\n )\n continue\n if intf.is_broadcast:\n sim_log().debug(\n f'Intf bc not supported {activity_name}')\n continue\n\n try:\n prod_gear = find_target_prod(intf)\n set_blocking_node(g, prod_gear)\n except (KeyError, AttributeError):\n sim_log().debug(\n f'Cannot find node for {activity_name}')\n\n try:\n cons_port = find_target_cons(intf)\n set_blocking_edge(g, cons_port)\n except (KeyError, AttributeError):\n sim_log().debug(\n f'Cannot find edge for {activity_name}')\n","sub_path":"pygears/sim/extens/activity_report.py","file_name":"activity_report.py","file_ext":"py","file_size_in_byte":9038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"99146608","text":"# This code hase been acquired from TRN-pytorch repository\n# 'https://github.com/metalbubble/TRN-pytorch/blob/master/process_dataset.py'\n# which is prepared by Bolei Zhou\n#\n# Processing the raw dataset of Jester\n#\n# generate the meta files:\n# category.txt: the list of 
categories.\n# train_videofolder.txt: each row contains [videoname num_frames classIDX]\n# val_videofolder.txt: same as above\n#\n# Created by Bolei Zhou, Dec.2 2017\n\nimport os\nimport pdb\n\nROOT_DATASET_JESTER = '/usr/home/sut/datasets/jester/'\nROOT_DATASET = '/usr/home/sut/datasets/jester/rgb'\n\nROOT_DATASET_STH = '/usr/home/sut/datasets/something-something-v2/'\nROOT_DATASET_STH_RGB = '/usr/home/sut/datasets/something-something-v2/extracted-frames'\n\ndataset_name = 'jester-v1'\nwith open('%s%s-labels.csv' % (ROOT_DATASET_JESTER, dataset_name)) as f:\n lines = f.readlines()\ncategories = []\nfor line in lines:\n line = line.rstrip()\n categories.append(line)\ncategories = sorted(categories)\nwith open(os.path.join(ROOT_DATASET_JESTER,'category.txt'), 'w') as f:\n f.write('\\n'.join(categories))\n\ndict_categories = {}\nfor i, category in enumerate(categories):\n dict_categories[category] = i\n\nfiles_input = ['%s%s-validation.csv' % (ROOT_DATASET_JESTER, dataset_name), '%s%s-train.csv' % (ROOT_DATASET_JESTER,dataset_name)]\nfiles_output = ['val_videofolder.txt', 'train_videofolder.txt']\nfor (filename_input, filename_output) in zip(files_input, files_output):\n with open(filename_input) as f:\n lines = f.readlines()\n folders = []\n idx_categories = []\n for line in lines:\n line = line.rstrip()\n items = line.split(';')\n folders.append(items[0])\n idx_categories.append(os.path.join(str(dict_categories[items[1]])))\n output = []\n for i in range(len(folders)):\n curFolder = folders[i]\n curIDX = idx_categories[i]\n # counting the number of frames in each video folders\n dir_files = os.listdir(os.path.join(ROOT_DATASET, curFolder))\n output.append('%s %d %d' % (curFolder, len(dir_files), int(curIDX)))\n print('%d/%d' % (i, len(folders)))\n with open(os.path.join(ROOT_DATASET_JESTER,filename_output), 'w') as f:\n f.write('\\n'.join(output))\n","sub_path":"process_dataset.py","file_name":"process_dataset.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"215470524","text":"from __future__ import unicode_literals\nfrom mongoengine import *\nfrom django_mongoengine.mongo_auth.models import User\nfrom django.utils import timezone\nfrom adminpanel.models import *\n\n\nclass Owner(Document):\n name = StringField()\n phone = StringField()\n email = EmailField()\n address = StringField()\n contract_number = IntField()\n photo = ImageField()\n created_at = DateTimeField()\n updated_at = DateTimeField()\n created_by = ReferenceField(User)\n\n\nclass BaseEstateRecord(Document):\n name = StringField()\n description = StringField()\n price = IntField()\n area = FloatField()\n geo_position = PointField()\n photo = ImageField()\n status = IntField()\n deal_type = IntField()\n owner = ReferenceField(Owner)\n record_type = IntField()\n info = GenericEmbeddedDocumentField()\n created_at = DateTimeField(default=timezone.now)\n updated_at = DateTimeField()\n\n\nclass ApartmentEstateRecord(EmbeddedDocument):\n storeys = IntField()\n living_area = FloatField()\n kitchen_area = FloatField()\n rooms = IntField()\n bathroom = IntField()\n year = IntField()\n renovation = IntField()\n balcony = IntField()\n building_type = IntField()\n internet = BooleanField()\n wired_phone = BooleanField()\n tv = BooleanField()\n\n\nclass HouseEstateRecord(EmbeddedDocument):\n storeys = IntField()\n living_area = FloatField()\n kitchen_area = FloatField()\n homestead_area = FloatField()\n year = IntField()\n building_type = 
IntField()\n readiness = IntField()\n water = BooleanField()\n electricity = BooleanField()\n gas = BooleanField()\n internet = BooleanField()\n wired_phone = BooleanField()\n tv = BooleanField()\n","sub_path":"userapp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"67099894","text":"import cv2\r\n\r\nfrom imutils.video import FPS\r\n\r\n\r\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\r\n\r\nOPENCV_OBJECT_TRACKERS = {\r\n\t\"csrt\": cv2.TrackerCSRT_create(),\r\n\t\"kcf\": cv2.TrackerKCF_create(),\r\n\t\"boosting\": cv2.TrackerBoosting_create(),\r\n\t\"mil\": cv2.TrackerMIL_create(),\r\n\t\"tld\": cv2.TrackerTLD_create(),\r\n\t\"medianflow\": cv2.TrackerMedianFlow_create(),\r\n\t\"mosse\": cv2.TrackerMOSSE_create()\r\n}\r\n\t\r\nmultiTracker = None\r\ncap = cv2.VideoCapture(0)\r\nfps = None\r\nframeCount = 0\r\n\r\nwhile(cap.isOpened()):\r\n\r\n\tret, frame = cap.read()\r\n\tframeCount = frameCount + 1\r\n\t\r\n\t\r\n\t(H, W) = frame.shape[:2]\r\n\t\r\n\r\n\r\n\tif ret == True:\r\n\t\t\r\n\t\tframe = cv2.flip(frame,1)\r\n\t\t\r\n\t\tif multiTracker is None or frameCount % 100 == 0:\r\n\t\t\tfaces = face_cascade.detectMultiScale(frame, 1.1, 5, minSize=(10, 10))\r\n\t\t\t\r\n\t\t\tif len(faces) > 0:\r\n\t\t\t\tmultiTracker = cv2.MultiTracker_create()\r\n\t\t\t\tfor (x, y, w, h) in faces:\r\n\t\t\t\t\tcv2.putText(frame,'Detected',(x,y), cv2.FONT_HERSHEY_SIMPLEX, 2,(255,0,0),2,cv2.LINE_AA)\r\n\t\t\t\t\tcv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)\r\n\t\t\t\t\tmultiTracker.add(cv2.TrackerKCF_create(), frame, (x, y, w, h))\r\n\t\t\t\t\tfps = FPS().start()\r\n\t\r\n\t\telse:\r\n\t\t\tsuccess, boxes = multiTracker.update(frame)\r\n\t\t\t\r\n\t\t\tif len(boxes) == 0:\r\n\t\t\t\tframeCount = 99\r\n\t\t\t\t\r\n\t\t\tif success:\t\r\n\t\t\t\tfor box in boxes:\t\t\r\n\t\t\t\t\t(x, y, w, h) = [int(v) for v in box]\r\n\t\t\t\t\tcv2.putText(frame,'Tracked',(x,y), cv2.FONT_HERSHEY_SIMPLEX, 2,(255,0,0),2,cv2.LINE_AA)\r\n\t\t\t\t\tcv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)\r\n\t\t\tfps.update()\r\n\t\t\tfps.stop()\r\n\t\t\tinfo = [\r\n\t\t\t(\"Tracker\", \"KCF\"),\r\n\t\t\t(\"Success\", \"Yes\" if success else \"No\"),\r\n\t\t\t(\"FPS\", \"{:.2f}\".format(fps.fps())),\r\n\t\t\t]\r\n\t\t\t\r\n\t\t\tfor (i, (k, v)) in enumerate(info):\r\n\t\t\t\ttext = \"{}: {}\".format(k, v)\r\n\t\t\t\tcv2.putText(frame, text, (10, H - ((i * 20) + 20)),\r\n\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)\r\n\t\t\t\r\n\t\tcv2.imshow(\"Frame\",frame)\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\t\r\n\t\tif cv2.waitKey(1) & 0xFF == ord('q'):\r\n\t\t\tbreak\t\t\t\r\n\t\t\r\n\t\t\r\n\telse:\r\n\t\tbreak\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n\t\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"detectAndTrack.py","file_name":"detectAndTrack.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"431593901","text":"from pangolin.core.test import TestCase\nfrom pangolin.users.factories import UserFactory\n\n\nclass TestExample(TestCase):\n\n def test_example(self):\n UserFactory()\n resp = self.client.get('/')\n self.assertEqual(resp.status_code, 200)\n","sub_path":"tests/test_example.py","file_name":"test_example.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} 
+{"seq_id":"290618427","text":"class Solution(object):\n def longestUnivaluePath(self, root):\n res = [0]\n dfs(root, res)\n # print(res)\n return max(res)\n\n\ndef dfs(node, res):\n if not node:\n return None\n else:\n l = dfs(node.left, res)\n r = dfs(node.right, res)\n\n '''\n temp vaiable to modify the return variable\n\n '''\n ll1 = 0\n ll2 = 0\n ll3 = 0\n # print(\"node.val,l,r = \",node.val,l,r)\n if l == None and r == None:\n ll1 = 0\n ll2 = 0\n if node.left and node.left.val == node.val:\n ll1 = l + 1\n if node.right and node.right.val == node.val:\n ll2 = r + 1\n # print(\"HERE\",ll2,r)\n\n if node.right and node.left and (node.right.val == node.left.val == node.val):\n ll3 = ll1 + ll2\n res.append(ll3)\n\n if node.right and node.left and node.right.val != node.val and node.left.val != node.val:\n # print(\"HERE2\",ll2,r)\n ll1 = 0\n ll2 = 0\n ll3 = 0\n\n res.append(max(ll1, ll2))\n # print(\"node.val,ll1,ll2,ll3 = \",node.val,ll1,ll2,ll3)\n\n return max(ll1, ll2)\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Archive/P/leetcode_random/univalue-path.py","file_name":"univalue-path.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"174028031","text":"# standard libraries\nimport collections\nimport copy\nimport functools\nimport gettext\nimport typing\n\n# third party libraries\nimport numpy\n\n# local libraries\nfrom nion.data import xdata_1_0 as xd\nfrom nion.swift import DocumentController\nfrom nion.swift.model import Connection\nfrom nion.swift.model import DataItem\nfrom nion.swift.model import DisplayItem\nfrom nion.swift.model import DocumentModel\nfrom nion.swift.model import Graphics\nfrom nion.swift.model import Symbolic\nfrom nion.eels_analysis import eels_analysis\nfrom nion.eels_analysis import PeriodicTable\n\n_ = gettext.gettext\n\n\nclass EELSBackgroundSubtraction:\n def __init__(self, computation, **kwargs):\n self.computation = computation\n\n def execute(self, eels_xdata, region, fit_interval, signal_interval):\n eels_spectrum_xdata = xd.sum_region(eels_xdata, region.mask_xdata_with_shape(eels_xdata.data_shape[0:2]))\n signal = eels_analysis.make_signal_like(eels_analysis.extract_original_signal(eels_spectrum_xdata, [fit_interval], signal_interval), eels_spectrum_xdata)\n background_xdata = eels_analysis.make_signal_like(eels_analysis.calculate_background_signal(eels_spectrum_xdata, [fit_interval], signal_interval), eels_spectrum_xdata)\n subtracted_xdata = signal - background_xdata\n self.__xdata = xd.vstack((eels_spectrum_xdata, background_xdata, subtracted_xdata))\n\n def commit(self):\n self.computation.set_referenced_xdata(\"data\", self.__xdata)\n\n\nasync def pick_new_edge(document_controller, model_data_item, edge) -> None:\n \"\"\"Set up a new edge pick from the model data item and the given edge.\n\n The library will have the following new components and connections:\n - a pick region on the model data item\n - a pick data item with a fit/signal connected to the edge data structure\n - a background subtraction computation with model data item, and edge intervals as inputs\n - a background data item, computed by the background subtraction computation\n - a subtracted data item, computed by the background subtraction computation\n - a eels line plot with pick, background, and subtracted data items as components\n - an edge reference, owned by eels line plot, with reference to edge\n - the edge reference is used to recognize the eels line plot as associated with 
the referenced edge\n \"\"\"\n document_model = document_controller.document_model\n project = document_model.get_project_for_item(model_data_item)\n model_display_item = document_model.get_display_item_for_data_item(model_data_item)\n pick_region = Graphics.RectangleGraphic()\n pick_region.size = min(1 / 16, 16 / model_data_item.dimensional_shape[0]), min(1 / 16, 16 / model_data_item.dimensional_shape[1])\n pick_region.label = \"{} {}\".format(_(\"Pick\"), str(edge.electron_shell))\n model_display_item.add_graphic(pick_region)\n\n # set up the computation for this edge.\n eels_data_item = DataItem.DataItem()\n document_model.append_data_item(eels_data_item, project=project)\n eels_data_item.title = \"{} EELS Data of {}\".format(pick_region.label, model_data_item.title)\n eels_data_item.source = pick_region\n eels_display_item = document_model.get_display_item_for_data_item(eels_data_item)\n eels_display_item.display_type = \"line_plot\"\n eels_display_item.display_layers = [\n {\"label\": \"Signal\", \"data_index\": 0, \"data_row\": 2, \"fill_color\": \"#0F0\"},\n {\"label\": \"Background\", \"data_index\": 0, \"data_row\": 1, \"fill_color\": \"rgba(255, 0, 0, 0.3)\"},\n {\"label\": \"Data\", \"data_index\": 0, \"data_row\": 0, \"fill_color\": \"#1E90FF\"},\n ]\n eels_display_item.set_display_property(\"legend_position\", \"top-right\")\n fit_region = Graphics.IntervalGraphic()\n fit_region.label = _(\"Fit\")\n fit_region.graphic_id = \"fit\"\n fit_region.interval = edge.fit_interval\n eels_display_item.add_graphic(fit_region)\n signal_region = Graphics.IntervalGraphic()\n signal_region.label = _(\"Signal\")\n signal_region.graphic_id = \"signal\"\n signal_region.interval = edge.signal_interval\n eels_display_item.add_graphic(signal_region)\n document_model.append_connection(Connection.PropertyConnection(edge.data_structure, \"fit_interval\", fit_region, \"interval\", parent=eels_data_item), project=project)\n document_model.append_connection(Connection.PropertyConnection(edge.data_structure, \"signal_interval\", signal_region, \"interval\", parent=eels_data_item), project=project)\n\n computation = document_model.create_computation()\n computation.create_input_item(\"eels_xdata\", Symbolic.make_item(model_data_item, type=\"xdata\"))\n computation.create_input_item(\"region\", Symbolic.make_item(pick_region))\n computation.create_input_item(\"fit_interval\", Symbolic.make_item(edge.data_structure), property_name=\"fit_interval\")\n computation.create_input_item(\"signal_interval\", Symbolic.make_item(edge.data_structure), property_name=\"signal_interval\")\n computation.processing_id = \"eels.background_subtraction11\"\n computation.create_output_item(\"data\", Symbolic.make_item(eels_data_item))\n document_model.append_computation(computation, project=project)\n\n # the eels item will need the initial computation results to display properly (view to intervals)\n await document_model.compute_immediate(document_controller.event_loop, computation)\n\n # ensure computation is deleted when eels is deleted\n computation.source = eels_data_item\n\n # create an elemental_mapping_edge_ref data structure, owned by the eels data item, with a referenced\n # object pointing to the edge. 
used for recognizing the eels data item as such.\n data_structure = document_model.create_data_structure(structure_type=\"elemental_mapping_edge_ref\", source=eels_data_item)\n data_structure.set_referenced_object(\"spectrum_image\", model_data_item)\n data_structure.set_referenced_object(\"edge\", edge.data_structure)\n data_structure.set_referenced_object(\"data\", eels_data_item)\n data_structure.set_referenced_object(\"pick_region\", pick_region)\n document_model.append_data_structure(data_structure, project=project)\n\n # display it\n eels_display_item.view_to_intervals(eels_data_item.xdata, [edge.data_structure.fit_interval, edge.data_structure.signal_interval])\n document_controller.show_display_item(eels_display_item)\n\n\nasync def change_edge(document_controller: DocumentController.DocumentController, model_data_item: DataItem.DataItem, eels_data_item: DataItem.DataItem, edge: \"ElementalMappingEdge\") -> None:\n \"\"\"Change the eels data item and associated items to display new edge.\n\n The library will be changed in the following way:\n - the pick region will be renamed\n - the pick data item will connect fit/signal regions to new edge data structure\n - the background subtraction computation will use edge intervals from new edge\n - the pick, background, subtracted, and eels line plot data items will be renamed\n - the eels line plot will connect fit/signal regions to new edge data structure\n - the edge reference will reference the new edge\n \"\"\"\n document_model = document_controller.document_model\n project = document_model.get_project_for_item(model_data_item)\n\n computation = None # type: Symbolic.Computation\n for computation_ in document_model.computations:\n if computation_.source == eels_data_item and computation_.processing_id == \"eels.background_subtraction11\":\n computation = computation_\n break\n\n edge_ref_data_structure = None\n old_edge_data_structure = None\n for data_structure in document_model.data_structures:\n if data_structure.source == eels_data_item and data_structure.structure_type == \"elemental_mapping_edge_ref\":\n edge_ref_data_structure = data_structure\n old_edge_data_structure = data_structure.get_referenced_object(\"edge\")\n break\n\n if not computation or not edge_ref_data_structure or not old_edge_data_structure:\n return\n\n pick_region = edge_ref_data_structure.get_referenced_object(\"pick_region\")\n\n if not eels_data_item or not pick_region:\n return\n\n pick_region.label = \"{} {}\".format(_(\"Pick\"), str(edge.electron_shell))\n\n for connection in copy.copy(document_model.connections):\n if connection.parent == eels_data_item and connection.source_property in (\"fit_interval\", \"signal_interval\"):\n source_property = connection.source_property\n target_property = connection.target_property\n target = connection._target\n document_model.remove_connection(connection)\n new_connection = Connection.PropertyConnection(edge.data_structure, source_property, target, target_property, parent=eels_data_item)\n document_model.append_connection(new_connection, project=project)\n\n computation.set_input_item(\"fit_interval\", Symbolic.make_item(edge.data_structure))\n computation.set_input_item(\"signal_interval\", Symbolic.make_item(edge.data_structure))\n\n eels_data_item.title = \"{} EELS Data of {}\".format(pick_region.label, model_data_item.title)\n\n for connection in copy.copy(document_model.connections):\n if connection.parent == eels_data_item and connection.source_property in (\"fit_interval\", \"signal_interval\"):\n 
source_property = connection.source_property\n target_property = connection.target_property\n target = connection._target\n document_model.remove_connection(connection)\n new_connection = Connection.PropertyConnection(edge.data_structure, source_property, target, target_property, parent=eels_data_item)\n document_model.append_connection(new_connection, project=project)\n\n edge_ref_data_structure.remove_referenced_object(\"edge\")\n edge_ref_data_structure.set_referenced_object(\"edge\", edge.data_structure)\n\n # the eels item will need the initial computation results to display properly (view to intervals)\n await document_model.compute_immediate(document_controller.event_loop, computation)\n eels_display_item = document_model.get_display_item_for_data_item(eels_data_item)\n eels_display_item.view_to_intervals(eels_data_item.xdata, [edge.fit_interval, edge.signal_interval])\n\n\nclass EELSMapping:\n def __init__(self, computation, **kwargs):\n self.computation = computation\n\n def execute(self, spectrum_image_xdata, fit_interval, signal_interval):\n self.__mapped_xdata = eels_analysis.map_background_subtracted_signal(spectrum_image_xdata, None, [fit_interval], signal_interval)\n\n def commit(self):\n self.computation.set_referenced_xdata(\"map\", self.__mapped_xdata)\n\n\nasync def map_new_edge(document_controller, model_data_item, edge) -> None:\n document_model = document_controller.document_model\n project = document_model.get_project_for_item(model_data_item)\n\n map_data_item = DataItem.new_data_item()\n map_data_item.title = \"{} of {}\".format(_(\"Map\"), str(edge.electron_shell))\n map_data_item.category = model_data_item.category\n map_data_item.source = model_data_item\n document_model.append_data_item(map_data_item, project=project)\n\n computation = document_model.create_computation()\n computation.source = map_data_item\n computation.create_input_item(\"spectrum_image_xdata\", Symbolic.make_item(model_data_item, type=\"xdata\"))\n computation.create_input_item(\"fit_interval\", Symbolic.make_item(edge.data_structure), property_name=\"fit_interval\")\n computation.create_input_item(\"signal_interval\", Symbolic.make_item(edge.data_structure), property_name=\"signal_interval\")\n computation.processing_id = \"eels.mapping\"\n computation.create_output_item(\"map\", Symbolic.make_item(map_data_item))\n document_model.append_computation(computation, project=project)\n\n await document_model.compute_immediate(document_controller.event_loop, computation)\n\n map_display_item = document_model.get_display_item_for_data_item(map_data_item)\n document_controller.show_display_item(map_display_item)\n\n\nclass ElementalMappingEdge:\n def __init__(self, *, data_structure=None, electron_shell: PeriodicTable.ElectronShell=None, fit_interval=None, signal_interval=None):\n self.__data_structure = data_structure\n self.__fit_interval = fit_interval\n self.__signal_interval = signal_interval\n self.__electron_shell = electron_shell\n if self.__data_structure:\n self.read(self.__data_structure)\n\n @property\n def data_structure(self):\n return self.__data_structure\n\n def read(self, data_structure) -> None:\n atomic_number = data_structure.get_property_value(\"atomic_number\")\n shell_number = data_structure.get_property_value(\"shell_number\")\n subshell_index = data_structure.get_property_value(\"subshell_index\")\n self.__electron_shell = PeriodicTable.ElectronShell(atomic_number, shell_number, subshell_index)\n self.__fit_interval = data_structure.get_property_value(\"fit_interval\", 
(0.4, 0.5))\n self.__signal_interval = data_structure.get_property_value(\"signal_interval\", (0.5, 0.6))\n\n def write(self, data_structure) -> None:\n self.__write_electron_shell(data_structure)\n self.__write_fit_interval(data_structure)\n self.__write_signal_interval(data_structure)\n\n def matches(self, data_structure) -> bool:\n return self.__data_structure is not None and self.__data_structure.uuid == data_structure.uuid\n\n def __write_electron_shell(self, data_structure):\n if self.__electron_shell:\n data_structure.set_property_value(\"atomic_number\", self.__electron_shell.atomic_number)\n data_structure.set_property_value(\"shell_number\", self.__electron_shell.shell_number)\n data_structure.set_property_value(\"subshell_index\", self.__electron_shell.subshell_index)\n else:\n data_structure.remove_property_value(\"atomic_number\")\n data_structure.remove_property_value(\"shell_number\")\n data_structure.remove_property_value(\"subshell_index\")\n\n def __write_signal_interval(self, data_structure):\n if self.__signal_interval is not None:\n data_structure.set_property_value(\"signal_interval\", copy.copy(self.__signal_interval))\n else:\n data_structure.remove_property_value(\"signal_interval\")\n\n def __write_fit_interval(self, data_structure):\n if self.__fit_interval is not None:\n data_structure.set_property_value(\"fit_interval\", copy.copy(self.__fit_interval))\n else:\n data_structure.remove_property_value(\"fit_interval\")\n\n @property\n def electron_shell(self):\n return self.__electron_shell\n\n @electron_shell.setter\n def electron_shell(self, value):\n if self.__electron_shell != value:\n self.__electron_shell = value\n self.__write_electron_shell(self.__data_structure)\n\n @property\n def fit_interval(self):\n return self.__fit_interval\n\n @fit_interval.setter\n def fit_interval(self, value):\n if self.__fit_interval != value:\n self.__fit_interval = value\n self.__write_fit_interval(self.__data_structure)\n\n @property\n def signal_interval(self):\n return self.__signal_interval\n\n @signal_interval.setter\n def signal_interval(self, value):\n if self.__signal_interval != value:\n self.__signal_interval = value\n self.__write_signal_interval(self.__data_structure)\n\n\nclass ElementalMappingController:\n def __init__(self, document_model: DocumentModel.DocumentModel):\n self.__document_model = document_model\n\n self.__current_data_item = None\n self.__model_data_item = None\n self.__edge_data_structure = None\n\n self.__explorer_interval = None\n\n self.__explorer_property_changed_listeners = dict() # typing.Dict[uuid.UUID, Any]\n\n self.__energy_intervals = dict() # typing.Dict[uuid.UUID, typing.Tuple[float, float]]\n\n def item_inserted(key, value, before_index):\n if key == \"data_item\":\n data_item = value\n if self.__is_explorer(document_model, data_item):\n self.__connect_explorer_interval(document_model, data_item)\n\n def item_removed(key, value, index):\n if key == \"data_item\":\n data_item = value\n self.__disconnect_explorer_interval(data_item)\n\n self.__item_inserted_listener = document_model.item_inserted_event.listen(item_inserted)\n self.__item_removed_listener = document_model.item_removed_event.listen(item_removed)\n\n for index, data_item in enumerate(document_model.data_items):\n item_inserted(\"data_item\", data_item, index)\n\n def close(self):\n self.__item_inserted_listener.close()\n self.__item_inserted_listener = None\n self.__item_removed_listener.close()\n self.__item_removed_listener = None\n\n def set_current_data_item(self, 
data_item):\n \"\"\"Set the current data item.\n\n If the data item is an explorer, update the explorer interval, otherwise cleaar it.\n \"\"\"\n self.__current_data_item = data_item\n\n is_explorer = self.__is_explorer(self.__document_model, data_item)\n if is_explorer:\n self.__explorer_interval = self.__energy_intervals.get(data_item.uuid)\n else:\n self.__explorer_interval = None\n\n self.__model_data_item = None\n self.__edge_data_structure = None\n\n if self.__is_model(data_item):\n self.__model_data_item = data_item\n elif data_item:\n for data_structure in copy.copy(self.__document_model.data_structures):\n # check to see if the data item is a eels data item with an associated edge. the data item is a\n # eels data item when there is an elemental_mapping_edge_ref with its source being the data item.\n if data_structure.source == data_item and data_structure.structure_type == \"elemental_mapping_edge_ref\":\n self.__edge_data_structure = data_structure.get_referenced_object(\"edge\")\n self.__model_data_item = data_structure.get_referenced_object(\"spectrum_image\")\n if is_explorer:\n self.__model_data_item = data_item.source\n\n @property\n def model_data_item(self):\n return self.__model_data_item\n\n @property\n def edge(self):\n return ElementalMappingEdge(data_structure=self.__edge_data_structure) if self.__edge_data_structure else None\n\n def __explorer_interval_changed(self, data_item, interval) -> None:\n if data_item == self.__current_data_item:\n self.__explorer_interval = interval\n\n @property\n def explorer_interval(self):\n return self.__explorer_interval\n\n def __is_model(self, data_item) -> bool:\n if isinstance(data_item, DataItem.DataItem):\n return data_item.is_data_3d\n return False\n\n def __is_explorer(self, document_model, data_item) -> bool:\n if isinstance(data_item, DataItem.DataItem):\n if data_item.is_data_1d:\n for display_item in document_model.get_display_items_for_data_item(data_item):\n for graphic in display_item.graphics:\n if isinstance(graphic, Graphics.IntervalGraphic) and graphic.graphic_id == \"explore\":\n return True\n return False\n\n def __is_calibrated_map(self, data_item) -> bool:\n if isinstance(data_item, DataItem.DataItem):\n if data_item.is_data_2d:\n return data_item.title.startswith(\"Map\") and data_item.intensity_calibration.units.startswith(\"~\")\n return False\n\n async def explore_edges(self, document_controller):\n document_model = document_controller.document_model\n model_data_item = self.__model_data_item\n model_display_item = document_model.get_display_item_for_data_item(model_data_item)\n pick_region = Graphics.RectangleGraphic()\n pick_region.size = min(1 / 16, 16 / model_data_item.dimensional_shape[0]), min(1 / 16, 16 / model_data_item.dimensional_shape[1])\n pick_region.label = _(\"Explore\")\n model_display_item.add_graphic(pick_region)\n pick_data_item = document_model.get_pick_region_new(model_display_item, pick_region=pick_region)\n if pick_data_item:\n explore_interval = Graphics.IntervalGraphic()\n explore_interval.interval = 0.4, 0.6\n explore_interval.label = _(\"Explore\")\n explore_interval.graphic_id = \"explore\"\n pick_data_item.source = model_data_item\n pick_display_item = document_model.get_display_item_for_data_item(pick_data_item)\n pick_display_item.add_graphic(explore_interval)\n document_controller.show_display_item(pick_display_item)\n await self.__document_model.compute_immediate(document_controller.event_loop, document_model.get_data_item_computation(pick_data_item)) # need the data to 
make connect_explorer_interval work; so do this here. ugh.\n self.__connect_explorer_interval(document_model, pick_data_item)\n\n def __add_edge(self, data_item, electron_shell, fit_interval, signal_interval) -> ElementalMappingEdge:\n project = self.__document_model.get_project_for_item(data_item)\n data_structure = self.__document_model.create_data_structure(structure_type=\"elemental_mapping_edge\", source=data_item)\n self.__document_model.append_data_structure(data_structure, project=project)\n edge = ElementalMappingEdge(electron_shell=electron_shell, fit_interval=fit_interval, signal_interval=signal_interval)\n edge.write(data_structure)\n return ElementalMappingEdge(data_structure=data_structure)\n\n def __remove_edge(self, edge: ElementalMappingEdge) -> None:\n for data_structure in copy.copy(self.__document_model.data_structures):\n if data_structure.source == self.__model_data_item and edge.matches(data_structure):\n self.__document_model.remove_data_structure(data_structure)\n\n def graphic_property_changed(self, graphic, data_item, dimensional_shape, dimensional_calibrations, key):\n if key == \"interval\":\n value = graphic.interval\n ss = value[0] * dimensional_shape[-1]\n ee = value[1] * dimensional_shape[-1]\n s = dimensional_calibrations[-1].convert_to_calibrated_value(ss)\n e = dimensional_calibrations[-1].convert_to_calibrated_value(ee)\n self.__energy_intervals[data_item.uuid] = s, e\n self.__explorer_interval_changed(data_item, (s, e))\n\n def __connect_explorer_interval(self, document_model, data_item):\n if data_item.is_data_1d:\n for display_item in document_model.get_display_items_for_data_item(data_item):\n for graphic in display_item.graphics:\n if isinstance(graphic, Graphics.IntervalGraphic) and graphic.graphic_id == \"explore\":\n dimensional_shape = data_item.dimensional_shape\n dimensional_calibrations = data_item.dimensional_calibrations\n self.__explorer_property_changed_listeners[data_item.uuid] = graphic.property_changed_event.listen(functools.partial(self.graphic_property_changed, graphic, data_item, dimensional_shape, dimensional_calibrations))\n self.graphic_property_changed(graphic, data_item, dimensional_shape, dimensional_calibrations, \"interval\")\n\n def __disconnect_explorer_interval(self, data_item):\n listener = self.__explorer_property_changed_listeners.get(data_item.uuid)\n if listener:\n listener.close()\n del self.__explorer_property_changed_listeners[data_item.uuid]\n\n def add_edge(self, electron_shell: PeriodicTable.ElectronShell) -> typing.Optional[ElementalMappingEdge]:\n model_data_item = self.__model_data_item\n if model_data_item:\n binding_energy_eV = PeriodicTable.PeriodicTable().nominal_binding_energy_ev(electron_shell)\n signal_interval_eV = binding_energy_eV, binding_energy_eV * 1.10\n fit_interval_eV = binding_energy_eV * 0.93, binding_energy_eV * 0.98\n dimensional_shape = model_data_item.dimensional_shape\n dimensional_calibrations = model_data_item.dimensional_calibrations\n if dimensional_shape is not None and dimensional_calibrations is not None and len(dimensional_calibrations) > 0:\n calibration = dimensional_calibrations[-1]\n if calibration.units == \"eV\":\n fit_region_start = calibration.convert_from_calibrated_value(fit_interval_eV[0]) / dimensional_shape[-1]\n fit_region_end = calibration.convert_from_calibrated_value(fit_interval_eV[1]) / dimensional_shape[-1]\n signal_region_start = calibration.convert_from_calibrated_value(signal_interval_eV[0]) / dimensional_shape[-1]\n signal_region_end = 
calibration.convert_from_calibrated_value(signal_interval_eV[1]) / dimensional_shape[-1]\n fit_interval = fit_region_start, fit_region_end\n signal_interval = signal_region_start, signal_region_end\n return self.__add_edge(model_data_item, electron_shell, fit_interval, signal_interval)\n return None\n\n def remove_edge(self, edge: ElementalMappingEdge) -> None:\n self.__remove_edge(edge)\n\n def build_edge_bundles(self, document_controller):\n document_model = self.__document_model\n model_data_item = self.__model_data_item\n current_data_item = self.__current_data_item\n edge_data_structure = self.__edge_data_structure\n\n EdgeBundle = collections.namedtuple(\"EdgeBundle\", [\"electron_shell_str\", \"selected\", \"select_action\", \"pick_action\", \"map_action\", \"delete_action\"])\n\n edge_bundles = list()\n\n edges = list()\n for data_structure in copy.copy(document_model.data_structures):\n if data_structure.source == model_data_item and data_structure.structure_type == \"elemental_mapping_edge\":\n edge = ElementalMappingEdge(data_structure=data_structure)\n edges.append(edge)\n\n for index, edge in enumerate(edges):\n\n def change_edge_action(edge):\n document_controller.event_loop.create_task(change_edge(document_controller, model_data_item, current_data_item, edge))\n\n def pick_edge_action(edge):\n document_controller.event_loop.create_task(pick_new_edge(document_controller, model_data_item, edge))\n\n def map_edge_action(edge):\n document_controller.event_loop.create_task(map_new_edge(document_controller, model_data_item, edge))\n\n def delete_edge_action(edge):\n self.__remove_edge(edge)\n\n edge_bundle = EdgeBundle(electron_shell_str=edge.electron_shell.to_long_str(),\n selected=edge.data_structure == edge_data_structure,\n select_action=functools.partial(change_edge_action, edge),\n pick_action=functools.partial(pick_edge_action, edge),\n map_action=functools.partial(map_edge_action, edge),\n delete_action=functools.partial(delete_edge_action, edge))\n\n edge_bundles.append(edge_bundle)\n\n return edge_bundles\n\n def build_multiprofile(self, document_controller):\n document_model = document_controller.document_model\n model_data_item = self.__model_data_item\n if not model_data_item:\n return\n project = document_model.get_project_for_item(model_data_item)\n multiprofile_display_item = None\n line_profile_regions = list()\n\n colors = (\"rgba(0, 0, 255, 0.5)\", \"rgba(255, 0, 0, 0.5)\", \"rgba(0, 255, 0, 0.5)\")\n\n for index, dependent_data_item in enumerate(document_model.get_dependent_data_items(model_data_item)):\n if self.__is_calibrated_map(dependent_data_item):\n dependent_display_item = document_model.get_display_item_for_data_item(dependent_data_item)\n if not multiprofile_display_item:\n multiprofile_display_item = DisplayItem.DisplayItem()\n multiprofile_display_item.title = _(\"Multi-profile\")\n multiprofile_display_item.display_type = \"line_plot\"\n multiprofile_display_item.set_display_property(\"legend_position\", \"top-right\")\n document_model.append_display_item(multiprofile_display_item, project=project)\n line_profile_data_item = document_model.get_line_profile_new(dependent_display_item)\n line_profile_display_item = document_model.get_display_item_for_data_item(line_profile_data_item)\n line_profile_display_data_channel = line_profile_display_item.get_display_data_channel_for_data_item(line_profile_data_item)\n line_profile_region = dependent_display_item.graphics[0]\n line_profile_region.vector = (0.5, 0.2), (0.5, 0.8)\n 
line_profile_regions.append(line_profile_region)\n                multiprofile_display_item.append_display_data_channel_for_data_item(line_profile_data_item)\n                display_layers = multiprofile_display_item.display_layers\n                display_layers[-1][\"label\"] = dependent_data_item.title[dependent_data_item.title.index(\" of \") + 4:]\n                display_layers[-1][\"fill_color\"] = colors[index % len(colors)]\n                multiprofile_display_item.display_layers = display_layers\n        if multiprofile_display_item:\n            for line_profile_region in line_profile_regions[1:]:\n                document_model.append_connection(Connection.PropertyConnection(line_profile_regions[0], \"vector\", line_profile_region, \"vector\", parent=multiprofile_display_item), project=project)\n                document_model.append_connection(Connection.PropertyConnection(line_profile_regions[0], \"width\", line_profile_region, \"width\", parent=multiprofile_display_item), project=project)\n            document_controller.show_display_item(multiprofile_display_item)\n\nSymbolic.register_computation_type(\"eels.background_subtraction11\", EELSBackgroundSubtraction)\nSymbolic.register_computation_type(\"eels.mapping\", EELSMapping)\n","sub_path":"nionswift_plugin/nion_eels_analysis/ElementalMappingController.py","file_name":"ElementalMappingController.py","file_ext":"py","file_size_in_byte":30595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"70346745","text":"from datetime import datetime\n\nfrom monocliche.model import Property, Bid\n\n\nclass Auction:\n\n    def __init__(self, start_time: datetime, end_time: datetime, auction_property: Property):\n        self.start_time = start_time\n        self.end_time = end_time\n        self.auction_property = auction_property\n        self.offers: list[Bid] = None\n\n    def add_bid(self, bid: Bid):\n        if self.offers is None:\n            self.offers = []\n\n        self.offers.append(bid)\n","sub_path":"monocliche/model/Auction.py","file_name":"Auction.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"385551806","text":"import functools\nimport asyncio\nfrom enum import Enum\n\nfrom sccc_contestbot.models import Contest, ContestData\n\n\nclass RenewalFlag(Enum):\n    CREATED = 1\n    CHANGED = 2\n\n\nclass ContestManager:\n    \"\"\"\n    Manages the contests.\n    It cannot run on its own outside of the running bot.\n    Do not instantiate it directly.\n\n    Args:\n        renewal_call_back :\n            Called on contest renewal whenever a contest was newly\n            added or something about it changed.\n            The callback must have the following shape:\n            def renewal_call_back(contest:ContestData, flag:RenewalFlag)\n\n    Note:\n        Because the sqlalchemy API blocks, the contests are managed on a\n        separate thread through a ThreadPoolExecutor.\n\n        Because of this, the class depends strongly on the run method of\n        the ContestBot class. There are three points of coupling:\n        - self.thread_pool_executor = ThreadPoolExecutor( ... )\n            A scoped_session has to be created inside the thread.\n        - with self.thread_pool_executor as pool: ...\n            The ThreadPoolExecutor's context must be active.\n        - loop.set_default_executor(pool)\n            The event loop's default executor must be the active\n            ThreadPoolExecutor.\n            Beware that loop.run_in_executor(executor=None ... ) runs\n            on the default executor!\n    \"\"\"\n\n    def __init__(self, event_loop, thread_local_data, renewal_call_back):\n        self.event_loop = event_loop\n        self.thread_local_data = thread_local_data\n        self.renewal_call_back = renewal_call_back\n        self.lock = asyncio.Lock()\n\n    async def renewal_contest(self, contest: ContestData):\n        \"\"\"\n        Adds a contest to the database or renews it.\n        If an identical contest already exists, this does nothing.\n\n        When a contest was newly added or renewed, the preconfigured callback is invoked.\n\n        \"\"\"\n\n        def _impl(thread_local_data):\n            session = thread_local_data.Session()\n            item = (\n                session.query(Contest)\n                .filter(Contest.contest_id == contest.contest_id)\n                .first()\n            )\n\n            if item is None:\n                session.add(Contest(contest))\n                session.commit()\n                session.close()\n                return RenewalFlag.CREATED\n\n            if item.hash_value != contest.hash_value:\n                setattr(item, \"contest_name\", contest.contest_name)\n                setattr(item, \"start_date\", contest.start_date)\n                setattr(item, \"URL\", contest.URL)\n                setattr(item, \"hash_value\", contest.hash_value)\n                session.commit()\n                session.close()\n                return RenewalFlag.CHANGED\n\n            # Not changed\n            session.close()\n            return None\n\n        _impl = functools.partial(_impl, self.thread_local_data)\n\n        async with self.lock:\n            result_flag = await self.event_loop.run_in_executor(\n                executor=None, func=_impl\n            )\n\n        if result_flag is None:\n            pass\n        else:\n            self.renewal_call_back(contest, result_flag)\n\n    async def delete_contest(self, contest: ContestData):\n        \"\"\"\n        Removes the matching contest if it exists.\n        No exception is raised even when the contest does not exist.\n\n        Note:\n            Contests are keyed for removal by their contest_id.\n        \"\"\"\n\n        def _impl(thread_local_data):\n            session = thread_local_data.Session()\n            session.query(Contest).filter(\n                Contest.contest_id == contest.contest_id\n            ).delete()\n            session.commit()\n            session.close()\n\n        _impl = functools.partial(_impl, self.thread_local_data)\n\n        async with self.lock:\n            await self.event_loop.run_in_executor(executor=None, func=_impl)\n\n    async def is_latest(self, contest: ContestData) -> bool:\n        \"\"\"\n        Checks whether this contest object holds the latest information.\n\n        returns:\n            True if it matches the latest information, False otherwise.\n            Also returns False when no contest information exists.\n        \"\"\"\n\n        def _impl(thread_local_data):\n            session = thread_local_data.Session()\n            hash_value = (\n                session.query(Contest.hash_value)\n                .filter(Contest.contest_id == contest.contest_id)\n                .scalar()\n            )\n            session.close()\n\n            if hash_value is None:\n                return False\n\n            return hash_value == contest.hash_value\n\n        _impl = functools.partial(_impl, self.thread_local_data)\n\n        return await self.event_loop.run_in_executor(executor=None, func=_impl)\n","sub_path":"sccc_contestbot/contest_manager.py","file_name":"contest_manager.py","file_ext":"py","file_size_in_byte":5133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"342970245","text":"from itertools import product\r\n\r\nimport flygame as fly\r\nimport pygame as pg\r\nfrom pygame.math import Vector2\r\n\r\ncontrol = fly.InputHandler()\r\ngame = fly.GameState()\r\n\r\nsize = (800, 800)\r\n\r\nSKY = (200,230,230)\r\nBLACK = (30,30,30)\r\nPLAYERS = [\r\n    (255, 0, 0),\r\n    (0, 255, 0),\r\n    (0, 0, 255),\r\n    (0, 255, 255),\r\n    (255, 255, 0),\r\n    (255, 0, 255),\r\n]\r\nRW = 100\r\nimg = pg.image.load(\"sprite.png\")\r\nimage = pg.transform.scale(img, (RW, RW))\r\ndel img\r\n\r\ndone = False\r\n\r\nscreen = pg.display.set_mode(size)\r\nclock = pg.time.Clock()\r\n#all_sprites = pg.sprite.Group()\r\npg.init()\r\narial18 = pg.font.SysFont('arial',18, False, False)\r\n\r\n\r\n\r\nwhile not done:\r\n\r\n    # UPDATE\r\n    for event in pg.event.get():\r\n        
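# a window-close event ends the main loop; key presses and releases are forwarded to the InputHandler\r\n        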
if event.type == pg.QUIT:\r\n done = True\r\n elif event.type == pg.KEYDOWN:\r\n control.startMoving(event.key)\r\n elif event.type == pg.KEYUP:\r\n control.stopMoving(event.key)\r\n\r\n # RENDER\r\n screen.fill(SKY)\r\n\r\n for coord in product(range(0,8),range(0,8)):\r\n if game.tiles[coord]:\r\n screen.blit(image, (coord[0]*RW, coord[1]*RW, RW, RW))\r\n\r\n #if game.objects[coord]:\r\n # screen.blit(image, (coord[0]*RW, coord[1]*RW, RW, RW))\r\n\r\n for i,player in game.players.items():\r\n pg.draw.rect(screen, PLAYERS[i], (player.x, player.y, 48, 48))\r\n\r\n text = arial18.render(str(i)+' '+str(player.x)+','+str(player.y), True, BLACK)\r\n textX = text.get_rect().width\r\n textY = text.get_rect().height\r\n screen.blit(text, ((40 - (textX / 2)), (10+(i*22) - (textY / 2))))\r\n\r\n pg.display.flip()\r\n clock.tick(30)\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"546907174","text":"import re\nimport requests\nimport time\n\ndomain = ['vshare.eu']\nname = 'Vshare'\nsources = []\n\ndef resolve(url):\n html = requests.get(url).content\n op = re.findall('',html)[0]\n ID = re.findall('',html)[0]\n fname = re.findall('',html)[0]\n headers = {\n \"referer\":url,\n \"User-Agent\":\"Mozilla/5.0 (Windows NT 6.3; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0\",\n \"Content-Type\":\"application/x-www-form-urlencoded\",\n \"Cookie\":'__utma=254669071.73767129.1504312517.1504312517.1504312517.1; __utmz=254669071.1504312517.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); aff=2920; ref_url=http%3A%2F%2Fvshare.eu%2Fj2swq8r2636c.htm'\n }\n data = {\n \"op\":op,\n \"id\":ID,\n \"fname\":fname,\n 'usr_login':'',\n 'referer':url,\n \"method_free\":\"Proceed+to+video\"\n }\n time.sleep(5)\n html2 = requests.post(url,headers=headers,data=data).content\n play = re.findall(\"file:'(.+?)'\",html2)\n for playlink in play:\n if '/vid.mp4' in playlink:\n sources.append({'source': name, 'quality': 'SD', 'scraper': name, 'url': playlink,'direct': True})\n\n return sources\n\n","sub_path":"zips/script.module.coriginresolver/lib/sources/vshare.py","file_name":"vshare.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"123782306","text":"from flask import Flask\nimport os\n\ndef create_app():\n print('create app...')\n base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n templates_dir = os.path.join(base_dir, 'templates')\n static_dir = os.path.join(base_dir, 'static')\n app = Flask(__name__)\n # app = Flask(__name__, static_folder=static_dir, template_folder=templates_dir)\n # 将app交由blue管理\n from app.main import bp as main_bp\n app.register_blueprint(main_bp)\n app.logger.info('Workblog startup...')\n return app\n \n\n","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"272477463","text":"#coding=utf-8\r\n\r\nfrom __future__ import division\r\nimport numpy as np\r\nfrom abc import abstractmethod\r\nimport logging\r\nimport warnings\r\n\r\nfrom foolbox import distances\r\nfrom foolbox.utils import crossentropy\r\n\r\nfrom foolbox.attacks.base import Attack\r\nfrom foolbox.attacks.base import call_decorator\r\n\r\ndef sigmoid(inX):\r\n return 1.0 / (1 + np.exp(-inX))\r\n\r\n\r\nclass 
IterativeProjectedGradientBaseAttack(Attack):\r\n @abstractmethod\r\n def _gradient(self, a, adv_x, class_, strict=True, x=None):\r\n raise NotImplementedError\r\n\r\n @abstractmethod\r\n def _clip_perturbation(self, a, noise, epsilon):\r\n raise NotImplementedError\r\n\r\n @abstractmethod\r\n def _check_distance(self, a):\r\n raise NotImplementedError\r\n\r\n def _get_mode_and_class(self, a):\r\n target_class = a.target_class()\r\n targeted = target_class is not None\r\n\r\n if targeted:\r\n class_ = target_class\r\n else:\r\n class_ = a.original_class\r\n return targeted, class_\r\n\r\n def _run(self, a, binary_search,\r\n epsilon, stepsize, iterations,\r\n random_start, return_early, scale, bb_step, RO, m, RC, TAP, uniform_or_not, moment_or_not):\r\n if not a.has_gradient():\r\n warnings.warn('applied gradient-based attack to model that'\r\n ' does not provide gradients')\r\n return\r\n\r\n self._check_distance(a)\r\n\r\n targeted, class_ = self._get_mode_and_class(a)\r\n self.success_dir = 0 #record of the average direction of all adversarial examples\r\n self.success_adv = 0\r\n\r\n self.best = 9999\r\n\r\n if binary_search:\r\n if isinstance(binary_search, bool):\r\n k = 20\r\n else:\r\n k = int(binary_search)\r\n return self._run_binary_search(\r\n a, epsilon, stepsize, iterations,\r\n random_start, targeted, class_, return_early, k=k, scale=scale, bb_step=bb_step, RO=RO, m=m, RC=RC, TAP=TAP, uniform_or_not=uniform_or_not, moment_or_not=moment_or_not)\r\n else:\r\n return self._run_one(\r\n a, epsilon, stepsize, iterations,\r\n random_start, targeted, class_, return_early, scale=scale, bb_step=bb_step, RO=RO, m=m, RC=RC, TAP=TAP, uniform_or_not=uniform_or_not, moment_or_not=moment_or_not)\r\n\r\n def _run_binary_search(self, a, epsilon, stepsize, iterations,\r\n random_start, targeted, class_, return_early, k, scale, bb_step, RO, m, RC, TAP, uniform_or_not, moment_or_not):\r\n\r\n factor = stepsize / epsilon\r\n\r\n def try_epsilon(epsilon):\r\n stepsize = factor * epsilon\r\n return self._run_one(\r\n a, epsilon, stepsize, iterations,\r\n random_start, targeted, class_, return_early, scale, bb_step, RO, m, RC, TAP, uniform_or_not, moment_or_not)\r\n\r\n for i in range(k*2):\r\n if try_epsilon(epsilon):\r\n break\r\n epsilon = epsilon * 1.5\r\n else:\r\n return\r\n\r\n bad = 0\r\n good = epsilon\r\n\r\n for i in range(k):\r\n epsilon = (good + bad) / 2\r\n if try_epsilon(epsilon):\r\n good = epsilon\r\n else:\r\n bad = epsilon\r\n\r\n\r\n def update_success_dir(self, new_adv):\r\n self.success_adv += new_adv\r\n\r\n new_adv_norm = np.sqrt(np.mean(np.square(self.success_adv)))\r\n new_adv_norm = max(1e-12, new_adv_norm)\r\n self.success_dir = self.success_adv/new_adv_norm\r\n\r\n\r\n def _run_one(self, a, epsilon, stepsize, iterations,\r\n random_start, targeted, class_, return_early, scale, \r\n bb_step=15, RO=False, m=2, RC=False, TAP=False, uniform_or_not=False, moment_or_not=False):\r\n min_, max_ = a.bounds()\r\n s = max_ - min_\r\n\r\n original = a.original_image.copy()\r\n\r\n if random_start:\r\n noise = np.random.uniform(\r\n -epsilon * s, epsilon * s, original.shape).astype(\r\n original.dtype)\r\n x = original + self._clip_perturbation(a, noise, epsilon)\r\n strict = False\r\n else:\r\n x = original\r\n strict = True\r\n\r\n if RC: #use curl iteration to update adversarial example\r\n success = False\r\n momentum_up = 0\r\n momentum_down = 0\r\n go_up_flag = True #gradient descend flag\r\n x_up = x.copy()\r\n\r\n logits_init, is_adversarial_init = 
a.predictions(np.round(x))\r\n ce_init = crossentropy(class_, logits_init)\r\n up_better_start = x.copy()\r\n\r\n for _ in range(iterations):\r\n avg_gradient_down = 0\r\n avg_gradient_up = 0\r\n for m_counter in range(m):\r\n #gradient ascent trajectory\r\n if RO:\r\n if uniform_or_not: #add uniform noise to gradient calculation process \r\n temp_x_up = np.clip(np.random.uniform(-scale, scale, original.shape) + x_up + stepsize*self.success_dir, min_, max_).astype(np.float32)\r\n else: #add gaussian noise to gradient calculation process\r\n temp_x_up = np.clip(np.random.normal(loc=x_up, scale=scale) + stepsize*self.success_dir, min_, max_).astype(np.float32)\r\n else:\r\n if uniform_or_not:\r\n temp_x_up = np.clip(np.random.uniform(-scale, scale, original.shape) + x_up, min_, max_).astype(np.float32)\r\n else:\r\n temp_x_up = np.clip(np.random.normal(loc=x_up, scale=scale), min_, max_).astype(np.float32)\r\n temp_x_up.dtype = \"float32\"\r\n gradient_up = self._gradient(a, temp_x_up, class_, strict=strict) #calculate gradient on substitute model\r\n avg_gradient_up += gradient_up\r\n\r\n #gradient descent trajectory\r\n if RO:\r\n if uniform_or_not:\r\n temp_x_down = np.clip(np.random.uniform(-scale, scale, original.shape) + x + stepsize*self.success_dir, min_, max_).astype(np.float32)\r\n else:\r\n temp_x_down = np.clip(np.random.normal(loc=x, scale=scale) + stepsize*self.success_dir, min_, max_).astype(np.float32)\r\n else:\r\n if uniform_or_not:\r\n temp_x_down = np.clip(np.random.uniform(-scale, scale, original.shape) + x, min_, max_).astype(np.float32)\r\n else:\r\n temp_x_down = np.clip(np.random.normal(loc=x, scale=scale), min_, max_).astype(np.float32)\r\n temp_x_down.dtype = \"float32\"\r\n gradient_down = self._gradient(a, temp_x_down, class_, strict=strict)\r\n avg_gradient_down += gradient_down\r\n \r\n avg_gradient_up = avg_gradient_up/m\r\n avg_gradient_down = avg_gradient_down/m\r\n\r\n strict = True\r\n if targeted:\r\n avg_gradient_down = -avg_gradient_down\r\n avg_gradient_up = -avg_gradient_up\r\n\r\n if moment_or_not: #whether use momentum as in MI-FGSM\r\n momentum_up += avg_gradient_up\r\n momentum_up_norm = np.sqrt(np.mean(np.square(momentum_up)))\r\n momentum_up_norm = max(1e-12, momentum_up_norm) # avoid divsion by zero\r\n\r\n momentum_down += avg_gradient_down\r\n momentum_down_norm = np.sqrt(np.mean(np.square(momentum_down)))\r\n momentum_down_norm = max(1e-12, momentum_down_norm) # avoid divsion by zero\r\n if go_up_flag:\r\n x_up = x_up - stepsize * (momentum_up/momentum_up_norm)\r\n else:\r\n x_up = x_up + stepsize * (momentum_up/momentum_up_norm)\r\n\r\n x = x + stepsize * (momentum_down/momentum_down_norm)\r\n\r\n else: \r\n if go_up_flag:\r\n avg_gradient_up = -avg_gradient_up\r\n x_up = x_up + stepsize * avg_gradient_up\r\n else:\r\n x_up = x_up + stepsize * avg_gradient_up\r\n\r\n x = x + stepsize * avg_gradient_down\r\n\r\n x = original + self._clip_perturbation(a, x - original, epsilon)\r\n x_up = original + self._clip_perturbation(a, x_up - original, epsilon)\r\n\r\n x = np.clip(x, min_, max_)\r\n x_up = np.clip(x_up, min_, max_)\r\n\r\n logits_down, is_adversarial_down = a.predictions(np.round(x))\r\n logits_up, is_adversarial_up = a.predictions(np.round(x_up))\r\n\r\n if logging.getLogger().isEnabledFor(logging.DEBUG):\r\n if targeted:\r\n ce = crossentropy(a.original_class, logits_down)\r\n logging.debug('crossentropy to {} is {}'.format(\r\n a.original_class, ce))\r\n ce = crossentropy(class_, logits_down)\r\n logging.debug('crossentropy to {} is 
{}'.format(class_, ce))\r\n\r\n if is_adversarial_up:\r\n if RO:\r\n self.update_success_dir(x_up)\r\n #start binary search\r\n left = original\r\n right = x_up\r\n for binary_counter in range(bb_step):\r\n middle = np.clip((left + right)/2, min_, max_)\r\n temp_logits, temp_is_adversarial = a.predictions(np.round(middle))\r\n\r\n if temp_is_adversarial: #find a better adversarial example\r\n if RO:\r\n self.update_success_dir(middle)\r\n right = middle\r\n else:\r\n left = middle\r\n if return_early:\r\n return True\r\n else:\r\n success = True\r\n\r\n if is_adversarial_down:\r\n if RO:\r\n self.update_success_dir(x)\r\n left = original\r\n right = x\r\n for binary_counter in range(bb_step):\r\n middle = np.clip((left + right)/2, min_, max_)\r\n temp_logits, temp_is_adversarial = a.predictions(np.round(middle))\r\n\r\n if temp_is_adversarial:\r\n if RO:\r\n self.update_success_dir(middle)\r\n right = middle\r\n else:\r\n left = middle\r\n if return_early:\r\n return True\r\n else:\r\n success = True\r\n\r\n \r\n if go_up_flag:\r\n ce_now = crossentropy(class_, logits_up)\r\n if ce_now < ce_init:\r\n ce_init = ce_now\r\n up_better_start = x_up\r\n else:\r\n go_up_flag = False #stop gradient descent, start gradient ascent\r\n momentum_up = 0\r\n x_up = up_better_start\r\n\r\n\r\n else: #normal iterative process\r\n success = False\r\n momentum_down = 0\r\n\r\n for _ in range(iterations):\r\n avg_gradient_down = 0\r\n avg_gradient_up = 0\r\n for m_counter in range(m):\r\n if RO:\r\n if uniform_or_not:\r\n temp_x_down = np.clip(np.random.uniform(-scale, scale, original.shape) + x + stepsize*self.success_dir, min_, max_).astype(np.float32)\r\n else:\r\n temp_x_down = np.clip(np.random.normal(loc=x, scale=scale) + stepsize*self.success_dir, min_, max_).astype(np.float32)\r\n else:\r\n if uniform_or_not:\r\n temp_x_down = np.clip(np.random.uniform(-scale, scale, original.shape) + x, min_, max_).astype(np.float32)\r\n else:\r\n temp_x_down = np.clip(np.random.normal(loc=x, scale=scale), min_, max_).astype(np.float32)\r\n temp_x_down.dtype = \"float32\"\r\n gradient_down = self._gradient(a, temp_x_down, class_, strict=strict)\r\n avg_gradient_down += gradient_down\r\n \r\n avg_gradient_down = avg_gradient_down/m\r\n\r\n strict = True\r\n if targeted:\r\n avg_gradient_down = -avg_gradient_down\r\n\r\n if moment_or_not:\r\n momentum_down += avg_gradient_down\r\n momentum_down_norm = np.sqrt(np.mean(np.square(momentum_down)))\r\n momentum_down_norm = max(1e-12, momentum_down_norm) # avoid divsion by zero\r\n x = x + stepsize * (momentum_down/momentum_down_norm)\r\n\r\n else: \r\n x = x + stepsize * avg_gradient_down\r\n\r\n x = original + self._clip_perturbation(a, x - original, epsilon)\r\n x = np.clip(x, min_, max_)\r\n\r\n logits_down, is_adversarial_down = a.predictions(np.round(x))\r\n\r\n if logging.getLogger().isEnabledFor(logging.DEBUG):\r\n if targeted:\r\n ce = crossentropy(a.original_class, logits_down)\r\n logging.debug('crossentropy to {} is {}'.format(\r\n a.original_class, ce))\r\n ce = crossentropy(class_, logits_down)\r\n logging.debug('crossentropy to {} is {}'.format(class_, ce))\r\n\r\n if is_adversarial_down:\r\n if RO:\r\n self.update_success_dir(x)\r\n left = original\r\n right = x\r\n for binary_counter in range(bb_step):\r\n middle = np.clip((left + right)/2, min_, max_)\r\n temp_logits, temp_is_adversarial = a.predictions(np.round(middle))\r\n\r\n if temp_is_adversarial: \r\n if RO:\r\n self.update_success_dir(middle)\r\n right = middle\r\n else:\r\n left = middle\r\n if 
return_early:\r\n return True\r\n else:\r\n success = True\r\n return success\r\n \r\n\r\n\r\nclass L2GradientMixin(object):\r\n def _gradient(self, a, adv_x, class_, strict=True, x=None):\r\n if x is None:\r\n gradient = a.gradient(adv_x, class_, strict=strict)\r\n else:\r\n gradient = a.gradient(x, adv_x, class_, strict=strict)\r\n # using mean to make range of epsilons comparable to Linf\r\n gradient = gradient / np.sqrt(max(1e-12, np.mean(np.square(gradient))))\r\n min_, max_ = a.bounds()\r\n gradient = (max_ - min_) * gradient\r\n return gradient\r\n\r\n\r\n\r\nclass L2ClippingMixin(object):\r\n def _clip_perturbation(self, a, perturbation, epsilon):\r\n # using mean to make range of epsilons comparable to Linf\r\n norm = np.sqrt(np.mean(np.square(perturbation)))\r\n norm = max(1e-12, norm) # avoid divsion by zero\r\n min_, max_ = a.bounds()\r\n s = max_ - min_\r\n # clipping, i.e. only decreasing norm\r\n factor = min(1, epsilon * s / norm)\r\n return perturbation * factor\r\n\r\n\r\n\r\nclass L2DistanceCheckMixin(object):\r\n def _check_distance(self, a):\r\n if not isinstance(a.distance, distances.MSE):\r\n logging.warning('Running an attack that tries to minimize the'\r\n ' L2 norm of the perturbation without'\r\n ' specifying foolbox.distances.MSE as'\r\n ' the distance metric might lead to suboptimal'\r\n ' results.')\r\n\r\n\r\n\r\nclass L2BasicIterativeAttack(\r\n L2GradientMixin,\r\n L2ClippingMixin,\r\n L2DistanceCheckMixin,\r\n IterativeProjectedGradientBaseAttack):\r\n\r\n \"\"\"Modified version of the Basic Iterative Method\r\n that minimizes the L2 distance.\r\n\r\n .. seealso:: :class:`LinfinityBasicIterativeAttack`\r\n\r\n \"\"\"\r\n\r\n @call_decorator\r\n def __call__(self, input_or_adv, label=None, unpack=True,\r\n binary_search=True,\r\n epsilon=0.3,\r\n stepsize=0.05,\r\n iterations=10,\r\n random_start=False,\r\n return_early=True,\r\n scale = 2,\r\n bb_step = 10,\r\n RO = False, \r\n m=1, \r\n RC=False, \r\n TAP=False, \r\n uniform_or_not=False, \r\n moment_or_not=False):\r\n\r\n \"\"\"Simple iterative gradient-based attack known as\r\n Basic Iterative Method, Projected Gradient Descent or FGSM^k.\r\n\r\n Parameters\r\n ----------\r\n input_or_adv : `numpy.ndarray` or :class:`Adversarial`\r\n The original, unperturbed input as a `numpy.ndarray` or\r\n an :class:`Adversarial` instance.\r\n label : int\r\n The reference label of the original input. Must be passed\r\n if `a` is a `numpy.ndarray`, must not be passed if `a` is\r\n an :class:`Adversarial` instance.\r\n unpack : bool\r\n If true, returns the adversarial input, otherwise returns\r\n the Adversarial object.\r\n binary_search : bool or int\r\n Whether to perform a binary search over epsilon and stepsize,\r\n keeping their ratio constant and using their values to start\r\n the search. 
If False, hyperparameters are not optimized.\r\n Can also be an integer, specifying the number of binary\r\n search steps (default 20).\r\n epsilon : float\r\n Limit on the perturbation size; if binary_search is True,\r\n this value is only for initialization and automatically\r\n adapted.\r\n stepsize : float\r\n Step size for gradient descent; if binary_search is True,\r\n this value is only for initialization and automatically\r\n adapted.\r\n iterations : int\r\n Number of iterations for each gradient descent run.\r\n random_start : bool\r\n Start the attack from a random point rather than from the\r\n original input.\r\n return_early : bool\r\n Whether an individual gradient descent run should stop as\r\n soon as an adversarial is found.\r\n scale : float\r\n Variance of gaussian noise add to gradient calculation.\r\n bb_step : int\r\n Binary search step after each round of iteration.\r\n RO : bool\r\n Whether to update initial direction by the average direction\r\n of all adversarial examples.\r\n m : int\r\n Times of gradient calculation as mentioned in vr-IGSM attack.\r\n RC : bool\r\n Whether use Curls iteration to update adversarial trajectory.\r\n TAP : bool\r\n Discarded parameter.\r\n Uniform_or_not : bool\r\n Whether use uniform noise (if set to True) or gaussian noise (if\r\n set to False).\r\n moment_or_not : bool\r\n Whether use momentum for update in iteration. \r\n \"\"\"\r\n\r\n a = input_or_adv\r\n del input_or_adv\r\n del label\r\n del unpack\r\n\r\n assert epsilon > 0\r\n\r\n self._run(a, binary_search,\r\n epsilon, stepsize, iterations,\r\n random_start, return_early, scale, bb_step, RO, m, RC, TAP, uniform_or_not, moment_or_not)\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"attacks/curls_untargeted.py","file_name":"curls_untargeted.py","file_ext":"py","file_size_in_byte":20222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"258617633","text":"\nimport managers\nfrom utils.file_argument import File_argument\nfrom ... 
import errors\nfrom ...subtypes import integer, generic, string, binary, v_empty, v_mismatch, v_nothing\nfrom ...variables import variant\nfrom ...conversions import pack, unpack\n\n\nclass v_cookiescollection(generic):\n\n\tdef __call__(self, name, **keywords):\n\t\tif \"let\" in keywords:\n\t\t\tmanagers.request_manager.current.cookies()[name.as_string.encode(\"utf-8\")]= \\\n\t\t\t\tkeywords[\"let\"].as_string.encode(\"utf-8\")\n\t\telif \"set\" in keywords:\n\t\t\traise errors.object_has_no_property\n\t\telse:\n\t\t\ttry:\n\t\t\t\treturn string(managers.request_manager.current.cookies()[name.as_string.encode(\"utf-8\")] \\\n\t\t\t\t\t.value.decode(\"utf-8\"))\n\t\t\texcept KeyError:\n\t\t\t\treturn v_empty\n\n\tdef __iter__(self):\n\t\tfor cookie in managers.request_manager.current.cookies().cookies():\n\t\t\tyield variant(string(unicode(cookie)))\n\n\nclass v_argumentscollection(generic):\n\n\tdef __call__(self, name, **keywords):\n\t\tif \"let\" in keywords or \"set\" in keywords:\n\t\t\traise errors.object_has_no_property\n\t\telse:\n\t\t\ttry:\n\t\t\t\tvalue=managers.request_manager.current.arguments().arguments()[name.as_string][0]\n\t\t\texcept KeyError:\n\t\t\t\treturn v_empty\n\t\t\ttry: \t\t\t\n\t\t\t\tif isinstance(value, str):\n\t\t\t\t\treturn string(unicode(value.decode(\"utf-8\", \"ignore\")))\n\t\t\t\telse:\n\t\t\t\t\treturn string(unicode(value))\n\t\n\t\t\texcept UnicodeDecodeError: \n\t\t\t\treturn binary(value)\n\n\tdef __iter__(self):\n\t\tfor argument in managers.request_manager.current.arguments().arguments():\n\t\t\tyield variant(string(unicode(argument)))\n\n\nclass v_file(generic):\n\t\n\tdef __init__(self, file_argument):\n\t\tself._file_argument=file_argument\n\n\tdef v_name(self, **keywords):\n\t\tif \"let\" in keywords or \"set\" in keywords:\n\t\t\traise errors.object_has_no_property(\"name\")\n\t\telse:\n\t\t\treturn string(unicode(self._file_argument[1]))\n\n\tdef v_contents(self, **keywords):\n\t\tif \"let\" in keywords or \"set\" in keywords:\n\t\t\traise errors.object_has_no_property(\"contents\")\n\t\telse:\n\t\t\treturn binary(unicode(self._file_argument[0]))\n\nclass v_filescollection(generic):\n\n\tdef __call__(self, name, **keywords):\n\t\tif \"let\" in keywords or \"set\" in keywords:\n\t\t\traise errors.object_has_no_property\n\t\telse:\n\t\t\ttry:\n\t\t\t\tvalue=managers.request_manager.current.arguments().arguments()[name.as_string]\n\t\t\texcept KeyError:\n\t\t\t\treturn v_nothing\n\t\t\tif isinstance(value, File_argument):\n\t\t\t\treturn v_file(value)\n\t\t\telse:\n\t\t\t\treturn v_nothing\n\n\tdef __iter__(self):\n\t\tfor argument, value in managers.request_manager.current.arguments().arguments().iteritems():\n\t\t\tif isinstance(value, File_argument):\n\t\t\t\tyield variant(string(unicode(argument)))\n\n\nclass v_servervariablescollection(generic):\n\n\tvariable_table={\n\t\tu\"ALL_HTTP\": lambda self: string(u\"\\n\".join([u\"HTTP_%s=%s\"%(unicode(name.upper()) \\\n\t\t\t.replace(u\"-\", u\"_\"), unicode(value)) for name, value in managers.request_manager \\\n\t\t\t.get_request().headers().headers().items()])),\n\t\tu\"RAW_HTTP\": lambda self: string(u\"\\n\".join([u\"%s=%s\"%(unicode(name), \\\n\t\t\tunicode(value)) for name, value in managers.request_manager.current.headers() \\\n\t\t\t.headers().items()])),\n\t\tu\"AUTH_PASSWORD\": lambda self: v_empty,\n\t\tu\"AUTH_TYPE\": lambda self: string(u\"Basic\"),\n\t\tu\"AUTH_USER\": lambda self: string(unicode(managers.request_manager.current.session().user)),\n\t\tu\"CONTENT_LENGTH\": 
lambda self: v_empty,\n\t\tu\"CONTENT_TYPE\": lambda self: v_empty,\n\t\tu\"GATEWAY_INTERFACE\": lambda self: string(unicode(managers.request_manager \\\n\t\t\t.get_request().environment().environment()[\"GATEWAY_INTERFACE\"])),\n\t\tu\"QUERY_STRING\": lambda self: string(unicode(managers.request_manager \\\n\t\t\t.get_request().environment().environment()[\"QUERY_STRING\"])),\n\t\tu\"LOCAL_ADDR\": lambda self: v_empty,\n\t\tu\"REMOTE_ADDR\": lambda self: string(unicode(managers.request_manager \\\n\t\t\t.get_request().environment().environment()[\"REMOTE_ADDR\"])),\n\t\tu\"REMOTE_HOST\": lambda self: v_empty,\n\t\tu\"REMOTE_PORT\": lambda self: string(unicode(managers.request_manager \\\n\t\t\t.get_request().environment().environment()[\"REMOTE_PORT\"])),\n\t\tu\"REMOTE_USER\": lambda self: string(unicode(managers.request_manager.current.session().user)),\n\t\tu\"REQUEST_METHOD\": lambda self: string(unicode(managers.request_manager \\\n\t\t\t.get_request().environment().environment()[\"REQUEST_METHOD\"])),\n u\"SCRIPT_NAME\": lambda self: string(unicode(managers.request_manager \\\n\t\t\t.get_request().environment().environment()[\"SCRIPT_NAME\"])),\n\t\tu\"SERVER_NAME\": lambda self: string(unicode(managers.request_manager \\\n\t\t\t.get_request().environment().environment()[\"SERVER_NAME\"])),\n\t\tu\"SERVER_PORT\": lambda self: string(unicode(managers.request_manager \\\n\t\t\t.get_request().environment().environment()[\"SERVER_PORT\"])),\n\t\tu\"SERVER_PORT_SECURE\": lambda self: integer(0),\n\t\tu\"SERVER_PROTOCOL\": lambda self: string(unicode(managers.request_manager \\\n\t\t\t.get_request().environment().environment()[\"SERVER_PROTOCOL\"])),\n\t\tu\"SERVER_SOFTWARE\": lambda self: string(unicode(managers.request_manager \\\n\t\t\t.get_request().environment().environment()[\"SERVER_SOFTWARE\"])),\n\t\tu\"UNENCODED_URL\": lambda self: string(unicode(managers.request_manager \\\n\t\t\t.get_request().environment().environment()[\"SCRIPT_NAME\"]+managers.request_manager \\\n\t\t\t.get_request().environment().environment()[\"QUERY_STRING\"])),\n\t\tu\"SCRIPT_NAME\": lambda self: string(unicode(managers.request_manager \\\n\t\t\t.get_request().environment().environment()[\"SCRIPT_NAME\"]))}\n\n\tdef __call__(self, name, **keywords):\n\t\tname=name.as_string.upper()\n\t\tif \"let\" in keywords or \"set\" in keywords:\n\t\t\traise errors.object_has_no_property\n\t\telif name.startswith(u\"HEADER_\"):\n\t\t\treturn string(unicode(managers.request_manager.current.headers().headers()[name[7:]]))\n\t\telif name.startswith(u\"HTTP_\"):\n\t\t\treturn string(unicode(managers.request_manager.current.headers().headers()[name[5:].replace(u\"_\", u\"-\")]))\n\t\telse:\n\t\t\treturn self.variable_table[name](self)\n\n\tdef __iter__(self):\n\t\tfor name in self.variable_table:\n\t\t\tyield variant(string(unicode(name)))\n\t\tfor name in managers.request_manager.current.headers().headers():\n\t\t\tyield variant(string(u\"HEADER_%s\"%name))\n\t\t\tyield variant(string(u\"HTTP_%s\"%name.upper().replace(u\"-\", u\"_\")))\n\n\nclass v_parameterscollection(generic):\n\n\tdef __call__(self, index, **keywords):\n\t\tif \"let\" in keywords or \"set\" in keywords:\n\t\t\traise errors.object_has_no_property\n\t\telse:\n\t\t\ttry:\n\t\t\t\tindex, parameters=index.as_integer, managers.request_manager \\\n\t\t\t\t\t.get_request().arguments().arguments()[\"xml_data\"][0]\n\t\t\t\tif isinstance(parameters, list):\n\t\t\t\t\treturn string(unicode(parameters[index]))\n\t\t\t\telse:\n\t\t\t\t\treturn v_empty if index 
else string(unicode(parameters))\n\t\t\texcept KeyError:\n\t\t\t\treturn v_empty\n\n\tdef __iter__(self):\n\t\ttry:\n\t\t\tparameters=managers.request_manager.current.arguments().arguments()[\"xml_data\"][0]\n\t\texcept KeyError:\n\t\t\treturn\n\t\tif isinstance(parameters, list):\n\t\t\tfor parameter in parameters: yield variant(string(unicode(parameter)))\n\t\telse:\n\t\t\tyield variant(string(unicode(parameters)))\n\n\nclass v_sharedvariablescollection(generic):\n\n\tdef __call__(self, name, **keywords):\n\t\tif \"let\" in keywords:\n\t\t\tmanagers.request_manager.current.shared_variables[name.as_string]=unpack(keywords[\"let\"].as_simple)\n\t\telif \"set\" in keywords:\n\t\t\traise errors.object_has_no_property\n\t\telse:\n\t\t\ttry:\n\t\t\t\treturn pack(managers.request_manager.current.shared_variables[name.as_string])\n\t\t\texcept KeyError:\n\t\t\t\treturn v_empty\n\n\tdef __iter__(self):\n\t\tfor name in managers.request_manager.current.shared_variables:\n\t\t\tyield variant(string(unicode(name)))\n\n\nclass v_request(generic):\n\n\tdef __init__(self):\n\t\tself._cookies=v_cookiescollection()\n\t\tself._arguments=v_argumentscollection()\n\t\tself._files=v_filescollection()\n\t\tself._servervariables=v_servervariablescollection()\n\t\tself._parameters=v_parameterscollection()\n\t\tself._sharedvariables=v_sharedvariablescollection()\n\n\tdef v_cookies(self, name=None, **keywords):\n\t\tif name is None:\n\t\t\tif \"let\" in keywords or \"set\" in keywords:\n\t\t\t\traise errors.object_has_no_property(\"cookies\")\n\t\t\telse:\n\t\t\t\treturn self._cookies\n\t\telse:\n\t\t\treturn self._cookies(name, **keywords)\n\n\tdef v_arguments(self, name=None, **keywords):\n\t\tif name is None:\n\t\t\tif \"let\" in keywords or \"set\" in keywords:\n\t\t\t\traise errors.object_has_no_property(\"arguments\")\n\t\t\telse:\n\t\t\t\treturn self._arguments\n\t\telse:\n\t\t\treturn self._arguments(name, **keywords)\n\n\tdef v_form(self, name=None, **keywords):\n\t\tif name is None:\n\t\t\tif \"let\" in keywords or \"set\" in keywords:\n\t\t\t\traise errors.object_has_no_property(\"form\")\n\t\t\telse:\n\t\t\t\treturn self._arguments\n\t\telse:\n\t\t\treturn self._arguments(name, **keywords)\n\n\tdef v_files(self, name=None, **keywords):\n\t\tif name is None:\n\t\t\tif \"let\" in keywords or \"set\" in keywords:\n\t\t\t\traise errors.object_has_no_property(\"form\")\n\t\t\telse:\n\t\t\t\treturn self._files\n\t\telse:\n\t\t\treturn self._files(name, **keywords)\n\n\tdef v_querystring(self, name=None, **keywords):\n\t\tif name is None:\n\t\t\tif \"let\" in keywords or \"set\" in keywords:\n\t\t\t\traise errors.object_has_no_property(\"querystring\")\n\t\t\telse:\n\t\t\t\treturn self._arguments\n\t\telse:\n\t\t\treturn self._arguments(name, **keywords)\n\n\tdef v_servervariables(self, name=None, **keywords):\n\t\tif name is None:\n\t\t\tif \"let\" in keywords or \"set\" in keywords:\n\t\t\t\traise errors.object_has_no_property(\"servervariables\")\n\t\t\telse:\n\t\t\t\treturn self._servervariables\n\t\telse:\n\t\t\treturn self._servervariables(name, **keywords)\n\n\tdef v_parameters(self, index=None, **keywords):\n\t\tif index is None:\n\t\t\tif \"let\" in keywords or \"set\" in keywords:\n\t\t\t\traise errors.object_has_no_property(\"parameters\")\n\t\t\telse:\n\t\t\t\treturn self._parameters\n\t\telse:\n\t\t\treturn self._parameters(index, **keywords)\n\n\tdef v_sharedvariables(self, name=None, **keywords):\n\t\tif name is None:\n\t\t\tif \"let\" in keywords or \"set\" in keywords:\n\t\t\t\traise 
errors.object_has_no_property(\"sharedvariables\")\n\t\t\telse:\n\t\t\t\treturn self._sharedvariables\n\t\telse:\n\t\t\treturn self._sharedvariables(name, **keywords)\n","sub_path":"sources/vscript/wrappers/environment/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":9796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"467705770","text":"# _*_coding:utf-8\n# author='sandra'\n\"\"\"\nPractice with the scatter-plot mode: plt.scatter; marker sets the point shape, s sets the point size\n\"\"\"\nfrom pylab import mpl\nimport matplotlib.pyplot as plt\nmpl.rcParams['font.sans-serif'] = ['SimHei'] # set the font\nmpl.rcParams['axes.unicode_minus'] = False\nx = [1,2,3,4,5,6,7,8]\ny = [2,4,6,3,6,7,4,6]\n\nplt.scatter(x, y, s=50, marker='*', label='scatter', color='m')\nplt.title(\"Scatter plot\", color='m')\nplt.show()\n","sub_path":"matplotlib_scatter.py","file_name":"matplotlib_scatter.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"275545348","text":"#-*- coding:utf-8 -*-\r\nimport cv2\r\nimport numpy as np\r\nimport pytesseract\r\nimport os\r\n\r\nimport ocr\r\nimport time\r\nimport shutil\r\nimport sys\r\nfrom PIL import Image\r\nfrom glob import glob\r\n\r\nprint(sys.path)\r\ndef bank_flow_identity(imagePath): # imagePath './test_images/gs.jpg'\r\n    image = cv2.imread(imagePath, 1)\r\n    print(image)\r\n    # binarize\r\n    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n    binary = cv2.adaptiveThreshold(~gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 35, -5)\r\n\r\n    rows,cols=binary.shape\r\n    scale = 40\r\n    # detect horizontal lines\r\n    kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(cols//scale,1))\r\n    eroded = cv2.erode(binary,kernel,iterations = 1)\r\n    dilatedcol = cv2.dilate(eroded,kernel,iterations = 1)\r\n\r\n\r\n    # detect vertical lines\r\n    scale = 20\r\n    kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(1,rows//scale))\r\n    eroded = cv2.erode(binary,kernel,iterations = 1)\r\n    dilatedrow = cv2.dilate(eroded,kernel,iterations = 1)\r\n\r\n\r\n    # mark the intersections\r\n    bitwiseAnd = cv2.bitwise_and(dilatedcol,dilatedrow)\r\n\r\n\r\n    # mark the table grid\r\n    merge = cv2.add(dilatedcol,dilatedrow)\r\n\r\n\r\n    # find the white points in the black-and-white image\r\n    ys,xs = np.where(bitwiseAnd>0)\r\n    mylisty=[]\r\n    mylistx=[]\r\n\r\n    # sort, then keep the x and y values where there is a jump: those mark an intersection; otherwise one intersection spans many pixels, so only the last point is taken\r\n    i = 0\r\n    myxs=np.sort(xs)\r\n    for i in range(len(myxs)-1):\r\n        if(myxs[i+1]-myxs[i]>10):\r\n            mylistx.append(myxs[i])\r\n        i=i+1\r\n    mylistx.append(myxs[i])\r\n    # print(mylistx)\r\n    # print(len(mylistx))\r\n\r\n    i = 0\r\n    myys=np.sort(ys)\r\n    #print(np.sort(ys))\r\n    for i in range(len(myys)-1):\r\n        if(myys[i+1]-myys[i]>20):\r\n            mylisty.append(myys[i])\r\n        i=i+1\r\n    mylisty.append(myys[i])\r\n\r\n    i=0\r\n    t = time.time()\r\n    res=''\r\n    for i in range(13): # only 13 rows contain valid figures\r\n        ROI = image[mylisty[i]:mylisty[i+1],mylistx[9]:mylistx[10]]\r\n\r\n        result, image_framed = ocr.model(ROI)\r\n        for key in result:\r\n            print(result[key][1])\r\n            res=res+result[key][1]+'||'\r\n        i=i+1\r\n    print(\"task complete, it took {:.3f}s\".format(time.time() - t))\r\n    return res\r\n\r\nif __name__ == \"__main__\":\r\n\r\n    testImgPath = os.path.join(os.getcwd(), 'test_images/gs.jpg')\r\n    print(testImgPath)\r\n    bank_flow_identity(testImgPath)","sub_path":"bank_flow_gs_identity.py","file_name":"bank_flow_gs_identity.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"595151396","text":"# -*- coding: utf-8 -*-\r\n\r\n\r\nfrom gym.core 
import Env\r\nfrom gym.envs.classic_control import rendering\r\n\r\nimport collections\r\nimport math\r\nimport numpy as np\r\nimport time\r\n\r\nStep = collections.namedtuple('Step', ['reward', 'new_obs', 'p_continue'])\r\n\r\nclass CartPole(Env):\r\n\r\n # Initialize the environment.\r\n def __init__(self, verbose=False):\r\n self.gravity = 9.8\r\n self.masscart = 1.0\r\n self.masspole = 0.1\r\n self.total_mass = (self.masspole + self.masscart)\r\n self.length = 0.5 # actually half the pole's length\r\n self.polemass_length = (self.masspole * self.length)\r\n self.force_mag = 10.0\r\n self.tau = 0.02 # seconds between state updates\r\n\r\n # The probability of failed action, i.e. action 'left' moving cart to the\r\n # right and action 'right' moving cart to the left.\r\n self.p_opposite_direction = 0.1\r\n # Probability of no reward.\r\n self.p_no_reward = 0.25\r\n\r\n # Angle at which to fail the episode\r\n self.theta_threshold_radians = 12 * 2 * math.pi / 360\r\n self.x_threshold = 2.4\r\n self.steps_beyond_done = None\r\n\r\n # The action space.\r\n # The better the learning algorithm, the less you’ll have to try to\r\n # interpret these numbers yourself.\r\n self.action_space = [0, 1, 2]\r\n\r\n # The actions that move the cart have a cost\r\n self.move_cost = 0.1\r\n\r\n self.verbose = verbose\r\n\r\n\r\n # Let the environment's state evolve based on the chosen action and\r\n # observe the next state.\r\n def step(self, action):\r\n assert action in [0, 1, 2], \"%r (%s) invalid\"%(action, type(action))\r\n\r\n state = self.state\r\n x, x_dot, theta, theta_dot = state\r\n\r\n # With some probability an action moves the cart to the opposite\r\n # direction than the one it should.\r\n if np.random.random() < self.p_opposite_direction:\r\n force = -(action - 1) * self.force_mag\r\n else:\r\n force = (action - 1) * self.force_mag\r\n\r\n # Compute the next state.\r\n costheta = math.cos(theta)\r\n sintheta = math.sin(theta)\r\n temp = \\\r\n (force + self.polemass_length * theta_dot * theta_dot * sintheta) \\\r\n / self.total_mass\r\n thetaacc = \\\r\n (self.gravity * sintheta - costheta * temp) \\\r\n / (self.length *\r\n (4.0/3.0 - self.masspole * costheta * costheta / self.total_mass))\r\n xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass\r\n x = x + self.tau * x_dot\r\n x_dot = x_dot + self.tau * xacc\r\n theta = theta + self.tau * theta_dot\r\n theta_dot = theta_dot + self.tau * thetaacc\r\n\r\n self.state = (x, x_dot, theta, theta_dot)\r\n\r\n # Determine the probability of continuing.\r\n p_continue = x >= -self.x_threshold \\\r\n and x <= self.x_threshold \\\r\n and theta >= -self.theta_threshold_radians \\\r\n and theta <= self.theta_threshold_radians\r\n\r\n # Determine the reward.\r\n if p_continue == 1.0:\r\n reward = np.random.binomial(n=1, p=1-self.p_no_reward)\r\n elif self.steps_beyond_done is None:\r\n # Pole just fell!\r\n self.steps_beyond_done = 0\r\n reward = np.random.binomial(n=1, p=1-self.p_no_reward)\r\n else:\r\n if self.steps_beyond_done == 0:\r\n print(\"You are calling 'step()' even though this environment has\"\r\n \"already returned done = True. 
You should always call 'reset()'\"\r\n \"once you receive 'done = True' -- \"\r\n \"any further steps are undefined behavior.\")\r\n self.steps_beyond_done += 1\r\n reward = 0.0\r\n reward -= self.move_cost * np.abs(action - 1)\r\n\r\n # Return the observation.\r\n step = Step(reward, np.array(self.state), p_continue)\r\n if self.verbose:\r\n print(step)\r\n\r\n return step\r\n\r\n\r\n # Reset the state of the environment, e.g. at the beginning of a learning\r\n # episode.\r\n def reset(self):\r\n self.state = np.random.uniform(low=-0.05, high=0.05, size=(4,))\r\n self.steps_beyond_done = None\r\n return np.array(self.state)\r\n\r\n def max_per_period_reward(self):\r\n return 1.0\r\n\r\n # Utility method to render the cartpole given a list of state action pairs.\r\n def render(self, state_action_list, mode='human'):\r\n\r\n action_dict = {0: \"left\", 1: \"nothing\", 2: \"right\"}\r\n\r\n screen_width = 600\r\n screen_height = 400\r\n\r\n world_width = 5\r\n scale = screen_width/world_width\r\n carty = 150\r\n polewidth = 10.0\r\n polelen = scale * 1.0\r\n cartwidth = 50.0\r\n cartheight = 30.0\r\n\r\n viewer = rendering.Viewer(screen_width, screen_height)\r\n\r\n l,r,t,b = -cartwidth/2, cartwidth/2, cartheight/2, -cartheight/2\r\n axleoffset =cartheight/4.0\r\n cart = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])\r\n carttrans = rendering.Transform()\r\n cart.add_attr(carttrans)\r\n viewer.add_geom(cart)\r\n\r\n l,r,t,b = -polewidth/2,polewidth/2,polelen-polewidth/2,-polewidth/2\r\n pole = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])\r\n pole.set_color(.8,.6,.4)\r\n poletrans = rendering.Transform(translation=(0, axleoffset))\r\n pole.add_attr(poletrans)\r\n pole.add_attr(carttrans)\r\n viewer.add_geom(pole)\r\n\r\n axle = rendering.make_circle(polewidth/2)\r\n axle.add_attr(poletrans)\r\n axle.add_attr(carttrans)\r\n axle.set_color(.5,.5,.8)\r\n viewer.add_geom(axle)\r\n\r\n\r\n right_arrow_points = [(0, 0), (-2, 1), (-2, 0.5), (-6, 0.5),\r\n (-6, -0.5), (-2, -0.5), (-2, -1), (0, 0)]\r\n right_arrow_points = [(screen_width / 2 - cartwidth / 2 + 7.5 * x,\r\n carty + cartheight / 2 + 7.5 * y)\r\n for (x, y) in right_arrow_points]\r\n right_arrow = rendering.FilledPolygon(right_arrow_points)\r\n right_arrow.set_color(0, 0, 0)\r\n right_arrow_trans = rendering.Transform()\r\n right_arrow.add_attr(right_arrow_trans)\r\n\r\n\r\n left_arrow_points = [(0, 0), (2, 1), (2, 0.5), (6, 0.5),\r\n (6, -0.5), (2, -0.5), (2, -1), (0, 0)]\r\n left_arrow_points = [(screen_width / 2 + cartwidth / 2 + 7.5 * x,\r\n carty + cartheight / 2 + 7.5 * y)\r\n for (x, y) in left_arrow_points]\r\n left_arrow = rendering.FilledPolygon(left_arrow_points)\r\n left_arrow.set_color(0, 0, 0)\r\n left_arrow_trans = rendering.Transform()\r\n left_arrow.add_attr(left_arrow_trans)\r\n\r\n track = rendering.Line((-self.x_threshold*scale + screen_width/2.0,carty),\r\n (self.x_threshold*scale + screen_width/2.0,carty))\r\n track.set_color(0,0,0)\r\n viewer.add_geom(track)\r\n\r\n for state, action in state_action_list:\r\n x, x_dot, theta, theta_dot = state\r\n cartx = x*scale + screen_width/2.0 # MIDDLE OF CART\r\n carttrans.set_translation(cartx, carty)\r\n poletrans.set_rotation(-theta)\r\n if action_dict[action] == \"right\":\r\n right_arrow_trans.set_translation(x*scale, 0)\r\n viewer.add_onetime(right_arrow)\r\n if action_dict[action] == \"left\":\r\n left_arrow_trans.set_translation(x*scale, 0)\r\n viewer.add_onetime(left_arrow)\r\n\r\n viewer.render(return_rgb_array = mode=='rgb_array')\r\n 
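# short pause between frames so the playback runs at roughly ten frames per second\r\n            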
time.sleep(0.1)\r\n\r\n        viewer.close()\r\n\r\n","sub_path":"environments.py","file_name":"environments.py","file_ext":"py","file_size_in_byte":7094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"268047173","text":"from ergstrap import app\nfrom flask import json, request\nfrom ergstrap.database import db, Workout\nimport datetime\n\n@app.route('/', methods=['GET', 'POST'])\ndef workouts():\n    if request.method=='POST':\n        new_workout = record_workout()\n\n    workouts = []\n    for workout in get_workouts():\n        workouts.append({'id': workout.id, 'date': workout.date, 'notes': workout.notes})\n\n    return json.jsonify(workouts=workouts)\n\ndef get_workouts():\n    return Workout.query.all()\n\ndef record_workout():\n    new_workout = Workout(date=datetime.datetime.strptime(request.form['date'], '%Y-%m-%d %H:%M:%S'), notes=request.form['notes'])\n    db.session.add(new_workout)\n    db.session.commit()\n    return new_workout\n","sub_path":"web/ergstrap/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"641003633","text":"import argparse\n\nfrom stock.verify_trade import VerifyTrade\n\n# Verification of the Trade class\n# Varies the internal parameters little by little and checks how much performance each setting achieves\n# Also displays the verification results\n#\n# python verify_ope.py -r view_list # display every job's verification config contents and final results\n# python verify_ope.py -r view_trade -j 2019_0919_1933_21 -p 3 # display the trade results for index 3 of job 2019_0919_1933_21\n# python verify_ope.py -r view_brand -j 2019_0919_1933_21 -p 3 # display the trade results for index 3 of job 2019_0919_1933_21 (aggregated per brand)\n# python verify_ope.py -r view_execution -j 2019_0919_1933_21 # display the contents of the execution parameter file of job 2019_0919_1933_21\n# python verify_ope.py -r view_original_verification -v ./data/statistics/verification_config.txt # display the contents of the verification config file ./data/statistics/verification_config.txt\n# python verify_ope.py -r execute -v ./data/statistics/verification_config.txt # run the verification based on the contents of ./data/statistics/verification_config.txt\n#\ndef parse_args():\n    parser = argparse.ArgumentParser(description='Verify Trade Execute')\n    parser.add_argument('--run_type', '-r', type=str, default='view_list', choices=[\n        'view_trade', 'view_brand', 'view_list', 'view_execution', 'view_original_verification', 'execute'], help='Specify the run type')\n    parser.add_argument('--verification_file', '-v', type=str, default='', help='Path of the verification config file to execute (or display)')\n    parser.add_argument('--job_name', '-j', type=str, default='', help='Name of the job to display')\n    parser.add_argument('--param_index', '-p', type=int, default=0, help='Parameter index of the trade results to display')\n    args = parser.parse_args()\n    return args\n\ndef main():\n    args = parse_args()\n\n    vt = VerifyTrade()\n    # Display the trade results\n    if args.run_type == 'view_trade':\n        verification_file = vt.get_verification_file(args.job_name)\n        vt.view_verification_data(verification_file, param_index=args.param_index)\n        vt.view_trade_result(args.job_name, args.param_index, is_brand=False)\n\n    # Display the trade results (aggregated per brand)\n    elif args.run_type == 'view_brand':\n        verification_file = vt.get_verification_file(args.job_name)\n        vt.view_verification_data(verification_file, param_index=args.param_index)\n        vt.view_trade_result(args.job_name, args.param_index, is_brand=True)\n\n    # List the contents of each verification config file together with the corresponding final results\n    elif args.run_type == 'view_list':\n        job_list = vt.get_job_list()\n        for j in job_list:\n            print('[{0}] ==========================================='.format(j))\n            verification_file = vt.get_verification_file(j)\n            vt.view_verification_data(verification_file)\n            
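# follow the config dump with this job's final strategy summary\n            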
vt.view_strategy_result(j)\n            print('')\n\n    # Display the contents of the execution parameter file\n    elif args.run_type == 'view_execution':\n        vt.view_execution_data(args.job_name)\n\n    # Display the contents of the original (job-independent) verification config file\n    elif args.run_type == 'view_original_verification':\n        vt.view_verification_data(args.verification_file)\n\n    # Run the verification\n    elif args.run_type == 'execute':\n        vt.view_verification_data(args.verification_file)\n        job_name = vt.create_job(args.verification_file)\n        print('Job name: {0}'.format(job_name))\n        vt.execute(job_name, is_saved=True)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"finance/verify_ope.py","file_name":"verify_ope.py","file_ext":"py","file_size_in_byte":3807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"40878793","text":"from tensorflow.keras.layers import (\n    Activation,\n    Add,\n    BatchNormalization,\n    Conv2D,\n)\n\n# https://github.com/tensorflow/tensorflow/blob/2b96f3662bd776e277f86997659e61046b56c315/tensorflow/python/keras/applications/resnet.py#L32\nBASE_WEIGHTS_PATH = (\n    \"https://storage.googleapis.com/tensorflow/keras-applications/resnet/\"\n)\nWEIGHTS_HASHES = {\n    \"resnet50\": \"4d473c1dd8becc155b73f8504c6f6626\",\n}\n\n\n# https://github.com/tensorflow/tensorflow/blob/2b96f3662bd776e277f86997659e61046b56c315/tensorflow/python/keras/applications/resnet.py#L262\ndef stack1(x, filters, blocks, stride1=2, name=None):\n    \"\"\"\n    A set of stacked residual blocks.\n    Arguments:\n        x: input tensor.\n        filters: integer, filters of the bottleneck layer in a block.\n        blocks: integer, blocks in the stacked blocks.\n        stride1: default 2, stride of the first layer in the first block.\n        name: string, stack label.\n    Returns:\n        Output tensor for the stacked blocks.\n    \"\"\"\n    x = block1(x, filters, stride=stride1, name=name + \"_block1\")\n    for i in range(2, blocks + 1):\n        x = block1(x, filters, conv_shortcut=False, name=name + \"_block\" + str(i))\n    return x\n\n\n# https://github.com/tensorflow/tensorflow/blob/2b96f3662bd776e277f86997659e61046b56c315/tensorflow/python/keras/applications/resnet.py#L217\ndef block1(x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None):\n    \"\"\"\n    A residual block.\n    Arguments:\n        x: input tensor.\n        filters: integer, filters of the bottleneck layer.\n        kernel_size: default 3, kernel size of the bottleneck layer.\n        stride: default 1, stride of the first layer.\n        conv_shortcut: default True, use convolution shortcut if True,\n            otherwise identity shortcut.\n        name: string, block label.\n    Returns:\n        Output tensor for the residual block.\n    \"\"\"\n    # channels_last format\n    bn_axis = 3\n\n    if conv_shortcut:\n        shortcut = Conv2D(4 * filters, 1, strides=stride, name=name + \"_0_conv\")(x)\n        shortcut = BatchNormalization(\n            axis=bn_axis, epsilon=1.001e-5, name=name + \"_0_bn\",\n        )(shortcut)\n    else:\n        shortcut = x\n\n    x = Conv2D(filters, 1, strides=stride, name=name + \"_1_conv\")(x)\n    x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + \"_1_bn\")(x)\n    x = Activation(\"relu\", name=name + \"_1_relu\")(x)\n\n    x = Conv2D(filters, kernel_size, padding=\"SAME\", name=name + \"_2_conv\")(x)\n    x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + \"_2_bn\")(x)\n    x = Activation(\"relu\", name=name + \"_2_relu\")(x)\n\n    x = Conv2D(4 * filters, 1, name=name + \"_3_conv\")(x)\n    x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + \"_3_bn\")(x)\n\n    x = Add(name=name + \"_add\")([shortcut, x])\n    x = Activation(\"relu\", name=name + \"_out\")(x)\n    return 
x\n","sub_path":"tf2_keras_fcn_from_classification_utils.py","file_name":"tf2_keras_fcn_from_classification_utils.py","file_ext":"py","file_size_in_byte":2915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"631649439","text":"import numpy as np\nimport mpl_toolkits.mplot3d.axes3d as p3\nimport pylab as plt\n# if you wish to make some marvelous movies with matplotlib, run these commands\nplt.rcParams['animation.ffmpeg_path'] = 'C:\\\\FFmpeg\\\\bin\\\\ffmpeg.exe'\n# Achtung: the above statement is needed to save the movies, therefore you must first install FFMpeg (see WikiHow)\n# But if you only want to see the orbit animations, this suffices:\nimport matplotlib.animation as animation\n# When using Spyder, make sure you go to Tools > Preferences > IPython console > Graphics, and set 'Backend' to 'Automatic'\n#################################################\n#datafile='C:\\\\Users\\\\Gebruiker\\\\source\\\\repos\\\\NBodyProblem\\\\NBodyProblem\\\\output\\\\'\nsituatie, tijdsstap,='2b','var' \nprojectievlak='xy'\n#vertalen naar 'scheun' Engels\nsituatiedict={'cirkel': '7 deeltjes op een cirkel', 'zs1': 'Zonnestelsel', '2b': 'Twee-lichamen systeem', 'Burrau': 'Burrauprobleem'}\ntijdsdict={'var': 'variabele', 'vast': 'vaste', 'adap': 'adaptieve'}\ndatafile=situatie\nplotparameters=situatie+'_params'\ndatafile+='_{}_'\ndatafile+=tijdsstap\nmethodes=['PEFRL']#[input('Gebruikte methode (RK4/FR/Verlet/ERK/PEFRL): ')] \nSplots= 'Yes' #input('Alles op 1 figuur? (Yes/No)') \nlabels= ['deeltje 1', 'deeltje 2'] #[input(' Naam van de hemellichamen (in zelfde volgorde als inleesbestand, e.g,. [\\'Zon\\', \\'Mercurius\\'])?')]\nif methodes == 'All': methodes=['ERK', 'Verlet', 'PEFRL', 'RK4', 'FR']\n# uncomment this, for the solar system\n'''\nplanetsizes= [1,0.09,0.24,0.5,0.27,2.74,2.29] \nplanetcolors= [(252,212,64),(213,210,209),(139,125,130), (107,147,214), (193,68,14), (180,92,61), (52,62,71)] \nplanetlabels=['Sun', 'Mercury', 'Venus', 'Earth', 'Mars', 'Jupiter', 'Saturnus', 'Pluto']\n3labels=['deeltje 1', 'deeltje 2', 'deeltje 3']\nplanetcolors=[(color[0]/255,color[1]/255,color[2]/255) for color in planetcolors] #andere normering van rgb ......\nsizes=[i*2 for i in sizes]\n'''\n\n# make the figure with the results\n#\n#\n#################################\n# set a bunch of plot parameters#\n#################################\nparams = {'legend.fontsize' : 5,\n 'font.sans-serif' : 'Arial', \n# 'legend.linewidth': 2,\n 'axes.linewidth' : 0.8,\n 'axes.labelsize' : 6,\n 'axes.labelpad' : 1,\n 'lines.linewidth' : 0.75, \n 'xtick.major.width' : 2,\n 'xtick.major.size' : 3,\n 'ytick.major.width' : 2,\n 'ytick.major.size' : 2,\n 'xtick.major.pad' : 4,\n 'xtick.minor.width' : 2,\n 'xtick.minor.size' : 2,\n 'ytick.minor.width' : 2,\n 'ytick.minor.size' : 2,\n 'xtick.labelsize' : 5,\n 'ytick.labelsize' : 5,\n 'legend.numpoints' : 1, \n 'font.size' : 15,\n 'text.usetex' : True ,\n 'text.latex.unicode' : False }\nplt.rcParams.update(params)\nplt.rc(\"font\",family=\"serif\",weight='normal', size=8)\n# at this point parameters for the figures are set\n\nparameters=open(plotparameters+'.txt', 'r').readlines()\nhbase, dmax=parameters[0].split(' = ')[1],parameters[3].split(' = ')[1]\ndeltamax, deltamin=parameters[4].split(' = ')[1],parameters[5].split(' = ')[1]\n\nfor methode in methodes:\n \n data = np.loadtxt(datafile.format(methode)+'.txt')\n \n #clear figure\n if Splots=='Yes':\n fig=plt.figure(figsize=(4,3), dpi=300)\n fig.suptitle(situatiedict[situatie] + ': 
' + methode + ', '+ tijdsdict[tijdsstap] + ' tijdstap', fontsize=9)\n ax_2D, ax_EF = fig.add_subplot(221), fig.add_subplot(222)\n ax_3D, ax_d = fig.add_subplot(223, projection='3d'), fig.add_subplot(224)\n fig_2D, fig_3D, fig_EF, fig_d = fig, fig, fig, fig\n left = 0.125 # the left side of the subplots of the figure\n right = 0.95 # the right side of the subplots of the figure\n bottom = 0.1 # the bottom of the subplots of the figure\n top = 0.85 # the top of the subplots of the figure\n wspace = 0.4 # the amount of width reserved for blank space between subplots\n hspace = 0.4 # the amount of height reserved for white space between subplots\n plt.subplots_adjust(left, bottom, right, top, wspace, hspace)\n\n\n else:\n fig_2D, ax_2D= plt.subplots(figsize=(4,3), dpi=300)\n #ax_2D.set_xlim(-2,2), ax_2D.set_ylim(-2,2)\n fig_3D = plt.figure(dpi=20, facecolor='black')\n ax_3D=fig_3D.add_subplot(111,projection='3d')\n #ax_3D.set_xlim(-2,2), ax_3D.set_ylim(-2,2), ax_3D.set_zlim(-2,2)\n fig_EF, ax_EF= plt.subplots()\n #ax_EF.set_xlim(-2,2), ax_EF.set_ylim(-2,2)\n fig_d, ax_d= plt.subplots()\n \n # Initialise 2D (xy) plot van orbits voor file die alle data bevat\n ''' \n to be done!\n if projectievlak == 'xy':\n ax_2D.set_xlabel(r'$x$')\n ax_2D.set_ylabel(r'$y$')\n a = 1\n b = 2\n elif projectievlak == 'xz':\n ax_2D.set_xlabel(r'$x$')\n ax_2D.set_ylabel(r'$z$')\n a = 1\n b = 3\n elif projectievlak == 'yz':\n ax_2D.set_xlabel(r'$y$')\n ax_2D.set_ylabel(r'$z$')\n a = 2\n b = 3\n '''\n ax_2D.set_title('2D banen', fontsize=5)\n ax_2D.set_xlabel(r'$x$'), ax_2D.set_ylabel(r'$y$')\n ax_2D.axis('equal'), ax_2D.grid(True)\n \n # Initialise 3D (xyz) plot van orbits voor file die alle data bevat\n ax_3D.set_title('3D banen', fontsize=5)\n ax_3D.set_xlabel(r'$x$',labelpad=-1),ax_3D.set_ylabel(r'$y$' ,labelpad=-1),ax_3D.set_zlabel(r'$z$' ,labelpad=-1)\n ax_3D.grid(True) #ax_3D.set_axis_off()\n ax_3D.tick_params(pad=-1)\n\n \n # Initialise plot van de energiefout in functie van de tijd voor een file die alle data bevat\n ax_EF.set_title('Relatieve energiefout', fontsize=5)\n ax_EF.set_xlabel(r'$t$'), ax_EF.set_ylabel(r'$\\Delta E$')\n ax_EF.grid(True), ax_EF.set_yscale('log')\n \n # Initialise plot van de energie in functie van de tijd voor een file die alle data bevat\n ax_d.set_title('Dichtste nadering', fontsize=5)\n ax_d.set_xlabel(r'$t$'), ax_d.set_ylabel(r'$d_{min}$')\n ax_d.grid(True)\n \n # Initialise figures\n N = (len(data[0,:]) - 4) // 6 #tijd, energie en energiefout staan er ook in\n lines_EF, = ax_EF.plot(data[1:,0], data[1:,-2])\n lines_d, = ax_d.plot(data[:,0], data[:,-1])\n lines_2D, lines_3D = [0] * N, [0] * N\n for j in range(N):\n x,y,z=data[:,6 * j + 1], data[:,6 * j + 2], data[:,6 * j + 3]\n lines_2D[j], = ax_2D.plot(x,y, '-', label=labels[j])\n lines_3D[j], = ax_3D.plot(x,y,z,'-o', marker='o', label=labels[j], markersize=0.8)\n ax_2D.legend(), ax_3D.legend()\n #assen gelijk zetten\n xlim, ylim, zlim = ax_3D.get_xlim(), ax_3D.get_ylim(), ax_3D.get_zlim()\n lim = (min(xlim[0], ylim[0], zlim[0]), max(xlim[1], ylim[1], zlim[1]))\n ax_3D.set_xlim(lim), ax_3D.set_ylim(lim), ax_3D.set_zlim(lim)\n \n #houd parameters bij\n plt.annotate(r\"$h=${{}}\\\\n$d_max$={{}}\\\\n$\\Delta max$={{}}\\\\n$\\Delta min$={{}}\".format(hbase,dmax,deltamax,deltamin), \n xy = (-1.50, 0.68), xycoords = 'axes fraction', fontsize=4,\n bbox= dict(boxstyle=\"round\", alpha=0.05,lw=0.2))\n #the animation function updates plot on each timestep (i = frame counter)\n def animate_2D(i,lines_2D): \n for j in range(N):\n 
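            # editorial note (added): each animate_* callback in this file only
            # mutates its Line2D artists and returns them; with blit=True,
            # FuncAnimation then redraws just those artists instead of the whole
            # figure, which keeps long orbit replays responsive. Slicing
            # data[:i, ...] below means frame i draws the trajectory up to
            # sample i, so the orbit grows as the animation advances.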
x,y=data[:i,6 * j + 1], data[:i,6 * j + 2]\n lines_2D[j].set_data(x,y)\n return lines_2D\n def animate_3D(i, lines_3D):\n for j in range(N):\n x,y,z=data[i,6 * j + 1], data[i,6 * j + 2],data[i,6 * j + 3]\n lines_3D[j].set_data(x,y)\n lines_3D[j].set_3d_properties(z) # NOTE: there is no .set_data() for 3 dim data\n return lines_3D\n def animate_EF(i, lines_EF):\n lines_EF.set_data(data[1:i,0], data[1:i,-2])\n return lines_EF,\n def animate_d(i, lines_d):\n lines_d.set_data(data[:i,0], data[:i, -1])\n return lines_d,\n #############################\n #make the movie\n # Needed? ... figure, functie waar over ge for loopt wordt, loop variabelen\n # 'blit': only update parts of the frame which have changed (for smoother animations)\n \n # interval': draw new frame every 'interval' ms\n # 'frames': number of frames to draw\n \n #############################\n orbit_2D = animation.FuncAnimation(fig_2D, animate_2D,frames=np.arange(0,len(data),10000),fargs=[lines_2D], interval=10, blit=True)\n orbit_3D = animation.FuncAnimation(fig_3D, animate_3D,frames=np.arange(0,len(data),10000),fargs=[lines_3D], interval=100, blit=True)\n orbit_EF = animation.FuncAnimation(fig_EF, animate_EF,frames=np.arange(0,len(data),10000), fargs=[lines_EF], interval=100, blit=True)\n orbit_d = animation.FuncAnimation(fig_d, animate_d,frames=np.arange(0,len(data),10000),fargs=[lines_d], interval=100, blit=True)\n\n #FFwriter=animation.FFMpegWriter(fps=30, extra_args=['-vcodec', 'libx264'])#bitrate -1 chooses the best\n #orbit_2D.save(datafile.format(methode) + '_2D.mp4', extra_anim=[orbit_3D,orbit_EF, orbit_E], writer=FFwriter, dpi=300)\n #orbit_3D.save(datafile.format(methode) + '_3D.mp4', writer=animation.FFMpegWriter())\n #if you want to save all the subplots at once, do this:\n orbit_2D.save(datafile.format(methode) + '_subplots.mov', extra_anim=[orbit_3D,orbit_EF, orbit_d], writer=animation.FFMpegWriter())\n \n \n plt.pause(0.001*len(data))\n #plt.close()\n plt.show(block=True)\n #plt.close(fig_2D), plt.close(fig_3D), plt.close(fig_E), plt.close(fig_EF)\n \n \n \n \n ##############################\n #label the planets and give them fancy colors -> TO DO\n #marker='.', #color=color[j], ms=size[j],\n #label=label[j])\n ##############################\n","sub_path":"output/Plotten_Animation.py","file_name":"Plotten_Animation.py","file_ext":"py","file_size_in_byte":9742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"382672674","text":"from lib.PipeUtil import load_json_file, save_json_file, cfe, bound_cnt, convert_filename_to_date_cam\nfrom lib.PipeAutoCal import get_image_stars, get_catalog_stars , pair_stars, eval_cnt, update_center_radec, fn_dir\nfrom lib.PipeDetect import fireball, apply_frame_deletes, find_object, analyze_object, make_base_meteor_json, fireball_fill_frame_data, calib_image, apply_calib, grid_intensity_center, make_roi_video_mfd\nfrom lib.PipeVideo import ffprobe, load_frames_fast\n\nimport os\nimport cv2\n #ny = int(int(y) / hdm_y)\nfrom FlaskLib.FlaskUtils import parse_jsid\nimport glob\nimport numpy as np\n#def vid_to_frames(vid, out_dir, suffix, ow, oh ):\n #/usr/bin/ffmpeg -i /mnt/ams2/meteors/2020_10_18/2020_10_18_10_28_12_000_010006-trim-0501.mp4 -vf 'scale=960:640' /mnt/ams2/CACHE/2020/10/2020_10_18_10_28_12_000_010006-trim-0501/2020_10_18_10_28_12_000_010006-trim-0501-half-%04d.jpg > /dev/null 2>&1\n\n\ndef crop_video(in_file, x,y,w,h):\n json_conf = load_json_file(\"../conf/as6.json\")\n in_file = \"/mnt/ams2\" + in_file\n sf = 
in_file.replace(\".mp4\", \"-stacked.jpg\")\n print (\"STACK:\", sf)\n #hd_frames,hd_color_frames,subframes,sum_vals,max_vals,pos_vals = load_frames_fast(out_file, json_conf, 0, 0, 1, 1,[])\n vw,vh,frames = ffprobe(in_file)\n vw,vh = int(vw),int(vh)\n hdm_x = 960 / int(vw)\n hdm_y = 540/ int(vh)\n nx = int(int(x) / hdm_x)\n ny = int(int(y) / hdm_y)\n nw = int(int(w) )\n nh = int(int(h) )\n print(\"ORG XY:\", x,y)\n print(\"ORG WH:\", w,h)\n print(\"V WH:\", vw, vh )\n print(\"HDMXY:\", hdm_x, hdm_y)\n print(\"NEW XY:\", nx,ny)\n print(\"NEW WH:\", nw,nh)\n\n if cfe(sf) == 1:\n print(\"SF\", sf)\n img = cv2.imread(sf)\n cv2.rectangle(img, (int(nx), int(ny)), (int(nx+nw) , int(ny+nh) ), (255, 255, 255), 1)\n nf = sf.replace(\".jpg\", \"-test.jpg\")\n print(\"saved\", nf)\n #cv2.imwrite(nf, img)\n #cv2.imshow('pepe', img)\n\n out_file = in_file.replace(\".mp4\", \"-crop.mp4\")\n cmd = \"./FFF.py crop_video \" + in_file + \" \" + out_file + \" \" + str(nx) + \",\" + str(ny) + \",\" + str(nw) + \",\" + str(nh)\n os.system(cmd)\n cv2.waitKey(0)\n if cfe(out_file) == 0:\n # crop failed. \n resp['status'] = 0\n return resp\n else:\n # crop worked, lets load the crop frames and try to auto detect inside here\n resp = {}\n objects = {}\n resp['status'] = 1\n hd_frames,hd_color_frames,subframes,sum_vals,max_vals,pos_vals = load_frames_fast(out_file, json_conf, 0, 0, 1, 1,[])\n fn = 0\n mean_val = np.mean(sum_vals[0:10])\n if mean_val < 50:\n mean_val = 50\n for val in sum_vals:\n print(\"FN:\", fn, mean_val, val,max_vals[fn])\n if val >= mean_val * 2 and max_vals[fn] > 10: \n x,y = pos_vals[fn]\n x = int(x)\n y = int(y)\n rx1,ry1,rx2,ry2 = bound_cnt(x,y,vw,vh, 10)\n roi_img = hd_frames[fn][ry1:ry2,rx1:rx2]\n adj_x, adj_y = grid_intensity_center(roi_img, 20, fn)\n x = x + adj_x + nx\n y = y + adj_y + ny\n\n object, objects = find_object(objects, fn,x-5, y-5, 10, 10, sum_vals[fn], 0, 0, None )\n objects[object] = analyze_object(objects[object], 1, 1)\n\n print( object, fn, val, pos_vals[fn])\n\n fn += 1\n\n meteors = []\n for object in objects:\n if objects[object]['report']['meteor'] == 1:\n print(\"METEOR:\",objects[object])\n meteors.append(objects[object])\n\n if len(meteors) == 0 and len(objects) == 1:\n for obj in objects:\n meteors.append(objects[obj])\n\n if len(meteors) >= 2:\n merge_x = []\n merge_y = []\n merge_w = []\n merge_h = []\n merge_int = []\n most_frames_obj = 0\n most_frames = 0\n for meteor in meteors:\n ff = len(meteor['ofns'])\n if ff > most_frames:\n most_frames = ff\n bm = meteor\n meteors = []\n meteors.append(bm)\n \n\n if len(meteors) == 1:\n # the auto detect worked, resave the json file and make a reduced.json, then make the crop frames cache files \n\n jsf = in_file.replace(\".mp4\", \".json\")\n best_meteor = meteors[0]\n o_frames,o_color_frames,o_subframes,o_sum_vals,o_max_vals,o_pos_vals = load_frames_fast(in_file, json_conf, 0, 0, 1, 1,[])\n best_meteor, frame_data = fireball_fill_frame_data(in_file,best_meteor, o_frames)\n if cfe(jsf) == 1:\n mj = load_json_file(jsf)\n if \"hd_trim\" in mj:\n hd_trim = mj['hd_trim']\n else:\n hd_trim = None\n if \"cp\" in \"mj\":\n cp = mj['cp']\n else:\n mj = None\n hd_trim = None\n cp = None \n hd_img = cv2.resize(o_frames[0],(1920,1080))\n cp = calib_image(in_file, hd_img, json_conf)\n if cp is not None:\n best_meteor = apply_calib(in_file, best_meteor, cp, json_conf)\n\n base_js, base_jsr = make_base_meteor_json(in_file, hd_trim,best_meteor)\n if \"user_mods\" in mj:\n base_js['user_mods'] = mj['user_mods']\n\n jsfr = 
jsf.replace(\".json\", \"-reduced.json\")\n base_jsr['cal_params'] = cp\n base_js['cp'] = cp\n base_js['best_meteor'] = best_meteor\n save_json_file(jsf, base_js)\n save_json_file(jsfr, base_jsr)\n \n cmd = \"./Process.py roi_mfd \" + in_file + \" >/mnt/ams2/tmp/api.points 2>&1\"\n print(cmd)\n os.system(cmd)\n\n\n return(resp)\n\ndef delete_frame(meteor_file, fn):\n resp = {}\n date = meteor_file[0:10]\n meteor_dir = \"/mnt/ams2/meteors/\" + date + \"/\"\n if \"json\" in meteor_file:\n meteor_vid = meteor_file.replace(\".json\", \".mp4\")\n jsf = meteor_dir + meteor_file\n else:\n jf = meteor_file.replace(\".mp4\", \".json\")\n jsf = meteor_dir + jf \n meteor_vid = meteor_file\n mj = load_json_file(jsf)\n jsrf = jsf.replace(\".json\", \"-reduced.json\")\n if \"user_mods\" not in mj:\n mj['user_mods'] = {}\n mj['user_mods']['user_stars'] = []\n mj['user_mods']['frames'] = {}\n mj['user_mods']['del_frames'] = []\n else:\n if \"del_frames\" not in mj['user_mods']:\n mj['user_mods']['del_frames'] = []\n mj['user_mods']['del_frames'].append(fn)\n resp = {}\n resp['status'] = 1\n resp['msg'] = \"frame deleted.\"\n\n if \"best_meteor\" in mj:\n print(\"BEST METEOR EXISTS IN MJ\")\n if \"cp\" in mj['best_meteor']:\n print(\"CP EXISTS IN BEST METEOR\")\n mj['cal_params'] = mj['best_meteor']['cp']\n del(mj['best_meteor']['cp'])\n\n \n print(\"MJCAL:\", mj['cp'])\n mj,mjr = apply_frame_deletes(jsf,mj,None,None)\n save_json_file(jsf, mj)\n save_json_file(jsrf, mjr)\n return(resp)\n\n\ndef reduce_meteor(meteor_file):\n resp = {}\n date = meteor_file[0:10]\n meteor_dir = \"/mnt/ams2/meteors/\" + date + \"/\"\n if \"json\" in meteor_file:\n meteor_vid = meteor_file.replace(\".json\", \".mp4\")\n else:\n meteor_vid = meteor_file\n \n cmd = \"./Process.py fireball \" + meteor_dir + meteor_vid + \" > /mnt/ams2/trash/fb.txt 2>&1\"\n print(cmd)\n os.system(cmd)\n resp['msg'] = \"reduced.\"\n resp['status'] = 1 \n resp['sd_meteor_frame_data'] = []\n return resp\n\n\ndef restore_meteor(jsid, data):\n resp = {}\n json_conf = load_json_file(\"../conf/as6.json\")\n amsid = json_conf['site']['ams_id']\n video_file = jsid + \".mp4\"\n json_file = video_file.replace(\".mp4\", \".json\")\n sd_root = jsid\n day = jsid[0:10]\n trash_dir = \"/mnt/ams2/trash/\" + day + \"/\" \n meteor_dir = \"/mnt/ams2/meteors/\" + day + \"/\" \n mj = load_json_file(\"/mnt/ams2/trash/\" + day + \"/\" + json_file)\n if \"hd_trim\" in mj:\n hd_root, hd_dir = fn_dir(mj['hd_trim'])\n hd_root = hd_root.replace(\".mp4\", \"\")\n hd_cmd = \"mv \" + trash_dir + hd_root + \"* \" + meteor_dir\n os.system(hd_cmd)\n print(hd_cmd)\n sd_cmd = \"mv \" + trash_dir + sd_root + \"* \" + meteor_dir\n os.system(sd_cmd)\n print(sd_cmd)\n\n\n print(\"RESTORE:\", video_file, json_file)\n return(\"OK\" + sd_root + \" \" + hd_root)\n\ndef delete_meteor(jsid, data):\n resp = {}\n json_conf = load_json_file(\"../conf/as6.json\")\n amsid = json_conf['site']['ams_id']\n video_file = parse_jsid(jsid)\n json_file = video_file.replace(\".mp4\", \".json\")\n trash_file = json_file.replace(\".json\", \".trash\")\n print(\"VID:\", video_file)\n resp['msg'] = \"deleted.\"\n delete_log = \"/mnt/ams2/SD/proc2/json/\" + amsid + \".del\"\n if cfe(delete_log) == 1:\n try:\n del_data = load_json_file(delete_log)\n except:\n del_data = {}\n else:\n del_data = {}\n fn, dir = fn_dir(video_file)\n el = fn.split(\".\")\n base = el[0]\n del_data[base] = 1\n\n save_json_file(delete_log, del_data)\n os.system(\"mv \" + json_file + \" \" + trash_file)\n\n return resp\n\ndef 
delete_meteors(data):\n resp = {}\n json_conf = load_json_file(\"../conf/as6.json\")\n amsid = json_conf['site']['ams_id']\n detections = data['detections'].split(\";\")\n delete_log = \"/mnt/ams2/SD/proc2/json/\" + amsid + \".del\"\n if cfe(delete_log) == 1:\n try:\n del_data = load_json_file(delete_log)\n except:\n del_data = {}\n os.system(\"rm \" + delete_log)\n else:\n del_data = {} \n for det in detections:\n if len(det) < 5:\n continue\n video_file = parse_jsid(det)\n fn, dir = fn_dir(video_file)\n el = fn.split(\".\")\n base = el[0]\n del_data[base] = 1\n \n save_json_file(delete_log, del_data) \n resp['msg'] = \"deleted multi.\"\n return resp\n\ndef show_cat_stars (video_file, hd_stack_file, points):\n (f_datetime, cam, f_date_str,fy,fmin,fd, fh, fm, fs) = convert_filename_to_date_cam(video_file)\n json_conf = load_json_file(\"../conf/as6.json\")\n STATION_ID = json_conf['site']['ams_id']\n cp = None\n if \"meteors\" in video_file:\n app_type = \"meteor\"\n else:\n app_type = \"calib\"\n\n if app_type == \"meteor\":\n mjf = \"/mnt/ams2/\" + video_file.replace(\".mp4\", \".json\")\n mjrf = \"/mnt/ams2/\" + video_file.replace(\".mp4\", \"-reduced.json\")\n mj = load_json_file(mjf)\n mjr = load_json_file(mjrf)\n if \"cp\" in mj:\n cp = mj['cp']\n elif \"best_meteor\" in mj:\n if \"cp\" in mj['best_meteor']:\n mj['cp'] = mj['best_meteor']['cp']\n cp = mj['cp']\n elif \"cal_params\" in mjr:\n cp = mjr['cal_params']\n\n cp = update_center_radec(video_file,cp,json_conf)\n print(cp['center_az'])\n print(cp['center_el'])\n print(cp['ra_center'])\n print(cp['dec_center'])\n print(cp['position_angle'])\n print(cp['pixscale'])\n mcp_file = \"/mnt/ams2/cal/\" + \"multi_poly-\" + STATION_ID + \"-\" + cam + \".info\" \n print(\"MCP:\", mcp_file)\n if cfe(mcp_file) == 1:\n mcp = load_json_file(mcp_file)\n cp['x_poly'] = mcp['x_poly']\n cp['y_poly'] = mcp['y_poly']\n cp['x_poly_fwd'] = mcp['x_poly_fwd']\n cp['y_poly_fwd'] = mcp['y_poly_fwd']\n print(cp['x_poly'])\n if \"hd_stack\" in mj:\n hd_img = cv2.imread(mj['hd_stack'], 0)\n print(\"HD IMG:\", hd_img.shape)\n if \"short_bright_stars\" in cp:\n del (cp['short_bright_stars'])\n else:\n cal_r = video_file.replace(\"-half-stack.png\", \"\")\n cal_root = \"/mnt/ams2\" + cal_r \n cps = glob.glob(cal_root + \"*calparams.json\")\n sfs = glob.glob(cal_root + \"*stacked.png\")\n if len(sfs) == 0:\n ttt = cal_root + \".png\"\n if cfe(ttt) == 1:\n sfs.append(ttt)\n else:\n return(\"Problem can't find cal file\")\n print(\"GLOB:\" + cal_root + \"*stacked.png\")\n stack_file = sfs[0]\n cpf = cps[0]\n cp = load_json_file(cpf)\n hd_img = cv2.imread(stack_file, 0)\n\n if cp is None:\n resp = {\n \"status\" : 0\n }\n return(resp)\n\n pts = points.split(\"|\")\n user_stars = []\n for pt in pts:\n ddd = pt.split(\",\")\n if len(ddd) != 2:\n continue\n sx, sy = pt.split(\",\")\n sx = int(float(sx)) + 5\n sy = int(float(sy)) + 5\n sx = int(float(sx)) * 2\n sy = int(float(sy)) * 2\n rx1,ry1,rx2,ry2 = bound_cnt(sx,sy,hd_img.shape[1],hd_img.shape[0], 10)\n cnt_img = hd_img[ry1:ry2, rx1:rx2]\n #cv2.imwrite(\"/mnt/ams2/test.jpg\", cnt_img)\n min_val, max_val, min_loc, (mx,my)= cv2.minMaxLoc(cnt_img)\n # subtract away 1/2 shape for center pos starting point\n #mx = mx - 5\n #my = my - 5\n #mx = 0\n #my = 0\n grid_val = max_val - 25\n #max_px, avg_px, px_diff,max_loc,grid_int = eval_cnt(cnt_img, grid_val)\n #nsx = rx1 + max_loc[0]\n #nsy = ry1 + max_loc[1]\n nsx = rx1 + mx\n nsy = ry1 + my\n #print(\"CLOSE IMAGE STAR LOCATION:\", sx, sy, nsx, nsy, mx, my)\n 
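            # editorial note (added): the clicked star positions arrive in
            # half-scale stack coordinates, so they are shifted and doubled
            # above before bound_cnt builds a small cutout around the click;
            # cv2.minMaxLoc then finds the brightest pixel in that cutout, and
            # adding the cutout origin (rx1, ry1) maps the local maximum back
            # to full-frame coordinates before the star is appended below.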
user_stars.append((nsx,nsy,999))\n\n cp['user_stars'] = user_stars\n cp = pair_stars(cp, video_file, json_conf, hd_img)\n print(\"USER STARS:\", len(user_stars))\n print(\"PAIRED STARS:\", len(cp['cat_image_stars']))\n resp = {}\n\n if app_type == \"meteor\":\n if \"user_mods\" not in mj:\n mj['user_mods'] = {}\n if \"user_stars\" not in mj['user_mods']:\n mj['user_mods']['user_stars'] = user_stars\n else:\n mj['user_mods']['user_stars'] = user_stars\n mj['cp'] = cp\n mjr['cal_params'] = cp\n save_json_file(mjf, mj)\n save_json_file(mjrf, mjr)\n resp['crop_box'] = mjr['crop_box']\n else:\n resp['crop_box'] = [0,0,0,0]\n if \"user_mods\" not in cp:\n cp['user_mods'] = {}\n cp['user_mods']['user_stars'] = cp['user_stars']\n save_json_file(cpf, cp)\n print(\"SAVED CALPARAMS IN:\", cpf)\n\n\n resp['msg'] = \"good\"\n resp['status'] = 1\n resp['cp'] = cp\n return(resp)\n\n\ndef update_meteor_points(sd_video_file,frames):\n json_conf = load_json_file(\"../conf/as6.json\")\n json_file = \"/mnt/ams2/\" + sd_video_file.replace(\".mp4\", \".json\")\n full_vid = \"/mnt/ams2/\" + sd_video_file\n print(\"FV:\", full_vid)\n print(\"JS:\", json_file)\n\n rjson_file = json_file.replace(\".json\", \"-reduced.json\")\n \n mj = load_json_file(json_file)\n print(\"MJ LOADED:\", mj)\n if \"user_mods\" in mj:\n user_mods = mj['user_mods']\n else:\n user_mods = {}\n if \"frames\" not in user_mods:\n user_mods['frames'] = {}\n for row in frames:\n \n fn = row['fn']\n x = row['x']\n y = row['y']\n user_mods['frames'][fn] = [x,y]\n mj['user_mods'] = user_mods\n save_json_file(json_file, mj)\n resp = {\n \"msg\": \"frames updated.\" \n }\n #cmd = \"./Process.py roi_mfd /mnt/ams2/\" + sd_video_file + \" >/mnt/ams2/tmp/api.points 2>&1\"\n #print(\"COMMAND:\", cmd)\n #os.system(cmd)\n make_roi_video_mfd(\"/mnt/ams2/\" + sd_video_file, json_conf)\n\n #cmd = \"./Learn.py add \" + json_file + \" >/mnt/ams2/tmp/api.points 2>&1 &\"\n #print(\"COMMAND:\", cmd)\n #os.system(cmd)\n\n mjr = load_json_file(rjson_file)\n resp['status'] = 1\n if \"cal_params\" in mj:\n resp['calib'] = mj['cal_params']\n if \"cp\" in mj:\n resp['calib'] = mj['cp']\n if \"meteor_frame_data\" in mj:\n resp['frames'] = mjr['meteor_frame_data']\n vid_fn = json_file.split(\"/\")[-1]\n vid_fn = vid_fn.replace(\".json\", \".mp4\")\n #cmd = \"./DynamoDB.py update_obs \" + vid_fn + \" >/mnt/ams2/tmp/api.points 2>&1 &\"\n #print(\"COMMAND:\", cmd)\n #os.system(cmd)\n\n return(resp)\n #for frame in frames:\n\ndef update_user_stars(amsid, data):\n print(\"YO\")\n\ndef find_stars_in_pic(amsid, data):\n print(\"YO\")\n\ndef blind_solve(amsid, data):\n print(\"YO\")\n\ndef delete_cal(amsid, data):\n print(\"YO\")\n\ndef update_cal_params(amsid, data):\n print(\"YO\")\n \n\n","sub_path":"pipeline/FlaskLib/api_funcs.py","file_name":"api_funcs.py","file_ext":"py","file_size_in_byte":15362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"1019833","text":"class Solution(object):\n def combinationSum(self, candidates, target):\n \"\"\"\n :type candidates: List[int]\n :type target: int\n :rtype: List[List[int]]\n \"\"\"\n tmp = self.search(candidates, target, 0, (), [])\n return [list(i) for i in list(set(tmp))]\n\n def search(self, candidates, target, sumNumber, combination, result=[]):\n if sumNumber == target:\n result.append(combination)\n\n if len(candidates) > 0:\n if candidates[0] + sumNumber <= target:\n self.search(candidates, target, sumNumber +\n candidates[0], combination + (candidates[0],), result)\n 
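            # editorial note (added): the call just below takes candidates[0]
            # once and then drops it, but that state is also reachable via the
            # call above (take candidates[0], keep the list) followed by the
            # final "skip" call, so the recursion emits duplicate combinations;
            # that overlap is why combinationSum has to round-trip the results
            # through set() before returning. Keeping only the "take and stay"
            # and "skip" branches would make the deduplication unnecessary.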
self.search(candidates[1:], target, sumNumber +\n candidates[0], combination + (candidates[0],), result)\n self.search(candidates[1:], target, sumNumber, combination, result)\n return result\n\n\nif __name__ == '__main__':\n print(Solution().combinationSum([2, 3, 6, 7], 7))\n","sub_path":"algo_problems/q21-40/q39.py","file_name":"q39.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"403676964","text":"import os\nfrom preprocessing.time_series.df_to_dataset_synthetic import data_to_dataset_3D, data_to_dataset_4D, split_input_target\nimport numpy as np\n\ndef split_covid_data(arr_path, normalize=True, split=0.8):\n covid_data = np.load(arr_path)\n covid_data = covid_data.astype(np.float32)\n num_samples = covid_data.shape[0]\n TRAIN_SPLIT = int(num_samples*split)\n VAL_SPLIT = TRAIN_SPLIT + int(num_samples*(1-split)*0.5) + 1\n\n if normalize:\n data_mean = np.mean(covid_data, axis=1, keepdims=True)\n data_std = np.std(covid_data, axis=1, keepdims=True)\n covid_data = (covid_data - data_mean) / data_std\n stats_train = (data_mean[:TRAIN_SPLIT, :], data_std[:TRAIN_SPLIT, :])\n stats_val = (data_mean[TRAIN_SPLIT:VAL_SPLIT, :], data_std[TRAIN_SPLIT:VAL_SPLIT, :])\n stats_test = (data_mean[VAL_SPLIT:, :], data_std[VAL_SPLIT:, :])\n stats = (stats_train, stats_val, stats_test)\n else:\n stats = None\n\n train_data = covid_data[:TRAIN_SPLIT, :]\n val_data = covid_data[TRAIN_SPLIT:VAL_SPLIT, :]\n test_data = covid_data[VAL_SPLIT:, :]\n\n # reshaping arrays:\n train_data = np.reshape(train_data, newshape=(train_data.shape[0], train_data.shape[1], 1))\n val_data = np.reshape(val_data, newshape=(val_data.shape[0], val_data.shape[1], 1))\n test_data = np.reshape(test_data, newshape=(test_data.shape[0], test_data.shape[1], 1))\n\n return train_data, val_data, test_data, stats\n\ndef rescale_covid_data(data_sample, stats, index):\n data_mean, data_std = stats\n mean, std = data_mean[index], data_std[index]\n data_sample = std * data_sample + mean\n data_sample = data_sample.astype(np.int32)\n return data_sample\n\n\nif __name__ == '__main__':\n arr_path = '../../../data/covid_preprocess.npy'\n train_data, val_data, test_data, stats = split_covid_data(arr_path=arr_path)\n stats_train, stats_val, stats_test = stats\n first_sample = rescale_covid_data(train_data[0], stats_train, 0)\n last_sample = rescale_covid_data(test_data[-1], stats_test, -1)\n data_unnorm = np.load(arr_path)\n\n # checking mean and max values of each dataset:\n # test_sum = np.sum(data_unnorm[798:], axis=1)\n # test_max = np.max(test_sum)\n # test_mean = np.mean(test_sum)\n # val_sum = np.sum(data_unnorm[709:798], axis=1)\n # val_max = np.max(val_sum)\n # val_mean = np.max(val_sum)\n # train_sum = np.sum(data_unnorm[:709], axis=1)\n # train_mean = np.mean(train_sum)\n # train_max = np.max(train_sum)\n\n # saving train, val\n data_path = '../../../data'\n train_data_path = os.path.join(data_path, 'covid_train_data.npy')\n val_data_path = os.path.join(data_path, 'covid_val_data.npy')\n test_data_path = os.path.join(data_path, 'covid_test_data.npy')\n\n np.save(val_data_path, val_data)\n np.save(train_data_path, train_data)\n np.save(test_data_path, test_data)\n\n print('train_data', train_data.shape)\n print('train_data', train_data[0, :, :])\n print('val_data', val_data.shape)\n print('val_data', val_data[0, :, :])\n print('test_data', test_data.shape)\n print('test_data', test_data[0, :, :])\n\n\n # ---- covid data rescaled 
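# editorial note (added): split_covid_data above standardizes every series with
# its own mean/std along axis=1, and rescale_covid_data undoes that per-series
# transform. A minimal self-contained sketch of the same round trip (the array
# here is made up for illustration):
import numpy as np

series = np.arange(12, dtype=np.float32).reshape(2, 6)  # 2 series, 6 timesteps
mean = np.mean(series, axis=1, keepdims=True)
std = np.std(series, axis=1, keepdims=True)
normed = (series - mean) / std
restored = normed * std + mean                          # exact inverse
assert np.allclose(series, restored)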
------------------------------------------------\n\n print(\"data with rescaling...\")\n arr_path = '../../../data/covid_preprocess_rescaled.npy'\n train_data_s, val_data_s, test_data_s, stats = split_covid_data(arr_path=arr_path, normalize=False)\n\n print('train_data', train_data_s.shape)\n print('train_data', train_data_s[0, :, :])\n print('val_data', val_data_s.shape)\n print('val_data', val_data_s[0, :, :])\n print('test_data', test_data_s.shape)\n print('test_data', test_data_s[0, :, :])\n\n # saving train, val\n data_path = '../../../data'\n train_data_path = os.path.join(data_path, 'covid_train_data_rescaled.npy')\n val_data_path = os.path.join(data_path, 'covid_val_data_rescaled.npy')\n test_data_path = os.path.join(data_path, 'covid_test_data_rescaled.npy')\n\n np.save(val_data_path, val_data_s)\n np.save(train_data_path, train_data_s)\n np.save(test_data_path, test_data_s)\n\n\n train_dataset, val_dataset, test_dataset = data_to_dataset_3D(train_data, val_data, test_data,\n split_fn=split_input_target, BUFFER_SIZE=50, BATCH_SIZE=32, cv=False)\n for (inp, tar) in train_dataset.take(1):\n print('input', inp.shape)\n print('input', inp[0,:,:])\n print('target', tar.shape)\n print('target', tar[0,:,:])\n train_dataset, val_dataset, test_dataset = data_to_dataset_4D(train_data, val_data, test_data,\n split_fn=split_input_target, BUFFER_SIZE=50,\n BATCH_SIZE=32, cv=False)\n for (inp, tar) in train_dataset.take(1):\n print('input', inp.shape)\n print('input', inp[0,:,:])\n print('target', tar.shape)\n print('target', tar[0,:,:])\n","sub_path":"src/preprocessing/time_series/df_to_dataset_covid.py","file_name":"df_to_dataset_covid.py","file_ext":"py","file_size_in_byte":4999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"548236003","text":"def collatz(num):\n\tif num == 1:\n\t\treturn num\n\tif num % 2 == 0:\n\t\tnum = num // 2\n\telif num % 2 == 1:\n\t\tnum = 3 * num + 1\n\tprint(num)\n\treturn collatz(num)\n\ninput_number = int(input(\"type an integer\"))\ncollatz(input_number)","sub_path":"collatz.py","file_name":"collatz.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"210343627","text":"def check(arr):\r\n\tfor i in range(len(arr)):\r\n\t\tif arr[i] is 0:\r\n\t\t\treturn False\r\n\treturn True\r\n\r\ndef flip(arr):\r\n\tstart = 0\r\n\tend = len(arr) - 1\r\n\tif arr[0] is 1:\r\n\t\twhile arr[start] is 1:\r\n\t\t\tarr[start] = 0\r\n\t\t\tstart = start + 1\r\n\t\treturn arr\r\n\twhile arr[end] is 1:\r\n\t\tend = end - 1\r\n\tstart = 0\r\n\twhile end >= start:\r\n\t\tarr[start], arr[end] = arr[end], arr[start]\r\n\t\tif start == end:\r\n\t\t\tarr[start] = 1 - arr[start]\r\n\t\telse:\r\n\t\t\tarr[start] = 1 - arr[start]\r\n\t\t\tarr[end] = 1 - arr[end]\r\n\t\tstart = start + 1\r\n\t\tend = end - 1\r\n\treturn arr\r\n\r\ndef testcase(arr):\r\n\tnum_flips = 0\r\n\twhile check(arr) is False:\r\n\t\tarr = flip(arr)\r\n\t\tnum_flips = num_flips + 1\r\n\treturn num_flips\r\n\t\r\ndef conv(s):\r\n\tret = []\r\n\tfor i in range(len(s)):\r\n\t\tif s[i] is '+':\r\n\t\t\tret.append(1)\r\n\t\telif s[i] is '-':\r\n\t\t\tret.append(0)\r\n\treturn ret\r\n\t\r\ninput = open('B-large.in', 'r')\r\noutput = open('B-large-output', 'w')\r\nT = int(input.readline())\r\n\r\nfor i in range(T):\r\n\tinp = input.readline()\r\n\tinp = conv(inp)\r\n\tprint(inp)\r\n\tout = 'Case #' + str(i + 1) + ': ' + str(testcase(inp)) + 
'\\n'\r\n\toutput.write(out)\r\n\tprint(out)\r\n\t","sub_path":"codes/CodeJamCrawler/16_0_2_neat/16_0_2_VeniVidiVici_Qual2.py","file_name":"16_0_2_VeniVidiVici_Qual2.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"450078990","text":"import json\nimport os\nimport sys\n\nfrom IPython.utils.tempdir import TemporaryDirectory\nfrom jupyter_client.kernelspec import KernelSpecManager\n\n\nkernel_name = \"intel\"\nkernel_json = {\n \"argv\": [sys.executable, \"-m\", \"kernels.\" + kernel_name,\n \"-f\", \"{connection_file}\"],\n \"display_name\": kernel_name.capitalize()\n}\n\n\ndef install_kernel(user=True, prefix=None):\n with TemporaryDirectory() as tempdir:\n os.chmod(tempdir, 0o755) # Starts off as 700, not user readable\n with open(os.path.join(tempdir, \"kernel.json\"), \"w\") as f:\n json.dump(kernel_json, f, sort_keys=True)\n print(\"Installing\", kernel_name, \"kernel...\")\n KernelSpecManager().install_kernel_spec(tempdir, kernel_name, user,\n prefix=prefix)\n print(kernel_name.capitalize(), \"kernel installation complete\")\n\n\nif __name__ == '__main__':\n install_kernel()\n","sub_path":"kernels/intel/install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"146387290","text":"import functools\nimport numpy as np\nfrom numpy.polynomial import laguerre as lag\nfrom scipy.special import eval_laguerre\nfrom mpi4py_fft import fftw\nfrom shenfun.spectralbase import SpectralBase, work, Transform\nfrom shenfun.utilities import inheritdocstrings\n\n#pylint: disable=method-hidden,no-else-return,not-callable,abstract-method,no-member,cyclic-import\n\n\n@inheritdocstrings\nclass LaguerreBase(SpectralBase):\n \"\"\"Base class for all Laguerre bases\n\n Parameters\n ----------\n N : int, optional\n Number of quadrature points\n quad : str, optional\n Type of quadrature\n\n - LG - Laguerre-Gauss\n\n Note\n ----\n We are using Laguerre functions and not the regular Laguerre polynomials\n as basis functions. A Laguerre function is defined as\n\n .. 
math::\n\n L_k = P_k \\cdot \\exp(-x/2)\n\n where :math:`L_k` and :math:`P_k` are the Laguerre function and Laguerre\n polynomials of order k, respectively.\n\n \"\"\"\n\n def __init__(self, N=0, quad=\"LG\"):\n SpectralBase.__init__(self, N, quad, domain=(0., np.inf))\n self.forward = functools.partial(self.forward, fast_transform=False)\n self.backward = functools.partial(self.backward, fast_transform=False)\n self.scalar_product = functools.partial(self.scalar_product, fast_transform=False)\n\n @staticmethod\n def family():\n return 'laguerre'\n\n def reference_domain(self):\n return (0., np.inf)\n\n def domain_factor(self):\n return 1\n\n def points_and_weights(self, N=None, map_true_domain=False):\n if N is None:\n N = self.N\n if self.quad == \"LG\":\n points, weights = lag.laggauss(N)\n weights *= np.exp(points)\n else:\n raise NotImplementedError\n\n return points, weights\n\n def vandermonde(self, x):\n V = lag.lagvander(x, self.N-1)\n return V\n\n def evaluate_basis(self, x, i=0, output_array=None):\n x = np.atleast_1d(x)\n if output_array is None:\n output_array = np.zeros(x.shape)\n output_array = eval_laguerre(i, x, out=output_array)\n output_array *= np.exp(-x/2)\n return output_array\n\n def evaluate_basis_derivative_all(self, x=None, k=0):\n if x is None:\n x = self.mesh(False, False)\n V = self.vandermonde(x)\n M = V.shape[1]\n if k == 1:\n D = np.zeros((M, M))\n D[:-1, :] = lag.lagder(np.eye(M), 1)\n W = np.dot(V, D)\n W -= 0.5*V\n V = W*np.exp(-x/2)[:, np.newaxis]\n\n elif k == 2:\n D = np.zeros((M, M))\n D[:-2, :] = lag.lagder(np.eye(M), 2)\n D[:-1, :] -= lag.lagder(np.eye(M), 1)\n W = np.dot(V, D)\n W += 0.25*V\n V = W*np.exp(-x/2)[:, np.newaxis]\n\n elif k == 0:\n V *= np.exp(-x/2)[:, np.newaxis]\n\n else:\n raise NotImplementedError\n\n return self._composite_basis(V)\n\n def evaluate_basis_all(self, x=None):\n if x is None:\n x = self.mesh(False, False)\n V = self.vandermonde(x)\n V *= np.exp(-x/2)[:, np.newaxis]\n return self._composite_basis(V)\n\n def evaluate_basis_derivative(self, x=None, i=0, k=0, output_array=None):\n if x is None:\n x = self.mesh(False, False)\n x = np.atleast_1d(x)\n v = eval_laguerre(i, x, out=output_array)\n X = x[:, np.newaxis]\n if k == 1:\n D = np.zeros((self.N, self.N))\n D[:-1, :] = lag.lagder(np.eye(self.N), 1)\n V = np.dot(v, D)\n V -= 0.5*v\n V *= np.exp(-X/2)\n v[:] = V\n\n elif k == 2:\n D = np.zeros((self.N, self.N))\n D[:-2, :] = lag.lagder(np.eye(self.N), 2)\n D[:-1, :] -= lag.lagder(np.eye(self.N), 1)\n V = np.dot(v, D)\n V += 0.25*v\n V *= np.exp(-X/2)\n v[:] = V\n\n elif k == 0:\n v *= np.exp(-X/2)\n\n else:\n raise NotImplementedError\n\n return v\n\n def _composite_basis(self, V):\n \"\"\"Return composite basis, where ``V`` is primary Vandermonde matrix.\"\"\"\n return V\n\n def plan(self, shape, axis, dtype, options):\n if isinstance(axis, tuple):\n assert len(axis) == 1\n axis = axis[0]\n\n if isinstance(self.forward, Transform):\n if self.forward.input_array.shape == shape and self.axis == axis:\n # Already planned\n return\n\n U = fftw.aligned(shape, dtype=dtype)\n V = fftw.aligned(shape, dtype=dtype)\n U.fill(0)\n V.fill(0)\n self.axis = axis\n self.forward = Transform(self.forward, None, U, V, V)\n self.backward = Transform(self.backward, None, V, V, U)\n self.scalar_product = Transform(self.scalar_product, None, U, V, V)\n\n@inheritdocstrings\nclass Basis(LaguerreBase):\n \"\"\"Basis for regular Laguerre functions\n\n Parameters\n ----------\n N : int, optional\n Number of quadrature points\n quad : str, 
optional\n Type of quadrature\n\n - LG - Laguerre-Gauss\n\n Note\n ----\n We are using Laguerre functions and not the regular Laguerre polynomials\n as basis functions. A Laguerre function is defined as\n\n .. math::\n\n L_k = P_k \\cdot \\exp(-x/2)\n\n where :math:`L_k` and :math:`P_k` are the Laguerre function and Laguerre\n polynomials of order k, respectively.\n \"\"\"\n\n def __init__(self, N=0, quad=\"LG\"):\n LaguerreBase.__init__(self, N, quad)\n self.plan(N, 0, np.float, {})\n\n def eval(self, x, u, output_array=None):\n if output_array is None:\n output_array = np.zeros(x.shape)\n output_array[:] = lag.lagval(x, u)*np.exp(-x/2)\n return output_array\n\n@inheritdocstrings\nclass ShenDirichletBasis(LaguerreBase):\n \"\"\"Shen Laguerre basis for Dirichlet boundary conditions\n\n Parameters\n ----------\n N : int, optional\n Number of quadrature points\n quad : str, optional\n Type of quadrature\n\n - LG - Laguerre-Gauss\n\n \"\"\"\n def __init__(self, N=0, quad=\"LG\", bc=(0., 0.)):\n LaguerreBase.__init__(self, N, quad)\n self.LT = Basis(N, quad)\n self.plan(N, 0, np.float, {})\n\n @staticmethod\n def boundary_condition():\n return 'Dirichlet'\n\n def _composite_basis(self, V):\n assert self.N == V.shape[1]\n P = np.zeros(V.shape)\n P[:, :-1] = V[:, :-1] - V[:, 1:]\n return P\n\n def slice(self):\n return slice(0, self.N-1)\n\n def evaluate_basis(self, x, i=0, output_array=None):\n x = np.atleast_1d(x)\n if output_array is None:\n output_array = np.zeros(x.shape)\n output_array[:] = eval_laguerre(i, x) - eval_laguerre(i+1, x)\n output_array *= np.exp(-x/2)\n return output_array\n\n def eval(self, x, u, output_array=None):\n if output_array is None:\n output_array = np.zeros(x.shape)\n w_hat = work[(u, 0, True)]\n w_hat[1:] = u[:-1]\n output_array[:] = lag.lagval(x, u) - lag.lagval(x, w_hat)\n output_array *= np.exp(-x/2)\n return output_array\n\n def plan(self, shape, axis, dtype, options):\n if isinstance(axis, tuple):\n assert len(axis) == 1\n axis = axis[0]\n\n if isinstance(self.forward, Transform):\n if self.forward.input_array.shape == shape and self.axis == axis:\n # Already planned\n return\n\n self.LT.plan(shape, axis, dtype, options)\n U, V = self.LT.forward.input_array, self.LT.forward.output_array\n self.axis = axis\n self.forward = Transform(self.forward, None, U, V, V)\n self.backward = Transform(self.backward, None, V, V, U)\n self.scalar_product = Transform(self.scalar_product, None, U, V, V)\n","sub_path":"shenfun/laguerre/bases.py","file_name":"bases.py","file_ext":"py","file_size_in_byte":7937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"464087349","text":"import bar\n\n#Trying to get rid of the stock stats library slowly\nclass StockMath:\n\tdef sma_3_close(self, bars):\n\t\tthree_bar_avg = 0.0\n\n\t\ti = 0\n\t\tfor bar in bars:\n\t\t\tif i >= 3:\n\t\t\t\tbreak\n\n\t\t\tthree_bar_avg += bar.close\n\t\t\ti += 1\n\n\t\treturn three_bar_avg / 3\n\n\tdef sma_5_close(self, bars):\n\t\tthree_bar_avg = 0.0\n\n\t\ti = 0\n\t\tfor bar in bars:\n\t\t\tif i >= 3:\n\t\t\t\tbreak\n\n\t\t\tthree_bar_avg += bar.close\n\t\t\ti += 1\n\n\t\treturn three_bar_avg / 5\n\n\tdef rsi_10_close(self, bars):\n\t\tgain = 0.0\n\t\tgain_count = 0\n\t\tloss = 0.0\n\t\tloss_count = 0\n\n\t\ti = 0\n\t\tfor bar in bars:\n\t\t\tif i >= 10:\n\t\t\t\tbreak\n\n\t\t\tdiff = bar.close - bar.open\n\n\t\t\tif diff < 0.0:\n\t\t\t\tloss += diff\n\t\t\t\tloss_count += 1\n\t\t\telse:\n\t\t\t\tgain += diff\n\t\t\t\tgain_count += 1\n\n\t\t\ti += 
1\n\n\t\tavg_gain = 0\n\t\tavg_loss = 0\n\n\t\tif gain_count != 0:\n\t\t\tavg_gain = gain / gain_count\n\n\t\tif loss_count != 0:\n\t\t\tavg_loss = loss / loss_count\n\n\t\tif avg_loss == 0:\n\t\t\treturn 100 - (100 / (1 + avg_gain))\n\t\telse:\n\t\t\treturn 100 - (100 / (1 + (avg_gain / -avg_loss)))\n","sub_path":"stock_math.py","file_name":"stock_math.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"486307765","text":"\"\"\"\nUpdate 311 SRs with their due date. The due date is stored on a related flex note record.\n\"\"\"\nfrom datetime import datetime\nimport pdb\n\nimport arrow\nimport knackpy\n\nimport _setpath\nfrom config.secrets import *\nfrom config.knack.config import SR_DUE_DATE as cfg\n\nfrom tdutils import argutil\n\n\ndef sr_filter(sr_id, field_id):\n\n return {\n \"match\": \"and\",\n \"rules\": [\n {\"field\": f\"field_1455\", \"operator\": \"is\", \"value\": f\"SRSLADAT\"},\n {\"field\": f\"{field_id}\", \"operator\": \"is\", \"value\": f\"{sr_id}\"},\n ],\n }\n\n\ndef get_due_date(date):\n \"\"\" \n Parse date and return as mm/dd/yyyy.\n\n Input format, e.g.: 'DEC 04, 2018'\n \"\"\"\n date = date.title()\n date = datetime.strptime(date, \"%b %d, %Y\")\n return date.strftime(\"%m/%d/%Y\")\n\n\ndef cli_args():\n parser = argutil.get_parser(\n \"sr_due_date.py\", \"Update 311 SRs with their due date.\",\"app_name\"\n )\n\n args = parser.parse_args()\n\n return args\n\n\ndef main():\n\n args = cli_args()\n\n app_name = args.app_name\n\n srs = knackpy.Knack(\n view=cfg[\"tmc_issues\"][\"view\"],\n scene=cfg[\"tmc_issues\"][\"scene\"],\n ref_obj=cfg[\"tmc_issues\"][\"ref_obj\"],\n app_id=KNACK_CREDENTIALS[app_name][\"app_id\"],\n api_key=KNACK_CREDENTIALS[app_name][\"api_key\"],\n )\n\n count = 0\n\n if not srs.data:\n return 0\n \n for sr in srs.data:\n\n filters = sr_filter(sr[\"SR_NUMBER\"], cfg[\"flex_notes\"][\"sr_id_field\"])\n\n flex_note = knackpy.Knack(\n view=cfg[\"flex_notes\"][\"view\"],\n scene=cfg[\"flex_notes\"][\"scene\"],\n ref_obj=cfg[\"flex_notes\"][\"ref_obj\"],\n app_id=KNACK_CREDENTIALS[app_name][\"app_id\"],\n api_key=KNACK_CREDENTIALS[app_name][\"api_key\"],\n filters=filters,\n page_limit=1, # limit records, to be safe (there are lots)\n rows_per_page=10,\n )\n\n if not flex_note.data:\n continue\n\n \"\"\"\n Always take the first due date in the list. there are occasionally duplicate\n due date flex records for one SR. 
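# editorial note (added): in stock_math.py above, sma_5_close copies the 3-bar
# loop (it breaks at i >= 3) yet still divides by 5, so it returns neither a
# 3-bar nor a 5-bar average. A corrected sketch, assuming bars is any iterable
# of objects with a .close attribute:
from itertools import islice

def sma_5_close(bars):
    # average at most the first 5 closes; fall back to 0.0 on empty input
    closes = [bar.close for bar in islice(bars, 5)]
    return sum(closes) / len(closes) if closes else 0.0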
We don't know why.\n \"\"\"\n due_date = get_due_date(flex_note.data[0][\"FLEX_ATTRIBUTE_VALUE\"])\n\n record = {cfg[\"tmc_issues\"][\"due_date_field_id\"]: due_date, \"id\": sr[\"id\"]}\n\n res = knackpy.record(\n record,\n obj_key=cfg[\"tmc_issues\"][\"ref_obj\"][0],\n app_id=KNACK_CREDENTIALS[app_name][\"app_id\"],\n api_key=KNACK_CREDENTIALS[app_name][\"api_key\"],\n method=\"update\",\n )\n\n count +=1\n\n return count\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"transportation-data-publishing/data_tracker/sr_due_date.py","file_name":"sr_due_date.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"543014286","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nimport time\nimport codecs\nimport urllib\nimport urlparse\n\nimport raven\nimport requests\nimport BeautifulSoup\n\nimport post\nimport attachment\n\nUSER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; AS; rv:11.0) like Gecko'\n\nclass ThreadListParser(object):\n\n def __init__(self, url=None, ip=None):\n self.url = url\n self.ip = ip\n \n self.hostname = urlparse.urlparse(url=self.url).hostname\n self.session = requests.Session()\n self.headers = {\n 'User-Agent': USER_AGENT,\n 'Referer': self.url,\n 'Host': self.hostname,\n 'Access-Control-Allow-Origin': 'http://' + self.hostname,\n }\n\n def parse(self):\n response = self._get_response()\n response.encoding = 'utf-8'\n if not response.status_code / 100 == 2:\n return []\n\n doc = BeautifulSoup.BeautifulSoup(response.text)\n urls = {}\n threads = []\n\n for e in doc.findAll('a'):\n href = e.get('href', None)\n if not href:\n continue\n if not '/mangaup/' in href and not '/manga/' in href:\n continue\n if 'write' in href:\n continue\n\n texts = e.findAll(text=True)\n title = texts[-1].strip()\n if not title:\n if not len(texts) > 2:\n continue\n title = texts[2].strip()\n\n title = ' '.join(title.split())\n decoded = BeautifulSoup.BeautifulSoup(title, convertEntities=BeautifulSoup.BeautifulSoup.HTML_ENTITIES)\n title = str(unicode(decoded))\n title = title.replace('&', '&')\n title = title.replace('<', '<')\n title = title.replace('>', '>')\n title = title.replace('"', '\"')\n title = title.replace(codecs.BOM_UTF8, '')\n \n url = 'http://' + self.hostname + href\n urls[url] = title\n\n for url in urls.keys():\n title = urls[url]\n t = post.Thread(title=title, url=url)\n threads.append(t)\n\n return threads\n\n def _get_response(self, url=None):\n if not url:\n url = self.url\n\n if self.hostname in url:\n url = url.replace(self.hostname, self.ip)\n \n return self.session.get(url, headers=self.headers)\n \nclass ThreadParser(object):\n\n def __init__(self, session=None, thread=None, ip=None):\n if session == None:\n session = requests.Session()\n\n self.session = session\n self.thread = thread\n self.ip = ip\n\n self.hostname = urlparse.urlparse(url=self.thread.url).hostname\n self.headers = {\n 'User-Agent': USER_AGENT,\n 'Referer': self.thread.url,\n 'Host': self.hostname,\n 'Access-Control-Allow-Origin': 'http://' + self.hostname,\n }\n\n def parse(self):\n posts = []\n response = self._get_response(self.thread.url)\n response.encoding = 'utf-8'\n if not response.status_code / 100 == 2:\n return posts\n\n doc = BeautifulSoup.BeautifulSoup(response.text)\n content = doc.find('div', attrs={'id': 'vContent'})\n if not content:\n return posts\n\n images = content.findAll('img')\n if not images:\n return posts\n\n cover_image = None\n\n # cover 
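# editorial note (added): get_due_date in sr_due_date.py above title-cases the
# 311 flex-note value ('DEC 04, 2018' style), parses it with %b, and re-emits
# it as mm/dd/yyyy. A standalone check of that round trip:
from datetime import datetime

parsed = datetime.strptime("DEC 04, 2018".title(), "%b %d, %Y")
print(parsed.strftime("%m/%d/%Y"))  # -> 12/04/2018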
image\n for e in images:\n src = e.get('src', None)\n if not src:\n continue\n\n if 'blogspot.com' in src:\n break\n\n if 'share' in src:\n break\n\n if 'like.png' in src:\n break\n\n if not src or not 'http' in src or not self.hostname in src:\n continue\n \n r = self._get_response(src)\n if not r.status_code / 100 == 2 or not r.content:\n return []\n \n if '')\n title = title.replace('"', '\"')\n title = title.replace(codecs.BOM_UTF8, '')\n\n if not self.thread.title.split()[0] in title:\n continue\n\n p = post.Post(title=title, url=href)\n p.cover_image = cover_image\n posts.append(p)\n\n return posts\n\n def _get_response(self, url=None):\n if self.hostname in url:\n url = url.replace(self.hostname, self.ip)\n \n return self.session.get(url, headers=self.headers)\n\nclass PostParser(object):\n\n def __init__(self, post=None, ip=None):\n self.post = post\n self.ip = ip\n\n self.image_src = []\n\n self.hostname = urlparse.urlparse(url=self.post.url).hostname\n self.headers = {\n 'User-Agent': USER_AGENT,\n 'Referer': self.post.url,\n 'Host': self.hostname,\n 'Access-Control-Allow-Origin': 'http://' + self.hostname,\n }\n\n def parse(self):\n response = self._get_response(self.post.url)\n response.encoding = 'utf-8'\n if not response.status_code / 100 == 2:\n return self.post\n \n doc = BeautifulSoup.BeautifulSoup(response.text)\n\n title = None\n images = []\n\n for e in doc.findAll('h1'):\n decoded = BeautifulSoup.BeautifulSoup(e.text.strip(), convertEntities=BeautifulSoup.BeautifulSoup.HTML_ENTITIES)\n title = str(unicode(decoded))\n title = title.replace('&', '&')\n title = title.replace('<', '<')\n title = title.replace('>', '>')\n title = title.replace('"', '\"')\n title = title.replace(codecs.BOM_UTF8, '')\n title = ' '.join(title.split())\n \n for e in doc.findAll('img'):\n src = e.get('src', None)\n lazy_src = e.get('data-lazy-src', None)\n loaded = e.get('data-lazy-loaded', None)\n\n if not src and not lazy_src:\n continue\n\n if not lazy_src and not loaded:\n continue\n\n if lazy_src:\n src = lazy_src\n\n if len(src.split('.')) < 2:\n continue\n \n self.image_src.append(src)\n\n self.post.title = title\n\n return self.post\n\n def get_images(self):\n images = []\n \n for src in self.image_src:\n r = None\n\n if self.hostname in src:\n src = src.replace(self.hostname, self.ip)\n \n try:\n r = requests.get(src, timeout=5, headers=self.headers)\n except Exception as e:\n r = requests.get(src.split('?')[0], timeout=5, headers=self.headers)\n else:\n r = requests.get(src, timeout=5)\n\n if not r.status_code / 100 == 2 or not r.content:\n continue\n\n if 'BGR\n\n\t\tif i == 0:\n\t\t\t# the first frame, detect face, here we only use the first face, you can change depending on your need\n\t\t\tboxes = face_boxes(frame_bgr)\n\t\t\tboxes = [boxes[0]]\n\t\t\tparam_lst, roi_box_lst = tddfa(frame_bgr, boxes)\n\t\t\tver = tddfa.recon_vers(param_lst, roi_box_lst, dense_flag=False)[0]\n\n\t\t\t# refine\n\t\t\tparam_lst, roi_box_lst = tddfa(frame_bgr, [ver], crop_policy='landmark')\n\t\t\tver = tddfa.recon_vers(param_lst, roi_box_lst, dense_flag=False)[0]\n\t\telse:\n\t\t\tparam_lst, roi_box_lst = tddfa(frame_bgr, [pre_ver], crop_policy='landmark')\n\n\t\t\troi_box = roi_box_lst[0]\n\t\t\t# todo: add confidence threshold to judge the tracking is failed\n\t\t\tif abs(roi_box[2] - roi_box[0]) * abs(roi_box[3] - roi_box[1]) < 2020:\n\t\t\t\tboxes = face_boxes(frame_bgr)\n\t\t\t\tboxes = [boxes[0]]\n\t\t\t\tparam_lst, roi_box_lst = tddfa(frame_bgr, boxes)\n\n\t\t\tver = tddfa.recon_vers(param_lst, 
roi_box_lst, dense_flag=False)[0]\n\n\t\tpre_ver = ver # for tracking\n\n\t\tif args.sensor_data == \"\":\n\t\t\tres = viz_pose(frame_bgr.copy(), param_lst, [ver], print_angle=False)\n\t\telif i < len(sensor_data):\n\t\t\tres = viz_pose(frame_bgr.copy(), param_lst, [ver], print_angle=False, rotation=sensor_data[i])\n\t\telse:\n\t\t\tres = frame_bgr.copy()\n\n\t\twriter.append_data(res[..., ::-1]) # BGR->RGB\n\n\twriter.close()\n\tprint(f'Dump to {video_wfp}')\n\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser(description='The demo of video of 3DDFA_V2')\n\tparser.add_argument('-c', '--config', type=str, default='configs/mb1_120x120.yml')\n\tparser.add_argument('-f', '--video_fp', type=str)\n\tparser.add_argument('-d', '--sensor_data', type=str, default=\"\")\n\tparser.add_argument('-m', '--mode', default='cpu', type=str, help='gpu or cpu mode')\n\tparser.add_argument('--onnx', action='store_true', default=False)\n\n\targs = parser.parse_args()\n\tmain(args)\n","sub_path":"pose_video.py","file_name":"pose_video.py","file_ext":"py","file_size_in_byte":3319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"575464883","text":"import sqlalchemy\nimport databases\n\n\n# SQLAlchemy specific code, as with any other app\nDATABASE_URL = \"sqlite:///./test.db\"\n# DATABASE_URL = \"postgresql://user:password@postgresserver/db\"\n\ndatabase = databases.Database(DATABASE_URL)\n\nmetadata = sqlalchemy.MetaData()\n\nnotes = sqlalchemy.Table(\n \"notes\",\n metadata,\n sqlalchemy.Column(\"id\", sqlalchemy.Integer, primary_key=True),\n sqlalchemy.Column(\"text\", sqlalchemy.String),\n sqlalchemy.Column(\"completed\", sqlalchemy.Boolean),\n )\n\nengine = sqlalchemy.create_engine(\n DATABASE_URL, connect_args={\"check_same_thread\": False}\n )\n\nmetadata.create_all(engine)\n","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"186014141","text":"import cv2\nimport imageio\n# Render avi or gif\n\n\ndef render_frames(frame_array, savePath, fileName, fps, otype='AVI'):\n print('Creating replay ...', end=' ')\n if otype == 'AVI':\n fileName += '.avi'\n height, width, layers = frame_array[0].shape\n if layers == 1:\n layers = 0\n size = (width, height)\n out = cv2.VideoWriter(\n savePath + fileName, cv2.VideoWriter_fourcc(*'DIVX'), fps, size, layers)\n for i in range(len(frame_array)):\n out.write(frame_array[i])\n out.release()\n print('Done. 
Saved to {}'.format(savePath + fileName))\n else:\n print('Error: Invalid type, must be avi.')\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"290894696","text":"from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter\r\nfrom pdfminer.converter import TextConverter\r\nfrom pdfminer.layout import LAParams\r\nfrom pdfminer.pdfpage import PDFPage\r\nimport os\r\nimport sys, getopt\r\nimport io\r\n\r\n#converts pdf, returns its text content as a string\r\ndef convert(fname, pages=None):\r\n if not pages:\r\n pagenums = set()\r\n else:\r\n pagenums = set(pages)\r\n\r\n output = io.StringIO()\r\n manager = PDFResourceManager()\r\n converter = TextConverter(manager, output, laparams=LAParams())\r\n interpreter = PDFPageInterpreter(manager, converter)\r\n\r\n infile = open(fname, 'rb')\r\n for page in PDFPage.get_pages(infile, pagenums, check_extractable=False, password=\"\"):\r\n interpreter.process_page(page)\r\n\r\n infile.close()\r\n converter.close()\r\n text = output.getvalue()\r\n output.close\r\n return text\r\n\r\n#converts all pdfs in directory pdfDir, saves all resulting txt files to txtdir\r\ndef convertMultiple(pdfDir, txtDir):\r\n if pdfDir == \"\": pdfDir = os.getcwd() + \"\\\\\" #if no pdfDir passed in \r\n for pdf in os.listdir(pdfDir): #iterate through pdfs in pdf directory\r\n fileExtension = pdf.split(\".\")[-1]\r\n if fileExtension == \"pdf\":\r\n try: \r\n pdfFilename = pdfDir + pdf \r\n text = pdfparser(pdfFilename) #get string of text content of pdf\r\n textFilename = txtDir + pdf + \".txt\"\r\n textFile = open(textFilename, \"w\", encoding='UTF-8' ) #make text file\r\n textFile.write(text) #write text to text file\r\n print (textFilename)\r\n except:\r\n print (txtDir + pdf + \" ERROR!!!\")\r\n pass\r\n\r\n'''\r\n\"Comparative Law eJournal\"\r\n\"LSN International Human Rights Issues (Topic)\"\r\n\"LSN Investment (Topic)\"\r\n\"LSN Securities Law U.S. (Topic)\"\r\n\"Property, Land Use & Real Estate Law eJournal\"\r\n\"Torts & Products Liability Law eJournal\"\r\n\r\n\r\nsubFolders = [\r\n \"LSN International Human Rights Issues (Topic)\"]\r\nfor sub in subFolders:\r\n pdfDir = \"C:/Users/kingw/Desktop/Law Papers/\" + sub + \"/\"\r\n txtDir = \"C:/Users/kingw/Desktop/Law Papers txt/\" + sub + \"/\"\r\n \r\n convertMultiple(pdfDir, txtDir)\r\n'''\r\n'''\r\nsubFolders2 = [\r\n \"Comparative Law eJournal\",\r\n \"LSN International Human Rights Issues (Topic)\",\r\n \"LSN Investment (Topic)\",\r\n \"Property, Land Use & Real Estate Law eJournal\",\r\n \"Torts & Products Liability Law eJournal\"]\r\n\r\nfor sub in subFolders2:\r\n pdfDir = \"C:/Users/kingw/Desktop/Law Papers/\" + sub + \"/foreign papers/\"\r\n txtDir = \"C:/Users/kingw/Desktop/Law Papers txt/\" + sub + \"/foreign papers/\"\r\n convertMultiple(pdfDir, txtDir)\r\n\r\npdfDir = \"C:/Users/kingw/Desktop/Law Papers/LSN Securities Law U.S. (Topic)/old versions/\"\r\ntxtDir = \"C:/Users/kingw/Desktop/Law Papers txt/LSN Securities Law U.S. 
(Topic)/old versions/\"\r\nconvertMultiple(pdfDir, txtDir)\r\n\r\n'''\r\npdfDir = \"C:/Users/kingw/Desktop/Law Papers/test/\"\r\ntxtDir = \"C:/Users/kingw/Desktop/Law Papers/test/\"\r\n\r\nconvertMultiple(pdfDir, txtDir)\r\n\r\n","sub_path":"python/pdf-to-txt/PDF2TXT.py","file_name":"PDF2TXT.py","file_ext":"py","file_size_in_byte":3078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"482751007","text":"\n\nfrom django import template\nimport re\nfrom collections import OrderedDict\nregister = template.Library()\n\n# @register.filter\n@register.inclusion_tag('menu.html')\ndef menu(request):\n # menu_list = request.session.get('menu_list')\n menu_dict = request.session.get('menu_dict')\n\n menu_order_key = sorted(menu_dict, key=lambda x: menu_dict[x]['weight'], reverse=True)\n\n # print(sort_d1)\n\n menu_order_dict = OrderedDict()\n\n for key in menu_order_key:\n menu_order_dict[key] = menu_dict[key]\n\n path = request.path\n for k,v in menu_order_dict.items():\n v['class'] = ''\n for i in v['children']:\n # /customer/add/\n # if re.match(i['url'],path): # 获取当前访问路径对应的parent_id == i['permissions__pk']\n if request.pid == i['second_menu_id']: #\n v['class'] = 'show'\n i['class'] = 'active'\n\n # else:\n # pass\n\n # for i in menu_dict:\n # if i.get('permissions__url') == request.path:\n # i['class'] = 'active'\n\n print(menu_dict)\n # [{'permissions__url': '/customer/list/', 'permissions__title': '客户管理', 'permissions__menu': True, 'permissions__icon': 'fa fa-camera'}]\n # menu_data = {'menu_data':menu_dict}\n menu_data = {'menu_order_dict':menu_order_dict}\n return menu_data\n\n\n@register.inclusion_tag('breadcrumb.html')\ndef bread_crumb(request):\n bread_crumb = request.bread_crumb\n data = {'bread_crumb':bread_crumb}\n return data\n\n\nfrom django.conf import settings\n\n@register.filter\ndef has_permission(request, permission):\n if permission in request.session.get(settings.PERMISSION_SESSION_KEY):\n return True\n\n\n@register.simple_tag\ndef gen_role_url(request, rid):\n params = request.GET.copy()\n \n params._mutable = True\n params['rid'] = rid\n # params = {'uid': 1,'rid':'a=1'}\n return params.urlencode()\n\n","sub_path":"IGnb/rbac/templatetags/mytags.py","file_name":"mytags.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"271614974","text":"\"\"\"fixing relation bid and proposal\n\nRevision ID: c865017a0449\nRevises: ffa8732e25ee\nCreate Date: 2021-07-15 09:23:48.871302\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c865017a0449'\ndown_revision = 'ffa8732e25ee'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('bids', sa.Column('proposal_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'bids', 'proposals', ['proposal_id'], ['id'])\n op.drop_constraint('proposals_bid_id_fkey', 'proposals', type_='foreignkey')\n op.drop_column('proposals', 'bid_id')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
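# editorial note (added): in PDF2TXT.py above, convertMultiple calls
# pdfparser(pdfFilename), but the module only defines convert(), so every PDF
# raises a NameError that the bare except swallows and reports as "ERROR!!!".
# The call presumably should read:
#     text = convert(pdfFilename)  # get string of text content of pdf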
###\n op.add_column('proposals', sa.Column('bid_id', sa.INTEGER(), autoincrement=False, nullable=True))\n op.create_foreign_key('proposals_bid_id_fkey', 'proposals', 'bids', ['bid_id'], ['id'])\n op.drop_constraint(None, 'bids', type_='foreignkey')\n op.drop_column('bids', 'proposal_id')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/c865017a0449_fixing_relation_bid_and_proposal.py","file_name":"c865017a0449_fixing_relation_bid_and_proposal.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"494800331","text":"from sklearn.neighbors import KNeighborsClassifier\r\nimport sklearn.metrics as metrics\r\nimport torch\r\nfrom torch.autograd import Variable\r\nfrom torch_geometric.datasets import TUDataset\r\nfrom torch_geometric.data import DataLoader\r\nfrom torch_geometric import utils\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch.utils.data import random_split\r\nimport numpy as np\r\nimport argparse\r\nimport os\r\nfrom network import Net\r\n\r\n\r\n\r\n# Test & Evaluation code\r\ndef test(model,loader):\r\n model.eval()\r\n correct = 0.\r\n loss = 0.\r\n for data in loader:\r\n data = data.to(device)\r\n out = model(data)\r\n pred = out.max(dim=1)[1]\r\n correct += pred.eq(data.y).sum().item()\r\n loss += F.nll_loss(out,data.y,reduction='sum').item()\r\n return correct / len(loader.dataset),loss / len(loader.dataset)\r\n\r\n# evaluate using kNN\r\ndef evaluate(train_loader, val_loader, model, device):\r\n model.eval()\r\n correct = 0.\r\n loss = 0.\r\n\r\n train_embeddings = []\r\n train_labels = []\r\n val_embeddings = []\r\n val_labels = []\r\n\r\n for data in train_loader:\r\n data = data.to(device)\r\n out = model(data)\r\n learned_feat = out[0].cpu().data.numpy()\r\n train_embeddings.append(learned_feat)\r\n train_labels.append(data.y.long().numpy())\r\n\r\n\r\n for data in val_loader:\r\n data = data.to(device)\r\n out = model(data)\r\n learned_feat = out[0].cpu().data.numpy()\r\n val_embeddings.append(learned_feat)\r\n val_labels.append(data.y.long().numpy())\r\n\r\n neigh = KNeighborsClassifier(n_neighbors=3)\r\n neigh.fit(train_embeddings, train_labels)\r\n\r\n val_preds = neigh.predict(val_embeddings)\r\n train_preds = neigh.predict(train_embeddings)\r\n\r\n\r\n\r\n result = {'prec': metrics.precision_score(val_labels, val_preds, average='macro'),\r\n 'recall': metrics.recall_score(val_labels, val_preds, average='macro'),\r\n 'acc': metrics.accuracy_score(val_labels, val_preds),\r\n 'F1': metrics.f1_score(val_labels, val_preds, average=\"micro\"),\r\n 'train acc': metrics.accuracy_score(train_labels, train_preds)}\r\n\r\n return result\r\n\r\n# evaluate using MLP classifier\r\ndef evaluate_mlp(train_loader, val_loader, model, device):\r\n model.eval()\r\n correct = 0.\r\n loss = 0.\r\n\r\n train_embeddings = []\r\n train_labels = []\r\n val_embeddings = []\r\n val_labels = []\r\n\r\n for data in train_loader:\r\n data = data.to(device)\r\n out = model(data)\r\n learned_feat = out[0].cpu().data.numpy()\r\n train_embeddings.append(learned_feat)\r\n train_labels.append(data.y.long().numpy())\r\n\r\n\r\n for data in val_loader:\r\n data = data.to(device)\r\n out = model(data)\r\n learned_feat = out[0].cpu().data.numpy()\r\n val_embeddings.append(learned_feat)\r\n val_labels.append(data.y.long().numpy())\r\n\r\n # Initialization of the final classifier\r\n in_feat = len(learned_feat)\r\n pred_layers = []\r\n 
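        # editorial note (added): the layers appended below form a small MLP
        # head (in_feat -> 64 -> 32 -> 2) trained on the frozen graph
        # embeddings, one sample at a time with no epochs or shuffling; note
        # also that the sibling kNN path hands Python lists straight to
        # KNeighborsClassifier.fit, which only lines up with its expected
        # 2-D X / 1-D y shapes when every batch contributes a single graph.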
pred_layers.append(nn.Linear(in_feat, 64).to(device))\r\n pred_layers.append(nn.LeakyReLU())\r\n pred_layers.append(nn.Linear(64, 32).to(device))\r\n pred_layers.append(nn.LeakyReLU())\r\n pred_layers.append(nn.Linear(32, 2).to(device))\r\n pred_model = nn.Sequential(*pred_layers)\r\n\r\n # The to-be-finetuned model and the optimizer\r\n optimizer_2 = torch.optim.Adam(pred_model.parameters(), lr=0.001)\r\n\r\n # Train on the train embeddings\r\n for i in range(len(train_embeddings)):\r\n pred_prob = pred_model(Variable(torch.Tensor(train_embeddings[i]), requires_grad=False))\r\n # print(pred_prob)\r\n pred_prob = torch.unsqueeze(pred_prob, 0)\r\n loss = F.cross_entropy(pred_prob, Variable(torch.LongTensor([int(train_labels[i])])))\r\n loss.backward()\r\n optimizer_2.step()\r\n optimizer_2.zero_grad()\r\n\r\n # Make predictions on the val/test embeddings\r\n correct = 0\r\n for i in range(len(val_embeddings)):\r\n pred_prob = pred_model(Variable(torch.Tensor(val_embeddings[i]), requires_grad=False))\r\n pred = pred_prob.argmax(dim=0)\r\n # print(pred)\r\n correct += pred.eq(torch.Tensor(val_labels[i])).sum().item()\r\n\r\n\r\n\r\n result = {'acc': correct /len(val_embeddings)}\r\n\r\n return result\r\n\r\n# Hyperparameters & Setup\r\n\r\nparser = argparse.ArgumentParser()\r\n\r\nparser.add_argument('--seed', type=int, default=777, help='seed')\r\nparser.add_argument('--batch_size', type=int, default=128, help='batch size')\r\nparser.add_argument('--lr', type=float, default=0.0005, help='learning rate')\r\nparser.add_argument('--weight_decay', type=float, default=0.0001, help='weight decay')\r\nparser.add_argument('--nhid', type=int, default=128, help='hidden size')\r\nparser.add_argument('--pooling_ratio', type=float, default=0.5, help='pooling ratio')\r\nparser.add_argument('--dropout_ratio', type=float, default=0.5, help='dropout ratio')\r\nparser.add_argument('--dataset', type=str, default='DD')\r\nparser.add_argument('--iterations', type=int, default=5, help='Number of iterations')\r\nparser.add_argument('--epochs', type=int, default=100000, help='maximum number of epochs')\r\nparser.add_argument('--patience', type=int, default=50, help='patience for earlystopping')\r\nparser.add_argument('--pooling_layer_type', type=str, default='GCNConv')\r\nparser.add_argument('--num_features', type=int, default=64, help='Dimension of input features')\r\nparser.add_argument('--final_dim', type=int, default=64, help='Dimension of final embeddings')\r\n\r\nargs = parser.parse_args()\r\ndevice = 'cpu'\r\ntorch.manual_seed(args.seed)\r\nif torch.cuda.is_available():\r\n torch.cuda.manual_seed(args.seed)\r\n device = 'cuda:0'\r\ndataset = TUDataset(os.path.join('data',args.dataset), name=args.dataset)\r\nnum_classes = dataset.num_classes\r\nnum_features = dataset.num_features\r\n\r\nnum_training = int(len(dataset)*0.8)\r\nnum_val = int(len(dataset)*0.1)\r\nnum_test = len(dataset) - (num_training+num_val)\r\n\r\n# Train\r\nmin_loss = 1e10\r\npatience = 0\r\nval_accs = []\r\ntest_accs = []\r\n\r\n\r\n\r\n# Train loop\r\nfor iter in range(args.iterations):\r\n training_set,validation_set,test_set = random_split(dataset,[num_training,num_val,num_test])\r\n\r\n train_loader = DataLoader(training_set, batch_size=args.batch_size, shuffle=True)\r\n val_loader = DataLoader(validation_set,batch_size=args.batch_size,shuffle=False)\r\n test_loader = DataLoader(test_set,batch_size=1,shuffle=False)\r\n model = Net(num_features, args.nhid, num_classes, args.pooling_ratio, args.dropout_ratio).to(device)\r\n optimizer = 
torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\r\n\r\n\r\n\r\n for epoch in range(args.epochs):\r\n model.train()\r\n for i, data in enumerate(train_loader):\r\n data = data.to(device)\r\n out = model(data)\r\n loss = F.nll_loss(out, data.y)\r\n print(\"Training loss:{}\".format(loss.item()))\r\n loss.backward()\r\n optimizer.step()\r\n optimizer.zero_grad()\r\n val_acc,val_loss = test(model,val_loader)\r\n print(\"Validation loss:{}\\taccuracy:{}\".format(val_loss,val_acc))\r\n if val_loss < min_loss:\r\n torch.save(model.state_dict(),'latest.pth')\r\n print(\"Model saved at epoch{}\".format(epoch))\r\n min_loss = val_loss\r\n patience = 0\r\n else:\r\n patience += 1\r\n # early stopping once the counter exceeds the configured patience\r\n if patience > args.patience:\r\n break\r\n\r\n model = Net(num_features, args.nhid, num_classes, args.pooling_ratio, args.dropout_ratio).to(device)\r\n model.load_state_dict(torch.load('latest.pth'))\r\n val_acc, val_loss = test(model, val_loader)\r\n test_acc, test_loss = test(model, test_loader)\r\n val_accs.append(val_acc)\r\n test_accs.append(test_acc)\r\n print(\"Test accuracy:{}\".format(test_acc))\r\n\r\n\r\n# Evaluation\r\n# Validation avg. and std\r\nprint('Validation accuracy of ' + str(args.iterations) + ' times is: ' + str(np.mean(val_accs)) + ' with std: ' + str(np.std(val_accs)))\r\n# Test avg. and std\r\nprint('Test accuracy of ' + str(args.iterations) + ' times is: ' + str(np.mean(test_accs)) + ' with std: ' + str(np.std(test_accs)))\r\n","sub_path":"Code/sag/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"525126031","text":"import torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.utils.data\nimport random\nimport os\nimport sys\n\nsys.path.append(os.environ.get('HYPERNOMAD_HOME') + \"/src/blackbox/blackbox\")\n\n\nclass DataHandler(object):\n def __init__(self, dataset, batch_size):\n self.__dataset = dataset\n available_datasets = ['MNIST', 'Fashion-MNIST', 'KMNIST', 'EMNIST', 'CIFAR10', 'CIFAR100', 'STL10', 'SVHN',\n 'CUSTOM', 'MINIMNIST']\n self.__batch_size = batch_size\n self.__transform_train = None\n self.__transform_test = None\n assert dataset in available_datasets, 'Choose a valid dataset \\n'\n\n @property\n def dataset(self):\n return self.__dataset\n\n @property\n def batch_size(self):\n return self.__batch_size\n\n @property\n def transform_train(self):\n return self.__transform_train\n\n @transform_train.setter\n def transform_train(self, t_train):\n self.__transform_train = t_train\n\n @property\n def transform_test(self):\n return self.__transform_test\n\n @transform_test.setter\n def transform_test(self, t_test):\n self.__transform_test = t_test\n\n @property\n def get_mean_and_std(self):\n \"\"\"Compute the mean and std value of dataset.\"\"\"\n\n dataloader = self.get_loaders()\n image_size, num_classes = self.get_info_data\n input_channels = image_size[0]\n mean = torch.zeros(input_channels)\n std = torch.zeros(input_channels)\n print('==> Computing mean and std..')\n\n for inputs, targets in dataloader:\n for i in range(input_channels):\n mean[i] += inputs[:, i, :, :].mean()\n std[i] += inputs[:, i, :, :].std()\n mean.div_(len(self.dataset))\n std.div_(len(self.dataset))\n return mean, std\n\n @property\n def get_info_data(self):\n \"\"\"Get the size of the images and number of classes for each dataset\"\"\"\n image_size = None\n total_number_classes = 0\n if self.dataset in ['MINIMNIST', 
'MNIST', 'Fashion-MNIST', 'KMNIST', 'EMNIST']:\n image_size = (1, 28, 28)\n total_number_classes = 10\n if self.dataset in ['CIFAR10', 'SVHN']:\n image_size = (3, 32, 32)\n total_number_classes = 10\n if self.dataset == 'CIFAR100':\n image_size = (3, 32, 32)\n total_number_classes = 100\n if self.dataset == 'STL10':\n image_size = (3, 96, 96)\n total_number_classes = 10\n return image_size, total_number_classes\n\n def get_loaders(self, resolution=None):\n trainloader = None\n validloader = None\n testloader = None\n if not resolution:\n resolution = 1\n root = os.path.join('./data', self.dataset)\n\n if self.dataset == 'MINIMNIST':\n print(\">>> Preparing the simplified MNIST dataset...\")\n transform_train = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,)), ])\n trainset = torchvision.datasets.MNIST(root=root, train=True, download=True, transform=transform_train)\n testset = torchvision.datasets.MNIST(root=root, train=False, download=True, transform=transform_train)\n\n n_train = 1000\n n_test = 200\n indices = list(range(len(trainset)))\n random.shuffle(indices)\n\n indices_test = list(range(len(testset)))\n random.shuffle(indices_test)\n\n train_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[:n_train])\n test_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices_test[:n_test])\n\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=self.batch_size, num_workers=12,\n sampler=train_sampler)\n testloader = torch.utils.data.DataLoader(testset, batch_size=100, sampler=test_sampler, num_workers=12)\n\n if self.dataset in ['MNIST', 'Fashion-MNIST', 'KMNIST', 'EMNIST']:\n transform_train = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,)), ])\n if self.dataset == 'MNIST':\n print(\">>> Preparing MNIST dataset...\")\n trainset = torchvision.datasets.MNIST(root=root, train=True, download=True, transform=transform_train)\n testset = torchvision.datasets.MNIST(root=root, train=False, download=True, transform=transform_train)\n if self.dataset == 'Fashion-MNIST':\n print(\">>> Preparing Fashion-MNIST dataset...\")\n trainset = torchvision.datasets.FashionMNIST(root, train=True, transform=transform_train,\n target_transform=None, download=True)\n testset = torchvision.datasets.FashionMNIST(root, train=False, transform=transform_train,\n target_transform=None, download=True)\n if self.dataset == 'KMNIST':\n print(\">>> Preparing KMNIST dataset...\")\n trainset = torchvision.datasets.KMNIST(root, train=True, transform=transform_train,\n target_transform=None, download=True)\n testset = torchvision.datasets.KMNIST(root, train=False, transform=transform_train,\n target_transform=None, download=True)\n if self.dataset == 'EMNIST':\n print(\">>> Preparing EMNIST dataset...\")\n trainset = torchvision.datasets.EMNIST(root, train=True, transform=transform_train,\n target_transform=None, download=True)\n testset = torchvision.datasets.EMNIST(root, train=False, transform=transform_train,\n target_transform=None, download=True)\n\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=self.batch_size, shuffle=True,\n num_workers=8)\n testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=True, num_workers=8)\n\n if self.dataset in ['CIFAR10', 'CIFAR100']:\n\n new_image_size = int(32 * resolution)\n print(new_image_size)\n transform_train = transforms.Compose([\n transforms.Resize((new_image_size, new_image_size)),\n transforms.RandomCrop(new_image_size, padding=4), # 
resolution\n # transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.4914, 0.4822, 0.4465],\n [0.2023, 0.1994, 0.2010]),\n # AutoAugment(),\n # Cutout()\n ])\n\n transform_test = transforms.Compose([transforms.Resize((new_image_size, new_image_size)),\n transforms.ToTensor(),\n transforms.Normalize([0.4914, 0.4822, 0.4465],\n [0.2023, 0.1994, 0.2010]),\n ])\n\n if self.dataset == 'CIFAR10':\n print(\">>> Preparing CIFAR-10 dataset...\")\n trainset = torchvision.datasets.CIFAR10(root=root, train=True, download=True,\n transform=transform_train)\n testset = torchvision.datasets.CIFAR10(root=root, train=False, download=True,\n transform=transform_test)\n else:\n print(\">>> Preparing CIFAR-100 dataset...\")\n trainset = torchvision.datasets.CIFAR100(root=root, train=True, download=True,\n transform=transform_train)\n testset = torchvision.datasets.CIFAR100(root=root, train=False, download=True,\n transform=transform_test)\n\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=self.batch_size, shuffle=True,\n num_workers=8)\n testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=8)\n\n if self.dataset == 'STL10':\n print(\">>> Preparing STL10 dataset...\")\n trainset = torchvision.datasets.STL10(root, train=True, transform=None, target_transform=None,\n download=True)\n testset = torchvision.datasets.STL10(root, train=False, transform=None, target_transform=None,\n download=True)\n\n n_valid = 40000\n indices = list(range(len(trainset)))\n random.shuffle(indices)\n\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=self.batch_size, shuffle=False,\n sampler=torch.utils.data.sampler.SubsetRandomSampler(\n indices[:n_valid]), num_workers=12)\n validloader = torch.utils.data.DataLoader(trainset, batch_size=100,\n sampler=torch.utils.data.sampler.SubsetRandomSampler(\n indices[n_valid:]), num_workers=12)\n testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=12)\n\n return trainloader, testloader\n\n\nif __name__ == '__main__':\n dl = DataHandler('Fashion-MNIST', 128)\n image_size, num_classes = dl.get_info_data\n mean, std = dl.get_mean_and_std\n print(mean)\n print(std)\n","sub_path":"Formulation2/datahandler.py","file_name":"datahandler.py","file_ext":"py","file_size_in_byte":10110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"523240539","text":"import cv2\r\nimport numpy as np\r\nimport os\r\nfrom tqdm import tqdm\r\n\r\n\r\ndef linear_enhance(img1, c, b): # brightness adjustment: multiply each pixel by c, then add b\r\n rows, cols, channel = img1.shape\r\n blank = np.zeros([rows, cols, channel], img1.dtype) # np.zeros(img1.shape, dtype=uint8)\r\n linear = cv2.addWeighted(img1, c, blank, 0, b)\r\n return linear\r\n\r\n\r\nroot = '/gdata1/zhuqi/DarkFace_coco_0.666/train/'\r\noutput_path = '/gdata1/zhuqi/DarkFace_coco_0.666/equalization_train/'\r\n\r\nfile_list = os.listdir(root)\r\nfile_list.sort(key=lambda x: int(x[:-4])) # sort by the part of the file name left of the last four characters (i.e. left of the extension), as an integer\r\n\r\nwith tqdm(total=len(file_list)) as load_bar:\r\n for i in range(len(file_list)):\r\n img = cv2.imread(root + file_list[i], 1) # 1 3 4 5 7\r\n\r\n (b, g, r) = cv2.split(img)\r\n bH = cv2.equalizeHist(b)\r\n gH = cv2.equalizeHist(g)\r\n rH = cv2.equalizeHist(r)\r\n result = cv2.merge((bH, gH, rH))\r\n\r\n cv2.imwrite(output_path + file_list[i], result)\r\n 
load_bar.update(1)\r\n","sub_path":"tools/image_enhance/equalization.py","file_name":"equalization.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"80215354","text":"# use open cv to show new images from AirSim \n\nfrom AirSimClient import *\n# requires Python 3.5.3 :: Anaconda 4.4.0\n# pip install opencv-python\nimport cv2\nimport time\nimport sys\nimport numpy as np\nimport random as rnd\nimport PIL\nprint(\"hi\")\n\n\ndef printUsage():\n print(\"Usage: python camera.py [depth|segmentation|scene]\")\n\ncameraType = \"scene\"\n\nfor arg in sys.argv[1:]:\n cameraType = arg.lower()\n\ncameraTypeMap = { \n \"depth\": AirSimImageType.DepthVis,\n \"segmentation\": AirSimImageType.Segmentation,\n \"seg\": AirSimImageType.Segmentation,\n \"scene\": AirSimImageType.Scene,\n \"disparity\": AirSimImageType.DisparityNormalized,\n \"normals\": AirSimImageType.SurfaceNormals\n}\n\nif (not cameraType in cameraTypeMap):\n printUsage()\n sys.exit(0)\n\n#print (cameraTypeMap[cameraType])\n\nclient = MultirotorClient()\nclient.confirmConnection()\nclient.enableApiControl(True)\n#client.armDisarm(True)\n#client.takeoff()\n\nhelp = False\nbbox = None\nfontFace = cv2.FONT_HERSHEY_SIMPLEX\nfontScale = 0.5\nthickness = 2\ntextSize, baseline = cv2.getTextSize(\"FPS\", fontFace, fontScale, thickness)\nprint (textSize)\ntextOrg = (10, 10 + textSize[1])\nframeCount = 0\nstartTime=time.clock()\nfps = 0\n\nlk_params = dict( winSize = (15,15),\n maxLevel = 2,\n criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n\nimg = None\n\ndef boundedbox(img):\n bbox = cv2.selectROI(img, False)\n frameGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n roi = frameGray[bbox[1]:(bbox[1]+bbox[3]),bbox[0]:(bbox[0]+bbox[2])]\n new_corners = cv2.goodFeaturesToTrack(roi,50,0.01,10) \n new_corners[:,0,0] = new_corners[:,0,0] + int(bbox[0])\n new_corners[:,0,1] = new_corners[:,0,1] + int(bbox[1])\n for corner in new_corners:\n print(corner)\n return frameGray,new_corners,bbox\n\ntime1 = time.time()\npos = [-50, 10, -8]\ntracking = 0\nclient.moveToPosition(pos[0], pos[1], pos[2], 5, 10)\napi = True\nwhile True:\n # because this method returns std::vector, msgpack decides to encode it as a string unfortunately.\n rawImage = client.simGetImage(0, cameraTypeMap[cameraType])\n if (rawImage == None):\n print(\"Camera is not returning image, please check airsim for error messages\")\n sys.exit(0)\n else:\n img = cv2.imdecode(AirSimClientBase.stringToUint8Array(rawImage),cv2.IMREAD_UNCHANGED)\n #print(png.shape)\n cv2.putText(img,'FPS ' + str(fps),textOrg, fontFace, fontScale,(255,255,255),thickness)\n #cv2.imshow(\"Depth\", png)\n # calculate\n \n if cv2.waitKey(1) & 0xFF == ord('s'):\n oldGray,corners,bbox = boundedbox(img)\n client.moveToPosition(-10, pos[1], pos[2], 1, 0) \n continue\n if bbox is not None:\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n new_corners, st, err = cv2.calcOpticalFlowPyrLK(oldGray, gray, corners, None, **lk_params)\n r_add,c_add = 0,0\n for corner in new_corners:\n r_add = r_add + corner[0][1]\n c_add = c_add + corner[0][0]\n centroid_row = int(1.0*r_add/len(new_corners))\n centroid_col = int(1.0*c_add/len(new_corners))\n #draw centroid\n print(img.shape)\n cv2.circle(img,(int(centroid_col),int(centroid_row)),5,(255,0,0)) \n #add only those corners to new_corners_updated which are at a distance of 30 or lesse\n new_corners_updated = new_corners.copy()\n tobedel = []\n for index in 
range(len(new_corners)):\n # Euclidean distance from the centroid, computed inline (no findDistance helper exists in this script)\n if ((new_corners[index][0][1] - centroid_row) ** 2 + (new_corners[index][0][0] - centroid_col) ** 2) ** 0.5 > 90:\n tobedel.append(index)\n new_corners_updated = np.delete(new_corners_updated,tobedel,0)\n \n \n\n #drawing the new points\n for corner in new_corners_updated:\n cv2.circle(img, (int(corner[0][0]),int(corner[0][1])) ,5,(0,255,0))\n if len(new_corners_updated) < 1:\n print(\"OBJECT LOST, Reinitialize for tracking\")\n break\n #finding the min enclosing circle\n ctr , rad = cv2.minEnclosingCircle(new_corners_updated)\n \n cv2.circle(img, (int(ctr[0]),int(ctr[1])) ,int(rad),(0,0,255),thickness = 5)\t\n \n #updating old_corners and oldFrameGray \n oldGray = gray.copy()\n corners = new_corners_updated.copy()\n\n #p1 = (int(bbox[0]), int(bbox[1]))\n #p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))\n #cv2.rectangle(img,p1,p2,color = (100,255,100),thickness = 4)\n cv2.imshow(\"Flow\", img)\n frameCount = frameCount + 1\n endTime=time.clock()\n diff = endTime - startTime\n if (diff > 1):\n fps = frameCount\n frameCount = 0\n startTime = endTime\n\n if time.time() - time1 > 1.0 and api:\n # get new random place \n #pos = [rnd.randint(-10, 10), rnd.randint(-10, 10), rnd.randint(-10, -3)]\n time1 = time.time()\n\n currPos = client.simGetPose().position\n \n #print( ((pos[0]-currPos.x_val)**2 + (pos[1]-currPos.y_val)**2 + (pos[2]-currPos.z_val)**2)**(1/2) )\n #client.rotateByYawRate(15, 0.1)\n\n","sub_path":"PythonClient/follow.py","file_name":"follow.py","file_ext":"py","file_size_in_byte":5239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"496208929","text":"## util.py\n## Author: Yangfeng Ji\n## Date: 09-13-2014\n## Time-stamp: \n\n##################################################################\n# Imports\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nfrom scipy.sparse import lil_matrix\nimport logging\nimport os\n\n\n##################################################################\n# Variables and Constants\nDFLT_ENCODING = \"utf-8\"\nDFLT_MODEL_PATH = os.path.join(\n os.path.dirname(__file__),\n \"data\",\n \"rstpaser.model\"\n)\nLOG_LVL = logging.INFO\nLOGGER = logging.getLogger(\"RSTParser\")\nLOGGER.setLevel(LOG_LVL)\nformatter = logging.Formatter(\n \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n)\nsh = logging.StreamHandler()\nsh.setLevel(LOG_LVL)\nsh.setFormatter(formatter)\nLOGGER.addHandler(sh)\n\n\n##################################################################\n# Methods\ndef label2action(label):\n \"\"\" Transform label to action\n \"\"\"\n items = label.split('-')\n if len(items) == 1:\n action = (items[0], None, None)\n elif len(items) == 3:\n action = tuple(items)\n else:\n raise ValueError(\"Unrecognized label: {}\".format(label))\n return action\n\n\ndef action2label(action):\n \"\"\" Transform action into label\n \"\"\"\n if action[0] == 'shift':\n label = action[0]\n elif action[0] == 'reduce':\n label = '-'.join(list(action))\n else:\n raise ValueError(\"Unrecognized parsing action: {}\".format(action))\n return label\n\n\ndef vectorize(features, vocab):\n \"\"\" Transform a feature list into a numeric vector\n with a given vocab\n \"\"\"\n vec = lil_matrix((1, len(vocab)))\n for feat in features:\n try:\n fidx = vocab[feat]\n vec[0, fidx] += 1.0\n except KeyError:\n pass\n return vec\n\n\ndef extractrelation(s, level=0):\n \"\"\" Extract discourse relation on different level\n \"\"\"\n return s.lower().split('-')[0]\n\n\ndef 
reversedict(dct):\n \"\"\" Reverse the {key:val} in dct to\n {val:key}\n \"\"\"\n # print labelmap\n newmap = {}\n for (key, val) in dct.iteritems():\n newmap[val] = key\n return newmap\n\n\ndef getgrams(text, tokendict):\n \"\"\" Generate first one, two words from the token list\n\n :type text: list of int\n :param text: indices of words with the text span\n\n :type tokendict: dict of Token (data structure)\n :param tokendict: all tokens in the doc, indexing by the\n document-level index\n \"\"\"\n n = len(text)\n grams = []\n # Get lower-case of words\n if n >= 1:\n grams.append(tokendict[text[0]].lemma.lower())\n grams.append(tokendict[text[-1]].lemma.lower())\n grams.append(tokendict[text[0]].pos)\n grams.append(tokendict[text[-1]].pos)\n if n >= 2:\n token = tokendict[text[0]].lemma.lower() \\\n + ' ' + tokendict[text[1]].lemma.lower()\n grams.append(token)\n token = tokendict[text[-2]].lemma.lower() \\\n + ' ' + tokendict[text[-1]].lemma.lower()\n grams.append(token)\n return grams\n","sub_path":"rstparser/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"80686448","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nGiven a value x, put all nodes < x at the front and all > x after that\n\"\"\"\n\nfrom linked_list import LinkedList\n\ndef partition(ll, val):\n \n assert len(ll) > 3, 'Partition makes sense for list with at least 3 elements'\n \n current = ll.head\n\n for l in ll:\n \n if current.next.data < val:\n temp = current.next\n current.next = current.next.next\n temp.next = ll.head\n ll.head = temp\n \n current = current.next\n \n return ll\n\n\n\nll = LinkedList()\nll.generate(10,0,9)\nprint(ll)\npartition(ll, 5)\nprint(ll)\n","sub_path":"Cracking the Coding Interview/02_Linked_Lists/04_partition.py","file_name":"04_partition.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"279617091","text":"#!/usr/bin/env python\n\n\"\"\"\nROS/IoT Bridge\n\nThis program intends act as a bridge bwtween ROS and IoT.\nMQTT/HTTP requests are received/sent here.\n\"\"\"\n\nimport threading\nimport json\nimport rospy\nimport actionlib\n\nfrom pkg_task5.msg import incomingMsg\nfrom pkg_ros_iot_bridge.msg import msgRosIotAction\nfrom pkg_ros_iot_bridge.msg import msgRosIotResult\n\nfrom pyiot import iot\n\n\nclass IotRosBridge(object):\n \"\"\"\n ROS/Iot Bridge Class : Performs MQTT Sub + Spreadsheet Pub\n \"\"\"\n # pylint: disable=too-many-instance-attributes\n def __init__(self):\n self._as = actionlib.ActionServer('/action_ros_iot',\n msgRosIotAction,\n self.on_goal,\n on_cancel,\n auto_start=False)\n\n param_config_iot = rospy.get_param('config_iot')\n self._config_mqtt_unique_id = param_config_iot['mqtt_unique_id']\n self._config_mqtt_server_url = param_config_iot['mqtt']['server_url']\n self._config_mqtt_server_port = param_config_iot['mqtt']['server_port']\n self._config_mqtt_qos = param_config_iot['mqtt']['qos']\n self._config_mqtt_incoming_orders = param_config_iot['mqtt']['incoming_orders']\n\n self._config_mqtt_sub_cb_ros_topic = param_config_iot['mqtt']['sub_cb_ros_topic']\n\n self._config_http_inventory_pub = param_config_iot['http']['inventory']\n self._config_http_dispatch_pub = param_config_iot['http']['dispatch']\n self._config_http_shipped_pub = param_config_iot['http']['shipped']\n\n self._handle_incoming_orders = 
rospy.Publisher(self._config_mqtt_sub_cb_ros_topic,\n incomingMsg,\n queue_size=10)\n\n try:\n iot.mqtt_subscribe_thread_start(self.func_incoming_order_callback,\n self._config_mqtt_server_url,\n self._config_mqtt_server_port,\n self._config_mqtt_incoming_orders,\n self._config_mqtt_qos)\n\n rospy.loginfo(\"MQTT Subscribe Thread Started\")\n except: # pylint: disable=bare-except\n rospy.logerr(\"Failed to start MQTT Subscribe Thread\")\n\n self._as.start()\n rospy.loginfo(\"Started ROS-IoT Bridge Action Server\")\n\n def on_goal(self, goal_handle):\n \"\"\"\n Callback Function\n\n This function will be called when Action Server receives a Goal.\n\n Parameters: \n goal_handle(object): Goal handler for the incoming goal.\n \"\"\"\n goal = goal_handle.get_goal()\n\n rospy.loginfo(\"Received new goal from Client\")\n rospy.loginfo(goal)\n\n # Validate incoming goal parameters\n if goal.protocol == \"http\":\n\n if (goal.mode == \"pub\") or (goal.mode == \"sub\"):\n goal_handle.set_accepted()\n\n # Start a new thread to process new goal from the client\n thread = threading.Thread(name=\"worker\",\n target=self.process_goal,\n args=(goal_handle,))\n thread.start()\n\n else:\n goal_handle.set_rejected()\n return\n\n else:\n goal_handle.set_rejected()\n return\n\n def process_goal(self, goal_handle):\n \"\"\"\n Goal Processing Function\n\n This function is called is a separate thread to process Goal\n\n Parameters: \n goal_handle(object): Goal handler for the incoming goal.\n \"\"\"\n result = msgRosIotResult()\n\n goal_id = goal_handle.get_goal_id()\n rospy.loginfo(\"Processing goal : \" + str(goal_id.id))\n\n goal = goal_handle.get_goal()\n\n if (goal.protocol == \"http\") and (goal.mode == \"pub\"):\n\n rospy.logwarn(\"HTTP PUB Goal ID: \" + str(goal_id.id))\n rospy.logwarn(goal.topic + \" > \" + goal.message)\n\n if goal.topic == self._config_http_inventory_pub:\n http_status = func_inventory_data_callback(goal.message)\n\n if goal.topic == self._config_http_dispatch_pub:\n http_status = func_dispatch_data_callback(goal.message)\n\n if goal.topic == self._config_http_shipped_pub:\n http_status = func_shipped_data_callback(goal.message)\n\n if http_status == 0:\n rospy.loginfo(\"HTTP requests were successfully sent!\")\n result.flag_success = True\n else:\n rospy.loginfo(\"HTTP requests failed\")\n result.flag_success = False\n\n rospy.loginfo(\"Send goal result to client\")\n\n if result.flag_success:\n rospy.loginfo(\"Succeeded\")\n goal_handle.set_succeeded(result)\n else:\n rospy.loginfo(\"Goal Failed. 
Aborting.\")\n goal_handle.set_aborted(result)\n\n rospy.loginfo(\"Goal ID: \" + str(goal_id.id) + \" Goal Processing Done.\")\n\n def func_incoming_order_callback(self, client, userdata, message):\n \"\"\"\n Callback Function\n\n This method is triggered when there is an incoming order.\n\n Parameters:\n client(string): Information regarding the MQTT client.\n userdata(string): Information regarding the userdata (if any).\n message(string): Contains the MQTT message.\n \"\"\"\n rospy.loginfo(client)\n rospy.loginfo(userdata)\n payload = str(message.payload.decode(\"utf-8\"))\n msg_obj = json.loads(payload)\n\n if msg_obj[\"item\"] == 'Clothes':\n msg_obj[\"priority\"] = 'LP'\n msg_obj[\"cost\"] = '100'\n elif msg_obj[\"item\"] == 'Food':\n msg_obj[\"priority\"] = 'MP'\n msg_obj[\"cost\"] = '200'\n else:\n msg_obj[\"priority\"] = 'HP'\n msg_obj[\"cost\"] = '300'\n\n kwargs = {\n \"id\": \"IncomingOrders\",\n \"Order ID\": msg_obj[\"order_id\"],\n \"Order Date and Time\": msg_obj[\"order_time\"],\n \"Item\": msg_obj[\"item\"],\n \"Order Quantity\": msg_obj[\"qty\"],\n \"City\": msg_obj[\"city\"],\n \"Latitude\": msg_obj[\"lat\"],\n \"Longitude\": msg_obj[\"lon\"],\n \"Priority\": msg_obj[\"priority\"],\n \"Unique Id\": self._config_mqtt_unique_id,\n \"Team Id\": \"VB#1004\",\n \"Cost\": msg_obj[\"cost\"]\n }\n\n iot.publish_to_spreadsheet(**kwargs)\n\n self._handle_incoming_orders.publish(str(kwargs))\n\n kwargs['id'] = 'Dashboard'\n kwargs['Quantity'] = msg_obj[\"qty\"]\n kwargs['Order Time'] = msg_obj[\"order_time\"]\n\n iot.publish_to_spreadsheet(**kwargs)\n\ndef on_cancel(goal_handle):\n \"\"\"\n Goal Canceling Function\n\n This function will be called when Goal Cancel request is send to the Action Server.\n\n Parameters:\n goal_handle(object): Goal handler for the incoming goal.\n \"\"\"\n rospy.loginfo(\"Received cancel request.\")\n goal_id = goal_handle.get_goal_id()\n rospy.logerr(\"Canceled \" + str(goal_id))\n\ndef func_inventory_data_callback(data):\n \"\"\"\n Callback Function\n\n This method is triggered when there is a detected package on the shelf.\n\n Parameters:\n data(string): Relevant information regarding the detected package.\n \"\"\"\n kwargs = eval(data)\n\n rospy.logwarn('\\n\\nINVENTORY UPDATE RECEIVED!\\n\\n')\n rospy.logwarn(kwargs)\n\n resp = iot.publish_to_spreadsheet(**kwargs)\n return resp\n\ndef func_dispatch_data_callback(data):\n \"\"\"\n Callback Function\n\n This method is triggered when a package is dispatched.\n\n Parameters:\n data(string): Relevant information regarding the dispatched package.\n \"\"\"\n kwargs = eval(data)\n\n rospy.logwarn('\\n\\nDISPATCH UPDATE RECEIVED!\\n\\n')\n rospy.logwarn(kwargs)\n\n resp1 = iot.publish_to_spreadsheet(**kwargs)\n\n kwargs['id'] = 'Dashboard'\n kwargs['Order Dispatched'] = 'YES'\n kwargs['Dispatch Time'] = kwargs[\"Dispatch Date and Time\"]\n\n resp2 = iot.publish_to_spreadsheet(**kwargs)\n\n return resp1 + resp2\n\ndef func_shipped_data_callback(data):\n \"\"\"\n Callback Function\n\n This method is triggered when a package is shipped.\n\n Parameters:\n data(string): Relevant information regarding the shipped package.\n \"\"\"\n kwargs = eval(data)\n\n rospy.logwarn('\\n\\nSHIPPING UPDATE RECEIVED!\\n\\n')\n rospy.logwarn(kwargs)\n\n resp1 = iot.publish_to_spreadsheet(**kwargs)\n\n kwargs['id'] = 'Dashboard'\n kwargs['Order Shipped'] = 'YES'\n kwargs['Shipping Time'] = kwargs[\"Shipped Date and Time\"]\n\n resp2 = iot.publish_to_spreadsheet(**kwargs)\n\n return resp1 + resp2\n\ndef main():\n \"\"\"\n Main 
Function\n \"\"\"\n rospy.init_node('node_ros_iot_bridge_action_server')\n\n IotRosBridge()\n\n rospy.spin()\n\nif __name__ == '__main__':\n main()\n","sub_path":"ws_moveit/src/pkg_task_6/vb_t6_bonus_1004/ros_packages/pkg_ros_iot_bridge/scripts/node_action_server_ros_iot_bridge.py","file_name":"node_action_server_ros_iot_bridge.py","file_ext":"py","file_size_in_byte":9006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"188481435","text":"#!/usr/bin/env python3\n\"\"\"\n phd-autonomous-cars-frank\n\"\"\"\n# Operating System\nimport os, math\nfrom platform import node\nfrom random import random, randint\nfrom time import time\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom svgwrite import Drawing\nfrom svgwrite.shapes import *\nfrom copy import copy\n\n# from test.Test2 import car\nimport copy as copy_module\n\nos.chdir(os.path.dirname(__file__))\nif not os.path.exists(\"drawings\"):\n os.mkdir(\"drawings\")\n\ngravity = 9.81\nuHf = 0.64\nAVHV_total_cars = 100\n\n\nclass Vector2:\n def __init__(self, x=None, y=None):\n # Vector is a tuple\n if isinstance(x, Vector2):\n y = x.y\n x = x.x\n if isinstance(x, list):\n y = x[1]\n x = x[0]\n self.x = x if isinstance(x, int) or isinstance(x, float) else 0\n self.y = y if isinstance(y, int) or isinstance(y, float) else 0\n\n def copy(self):\n return copy(self)\n\n def get_value(self):\n return [self.x, self.y]\n\n def speed(self):\n # magnitude of this vector (e.g. when it represents a velocity)\n return math.sqrt(self.x ** 2 + self.y ** 2)\n\n def reset_self(self):\n self.x = 0\n self.y = 0\n return self\n\n def draw(self, offset):\n if offset is None:\n offset = Vector2()\n return self.copy().add(Vector2([-offset.x, -offset.y]))\n\n def distance(self, point):\n return math.sqrt((self.x - point.x) ** 2 + (self.y - point.y) ** 2)\n\n def magnitude(self):\n return self.distance(Vector2().reset_self())\n\n def redirect(self, angle):\n vector = self.copy()\n angle = math.radians(angle)\n magnitude = self.distance(Vector2([0, 0]))\n vector.x = magnitude * math.sin(angle)\n vector.y = magnitude * math.cos(angle)\n return vector\n\n def redirect_self(self, point):\n vector = self.redirect(point)\n self.x = vector.x\n self.y = vector.y\n return self\n\n def cap(self, bound):\n vector = self.copy()\n if vector.x > bound:\n vector.x = bound\n if vector.y > bound:\n vector.y = bound\n bound *= -1\n if vector.x < bound:\n vector.x = bound\n if vector.y < bound:\n vector.y = bound\n return vector\n\n def cap_self(self, bound):\n vector = self.cap(bound)\n self.x = vector.x\n self.y = vector.y\n return self\n\n def cap_magnitude(self, value):\n vector = self.copy()\n magnitude = math.sqrt(self.x ** 2 + self.y ** 2)\n if magnitude > value:\n vector.scale_self(value / magnitude)\n return vector\n\n def cap_magnitude_self(self, value):\n vector = self.cap_magnitude(value)\n self.x = vector.x\n self.y = vector.y\n return self\n\n def scale(self, scale):\n vector = self.copy()\n vector.x *= scale\n vector.y *= scale\n return vector\n\n def scale_self(self, scale_vector):\n vector = self.scale(scale_vector)\n self.x = vector.x\n self.y = vector.y\n return self\n\n def add(self, add_vector):\n vector = self.copy()\n vector.x += add_vector.x\n vector.y += add_vector.y\n return vector\n\n def add_self(self, add_vector):\n vector = self.add(add_vector)\n self.x = vector.x\n self.y = vector.y\n return self\n\n def add_self_velocity(self, add_vector):\n vector = self.add(add_vector)\n self.x = abs(vector.x)\n 
self.y = abs(vector.y)\n return self\n\n def direction(self, vector=None):\n # Return direction of this vector to the vector supplied, if no vector supplied, from origin to this vector.\n return math.atan2(vector.y - self.y, vector.x - self.x) if isinstance(vector, Vector2) else math.atan2(self.y,\n self.x)\n\n\nclass TrafficNode:\n def __init__(self, name, position=None, traffic_light=None, curve_radius=0, curve_center=[]):\n if position is None:\n position = [0, 0]\n\n self.pos = position\n self.id = name\n self.name = str(name)\n self.position = Vector2(position)\n self.traffic_light = traffic_light\n self.destination_nodes = []\n self.curve_radius = curve_radius\n self.curve_center = curve_center\n\n # the nodes concerned for test 4 = 2,4,6,7,8,12,14,16 and 18\n def check(self, node):\n if isinstance(node, TrafficNode):\n return self.position == node.position and self.name == node.name\n return False\n\n def get_info(self):\n output = str.format(\"{:7s} [{:3d}, {:3d}]\", self.name, self.position.x, self.position.y)\n return output + str(self.destination_nodes)\n\n def connect(self, node, debug=False):\n if isinstance(node, TrafficNode):\n if not self.check(node) and not self.connected(node):\n self.destination_nodes.append(node)\n node.connect(self)\n if debug:\n print(\"Connecting \" + self.name + \" to \" + node.name)\n\n def connected(self, node):\n for destination_node in self.destination_nodes:\n if destination_node.check(node):\n return True\n return False\n\n def distance(self, pos):\n x = (self.pos[0] - pos[0])\n y = (self.pos[1] - pos[1])\n return math.sqrt(x*x+y*y)\n\nclass PedestrianNode(TrafficNode):\n def __init__(self, name, position=None, traffic_light=None):\n super(PedestrianNode, self).__init__(name=name, position=position, traffic_light=traffic_light)\n\n\nclass RoadNode(TrafficNode):\n def __init__(self, name, position=None, traffic_light=None):\n super(RoadNode, self).__init__(name=name, position=position, traffic_light=traffic_light)\n\n\nclass _TrafficSystem:\n def __init__(self, nodes=None, edges=None, colors=None, stroke_width=10):\n self.draw_node = True\n self.stroke_width = stroke_width\n if nodes is None:\n nodes = []\n self.nodes = nodes\n if edges is not None:\n self.add_edges(edges)\n\n if colors is None:\n colors = {}\n\n if colors.get('node') is None:\n colors['node'] = 'blue'\n if colors.get('edge') is None:\n colors['edge'] = 'aqua'\n if colors.get('text') is None:\n colors['text'] = 'white'\n\n self.colors = colors\n\n def get_nodes(self):\n return self.nodes\n\n def node(self, name):\n if isinstance(name, TrafficNode):\n return name\n for node in self.nodes:\n if str(node.name) == str(name):\n return node\n return None\n\n def add_nodes(self, nodes=None):\n self.nodes += nodes\n return self\n\n def add_edges(self, edges=None):\n for node, destinations in edges.items():\n node = self.node(node)\n if isinstance(node, TrafficNode):\n for destination in destinations:\n destination = self.node(destination)\n if isinstance(destination, TrafficNode):\n node.connect(destination)\n return self\n\n def print_system(self):\n print(\"\\n\\n\\n\\nNODE INFO\")\n for n in self.nodes:\n print(n.get_info())\n\n def draw_edges(self, canvas, offset):\n for node in self.nodes:\n for con in node.destination_nodes:\n canvas.add(Line(\n start=node.position.draw(offset).get_value(),\n end=con.position.draw(offset).get_value(),\n stroke=self.colors.get('edge'),\n stroke_width=self.stroke_width\n ))\n\n def draw_nodes(self, canvas, offset):\n for node in self.nodes:\n if 
self.draw_node:\n canvas.add(\n Circle(center=node.position.draw(offset=offset).get_value(), r=7, fill=self.colors.get('node')))\n\n def draw_text(self, canvas, offset):\n for node in self.nodes:\n canvas.add(canvas.text(\n node.name,\n insert=node.position.draw(offset=offset).add(Vector2([-7, 7])).get_value(),\n font_size=16,\n fill=self.colors.get('text')\n ))\n\n def draw(self, canvas, offset):\n self.draw_edges(canvas=canvas, offset=offset)\n self.draw_nodes(canvas=canvas, offset=offset)\n self.draw_text(canvas=canvas, offset=offset)\n\n\nclass PedestrianSystem(_TrafficSystem):\n def __init__(self, nodes, edges):\n colors = {\n 'node': '#aa0000',\n 'edge': '#330000',\n 'text': 'white'\n }\n super(PedestrianSystem, self).__init__(nodes=nodes, edges=edges, colors=colors)\n\n\nclass RoadSystem(_TrafficSystem):\n edges = []\n nodes = []\n route = {} # The routing table looks like:\n def __init__(self, nodes, edges):\n colors = {\n 'node': '#0000aa',\n 'edge': '#000033',\n 'text': 'white'\n }\n self.edges = edges\n self.nodes = nodes\n # The routing table shall tell us what is the next node for the target node (if any)\n route = {}\n # Implement BFS to find the route\n changed = True\n # Preseed with initial route\n for x in nodes:\n route[x.id] = {}\n for e in edges:\n for x in edges[e]:\n route[e][x] = x\n # From node 1 to go to node 2 you go via node 2\n # {1: {2: 2},\n # 2: {3: 3, 4: 4, 18: 18} }\n # Next step: go over all edges\n # Results must be:\n # {1: {2:2, 3:2, 4:2, 18:2}}\n while (changed):\n changed = False\n oldRoute = copy_module.deepcopy(route)\n for r in oldRoute:\n for c in oldRoute[r]:\n for n in oldRoute[c]:\n if n not in route[r]:\n route[r][n] = c\n changed = True\n self.route = route\n super(RoadSystem, self).__init__(nodes=nodes, edges=edges, colors=colors)\n\n def getNextNode(self, nodeID, targetID):\n if not nodeID in self.route:\n return None\n tgt = self.route[nodeID][targetID]\n if tgt == None:\n return tgt\n return self.getNode(tgt)\n\n def getNode(self, id):\n for x in self.nodes:\n if x.id == id:\n return x\n return None\n\n def getAllNextNodes(self, node):\n return self.edges[node]\n\n\nclass RoadSystem1(_TrafficSystem):\n edges = []\n nodes = []\n route = {} # The routing table looks like:\n\n def __init__(self, nodes, edges):\n self.edges = edges\n self.nodes = nodes\n # The routing table shall tell us what is the next node for the target node (if any)\n route = {}\n # Implement BFS to find the route\n changed = True\n # Preseed with initial route\n for x in nodes:\n route[x.id] = {}\n for e in edges:\n for x in edges[e]:\n route[e][x] = x\n # From node 1 to go to node 2 you go via node 2\n # {1: {2: 2},\n # 2: {3: 3, 4: 4, 18: 18} }\n # Next step: go over all edges\n # Results must be:\n # {1: {2:2, 3:2, 4:2, 18:2}}\n while(changed):\n changed = False\n oldRoute = copy_module.deepcopy(route)\n for r in oldRoute:\n for c in oldRoute[r]:\n for n in oldRoute[c]:\n if n not in route[r]:\n route[r][n] = c\n changed = True\n self.route = route\n print(route)\n colors = {\n 'node': '#0000aa',\n 'edge': '#000033',\n 'text': 'white'\n }\n super(RoadSystem1, self).__init__(nodes=nodes, edges=edges, colors=colors)\n\n def getNextNode(self, nodeID, targetID):\n if not nodeID in self.route:\n return None\n tgt = self.route[nodeID][targetID]\n if tgt == None:\n return tgt\n return self.getNode(tgt)\n\n def getNode(self, id):\n for x in self.nodes:\n if x.id == id:\n return x\n return None\n\n def getAllNextNodes(self, node):\n return self.edges[node]\n\n\n\nclass 
_EnvironmentObject:\n\n def __init__(self, name, position=None, velocity=None, acceleration=None, direction=None, size=10, mass=None,\n color=None, speed=None, car_ratio=None):\n\n # Initialise Default Values\n if not isinstance(name, str):\n name = str(name)\n if not isinstance(direction, int) and not isinstance(direction, float):\n direction = 0\n if not isinstance(mass, int) and not isinstance(mass, float):\n mass = 1\n if not isinstance(color, str):\n color = \"white\"\n\n # Set Values\n self.name = name\n self.position = Vector2(position)\n self.velocity = Vector2(velocity)\n self.acceleration = Vector2(acceleration)\n self.direction = direction\n self.size = Vector2(size)\n self.mass = mass\n self.color = color\n self.speed = speed\n self.car_ratio = car_ratio\n self.total_cars_percent = 0\n\n self.last = None\n\n # Environment Object Values\n self.environment = None\n\n # Initialise Data Collector\n self.data = {}\n for metric in ['time', 'position', 'velocity', 'acceleration', 'speed']:\n self.data[metric] = []\n\n # Set Environment\n def set_environment(self, environment):\n self.environment = environment\n\n # Update of how the object should act\n def behaviour_update(self, t):\n def __init__(self, pos=[0, 0], mass=1020, max_acceleration=1, drag=1.0, max_deacceleration=1,\n acceleration=[0, 0], velocity=[0, 0]):\n self.pos = pos # in meters\n self.mass = mass # kg\n self.max_acceleration = max_acceleration\n self.max_deacceleration = max_deacceleration\n self.acceleration = acceleration # in m/s^2\n self.velocity = velocity # in m/s\n self.drag = drag\n\n def get_speed(self):\n \"\"\"Returns the absolute speed in m/s\"\"\"\n return math.sqrt(self.velocity[0] * self.velocity[0] + self.velocity[1] * self.velocity[1])\n\n def get_force(self):\n return math.sqrt(\n self.acceleration[0] * self.acceleration[0] + self.acceleration[1] * self.acceleration[1]) * self.mass\n\n def __str__(self):\n return \"pos: %s mass:%s maxAccl:%s maxDeaccl:%s Accl:%s Velocity:%s\" % (\n self.pos, self.mass, self.max_acceleration, self.max_deacceleration, self.acceleration, self.velocity)\n\n # Update of how the object should move\n def physics_update(self, t):\n def __init__(self, friction=0.75, gravity=9.8):\n self.friction = friction\n self.gravity = gravity\n\n def update_acceleration(self, Car, delta_accl):\n Car.acceleration[0] = Car.acceleration[0] + delta_accl[0]\n Car.acceleration[1] = Car.acceleration[1] + delta_accl[1]\n\n def update_velocity(self, Car, delta_t):\n Car.velocity[0] = Car.velocity[0] + delta_t * Car.acceleration[0]\n Car.velocity[1] = Car.velocity[1] + delta_t * Car.acceleration[1]\n\n def update_pos(self, Car, delta_t):\n Car.pos[0] = Car.pos[0] + delta_t * Car.velocity[0]\n Car.pos[1] = Car.pos[1] + delta_t * Car.velocity[1]\n\n def max_speed_curve(self, curve_radius):\n return math.sqrt(self.gravity * self.friction * curve_radius)\n # TODO Fix Calculation of Drag\n # Adjust velocity by acceleration relative to how much time has passed\n self.velocity.add_self(self.acceleration.copy().scale(t))\n # Adjust position by velocity relative to how much time has passed\n self.position.add_self(self.velocity.copy().scale(t))\n\n class CurveMovement():\n def __init__(self, time, car, radius, curveCenter, startDegree, endDegree):\n self.curveCenter = curveCenter\n self.lengthCircle = 2 * math.pi * radius\n self.radius = radius\n self.startTime = time\n self.endDegree = endDegree\n self.actualDegree = startDegree\n self.startDegree = startDegree\n self.car = car\n\n def move(self, 
time):\n reachedEnd = False\n degree = 360 * self.car.get_speed() / self.lengthCircle\n if self.endDegree < self.startDegree:\n degree = - degree\n self.actualDegree = (time - self.startTime) * degree\n\n if self.endDegree >= self.startDegree:\n if (self.actualDegree >= self.endDegree):\n reachedEnd = True\n else:\n degree = - degree\n if (self.actualDegree <= self.endDegree):\n reachedEnd = True\n\n if reachedEnd:\n # compute amount of way driven\n timeAfterEndingCurve = (self.actualDegree - self.endDegree) / 360 * self.lengthCircle / self.car.get_speed()\n self.actualDegree = self.endDegree\n # use up timeAfterEndingCurve for moving straight ...\n absSpeed = self.car.get_speed()\n # self.car.velocity =\n\n alpha = self.actualDegree / 180 * math.pi\n # in this case it is a 270 - 360° turn\n self.car.pos[0] = self.curveCenter[0] + self.radius * math.sin(alpha)\n self.car.pos[1] = self.curveCenter[1] + self.radius * math.cos(alpha)\n\n if (self.actualDegree == self.endDegree):\n return [True, timeAfterEndingCurve]\n return [False]\n\n # Record Values\n def data_update(self, t):\n # Update the Data Collector with Values for Time and Position\n self.data['time'].append(copy(t))\n self.data['position'].append(self.position.copy())\n self.data['velocity'].append(self.velocity.copy())\n self.data['acceleration'].append(self.acceleration.copy())\n\n # Update the object in order\n def update(self, t, record=False):\n self.behaviour_update(t)\n self.physics_update(t)\n if record:\n self.data_update(t)\n\n # Get Object Information\n def get_info(self):\n # Return Position and Size as Default Information\n return (\n str.format(\"{:12s}\", self.name) +\n self._format_components(\"Position\", self.position) +\n self._format_components(\"Velocity\", self.velocity) +\n self._format_components(\"Acceleration\", self.acceleration)\n )\n\n def _format_components(self, name, vector):\n if not isinstance(vector, Vector2):\n vector = Vector2(vector)\n return str.format('{:<32s}', str.format(\n '{:12s} [{:.2f}, {:.2f}]',\n name,\n vector.x,\n vector.y\n ))\n\n def check_overlap(self, colliding):\n # Check if Objects are Colliding / Overlapping by Cropping infinite areas around the object away.\n # If the flow is not stopped by any check, then the objects are colliding\n \"\"\"This method checks if this object and the other object overlaps, which could be a crash for cars and stuff\"\"\"\n # Crop the left side\n if colliding.position.x + colliding.size.x < self.position.x:\n return False\n # Crop the bottom side\n if colliding.position.y + colliding.size.y < self.position.y:\n return False\n # Crop the right side\n if self.position.x + self.size.x < colliding.position.x:\n return False\n # Crop the top side\n if self.position.y + self.size.y < colliding.position.y:\n return False\n return True\n\n def draw(self, canvas, offset):\n if isinstance(canvas, type(Drawing())):\n canvas.add(Circle(\n center=self.position.draw(offset).get_value(),\n r=self.size.x,\n fill=self.color\n ))\n\n def draw_direction(self, canvas, offset):\n if isinstance(canvas, type(Drawing())):\n canvas.add(Line(\n start=self.position.draw(offset).get_value(),\n end=(self.position.x + math.sin(math.radians(self.direction)) * 600,\n self.position.y + math.cos(math.radians(self.direction)) * 600),\n fill='red',\n stroke_width=200\n ))\n\n def apply_force(self, t, magnitude, direction=None):\n if direction is None:\n direction = self.direction\n acceleration_due_to_force = Vector2((magnitude / self.mass) * t).redirect(direction)\n # Apply force in a 
direction (changed from Degrees to Radians)\n self.acceleration = self.acceleration.add(acceleration_due_to_force)\n\n def calculate_friction(self, mu):\n mu = 2.7 # How did we arrive at this figure? It was earlier defined as something close to 0.7\n return mu * self.mass * gravity # module-level gravity constant\n\n def air_resistance(self, t):\n # Apply a force in the opposite direction to travel.\n coefficient = 1 # I will put in the correct value for this later\n density = 1.1839 # Density of Air? I found to be approx. 1.2 at 25 degrees celsius\n air_resistance = (coefficient * density * self.drag_area * (self.get_speed() ** 2)) / 2\n return air_resistance\n\n\nclass Car(_EnvironmentObject):\n def __init__(self, name=None, position=None, velocity=None, acceleration=None, direction=None, size=10, mass=None,\n route=None, color=None, power=1000, velocity_max=30, acceleration_max=30, easy_physics=True,\n car_type=None):\n if not isinstance(color, str):\n color = \"red\"\n if not isinstance(mass, int) and not isinstance(mass, float):\n mass = 1200\n if not isinstance(power, int) and not isinstance(power, float):\n power = 1000\n super(Car, self).__init__(name=name, position=position, velocity=velocity, acceleration=acceleration,\n direction=direction, size=size, mass=mass, color=color)\n if not isinstance(route, list):\n route = []\n self.route = route\n\n self.power = power\n\n self.turning_angle = 0\n self.idle_time = 0\n self.traffic_light = None\n self.easy_physics = easy_physics\n self.velocity_max = velocity_max\n self.acceleration_max = acceleration_max\n self.total_time = 0\n\n def set_environment(self, environment):\n super(Car, self).set_environment(environment)\n if len(self.route) > 0:\n for _ in range(0, len(self.route)):\n self.route[_] = self.environment.road_system.node(self.route[_])\n\n if isinstance(self.route[0], RoadNode):\n self.position = self.route[0].position.copy()\n self.last = self.route[0]\n self.route = self.route[1:]\n if self.last:\n pass\n self.position = self.last.position.copy()\n\n def get_speed(self):\n return self.velocity.magnitude() # absolute speed in m/s\n\n def draw(self, canvas, offset):\n if isinstance(canvas, type(Drawing())):\n color = 'blue'\n if len(self.route) > 0:\n color = 'grey' if self.traffic_light is None \\\n else 'yellow' if self.traffic_light.amber \\\n else 'green' if self.traffic_light.green \\\n else 'red' if self.traffic_light.red \\\n else 'blue'\n\n if self.environment.environment_objects is not None:\n if self.environment.environment_objects[CarSpawner] is not None:\n for carSpawn in self.environment.environment_objects[CarSpawner]:\n if carSpawn.name == 'GentleCar':\n color = 'blue'\n elif carSpawn.name == 'AggressiveCar':\n color = 'red'\n canvas.add(Circle(\n center=self.position.draw(offset).get_value(),\n r=self.size.x,\n fill=color\n ))\n super(Car, self).draw_direction(canvas=canvas, offset=offset)\n\n def get_centripetal_force(self):\n return (self.mass * math.sqrt(self.velocity.x ** 2 + self.velocity.y ** 2)) / self.get_radius_of_turn()\n\n def get_radius_of_turn(self):\n return (self.length / 2) * math.tan(math.pi / 4 - self.turning_angle)\n\n def turn(self, turning_angle_adjustment):\n self.turning_angle -= turning_angle_adjustment\n\n def reset_turn(self):\n self.turning_angle = 0\n\n def behaviour_update(self, t):\n super(Car, self).behaviour_update(t)\n self.next_node()\n #print(self.route) # <---- Here\n if len(self.route) > 0:\n self.turning(t)\n # Obey Traffic Lights\n self.obey_traffic_light(t)\n\n\n # car waiting time\n # if 
isinstance(self.traffic_light, TrafficLight):\n # if t in range and not moving:\n # if self.traffic_light.position.distance(self.position) < self.traffic_light.distance \\\n # and self.get_speed() < 1:\n # self.idle_time += t\n\n # car friction controlling the curve\n # def curve(mu, magnitude, )\n\n # self.apply_force(t, magnitude=5000) # Direction = self.direction\n # if self.acceleration.direction():\n # Direction = self.direction\n # friction = mu * magnitude * gravity\n # return friction\n\n def physics_update(self, t):\n super(Car, self).physics_update(t)\n if self.easy_physics:\n self.acceleration.cap_self(self.acceleration_max)\n self.velocity.cap_self(self.velocity_max)\n\n def next_node(self):\n if len(self.route) > 0:\n if isinstance(self.route[0], RoadNode):\n if self.position.distance(self.route[0].position) < 10:\n\n if isinstance(self.traffic_light, TrafficLight):\n self.traffic_light.count_cars += 1\n # remove this car from the light's queue of waiting cars\n if self in self.traffic_light.cars:\n self.traffic_light.cars.remove(self)\n self.route = self.route[1:]\n self.direction = self.direction_to_next_node()\n\n #test curve data\n #print('velocity')\n #print('gravity')\n #radius = self.get_radius_of_turn()\n #print(radius)\n #mass = 1\n #curve_data = self.curve(self, self.mass, radius, self.velocity, uHf, gravity)\n self.idle_time = 0\n if len(self.route) > 0 and isinstance(self.route[0], RoadNode):\n self.traffic_light = self.route[0].traffic_light\n if isinstance(self.traffic_light, TrafficLight):\n self.traffic_light.cars.append(self)\n if self.easy_physics:\n self.acceleration.reset_self()\n self.velocity.reset_self()\n #self.velocity.redirect(self.direction)\n\n else:\n self.velocity.reset_self()\n self.acceleration.reset_self()\n\n def obey_traffic_light(self, t):\n # Test for traffic Light\n if self.traffic_light is None:\n if len(self.route) > 0:\n if isinstance(self.route[0], RoadNode):\n if self.route[0].traffic_light is not None:\n angle = self.direction - self.route[0].traffic_light.direction - 180\n while angle < 0:\n angle += 360\n angle %= 360\n if angle < 30 or angle > 360 - 30:\n self.traffic_light = self.route[0].traffic_light\n # Obey Traffic Light\n if isinstance(self.traffic_light, TrafficLight):\n # Obey that light\n # If the traffic light is red, kill the velocity and acceleration\n if self.traffic_light.red and not self.traffic_light.amber and not self.traffic_light.green:\n # self.apply_force(t, -100000)\n self.velocity.scale_self(0.1)\n self.acceleration.reset_self()\n # If the light is red and amber, apply a forward force.\n elif self.traffic_light.red and self.traffic_light.amber and not self.traffic_light.green:\n self.apply_force(t, 10000)\n # If the light is green, accelerate forwards faster\n elif not self.traffic_light.red and not self.traffic_light.amber and self.traffic_light.green:\n self.apply_force(t, 50000)\n # If the light is amber, prepare to brake by applying a braking force.\n elif self.traffic_light.amber:\n self.velocity.scale_self(0.9)\n self.acceleration.reset_self()\n else:\n self.apply_force(t, 5000)\n\n def turning(self, t):\n if self.route[0] is not None:\n self.direction = math.degrees(math.atan2(\n self.route[0].position.x - self.position.x,\n self.route[0].position.y - self.position.y\n ))\n\n def direction_to_next_node(self):\n return math.degrees(self.last.position.direction(self.route[0].position)) \\\n if len(self.route) > 0 and isinstance(self.route[0], RoadNode) \\\n else 0\n\n def 
get_info(self):\n return str.format(\"{:s}\", super(Car, self).get_info())\n\n def curve(self, mass, radius, final_velocity, uHf, gravity):\n self.curve = mass * final_velocity ** 2 / radius\n print(self.curve)\n print('curve')\n print(final_velocity < math.sqrt(uHf * gravity))\n return final_velocity < math.sqrt(uHf * gravity)\n\n # Deleting Car once goal is reached\n def __del__(car):\n del car\n\nclass Aggressive_Car(Car):\n def __init__(self, name=None, route=None, direction=None, car_type='aggressive'):\n self.car_type = car_type\n self.route_old = route\n #car = Car(name=name, route=route, direction=direction)\n super(Aggressive_Car, self).__init__(name=name, route=route, direction=direction, car_type = car_type)\n #print(self.route)\n print('__init__', car_type)\n\n def behaviour_update1(self, t):\n super(Car, self).behaviour_update(t)\n print('next_node', self.next_node())\n self.next_node()\n self.direction\n self.turning(t)\n #color = color.red\n if len(self.route):\n if self.Traffic_light is None and self.Car is None and self.Car >0:\n self.apply_force(t, 4000)\n else:\n self.obey_traffic_light(t)\n\n\nclass Gentle_Car(Car):\n def __init__(self, name=None, route=None, direction=None, car_type='gentle'):\n self.car_type = car_type\n self.route = route\n super(Gentle_Car, self).__init__(name=name, route=self.route, direction=direction, car_type = car_type)\n print('__init__', car_type)\n\n def behaviour_update1(self, t):\n #color=color.green\n print('gentle car')\n if isinstance(self.Car, Aggressive_Car):\n self.next_node()\n if self.Aggressive_Car is None and self.Traffic_light is True:\n if self.safe_distance < 30:\n self.direction = self.direction_to_next_node()\n self.turning(t)\n self.obey_traffic_light(t)\n self.velocity.scale_self(0.9)\n self.acceleration.reset_self()\n else:\n self.apply_force(t, 5000)\n\n '''def behaviour_update(self, t):\n print('beh', t)\n super(Car, self).behaviour_update(t)\n self.next_car()\n if len(self.car) > 0:\n self.turning(t)\n # Obey obstacle\n self.obey_obstacle(t)'''\n\n\nclass Pedestrian(_EnvironmentObject):\n def __init__(self, name, position=None, velocity=None, acceleration=None, direction=None, size=10, mass=None,\n route=None, color=None):\n if not isinstance(color, str):\n color = \"green\"\n if not isinstance(mass, int) and not isinstance(mass, float):\n mass = 1200\n super(Pedestrian, self).__init__(name=name, position=position, velocity=velocity, acceleration=acceleration,\n direction=direction, size=size, mass=mass, color=color)\n if not isinstance(route, list):\n route = []\n self.route = route\n self.idle_time = 0\n self.pedestrian_light = None\n\n def draw(self, canvas, offset):\n if isinstance(canvas, type(Drawing())):\n color = 'yellow'\n if len(self.route) > 0:\n color = 'grey' if self.pedestrian_light is None \\\n else 'green' if self.pedestrian_light.green \\\n else 'red' if self.pedestrian_light.red \\\n else 'green'\n canvas.add(Circle(\n center=self.position.draw(offset).get_value(),\n r=self.size.x,\n fill=color\n ))\n super(Pedestrian, self).draw_direction(canvas=canvas, offset=offset)\n\n\n def obey_pedestrian_light(self, t):\n # Test for traffic Light\n if self.pedestrian_light is None:\n if len(self.route) > 0:\n if isinstance(self.route[0], PedestrianNode):\n if self.route[0].pedestrian_light is not None:\n self.pedestrian_light = self.route[0].pedestrian_light\n\n\nclass TrafficLight(_EnvironmentObject):\n\n def __init__(self, name=None, position=None, size=None, direction=None, traffic_node=None, timings=None,\n timer=None, 
distance=None, throughput=None):\n if size is None:\n size = [20, 60]\n # Timings\n if timings is None:\n timings = [5, 1]\n if timer is None:\n timer = 0\n if distance is None:\n distance = 100\n self.cars = [4000]\n self.max_queue = 30\n if len(self.cars) > self.max_queue:\n self.max_queue = len(self.cars)\n # self.throughput = 100\n\n super(TrafficLight, self).__init__(name=name, position=position, size=size, direction=direction)\n self.traffic_node = traffic_node\n # Lights\n self.cycle = [\n [True, False, False], # Green\n [False, True, False], # Amber\n [False, False, True], # Red\n [False, True, True] # Red and Amber\n ]\n self.green, self.amber, self.red = self.cycle[0]\n self.phase = 0\n self.timings = timings\n self.timer = timer\n self.change(0)\n self.distance = distance\n\n self.count_cars = 0\n\n def set_environment(self, environment):\n super(TrafficLight, self).set_environment(environment=environment)\n if not isinstance(self.traffic_node, RoadNode):\n self.traffic_node = self.environment.road_system.node(self.traffic_node)\n if isinstance(self.traffic_node, RoadNode):\n self.traffic_node.traffic_light = self\n self.position = Vector2(\n [self.traffic_node.position.x + self.position.x, self.traffic_node.position.y + self.position.y])\n\n def set(self, phase=None):\n self.phase = (self.phase + 1 if phase is None else phase if isinstance(phase, int) else 0) % len(self.cycle)\n (self.green, self.amber, self.red) = self.cycle[self.phase % len(self.cycle)]\n\n def change(self, t):\n self.timer += t\n while self.timer >= self.timings[self.phase % len(self.timings)]:\n self.timer -= self.timings[self.phase % len(self.timings)]\n self.set()\n\n def behaviour_update(self, t):\n super(TrafficLight, self).behaviour_update(t)\n self.change(t)\n\n def car_queue_length(self):\n\n return len(self.cars)\n\n def traffic_light_decision(self, lane):\n self.lane = lane\n\n def lane_to_obey(self, car_queue_length, time):\n if self.car_queue_length() > 20 and self.car.idle_time < 10:\n pass # Increment\n else:\n pass # Do not increment\n self.count_cars += 1\n\n def get_info(self):\n return str.format(\n '{:s} {:10s} {:10s} {:10s} {:s} {:s}',\n super(TrafficLight, self).get_info(),\n 'Red' if self.red else '',\n 'Amber' if self.amber else '',\n 'Green' if self.green else '',\n 'Queue Length : ' + str(self.car_queue_length()),\n 'Cars that pass :' + str(self.count_cars),\n )\n\n def draw(self, canvas, offset, rectangle=None, r=None):\n\n if (self.direction == 90 or self.direction == 270):\n canvas.add(Rect(\n insert=((self.position.draw(offset=offset).x - self.size.x / 2),\n (self.position.draw(offset=offset).y - self.size.y / 2)),\n size=(self.size.x, self.size.y),\n fill=\"#222222\"\n ))\n\n def draw_light(active_color, inactive_color, active, location):\n canvas.add(Circle(\n fill=active_color if active else inactive_color,\n center=location,\n r=self.size.y * 0.4\n ))\n\n draw_light(\"#E6342F\", \"#350604\", self.red,\n (self.position.draw(offset=offset).x - self.size.x / 3, self.position.draw(offset=offset).y))\n draw_light(\"#E69C2F\", \"#422A05\", self.amber,\n (self.position.draw(offset=offset).x, self.position.draw(offset=offset).y))\n draw_light(\"#32AD51\", \"#0B4219\", self.green,\n (self.position.draw(offset=offset).x + self.size.x / 3, self.position.draw(offset=offset).y))\n else:\n canvas.add(Rect(\n insert=((self.position.draw(offset=offset).x - self.size.x / 2),\n (self.position.draw(offset=offset).y - self.size.y / 2)),\n size=(self.size.x, self.size.y),\n fill=\"#222222\"\n 
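# --- illustrative aside (not part of Simulator.py) --------------------------
# TrafficLight.change above advances the light by accumulating elapsed time and
# consuming one per-phase duration per step, so several phases can pass in a
# single large time step.  Hypothetical standalone sketch of that state machine:
CYCLE = ['green', 'amber', 'red', 'red+amber']
TIMINGS = [5, 1]  # durations indexed by phase modulo len(TIMINGS), as above

def advance(phase, timer, dt):
    timer += dt
    while timer >= TIMINGS[phase % len(TIMINGS)]:
        timer -= TIMINGS[phase % len(TIMINGS)]
        phase = (phase + 1) % len(CYCLE)
    return phase, timer

phase, timer = 0, 0.0
phase, timer = advance(phase, timer, 7.5)  # 5s of green + 1s of amber consumed
print(CYCLE[phase], timer)                 # -> red 1.5
# -----------------------------------------------------------------------------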
))\n\n def draw_light(active_color, inactive_color, active, location):\n canvas.add(Circle(\n fill=active_color if active else inactive_color,\n center=location,\n r=self.size.x * 0.4\n ))\n\n draw_light(\"#E6342F\", \"#350604\", self.red, (\n self.position.draw(offset=offset).x, self.position.draw(offset=offset).y - self.size.y / 3))\n draw_light(\"#E69C2F\", \"#422A05\", self.amber,\n (self.position.draw(offset=offset).x, self.position.draw(offset=offset).y))\n draw_light(\"#32AD51\", \"#0B4219\", self.green, (\n self.position.draw(offset=offset).x, self.position.draw(offset=offset).y + self.size.y / 3))\n super(TrafficLight, self).draw_direction(canvas=canvas, offset=offset)\n\n\nclass Intersection(_EnvironmentObject):\n\n def __init__(self, name=None, position=None, size=None, direction=None, lanes=None):\n super(Intersection, self).__init__(name=name, position=position, size=size, direction=direction)\n self.lanes = lanes\n\n\n\n def update(self, t):\n super(Intersection, self).update(t)\n\n\nclass Environment:\n\n def __init__(self, name, layout):\n self.name = name\n if not isinstance(layout, dict):\n layout = {layout.__class__: layout}\n if not layout.get(RoadSystem):\n layout[RoadSystem] = RoadSystem([], {})\n if not layout.get(PedestrianSystem):\n layout[PedestrianSystem] = PedestrianSystem([], {})\n self.layout = layout\n # Environment Objects\n self.environment_objects = {\n # Inanimate Objects\n Intersection: [],\n TrafficLight: [],\n CarSpawner: [],\n # Animate Objects\n Car: []\n }\n self.road_system = layout.get(RoadSystem)\n self.pedestrian_system = layout.get(PedestrianSystem)\n\n def draw(self, canvas, offset=None):\n if offset is None:\n offset = Vector2()\n for environment_object_type, environment_object_group in self.environment_objects.items():\n for environment_object in environment_object_group:\n environment_object.draw(canvas=canvas, offset=offset)\n\n def update(self, delta_time, record=False):\n output = ''\n # For each\n for environment_object_type, environment_object_group in self.environment_objects.items():\n for environment_object in environment_object_group:\n # Update\n environment_object.update(delta_time, record=record)\n # Increment Output\n output += \"\\n\" + environment_object.get_info()\n # Record Values\n if record:\n self.record_values(delta_time)\n\n return output\n\n def add_objects(self, environment_objects):\n self.total_cars_percent = 0\n if not isinstance(environment_objects, list):\n environment_objects = [environment_objects]\n for environment_object in environment_objects:\n if environment_object.car_ratio is not None:\n if (environment_object.name == 'GentleCar'):\n self.total_cars_percent += environment_object.car_ratio\n elif (environment_object.name == 'AggressiveCar'):\n self.total_cars_percent += environment_object.car_ratio\n environment_object.total_cars_percent = self.total_cars_percent\n environment_object.set_environment(self)\n\n if not self.environment_objects.get(type(environment_object)):\n self.environment_objects[type(environment_object)] = []\n self.environment_objects[type(environment_object)].append(environment_object)\n return self\n\n def get_object(self, class_name, name):\n for environment_object in self.environment_objects[class_name]:\n if environment_object.name == name:\n return environment_object\n\n def record_values(self, t):\n for environment_object_type, environment_object_group in self.environment_objects.items():\n for environment_object in environment_object_group:\n 
environment_object.data_update(t)\n\n\nclass StatisticsReporter:\n    def __init__(self, environment):\n        # History\n        self.history = {}\n        # Records\n        self.history.update({'time': []})\n        self.history.update({'collisions': []})\n        self.history.update({'intersections': []})\n        self.environment = environment\n\n    def record(self, t):\n        # Time\n        self.history['time'].append(t)\n        # Values\n        self.record_car_collisions()\n        self.record_cars_in_intersections()\n\n    def record_car_collisions(self):\n        collisions = []\n        for car_a in self.environment.environment_objects[Car]:\n            for car_b in self.environment.environment_objects[Car]:\n                if car_a is not car_b:\n                    if car_a.check_overlap(car_b):\n                        collisions.append([car_a, car_b])\n        self.history['collisions'].append(collisions)\n\n    def record_cars_in_intersections(self):\n        intersections = []\n        for car in self.environment.environment_objects[Car]:\n            for intersection in self.environment.environment_objects[Intersection]:\n                if intersection.check_overlap(car):\n                    intersections.append([intersection, car])\n        self.history['intersections'].append(intersections)\n\n    def frequency_dumper(self, metric):\n        count = []\n        for _ in metric:\n            count.append(len(_))\n        return count\n\n    def calculate_car_collisions(self):\n        # each collision was recorded twice (once per ordering), so halve it\n        return np.divide(self.frequency_dumper(self.history['collisions']), 2)\n\n    def calculate_cars_in_intersection(self):\n        return self.frequency_dumper(self.history['intersections'])\n\n    def get_data_with_filter(self, set_name, name_filter):\n        result = []\n        intersection_collisions = self.history[set_name]\n        for tick in intersection_collisions:\n            tick_result = []\n            for collision in tick:\n                if collision[0].name == name_filter:\n                    tick_result.append(collision)\n            result.append(tick_result)\n        return result\n\n    def plot_graph_of_collisions(self):\n        plt.plot(self.calculate_car_collisions())\n        plt.show()\n\n    def plot_graph_of_collisions_vs_intersection(self, intersection_name):\n        collisions = []\n        # For Each Frame\n        for tick in range(0, len(self.history['time'])):\n            # Register the Intersection and Collision Snapshot\n            intersection_snapshot = self.history['intersections'][tick]\n            collisions_snapshot = self.history['collisions'][tick]\n            # For Every Intersection Record\n            collisions_in_tick = []\n            for intersection_record in intersection_snapshot:\n                # Data\n                intersection = intersection_record[0]\n                car = intersection_record[1]\n                # Check Intersection is Useful\n                if intersection.name == intersection_name:\n                    # Check if the Car in the Intersection was involved in any collisions\n                    collision = None\n                    for collision_record in collisions_snapshot:\n                        # Data\n                        car_a = collision_record[0]\n                        car_b = collision_record[1]\n                        # Check if it collided\n                        if car_a.name == car.name or car_b.name == car.name:\n                            collision = collision_record\n                    if collision is not None:\n                        collisions_in_tick.append(collision)\n            collisions.append(collisions_in_tick)\n\n        count_collisions = self.frequency_dumper(collisions)\n        count_cars_in_intersection = self.frequency_dumper(\n            self.get_data_with_filter('intersections', intersection_name))\n\n        # Plot\n        plt.scatter(count_cars_in_intersection, count_collisions)\n\n    def result(self):\n        # Plot\n        intersection_names = [\"First Intersection\", \"Second Intersection\"]\n        title = \"Collisions in\"\n        for intersection_name in intersection_names:\n            title += \" \" + intersection_name + \",\"\n            self.plot_graph_of_collisions_vs_intersection(intersection_name)\n        plt.show()\n\n        title = title[:-1]\n        plt.title(title)\n        # axis labels match the scatter above: x is cars in the intersection,\n        # y is collisions\n        plt.xlabel('No of cars in Intersection')\n        plt.ylabel('No of collisions')\n        
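# --- illustrative aside (not part of Simulator.py) --------------------------
# record_car_collisions above visits every ordered pair (a, b) with a is not b,
# so each physical collision is appended twice ([a, b] and [b, a]); that is why
# calculate_car_collisions divides the per-tick counts by two.  An equivalent
# single-count formulation uses unordered pairs:
from itertools import combinations

def count_collisions(cars, overlaps):
    # `overlaps` stands in for a symmetric predicate like Car.check_overlap
    return sum(1 for a, b in combinations(cars, 2) if overlaps(a, b))
# -----------------------------------------------------------------------------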
plt.savefig(self.environment.name + \"_collisions.pdf\")\n\n\nclass FileHandler:\n\n def __init__(self, directory='output'):\n # Init Data\n self.directory = self.fix_directory(directory)\n self.files = []\n\n # Create Directory\n if not os.path.isdir(directory):\n os.mkdir(directory)\n self.update_files()\n\n @staticmethod\n def fix_directory(directory):\n if not directory.endswith('/'):\n directory += '/'\n return directory\n\n def update_files(self):\n self.files = os.listdir(self.directory)\n\n def list_files(self, display=False):\n self.update_files()\n if display:\n for file in self.files:\n print(file)\n return self.files\n\n def write_file(self, filename='', file_contents='', sub_directory=''):\n file = open(self.directory + self.fix_directory(sub_directory) + filename, 'w')\n file.write(file_contents)\n\n def remove_files(self, files, display=False):\n self.update_files()\n if files is None:\n files = self.files\n for file in files:\n if display:\n print('Menus ' + file)\n os.remove(file)\n self.update_files()\n\n\nclass CarSpawner(_EnvironmentObject): # new car arrivals to grow the queue length\n def __init__(self, name, node, direction, route=None, safe_distance=None, car_ratio=None, easy_physics=True):\n self.route = route\n if safe_distance is None:\n safe_distance = 30\n self.node = node\n self.car_ratio = car_ratio\n super(CarSpawner, self).__init__(name=name, position=self.node.position, direction=direction, car_ratio=car_ratio)\n self.cars = []\n self.safe_distance = safe_distance\n self.easy_physics = easy_physics\n try:\n pass\n except:\n print('test')\n pass\n\n\n def behaviour_update(self, t):\n super(CarSpawner, self).behaviour_update(t)\n\n # If no cars have been spawned\n if len(self.cars) <= 0:\n # Spawn a new car\n self.spawn_another_car(self.route)\n else:\n # Otherwise, get the last car that was spawned\n last_car = self.cars[-1]\n\n # Check that the car is actually a car (this should always be the case)\n if self.car_ratio is not None:\n try:\n count_carspwan = len(self.environment.environment_objects[CarSpawner]) - 1\n total_cars_percent = self.environment.environment_objects[CarSpawner][count_carspwan].total_cars_percent\n if total_cars_percent == 100:\n cars_running = round((AVHV_total_cars * self.car_ratio) / 100)\n else:\n try:\n cars_num = (self.car_ratio / total_cars_percent) * 100\n cars_running = round((AVHV_total_cars * cars_num) / 100)\n except TypeError as e:\n print(str(e))\n except:\n print('error')\n except:\n pass\n\n if (cars_running is not None and len(self.cars) < cars_running):\n if isinstance(last_car, Car):\n # Set the distance to the car as the distance between the spawner and the car\n distance_to_last_car = self.position.distance(last_car.position)\n # If the distance to the car is greater than the minimum safe distance, it's safe to spawn a car\n if distance_to_last_car >= self.safe_distance:\n # Spawn another car\n self.spawn_another_car(self.route)\n else:\n if isinstance(last_car, Car):\n # Set the distance to the car as the distance between the spawner and the car\n distance_to_last_car = self.position.distance(last_car.position)\n # If the distance to the car is greater than the minimum safe distance, it's safe to spawn a car\n if distance_to_last_car >= self.safe_distance:\n # Spawn another car\n self.spawn_another_car(self.route)\n\n def spawn_another_car(self, route):\n if route is not None:\n route = route\n if len(route) > 0:\n for _ in range(0, len(route)):\n route[_] = self.environment.road_system.node(route[_])\n if 
isinstance(route[0], RoadNode):\n self.position = route[0].position.copy()\n self.last = route[0]\n #self.route = route[1:]\n if self.last:\n pass\n self.position = self.last.position.copy()\n else:\n route = [self.node]\n for i in range(0, 4):\n last = route[-1]\n if isinstance(last, RoadNode):\n destinations = last.destination_nodes.copy()\n visited = True\n while visited and len(destinations) > 0:\n visited = False\n destination = destinations[randint(0, len(destinations) - 1)]\n if isinstance(destination, RoadNode):\n for node in route:\n if node == destination:\n visited = True\n destinations.remove(node)\n if not visited:\n route.append(destination)\n if visited:\n break\n self.cars.append(Car(\n name=self.name + \" : \" + str(len(self.cars) + 1),\n position=self.node.position,\n route=route\n ))\n self.environment.add_objects(self.cars[-1])\n\n\nclass Simulation:\n\n def __init__(self, environment, time_end=10, time_increment=0.1, debugging=False):\n # Environment\n self.environment = environment\n # Timing Control\n self.__end_time = time_end\n self.__time_increment = time_increment\n self.__debug_counter = 0\n self.__current_time = 0\n # Statistics\n self.debugging = debugging\n self.__reporter = None\n # Drawings\n self.__frame = None\n self.__scene = Drawing()\n self.__min_bound = Vector2([0, 0])\n self.__max_bound = Vector2([0, 0])\n # File Path\n self.__drawing_directory = \"drawings/\"\n self.__drawing_prefix = \"svgwriter_frame_\"\n # Start\n self.__start_simulation()\n\n def __calculate_blank(self):\n if isinstance(self.environment, Environment):\n layout_nodes = self.environment.road_system.nodes + self.environment.pedestrian_system.nodes\n if len(layout_nodes) > 0:\n self.__min_bound = layout_nodes[0].position.copy()\n self.__max_bound = layout_nodes[0].position.copy()\n else:\n self.__min_bound = Vector2(-600, -600)\n self.__max_bound = Vector2(600, 600)\n for node in layout_nodes:\n # Min X\n if node.position.x < self.__min_bound.x:\n self.__min_bound.x = node.position.x\n # Min Y\n if node.position.y < self.__min_bound.y:\n self.__min_bound.y = node.position.y\n # Max X\n if node.position.x > self.__max_bound.x:\n self.__max_bound.x = node.position.x\n # Max Y\n if node.position.y > self.__max_bound.y:\n self.__max_bound.y = node.position.y\n border = 100\n self.__min_bound.x -= border\n self.__min_bound.y -= border\n self.__max_bound.x += border\n self.__max_bound.y += border\n\n self.__scene = Drawing(\n \"blank.svg\",\n size=[\n abs(self.__max_bound.x - self.__min_bound.x),\n abs(self.__max_bound.y - self.__min_bound.y)\n ]\n )\n self.__scene.add(Polygon(\n [\n # Bottom Left\n [0, 0],\n # Top Left\n [0, abs(self.__max_bound.y - self.__min_bound.y)],\n # Top Right\n [abs(self.__max_bound.x - self.__min_bound.x), abs(self.__max_bound.y - self.__min_bound.y)],\n # Bottom Right\n [abs(self.__max_bound.x - self.__min_bound.x), 0],\n ],\n fill='white'\n ))\n\n def __calculate_layout(self):\n self.__calculate_blank()\n for layout_type, layout in self.environment.layout.items():\n layout.draw_edges(canvas=self.__scene, offset=self.__min_bound)\n for layout_type, layout in self.environment.layout.items():\n layout.draw_nodes(canvas=self.__scene, offset=self.__min_bound)\n for layout_type, layout in self.environment.layout.items():\n layout.draw_text(canvas=self.__scene, offset=self.__min_bound)\n\n def __draw_current(self, timestep):\n # New Canvas\n self.__frame = self.__scene.copy()\n self.__frame.filename = str.format(\n \"{:s}{:s}{:.2f}{:s}\",\n self.__drawing_directory, 
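# --- illustrative aside (not part of Simulator.py) --------------------------
# CarSpawner.spawn_another_car above grows a default route by repeatedly picking
# a random destination that has not been visited yet, stopping once every
# candidate from the current node was already used.  The same idea on a plain
# adjacency dict (hypothetical graph shape, not the RoadSystem API):
from random import choice

def random_route(graph, start, max_hops=4):
    route = [start]
    for _ in range(max_hops):
        candidates = [n for n in graph.get(route[-1], []) if n not in route]
        if not candidates:
            break  # every neighbour already visited: stop extending the route
        route.append(choice(candidates))
    return route

print(random_route({'A': ['B'], 'B': ['A', 'C'], 'C': []}, 'A'))  # ['A', 'B', 'C']
# -----------------------------------------------------------------------------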
self.__drawing_prefix, timestep, \".svg\"\n )\n # Draw Elements and Save\n self.environment.draw(canvas=self.__frame, offset=self.__min_bound)\n self.__frame.save()\n\n def __start_simulation(self):\n self.__calculate_layout()\n if isinstance(self.environment, Environment):\n self.__reporter = StatisticsReporter(self.environment)\n self.__debug_counter = 0\n self.__current_time = 0\n # Drawing Prep\n for frame in os.listdir(self.__drawing_directory):\n os.remove(self.__drawing_directory + frame)\n self.__draw_current(self.__current_time)\n # Canvas and Line\n while self.__current_time < self.__end_time:\n # Calculate Time Change\n debug = self.environment.update(delta_time=self.__time_increment, record=False)\n debug = str.format(\"\\nTick : {:3.3f}\\t\", self.__current_time) + debug\n # Check if Debug should be run\n if self.__debug_counter <= self.__current_time:\n self.__debug_counter += self.__time_increment\n # Data RecordingpendingCars\n self.environment.record_values(self.__current_time)\n self.__reporter.record(self.__debug_counter)\n if self.debugging:\n pass\n #print(debug)\n # Finish Data Reporting\n # Increment at end\n self.__current_time = self.__current_time + self.__time_increment\n self.__draw_current(self.__current_time)\n\n\nif __name__ == '__main__':\n print(\"Ekene's Simulation, please create a Simulation and add and Environment\")","sub_path":"AVHVCONTROL/Simulator.py","file_name":"Simulator.py","file_ext":"py","file_size_in_byte":58670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"317850059","text":"# Author: Guðjón Ingi Valdimarsson\n# Date: 09.03.2020\n\nclass DLL():\n class _Node():\n def __init__(self, tree_node=None, next=None, prev=None):\n self.tree_node = tree_node\n self.next = next\n self.prev = prev\n\n def __init__(self):\n self.head = self._Node()\n self.tail = self._Node()\n self.tail.prev = self.head\n self.head.next = self.tail\n\n def append(self, tree_node):\n new_node = self._Node(tree_node, self.tail, self.tail.prev)\n self.tail.prev.next = new_node\n self.tail.prev = new_node\n\n def insert_ordered(self, tree_node):\n self.append(tree_node)\n walker = self.tail.prev\n while walker.prev != self.head and walker.tree_node < walker.prev.tree_node:\n temp = walker.tree_node\n walker.tree_node = walker.prev.tree_node\n walker.prev.tree_node = temp\n walker = walker.prev\n \n def find_child(self, name):\n walker = self.head.next\n while walker != self.tail:\n if walker.tree_node.name == name:\n return walker\n walker = walker.next\n return False\n \n def rm_child(self, node):\n node.prev.next = node.next\n node.next.prev = node.prev\n\n def __str__(self):\n ret_str = \"\"\n walker = self.head.next\n while walker != self.tail:\n ret_str += \"\\n\" + walker.tree_node.name\n walker = walker.next\n return ret_str\n\nclass TreeNode():\n def __init__(self, name = \"\"):\n self.name = name\n self.children = DLL()\n \n def add_subdir(self, name):\n self.children.insert_ordered(TreeNode(name))\n \n def find_dir(self, name):\n return self.children.find_child(name)\n \n def __lt__(self, other):\n return self.name < other.name\n\n def rm_dir(self, node):\n self.children.rm_child(node)\n\n\ndef run_commands_on_tree(tree):\n print(\" current directory: \" + tree.name)\n while True:\n user_input = input()\n command = user_input.split()\n if command[0] == \"mkdir\":\n print(\" Making subdirectory \" + command[1])\n if tree.find_dir(command[1]):\n print(\" Subdirectory with same name already in directory\")\n 
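# --- illustrative aside (not part of directories_program.py) -----------------
# DLL.insert_ordered above appends at the tail and then bubbles the new value
# towards the head by swapping payloads (tree_node) between neighbouring nodes,
# an insertion-sort step; the links themselves never change.  Usage sketch with
# the classes defined in this file:
#
#   lst = DLL()
#   for name in ["docs", "bin", "src"]:
#       lst.insert_ordered(TreeNode(name))
#   print(lst)   # names print in sorted order: bin, docs, src
# -----------------------------------------------------------------------------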
else:\n tree.add_subdir(command[1])\n\n elif command[0] == \"ls\":\n print(\" Listing the contents of current directory, \" + str(tree.name) + str(tree.children))\n\n elif command[0] == \"cd\":\n print(\" switching to directory \" + command[1])\n if command[1] == \"..\":\n return\n else:\n next_dir = tree.find_dir(command[1])\n if next_dir:\n run_commands_on_tree(next_dir.tree_node)\n else: \n print(\" No folder with that name exists\")\n print(\" current directory: \" + str(tree.name))\n\n elif command[0] == \"rm\":\n print(\" removing directory \" + command[1])\n child_dir = tree.find_dir(command[1])\n if child_dir:\n tree.rm_dir(child_dir)\n print(\" directory successfully removed!\")\n else:\n print(\" No folder with that name exists\")\n else:\n print(\" command not recognized\")\n\n\ndef run_directories_program():\n run_commands_on_tree(TreeNode(\"root\"))\n print(\"Exiting directory program\")\n\n\nif __name__ == \"__main__\":\n run_directories_program()\n \n","sub_path":"Assignment_4/DirectoryTreeBase/directories_program.py","file_name":"directories_program.py","file_ext":"py","file_size_in_byte":3534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"518861178","text":"# 王泽昊\n# 1700010718\n# 5 min\n# Computing 10.4\n\ndef dict_intersect(dic1,dic2):\n s1 = set(iter(dic1))\n s2 = set(iter(dic2))\n s0 = s1.intersection(s2)\n dic = {}\n for x in s0:\n dic[x] = dic2[x]\n return dic\n\ndef dict_union(dic1,dic2):\n s1 = set(iter(dic1))\n s2 = set(iter(dic2))\n s0 = s1.intersection(s2)\n s = s1.union(s2)\n dic = {}\n for x in s:\n if x in s0:\n dic[x] = dic2[x]\n elif x in s1:\n dic[x] = dic1[x]\n else:\n dic[x] = dic2[x]\n return dic\n\ndic1 = {1:2,2:3,'a':6,'asd':'www'}\ndic2 = {1:5,'a':6,'asd':'w','b':0}\nprint('dic1:',dic1)\nprint('dic2:',dic2)\nprint('dict_intersect(dic1,dic2):',dict_intersect(dic1,dic2))\nprint('dict_union(dic1,dic2):',dict_union(dic1,dic2))\n","sub_path":"计算概论/作业/0524/10.4.py","file_name":"10.4.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"617667087","text":"import numpy as np\nimport netCDF4\nimport methods as m\nimport pylab as pl\n\n# In this file the influence of the wind stress is evaluated\n# 2 model runs with wind stress and no wind stress are evaluated a different \n# stations.\n\n# Loading files \n# salt\n# Arkona Station\narc_r01_sa = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V01R01/arcona_v01r01_full.nc')\narc_r02_sa = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V01R02/arcona_v01r02_full.nc')\narc_v03r01 = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V03R01/arcona_v03r01_full.nc')\narc_v03r02 = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V03R02/arcona_v03r02_full.nc')\narc_v05r01_sa = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V05R01/arcona_v05r01_full.nc')\narc_v05r02_sa = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V05R02/arcona_V05R02_full.nc')\narc_v06r01_sa = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V06R01/arcona_v06r01_full.nc')\n# Bornholm Station\nborn_r01_sa = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V01R01/bornholm_v01r01_full.nc')\nborn_r02_sa = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V01R02/bornholm_v01r02_full.nc')\nborn_v03r01 = 
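# --- illustrative aside (not part of 10.4.py) --------------------------------
# dict_intersect / dict_union above take the value from dic2 whenever a key is
# present in both mappings.  The same behaviour in two comprehension-style
# one-liners (renamed so they do not shadow the originals):

def dict_intersect_short(dic1, dic2):
    return {k: dic2[k] for k in dic1.keys() & dic2.keys()}

def dict_union_short(dic1, dic2):
    return {**dic1, **dic2}  # the right-hand dict wins on shared keys

assert dict_intersect_short({1: 2, 'a': 6}, {1: 5, 'b': 0}) == {1: 5}
assert dict_union_short({1: 2, 'a': 6}, {1: 5, 'b': 0}) == {1: 5, 'a': 6, 'b': 0}
# -----------------------------------------------------------------------------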
netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V03R01/bornholm_v03r01_full.nc')\nborn_v03r02 = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V03R02/bornholm_v03r02_full.nc')\nborn_v05r01_sa = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V05R01/bornholm_v05r01_full.nc')\nborn_v05r02_sa = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V05R02/bornholm_V05R02_full.nc')\nborn_v06r01_sa = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V06R01/bornholm_v06r01_full.nc')\n# Gotland Station\ngot_r01_sa = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V01R01/gotland_v01r01_full.nc')\ngot_r02_sa = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V01R02/gotland_v01r02_full.nc')\ngot_v03r01 = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V03R01/gotland_v03r01_full.nc')\ngot_v03r02 = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V03R02/gotland_v03r02_full.nc')\ngot_v05r01_sa = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V05R01/gotland_v05r01_full.nc')\ngot_v05r02_sa = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V05R02/gotland_V05R02_full.nc')\ngot_v06r01_sa = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V06R01/gotland_v06r01_full.nc')\n# Bothnian Bay\nboth_r01_sa = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V01R01/both_v01r01_full.nc')\nboth_r02_sa = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V01R02/both_v01r02_full.nc')\nboth_v03r01 = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V03R01/both_v03r01_full.nc')\nboth_v03r02 = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V03R02/both_v03r02_full.nc')\nboth_v05r01_sa = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V05R01/both_v05r01_full.nc')\nboth_v05r02_sa = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V05R02/both_V05R02_full.nc')\nboth_v06r01_sa = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V06R01/both_v06r01_full.nc')\n\n# eta_t\n# Arkona Station\narc_r01 = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V01R01/eta_arc_v01r01.nc')\narc_r02 = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V01R02/eta_arc_v01r02.nc')\narc_v05r01 = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V05R01/eta_arc_v05r01.nc')\narc_v05r02 = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V05R02/eta_arc_v05r02.nc')\n# Bornholm Station\nborn_r01 = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V01R01/eta_born_v01r01.nc')\nborn_r02 = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V01R02/eta_born_v01r02.nc')\nborn_v05r01 = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V05R01/eta_born_v05r01.nc')\nborn_v05r02 = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V05R02/eta_born_v05r02.nc')\n# Gotland Station\ngot_r01 = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V01R01/eta_got_v01r01.nc')\ngot_r02 = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V01R02/eta_got_v01r02.nc')\ngot_v05r01 = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V05R01/eta_got_v05r01.nc')\ngot_v05r02 = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V05R02/eta_got_v05r02.nc')\n# Bothnian Bay\nboth_r01 = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V01R01/eta_both_v01r01.nc')\nboth_r02 = 
netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V01R02/eta_both_v01r02.nc')\nboth_v05r01 = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V05R01/eta_both_v05r01.nc')\nboth_v05r02 = netCDF4.Dataset('/work/boergel/modellauf/auswertung_nc_files/V05R02/eta_both_v05r02.nc')\n\n# Extract variables\n# Arkona Station\narc_r01_salt = arc_r01_sa.variables['salt'][:]\narc_r02_salt = arc_r02_sa.variables['salt'][:]\narc_v03r01_salt = arc_v03r01.variables['salt'][:]\narc_v03r02_salt = arc_v03r02.variables['salt'][:]\narc_v05r01_salt = arc_v05r01_sa.variables['salt'][:]\narc_v05r02_salt = arc_v05r02_sa.variables['salt'][:]\narc_v06r01_salt = arc_v06r01_sa.variables['salt'][:]\n# Bornholm Station\nborn_r01_salt = born_r01_sa.variables['salt'][:]\nborn_r02_salt = born_r02_sa.variables['salt'][:]\nborn_v03r01_salt = born_v03r01.variables['salt'][:]\nborn_v03r02_salt = born_v03r02.variables['salt'][:]\nborn_v05r01_salt = born_v05r01_sa.variables['salt'][:]\nborn_v05r02_salt = born_v05r02_sa.variables['salt'][:]\nborn_v06r01_salt = born_v06r01_sa.variables['salt'][:]\n# Gotland Station\ngot_r01_salt = got_r01_sa.variables['salt'][:]\ngot_r02_salt = got_r02_sa.variables['salt'][:]\ngot_v03r01_salt = got_v03r01.variables['salt'][:]\ngot_v03r02_salt = got_v03r02.variables['salt'][:]\ngot_v05r01_salt = got_v05r01_sa.variables['salt'][:]\ngot_v05r02_salt = got_v05r02_sa.variables['salt'][:]\ngot_v06r01_salt = got_v06r01_sa.variables['salt'][:]\n# Bothnian Bay\nboth_r01_salt = both_r01_sa.variables['salt'][:]\nboth_r02_salt = both_r02_sa.variables['salt'][:]\nboth_v03r01_salt = both_v03r01.variables['salt'][:]\nboth_v03r02_salt = both_v03r02.variables['salt'][:]\nboth_v05r01_salt = both_v05r01_sa.variables['salt'][:]\nboth_v05r02_salt = both_v05r02_sa.variables['salt'][:]\nboth_v06r01_salt = both_v06r01_sa.variables['salt'][:]\n# mean\n# Arkona Station\narc_r01_salt_mean = np.mean(arc_r01_salt, axis = 1)\narc_r02_salt_mean = np.mean(arc_r02_salt, axis = 1)\narc_v03r01_salt_mean = np.mean(arc_v03r01_salt, axis = 1)\narc_v03r02_salt_mean = np.mean(arc_v03r02_salt, axis = 1)\narc_v05r01_salt_mean = np.mean(arc_v05r01_salt, axis = 1)\narc_v05r02_salt_mean = np.mean(arc_v05r02_salt, axis = 1)\narc_v06r01_salt_mean = np.mean(arc_v06r01_salt, axis = 1)\n# Bornholm Station\nborn_r01_salt_mean = np.mean(born_r01_salt, axis = 1)\nborn_r02_salt_mean = np.mean(born_r02_salt, axis = 1)\nborn_v03r01_salt_mean = np.mean(born_v03r01_salt, axis = 1)\nborn_v03r02_salt_mean = np.mean(born_v03r02_salt, axis = 1)\nborn_v05r01_salt_mean = np.mean(born_v05r01_salt, axis = 1)\nborn_v05r02_salt_mean = np.mean(born_v05r02_salt, axis = 1)\nborn_v06r01_salt_mean = np.mean(born_v06r01_salt, axis = 1)\n# Gotland Station\ngot_r01_salt_mean = np.mean(got_r01_salt, axis = 1)\ngot_r02_salt_mean = np.mean(got_r02_salt, axis = 1)\ngot_v03r01_salt_mean = np.mean(got_v03r01_salt, axis = 1)\ngot_v03r02_salt_mean = np.mean(got_v03r02_salt, axis = 1)\ngot_v05r01_salt_mean = np.mean(got_v05r01_salt, axis = 1)\ngot_v05r02_salt_mean = np.mean(got_v05r02_salt, axis = 1)\ngot_v06r01_salt_mean = np.mean(got_v06r01_salt, axis = 1)\n# Bothnian Station\nboth_r01_salt_mean = np.mean(both_r01_salt, axis = 1)\nboth_r02_salt_mean = np.mean(both_r02_salt, axis = 1)\nboth_v03r01_salt_mean = np.mean(both_v03r01_salt, axis = 1)\nboth_v03r02_salt_mean = np.mean(both_v03r02_salt, axis = 1)\nboth_v05r01_salt_mean = np.mean(both_v05r01_salt, axis = 1)\nboth_v05r02_salt_mean = np.mean(both_v05r02_salt, axis = 1)\nboth_v06r01_salt_mean = 
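# --- illustrative aside (not part of sea_level_evaluation.py) ----------------
# The *_salt arrays above appear to be indexed (time, depth, y, x), so
# np.mean(..., axis=1) collapses the depth axis and leaves vertically averaged
# salinity per time step.  Tiny shape check with synthetic data:
import numpy as np

salt = np.arange(2 * 3 * 1 * 1, dtype=float).reshape(2, 3, 1, 1)  # (time, depth, y, x)
salt_mean = np.mean(salt, axis=1)
print(salt_mean.shape)     # (2, 1, 1): the depth axis is averaged away
print(salt_mean[0, 0, 0])  # 1.0 == mean of the depth values 0, 1, 2
# -----------------------------------------------------------------------------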
np.mean(both_v06r01_salt, axis = 1)\n# Difference\n# Arkona Station\ndiff_arc_v05v01_nover_salt = arc_v05r01_salt[0:11322,:,0,0] - arc_r01_salt[0:11322,:,0,0]\ndiff_arc_r01r02_salt = arc_r01_salt_mean - arc_r02_salt_mean\ndiff_arc_v01v05_salt = arc_r01_salt_mean[0:11322,0,0] - arc_v05r01_salt_mean[0:11322,0,0]\ndiff_arc_v05_salt = arc_v05r01_salt_mean[0:9131,0,0] - arc_v05r02_salt_mean[0:9131,0,0]\ndiff_arc_v03_salt = arc_v03r01_salt_mean - arc_v03r02_salt_mean\ndiff_arc_v01v03_salt = arc_r01_salt_mean - arc_v03r01_salt_mean\ndiff_arc_v01v03_r02_salt = arc_r02_salt_mean - arc_v03r02_salt_mean\ndiff_arc_v01v05_r02_salt = arc_r02_salt_mean[0:11322,0,0] - arc_v05r02_salt_mean[0:11322,0,0]\ndiff_arc_v01v06_salt = arc_r01_salt_mean - arc_v06r01_salt_mean\n# Bornholm Station\ndiff_born_v05v01_nover_salt = born_v05r01_salt[0:9131,:,0,0] - born_r01_salt[0:9131,:,0,0]\ndiff_born_v01v05_salt = born_r01_salt_mean[0:11322,0,0] - born_v05r01_salt_mean[0:11322,0,0]\ndiff_born_v05_salt = born_v05r01_salt_mean[0:9131,0,0] - born_v05r02_salt_mean[0:9131,0,0]\ndiff_born_r01_v05r02_salt = born_v05r02_salt_mean[0:9131,0,0] - born_r01_salt_mean[0:9131,0,0]\ndiff_born_r01_v03r02_salt = born_v03r02_salt_mean - born_r01_salt_mean\ndiff_born_v01v03_salt = born_r01_salt_mean - born_v03r01_salt_mean\ndiff_born_r01r02_salt = born_r01_salt_mean - born_r02_salt_mean\ndiff_born_v03_salt = born_v03r01_salt_mean - born_v03r02_salt_mean\ndiff_born_v01v03_r02_salt = born_r02_salt_mean - born_v03r02_salt_mean\ndiff_born_v01v05_r02_salt = born_r02_salt_mean[0:11322,0,0] - born_v05r02_salt_mean[0:11322,0,0]\ndiff_born_v01v06_salt = born_r01_salt_mean - born_v06r01_salt_mean\n\n# Gotland Station\ndiff_got_v05v01_nover_salt = got_v05r01_salt[0:9131,:,0,0] - got_r01_salt[0:9131,:,0,0]\ndiff_got_r01r02_salt = got_r01_salt_mean - got_r02_salt_mean\ndiff_got_v01v05_salt = got_r01_salt_mean[0:11322,0,0] - got_v05r01_salt_mean[0:11322,0,0]\ndiff_got_r01_v05r02_salt = got_v05r02_salt_mean[0:9131,0,0] - got_r01_salt_mean[0:9131,0,0]\ndiff_got_v05_salt = got_v05r01_salt_mean[0:9131,0,0] - got_v05r02_salt_mean[0:9131,0,0]\ndiff_got_v01v03_salt = got_r01_salt_mean - got_v03r01_salt_mean\ndiff_got_v03_salt = got_v03r01_salt_mean - got_v03r02_salt_mean\ndiff_got_v01v03_r02_salt = got_r02_salt_mean - got_v03r02_salt_mean\ndiff_got_v01v05_r02_salt = got_r02_salt_mean[0:11322,0,0] - got_v05r02_salt_mean[0:11322,0,0]\ndiff_got_v01v06_salt = got_r01_salt_mean - got_v06r01_salt_mean\n\n# Bothnian Station\ndiff_both_v05v01_nover_salt = both_v05r01_salt[0:9131,:,0,0] - both_r01_salt[0:9131,:,0,0]\ndiff_both_r01r02_salt = both_r01_salt_mean - both_r02_salt_mean\ndiff_both_v01v05_salt = both_r01_salt_mean[0:11322,0,0] - both_v05r01_salt_mean[0:11322,0,0]\ndiff_both_v05_salt = both_v05r01_salt_mean[0:9131,0,0] - both_v05r02_salt_mean[0:9131,0,0]\ndiff_both_v01v03_salt = both_r01_salt_mean - both_v03r01_salt_mean\ndiff_both_v03_salt = both_v03r01_salt_mean - both_v03r02_salt_mean\ndiff_both_v01v03_r02_salt = both_r02_salt_mean - both_v03r02_salt_mean\ndiff_both_v01v05_r02_salt = both_r02_salt_mean[0:11322,0,0] - both_v05r02_salt_mean[0:11322,0,0]\ndiff_both_v01v06_salt = both_r01_salt_mean - both_v06r01_salt_mean\n\n# eta_t\n# Arkona Station\narc_r01_eta_t = arc_r01.variables['eta_t'][:]\narc_r02_eta_t = arc_r02.variables['eta_t'][:]\narc_v05r01_eta_t = arc_v05r01.variables['eta_t'][:]\narc_v05r02_eta_t = arc_v05r02.variables['eta_t'][:]\n# Bornholm Station\nborn_r01_eta_t = born_r01.variables['eta_t'][:]\nborn_r02_eta_t = 
born_r02.variables['eta_t'][:]\nborn_v05r01_eta_t = born_v05r01.variables['eta_t'][:]\nborn_v05r02_eta_t = born_v05r02.variables['eta_t'][:]\n# Gotland Station\ngot_r01_eta_t = got_r01.variables['eta_t'][:]\ngot_r02_eta_t = got_r02.variables['eta_t'][:]\ngot_v05r01_eta_t = got_v05r01.variables['eta_t'][:]\ngot_v05r02_eta_t = got_v05r02.variables['eta_t'][:]\n# Bothnian Bay\nboth_r01_eta_t = both_r01.variables['eta_t'][:]\nboth_r02_eta_t = both_r02.variables['eta_t'][:]\nboth_v05r01_eta_t = both_v05r01.variables['eta_t'][:]\nboth_v05r02_eta_t = both_v05r02.variables['eta_t'][:]\n\n# Difference\n# Arkona\ndiff_arc_r01r02 = arc_r01_eta_t - arc_r02_eta_t\ndiff_arc_r01v05r01 = arc_r01_eta_t[0:11322,:,:] - arc_v05r01_eta_t[0:11322,:,:]\n#diff_arc_r01v05r02 = arc_r01_eta_t - arc_v05r02_eta_t\n#diff_arc_v05r01r02 = arc_v05r01_eta_t - arc_v05r02_eta_t\n# Bornholm\ndiff_born_r01r02 = born_r01_eta_t - born_r02_eta_t\ndiff_born_r01v05r01 = born_r01_eta_t[0:11322,:,:] - born_v05r01_eta_t[0:11322,:,:]\n#diff_born_r01v05r02 = born_r01_eta_t - born_v05r02_eta_t\n#diff_born_v05r01r02 = born_v05r01_eta_t - born_v05r02_eta_t\n# Gotland\ndiff_got_r01r02 = got_r01_eta_t - got_r02_eta_t\ndiff_got_r01v05r01 = got_r01_eta_t[0:11322,:,:] - got_v05r01_eta_t[0:11322,:,:]\n#diff_got_r01v05r02 = got_r01_eta_t - got_v05r02_eta_t\n#diff_got_v05r01r02 = got_v05r01_eta_t - got_v05r02_eta_t\n# Bothnian\ndiff_both_r01r02 = both_r01_eta_t - both_r02_eta_t\ndiff_both_r01v05r01 = both_r01_eta_t[0:11322,:,:] - both_v05r01_eta_t[0:11322,:,:]\n#diff_both_r01v05r02 = both_r01_eta_t - both_v05r02_eta_t\n#diff_both_v05r01r02 = both_v05r01_eta_t - both_v05r02_eta_t\n\n###################\n# Plotting\n###################\n\n# Sea level over time\nf, (ax1,ax2,ax3,ax4) = pl.subplots(4,figsize=(25.6, 14.4))\n# Arkona\nax1.plot(arc_r01_eta_t[:,0,0], label='V01R01')\nax1.plot(arc_r02_eta_t[:,0,0], label='V01R02')\nax1.plot(arc_v05r01_eta_t[:,0,0], label='V05R01')\nax1.plot(arc_v05r02_eta_t[:,0,0], label='V05R02')\nax1.set_ylabel('Sealevel in 
meter')\nax1.set_title('Arkona')\nax1.legend()\nax_1.legend()\n# Bornholm\nax2.plot(diff_born_r01r02[:,0,0], label='V01R01 - V01R02')\nax_2 = ax2.twinx()\nax_2.plot(diff_born_r01r02_salt[:,0,0],'--r', label='Diff Salt Mean')\n#ax2.plot(diff_born_r01v05r01[:,0,0], label='V01R01 - V05R01')\n#ax2.plot(diff_born_r01v05r02[:,0,0], label='V05R01')\n#ax2.plot(diff_born_v05r01r02[:,0,0], label='V05R02')\nax2.set_ylabel('Sealevel in meter')\nax2.set_title('Bornholm')\nax2.legend()\n# Gotland\nax3.plot(diff_got_r01r02[:,0,0], label='V01R01 - V01R02')\nax_3 = ax3.twinx()\nax_3.plot(diff_got_r01r02_salt[:,0,0],'--r', label='Diff Salt Mean')\n#ax3.plot(diff_got_r01v05r01[:,0,0], label='V01R01 - V05R01')\n#ax3.plot(diff_got_r01v05r02[:,0,0], label='V05R01')\n#ax3.plot(diff_got_v05r01r02[:,0,0], label='V05R02')\nax3.set_ylabel('Sealevel in meter')\nax3.set_title('Gotland')\nax3.legend()\n# Bothnian\nax4.plot(diff_both_r01r02[:,0,0], label='V01R01 - V01R02')\nax_4 = ax4.twinx()\nax_4.plot(diff_both_r01r02_salt[:,0,0],'--r', label='Diff Salt Mean')\n#ax4.plot(diff_both_r01v05r01[:,0,0], label='V01R01 - V05R01')\n#ax4.plot(diff_both_r01v05r02[:,0,0], label='V05R01')\n#ax4.plot(diff_both_v05r01r02[:,0,0], label='V05R02')\nax4.set_ylabel('Sealevel in meter')\nax4.set_xlabel('Time in days')\nax4.set_title('Bothnian')\nax4.legend()\npl.savefig('figures/diff_sea_level.png')\n\n\n# Mean Distribution Difference \nf, (ax1,ax2,ax3,ax4) = pl.subplots(4,figsize=(25.6, 14.4))\n# Arkona\nax1.plot(diff_arc_r01r02_salt[:,0,0], label='Difference V01R01 - V01R02')\nax1.plot(diff_arc_v01v05_salt, label='Difference V01R01 - V05R01')\nax1.plot(diff_arc_v05_salt, label='Difference V05R01 - V05R02')\nax1.plot(diff_arc_v01v03_salt[:,0,0], label='Difference V01R01 - V03R01')\nax1.plot(diff_arc_v01v06_salt[:,0,0], label='Difference V01R01 - V06R01')\nax1.set_ylabel('Salt concentration in psu')\nax1.set_title('Arkona')\nax1.legend()\n# Bornholm\nax2.plot(diff_born_r01r02_salt[:,0,0], label='Difference V01R01 - V01R02')\nax2.plot(diff_born_v01v05_salt, label='Difference V01R01 - V05R01')\nax2.plot(diff_born_v05_salt, label='Difference V05R01 - V05R02')\nax2.plot(diff_born_v01v03_salt[:,0,0], label='Difference V01R01 - V03R01')\nax2.plot(diff_born_v01v06_salt[:,0,0], label='Difference V01R01 - V06R01')\nax2.set_ylabel('Salt concentration in psu')\nax2.set_title('Bornholm')\nax2.legend()\n# Gotland\nax3.plot(diff_got_r01r02_salt[:,0,0], label='Difference V01R01 - V01R02')\nax3.plot(diff_got_v01v05_salt, label='Difference V01R01 - V05R01')\nax3.plot(diff_got_v05_salt, label='Difference V05R01 - V05R02')\nax3.plot(diff_got_v01v03_salt[:,0,0], label='Difference V01R01 - V03R01')\nax3.plot(diff_got_v01v06_salt[:,0,0], label='Difference V01R01 - V06R01')\nax3.set_ylabel('Salt concentration in psu')\nax3.set_title('Gotland')\nax3.legend()\n# Bothnian\nax4.plot(diff_both_r01r02_salt[:,0,0], label='Difference V01R01 - V01R02')\nax4.plot(diff_both_v01v05_salt, label='Difference V01R01 - V05R01')\nax4.plot(diff_both_v05_salt, label='Difference V05R01 - V05R02')\nax4.plot(diff_both_v01v03_salt[:,0,0], label='Difference V01R01 - V03R01')\nax4.plot(diff_both_v01v06_salt[:,0,0], label='Difference V01R01 - V06R01')\nax4.set_ylabel('Salt concentration in psu')\nax4.set_xlabel('Time in days')\nax4.set_title('Bothnian')\nax4.legend()\npl.savefig('figures/comparison_salt_influence.png')\n\n# Difference in vertical distribution\n# Vertical distribution over time at stations\nf, (ax1,ax2,ax3,ax4) = pl.subplots(4,figsize=(25.6, 14.4))\nf.set_figwidth = 
70\nf.set_figheight = 70\ncm = pl.get_cmap('bwr')\n# Arkona\nim = ax1.contourf(diff_arc_v05v01_nover_salt[:,:].T, label='V05R01 - V01R01', vmin=-5, vmax=5, cmap=cm)\nax1.set_ylabel('Depth')\nf.colorbar(im,ax=ax1)\nax1.set_title('Arkona V05R01 - V01R01')\nax1.legend()\n# Bornholm\nim = ax2.contourf(diff_born_v05v01_nover_salt[:,:].T, label='V05R01 - V01R01',vmin=-2,vmax=2, cmap=cm)\nax2.set_ylabel('Depth')\nf.colorbar(im, ax=ax2)\nax2.set_title('Bornholm')\nax2.legend()\n# Gotland\nim = ax3.contourf(diff_got_v05v01_nover_salt[:,:].T, label='V05R01 - V01R01',vmin=-2, vmax=2 ,cmap=cm)\nax3.set_ylabel('Depth')\nf.colorbar(im, ax=ax3)\nax3.set_title('Gotland')\nax3.legend()\n# Bothnian\nax4.contourf(diff_both_v05v01_nover_salt[:,:].T, label='V05R01 - V01R01',vmin=-1.5,vmax=1.5, cmap=cm)\nax4.set_ylabel('Depth')\nf.colorbar(im, ax=ax4)\nax4.set_xlabel('Time in days')\nax4.set_title('Bothnian')\nax4.legend()\npl.savefig('figures/diff_vert_dist.png')\n\n# Diff \nf, (ax1,ax2,ax3,ax4) = pl.subplots(4,figsize=(25.6, 14.4))\n# Arkona\nax1.plot(diff_arc_v01v05_salt[0:9131] + diff_arc_v05_salt[0:9131], label='Difference V05')\nax1.plot(diff_arc_v01v03_salt[:,0,0] + diff_arc_v03_salt[:,0,0], label='Difference V03')\nax1.set_ylabel('Salt concentration in psu')\nax1.set_title('Arkona')\nax1.legend()\n# Bornholm\nax2.plot(diff_born_v01v05_salt[0:9131] + diff_born_v05_salt[0:9131], label='Difference V05')\nax2.plot(diff_born_v01v03_salt[:,0,0] + diff_born_v03_salt[:,0,0], label='Difference V03')\nax2.set_ylabel('Salt concentration in psu')\nax2.set_title('Bornholm')\nax2.legend()\n# Gotland\nax3.plot(diff_got_v01v05_salt[0:9131] + diff_got_v05_salt[0:9131], label='Difference V05')\nax3.plot(diff_got_v01v03_salt[:,0,0] + diff_got_v03_salt[:,0,0], label='Difference V03')\nax3.set_ylabel('Salt concentration in psu')\nax3.set_title('Gotland')\nax3.legend()\n# Bothnian\nax4.plot(diff_both_v01v05_salt[0:9131] + diff_both_v05_salt[0:9131], label='Difference V05')\nax4.plot(diff_both_v01v03_salt[:,0,0] + diff_both_v03_salt[:,0,0], label='Difference V03')\nax4.set_ylabel('Salt concentration in psu')\nax4.set_xlabel('Time in days')\nax4.set_title('Bothnian')\nax4.legend()\npl.savefig('figures/validation_diff.png')\n\n# Check mean diff for r02\n# Mean Distribution Difference \nf, (ax1,ax2,ax3,ax4) = pl.subplots(4,figsize=(25.6, 14.4))\n# Arkona\nax1.plot(diff_arc_v01v05_salt, label='Difference V01R01 - V05R01')\nax1.plot(diff_arc_v01v05_r02_salt, label='Difference V01R02 - V05R02')\nax1.plot(diff_arc_v01v03_salt[:,0,0], label='Difference V01R01 - V03R01')\nax1.plot(diff_arc_v01v03_r02_salt[:,0,0], label='Difference V01R02 - V03R02')\nax1.set_ylabel('Salt concentration in psu')\nax1.set_title('Arkona')\nax1.legend()\n# Bornholm\nax2.plot(diff_born_v01v05_salt, label='Difference V01R01 - V05R01')\nax2.plot(diff_born_v01v05_r02_salt, label='Difference V01R02 - V05R02')\nax2.plot(diff_born_v01v03_salt[:,0,0], label='Difference V01R01 - V03R01')\nax2.plot(diff_born_v01v03_r02_salt[:,0,0], label='Difference V01R02 - V03R02')\nax2.set_ylabel('Salt concentration in psu')\nax2.set_title('Bornholm')\nax2.legend()\n# Gotland\nax3.plot(diff_got_v01v05_salt, label='Difference V01R01 - V05R01')\nax3.plot(diff_got_v01v05_r02_salt, label='Difference V01R02 - V05R02')\nax3.plot(diff_got_v01v03_salt[:,0,0], label='Difference V01R01 - V03R01')\nax3.plot(diff_got_v01v03_r02_salt[:,0,0], label='Difference V01R02 - V03R02')\nax3.set_ylabel('Salt concentration in psu')\nax3.set_title('Gotland')\nax3.legend()\n# 
Bothnian\nax4.plot(diff_both_v01v05_salt, label='Difference V01R01 - V05R01')\nax4.plot(diff_both_v01v05_r02_salt, label='Difference V01R02 - V05R02')\nax4.plot(diff_both_v01v03_salt[:,0,0], label='Difference V01R01 - V03R01')\nax4.plot(diff_both_v01v03_r02_salt[:,0,0], label='Difference V01R02 - V03R02')\nax4.set_ylabel('Salt concentration in psu')\nax4.set_xlabel('Time in days')\nax4.set_title('Bothnian')\nax4.legend()\npl.savefig('figures/comparison_salt_influence_r01r02.png')","sub_path":"sea_level_evaluation.py","file_name":"sea_level_evaluation.py","file_ext":"py","file_size_in_byte":22595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"459634589","text":"from platform import uname as uname\nfrom re import compile as regex\n\nunix_dict = dict(\n    gnu=True,\n    linux=True,\n    cygwin=True,\n    darwin=True,\n    windows=False,\n    openvms=False,\n)\n\n_hp_ux = regex(r'^hp-?ux$')\ndef known_uname(un=uname()[0]):\n    un = un.lower()\n    if unix_dict.get(un) is not None:\n        return un\n    if un.endswith(r'bsd'):\n        return un\n    if un.endswith(r'solaris'):\n        return un\n    if _hp_ux.match(un):\n        return un\n    raise UnknownUname(un)\n\nclass UnknownUname(Exception):\n    pass\n","sub_path":"x19290/platforms/knownpf.py","file_name":"knownpf.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"325037337","text":"from django import forms\nfrom django.conf import settings\nfrom django.core.mail import send_mail\n\n\nclass CommentForm(forms.Form):\n    name = forms.CharField()\n    email = forms.EmailField(required=False)\n    message = forms.CharField()\n\n\ndef comment_action(request, noun):\n    form = CommentForm(request.POST)\n    if form.is_valid():\n        cd = form.cleaned_data\n        # send_mail(subject, message, from_email, recipient_list): the recipient\n        # list must be a list/tuple, and a blank optional email comes back as ''\n        send_mail(\n            'Comment from ' + cd['name'] + ' about ' + noun,\n            cd['message'],\n            cd.get('email') or 'noreply@gedgo.com',\n            [settings.SERVER_EMAIL],\n        )\n    return form\n\n\nclass UpdateForm(forms.Form):\n    gedcom_file = forms.FileField(\n        label='Select a file',\n        help_text='Max file size: 42M.'\n    )\n","sub_path":"forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"516776983","text":"from ..object import get as db\n\nfrom flask import Blueprint, render_template, request, redirect, url_for\n\nblueprint = Blueprint('listings', __name__, template_folder='templates')\n\nratings = {\n\t1: \"terrible\",\n\t2: \"very bad\",\n\t3: \"bad\",\n\t4: \"bearable\",\n\t5: \"average\",\n\t6: \"alright\",\n\t7: \"decent\",\n\t8: \"entertaining\",\n\t9: \"enthralling\",\n\t10: \"perfect\"\n}\n\n
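# --- illustrative aside (not part of listings.py) ----------------------------
# The route decorators further down use Flask path converters such as
# '/edit/<id>'; the angle-bracket parts were stripped by text extraction and are
# reconstructed below from the handler signatures.  Minimal converter demo on a
# hypothetical app (not this blueprint):
#
#   from flask import Flask
#   app = Flask(__name__)
#
#   @app.route('/edit/<id>')      # <id> is captured and passed as a string
#   def edit(id=None):
#       return 'editing listing %s' % id
# -----------------------------------------------------------------------------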
def handle_add(shows, categories, update=False):\n\tif not update:\n\t\tshow = None\n\t\tif \"show\" not in request.form:\n\t\t\treturn \"no show specified\"\n\t\telse:\n\t\t\tshow = request.form['show'].strip()\n\t\t\tif not show or show == \"0\":\n\t\t\t\treturn \"no show specified\"\n\n\t\tif db().get_listing_by_show_id(show) is not None:\n\t\t\treturn \"listing for show already exists\"\n\n\tcategory = None\n\tif \"category\" not in request.form:\n\t\treturn \"no category specified\"\n\telse:\n\t\tcategory = request.form['category'].strip()\n\t\tif not category or category == \"0\":\n\t\t\treturn \"no category specified\"\n\n\trating = None\n\tif \"rating\" not in request.form:\n\t\treturn \"no rating specified\"\n\telse:\n\t\ttry:\n\t\t\trating = int(request.form['rating'].strip())\n\t\texcept:\n\t\t\treturn \"rating was not a valid number\"\n\t\tif rating < 1 or rating > 10:\n\t\t\treturn \"rating was not in the range of 1 - 10\"\n\n\tepisodes = None\n\tif \"episodes\" not in request.form:\n\t\treturn \"no episodes specified\"\n\telse:\n\t\ttry:\n\t\t\tepisodes = int(request.form['episodes'].strip())\n\t\texcept:\n\t\t\tepisodes = 0\n\t\tif episodes < 0:\n\t\t\treturn \"episodes must be greater than one >:|\"\n\n\tif update:\n\t\tstatus = db().update_listing(update['id'], category, episodes, rating)\n\t\treturn True\n\telse:\n\t\tstatus = db().create_listing(category, show, episodes, rating)\n\n\tif status is not None:\n\t\treturn \"created listing for \" + shows[int(show) - 1]['title']\n\telse:\n\t\tprint(status)\n\t\treturn \"failed to create listing\"\n\n\n# NOTE: the <...> path converters below were stripped by text extraction and are\n# reconstructed from the residue ('/add/c', '/add/s', '/add/cs') and from the\n# handler signatures.\n@blueprint.route('/add/', methods=['GET', 'POST'])\n@blueprint.route('/add/c<category>', methods=['GET', 'POST'])\n@blueprint.route('/add/s<show>', methods=['GET', 'POST'])\n@blueprint.route('/add/c<category>s<show>', methods=['GET', 'POST'])\ndef add(category=None, show=None):\n\tstatus = \"\"\n\tshows = db().get_shows()\n\tcategories = db().get_categories()\n\n\tif request.method == \"POST\":\n\t\tstatus = handle_add(shows, categories)\n\n\treturn render_template(\"listing/add.html\", ratings=ratings, category=category, show=show, shows=shows, categories=categories, status=status)\n\n\n@blueprint.route('/remove/', methods=['GET', 'POST'])\n@blueprint.route('/remove/<id>')\ndef remove(id=None):\n\ttry:\n\t\tid = int(id)\n\texcept ValueError:\n\t\treturn render_template(\"error.html\", error=\"not a valid integer\")\n\n\tdb().remove_listing(id)\n\treturn redirect(url_for(\"routes.root\"))\n\n\n@blueprint.route('/edit/<id>', methods=['GET', 'POST'])\ndef edit(id=None):\n\ttry:\n\t\tid = int(id)\n\texcept ValueError:\n\t\treturn render_template(\"error.html\", error=\"not a valid integer\")\n\n\tstatus = \"\"\n\tshows = db().get_shows()\n\tcategories = db().get_categories()\n\n\tif request.method == \"POST\":\n\t\tstatus = handle_add(shows, categories, update={\"id\": id})\n\t\tif status is True:\n\t\t\treturn redirect(url_for(\"routes.root\"))\n\n\tlisting = db().get_listing_by_show_id(id)\n\tcategory = listing['category']\n\n\treturn render_template(\"listing/add.html\", status=status, ratings=ratings, listing=listing, shows=shows, categories=categories, category=category, show=None)\n\n\n@blueprint.route('/increment/', methods=['GET', 'POST'])\n@blueprint.route('/increment/<id>')\ndef increment(id=None):\n\tif id is None:\n\t\treturn render_template(\"error.html\", error=\"not a valid id\")\n\ttry:\n\t\tid = int(id)\n\texcept ValueError:\n\t\treturn render_template(\"error.html\", error=\"not a valid id\")\n\tdb().increment_listing(id, 1)\n\treturn redirect(url_for(\"routes.root\"))\n","sub_path":"mokuroku/routes/listings.py","file_name":"listings.py","file_ext":"py","file_size_in_byte":3634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"503106268","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 16 11:26:13 2019\n\n@author: smokha\n\"\"\"\nimport os\nimport main_func\nimport pandas as pd\nimport random\nfrom twilio.rest import Client\n\nauth_dataset = 'auth.csv'\ndata_path = 'data.csv'\n\n#Secure by storing in environment variables\n#API key and token\naccount_sid = '' ##Your account ID here\nauth_token = '' #Your auth token here\n\n\n\n\ndef generate_code(): #Function to generate random value between 100,000 and 999,999\n    \n    return str(random.randrange(100000, 999999))\n
\ndef send_sms(to_number, body): #Function to send specified SMS to specified user\n    \n    twilio_number = '' #Twilio phone number here\n    client = Client(account_sid, auth_token)\n    message = client.messages \\\n        .create(\n             body= body,\n             from_=twilio_number,\n             to=to_number\n         )\n\n    return message.sid #Return back message id\n\n\n\ndef check_api_csv(number, original_code): #Code to verify the user phone number\n    \n    usrdata_df = pd.DataFrame(pd.read_csv(os.path.join(data_path)))\n    \n    try:\n        auth_df = pd.DataFrame(pd.read_csv(os.path.join(auth_dataset)))\n    except:\n        print(\"Internal error, try again later\")\n\n    found_df = auth_df[auth_df['number'].astype(str) == number]\n    \n    if (found_df.count()[0] == 0):\n        print(\"Number not found! Please retry later\")\n    \n    if (found_df.count()[0] != 0):\n        found__df_2 = found_df[found_df['code'].astype(str) == original_code]\n        if found__df_2.count()[0] == 0:\n            print(\"Code not found! Please retry later\")\n        \n        if (found__df_2.count()[0] != 0): #If code is confirmed, send a random follow up sequence\n            print(\"Code confirmed\")\n            found_df_3 = usrdata_df[usrdata_df['phone_number'].astype(str) == number]\n            \n            if (found_df_3['opt_in'].iloc[0] == True): #Check if follow up sequence already exists\n                print(\"You are already opted-in. Your follow up sequence is: \", found_df_3['follow_up_seq'].iloc[0])\n                input(\"Press enter to return to the main menu\")\n                main_func.hyderate.main_menu()\n            \n            found_df_3['opt_in'].iloc[0] = True\n            if found_df_3['opt_in'].iloc[0] == True:\n                \n                follow_seq = random.randint(1,3) #Artificial accountability function HERE\n                \n                found_df_3['follow_up_seq'].iloc[0] = follow_seq\n                text = 'Your follow up sequence is: ' + str(follow_seq)\n                m_id_2 = send_sms(number, text)\n                print(\"Message sent with message id: \", m_id_2)\n                usrdata_df.loc[usrdata_df['phone_number'].astype(str) == str(number), 'opt_in'] = True\n                usrdata_df.loc[usrdata_df['phone_number'].astype(str) == str(number), 'follow_up_seq'] = int(follow_seq)\n                usrdata_df.to_csv(os.path.join(data_path), index = False)\n                input(\"Press enter to return to main menu\")\n                main_func.hyderate.main_menu()\n            \n                if m_id_2 is not None:\n                    print(\"Response received\") #Confirmation that follow up sequence is sent to the user\n                else:\n                    print(\"Message not sent, try again later\")\n        else:\n            ip5 = input(\"Code not verified, try again?(Y/N): \")\n            if (ip5.lower() == 'y' or ip5.lower() == 'yes'): #Function call to try again if wrong code is received\n                verify(number)\n            elif (ip5.lower() == 'n' or ip5.lower() == 'no'):\n                main_func.hyderate.main_menu()\n\n
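# --- illustrative aside (not part of smsauth.py) -----------------------------
# The verification above round-trips through CSV files; the core check is just
# "does the code the user typed match the code generated for that number".  A
# hypothetical in-memory sketch of that core, using hmac.compare_digest so the
# string comparison does not leak timing information:
import hmac

pending_codes = {}  # phone number -> code we sent (assumed store, not the CSVs above)

def start_verification(number):
    code = generate_code()          # reuses generate_code() defined above
    pending_codes[number] = code
    return code                     # in the real flow this is sent via send_sms

def check_verification(number, submitted):
    expected = pending_codes.get(number)
    return expected is not None and hmac.compare_digest(expected, submitted)
# -----------------------------------------------------------------------------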
def verify(number): #Generate a random code, send it to the user, then check the verification code and send the follow up sequence\n    \n    usrdata_df = pd.DataFrame(pd.read_csv(os.path.join(data_path)))\n    found_df = usrdata_df[usrdata_df['phone_number'].astype(str) == str(number)]\n    \n    if found_df['opt_in'].iloc[0] == True: #Check if user wants to opt out\n        print(\"You are already opted-in. Your follow up sequence is: \", found_df['follow_up_seq'].iloc[0])\n        ip6 = input(\"You have already opted-in for the service. Opt out?(Y/N): \")\n        if (ip6.lower() == 'y' or ip6.lower() == 'yes'):\n            usrdata_df.loc[usrdata_df['phone_number'].astype(str) == str(number), 'opt_in'] = False #Reset opt in and follow up sequence\n            usrdata_df.loc[usrdata_df['phone_number'].astype(str) == str(number), 'follow_up_seq'] = 0\n            usrdata_df.to_csv(os.path.join(data_path), index = False)\n            print(\"You have opted out\")\n            input(\"Press enter to return to main menu\")\n            main_func.hyderate.main_menu() #Return to main menu\n        elif (ip6.lower() == 'n' or ip6.lower() == 'no'):\n            main_func.hyderate.main_menu()\n    msg = 'To opt-in, please reply with the verification code on your phone.' #If the user has not opted in, send verification code\n    code = generate_code()\n    m_id = send_sms(number, msg)\n    print(\"Message sent to {} with message id {} \" .format(number, m_id)) #Confirmation that verification code is sent to user\n    print(\"YOUR VERIFICATION CODE IS: \", code)\n    input(\"After you have responded on your phone, please press enter.\")\n    number = ''.join(e for e in str(number) if e.isalnum())\n    check_api_csv(number, str(code))\n","sub_path":"smsauth.py","file_name":"smsauth.py","file_ext":"py","file_size_in_byte":5534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"213254488","text":"import os\nimport os.path\nimport subprocess\nimport hashlib\n\nfrom simple_task_queue import task, Task, WorkerPool\n\nfrom .MOAIRuntime import *\nfrom .MOAIRuntime import _G, _GII\nfrom gii.core import *\n\nfrom .ScriptHelpers import compileLua\n\n##--------------------------------------------##\nsignals.register ( 'script.load' )\nsignals.register ( 'script.reload' )\nsignals.register ( 'script.unload' )\n\n##----------------------------------------------------------------##\ndef getModulePath( path ):\n\timport os.path\n\treturn os.path.dirname( __file__ ) + '/' + path\n\n##----------------------------------------------------------------##\ndef _hashPath( path ):\n\tname, ext = os.path.splitext( os.path.basename( path ) )\n\tm = hashlib.md5()\n\tm.update( path.encode('utf-8') )\n\treturn m.hexdigest()\n\ndef _affirmPath( path ):\n\tif os.path.exists( path ): return True\n\ttry:\n\t\tos.mkdir( path )\n\t\treturn True\n\texcept Exception as e:\n\t\treturn False\n\ndef _convertToGameModuleName( path ):\n\tbody, ext = os.path.splitext( path )\n\treturn body.replace( '/', '.' 
)\n\n##----------------------------------------------------------------##\ndef _isNewer( f1, f2 ):\n\tif os.path.exists( f1 ) and os.path.exists( f2 ):\n\t\tt1 = os.path.getmtime( f1 )\n\t\tt2 = os.path.getmtime( f2 )\n\t\treturn t1 > t2\n\telse:\n\t\treturn None\n\n##----------------------------------------------------------------##\n@task( 'CompileLuaScript' )\ndef taskCompileLuaScript( context, srcPath, dstPath, version = 'luajit', checktime = False ):\n\tcompileLua( srcPath, dstPath, version, checktime )\n\n@task( 'CompileLuaScriptAndRemoveSource' )\ndef taskCompileLuaScript( context, srcPath, dstPath, version = 'luajit', checktime = False ):\n\tcompileLua( srcPath, dstPath, version, checktime )\n\tos.remove( srcPath )\n\n##----------------------------------------------------------------##\n_GII_SCRIPT_LIBRARY_EXPORT_NAME = 'script_library'\n\n##----------------------------------------------------------------##\n\nclass LuaScriptAssetManager( AssetManager ):\n\tdef getName( self ):\n\t\treturn 'asset_manager.script'\n\n\tdef acceptAssetFile(self, filepath):\n\t\tif not os.path.isfile(filepath): return False\t\t\n\t\tname,ext = os.path.splitext(filepath)\n\t\treturn ext in [ '.lua' ]\n\n\tdef importAsset(self, node, reload = False ):\n\t\tnode.assetType = 'lua'\n\t\tif reload:\t\t\t\n\t\t\tlib = app.getModule( 'script_library' )\n\t\t\tlib.markModified( node )\n\t\t\treturn True\n\t\telse:\n\t\t\tlib = app.getModule( 'script_library' )\n\t\t\tlib.loadScript( node )\n\t\t\treturn True\t\t\n\n\tdef dropAsset( self , node ):\n\t\tlib = app.getModule( 'script_library' )\n\t\tlib.releaseScript( node )\n\n\tdef getDefaultAssetDeployConfig( self, node ):\n\t\treturn dict(\n\t\t\tpackage = 'script',\n\t\t\tcompress = False\n\t\t)\n\n\tdef deployAsset( self, node, context ):\n\t\t#DO NOTHING, ScriptLibrary handles it\n\t\tpass\n\n\tdef getMetaType( self ):\n\t\treturn 'script'\n\n##----------------------------------------------------------------##\nclass ScriptLibrary( EditorModule ):\n\tdef getName( self ):\n\t\treturn 'script_library'\n\n\tdef getDependency( self ):\n\t\treturn ['moai']\n\n\tdef onLoad( self ):\n\t\tself.scripts = {}\n\t\tself.modifiedScripts = {}\n\t\tsignals.connect( 'project.deploy', self.onDeploy )\n\t\tsignals.connect( 'project.post_deploy', self.postDeploy )\n\n\n\tdef convertScriptPath( self, node ):\n\t\tpath = node.getNodePath()\n\t\tname, ext = os.path.splitext( path )\n\t\treturn name.replace( '/', '.' 
)\n\n\tdef markModified( self, node ):\n\t\tself.modifiedScripts[ node ] = True\n\n\tdef isModified( self ):\n\t\tif self.modifiedScripts: return True\n\t\treturn False\n\n\tdef loadScript( self, node ):\n\t\tpath = self.convertScriptPath( node )\n\t\tlogging.info( 'loading script %s', path )\n\t\tm, err = _GII.GameModule.updateGameModule( path )\n\t\tif not m:\n\t\t\tfor info in list(err.values()):\n\t\t\t\tlogging.error( 'script error <%s>: %s', info.path, info.msg )\n\n\tdef loadModifiedScript( self ):\n\t\tlogging.info( 'reloading modified scripts' )\n\t\tmodified = self.modifiedScripts\n\t\tself.modifiedScripts = {}\n\t\tload = False\n\t\tfor node in modified:\n\t\t\tload = True\n\t\t\tself.loadScript( node )\n\t\tsignals.emit( 'script.reload' )\n\t\treturn load\n\n\tdef releaseScript( self, node ):\n\t\t_GII.GameModule.unloadGameModule( self.convertScriptPath( node ) ) #force\n\n\tdef onStart( self ):\n\t\tfor node in self.getAssetLibrary().enumerateAsset( 'lua' ):\n\t\t\t_GII.GameModule.loadGameModule( self.convertScriptPath( node ) )\n\n\tdef onDeploy( self, context ):\n\t\tversion = context.getMeta( 'lua_version', 'lua' )\n\t\tscriptFormat = context.getMeta( 'script_format', 'source' )\n\t\texportIndex = {}\n\t\tsourceIndex = {}\n\t\tcount = 0\n\t\t\n\t\tprint(('deploying lua scripts in format:', scriptFormat))\n\t\twith WorkerPool( worker_num = 8 ):\n\t\t\tfor node in self.getAssetLibrary().enumerateAsset( 'lua' ):\n\t\t\t\tcount += 1\n\t\t\t\thashed = _hashPath( node.getFilePath() )\n\t\t\t\t# dstPath = context.getAssetPath( hashed )\n\t\t\t\tdstPath = context.requestFile( hashed, package = 'script' )\n\t\t\t\texportIndex[ _convertToGameModuleName( node.getNodePath() ) ] = dstPath\n\t\t\t\tsourceIndex[ _convertToGameModuleName( node.getNodePath() ) ] = node.getNodePath()\n\t\t\t\tTask( 'CompileLuaScript' ).promise( context, node.getAbsFilePath(), context.getPath( dstPath ), scriptFormat, True )\n\t\t\t\t# _compileLuaScript( \n\t\t\t\t# \tcontext, node.getAbsFilePath(), context.getPath( dstPath ), scriptFormat, True\n\t\t\t\t# )\n\t\t\t\t\n\t\t\n\t\toutputScriptIndex = context.requestFile( \n\t\t\t_GII_SCRIPT_LIBRARY_EXPORT_NAME, \n\t\t\tpackage = 'config'\n\t\t)\n\n\t\tJSONHelper.trySaveJSON(\n\t\t\t\t{\n\t\t\t\t\t'export' : exportIndex,\n\t\t\t\t\t'source' : sourceIndex\n\t\t\t\t}, \n\t\t\t\tcontext.getAbsPath( outputScriptIndex ), \n\t\t\t\t'script index' \n\t\t\t)\n\t\tcontext.setMeta( 'mock_script_library', outputScriptIndex )\n\n\n\tdef postDeploy( self, context ):\n\t\t#compile lib scripts\n\t\tscriptFormat = context.getMeta( 'script_format', 'source' )\n\t\tpackageLib = context.affirmPackage( 'lib' )\n\t\tif not packageLib: return\n\t\tbuildPath = packageLib.getBuildPath()\n\t\tif not os.path.isdir( buildPath ): return\n\n\t\tcontext.notify( ' > Compiling Lib Lua scripts ...' 
)\n\t\t#TODO: precompile libs\n\t\tsrcRootPath = context.getProject().getGamePath( 'lib' )\n\n\t\twith WorkerPool():\n\t\t\tfor root, dirs, files in os.walk( srcRootPath ):\n\t\t\t\tsubDir = os.path.relpath( root, srcRootPath )\n\t\t\t\tbuildRoot = buildPath + '/' + subDir\n\t\t\t\t_affirmPath( buildRoot )\n\t\t\t\tfor f in files:\n\t\t\t\t\tif not f.endswith( '.lua' ): continue\n\t\t\t\t\tsrcPath = root + '/' + f\n\t\t\t\t\tname, ext = os.path.splitext( f )\n\t\t\t\t\t# dstPath = root + '/' + name + '_'\n\t\t\t\t\tdstPath = buildRoot + '/' + name + '_'\n\t\t\t\t\tTask( 'CompileLuaScript' ).promise(\n\t\t\t\t\t\tcontext, srcPath, dstPath, scriptFormat, True\n\t\t\t\t\t\t)\n\t\t\t\tfor d in dirs[:]:\n\t\t\t\t\tif d.startswith( '.' ):\n\t\t\t\t\t\tdirs.remove( d )\n\t\t\t\t\t# _compileLuaScript( context, srcPath, dstPath, scriptFormat, True )\n\t\t\t\t\t# os.remove( srcPath )\n\n##----------------------------------------------------------------##\nScriptLibrary().register()\nLuaScriptAssetManager().register()\n##----------------------------------------------------------------##\n\nclass RemoteCommandReloadScript( RemoteCommand ):\n\tname = 'reload_script'\n\tdef run( self, *args ):\n\t\t# app.getAssetLibrary().scheduleScanProject()\n\t\tapp.getAssetLibrary().tryScanProject()\n\t\tlib = app.getModule( 'script_library' )\n\t\tlib.loadModifiedScript()\n\t\tgiiSync = app.getModule( 'gii_sync_support' )\n\t\tif giiSync:\n\t\t\tgiiSync.query( 'cmd.reload_script' )\n\n\nclass RemoteCommandReloadGameScript( RemoteCommand ):\n\tname = 'reload_game_script'\n\tdef run( self, *args ):\n\t\tapp.getAssetLibrary().tryScanProject()\n\t\tgiiSync = app.getModule( 'gii_sync_support' )\n\t\tif giiSync:\n\t\t\tgiiSync.query( 'cmd.reload_script' )","sub_path":"lib/gii/moai/LuaScriptAsset.py","file_name":"LuaScriptAsset.py","file_ext":"py","file_size_in_byte":7497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"515799586","text":"from flask import Flask,render_template,request\r\nfrom keras.models import model_from_json\r\nfrom keras.preprocessing import image\r\nimport base64\r\nimport numpy as np\r\nimport pandas as pd\r\n#import matplotlib.pyplot as plt\r\n\r\napp=Flask(__name__,template_folder=\"templates\")\r\n\r\nwith open('model/model.json','r') as f:\r\n\tload_json=f.read()\r\nmodel=model_from_json(load_json)\r\nmodel.load_weights('model/model.h5')#weights in model.h5 file\r\n\r\n@app.route('/')\r\ndef dos():\r\n\treturn render_template(\"index.html\")\r\n\r\n@app.route('/data',methods=['POST','GET'])\r\ndef fumx():\r\n\tif request.method==\"POST\":\r\n\t\tdata=str(request.form.get('image'))\r\n\t\tbase_Data=data.encode('ascii')\r\n\t\tdata=base64.b64decode(base_Data)\r\n\t\twith open('imageToSave.png','wb') as fh:\r\n\t\t\tfh.write(data)\r\n\t\timg=image.load_img('imageToSave.png',target_size=(28,28),grayscale=True)\r\n\t\tres=np.argmax(model.predict(np.expand_dims(img,0)))\r\n\t\treturn render_template(\"index.html\",ans=str(res))\r\n\telse:\r\n\t\treturn \"NULL\"\r\n\t\r\nif __name__ == '__main__':\r\n\tapp.run(debug=True)\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"54649518","text":"from flask import jsonify\n\nfrom tests.virtual_webapp.vshopify import vshopify\nfrom tests import testing_constants\nfrom webapp.common import get_post_payload\n\nwebhooks_container = 
[]\n\n@vshopify.route('/admin/orders.json')\ndef orders():\n return jsonify({'orders': [{\n 'id': testing_constants.NEW_ORDER_PLATFORM_ID,\n 'fulfillment_status': None,\n 'created_at': '2015-11-28T14:45:50+00:00',\n 'cancelled_at': None,\n 'browser_ip': testing_constants.NEW_ORDER_BROWSER_IP,\n 'line_items': [{\n 'id': testing_constants.NEW_PRODUCT_PLATFORM_ID,\n 'product_id': testing_constants.NEW_PRODUCT_PLATFORM_ID,\n 'variant_id': testing_constants.NEW_PRODUCT_PLATFORM_ID\n }],\n 'customer': {\n 'first_name': '',\n 'last_name': ''\n }\n }]})\n\n\n@vshopify.route('/admin/orders/count.json')\ndef orders_count():\n return jsonify({\n 'count': 1\n })\n\n\n@vshopify.route('/admin/products/count.json')\ndef products_count():\n return jsonify({\n 'count': 1\n })\n\n\n@vshopify.route('/admin/products.json')\ndef products():\n return jsonify({'products': [{\n 'id': testing_constants.NEW_PRODUCT_PLATFORM_ID,\n 'title': testing_constants.NEW_PRODUCT_NAME\n }]})\n\n\n@vshopify.route('/admin/shop.json')\ndef shop():\n return jsonify({'shop': {'email': testing_constants.NEW_USER_EMAIL,\n 'shop_owner': testing_constants.NEW_USER_NAME}})\n\n\n@vshopify.route('/admin/webhooks.json', methods=['POST'])\ndef webhooks():\n global webhooks_container\n payload = get_post_payload()\n webhooks_container.append(payload)\n return jsonify({})\n\n@vshopify.route('/admin/webhooks/count.json')\ndef webhooks_count():\n global webhooks_container\n return jsonify({'count': len(webhooks_container)})\n\n@vshopify.route('/clean_webhooks')\ndef clean_webhooks():\n global webhooks_container\n webhooks_container = []\n return jsonify({})\n\n@vshopify.route('/admin/oauth/access_token', methods=['POST'])\ndef access_token():\n return jsonify({'access_token': 'hello'})\n","sub_path":"tests/virtual_webapp/vshopify/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"446711115","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport argparse\nimport csv\nimport json\nimport numpy as np\nimport os\nimport pandas as pd\nimport rasterio\nimport tensorflow as tf\nfrom glob import glob\nfrom tqdm import tqdm\n\n# Path to the BigEarthNet extracted files\nbig_earth_path = '/workspace/app/data/raw/BigEarthNet-v1.0/'\n\n# Models folder is already checkin. No need to download the models\nbig_earth_models_folder = '/workspace/app/data/raw/bigearthnet-models/'\n\n# Stores the TFRecords\nout_folder = '/workspace/app/data/processed'\n\nif not os.path.exists(big_earth_path):\n print('ERROR: folder', big_earth_path, 'does not exist')\n\nif not os.path.exists(big_earth_models_folder):\n print('ERROR: folder', big_earth_models_folder, 'does not exist')\n\nif not os.path.exists(out_folder):\n print('ERROR: folder', out_folder, 'does not exist')\n\nprint(f'Using Pandas Version: {pd.__version__}')\nprint(f'Using TensorFlow Version: {tf.__version__}')\n\n# Set up a symbolic link to allow for easy Python module imports. 
Then check to make sure the link works (it is a Unix link so check from shell)\n\nos.system(\"rm bemodels\")\nos.system(\"ln -s '/workspace/app/data/raw/bigearthnet-models/' bemodels\")\nos.system('ls bemodels')\nfrom bemodels import tensorflow_utils\n\n# Downloads the data from the BigEarthNet website\ndef download_data():\n    os.system(\"curl http://bigearth.net/downloads/BigEarthNet-S2-v1.0.tar.gz -o data/raw/BigEarthNet-v1.0.zip\")\n    os.system(\"tar -xvf data/raw/BigEarthNet-v1.0.zip -C data/raw\")\n\n# Process All of the BigEarthNet data\ndef preprocess_tfrecords():\n    with open(big_earth_models_folder + 'label_indices.json', 'rb') as f:\n        label_indices = json.load(f)\n\n    root_folder = big_earth_path\n\n    csv_file_path_list = ['splits/train.csv', 'splits/test.csv', 'splits/val.csv']\n\n    for csv_file in csv_file_path_list:\n        splits = glob(f\"{big_earth_models_folder}{csv_file}\")\n        patch_names_list = []\n        split_names = []\n        for csv_file in splits:\n            patch_names_list.append([])\n            split_names.append(os.path.basename(csv_file).split('.')[0])\n            with open(csv_file, 'r') as fp:\n                csv_reader = csv.reader(fp, delimiter=',')\n                for row in csv_reader:\n                    patch_names_list[-1].append(row[0].strip())\n        tensorflow_utils.prep_tf_record_files(\n            root_folder, out_folder,\n            split_names, patch_names_list,\n            label_indices, False, True)\n\n    # Shard the Train data\n    raw_dataset = tf.data.TFRecordDataset(out_folder + \"/train.tfrecord\")\n    shards = 50\n    for i in range(shards):\n        writer = tf.data.experimental.TFRecordWriter(f\"{out_folder}/train-part-{i}.tfrecord\")\n        writer.write(raw_dataset.shard(shards, i))\n\n    # Shard the Test data\n    raw_dataset = tf.data.TFRecordDataset(out_folder + \"/test.tfrecord\")\n    shards = 20\n    for i in range(shards):\n        writer = tf.data.experimental.TFRecordWriter(f\"{out_folder}/test-part-{i}.tfrecord\")\n        writer.write(raw_dataset.shard(shards, i))\n\n    # Shard the Val data\n    raw_dataset = tf.data.TFRecordDataset(out_folder + \"/val.tfrecord\")\n    shards = 20\n    for i in range(shards):\n        writer = tf.data.experimental.TFRecordWriter(f\"{out_folder}/val-part-{i}.tfrecord\")\n        writer.write(raw_dataset.shard(shards, i))\n\n# Shards the data\n# tf_main_file is the main tf file name. The function will look for corresponding train, test, and val files\n# Ex: balanced, balanced_vy\ndef shard_tfrecords(tf_main_file):\n\n    # Shard the Train data\n    raw_dataset = tf.data.TFRecordDataset(out_folder + \"/\"+tf_main_file+\"_train.tfrecord\")\n    shards = 50\n    print(\"Sharding Train data\")\n    for i in range(shards):\n        writer = tf.data.experimental.TFRecordWriter(f\"{out_folder}/{tf_main_file}_train-part-{i}.tfrecord\")\n        writer.write(raw_dataset.shard(shards, i))\n\n    # Shard the Test data\n    raw_dataset = tf.data.TFRecordDataset(out_folder + \"/\"+tf_main_file+\"_test.tfrecord\")\n    shards = 20\n    print(\"Sharding Test data\")\n    for i in range(shards):\n        writer = tf.data.experimental.TFRecordWriter(f\"{out_folder}/{tf_main_file}_test-part-{i}.tfrecord\")\n        writer.write(raw_dataset.shard(shards, i))\n\n    # Shard the Val data\n    raw_dataset = tf.data.TFRecordDataset(out_folder + \"/\"+tf_main_file+\"_val.tfrecord\")\n    shards = 20\n    print(\"Sharding Validation data\")\n    for i in range(shards):\n        writer = tf.data.experimental.TFRecordWriter(f\"{out_folder}/{tf_main_file}_val-part-{i}.tfrecord\")\n        writer.write(raw_dataset.shard(shards, i))\n\n# Count BigEarthNet positive and negative samples\ndef count_bn_positive_negative():\n    with open(big_earth_models_folder + 'label_indices.json', 'rb') as f:\n        label_indices = json.load(f)\n\n    root_folder = big_earth_path\n    splits = glob(f'{big_earth_models_folder}splits/all.csv')\n\n    # Checks the existence of patch folders and populate the list of patch folder paths\n    folder_path_list = []\n    if not os.path.exists(root_folder):\n        print('ERROR: folder', root_folder, 'does not exist')\n\n    patch_names_list = []\n    split_names = []\n    for csv_file in splits:\n        patch_names_list.append([])\n        split_names.append(os.path.basename(csv_file).split('.')[0])\n        with open(csv_file, 'r') as fp:\n            csv_reader = csv.reader(fp, delimiter=',')\n            for row in csv_reader:\n                patch_names_list[-1].append(row[0].strip())\n\n    len(patch_names_list[0])\n\n    irrigated_examples = []\n    nonirrigated_examples = []\n    missing_count = 0\n    for patch_name in tqdm(patch_names_list[0]):\n        patch_folder_path = os.path.join(root_folder, patch_name)\n        patch_json_path = os.path.join(\n            patch_folder_path, patch_name + '_labels_metadata.json')\n        try:\n            with open(patch_json_path, 'rb') as f:\n                patch_json = json.load(f)\n        except:\n            # print(f'Missing Labels for {patch_name}')\n            missing_count += 1\n            continue\n\n        if 'Permanently irrigated land' in patch_json['labels']:\n            irrigated_examples.append(patch_folder_path)\n        else:\n            nonirrigated_examples.append(patch_folder_path)\n\n    print('irrigated_examples', len(irrigated_examples))\n    print('non-irrigated_examples', len(nonirrigated_examples))\n    print('missing_count', missing_count)\n    # Check for Vineyards\n    vy_examples = []\n    nonvy_examples = []\n    missing_count = 0\n    for patch_name in tqdm(patch_names_list[0]):\n        patch_folder_path = os.path.join(root_folder, patch_name)\n        patch_json_path = os.path.join(\n            patch_folder_path, patch_name + '_labels_metadata.json')\n        try:\n            with open(patch_json_path, 'rb') as f:\n                patch_json = json.load(f)\n        except:\n            # print(f'Missing Labels for {patch_name}')\n            missing_count += 1\n            continue\n\n        if 'Vineyards' in patch_json['labels']:\n            vy_examples.append(patch_folder_path)\n        else:\n            nonvy_examples.append(patch_folder_path)\n\n    print('vineyard_examples', len(vy_examples))\n    print('non-vineyard_examples', len(nonvy_examples))\n    print('missing_count', missing_count)\n\ndef preprocess_tfrecords_labelled(split, ratio = '50-50', 
include_vineyard = False):\n with open(big_earth_models_folder + 'label_indices.json', 'rb') as f:\n label_indices = json.load(f)\n\n print('Called preprocess_tfrecords_labelled with split ratio ',ratio )\n\n root_folder = big_earth_path\n\n splits = glob(f'{big_earth_models_folder}splits/{split}.csv')\n\n # Checks the existence of patch folders and populate the list of patch folder paths\n folder_path_list = []\n if not os.path.exists(root_folder):\n print('ERROR: folder', root_folder, 'does not exist')\n\n patch_names_list = []\n split_names = []\n for csv_file in splits:\n patch_names_list.append([])\n split_names.append(os.path.basename(csv_file).split('.')[0])\n with open(csv_file, 'r') as fp:\n csv_reader = csv.reader(fp, delimiter=',')\n for row in csv_reader:\n patch_names_list[-1].append(row[0].strip())\n\n len(patch_names_list[0])\n\n irrigated_examples = []\n nonirrigated_examples = []\n all_examples = []\n missing_count = 0\n for patch_name in tqdm(patch_names_list[0]):\n patch_folder_path = os.path.join(root_folder, patch_name)\n patch_json_path = os.path.join(\n patch_folder_path, patch_name + '_labels_metadata.json')\n try:\n with open(patch_json_path, 'rb') as f:\n patch_json = json.load(f)\n except:\n # print(f'Missing Labels for {patch_name}')\n missing_count += 1\n continue\n\n if 'Permanently irrigated land' in patch_json['labels']:\n irrigated_examples.append(patch_folder_path)\n else:\n nonirrigated_examples.append(patch_folder_path)\n # Add all\n all_examples.append(patch_folder_path)\n\n # Check for Vineyards\n vy_examples = []\n nonvy_examples = []\n missing_count = 0\n for patch_name in tqdm(patch_names_list[0]):\n patch_folder_path = os.path.join(root_folder, patch_name)\n patch_json_path = os.path.join(\n patch_folder_path, patch_name + '_labels_metadata.json')\n try:\n with open(patch_json_path, 'rb') as f:\n patch_json = json.load(f)\n except:\n # print(f'Missing Labels for {patch_name}')\n missing_count += 1\n continue\n\n if 'Vineyards' in patch_json['labels']:\n vy_examples.append(patch_folder_path)\n else:\n nonvy_examples.append(patch_folder_path)\n\n len(vy_examples) * 2\n len(nonvy_examples)\n\n # New: This was added as the next code directly reads the csv and creates a dataframe\n pos_df = pd.DataFrame(irrigated_examples, columns=['file'])\n neg_df = pd.DataFrame(nonirrigated_examples, columns=['file'])\n all_df = pd.DataFrame(all_examples, columns=['file'])\n if ratio == '50-50':\n pos_df.to_csv(big_earth_models_folder + 'splits/positive_' + split + '.csv')\n neg_df.to_csv(big_earth_models_folder + 'splits/negative_' + split + '.csv')\n # Read back\n pos_irr_df = pd.read_csv(big_earth_models_folder + 'splits/positive_' + split + '.csv')\n neg_irr_df = pd.read_csv(big_earth_models_folder + 'splits/negative_' + split + '.csv')\n elif ratio == '10-90':\n pos_df.to_csv(big_earth_models_folder + 'splits/positive_10_90_' + split + '.csv')\n neg_df.to_csv(big_earth_models_folder + 'splits/negative_10_90_' + split + '.csv')\n # Read back\n pos_irr_df = pd.read_csv(big_earth_models_folder + 'splits/positive_10_90_' + split + '.csv')\n neg_irr_df = pd.read_csv(big_earth_models_folder + 'splits/negative_10_90_' + split + '.csv')\n elif ratio.startswith('u'): #'u64', 'u128', 'u256', 'u512', 'u1024' unbalanced\n all_df.to_csv(big_earth_models_folder + 'splits/all_'+ratio+'_'+ split + '.csv')\n # Read back\n all_df = pd.read_csv(big_earth_models_folder + 'splits/all_'+ratio+'_'+ split + '.csv')\n else: #'64', '128', '256', '512', '1024' balanced\n 
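# Note (editor): the balanced branch below mirrors the 50-50 and 10-90 branches above; it writes the sampled positive/negative splits to CSV and immediately reads them back, leaving an on-disk record of exactly which patches were used.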
pos_df.to_csv(big_earth_models_folder + 'splits/positive_'+ratio+'_'+ split + '.csv')\n neg_df.to_csv(big_earth_models_folder + 'splits/negative_'+ratio+'_'+ split + '.csv')\n # Read back\n pos_irr_df = pd.read_csv(big_earth_models_folder + 'splits/positive_'+ratio+'_'+ split + '.csv')\n neg_irr_df = pd.read_csv(big_earth_models_folder + 'splits/negative_'+ratio+'_'+ split + '.csv')\n\n\n # # Create Data sets for finetuning. Make total dataset size divisible by 32 or 64 for easy batching\n\n # Unused code\n #len(pos_df)\n\n #pos_df_1_percent = pos_irr_df.sample(frac=0.0065)\n #pos_df_3_percent = pos_irr_df.sample(frac=0.0258)\n #pos_df_10_percent = pos_irr_df.sample(frac=0.103)\n\n #print(len(pos_df_1_percent))\n #print(len(pos_df_3_percent))\n #print(len(pos_df_10_percent))\n\n #sample_frac_1p = len(pos_df_1_percent) / len(neg_irr_df)\n #sample_frac_3p = len(pos_df_3_percent) / len(neg_irr_df)\n #sample_frac_10p = len(pos_df_10_percent) / len(neg_irr_df)\n\n #subset_neg_df_1p = neg_irr_df.sample(frac=sample_frac_1p)\n #subset_neg_df_3p = neg_irr_df.sample(frac=sample_frac_3p)\n #subset_neg_df_10p = neg_irr_df.sample(frac=sample_frac_10p)\n\n neg_ir_df = None\n\n if ratio == '50-50':\n sample_frac_ir = len(pos_df) / len(neg_df)\n neg_ir_df = neg_df.sample(frac=sample_frac_ir)\n elif ratio == '10-90':\n # Assume current positive as 10%, take 90% negative\n neg_ir_df = neg_df.sample(n=len(pos_df) * 9)\n elif ratio.startswith('u'): # 'u64', 'u128', 'u256', 'u512', 'u1024' unbalanced\n all_df = all_df.sample(n=int(ratio[1:]))\n # Initialize the positive and negative cache\n unsampled_values = []\n pos_unsampled_values = []\n neg_unsampled_values = []\n for i, j in all_df.iterrows():\n unsampled_values.append(j['file'])\n\n for sampled_val in unsampled_values:\n if sampled_val in irrigated_examples:\n pos_unsampled_values.append(sampled_val)\n else:\n neg_unsampled_values.append(sampled_val)\n pos_df = pd.DataFrame(pos_unsampled_values, columns=['file'])\n print(\"Count of Positive Irrigation Samples \", len(pos_unsampled_values))\n neg_ir_df = pd.DataFrame(neg_unsampled_values, columns=['file'])\n print(\"Count of Negative Irrigation Samples \", len(neg_unsampled_values))\n else: #'64', '128', '256', '512', '1024'\n pos_df = pos_df.sample(n=int(ratio))\n neg_ir_df = neg_df.sample(n=len(pos_df))\n\n # New\n balanced_df = pd.concat([pos_df, neg_ir_df])\n # Shuffle the examples\n balanced_df = balanced_df.sample(frac=1)\n if ratio == '50-50':\n balanced_df.to_csv(f'{big_earth_models_folder}splits/balanced_{split}.csv')\n splits = glob(f'{big_earth_models_folder}splits/balanced_{split}.csv')\n elif ratio == '10-90':\n balanced_df.to_csv(f'{big_earth_models_folder}splits/balanced_10_90_{split}.csv')\n splits = glob(f'{big_earth_models_folder}splits/balanced_10_90_{split}.csv')\n elif ratio.startswith('u'): # 'u64', 'u128', 'u256', 'u512', 'u1024' unbalanced\n balanced_df.to_csv(f'{big_earth_models_folder}splits/{ratio}_{split}.csv')\n splits = glob(f'{big_earth_models_folder}splits/{ratio}_{split}.csv')\n else: #'64', '128', '256', '512', '1024'\n balanced_df.to_csv(f'{big_earth_models_folder}splits/balanced_{ratio}_{split}.csv')\n splits = glob(f'{big_earth_models_folder}splits/balanced_{ratio}_{split}.csv')\n\n patch_names_list = []\n split_names = []\n for csv_file in splits:\n patch_names_list.append([])\n split_names.append(os.path.basename(csv_file).split('.')[0])\n csv_df = pd.read_csv(csv_file)\n patch_names_list[-1] = list(csv_df.file)\n patch_names_list[-1] = [name.split('/')[-1] for name 
in patch_names_list[-1]]\n\n tensorflow_utils.prep_tf_record_files(\n root_folder, out_folder,\n split_names, patch_names_list,\n label_indices, False, True)\n\n # Start for vineyards data\n if not include_vineyard:\n return\n\n pos_df = pd.DataFrame(vy_examples, columns=['file'])\n neg_df = pd.DataFrame(nonvy_examples, columns=['file'])\n if ratio == '50-50':\n pos_df.to_csv(big_earth_models_folder + 'splits/positive_vy_' + split + '.csv')\n neg_df.to_csv(big_earth_models_folder + 'splits/negative_vy_' + split + '.csv')\n elif ratio == '10-90':\n pos_df.to_csv(big_earth_models_folder + 'splits/positive_10_90_vy_' + split + '.csv')\n neg_df.to_csv(big_earth_models_folder + 'splits/negative_10_90_vy_' + split + '.csv')\n else: #'64', '128', '256', '512', '1024'\n pos_df.to_csv(big_earth_models_folder + 'splits/positive_'+ratio+'_vy_'+ split + '.csv')\n neg_df.to_csv(big_earth_models_folder + 'splits/negative_'+ratio+'_vy_'+ split + '.csv')\n\n # # Create Data sets for finetuning. Make total dataset size divisible by 32 or 64 for easy batching\n\n len(pos_df)\n\n pos_vy_df_1_percent = pos_df.sample(frac=0.0092)\n pos_vy_df_3_percent = pos_df.sample(frac=0.0366)\n\n print(len(pos_vy_df_1_percent))\n print(len(pos_vy_df_3_percent))\n\n sample_frac_vy_1p = len(pos_vy_df_1_percent) / len(neg_df)\n sample_frac_vy_3p = len(pos_vy_df_3_percent) / len(neg_df)\n\n subset_neg_vy_df_1p = neg_df.sample(frac=sample_frac_vy_1p)\n subset_neg_vy_df_3p = neg_df.sample(frac=sample_frac_vy_3p)\n\n print(len(subset_neg_vy_df_1p))\n print(len(subset_neg_vy_df_3p))\n\n neg_vy_df = None\n if ratio == '50-50':\n sample_frac_vy = len(pos_df) / len(neg_df)\n neg_vy_df = neg_df.sample(frac=sample_frac_vy)\n elif ratio == '10-90':\n # Assume current positive as 10%, take 90% negative\n neg_vy_df = neg_df.sample(n=len(pos_df) * 9)\n else: #'64', '128', '256', '512', '1024'\n pos_df = pos_df.sample(n=int(ratio))\n neg_vy_df = neg_df.sample(n=len(pos_df))\n\n\n balanced_df = pd.concat([pos_df, neg_vy_df])\n # Shuffle the examples\n balanced_df = balanced_df.sample(frac=1)\n\n if ratio == '50-50':\n balanced_df.to_csv(f'{big_earth_models_folder}splits/balanced_vy_{split}.csv')\n splits = glob(f'{big_earth_models_folder}splits/balanced_vy_{split}.csv')\n elif ratio == '10-90':\n balanced_df.to_csv(f'{big_earth_models_folder}splits/balanced_vy_10_90_{split}.csv')\n splits = glob(f'{big_earth_models_folder}splits/balanced_vy_10_90_{split}.csv')\n else: #'64', '128', '256', '512', '1024'\n balanced_df.to_csv(f'{big_earth_models_folder}splits/balanced_vy_{ratio}_{split}.csv')\n splits = glob(f'{big_earth_models_folder}splits/balanced_vy_{ratio}_{split}.csv')\n\n patch_names_list = []\n split_names = []\n for csv_file in splits:\n patch_names_list.append([])\n split_names.append(os.path.basename(csv_file).split('.')[0])\n csv_df = pd.read_csv(csv_file)\n patch_names_list[-1] = list(csv_df.file)\n patch_names_list[-1] = [name.split('/')[-1] for name in patch_names_list[-1]]\n\n tensorflow_utils.prep_tf_record_files(\n root_folder, out_folder,\n split_names, patch_names_list,\n label_indices, False, True)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='This script creates TFRecord files for the BigEarthNet train, validation and test splits. 
It also shards the files.')\n    parser.add_argument('-d', '--download', default=False, type=bool,\n                        help=\"whether to download bigearthnet data\")\n    parser.add_argument('-tf', '--tfrecords', default=False, type=bool,\n                        help=\"whether to create tfrecords\")\n\n    # process tf records labelled for balanced data\n    parser.add_argument('-tfl', '--tfrecordslabeled', default=False, type=bool,\n                        help=\"whether to create tfrecords with labelled\")\n    parser.add_argument('-s', '--split', default='train', type=str,\n                        help=\"which dataset split to create (train,val,test)\")\n    # U before a number indicates unsampled total count.\n    parser.add_argument('-sr', '--ratio', default='50-50', choices=['50-50', '10-90', '64', '128', '256', '512', '1024',\n                                                                    'u32', 'u64', 'u128', 'u256', 'u512', 'u1024'],\n                        help='Split ratio')\n\n    # Shard the data\n    parser.add_argument('-sd', '--sharddata', default=False, type=bool,\n                        help=\"whether to shard the data or not\")\n    parser.add_argument('-sdn', '--shardname', default='balanced', type=str,\n                        help=\"which main file to shard\")\n\n    # Count bigearthnet positive and negative count\n    parser.add_argument('-cbpn', '--countbnposneg', default=False, type=bool,\n                        help=\"Count bigearthnet positive and negative\")\n\n    args = parser.parse_args()\n\n    if args.download:\n        print('download data---START')\n        download_data()\n        print('download data---END')\n\n    if args.tfrecords:\n        print('preprocess_tfrecords---START')\n        preprocess_tfrecords()\n        print('preprocess_tfrecords---END')\n\n    if args.tfrecordslabeled:\n        print('preprocess_tfrecords_labelled---START')\n        preprocess_tfrecords_labelled(args.split, args.ratio)\n        print('preprocess_tfrecords_labelled---END')\n\n    if args.sharddata:\n        print('shard_tfrecords---START')\n        shard_tfrecords(args.shardname)\n        print('shard_tfrecords---END')\n\n    if args.countbnposneg:\n        print('count_bn_positive_negative---START')\n        count_bn_positive_negative()\n        print('count_bn_positive_negative---END')\n","sub_path":"src/data/make_dataset.py","file_name":"make_dataset.py","file_ext":"py","file_size_in_byte":21030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"170306864","text":"from cs50 import get_int\n# gerekli olan fonksiyon\n# needed function\n\ncheck = False\n\nwhile check is False:\n    height = get_int(\"Height: \")\n    if height >= 1 and height <= 8:\n        check = True\n        # input control\n        # girdi kontrolu\n\n# get input from user\n# kullanicidan girdi alma\n\ndefiner = 0\n# used to align the pyramid\n# piramidin terslemesi icin\n\n# printing the pyramid\n# piramit yazdiriliyor\nfor i in range(height):\n    definer+=1\n    print(\" \"*(height - definer), end=\"\")\n    print(\"#\"*definer)\n\n","sub_path":"marioless.py","file_name":"marioless.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"232513556","text":"from .baseplayer import BasePlayer\nfrom utils import log, choose, BLACKJACK\n\n\nclass Player(BasePlayer):\n    def __init__(self):\n        super().__init__(\"사용자\", \"cyan\")\n        self.wins = 0\n        self.plays = 0\n        self.bets = 0\n\n    def set_chips(self):\n        if self.chips == 0:\n            result = [\"기본칩 50개를 증정합니다.\"]\n            self.chips = 50\n        else:\n            result = [\n                \"전에 오신 적이 있으시군요.\",\n                f\"남은 칩은 {self.chips}개 입니다.\",\n            ]\n        result.append(\"게임을 시작합니다.\")\n        log(*result)\n\n    def prepare(self):\n        super().prepare()\n        self.set_chips()\n        self.bet_chips()\n        self.plays += 1\n\n    @property\n    def win_rate(self):\n        return int(self.wins / self.plays * 100)\n\n 
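# Note (editor): win_rate divides by self.plays, so it assumes at least one game has been played; prepare() increments plays before any stats are shown, so the division should be safe in the normal game flow.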
@property\n def info(self):\n return (\n f\"게임 횟수 : {self.plays} / \"\n f\"승리 횟수 : {self.wins} / \"\n f\"승률 : {self.win_rate}%\\n\"\n f\"남은 칩은 {self.chips}개입니다.\"\n )\n\n def cardcal(self, cardlast):\n if \"A\" in cardlast:\n print(f\"{cardlast}는 1점 또는 11점으로 적용할 수 있습니다.\")\n return int(choose(\"1\", \"11\"))\n return super().cardcal(cardlast)\n\n def play_turn(self):\n self.draw(2)\n while not (self.blackjack or self.bust):\n self.show_hands()\n if choose(\"힛\", \"스테이\") == \"스테이\":\n break\n self.draw()\n\n def win(self):\n super().win()\n self.wins += 1\n\n multiplier = 1.5\n if self.card_sum == BLACKJACK:\n multiplier = 2\n\n self.chips += int(self.bets * multiplier)\n\n def bet_chips(self):\n def get_int_input(ask: str, max: int):\n while True:\n try:\n result = int(input(ask))\n if 0 < result <= max:\n return result\n print(\"칩이 부족합니다.\")\n except:\n print(\"잘못 입력하셨습니다\")\n\n self.bets = get_int_input(\n f\"남은 칩 : {self.chips}\\n베팅 금액을 정해주십시오. \",\n self.chips,\n )\n self.say(f\"{self.bets}개 베팅하셨습니다.\")\n self.chips -= self.bets\n","sub_path":"players/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":2382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"293388956","text":"# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport logging\nfrom datetime import datetime\n\nfrom metadata.config.common import ConfigModel\nfrom metadata.generated.schema.entity.data.database import Database\nfrom metadata.generated.schema.entity.data.table import ColumnJoins, Table, TableJoins\nfrom metadata.ingestion.api.bulk_sink import BulkSink, BulkSinkStatus\nfrom metadata.ingestion.api.common import WorkflowContext\nfrom metadata.ingestion.models.table_queries import (\n ColumnJoinedWith,\n TableColumn,\n TableUsageCount,\n TableUsageRequest,\n)\nfrom metadata.ingestion.ometa.client import APIError\nfrom metadata.ingestion.ometa.ometa_api import OpenMetadata\nfrom metadata.ingestion.ometa.openmetadata_rest import MetadataServerConfig\n\nlogger = logging.getLogger(__name__)\n\n\nclass MetadataUsageSinkConfig(ConfigModel):\n filename: str\n\n\nclass MetadataUsageBulkSink(BulkSink):\n config: MetadataUsageSinkConfig\n\n def __init__(\n self,\n ctx: WorkflowContext,\n config: MetadataUsageSinkConfig,\n metadata_config: MetadataServerConfig,\n ):\n super().__init__(ctx)\n self.config = config\n self.metadata_config = metadata_config\n self.wrote_something = False\n self.file_handler = open(self.config.filename, \"r\")\n self.metadata = OpenMetadata(self.metadata_config)\n self.status = BulkSinkStatus()\n self.tables_dict = {}\n self.table_join_dict = {}\n self.__map_tables()\n self.today = datetime.today().strftime(\"%Y-%m-%d\")\n\n def __map_tables(self):\n table_entities = 
self.metadata.list_entities(entity=Table)\n for table in table_entities.entities:\n if table.name.__root__ not in self.tables_dict.keys():\n self.tables_dict[table.name.__root__] = table\n\n @classmethod\n def create(\n cls, config_dict: dict, metadata_config_dict: dict, ctx: WorkflowContext\n ):\n config = MetadataUsageSinkConfig.parse_obj(config_dict)\n metadata_config = MetadataServerConfig.parse_obj(metadata_config_dict)\n return cls(ctx, config, metadata_config)\n\n def handle_work_unit_start(self, wu):\n pass\n\n def handle_work_unit_end(self, wu):\n pass\n\n def write_records(self) -> None:\n usage_records = [json.loads(l) for l in self.file_handler.readlines()]\n for record in usage_records:\n table_usage = TableUsageCount(**json.loads(record))\n if \".\" in table_usage.table:\n table_usage.table = table_usage.table.split(\".\")[1]\n if table_usage.table in self.tables_dict:\n table_entity = self.tables_dict[table_usage.table]\n table_usage_request = TableUsageRequest(\n date=table_usage.date, count=table_usage.count\n )\n try:\n self.metadata.publish_table_usage(table_entity, table_usage_request)\n except APIError as err:\n self.status.failures.append(table_usage_request)\n logger.error(\n \"Failed to update usage for {} {}\".format(\n table_usage.table, err\n )\n )\n\n table_join_request = self.__get_table_joins(table_usage)\n logger.debug(\"table join request {}\".format(table_join_request))\n try:\n if (\n table_join_request is not None\n and len(table_join_request.columnJoins) > 0\n ):\n self.metadata.publish_frequently_joined_with(\n table_entity, table_join_request\n )\n except APIError as err:\n self.status.failures.append(table_join_request)\n logger.error(\n \"Failed to update query join for {}, {}\".format(\n table_usage.table, err\n )\n )\n\n else:\n logger.warning(\n \"Table does not exist, skipping usage publish {}, {}\".format(\n table_usage.table, table_usage.database\n )\n )\n try:\n self.metadata.compute_percentile(Table, self.today)\n self.metadata.compute_percentile(Database, self.today)\n except APIError:\n logger.error(\"Failed to publish compute.percentile\")\n\n def __get_table_joins(self, table_usage):\n table_joins: TableJoins = TableJoins(columnJoins=[], startDate=table_usage.date)\n column_joins_dict = {}\n joined_with = {}\n for column_join in table_usage.joins:\n if column_join.table_column is None or len(column_join.joined_with) == 0:\n continue\n\n if column_join.table_column.column in column_joins_dict.keys():\n joined_with = column_joins_dict[column_join.table_column.column]\n else:\n column_joins_dict[column_join.table_column.column] = {}\n\n main_column_fqdn = self.__get_column_fqdn(column_join.table_column)\n for column in column_join.joined_with:\n joined_column_fqdn = self.__get_column_fqdn(column)\n\n if joined_column_fqdn in joined_with.keys():\n column_joined_with = joined_with[joined_column_fqdn]\n column_joined_with.joinCount += 1\n joined_with[joined_column_fqdn] = column_joined_with\n elif joined_column_fqdn is not None:\n joined_with[joined_column_fqdn] = ColumnJoinedWith(\n fullyQualifiedName=joined_column_fqdn, joinCount=1\n )\n else:\n logger.info(\"Skipping join columns for {}\".format(column))\n column_joins_dict[column_join.table_column.column] = joined_with\n\n for key, value in column_joins_dict.items():\n table_joins.columnJoins.append(\n ColumnJoins(columnName=key, joinedWith=list(value.values()))\n )\n return table_joins\n\n def __get_column_fqdn(self, table_column: TableColumn):\n if table_column.table not in 
self.tables_dict:\n            return None\n        table_entity = self.tables_dict[table_column.table]\n        for tbl_column in table_entity.columns:\n            if table_column.column.lower() == tbl_column.name.__root__.lower():\n                return tbl_column.fullyQualifiedName.__root__\n\n    def get_status(self):\n        return self.status\n\n    def close(self):\n        self.file_handler.close()\n        self.metadata.close()\n","sub_path":"ingestion/src/metadata/ingestion/bulksink/metadata_usage.py","file_name":"metadata_usage.py","file_ext":"py","file_size_in_byte":7491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"339787524","text":"## https://www.kaggle.com/c/GiveMeSomeCredit/data\n## https://www.youtube.com/watch?v=yX8KuPZCAMo\n\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.utils import resample\nfrom scipy import stats\n\n##### Creating charts\ndef graphs_charts(df):\n    print(pd.crosstab(df['SeriousDlqin2yrs'], columns=\"count\"))\n    plt.show(df.boxplot('age', by='SeriousDlqin2yrs'))\n    plt.show(df.boxplot('NumberOfTime30-59DaysPastDueNotWorse', by='SeriousDlqin2yrs'))\n    plt.show(df.boxplot('DebtRatio', by='SeriousDlqin2yrs'))\n    plt.show(df.boxplot('MonthlyIncome', by='SeriousDlqin2yrs'))\n    plt.show(df.boxplot('NumberOfOpenCreditLinesAndLoans', by='SeriousDlqin2yrs'))\n    plt.show(df.boxplot('NumberOfTimes90DaysLate', by='SeriousDlqin2yrs'))\n    plt.show(df.boxplot('NumberRealEstateLoansOrLines', by='SeriousDlqin2yrs'))\n    plt.show(df.boxplot('NumberOfTime60-89DaysPastDueNotWorse', by='SeriousDlqin2yrs'))\n    plt.show(df.boxplot('NumberOfDependents', by='SeriousDlqin2yrs'))\n\n\n##### Reading the data set\ndef read_dataset():\n    df = pd.read_csv(\"./cs_training.csv\")\n    print(df.describe()) ## Describe the data set\n    df=df.fillna(0) ## Assign All NaN to zero\n    print('Import dataframe shape',df.shape) ##dataframe size\n    print(df.head(2)) ##first 2 rows of the data frame\n    print(list(df)) ##column names\n    ########## Adjust sample to balance the data\n    df_majority = df[df.SeriousDlqin2yrs == 'N'] ##Identifying majority\n    df_minority = df[df.SeriousDlqin2yrs == 'Y'] ##Identifying minority\n    print('majority', df_majority.shape)\n    print('minority', df_minority.shape)\n    print('********************')\n    # Downsample the majority class\n    df_majority_resample = resample(df_majority, replace=False, n_samples=16000, random_state=123)\n    print('new maj sample', df_majority_resample.shape)\n    df_downsample = pd.concat([df_majority_resample, df_minority])\n    print('Down sample final size', df_downsample.shape)\n    print('************')\n    #graphs_charts(df_downsample)\n    df_Final = df_downsample[(np.abs(stats.zscore(df_downsample[df_downsample.columns[1:11]])) < 2).all(axis=1)]\n    print('After Removing Outliers', df_Final.shape)\n    #graphs_charts(df_Final)\n    ##########\n    x = df_Final[df_Final.columns[1:11]] ##columns = ['b', 'c'] ,df1 = pd.DataFrame(df, columns=columns) getting columns\n    y = df_Final[df_Final.columns[0]] ##columns = ['b', 'c'] ,df1 = pd.DataFrame(df, columns=columns) getting columns\n    encoder=LabelEncoder()\n    encoder.fit(y)\n    y=encoder.transform(y)\n    Y=one_hot_encode(y)\n    print(x.shape)\n    return(x,Y)\n\n# define the encoder function\ndef one_hot_encode(labels):\n    n_labels=len(labels)\n    n_unique_labels=len(np.unique(labels))\n 
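# Note (editor): the two lines below build an (n_labels, n_unique_labels) zero matrix and use integer fancy indexing to set one column per row to 1, i.e. standard one-hot encoding.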
one_hot_encode=np.zeros((n_labels,n_unique_labels))\n one_hot_encode[np.arange(n_labels),labels]=1\n return one_hot_encode\n\n# read the data set\nx,Y=read_dataset()\n\n#shuffle the dataset to mix up the rows\nx,Y=shuffle(x,Y,random_state=10)\n\n# convert data set into training and testing data set\ntrain_x,test_x,train_y,test_y=train_test_split(x,Y,test_size=0.20,random_state=42)\n\n\n# Scaled to a small range like 0 to 1\nscaler = MinMaxScaler(feature_range=(0, 1))\n\n# Scale both the training & testing data\ntrain_x1 = scaler.fit_transform(train_x.astype(np.float32))\ntest_x1 = scaler.fit_transform(test_x.astype(np.float32))\n\n# Inspect the shape of the training and testing\nprint(train_x.shape)\nprint(train_x1.shape)\nprint(train_y.shape)\nprint(test_x.shape)\nprint(test_x1.shape)\nprint(test_y.shape)\n\n# Define parameters and variables\nlearning_rate=0.05\ntraining_epochs=1000\ncost_history=np.empty(shape=[1],dtype=float)\nn_dim=x.shape[1]\nprint(\"n_dim\",n_dim)\nn_class=2\nmodel_path=\"D:/Sajith/test1/CrForcat/test\"\n\n#Define number of hidden layers and neurons for each layer\nn_hidden_1=10\nn_hidden_2=50\nn_hidden_3=60\nn_hidden_4=20\n\nx = tf.placeholder(tf.float32,[None,n_dim])\nW = tf.Variable(tf.zeros([n_dim,n_class]))\nb = tf.Variable(tf.zeros([n_class]))\ny_=tf.placeholder(tf.float32,[None,n_class])\n\n# Define the model\ndef multilayer_perception(x,weights,biases):\n layer_1=tf.add(tf.matmul(x,weights['h1']),biases['b1'])\n layer_1=tf.nn.tanh(layer_1)\n\n layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\n layer_2 = tf.nn.tanh(layer_2)\n\n layer_3 = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])\n layer_3 = tf.nn.tanh(layer_3)\n\n layer_4 = tf.add(tf.matmul(layer_3, weights['h4']), biases['b4'])\n layer_4 = tf.nn.tanh(layer_4)\n\n #output layer with linear activation\n out_layer=tf.matmul(layer_4,weights['out'])+biases['out']\n return out_layer\n\n#Weights\nweights={\n 'h1':tf.Variable(tf.truncated_normal([n_dim,n_hidden_1])),\n 'h2':tf.Variable(tf.truncated_normal([n_hidden_1,n_hidden_2])),\n 'h3':tf.Variable(tf.truncated_normal([n_hidden_2,n_hidden_3])),\n 'h4':tf.Variable(tf.truncated_normal([n_hidden_3,n_hidden_4])),\n 'out':tf.Variable(tf.truncated_normal([n_hidden_4,n_class]))\n }\n#Biases\nbiases={\n 'b1':tf.Variable(tf.truncated_normal([n_hidden_1])),\n 'b2':tf.Variable(tf.truncated_normal([n_hidden_2])),\n 'b3':tf.Variable(tf.truncated_normal([n_hidden_3])),\n 'b4':tf.Variable(tf.truncated_normal([n_hidden_4])),\n 'out':tf.Variable(tf.truncated_normal([n_class]))\n }\n\n# Initialize all the variables\ninit=tf.global_variables_initializer()\nsaver=tf.train.Saver()\n\n# Call the model defined\ny=multilayer_perception(x,weights,biases)\n\n#Cost function\ncost_function=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=y,labels=y_))\n#Optimizer\ntraining_step=tf.train.GradientDescentOptimizer(learning_rate).minimize(cost_function)\n\nsess=tf.Session()\nsess.run(init)\n\n#calculate the cost and the accuracy for each epoch\nmse_history=[]\naccuracy_history=[]\n\nfor epoch in range(training_epochs):\n sess.run(training_step,feed_dict={x:train_x1,y_:train_y})\n cost=sess.run(cost_function,feed_dict={x:train_x1,y_:train_y})\n cost_history=np.append(cost_history,cost)\n correct_prediction=tf.equal(tf.argmax(y,1),tf.argmax(y_,1))\n accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))\n\n pred_y=sess.run(y,feed_dict={x:test_x1})\n mse=tf.reduce_mean(tf.square(pred_y-test_y))\n mse_=sess.run(mse)\n mse_history.append(mse_)\n 
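# Note (editor): the accuracy computed below is evaluated on the training set (train_x1/train_y), so it tracks per-epoch training accuracy; the MSE above is the only metric computed on the held-out test data.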
accuracy=(sess.run(accuracy,feed_dict={x:train_x1,y_:train_y}))\n    accuracy_history.append(accuracy)\n\n    print('epoch:',epoch,'-','cost',cost,\"-MSE\",mse,\"-Train Accuracy\",accuracy)\n\nsave_path=saver.save(sess,model_path)\nprint(\"Model saved in the file %s\" % save_path)\n\n#MSE and accuracy graph\nplt.show(plt.plot(mse_history,'r'))\nplt.show(plt.plot(accuracy_history))\n\n#print the final accuracy\ncorrect_prediction=tf.equal(tf.argmax(y,1),tf.argmax(y_,1))\naccuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))\nprint('Test accuracy: ',sess.run(accuracy,feed_dict={x:test_x1,y_:test_y}))\nprint('***********')\n\n#print final mse\npred_y=sess.run(y,feed_dict={x:test_x1})\nmse=tf.reduce_mean(tf.square(pred_y-test_y))\nprint(\"MSE : %.4f\" % sess.run(mse))\n\n","sub_path":"CrFrReGitHub.py","file_name":"CrFrReGitHub.py","file_ext":"py","file_size_in_byte":7270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"234581805","text":"import gym\nimport numpy as np\nfrom baselines.ppo2 import ppo2\n\nfrom baselines.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom baselines.common.vec_env.vec_normalize import VecNormalize\nclass MyReward(gym.Wrapper):\n    def __init__(self, env):\n        super(MyReward, self).__init__(env)\n        self.m_RwardList = []\n        self.m_count = 0\n\n    def step(self, action):\n        obs, reward, done, info = self.env.step(action)\n        # reward=0\n        self.m_count += 1\n        if self.m_count%60000==0:\n            print(\"frame\",self.m_count)\n        # print(\"reware\",self.m_count,action,reward,done,info)\n        if not done:\n            self.m_RwardList.append(reward)\n        else:\n            # if info[\"done\"]:\n            #     reward=100\n            # self.m_RwardList.append(reward)\n            iMeanReward = np.sum(self.m_RwardList)\n            print(\"mean_reward\", iMeanReward)\n            self.m_RwardList = []\n        return obs, reward, done, info\n\n\ndef EnvFunc():\n    oEnv = gym.make(\"MountainCar-v0\")\n    oEnv = MyReward(oEnv)\n    return oEnv\n\ndef Train():\n    env=DummyVecEnv([EnvFunc]*1)\n    env = VecNormalize(env,ob=True, ret=True)\n    act=ppo2.learn(\n        network=\"mlp\",\n        env=env,\n        # lr=3e-4,\n        nsteps=256,\n        nminibatches=8,\n        # lam=0.94,\n        total_timesteps=6000000,\n        # log_interval=100,\n        # save_interval=500,\n        num_layers=3,\n        num_hidden=256,\n        value_network=\"copy\"\n    )\n    act.save(\"ppo2_model/model\")\n\n\n\nif __name__==\"__main__\":\n    Train()","sub_path":"baselines_usage/baselines/baselines_ppo_moutaincar.py","file_name":"baselines_ppo_moutaincar.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"22483762","text":"import random\r\n\r\ndef rand(n):\r\n    numbers = []\r\n    for x in range(n):\r\n        numbers.append(x)\r\n    random.shuffle(numbers)\r\n    return numbers\r\n\r\ndef nearlySort(n):\r\n    numbers = []\r\n    for x in range(n):\r\n        numbers.append(x)\r\n    \r\n    for x in range(n-1):\r\n        if random.random() < 0.5:\r\n            numbers[x], numbers[x+1] = numbers[x+1], numbers[x]\r\n    return numbers\r\n\r\ndef nearlyRev(n):\r\n    numbers = []\r\n    for x in range(n-1, -1, -1):\r\n        numbers.append(x)\r\n    \r\n    for x in range(n-1):\r\n        if random.random() < 0.5:\r\n            numbers[x], numbers[x+1] = numbers[x+1], numbers[x]\r\n    return numbers\r\n\r\ndef gauss(n):\r\n    random.seed(1)\r\n    numbers = []\r\n    for x in range(n):\r\n        numbers.append(random.gauss(0,1))\r\n    return numbers\r\n\r\ndef fromSet(n, set):\r\n    numbers = []\r\n    for x in range(n):\r\n        numbers.append(random.choice(set))\r\n    return 
numbers","sub_path":"Adrian-Gorski-Zestaw11/zadanie1.py","file_name":"zadanie1.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"491334735","text":"# Import modules for running the Flask webapp\nfrom __future__ import print_function, division\nimport os, sys, flask\nfrom flask import request, render_template, send_from_directory, current_app, session\nfrom ..model.database import db\n\nfrom .oauth import *\nimport calImport, datetime\n\n\n# Parse user input\ndef validate_user(data):\n\tvalid = {}\n\ttestEvent = calImport.calevent()\n\n\t# Basic event information\n\tif data['name'] != '':\n\t\tvalid['name'] = True\n\t\ttestEvent.save_title(data['name'])\n\telse: valid['name'] = False\n\tif data['description'] != '':\n\t\tvalid['description'] = True\n\t\ttestEvent.save_description(data['description'])\n\telse: valid['description'] = False\n\tif data['image'] != '':\n\t\tvalid['image'] = True\n\t\ttestEvent.save_image(data['image'])\n\n\t# Event location\n\tif data['location'] != '':\n\t\ttestEvent.save_location(data['location'])\n\t\tif testEvent.displayLocation is not None: valid['location'] = True\n\t\telse: valid['location'] = False\n\telse: valid['location'] = False\n\n\t# Event dates\n\tif data['start_date'] != '':\n\t\ttmp = data['start_date'].replace('/', ' ').replace('-', ' ').split()\n\t\tif len(tmp) == 3: valid['start_date'] = True\n\t\telse: valid['start_date'] = False\n\telse: valid['start_date'] = False\n\tif data['end_date'] != '':\n\t\ttmp = data['end_date'].replace('/', ' ').replace('-', ' ').split()\n\t\tif len(tmp) == 3: valid['end_date'] = True\n\t\telse: valid['end_date'] = False\n\telse: valid['end_date'] = False\n\n\t# Event times\n\tif data['start_time'] != '':\n\t\ttmp = data['start_time'].lower().replace(':',' ').replace('a',' a').replace('p',' p').split()\n\t\tprint(len(tmp))\n\t\tif len(tmp) == 3: valid['start_time'] = True\n\t\telse: valid['start_time'] = False\n\telse: valid['start_time'] = False\n\tif data['end_time'] != '':\n\t\ttmp = data['end_time'].lower().replace(':',' ').replace('a',' a').replace('p',' p').split()\n\t\tif len(tmp) == 3: valid['end_time'] = True\n\t\telse: valid['end_time'] = False\n\telse: valid['end_time'] = False\n\n\t# Save dates and times, if both are valid\n\tif valid['start_date'] and valid['start_time'] and valid['end_date'] and valid['end_time']:\n\t\tstart_date = ' '.join(data['start_date'].replace('/', ' ').replace('-', ' ').split())\n\t\tstart_time = ''.join(data['start_time'].lower().replace(':',' ').split())\n\t\tprint(start_date, start_time)\n\t\tstartdt = datetime.datetime.strptime(start_date+start_time, '%m %d %Y%I%M%p')\n\t\tend_date = ' '.join(data['end_date'].replace('/', ' ').replace('-', ' ').split())\n\t\tend_time = ''.join(data['end_time'].lower().replace(':',' ').split())\n\t\tenddt = datetime.datetime.strptime(end_date+end_time, '%m %d %Y%I%M%p')\n\t\ttestEvent.save_time(start = startdt, end = enddt)\n\n\t# Optional event information\n\tif data['contact_email'] != '':\n\t\ttmp = data['contact_email'].split('@')\n\t\tif len(tmp) != 2 or len(data['contact_email']) < 5: valid['contact_email'] = False\n\t\telse:\n\t\t\tif data['contact_email'][-4] != '.': valid['contact_email'] = False\n\t\t\telse: \n\t\t\t\tvalid['contact_email'] = True\n\t\t\t\ttestEvent.save_contact(email=data['contact_email'])\n\tif data['contact_phone'] != '':\n\t\ttmp = 
''.join(data['contact_phone'].replace('.','').replace('(','').replace(')','').replace('-','').split())\n\t\tif len(tmp) != 10 or not tmp.isdigit(): valid['contact_phone'] = False\n\t\telse: \n\t\t\ttestEvent.save_contact(phone=data['contact_phone'])\n\t\t\tif testEvent.contactPhone is not None: valid['contact_phone'] = True\n\n\t# Ticket information\n\tif data['ticket_price'] != '':\n\t\ttry:\n\t\t\tprice = float(data['ticket_price'])\n\t\t\tvalid['ticket_price'] = True\n\t\t\ttestEvent.save_tickets(price=price)\n\t\texcept: valid['ticket_price'] = False\n\tif data['ticket_info'] != '':\n\t\tvalid['ticket_info'] = True\n\t\ttestEvent.save_tickets(info=data['ticket_info'])\n\tif data['ticket_link'] != '':\n\t\tvalid['ticket_link'] = True\n\t\ttestEvent.save_tickets(link=data['ticket_link'])\n\n\treturn valid, testEvent\n\n\naddevent_page = flask.Blueprint(\"addevent_page\", __name__)\n@addevent_page.route('/add.html', methods=['GET', 'POST'])\ndef func_name():\n\ttemplateDict = {}\n\ttemplateDict['login'] = oauth_name()\n\tif 'action' in request.form.keys():\n\t\tpost_data = request.form\n\telse: post_data = {'action': 'none'}\n\tprint(post_data)\n\n\n\t# ------------------------------------\n\t# NOT LOGGED IN\n\t# ------------------------------------\n\tif not oauth_name():\n\t\treturn render_template(\"add_anon.html\", **templateDict)\n\n\n\t# ------------------------------------\n\t# CREATE\n\t# ------------------------------------\n\tif post_data['action'] == 'none':\n\t\treturn render_template(\"add.html\", **templateDict)\n\n\n\t# ------------------------------------\n\t# REVIEW\n\t# ------------------------------------\n\tif post_data['action'] == 'review':\n\t\tvalid, testEvent = validate_user(post_data)\n\t\ttemplateDict['data'] = post_data\n\t\ttemplateDict['valid'] = valid\n\t\ttemplateDict['test'] = testEvent\n\t\tsubmit = 1\n\t\tfor k,v in valid.iteritems():\n\t\t\tif not v: submit = 0\n\t\ttemplateDict['submit'] = (submit == 1)\n\n\t\treturn render_template(\"add_review.html\", **templateDict)\n\n\n\t# ------------------------------------\n\t# SUBMISSION\n\t# ------------------------------------\n\tif post_data['action'] == 'submit':\t\n\t\t# Do one more validation\n\t\tvalid, testEvent = validate_user(post_data)\n\t\tif valid:\n\t\t\t# Event data is correct, save to database\n\t\t\ttestEvent.generate_hash()\n\t\t\tcalImport.add_db_event(db, [testEvent])\n\t\t\treturn render_template(\"add.html\", **templateDict)\n\t\telse:\n\t\t\t# Data isn't valid anymore, return the review\n\t\t\ttemplateDict['data'] = post_data\n\t\t\ttemplateDict['valid'] = valid\n\t\t\ttemplateDict['test'] = testEvent\n\t\t\tsubmit = 1\n\t\t\tfor k,v in valid.iteritems():\n\t\t\t\tif not v: submit = 0\n\t\t\ttemplateDict['submit'] = (submit == 1)\n\t\t\treturn render_template(\"add_review.html\", **templateDict)\n","sub_path":"python/web/controllers/addevent.py","file_name":"addevent.py","file_ext":"py","file_size_in_byte":5708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"507017880","text":"#!/usr/bin/env python\n\nimport mapnik\nfrom osgeo import ogr\nfrom osgeo import osr\n\n\nshp_file = \"data/world_merc.shp\"\nlabel_field = 'NAME'\n\n\ndef getFieldInShapefile(shapefile, field):\n datasource = ogr.Open(shapefile)\n layer = datasource.GetLayer(0)\n layerDefinition = layer.GetLayerDefn()\n for i in range(layerDefinition.GetFieldCount()):\n if field == layerDefinition.GetFieldDefn(i).GetName():\n return True\n return False\n\ndef 
getProj4FromShapefile(shapefile):\n '''Get epsg from shapefile'''\n # Read prj file\n prj_file = shapefile[0:-4] + '.prj'\n prj_filef = open(prj_file, 'r')\n prj_txt = prj_filef.read()\n prj_filef.close()\n # Create spatial reference object\n srs = osr.SpatialReference()\n srs.ImportFromESRI([prj_txt])\n srs.AutoIdentifyEPSG()\n return srs.ExportToProj4()\n\ndef _normalizeGeomTypes(geom_types):\n normalized = set()\n for geom_type in geom_types:\n geom_type = geom_type.replace('MULTI', '')\n normalized.add(geom_type)\n return list(normalized)\n\ndef getGeometryTypes(shapefile):\n geom_types = set()\n driver = ogr.GetDriverByName(\"ESRI Shapefile\")\n datasource = driver.Open(shapefile, 0)\n layer = datasource.GetLayer()\n for feature in layer:\n geom = feature.GetGeometryRef()\n geom_type = geom.GetGeometryName()\n geom_types.add(geom_type)\n geom_types = list(geom_types)\n geom_types = _normalizeGeomTypes(geom_types)\n return geom_types\n\n\ndef getLineStyle():\n # add style\n style = mapnik.Style() # style object to hold rules\n rule = mapnik.Rule() # rule object to hold symbolizers\n # to add outlines to a polygon we create a LineSymbolizer\n line_symbolizer = mapnik.LineSymbolizer()\n line_symbolizer.stroke = mapnik.Color('black')\n line_symbolizer.stroke_width = 0.1\n rule.symbols.append(line_symbolizer) # add the symbolizer to the rule object\n style.rules.append(rule) # now add the rule to the style and we're done\n return style\n\ndef getPolygonStyle():\n # add style\n style = mapnik.Style() # style object to hold rules\n rule = mapnik.Rule() # rule object to hold symbolizers\n # to fill a polygon we create a PolygonSymbolizer\n polygon_symbolizer = mapnik.PolygonSymbolizer()\n polygon_symbolizer.fill = mapnik.Color('#f2eff9')\n rule.symbols.append(polygon_symbolizer) # add the symbolizer to the rule object\n # to add outlines to a polygon we create a LineSymbolizer\n line_symbolizer = mapnik.LineSymbolizer()\n line_symbolizer.stroke = mapnik.Color('black')\n line_symbolizer.stroke_width = 0.1\n rule.symbols.append(line_symbolizer) # add the symbolizer to the rule object\n style.rules.append(rule) # now add the rule to the style and we're done\n return style\n\ndef getPointStyle():\n # add style\n style = mapnik.Style() # style object to hold rules\n rule = mapnik.Rule() # rule object to hold symbolizers\n marker = mapnik.MarkersSymbolizer()\n # marker.fill_opacity = .5\n # marker.opacity = .5\n marker.height = mapnik.Expression(\"3\")\n marker.width = mapnik.Expression(\"3\")\n marker.fill = mapnik.Color('black')\n rule.symbols.append(marker)\n style.rules.append(rule)\n return style\n\ndef getTextStyle(fieldname):\n style = mapnik.Style() # style object to hold rules\n rule = mapnik.Rule() # rule object to hold symbolizers\n # print(help(mapnik.TextSymbolizer))\n print(mapnik.mapnik_version())\n symbolizer = mapnik.TextSymbolizer(\n mapnik.Expression('['+fieldname+']'),\n 'DejaVu Sans Book',\n 10,\n mapnik.Color('black')\n )\n\n # symbolizer = mapnik.TextSymbolizer()\n\n # print(symbolizer, dir(symbolizer))\n\n # print(symbolizer.properties.format_tree.text)\n\n # symbolizer.face_name = mapnik.FormattingText('DejaVu Sans Book')\n # symbolizer.face_name = 'DejaVu Sans Book'\n # symbolizer.properties.format_tree = mapnik.FormattingText('DejaVu Sans Book')\n # symbolizer.name = mapnik.Expression('['+fieldname+']')\n\n\n symbolizer.halo_fill = mapnik.Color('white')\n symbolizer.halo_radius = 1\n symbolizer.label_placement = label_placement.LINE_PLACEMENT # POINT_PLACEMENT is 
default\n    symbolizer.allow_overlap = False\n    symbolizer.avoid_edges = True\n    rule.symbols.append(symbolizer)\n    style.rules.append(rule)\n    return style\n\n\ndef main():\n    # print( getFieldInShapefile(shp_file, label_field) )\n\n    geom_types = getGeometryTypes(shp_file)\n\n    proj4 = getProj4FromShapefile(shp_file)  # currently unused; layer.srs keeps its default below\n\n    m = mapnik.Map(600,300)\n\n    for geom_type in geom_types:\n        if 'POLYGON' == geom_type:\n            s = getPolygonStyle()\n            m.append_style('Polygon Style', s) # Styles are given names only as they are applied to the map\n        elif 'LINESTRING' == geom_type:\n            s = getLineStyle()\n            m.append_style('LineString Style', s) # Styles are given names only as they are applied to the map\n        elif 'POINT' == geom_type:\n            s = getPointStyle()\n            m.append_style('Point Style', s) # Styles are given names only as they are applied to the map\n        else:\n            raise ValueError('Uncaught geometry type: ' + geom_type)\n\n    # if label_field and getFieldInShapefile(shp_file, label_field):\n    #     s = getTextStyle(label_field)\n    #     m.append_style('Label Style', s)\n\n    # add datasource\n    ds = mapnik.Shapefile(file=shp_file)\n\n    # add layer\n    layer = mapnik.Layer('Layer')\n    # note: layer.srs will default to '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'\n    layer.datasource = ds\n    layer.styles.append('Polygon Style')\n    layer.styles.append('LineString Style')\n    layer.styles.append('Point Style')\n\n    # add layer to map\n    m.layers.append(layer)\n    m.zoom_all()\n\n    print(mapnik.save_map_to_string(m))\n    mapnik.save_map(m, 'style.xml')\n\n    mapnik.render_to_file(m,'sample.png', 'png')\n\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"mapnik_style_generator/old/gen_stylesheet.py","file_name":"gen_stylesheet.py","file_ext":"py","file_size_in_byte":6075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"133437379","text":"\"\"\"\nComparing weight initial values and neural network performance using MNIST data\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom ch06.ex02_sgd import Sgd\nfrom ch06.ex03_momentum import Momentum\nfrom ch06.ex05_adam import Adam\nfrom common.multi_layer_net import MultiLayerNet\nfrom dataset.mnist import load_mnist\n\nif __name__ == '__main__':\n    # Set up the experiment conditions\n    weight_init_types = {\n        'std=0.01': 0.01,\n        'Xavier': 'sigmoid',  # weight init: N(0, sqrt(1/n))\n        'He': 'relu'  # weight init: N(0, sqrt(2/n))\n    }\n\n    # Create one network to test per experiment condition\n    neural_nets = dict()\n    train_losses = dict()\n    for key, init_type in weight_init_types.items():\n        neural_nets[key] = MultiLayerNet(input_size=784,\n                                         hidden_size_list=[100, 100, 100, 100],\n                                         output_size=10,\n                                         weight_init_std=init_type,\n                                         activation='sigmoid')\n        train_losses[key] = []  # empty list to collect loss values during training\n\n    # Load the MNIST train/test data\n    (X_train, Y_train), (X_test, Y_test) = load_mnist(one_hot_label=True)\n\n    iterations = 2_000  # number of training iterations\n    batch_size = 128  # number of samples per update (mini-batch)\n    # optimizer = Sgd(learning_rate=0.01)  # parameter optimization algorithm\n    # swap the optimizer to test alternatives (later)\n    optimizer = Adam(lr=0.01)\n    # optimizer = Momentum(lr=0.01)\n\n    np.random.seed(109)\n    # Repeat 2,000 times\n    for i in range(iterations):\n        # Draw a random mini-batch of samples\n        idx = np.random.choice(len(X_train), batch_size)\n        X_batch = X_train[idx]\n        Y_batch = Y_train[idx]\n        # Loop over each test network\n        for key, net in neural_nets.items():\n            # compute the gradient\n            grad = net.gradient(X_batch, Y_batch)\n            # update the parameters (W, b)\n            optimizer.update(net.params, grad)\n            # compute the loss -> append to the list\n            
train_losses[key].append(net.loss(X_batch, Y_batch))\n        # Print a sample of the losses\n        if (i + 1) % 100 == 0:\n            print(f'========== iteration {i + 1} / {iterations} ==========')\n            for key in weight_init_types:\n                print(key, ':', train_losses[key][-1])\n\n    # Plot the graph: x axis - iteration count, y axis - loss\n    x = np.arange(iterations)\n    for key in weight_init_types:\n        plt.plot(x, train_losses[key], label=key)\n    plt.xlabel('iteration')\n    plt.ylabel('loss')\n    plt.legend()\n    plt.show()\n","sub_path":"ch06/ex10_weight_compare.py","file_name":"ex10_weight_compare.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"399724552","text":"import logging\nimport re\nfrom datetime import datetime\nfrom typing import Iterator, Type, Union\n\nfrom nintendeals.classes import N3dsGame, SwitchGame\nfrom nintendeals.classes.games import Game\nfrom nintendeals.constants import NA, N3DS\nfrom nintendeals.noa.external import algolia\n\nBASE = \"https://www.nintendo.com\"\n\nlog = logging.getLogger(__name__)\n\n\ndef _list_games(\n    game_class: Type,\n    **kwargs\n) -> Iterator[Union[N3dsGame, SwitchGame]]:\n\n    for data in algolia.search_games(platform=game_class.platform, **kwargs):\n        game = game_class(\n            region=NA,\n            title=data[\"title\"],\n            nsuid=data.get(\"nsuid\"),\n        )\n\n        game.slug = data[\"slug\"]\n        game.genres = list(sorted(data.get(\"categories\", [])))\n\n        try:\n            release_date = data.get(\"releaseDateMask\", \"\").split(\"T\")[0]\n            game.release_date = datetime.strptime(release_date, '%Y-%m-%d')\n        except ValueError:\n            pass\n\n        try:\n            # .get() avoids a KeyError when the field is missing; int('') still\n            # raises ValueError, which keeps the fallback to 0 below\n            game.players = int(re.sub(r\"[^\\d]*\", \"\", data.get(\"players\", \"\")))\n        except ValueError:\n            game.players = 0\n\n        game.description = data.get(\"description\")\n        game.free_to_play = data.get(\"msrp\") == 0.0\n        game.publisher = data.get(\"publishers\", [None])[0]\n        game.developer = data.get(\"developers\", [None])[0]\n\n        box_art = data.get(\"boxArt\")\n        game.cover_img = (BASE + box_art) if box_art else None\n\n        if game.platform == N3DS:\n            game.virtual_console = data.get(\"virtualConsole\", \"na\") != \"na\"\n\n        yield game\n\n\ndef list_3ds_games(**kwargs) -> Iterator[Game]:\n    \"\"\"\n    List all the 3DS games from Nintendo of America. The following subset\n    of data will be available for each game.\n\n    Game data\n    ---------\n    * platform: str [\"Nintendo 3DS\"]\n    * region: str [\"NA\"]\n    * title: str\n    * nsuid: str (optional)\n    * product_code: str (unsupported)\n\n    * slug: str\n\n    * description: str\n    * developer: str\n    * free_to_play: bool\n    * genres: List[str]\n    * publisher: str\n    * release_date: datetime\n    * virtual_console: bool\n\n    * cover_img: str\n\n    Yields\n    -------\n    nintendeals.classes.N3dsGame:\n        3DS game from Nintendo of America.\n    \"\"\"\n    log.info(\"Fetching list of Nintendo 3DS games\")\n\n    yield from _list_games(N3dsGame, **kwargs)\n\n\ndef list_switch_games(**kwargs) -> Iterator[SwitchGame]:\n    \"\"\"\n    List all the Switch games from Nintendo of America. 
The following subset\n of data will be available for each game.\n\n Game data\n ---------\n * platform: str [\"Nintendo Switch\"]\n * region: str [\"NA\"]\n * title: str\n * nsuid: str (optional)\n * product_code: str (unsupported)\n\n * slug: str\n\n * description: str\n * developer: str\n * free_to_play: bool\n * genres: List[str]\n * publisher: str\n * release_date: datetime\n\n * box_art: str\n\n Yields\n -------\n nintendeals.classes.SwitchGame:\n Switch game from Nintendo of America.\n \"\"\"\n log.info(\"Fetching list of Nintendo Switch games\")\n\n yield from _list_games(SwitchGame, **kwargs)\n","sub_path":"nintendeals/noa/listing.py","file_name":"listing.py","file_ext":"py","file_size_in_byte":3198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"278625670","text":"import asyncio\nimport logging\nimport math\nimport os\nfrom collections import namedtuple\nfrom random import choice\nfrom time import time\n\nimport aiofiles\nimport aiohttp\nfrom aiohttp import client_exceptions\n\nlogging.basicConfig(\n format=\"%(asctime)s | %(levelname)-8s | %(message)s\",\n level=logging.INFO\n)\nlogger = logging.getLogger(__name__)\n\nCloudFrontAuth = namedtuple(\"CloudFrontAuth\", \"key_pair_id signature policy\")\nGeoPoint = namedtuple(\"GeoPoint\", \"latitude longitude\")\nscript_abs_dir = os.path.abspath(os.path.dirname(__file__))\ncache_dir = os.path.join(script_abs_dir, 'cache')\n\nauth_data = CloudFrontAuth(os.getenv('KEY_PAIR_ID'),\n os.getenv('SIGNATURE'),\n os.getenv('POLICY'))\n\npoint_1_x, point_1_y = os.getenv('AREA_APEX', '46.90946, 30.19284').split(',')\npoint_2_x, point_2_y = os.getenv('AREA_VERTEX', '46.10655, 31.39070').split(',')\n\nEMPTY_TILE = b\"\\x89PNG\\r\\n\\x1a\\n\\x00\\x00\\x00\\rIHDR\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\x00\\x01\\x03\\x00\\x00\\x00f\\xbc:%\" \\\n b\"\\x00\\x00\\x00\\x03PLTE\\x00\\x00\\x00\\xa7z=\\xda\\x00\\x00\\x00\\x01tRNS\\x00@\\xe6\\xd8f\\x00\\x00\\x00\\x1f\" \\\n b\"IDATh\\xde\\xed\\xc1\\x01\\r\\x00\\x00\\x00\\xc2 \\xfb\\xa76\\xc77`\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00q\\x07!\\x00\" \\\n b\"\\x00\\x01\\xa7W)\\xd7\\x00\\x00\\x00\\x00IEND\\xaeB`\\x82\"\n\n\nclass Tile:\n def __init__(self, x: int, y: int, z: int) -> None:\n self.x = x\n self.y = y\n self.z = z\n\n @classmethod\n def create_from_geo_coordinates(cls,\n point: GeoPoint,\n zoom: int):\n x, y = cls.geo_to_tile(point, zoom)\n return cls(x, y, zoom)\n\n @staticmethod\n def geo_to_tile(point: GeoPoint, zoom):\n lat_rad = math.radians(point.latitude)\n n = 2.0 ** zoom\n x = int((point.longitude + 180.0) / 360.0 * n)\n y = int((1.0 - math.asinh(math.tan(lat_rad)) / math.pi) / 2.0 * n)\n return x, y\n\n @classmethod\n def generate_from_area(cls,\n geo_coordinates_1: GeoPoint,\n geo_coordinates_2: GeoPoint,\n zoom_range):\n apex = GeoPoint(max(geo_coordinates_1.latitude, geo_coordinates_2.latitude),\n min(geo_coordinates_1.longitude, geo_coordinates_2.longitude))\n vertex = GeoPoint(min(geo_coordinates_1.latitude, geo_coordinates_2.latitude),\n max(geo_coordinates_1.longitude, geo_coordinates_2.longitude))\n\n for z in zoom_range:\n first_tile_x, first_tile_y = cls.geo_to_tile(apex, z)\n last_tile_x, last_tile_y = cls.geo_to_tile(vertex, z)\n for y in range(first_tile_y, last_tile_y + 1):\n for x in range(first_tile_x, last_tile_x + 1):\n yield Tile(x, y, z)\n\n def __repr__(self) -> str:\n return f\"Tile({self.x}, {self.y}, {self.z})\"\n\n def __eq__(self, other):\n return self.x == other.x and self.y == other.y and self.z == 
other.z\n\n\ndef filename_for_tile(tile: Tile) -> str:\n    return f\"{tile.z}/{tile.x}/{tile.y}.png.tile\"\n\n\nclass Cache:\n    def __init__(self, cache_abs_path: str) -> None:\n        self.cache_abs_path = cache_abs_path\n\n    def read(self, tile: Tile):\n        with open(self.abs_tile_path(tile), \"rb\") as file:\n            return file.read()\n\n    async def write(self, tile: Tile, content: bytes):\n        self.mkdir(tile)\n        async with aiofiles.open(self.abs_tile_path(tile), \"wb\") as file:\n            await file.write(content)\n\n    def tile_already_in_cache(self, tile: Tile) -> bool:\n        return os.path.isfile(self.abs_tile_path(tile))\n\n    def mkdir(self, tile: Tile):\n        os.makedirs(os.path.dirname(self.abs_tile_path(tile)), exist_ok=True)\n\n    def abs_tile_path(self, tile):\n        return os.path.join(self.cache_abs_path, filename_for_tile(tile))\n\n\nclass StravaFetcher:\n    free_tile_max_zoom = 11\n    url_auth = \"https://heatmap-external-{server}.strava.com/tiles-auth/{activity}/{color}/{z}/{x}/{y}.png\"\n    url_free = \"https://heatmap-external-{server}.strava.com/tiles/{activity}/{color}/{z}/{x}/{y}.png\"\n    semaphore_value = 10\n\n    def __init__(self,\n                 auth: CloudFrontAuth,\n                 cache: Cache,\n                 activity='ride',\n                 color='bluered') -> None:\n        self.auth = auth\n        self.cache = cache\n        self.activity = activity\n        self.color = color\n\n    def fetch(self, tiles: list[Tile]):\n        asyncio.run(self.task_queue(tiles))\n\n    def __url(self, tile) -> str:\n        if self.__tile_is_free(tile):\n            url = self.url_free\n        else:\n            url = self.url_auth\n        server = choice(('a', 'b', 'c',))\n        return url.format(server=server,\n                          activity=self.activity,\n                          color=self.color,\n                          x=tile.x, y=tile.y, z=tile.z)\n\n    def __url_params(self, tile: Tile):\n        params = {\"px\": \"256\",\n                  \"v\": \"19\"}\n        if not self.__tile_is_free(tile):\n            params.update({\"Key-Pair-Id\": self.auth.key_pair_id,\n                           \"Signature\": self.auth.signature,\n                           \"Policy\": self.auth.policy})\n        return params\n\n    def __tile_is_free(self, tile: Tile):\n        return tile.z <= self.free_tile_max_zoom\n\n    async def task_queue(self, tiles: list[Tile]):\n        # The semaphore has to be held around each download, not around the\n        # whole gather(); otherwise every task runs at once and the limit\n        # has no effect.\n        semaphore = asyncio.Semaphore(self.semaphore_value)\n\n        async def bounded_download(tile: Tile):\n            async with semaphore:\n                return await self.download_tile(tile)\n\n        return await asyncio.gather(*(bounded_download(tile) for tile in tiles))\n\n    async def download_tile(self, tile: Tile):\n        url = self.__url(tile)\n        params = self.__url_params(tile)\n        # a session per tile keeps the code simple; a shared session would be cheaper\n        async with aiohttp.ClientSession() as session:\n            try:\n                async with session.get(url, params=params) as response:\n                    if response.status == 200:\n                        content = await response.read()\n                        await self.cache.write(tile, content)\n                    elif response.status == 404:\n                        await self.cache.write(tile, EMPTY_TILE)\n                    else:\n                        logger.warning(\n                            f\"For {tile} received an unexpected status code {response.status}: {await response.text()}\"\n                        )\n            except client_exceptions.ServerDisconnectedError as e:\n                raise PermissionError(\"It is necessary to lower the value of the semaphore. 
%s\" % e) from e\n            except client_exceptions.ClientOSError as e:\n                raise PermissionError(\"%s\" % e) from e\n\n\nclass CacheWarmer:\n    def __init__(self, cache: Cache, strava_fetcher: StravaFetcher) -> None:\n        self.cache = cache\n        self.strava_fetcher = strava_fetcher\n\n    def warm_up(self,\n                geo_coordinates_1: GeoPoint,\n                geo_coordinates_2: GeoPoint,\n                zoom_range,\n                max_tiles=None):\n        tiles = []\n        for tile in Tile.generate_from_area(geo_coordinates_1, geo_coordinates_2, zoom_range):\n            if not self.cache.tile_already_in_cache(tile):\n                tiles.append(tile)\n            if max_tiles and len(tiles) >= max_tiles:\n                break\n        if not tiles:\n            logger.info(\"There are no tiles to load\")\n        else:\n            self.strava_fetcher.fetch(tiles)\n\n\nif __name__ == '__main__':\n    cache = Cache(cache_dir)\n    strava_fetcher = StravaFetcher(auth_data, cache)\n    warmer = CacheWarmer(cache, strava_fetcher)\n\n    start_time = time()\n    logger.info(\"Start building cache.\")\n    try:\n        warmer.warm_up(GeoPoint(float(point_1_x.strip()), float(point_1_y.strip())),\n                       GeoPoint(float(point_2_x.strip()), float(point_2_y.strip())),\n                       range(7, 17),\n                       max_tiles=8000)\n    except PermissionError as e:\n        logger.error(e)\n    logger.info(f\"Finished in {round((time() - start_time), 2)} seconds.\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"173528429","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom pandas import ExcelWriter\nimport pandas as pd\nimport datetime\n\n\ndef get_page(url):\n    r = requests.get(url)\n    page = BeautifulSoup(r.text, 'html.parser')\n    data = page.find('ul', class_=\"map__points\").find_all(\"li\")\n    return data\n\ndef get_good_data(all_store):\n    all_data = []\n    for li in all_store:\n\n        coords = li['data-point'].split(',')\n        x, y = coords[1], coords[0]\n\n        address = li.find(\"div\", class_=\"map-box__contact--address\")\n        if address is not None:\n            address = address.text.strip()\n\n        region = li.find(\"span\", class_=\"city_name_in_card\")\n        if region is not None:\n            region = region.text.strip()\n\n        phone = li.find(\"div\", class_=\"map-box__contact map-box__contact--phone\")\n        if phone is not None:\n            phone = phone.text.strip()\n\n        store_dict = {\n            'address': address,\n            'phone': phone,\n            'x': x,\n            'y': y,\n            'region': region,\n            'brand_name': 'У Палыча',\n            'holding_name': 'У Палыча',\n            'website': 'https://palich.ru',\n            'date_review': datetime.datetime.now(),\n        }\n        all_data.append(store_dict)\n\n    return all_data\n\ndef write_xlsx(df, name_file):\n    writer = ExcelWriter(f'{name_file}.xlsx')\n    df.to_excel(writer, 'Sheet1')\n    writer.save()\n    return 'FILE SAVED'\n\ndef palich_pd_data():\n    '''\n    1. Send a request to the url https://palich.ru/shops/\n    2. Use bs4 to find the fragment with the store data:\n       data = page.find('ul', class_=\"map__points\").find_all(\"li\")\n    3. In get_good_data(), after a series of checks that the data is present in a given row, build store_dict\n    4. Append each store_dict to all_data\n    5. Build a DataFrame from all_data\n    :return:\n    '''\n    url = 'https://palich.ru/shops/'\n    data = get_page(url)\n    data = get_good_data(data)\n    df = pd.DataFrame(data)\n    #write_xlsx(df, 'palich')\n    return df\n\n\n","sub_path":"crawlers/confectionery/palich.py","file_name":"palich.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"537993211","text":"#!/usr/bin/python3\n\"\"\" use fabric to backup directory \"\"\"\nfrom fabric.operations import local\nfrom datetime import datetime\nimport os\nfrom fabric.context_managers import lcd\n\n\ndef do_pack():\n    \"\"\" Generate tgz \"\"\"\n    time = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n    name = \"web_static_{}.tgz\".format(time)\n    local(\"mkdir -p versions\")\n    with lcd(\"./versions\"):\n        local_name = local(\"pwd\", capture=True)\n        packin = local(\"tar -czvf {} ../web_static\".format(name))\n    # return the archive path only if the tar command succeeded and the file exists\n    if packin.succeeded:\n        for r, d, f in os.walk(local_name):\n            for files in f:\n                if files == name:\n                    return os.path.join(r, files)\n    return None\n\n\nif __name__ == \"__main__\":\n    do_pack()\n","sub_path":"1-pack_web_static.py","file_name":"1-pack_web_static.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"203997236","text":"from typing import Text, List, Any, Dict\n\nfrom rasa_sdk import Tracker, FormValidationAction\nfrom rasa_sdk.executor import CollectingDispatcher\nfrom rasa_sdk.types import DomainDict\n\n\nclass ValidateInquiryForm(FormValidationAction):\n    def name(self) -> Text:\n        return \"validate_inquiry_form\"\n\n    @staticmethod\n    def account_number_db() -> List[Text]:\n        return [\"012345678912\", \"012345678913\", \"012345678914\"]\n\n    def validate_account_number(\n        self,\n        slot_value: Any,\n        dispatcher: CollectingDispatcher,\n        tracker: Tracker,\n        domain: DomainDict,\n    ) -> Dict[Text, Any]:\n        \"\"\"Validate account_number value.\"\"\"\n        if slot_value.lower() in self.account_number_db():\n            # validation succeeded, set the value of the \"account_number\" slot to value\n            return {\"account_number\": slot_value}\n        else:\n            # validation failed, set this slot to None so that the user will be asked for the slot again\n            return {\"account_number\": None}\n","sub_path":"validation_actions.py","file_name":"validation_actions.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"59526276","text":"import json\nimport requests\n\nimport scrapy\nfrom scrapy.http import Request\nfrom scrapy.spidermiddlewares.httperror import HttpError\n\nfrom twisted.internet.error import (\n    DNSLookupError,\n    ConnectionRefusedError,\n    TimeoutError,\n    TCPTimedOutError,\n    ConnectError,\n)\n\nfrom haipproxy.crawler.items import ProxyStatInc\nfrom haipproxy.utils import get_redis_conn\n\n\nclass BaseValidator(scrapy.Spider):\n    # per test, both http and https proxies respond to https requests equally,\n    # while https proxies don't respond to http requests\n    # It's common that a proxy succeeds on other sites but not on httpbin\n    custom_settings = {\n        \"CONCURRENT_REQUESTS\": 100,\n        \"CONCURRENT_REQUESTS_PER_DOMAIN\": 100,\n        \"RETRY_ENABLED\": False,\n        \"RETRY_TIMES\": 0,\n        \"ITEM_PIPELINES\": {\"haipproxy.crawler.pipelines.ProxyStatPipeline\": 200},\n        \"DOWNLOADER_MIDDLEWARES\": {\n            \"scrapy.downloadermiddlewares.useragent.UserAgentMiddleware\": 
None,\n            \"scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware\": None,\n            \"haipproxy.crawler.middlewares.RandomUserAgentMiddleware\": 400,\n        },\n    }\n    success_key = \"\"\n    good_count = 0\n    redis_conn = get_redis_conn()\n\n    def start_requests(self):\n        for proxy in self.redis_conn.scan_iter(match=\"*://*\"):\n            # compare the counter numerically; a byte-wise comparison would put b\"10\" before b\"2\"\n            if int(self.redis_conn.hget(proxy, \"used_count\") or 0) > 1:\n                continue\n            proxy = proxy.decode()\n            req = Request(\n                self.get_url(proxy),\n                dont_filter=True,\n                meta={\"proxy\": proxy},\n                callback=self.parse,\n                errback=self.parse_error,\n            )\n            yield req\n\n    def parse(self, response):\n        proxy = response.meta.get(\"proxy\")\n        seconds = int(response.meta.get(\"download_latency\"))\n        success = 1\n        fail = \"\"\n        if not self.is_ok(response):\n            success = 0\n            fail = \"badcontent\"\n            self.logger.error(f\"{proxy} got bad content\")\n        else:\n            self.good_count += 1\n            self.logger.info(f\"good ip {self.good_count} {proxy} ####\")\n        yield ProxyStatInc(proxy=proxy, success=success, seconds=seconds, fail=fail)\n\n    def parse_error(self, failure):\n        request = failure.request\n        proxy = request.meta.get(\"proxy\")\n        self.logger.warning(f\"proxy {proxy} has failed with:\\n{repr(failure)}\")\n        fail = \"unknown\"\n        if failure.check(HttpError):\n            # these exceptions come from HttpError spider middleware\n            # you can get the non-200 response\n            fail = \"HttpError \" + str(failure.response.status)\n        elif failure.check(DNSLookupError):\n            fail = \"DNSLookupError\"\n        # this is the original request\n        elif failure.check(TimeoutError):\n            fail = \"TimeoutError\"\n        elif failure.check(TCPTimedOutError):\n            fail = \"TCPTimedOutError\"\n        elif failure.check(ConnectionRefusedError):\n            fail = \"ConnectionRefusedError\"\n        elif failure.check(ConnectError):\n            # port exhaustion: no more ports for connection\n            # netsh int ipv4 set dynamicport tcp start=10000 num=55535\n            fail = \"ConnectError\"\n        yield ProxyStatInc(proxy=proxy, success=0, seconds=0, fail=fail)\n\n    def is_ok(self, response):\n        # TODO: check len(response.text)\n        return self.success_key in response.text\n\n    def get_url(self, proxy=\"\"):\n        raise NotImplementedError\n\n\nclass HttpbinValidator(BaseValidator):\n    name = \"vhttpbin\"\n\n    def __init__(self):\n        super().__init__()\n        self.origin_ip = requests.get(\"http://httpbin.org/ip\").json().get(\"origin\")\n\n    def get_url(self, proxy=\"\"):\n        if proxy.startswith(\"https\"):\n            return \"https://httpbin.org/ip\"\n        elif proxy.startswith(\"http\"):\n            return \"http://httpbin.org/ip\"\n        else:\n            self.logger.warning(f\"Unknown proxy: {proxy}\")\n            return \"http://httpbin.org\"\n\n    def is_ok(self, response):\n        # example: 'http://198.211.121.46:80'\n        proxy = response.meta.get(\"proxy\")\n        try:\n            ip = json.loads(response.text).get(\"origin\")\n        except Exception as e:\n            self.logger.error(f\"Unexpected error:{e}\")\n            return False\n        if self.origin_ip in ip:\n            # the proxy forwarded our real ip, so it is transparent\n            self.logger.error(f\"{proxy} is transparent\")\n            return False\n        return True\n\n\nclass CctvValidator(BaseValidator):\n    name = \"vcctv\"\n\n    def __init__(self):\n        self.success_key = \"中央电视台\"\n\n    def get_url(self, proxy=\"\"):\n        return \"http://www.cctv.com/\"\n\n\nclass UqerValidator(BaseValidator):\n    name = \"vuqer\"\n\n    def __init__(self):\n        self.success_key = \"优矿\"\n\n    def get_url(self, proxy=\"\"):\n        return \"https://uqer.io/\"\n","sub_path":"haipproxy/crawler/spiders/validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":4880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"129409930","text":"from wagtail.core import blocks\nfrom 
wagtail.documents.blocks import DocumentChooserBlock\nfrom wagtail.images.blocks import ImageChooserBlock\n\n\nclass AnnouncementBannerBlock(blocks.StructBlock):\n title = blocks.CharBlock()\n info = blocks.TextBlock()\n page = blocks.PageChooserBlock(required=False)\n url = blocks.URLBlock(required=False)\n start_date = blocks.DateBlock()\n end_date = blocks.DateBlock()\n\n class Meta:\n icon = 'media'\n template = 'dalme_public/blocks/_announcement_banner.html'\n\n\nclass BibliographyBlock(blocks.StructBlock):\n collection = blocks.ChoiceBlock(\n choices=[\n ('A4QHN348', 'Editions'),\n ('BKW2PVCM', 'Glossaries and dictionaries'),\n ('QM9AZNT3', 'Methodology'),\n ('SLIT6LID', 'Studies'),\n ('FRLVXUWL', 'Other resources')\n ],\n )\n\n class Meta:\n icon = 'list-ul'\n template = 'dalme_public/blocks/_bibliography.html'\n\n\nclass CarouselBlock(blocks.ListBlock):\n class Meta:\n icon = 'cogs'\n template = 'dalme_public/blocks/_carousel.html'\n\n\nclass ChartEmbedBlock(blocks.StructBlock):\n html = blocks.RawHTMLBlock()\n alignment = blocks.ChoiceBlock(\n choices=[\n ('left', 'Left-aligned'),\n ('right', 'Right-aligned'),\n ('full', 'Full-width')\n ],\n )\n\n class Meta:\n icon = 'image'\n template = 'dalme_public/blocks/_chart_embed.html'\n\n\nclass DocumentBlock(blocks.StructBlock):\n type = blocks.ChoiceBlock(\n choices=[\n ('document', 'Document'),\n ('publication', 'Publication'),\n ('talk', 'Talk')\n ],\n )\n title = blocks.CharBlock()\n abstract = blocks.CharBlock(required=False)\n author = blocks.CharBlock()\n detail = blocks.CharBlock(required=False)\n version = blocks.FloatBlock(required=False)\n document = DocumentChooserBlock(required=False)\n url = blocks.URLBlock(required=False)\n page = blocks.PageChooserBlock(required=False)\n date = blocks.DateBlock()\n\n class Meta:\n icon = 'doc-full'\n template = 'dalme_public/blocks/_document.html'\n\n\nclass FooterPageChooserBlock(blocks.StructBlock):\n title = blocks.CharBlock()\n page = blocks.PageChooserBlock()\n\n class Meta:\n icon = 'doc-full'\n template = 'dalme_public/blocks/_footer_page.html'\n\n\nclass ExternalResourceBlock(blocks.StructBlock):\n title = blocks.CharBlock()\n info = blocks.CharBlock()\n url = blocks.URLBlock()\n date = blocks.DateBlock()\n\n class Meta:\n icon = 'link'\n template = 'dalme_public/blocks/_external_resource.html'\n\n\nclass MainImageBlock(ImageChooserBlock):\n class Meta:\n icon = 'image'\n template = 'dalme_public/blocks/_main_image.html'\n\n\nclass InlineImageBlock(blocks.StructBlock):\n image = ImageChooserBlock()\n caption = blocks.RichTextBlock(required=False)\n alignment = blocks.ChoiceBlock(\n choices=[\n ('left', 'Left-aligned'),\n ('right', 'Right-aligned')\n ],\n )\n show_caption = blocks.BooleanBlock(required=False, default=True)\n\n class Meta:\n icon = 'image'\n template = 'dalme_public/blocks/_inline_image.html'\n\n\nclass PersonBlock(blocks.StructBlock):\n name = blocks.CharBlock()\n job = blocks.CharBlock(required=False)\n institution = blocks.CharBlock(required=False)\n url = blocks.URLBlock(required=False)\n photo = ImageChooserBlock(required=False)\n\n class Meta:\n icon = 'user'\n template = 'dalme_public/blocks/_person.html'\n\n\nclass SocialBlock(blocks.StructBlock):\n fa_icon = blocks.CharBlock()\n url = blocks.URLBlock(required=False)\n css_class = blocks.CharBlock(required=False)\n\n class Meta:\n icon = 'group'\n template = 'dalme_public/blocks/_social.html'\n\n\nclass SponsorBlock(blocks.StructBlock):\n logo = ImageChooserBlock()\n url = blocks.URLBlock()\n\n class 
Meta:\n        icon = 'user'\n        template = 'dalme_public/blocks/_sponsor.html'\n\n\nclass SubsectionBlock(blocks.StructBlock):\n    subsection = blocks.CharBlock()\n    collapsed = blocks.BooleanBlock(required=False, default=True)\n\n    class Meta:\n        icon = 'collapse-up'\n        template = 'dalme_public/blocks/_subsection.html'\n","sub_path":"dalme_public/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":4292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"232046954","text":"import pandas as pd\nimport numpy as np\nimport datetime\nfrom pandas.api.types import is_datetime64_any_dtype as is_datetime\nimport json\n\nfrom src.helpers.utilities import pandas_replace\n\n\nclass DataLoader:\n    \"\"\"Class to load all required files\n    \"\"\"\n\n    def __init__(self, config_dict, filepath_dict, logger):\n        self.config_dict = config_dict\n        self.filepath_dict = filepath_dict\n        self.input_dict = {}\n        self.logger = logger\n\n    def load_file(self, filename, sheet_name=None, lowercase=None):\n        \"\"\"\n        Parameters\n        ----------\n        filename: str, Path of the file to read\n        sheet_name: str, sheet to use when reading an excel file\n\n        Returns\n        ----------\n        df: Pandas data frame\n        \"\"\"\n        suffix = filename.split(\".\")[-1]\n        try:\n            if suffix == 'csv':\n                df = pd.read_csv(filename)\n            elif suffix == 'xlsx':\n                if sheet_name:\n                    df = pd.read_excel(filename, sheet_name=sheet_name)\n                else:\n                    df = pd.read_excel(filename)\n            elif suffix == 'json':\n                df = json.load(open(filename))\n            else:\n                raise ValueError(f'Unsupported file extension: {suffix}')\n\n            if lowercase:\n                # Convert the columns to lower\n                if isinstance(df, pd.DataFrame):\n                    df.columns = df.columns.str.lower().tolist()\n            self.logger.info(f'SUCCESS: Reading file {filename}')\n        except Exception as e:\n            self.logger.info(f'Exception: Reading file {filename}')\n            self.logger.info(f'Exception message: {str(e)}')\n            # re-raise so the caller never receives an unbound/partial result\n            raise\n\n        return df\n\n    def import_files(self, lowercase=None):\n        \"\"\"\n        Loads all files in filepath_dict into self.input_dict\n        \"\"\"\n        for file in self.filepath_dict:\n            if len(self.filepath_dict[file]) == 1:\n                self.input_dict[file] = self.load_file(self.filepath_dict[file]['file'], lowercase=lowercase)\n            else:\n                self.input_dict[file] = self.load_file(self.filepath_dict[file]['file']\n                                                       , self.filepath_dict[file]['sheet']\n                                                       , lowercase=lowercase)\n\n    def align_taxonomy(self, lower=1, verbose=1):\n        \"\"\"\n        Align taxonomy across all tables loaded. 
Use the taxonomy dict to replace across all columns\n :param verbose: 1, if detailed logs 0 otherwise\n :return:\n \"\"\"\n if lower == 1:\n self.logger.info('Convert columns to lower case')\n self.input_dict['taxonomy'].columns = self.input_dict['taxonomy'].columns.str.lower().tolist()\n\n self.logger.info('Create dictionary')\n self.input_dict['taxonomy'].dropna(subset=['from'], inplace=True)\n self.input_dict['taxonomy'].dropna(subset=['to'], inplace=True)\n self.taxonomy_dict = {s: a for s, a in zip(self.input_dict['taxonomy']['from'], self.input_dict['taxonomy']['to'])}\n for key in self.input_dict.keys():\n if (isinstance(self.input_dict[key], pd.DataFrame)):\n if key != 'taxonomy':\n self.input_dict[key] = pandas_replace(self.input_dict[key], self.taxonomy_dict, verbose=verbose)\n\n def filter_df(self, df, filters):\n filter_string = \"\"\n keys = list(filters.keys())\n values = list(filters.values())\n for i in range(len(filters)):\n if not str(values[i][1]).isdigit():\n values[i][1] = \"'\"+str(values[i][1])+\"'\"\n if i > 0:\n filter_string += ('&('+keys[i]+str(values[i][0])+str(values[i][1])+')')\n else:\n filter_string += ('('+keys[i]+str(values[i][0])+str(values[i][1])+')')\n\n return df[df.eval(filter_string)]\n\n def get_relevant_curves(self, _coeffA_col, _coeffB_col, _coeffC_col, form_col, _rating_col):\n \"\"\"\n Keep only the curves with confidence rating of 4 and with values that make sense\n :param _coeffA_col: str, column name of the Coefficient A\n :param _coeffB_col: str, column name of the Coefficient B\n :param _coeffC_col: str, column name of the Coefficient C\n :param form_col: str, column name of the Functional form\n :param _rating_col: str, column name of the confidence rating\n :return:\n \"\"\"\n self.logger.info('----- Keeping only the 4 rating curves')\n curves_4 = self.input_dict['curves'][(self.input_dict['curves'][_rating_col] == 4)]\n curves_4 = curves_4[(curves_4[_coeffA_col] > 0) | (curves_4[_coeffB_col] > 0) | (curves_4[_coeffC_col] > 0)]\n curves_4 = curves_4[~((curves_4[_coeffA_col] == 0) & (curves_4[form_col] == 'POWER'))]\n curves_4.reset_index(inplace=True, drop=True)\n return curves_4\n\n def keep_relevant_data(self, filter_df):\n \"\"\"\n Keep only the data that is needed based on BCVs present in curves\n :param filter_df: pandas, df with BCVs in curves\n :return:\n \"\"\"\n self.input_dict['spend'] = self.input_dict['spend'].merge(filter_df, how='inner')\n self.input_dict['media'] = self.input_dict['media'].merge(filter_df, how='inner')\n\n def convert_date(self, df, source, destination, dateformat=\"%m/%d/%Y\", keep=0, datetime=0):\n \"\"\"\n Convert date format of a series in pandas dataframe\n :param df: pandas, dataframe where the date format is to be converted\n :param source: str, column name of the source date field\n :param destination: str, column name of the destination date field\n :param dateformat: str, format of the date\n :param keep: 1, if we need to keep both source and destination columns\n :param datetime: 1, if destination column is to be converted to datetime\n :return:\n \"\"\"\n if ~ is_datetime(df[source]):\n df[source] = pd.to_datetime(df[source])\n df[destination] = df[source].dt.strftime(dateformat)\n if datetime==1:\n df[destination] = pd.to_datetime(df[destination])\n # df[destination] = pd.to_datetime(df[source], format=dateformat)\n\n if (keep == 0) & (source != destination):\n del df[source]\n return df\n\n def get_calendar(self, _week_col, _plan_period_col, _startdate_col, _enddate_col, 
_planperiodname_col):\n \"\"\"\n Creates a master calendar file with week starts based on the master calendar info with FY week starts and ends\n :param _week_col: str, column name of the week field\n :param _plan_period_col: str, column name of the FY plan period in output calendar\n :param _startdate_col: str, column name of first week in FY\n :param _enddate_col: str, column name of last week in FY\n :param _planperiodname_col: str, column name of the FY plan period in input master calendar\n :return: pandas, Calendar with every week start between start and end weeks of every FY in master calendar\n \"\"\"\n self.logger.info('------ Converting date formats for start and end dates')\n self.input_dict['calendar_master'] = self.convert_date(self.input_dict['calendar_master']\n , _startdate_col, _startdate_col, datetime=1)\n self.input_dict['calendar_master'] = self.convert_date(self.input_dict['calendar_master']\n , _enddate_col, _enddate_col, datetime=1)\n self.input_dict['calendar_master'].reset_index(inplace=True, drop=True)\n\n self.logger.info('------ Looping through different FYs')\n try:\n for i in range(len(self.input_dict['calendar_master'])):\n # i=0\n calendar_sub = pd.DataFrame()\n sdate = self.input_dict['calendar_master'][_startdate_col][i]\n edate = self.input_dict['calendar_master'][_enddate_col][i]\n calendar_sub[_week_col] = pd.date_range(sdate, edate + datetime.timedelta(days=1), freq='7D')\n calendar_sub[_plan_period_col] = self.input_dict['calendar_master'][_planperiodname_col][i]\n calendar_sub = self.convert_date(calendar_sub, _week_col, _week_col)\n if i == 0:\n calendar = calendar_sub.copy()\n else:\n calendar = calendar.append(calendar_sub.copy())\n self.logger.info(f'SUCCESS: Creating calendar')\n except Exception as e:\n self.logger.info(f'Exception: Creating calendar')\n self.logger.info(f'Exception message: {str(e)}')\n self.input_dict['Calendar'] = calendar\n\n def filter_currency(self, _source_currency_col, _target_currency_symbol_col\n , _conversion_factor_col, target_currency='GBP'):\n \"\"\"\n Filters out the master currency conversion table to only the relevant source target currency\n :param _source_currency_col: str, column name\n :param _target_currency_symbol_col: str, column name\n :param _conversion_factor_col: str, column name\n :param target_currency: str, currency code\n :return: Filtered currency conversion table\n \"\"\"\n try:\n self.input_dict['currency_conversion_sub'] = self.input_dict['currency_conversion'].loc[\n self.input_dict['currency_conversion'][_target_currency_symbol_col] == target_currency\n , [_source_currency_col, _target_currency_symbol_col, _conversion_factor_col]].drop_duplicates()\n self.logger.info(f'SUCCESS: Filtering only for Target: {target_currency} currency conversion')\n except Exception as e:\n self.logger.info(f'Exception message: {str(e)}')\n self.logger.info(f'ERROR: Filtering only for Target: {target_currency} currency conversion')\n\n\nclass CalculationEngine:\n \"\"\"Class to load all required files and calculations\n \"\"\"\n\n def __init__(self, input_dict, logger):\n self.input_dict = input_dict\n self.logger = logger\n\n\n def convert_date(self, df, source, destination, dateformat=\"%m/%d/%Y\", keep=0, datetime=0):\n \"\"\"\n Convert date format of a pandas series\n :param df: pandas, dataframe with series to be converted\n :param source: str, column name of the source series\n :param destination: str, column name of the destination series\n :param dateformat: str, format of the destination date\n :param 
keep: int, 1 to keep the source column, 0 otherwise\n :param datetime: int, 1 to convert output series to datetime format\n :return: pandas, dataframe with the series converted\n \"\"\"\n if ~ is_datetime(df[source]):\n df[source] = pd.to_datetime(df[source])\n df[destination] = df[source].dt.strftime(dateformat)\n if datetime == 1:\n df[destination] = pd.to_datetime(df[destination])\n # df[destination] = pd.to_datetime(df[source], format=dateformat)\n\n if (keep == 0) & (source != destination):\n del df[source]\n return df\n\n def calculate_stimuli(self, spend_cols, media_cols, _stimuli_col, _spend_col, _cost_col):\n \"\"\"\n Merge spend and media info and calculate stimuli\n :param spend_cols: list, columns names of spend df to be joined on\n :param media_cols: list, columns names of media df to be joined on\n :param _stimuli_col: str, column name of the stimuli\n :param _spend_col: str, column name of spend\n :param _cost_col: str, column name of media cost\n :return: pandas, joined spend media df\n \"\"\"\n try:\n spend_media = pd.merge(self.input_dict['spend'], self.input_dict['media'][media_cols]\n , on=spend_cols, how='inner')\n spend_media[_stimuli_col] = spend_media[_spend_col] / spend_media[_cost_col]\n except:\n raise ValueError('CalcEngine.calculate_stimuli: Issue with merging spend, media')\n self.spend_media = spend_media\n\n def get_region_col(self, df,_region_col, _geo_col):\n \"\"\"\n Merge with geo master to get the region data\n :param df: pandas, dataframe to which region column needs to be added\n :param _region_col: str, column name of the region\n :param _geo_col: str, column name of the country\n :return: pandas, dataframe with region column added\n \"\"\"\n df = df.merge(self.input_dict['geo_master'][[_region_col, _geo_col]].drop_duplicates(), how='left')\n return df\n\n # def get_geo_col(self, _geo_name_col, _geo_col):\n # # Get ISO code\n # self.input_dict['finance'] = self.input_dict['finance'].merge(self.input_dict['geo_master'][[_geo_name_col, _geo_col]].drop_duplicates()\n # , how='left')\n def get_volume_info(self, df, volume_info, _geo_col, _brand_col):\n self.logger.info('----- Get volume info for curves and bus inputs')\n # df['Country-Brand'] = df[_geo_col] + \" - \" + df[_brand_col]\n df = pd.merge(df, volume_info, how='left')\n # del df['Country-Brand']\n return df\n\n def get_spend_mean(self, _spend_col, _geo_col, _brand_col, _instrument_col, _week_col\n ,_plan_period_col, plan_period):\n \"\"\"\n Calculate spend mean, min and max on the spend media df\n :param _spend_col: str, column name of the spend\n :param _geo_col: str, column name of the country\n :param _brand_col: str, column name of the brand\n :param _instrument_col: str, column name of the instrument/vehicle\n :param _week_col: str, column name of the week\n :param _plan_period_col: str, column name of the plan period\n :param plan_period: str, plan period to be filtered on e.g., FY-2020\n :return:\n assign filtered spend media data on plan period\n assign spend mean calculated to spend_media_sub_min_max\n \"\"\"\n spend_media_sub = self.spend_media\n spend_media_sub = pd.merge(spend_media_sub, self.input_dict['Calendar'], on=_week_col, how='left')\n spend_media_sub = spend_media_sub[spend_media_sub[_plan_period_col] == plan_period]\n spend_media_sub_min_max = spend_media_sub.groupby([_geo_col, _brand_col, _instrument_col]).agg(\n {_spend_col: ['min', 'max', 'mean']})\n spend_media_sub_min_max.columns = [\"_\".join(x) for x in spend_media_sub_min_max.columns.ravel()]\n 
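Aside: the `agg({_spend_col: ['min', 'max', 'mean']})` call in `get_spend_mean` just below returns a frame with MultiIndex columns, which the `"_".join(...)` line then flattens. A minimal, self-contained sketch of that flattening pattern on toy data (the frame and column names here are illustrative, not taken from the project):

```python
import pandas as pd

# Toy stand-in for the spend/media table (illustrative column names only).
df = pd.DataFrame({
    "country": ["FR", "FR", "DE"],
    "vehicle": ["TV", "TV", "Digital"],
    "spend": [100.0, 300.0, 50.0],
})

# Aggregating with a list of functions yields MultiIndex columns:
# ("spend", "min"), ("spend", "max"), ("spend", "mean")
grouped = df.groupby(["country", "vehicle"]).agg({"spend": ["min", "max", "mean"]})

# Flatten them the way get_spend_mean does; iterating the MultiIndex yields
# (column, function) tuples, so the .ravel() in the original is equivalent.
grouped.columns = ["_".join(col) for col in grouped.columns]
grouped.reset_index(inplace=True)

print(grouped)  # columns: country, vehicle, spend_min, spend_max, spend_mean
```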
spend_media_sub_min_max.reset_index(inplace=True)\n self.spend_media_sub_min_max = spend_media_sub_min_max\n self.spend_media_sub = spend_media_sub\n\n def get_market_currency(self, to_symbol, _local_currency_col, _source_currency_col, _target_currency_symbol_col):\n market_currency_rate = self.input_dict['market_currency'].merge(self.input_dict['currency_conversion']\n , left_on=_local_currency_col\n , right_on=_source_currency_col\n , how='left')\n self.market_currency_rate = market_currency_rate[market_currency_rate[_target_currency_symbol_col]==to_symbol]\n\n def get_country_brand_volume(self, _geo_col, _country_col, _geo_name_col, _brand_col\n , _plan_period_col, plan_period, _volume_col):\n # Get FY year\n self.input_dict['finance'][_plan_period_col] = 'FY-' + self.input_dict['finance']['year'].astype('int').astype('str')\n\n # Group volume for Country-Brand\n finance_grouped = pd.DataFrame(self.input_dict['finance'].groupby([_country_col, _brand_col, _plan_period_col])[_volume_col].sum())\n finance_grouped.reset_index(inplace=True)\n\n # Filter for the required period\n finance_grouped_20 = finance_grouped[finance_grouped[_plan_period_col] == plan_period]\n # Set columns\n finance_grouped_20.columns = [_geo_name_col, _brand_col, _plan_period_col, _volume_col]\n\n # Get ISO code\n finance_grouped_20 = finance_grouped_20.merge(self.input_dict['geo_master'][[_geo_name_col, _geo_col]].drop_duplicates()\n , how='left')\n\n # Create country-brand key\n finance_grouped_20['Country-Brand'] = finance_grouped_20[_geo_col] + \" - \" + finance_grouped_20[_brand_col]\n self.finance_grouped_20 = finance_grouped_20\n\n\n def get_aggregated_volume(self, _region_col, _geo_name_col, _brand_col\n , _plan_period_col, plan_period, _volume_col, filter_df=pd.DataFrame()):\n finance = self.input_dict['finance'].copy()\n\n # Filter for only the brands/countries needed\n if not filter_df.empty:\n finance = finance.merge(filter_df, how='inner')\n\n # Aggregate the volume\n finance_grouped_region = pd.DataFrame(\n finance.groupby([_region_col, _brand_col, _plan_period_col])[_volume_col].sum())\n finance_grouped_region.reset_index(inplace=True)\n\n # Filter for the required time period\n finance_grouped_region_20 = finance_grouped_region[finance_grouped_region[_plan_period_col] == plan_period]\n finance_grouped_region_20.columns = [_geo_name_col, _brand_col, _plan_period_col, _volume_col]\n\n # Create country-brand key\n finance_grouped_region_20['Country-Brand'] = finance_grouped_region_20[_geo_name_col] + \" - \" + \\\n finance_grouped_region_20[_brand_col]\n\n ## Append to all finance\n finance_grouped_sub = self.finance_grouped_20.append(finance_grouped_region_20)\n return finance_grouped_sub\n\n def get_cost_per_stimuli(self, _geo_col, _geo_name_col, _brand_col, _instrument_col\n , _plan_period_col, plan_period, _spend_col, _stimuli_col, _cost_per_stimuli_col\n , filter_df=pd.DataFrame()):\n\n spend_media_sub = self.spend_media_sub.copy()\n\n # Filter for only the brands/countries needed\n if not filter_df.empty:\n spend_media_sub = spend_media_sub.merge(filter_df, how='inner')\n\n self.logger.info('----- Group spends and stimuli by Country-Vehicle')\n spend_media_sub_grouped = spend_media_sub[spend_media_sub[_plan_period_col] ==\n plan_period].groupby([_instrument_col, _geo_col]) \\\n .agg({_spend_col: np.sum, _stimuli_col: np.sum})\n spend_media_sub_grouped.reset_index(inplace=True)\n\n self.logger.info('----- Calculate cost per stimuli')\n spend_media_sub_grouped[_cost_per_stimuli_col] = 
spend_media_sub_grouped[_spend_col] / spend_media_sub_grouped[\n            _stimuli_col]\n\n        self.logger.info('----- Get combination')\n        spend_media_sub_grouped['Combination'] = spend_media_sub_grouped[_geo_col] + \" - \" + spend_media_sub_grouped[\n            _instrument_col]\n        # keep the grouped table on the instance: get_aggregated_media and\n        # calc_cost_cartesian below read self.spend_media_sub_grouped\n        self.spend_media_sub_grouped = spend_media_sub_grouped\n        return spend_media_sub_grouped\n\n    def get_aggregated_media(self, _region_col, _geo_col, _instrument_col\n                             , _spend_col, _stimuli_col, _cost_per_stimuli_col\n                             , filter_df=pd.DataFrame()):\n\n        spend_media_sub_grouped = self.spend_media_sub_grouped.merge(\n            self.input_dict['geo_master'][[_region_col, _geo_col]].drop_duplicates(),\n            how='left')\n\n        # Filter for only the brands/countries needed\n        if not filter_df.empty:\n            spend_media_sub_grouped = spend_media_sub_grouped.merge(filter_df, how='inner')\n\n        self.logger.info('----- Group spends and stimuli by Region-Vehicle')\n        spend_media_sub_grouped_region = spend_media_sub_grouped.groupby([_region_col, _instrument_col]).agg(\n            {_spend_col: np.sum, _stimuli_col: np.sum})\n        spend_media_sub_grouped_region.reset_index(inplace=True)\n\n        self.logger.info('----- Calculate cost per stimuli')\n        spend_media_sub_grouped_region[_cost_per_stimuli_col] = spend_media_sub_grouped_region[_spend_col] / \\\n                                                                spend_media_sub_grouped_region[_stimuli_col]\n        spend_media_sub_grouped_region['Combination'] = spend_media_sub_grouped_region[_region_col] + \" - \" + \\\n                                                        spend_media_sub_grouped_region[_instrument_col]\n\n        return spend_media_sub_grouped_region\n\n    def calc_cost_cartesian(self, grouped_spend_df, _cost_per_stimuli_col):\n        self.logger.info('----- Append to all cost per stimuli')\n        spend_media_sub_grouped = self.spend_media_sub_grouped.append(grouped_spend_df)\n        spend_media_sub_grouped.reset_index(inplace=True)\n\n        self.logger.info('----- Cost per stimuli ratio - cartesian')\n        cost_combinations = spend_media_sub_grouped.loc[:,['Combination', _cost_per_stimuli_col]]\n        cost_combinations['key'] = 1\n        cost_combinations_copy = cost_combinations.copy()\n        cost_combinations_copy.columns = ['Combination1', 'CostPerStimuli1', 'key']\n\n        cost_cartesian = cost_combinations.merge(cost_combinations_copy, how='left')\n        cost_cartesian['Differential_Cost'] = cost_cartesian[_cost_per_stimuli_col] / cost_cartesian['CostPerStimuli1']\n        cost_cartesian['key_for_cost'] = cost_cartesian['Combination'] + \" to \" + cost_cartesian['Combination1']\n\n        return cost_cartesian\n\n    def filter_curves_for_aggregation(self, curves, aggregation, exclusions, inclusions\n                                      , _instrument_col, _region_col, _spend_mean_col\n                                      , _coeffA_col, _coeffB_col, _coeffC_col, adbudg_coeffs=1):\n        region_curves = curves.copy()\n        region_selected = aggregation.split(' - ')[0]\n        instrument_selected = ' - '.join(aggregation.split(' - ')[1:])\n        if region_selected != 'Global':\n            self.logger.info(f'----- Fetching curves for the region {region_selected} and instrument {instrument_selected}')\n            region_curves = region_curves.loc[(region_curves[_instrument_col] == instrument_selected) &\n                                              (region_curves[_region_col] == region_selected), :]\n            region_curves.reset_index(inplace=True, drop=True)\n        else:\n            self.logger.info(f'----- Global is selected. 
Fetching all curves with instrument {instrument_selected}.')\n region_curves = region_curves.loc[(region_curves[_instrument_col] == instrument_selected), :]\n region_curves.reset_index(inplace=True, drop=True)\n\n if adbudg_coeffs:\n del region_curves[_coeffA_col], region_curves[_coeffB_col], region_curves[_coeffC_col]\n\n # rename columns\n region_curves.rename(columns={\"coefficienta_ad\": \"coefficienta\", \"coefficientb_ad\": \"coefficientb\",\n \"coefficientc_ad\": \"coefficientc\"\n , \"investment_mean\": _spend_mean_col}, inplace=True)\n\n # Exclusions\n if exclusions:\n if not pd.isnull(exclusions):\n self.logger.info('----- Excluding certain curves in aggregation')\n filter_field = exclusions.split(':')[0]\n filter_value = eval(exclusions.split(':')[1])\n filter_value = filter_value if isinstance(filter_value, list) else [filter_value]\n region_curves = region_curves[region_curves.eval(filter_field + ' not in ' + str(filter_value))]\n region_curves.reset_index(inplace=True, drop=True)\n\n # Inclusions\n if inclusions:\n if not pd.isnull(inclusions):\n self.logger.info('----- Including certain curves in aggregation as per selection')\n filter_field = inclusions.split(':')[0]\n filter_value = [eval(inclusions.split(':')[1])]\n filter_value = filter_value if isinstance(filter_value, list) else [filter_value]\n region_curves = region_curves[region_curves.eval(filter_field + ' in ' + str(filter_value))]\n region_curves.reset_index(inplace=True, drop=True)\n\n return region_curves\n\n def transform_curves_format(self, df, curves_4\n , _geo_col, _brand_col, _instrument_col, _coeffA_col, _coeffB_col, _coeffC_col\n , _state_col, _subbrand_col, form_col, _stimuli_type_col, _rating_col):\n curves_1 = df.loc[:, [_geo_col, _brand_col, _instrument_col, _coeffA_col, _coeffB_col, _coeffC_col]]\n curves_1[_state_col] = '-'\n curves_1[_subbrand_col] = '-'\n dimension_cols = [_geo_col, _brand_col, _subbrand_col, _state_col]\n r_dimension_cols = [\"receiving\" + s for s in dimension_cols]\n curves_1[r_dimension_cols] = curves_1.loc[:, dimension_cols]\n curves_1[form_col] = 'ADBUDG'\n curves_1[_stimuli_type_col] = 'Spend'\n curves_1[_rating_col] = 3\n curves_1['carryover'] = 0.0\n curves_1['lag'] = 0.0\n curves_1['kpi'] = 'Default'\n curves_1['isdeleted'] = 'f'\n curves_1['isnonmeasured'] = 'f'\n curves_1['spend_type'] = 'Mixed'\n\n # curves_1 = curves_1.merge(CalcEngine.input_dict['geo_master'][[_geo_name_col, _geo_col]].drop_duplicates(), how='left')\n curves_1 = curves_1[curves_4.columns.tolist()]\n return curves_1\n\n def get_media_cost_df(self, curves_t, _brand_col, _subbrand_col, _geo_col, _state_col, _instrument_col, _week_col,\n _cost_col):\n mediacost = curves_t.loc[:, [_brand_col, _subbrand_col, _geo_col, _state_col, _instrument_col]]\n mediacost.drop_duplicates(inplace=True)\n mediacost['GrowthDriver'] = 'Default'\n mediacost['Activity'] = 'Default'\n mediacost['key'] = 1\n self.input_dict['Calendar']['key'] = 1\n mediacost = mediacost.merge(self.input_dict['Calendar'].loc[:, [_week_col, 'key']], how='left')\n mediacost[_cost_col] = 1.0\n del mediacost['key']\n return mediacost\n\n def get_actual_plan_df(self, curves_t, _brand_col, _subbrand_col, _geo_col, _state_col, _instrument_col, _week_col,\n _cost_col, _plan_period_col, plan_period):\n act_plan = curves_t.loc[:, [_brand_col, _subbrand_col, _geo_col, _state_col, _instrument_col]]\n act_plan.drop_duplicates(inplace=True)\n act_plan['GrowthDriver'] = 'All'\n act_plan['Activity'] = 'Activity 1'\n act_plan['key'] = 1\n 
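Aside: `get_media_cost_df` and `get_actual_plan_df` above both build a cartesian product between the dimension rows and the calendar by merging on a constant `key` column. A minimal sketch of that trick on toy data (names illustrative); on pandas >= 1.2, `merge(how='cross')` gives the same result without the dummy column:

```python
import pandas as pd

# Stand-ins for the dimension rows and the calendar (illustrative data).
dims = pd.DataFrame({"brand": ["A", "B"]})
weeks = pd.DataFrame({"week": ["01/06/2020", "01/13/2020"]})

# The key=1 trick used above: merging on a constant column pairs every
# dimension row with every calendar week.
dims["key"] = 1
weeks["key"] = 1
cross = dims.merge(weeks, on="key").drop(columns="key")

# Equivalent on pandas >= 1.2, with no dummy column needed:
cross_modern = dims.drop(columns="key").merge(weeks.drop(columns="key"), how="cross")

print(cross)                       # 4 rows: each brand paired with each week
print(cross.equals(cross_modern))  # True
```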
self.input_dict['Calendar']['key'] = 1\n\n # TODO keep only 30 weeks of data\n Calendar = self.input_dict['Calendar'].loc[self.input_dict['Calendar'][_plan_period_col]==plan_period, :]\n Calendar.reset_index(inplace=True, drop=True)\n c_rows = min(max(30, Calendar.shape[0]), 30)\n Calendar = Calendar.iloc[0:c_rows, :]\n # act_plan = act_plan.merge(self.input_dict['Calendar'].loc[:, [_week_col, 'key']], how='left')\n act_plan = act_plan.merge(Calendar.loc[:, [_week_col, 'key']], how='left')\n\n # TODO change investment to actual sample spend -> coefficient c?\n act_plan['Investment'] = 10000.0\n act_plan['Agency'] = 'Default'\n act_plan['Type'] = 'PLAN'\n act_plan['ActualEndWeek'] = 'N'\n del act_plan['key']\n return act_plan\n\n def get_kpi_master(self, curves_t, kpi_template_df, _brand_col, _subbrand_col, _geo_col, _state_col\n , _instrument_col, _region_col):\n\n self.logger.info('----- Get unique dimensions from curves data')\n curves_t_copy = curves_t.copy()\n curves_t_copy = self.get_region_col(curves_t_copy, _region_col, _geo_col)\n curves_t_copy[_state_col] = '-'\n curves_t_copy[_subbrand_col] = '-'\n final_kpi = curves_t_copy.loc[:,\n [_brand_col, _subbrand_col, _geo_col, _state_col, _instrument_col, _region_col]]\n final_kpi.drop_duplicates(inplace=True)\n dimension_cols = [_brand_col, _subbrand_col, _geo_col, _state_col]\n r_dimension_cols = [\"receiving\" + s for s in dimension_cols]\n final_kpi[r_dimension_cols] = final_kpi.loc[:, dimension_cols]\n\n self.logger.info('----- Join on region/geo to get unique kpi parameter ids from template')\n NAM_index = final_kpi[_region_col] == 'NAM'\n final_kpi_nonNAM = final_kpi.loc[~NAM_index, :].merge(kpi_template_df[[_region_col, 'parameter_id']],\n how='left')\n\n del final_kpi[_region_col]\n final_kpi_NAM = final_kpi.loc[NAM_index, :].merge(kpi_template_df[[_region_col, 'parameter_id']],\n left_on=_geo_col\n , right_on=_region_col, how='left')\n del final_kpi_NAM[_region_col], final_kpi_nonNAM[_region_col]\n\n self.logger.info('----- Append RoW with USA, CAN')\n final_kpi_NAM = final_kpi_NAM[final_kpi_nonNAM.columns.tolist()]\n final_kpi = final_kpi_nonNAM.append(final_kpi_NAM)\n\n self.logger.info('----- Get region column')\n final_kpi = self.get_region_col(final_kpi, _region_col, _geo_col)\n return final_kpi\n\n\n def get_LT_RoW(self, ETL_kpi, final_kpi, _brand_col, _instrument_col, _region_col):\n \"\"\"\n Fetch LT parameters for Rest of World (RoW)\n :param ETL_kpi: class instance, with KPI input data\n :param final_kpi: pandas, dataframe with all KPI parameters\n :param _brand_col: str, column name of the brand\n :param _instrument_col: str, column name of the instrument/vehicle\n :param _region_col: str, column name of the region\n :return: pandas, dataframe with EURO LT kpi parameters\n \"\"\"\n # Get LT values\n self.logger.info('----- Melt LT values df')\n # lt_values = config_path+'ME/AdditionalData/Templates/LT Multipliers.xlsx'\n # lt_values_df_all = pd.read_excel(lt_values, sheet_name='ALL')\n lt_values_df_all = ETL_kpi.input_dict['lt_values_df_all'].melt(id_vars='Brand', var_name='Instrument')\n lt_values_df_all.columns = lt_values_df_all.columns.str.lower().tolist()\n lt_values_df_all['parameter_id'] = 'LT_Multiplier'\n\n self.logger.info('----- Filter for LT multiplier')\n final_kpi_LT = final_kpi[final_kpi['parameter_id'] == 'LT_Multiplier']\n # For RoW\n EURO_index = final_kpi_LT[_region_col] == 'EUROPE & TURKEY'\n final_kpi_LT_RoW = final_kpi_LT.loc[~EURO_index, :]\n final_kpi_LT_RoW.reset_index(inplace=True, 
drop=True)\n\n self.logger.info('----- Join with lt values df')\n final_kpi_LT_RoW_t = final_kpi_LT_RoW.merge(lt_values_df_all, how='left')\n final_kpi_LT_RoW_values = final_kpi_LT_RoW_t[~np.isnan(final_kpi_LT_RoW_t['value'])]\n final_kpi_LT_RoW_missing = final_kpi_LT_RoW_t[np.isnan(final_kpi_LT_RoW_t['value'])]\n\n self.logger.info('----- If Brand is available but instrument not then we need to use “Total Multiplier”')\n del final_kpi_LT_RoW_missing['value']\n final_kpi_LT_RoW_brand = final_kpi_LT_RoW_missing.merge(lt_values_df_all.loc[\n lt_values_df_all[\n _instrument_col] == 'Total Multiplier'\n , [_brand_col, 'value', 'parameter_id']]\n , on=[_brand_col, 'parameter_id'], how='left')\n final_kpi_LT_RoW_values_brand = final_kpi_LT_RoW_brand[~np.isnan(final_kpi_LT_RoW_brand['value'])]\n final_kpi_LT_RoW_missing_brand = final_kpi_LT_RoW_brand[np.isnan(final_kpi_LT_RoW_brand['value'])]\n\n self.logger.info('----- If instrument is available but brand not then we need to use “Total”')\n del final_kpi_LT_RoW_missing_brand['value']\n final_kpi_LT_RoW_instrument = final_kpi_LT_RoW_missing_brand.merge(lt_values_df_all.loc[\n lt_values_df_all[_brand_col] == 'Total'\n , [_instrument_col, 'value',\n 'parameter_id']]\n , on=[_instrument_col, 'parameter_id'],\n how='left')\n\n self.logger.info('----- Append all KPI LT multipliers from RoW')\n final_kpi_LT_RoW = final_kpi_LT_RoW_values.append(final_kpi_LT_RoW_values_brand).append(\n final_kpi_LT_RoW_instrument)\n final_kpi_LT_RoW['value'].fillna(1.0, inplace=True)\n\n return final_kpi_LT_RoW\n\n\n def get_LT_EU(self, ETL_kpi, final_kpi, _brand_col, _geo_col, _instrument_col, _region_col):\n \"\"\"\n Fetch LT parameters for Europe\n :param ETL_kpi: class instance, with KPI input data\n :param final_kpi: pandas, dataframe with all KPI parameters\n :param _brand_col: str, column name of the brand\n :param _geo_col: str, column name of the country/geography\n :param _instrument_col: str, column name of the instrument/vehicle\n :param _region_col: str, column name of the region\n :return: pandas, dataframe with EURO LT kpi parameters\n \"\"\"\n self.logger.info('Extract LT for EURO')\n # lt_values_df_eu = pd.read_excel(lt_values, sheet_name='EURO')\n # lt_values_df_eu.columns = lt_values_df_eu.columns.str.lower().tolist()\n lt_values_df_eu = ETL_kpi.input_dict['lt_values_df_eu'].loc[:, [_geo_col, _instrument_col, 'lt']]\n lt_values_df_eu['parameter_id'] = 'LT'\n\n self.logger.info('Filter for LT - EURO')\n final_kpi_LT = final_kpi.loc[final_kpi['parameter_id'] == 'LT', :]\n # For RoW\n EURO_index = final_kpi_LT[_region_col] == 'EUROPE & TURKEY'\n final_kpi_LT_EURO = final_kpi_LT.loc[EURO_index, :]\n final_kpi_LT_EURO.reset_index(inplace=True, drop=True)\n\n final_kpi_LT_EURO_t = final_kpi_LT_EURO.merge(lt_values_df_eu, how='left')\n final_kpi_LT_EURO_values = final_kpi_LT_EURO_t[~np.isnan(final_kpi_LT_EURO_t['lt'])]\n final_kpi_LT_EURO_missing = final_kpi_LT_EURO_t[np.isnan(final_kpi_LT_EURO_t['lt'])]\n\n self.logger.info('If a combination is not available, the average instrument level must be used')\n lt_values_df_eu_grouped = pd.DataFrame(lt_values_df_eu.groupby([_instrument_col])['lt'].mean())\n lt_values_df_eu_grouped.reset_index(inplace=True)\n del final_kpi_LT_EURO_missing['lt']\n final_kpi_LT_EURO_instrument = final_kpi_LT_EURO_missing.merge(lt_values_df_eu_grouped\n , on=[_instrument_col], how='left')\n\n self.logger.info('----- Append all KPI LT multipliers from EURO')\n final_kpi_LT_EURO = 
final_kpi_LT_EURO_values.append(final_kpi_LT_EURO_instrument)\n final_kpi_LT_EURO['lt'].fillna(1.0, inplace=True)\n final_kpi_LT_EURO.rename(columns={'lt': 'value'}, inplace=True)\n final_kpi_LT_EURO.reset_index(inplace=True, drop=True)\n\n return final_kpi_LT_EURO\n\n\n def get_LT_ALL(self, ETL_kpi, final_kpi, _brand_col, _geo_col, _instrument_col, _region_col, _region_selected):\n \"\"\"\n Fetch LT parameters for Rest of World and Europe separately and append together\n :param ETL_kpi: class instance, with KPI input data\n :param final_kpi: pandas, dataframe with all KPI parameters\n :param _brand_col: str, column name of the brand\n :param _geo_col: str, column name of the country/geography\n :param _instrument_col: str, column name of the instrument/vehicle\n :param _region_col: str, column name of the region\n :return: pandas, dataframe with ALL LT kpi parameters\n \"\"\"\n final_kpi_LT_RoW = self.get_LT_RoW(ETL_kpi, final_kpi, _brand_col, _instrument_col, _region_col)\n\n if _region_selected in ['EUROPE', 'EUROPE & TURKEY']:\n final_kpi_LT_EURO = self.get_LT_EU(ETL_kpi, final_kpi, _brand_col, _geo_col, _instrument_col, _region_col)\n final_kpi_LT_ALL = final_kpi_LT_EURO.append(final_kpi_LT_RoW.loc[:, final_kpi_LT_EURO.columns.tolist()])\n else:\n final_kpi_LT_ALL = final_kpi_LT_RoW\n\n del final_kpi_LT_ALL[_region_col]\n return final_kpi_LT_ALL\n\n def get_finance_KPI(CalcEngine, final_kpi, _brand_col, _geo_col, _region_col\n , _volume_col, _gp_per_eu_col, _nsv_per_eu_col, _gp_per_lit_col, _category_col):\n CalcEngine.logger.info('----- Finance KPIs')\n CalcEngine.input_dict['finance'].loc[:, 'Country-Brand'] = CalcEngine.input_dict['finance'].loc[:, _geo_col] + \" - \" + \\\n CalcEngine.input_dict['finance'].loc[:, _brand_col]\n CalcEngine.input_dict['finance']['Profit'] = CalcEngine.input_dict['finance'][_volume_col] * \\\n CalcEngine.input_dict['finance'][_gp_per_eu_col]\n finance_gp_aggregated = pd.DataFrame(\n CalcEngine.input_dict['finance'].groupby(['Country-Brand']).agg({_volume_col: 'sum'\n , _nsv_per_eu_col: 'sum'\n , 'Profit': 'sum'}))\n finance_gp_aggregated.reset_index(inplace=True)\n\n CalcEngine.logger.info('----- Get category for country-brand')\n finance_category = CalcEngine.input_dict['finance'].loc[:, ['Country-Brand', _category_col]].drop_duplicates(subset='Country-Brand')\n finance_gp_aggregated = finance_gp_aggregated.merge(finance_category, how='left')\n\n CalcEngine.logger.info('----- Calculate gp per eu and gp per liter')\n finance_gp_aggregated[_gp_per_eu_col] = finance_gp_aggregated['Profit'] / finance_gp_aggregated[_volume_col]\n finance_gp_aggregated.loc[finance_gp_aggregated[_category_col] == 'Spirits', _gp_per_lit_col] = finance_gp_aggregated.loc[finance_gp_aggregated[_category_col]=='Spirits', _gp_per_eu_col]/9\n finance_gp_aggregated.loc[finance_gp_aggregated[_category_col] != 'Spirits', _gp_per_lit_col] = finance_gp_aggregated.loc[finance_gp_aggregated[_category_col]=='Spirits', _gp_per_eu_col]/90\n del finance_gp_aggregated[_category_col]\n\n finance_gp_aggregated.dropna(subset=[_gp_per_eu_col], inplace=True)\n finance_gp_aggregated.columns = ['Country-Brand', 'volume', 'Price', 'Profit', 'Profitability', _gp_per_lit_col]\n finance_gp_aggregated_melt = finance_gp_aggregated.melt(id_vars='Country-Brand', var_name='parameter_id',\n value_name='value')\n\n final_kpi_finance = final_kpi.loc[\n final_kpi['parameter_id'].isin(['Price', 'Profitability', _gp_per_lit_col]), :]\n final_kpi_finance['Country-Brand'] = final_kpi_finance[_geo_col] + \" - \" + 
final_kpi_finance[_brand_col]\n final_kpi_finance_values = final_kpi_finance.merge(finance_gp_aggregated_melt, how='left')\n del final_kpi_finance_values['Country-Brand'], final_kpi_finance_values[_region_col]\n return final_kpi_finance_values\n\n\n def get_relevant_combinations(self, bus_inputs_data, curves, volume_info, _geo_col, _brand_col, _instrument_col\n , _spend_col):\n relevant_combinations = bus_inputs_data[[_geo_col, _brand_col, _instrument_col]].append(\n curves[[_geo_col, _brand_col, _instrument_col]])\n relevant_combinations.drop_duplicates(inplace=True)\n relevant_combinations.reset_index(inplace=True, drop=True)\n relevant_combinations[_spend_col] = pd.Series()\n\n self.logger.info('----- Get volume')\n relevant_combinations_vol = relevant_combinations[[_geo_col, _brand_col]].drop_duplicates()\n relevant_combinations_vol['Country-Brand'] = relevant_combinations_vol[_geo_col] + \" - \" + \\\n relevant_combinations_vol[_brand_col]\n relevant_combinations_vol = pd.merge(relevant_combinations_vol, volume_info, on='Country-Brand', how='left')\n del relevant_combinations_vol['Country-Brand']\n\n self.logger.info('----- Get mediacost')\n relevant_combinations_cost = relevant_combinations[[_geo_col, _instrument_col]].drop_duplicates()\n relevant_combinations_cost['MediaCost'] = pd.Series()\n\n return relevant_combinations, relevant_combinations_vol, relevant_combinations_cost\n\n\n","sub_path":"CurveAggregation/src/helpers/DataLoader.py","file_name":"DataLoader.py","file_ext":"py","file_size_in_byte":41230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"498696833","text":"\"\"\"\nCopyright (C) 2018 NuCypher\n\nThis file is part of pyUmbral.\n\npyUmbral is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\npyUmbral is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with pyUmbral. If not, see .\n\"\"\"\n\nfrom typing import Optional, Type\nfrom warnings import warn\n\nfrom umbral.curve import Curve, SECP256K1\nfrom umbral.params import UmbralParameters\n\n\nclass _CONFIG:\n __curve = None\n __params = None\n __CURVE_TO_USE_IF_NO_DEFAULT_IS_SET_BY_USER = SECP256K1\n __WARNING_IF_NO_DEFAULT_SET = \"No default curve has been set. Using SECP256K1. A slight performance penalty has been incurred for only this call. Set a default curve with umbral.config.set_default_curve().\"\n\n class UmbralConfigurationError(RuntimeError):\n \"\"\"Raised when somebody does something dumb re: configuration.\"\"\"\n\n @classmethod\n def __set_curve_by_default(cls):\n warn(cls.__WARNING_IF_NO_DEFAULT_SET, RuntimeWarning)\n cls.set_curve(cls.__CURVE_TO_USE_IF_NO_DEFAULT_IS_SET_BY_USER)\n\n @classmethod\n def params(cls) -> UmbralParameters:\n if not cls.__params:\n cls.__set_curve_by_default()\n return cls.__params\n\n @classmethod\n def curve(cls) -> Type[Curve]:\n if not cls.__curve:\n cls.__set_curve_by_default()\n return cls.__curve\n\n @classmethod\n def set_curve(cls, curve: Optional[Curve] = None) -> None:\n if cls.__curve:\n raise cls.UmbralConfigurationError(\n \"You can only set the default curve once. 
Do it once and then leave it alone.\")\n else:\n from umbral.params import UmbralParameters\n if curve is None:\n curve = _CONFIG.__CURVE_TO_USE_IF_NO_DEFAULT_IS_SET_BY_USER\n cls.__curve = curve\n cls.__params = UmbralParameters(curve)\n\n\ndef set_default_curve(curve: Optional[Curve] = None) -> None:\n return _CONFIG.set_curve(curve)\n\n\ndef default_curve() -> Type[Curve]:\n return _CONFIG.curve()\n\n\ndef default_params() -> UmbralParameters:\n return _CONFIG.params()\n","sub_path":"umbral/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"82584420","text":"from django.conf.urls.defaults import *\n\nfrom notification.views import notices, mark_all_seen, feed_for_user, single, delete, archive, mark_seen\n\nurlpatterns = patterns('',\n url(r'^$', notices, name=\"notification_notices\"),\n url(r'^(\\d+)/$', single, name=\"notification_notice\"),\n url(r'^delete/(?P\\d+)/$', delete, name=\"notification_delete\"),\n url(r'^archive/(?P\\d+)/$', archive, name=\"notification_archive\"),\n url(r'^mark_seen/(?P\\d+)/$', mark_seen, name=\"notification_mark_seen\"),\n url(r'^feed/$', feed_for_user, name=\"notification_feed_for_user\"),\n url(r'^mark_all_seen/$', mark_all_seen, name=\"notification_mark_all_seen\"),\n)\n","sub_path":"notification/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"380667465","text":"# Qingbo/Haotian Mar 8,2019\n\nimport loaddata\nfrom my_model.ObjectDetectModel import Model\nfrom my_model import laimodel, vgg16model, vgg17model\nimport sys \nimport os\nimport tensorflow as tf\n\nfrom datetime import timedelta\nimport math\nimport gc\nimport time\n\n#Define some global and local variables\nflags = tf.app.flags\nFLAGS = flags.FLAGS\nflags.DEFINE_string('action', 'training', 'training/testing/prediction')\nflags.DEFINE_string(\"img_path\", \"/Users/tarus/OnlyInMac/bully_data/bully_merge_train/JPEGImages/\", \"path for train img\")\nflags.DEFINE_string(\"xml_path\", \"/Users/tarus/OnlyInMac/bully_data/bully_merge_train/Annotations/\", \"path for train xml file\")\nflags.DEFINE_integer('iteration_steps', 20000, 'Number of steps to run trainer.')\nflags.DEFINE_integer('batch_size',8 , 'Number of batch size.')\nflags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')\nflags.DEFINE_float('test_num', 1, 'the number of images used for test.')\ndef set_parameter():\n #set some superparameters which can reset befor run\n # ?% of the data will be used for validation\n flags.DEFINE_float('validation_size', 0.2, 'validation size.')\n flags.DEFINE_integer('image_size', 128, 'image width=image height.')\n flags.DEFINE_string(\"train_path\", \"data_bully/training_data\", \"path of training data\")\n flags.DEFINE_string(\"output_labels\", \"trained_model/output_labels.txt\", \"store the labels\")\n flags.DEFINE_string(\"saved_dir\", \"trained_model\", \"save trained model\")\n flags.DEFINE_string(\"saved_file\", \"trained_model/bully_action\", \"save trained model\")\n\n \n flags.DEFINE_integer('img_depth', 3, 'Number of channels.')\n flags.DEFINE_integer('filter_size', 3, 'filter size.')\n flags.DEFINE_integer('filter_depth1', 32, 'filter depth for conv1.')\n flags.DEFINE_integer('filter_depth2', 32, 'filter depth for conv2.')\n flags.DEFINE_integer('filter_depth3', 64, 'filter depth for conv3.')\n 
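# A quick worked example of the flatten_num flag defined just below (an\n    # illustrative aside, not one of the original flags): each of the\n    # pooling_num max-pool layers halves the spatial size, so with\n    # image_size=128 and pooling_num=3 the feature map is 128 // 2**3 = 16\n    # on a side, and flattening 16 x 16 x filter_depth3 (64) channels gives:\n    #     side = 128 // pow(2, 3)     # -> 16\n    #     flat = pow(side, 2) * 64    # -> 16384 features\n    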
flags.DEFINE_integer('pooling_num', 3, 'Number of pooling')\n flags.DEFINE_integer('flatten_num', pow(FLAGS.image_size//\n pow(2,FLAGS.pooling_num),2)*FLAGS.filter_depth3, \n 'Number of features after flattern')\n print(\"flatten_num is\", FLAGS.flatten_num)\n flags.DEFINE_integer('fc_depth', 128, 'fully connected layer depth.')\n\n flags.DEFINE_integer('vgg_filter_size1', 3, 'filter size.')\n flags.DEFINE_integer('vgg_filter_size2', 3, 'filter size.')\n flags.DEFINE_integer('vgg_filter_size3', 3, 'filter size.')\n flags.DEFINE_integer('vgg_filter_size4', 3, 'filter size.')\n flags.DEFINE_integer('vgg_filter_depth1', 64, 'filter depth for conv1.')\n flags.DEFINE_integer('vgg_filter_depth2', 128, 'filter depth for conv1.')\n flags.DEFINE_integer('vgg_filter_depth3', 256, 'filter depth for conv1.')\n flags.DEFINE_integer('vgg_filter_depth4', 512, 'filter depth for conv1.')\n flags.DEFINE_integer('vgg_num_hidden1', 4096, 'filter depth for conv3.')\n flags.DEFINE_integer('vgg_num_hidden2', 4096, 'filter depth for conv3.')\n\n flags.DEFINE_integer('vgg17_filter_size1', 3, 'filter size.')\n flags.DEFINE_integer('vgg17_filter_size2', 3, 'filter size.')\n flags.DEFINE_integer('vgg17_filter_size3', 3, 'filter size.')\n flags.DEFINE_integer('vgg17_filter_size4', 3, 'filter size.')\n flags.DEFINE_integer('vgg17_filter_depth1', 64, 'filter depth for conv1.')\n flags.DEFINE_integer('vgg17_filter_depth2', 64, 'filter depth for conv1.')\n flags.DEFINE_integer('vgg17_filter_depth3', 128, 'filter depth for conv1.')\n flags.DEFINE_integer('vgg17_filter_depth4', 128, 'filter depth for conv1.')\n flags.DEFINE_integer('vgg17_filter_depth5', 256, 'filter depth for conv1.')\n flags.DEFINE_integer('vgg17_filter_depth6', 256, 'filter depth for conv1.')\n flags.DEFINE_integer('vgg17_filter_depth7', 256, 'filter depth for conv1.')\n flags.DEFINE_integer('vgg17_filter_depth8', 512, 'filter depth for conv1.')\n flags.DEFINE_integer('vgg17_filter_depth9', 512, 'filter depth for conv1.')\n flags.DEFINE_integer('vgg17_filter_depth10', 512, 'filter depth for conv1.')\n flags.DEFINE_integer('vgg17_filter_depth11', 512, 'filter depth for conv1.')\n flags.DEFINE_integer('vgg17_filter_depth12', 512, 'filter depth for conv1.')\n flags.DEFINE_integer('vgg17_filter_depth13', 512, 'filter depth for conv1.')\n flags.DEFINE_integer('vgg17_num_hidden1', 4096, 'filter depth for conv3.')\n flags.DEFINE_integer('vgg17_num_hidden2', 4096, 'filter depth for conv3.')\n\n #parameters for object detection\n # return FLAGS\n\ndef main(_):\n #call set_parameter function to intialize the super parameters \n set_parameter()\n print(\"img_path is :\", FLAGS.img_path)\n # print(\"validation percent is :\", FLAGS.validation_size)\n\n #establish the session variable\n sess = tf.Session()\n # establish a model object\n model = Model(sess, FLAGS)\n #start training or testing\n getattr(model,FLAGS.action)()\n\ndef classify_cnn():\n #*prepare the training dataset & load data\n classes = os.listdir(FLAGS.train_path)\n # print(classes)\n classes.sort()\n # print(classes)\n flags.DEFINE_integer('class_number', len(classes), 'classes number.')\n # class_number= len(classes)\n print(\"class number is:\", FLAGS.class_number)\n \n #save classes/labels\n file_label =open(FLAGS.output_labels,mode='w')\n for field in classes:\n # print(field)\n file_label.write(field)\n file_label.write('\\n')\n file_label.flush()\n file_label.close()\n\n label_lst=[]\n rs = os.path.exists(FLAGS.output_labels)\n if rs==True:\n file_handler =open(FLAGS.output_labels,mode='r')\n 
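# Read the class labels back in the same one-per-line format written\n        # above, stripping the trailing newline from each entry so the list\n        # matches the folder names found under FLAGS.train_path.\n        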
contents = file_handler.readlines()\n for name in contents:\n name = name.strip('\\n')\n label_lst.append(name)\n file_handler.close()\n print(label_lst)\n \n input_data = loaddata.read_dataset(FLAGS.train_path, FLAGS.image_size, \n classes, FLAGS.validation_size)\n print(\"******The traning data have been loaded**********\")\n # print(input_data.train.cls)\n print(\"Number of files in Training-set: \\t{}\" \n .format(len(input_data.train.labels)))\n print(\"Number of files in Validation-set:\\t{}\" \n .format(len(input_data.valid.labels)))\n\n #create a dataflow graph\n mygraph = tf.Graph()\n with mygraph.as_default():\n #1) Define some data & labbel placeholder.\n ## data\n data_placeholder = tf.placeholder(tf.float32, \n shape=[None, FLAGS.image_size,FLAGS.image_size,FLAGS.img_depth], \n name='data_placeholder')\n ## labels\n label_placeholder = tf.placeholder(tf.float32, \n shape=[None, FLAGS.class_number], name='label_placeholder')\n # label_index = tf.argmax(label_placeholder, dimension=1)\n label_index = tf.argmax(label_placeholder, axis=1)\n # print(\"label_index is :\", label_in)\n\n #2) initilize the weight matrices and bias vectors \n lainet_coefficients = laimodel.define_coefficients(filter_size=FLAGS.filter_size, \n img_depth=FLAGS.img_depth, filter_depth1=FLAGS.filter_depth1,\n filter_depth2=FLAGS.filter_depth2,\n filter_depth3=FLAGS.filter_depth3,\n flatten_num=FLAGS.flatten_num, fc_depth=FLAGS.fc_depth,\n class_number=FLAGS.class_number)\n\n vgg16_coefficients = vgg16model.vgg16_coefficients(\n vgg_filter_size1=FLAGS.vgg_filter_size1,\n vgg_filter_size2=FLAGS.vgg_filter_size2, \n vgg_filter_size3=FLAGS.vgg_filter_size3,\n vgg_filter_size4=FLAGS.vgg_filter_size4, \n vgg_filter_depth1=FLAGS.vgg_filter_depth1, \n vgg_filter_depth2=FLAGS.vgg_filter_depth2, \n vgg_filter_depth3=FLAGS.vgg_filter_depth3, \n vgg_filter_depth4=FLAGS.vgg_filter_depth4, \n vgg_num_hidden1=FLAGS.vgg_num_hidden1,\n vgg_num_hidden2=FLAGS.vgg_num_hidden2,\n image_size=FLAGS.image_size, \n img_depth=FLAGS.img_depth , \n class_number=FLAGS.class_number)\n\n vgg17_coefficients = vgg17model.vgg17_coefficients(\n vgg_filter_size1=FLAGS.vgg17_filter_size1,\n vgg_filter_size2=FLAGS.vgg17_filter_size2, \n vgg_filter_size3=FLAGS.vgg17_filter_size3,\n vgg_filter_size4=FLAGS.vgg17_filter_size4, \n vgg_filter_depth1=FLAGS.vgg17_filter_depth1, \n vgg_filter_depth2=FLAGS.vgg17_filter_depth2, \n vgg_filter_depth3=FLAGS.vgg17_filter_depth3, \n vgg_filter_depth4=FLAGS.vgg17_filter_depth4, \n vgg_filter_depth5=FLAGS.vgg17_filter_depth5, \n vgg_filter_depth6=FLAGS.vgg17_filter_depth6, \n vgg_filter_depth7=FLAGS.vgg17_filter_depth7, \n vgg_filter_depth8=FLAGS.vgg17_filter_depth8, \n vgg_filter_depth9=FLAGS.vgg17_filter_depth9, \n vgg_filter_depth10=FLAGS.vgg17_filter_depth10, \n vgg_filter_depth11=FLAGS.vgg17_filter_depth11, \n vgg_filter_depth12=FLAGS.vgg17_filter_depth12, \n vgg_filter_depth13=FLAGS.vgg17_filter_depth13, \n vgg_num_hidden1=FLAGS.vgg17_num_hidden1,\n vgg_num_hidden2=FLAGS.vgg17_num_hidden2,\n image_size=FLAGS.image_size, \n img_depth=FLAGS.img_depth , \n class_number=FLAGS.class_number)\n\n # coefficients =lainet_coefficients \n # coefficients =vgg16_coefficients \n coefficients =vgg17_coefficients \n\n #3. construct the CNN model\n # train_net=laimodel.lainet\n #train_net=vgg16model.vgg16net\n train_net=vgg17model.vgg17net\n logits = train_net(data_placeholder, coefficients)\n\n #4. 
calculate the cross entropy between the logits and actual labels\n        cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits= \n            logits, labels=tf.stop_gradient(label_placeholder))\n        #tensorflow1.5 compatible\n        # cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits= \n        #     logits, labels=label_placeholder)\n        cost = tf.reduce_mean(cross_entropy)\n\n        #5. use optimizer to calculate the gradients of the loss function \n        optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate) \\\n            .minimize(cost)\n\n        # Predictions for the training, validation, and test data.\n        labels_pred = tf.nn.softmax(logits,name='labels_pred')\n        class_pred= tf.argmax(labels_pred, axis=1)\n        #class_pred= tf.argmax(labels_pred, dimension=1)\n        correct_pred = tf.equal(class_pred, label_index)\n        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n    with tf.Session(graph=mygraph) as sess:\n        sess.run(tf.global_variables_initializer())\n        saver = tf.train.Saver()\n        for i in range(FLAGS.iteration_steps):\n            train_data_batch, train_label_batch, _, train_cls_batch = \\\n                input_data.train.next_batch(FLAGS.batch_size)\n            val_data_batch, val_label_batch, _, val_cls_batch = \\\n                input_data.valid.next_batch(FLAGS.batch_size)\n            \n            feed_dict_train = {data_placeholder: train_data_batch,\n                               label_placeholder: train_label_batch}\n            feed_dict_val = {data_placeholder: val_data_batch,\n                             label_placeholder: val_label_batch}\n            sess.run(optimizer, feed_dict=feed_dict_train)\n\n            if i % int(input_data.train.num_examples/FLAGS.batch_size) == 0: \n                val_loss = sess.run(cost, feed_dict=feed_dict_val)\n                epoch = int(i / int(input_data.train.num_examples/FLAGS.batch_size)) \n                train_acc = sess.run(accuracy, feed_dict=feed_dict_train)\n                val_acc = sess.run(accuracy, feed_dict=feed_dict_val)\n                msg = (\"Epoch/Step {0}/{1} --- Train Acc:{2:>6.1%}\" \n                       \"||Val Acc:{3:>6.1%} ||Val Loss:{4:.3f}\")\n                print(msg.format(epoch + 1, i, train_acc, val_acc, val_loss))\n                if val_acc > 0.95:  # accuracy is a fraction in [0, 1]; the original threshold of 95 could never trigger\n                    break\n\n        #save the result\n        if not os.path.exists(FLAGS.saved_dir):\n            os.makedirs(FLAGS.saved_dir)\n        saver.save(sess, FLAGS.saved_file) \n\nif __name__ == \"__main__\":\n    os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'\n    tf.app.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"636647729","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Tom van Steijn, Royal HaskoningDHV\n# Erik van Onselen, Deltares\n\nfrom xsboringen import cross_section\nfrom xsboringen.calc import SandmedianClassifier, AdmixClassifier, LithologyClassifier\nfrom xsboringen.csvfiles import cross_section_to_csv\nfrom xsboringen.datasources import boreholes_from_sources, points_from_sources\nfrom xsboringen.point import PointsOfInterest\nfrom xsboringen.surface import Surface, RefPlane\nfrom xsboringen.solid import Solid\nfrom xsboringen.groundlayermodel import GroundLayerModel\nfrom xsboringen.utils import input_or_default\nfrom xsboringen import plotting\nfrom xsboringen import shapefiles\nfrom xsboringen import styles\n\nimport click\nimport yaml\n\nfrom collections import ChainMap\nfrom pathlib import Path\nimport logging\nimport os\n\nlog = logging.getLogger(os.path.basename(__file__))\n\n\ndef plot_cross_section(**kwargs):\n # args\n datasources = kwargs['datasources']\n cross_section_lines = kwargs['cross_section_lines']\n result = kwargs['result']\n config = kwargs['config']\n\n # optional args\n points_of_interest = kwargs.get('points_of_interest')\n min_depth = kwargs.get('min_depth', 0.)\n buffer_distance = kwargs.get('buffer_distance', 0.)\n xtickstep = kwargs.get('xtickstep')\n ylim = kwargs.get('ylim')\n xlabel = kwargs.get('xlabel')\n ylabel = kwargs.get('ylabel')\n metadata = kwargs.get('metadata')\n \n # optional args for bearing/range labels\n if buffer_distance < 100:\n # labels short enough for horizontal plot\n dist_txt = (kwargs.get('distance_labels'), 0, 'double_line', 'center', 'bottom')\n else:\n # labels must be plot vertically if they become too long\n dist_txt = (kwargs.get('distance_labels'), 90, 'single_line', 'center', 'bottom')\n windlabels = kwargs.get('windlabels')\n winddirs = kwargs.get('winddirs')\n\n # create image folder\n folder = Path(result['folder'])\n folder.mkdir(exist_ok=True)\n\n # read boreholes and CPT's from data folders\n admixclassifier = AdmixClassifier(\n config['admix_fieldnames']\n )\n borehole_sources = datasources.get('boreholes') or []\n boreholes = boreholes_from_sources(borehole_sources, admixclassifier)\n\n # segment styles lookup\n segmentstyles = styles.SegmentStylesLookup(**input_or_default(config, ['styles', 'segments']))\n\n # vertical styles lookup\n verticalstyles = styles.SimpleStylesLookup(**input_or_default(config, ['styles', 'verticals']))\n\n # surface styles lookup\n surfacestyles = styles.SimpleStylesLookup(**input_or_default(config, ['styles', 'surfaces']))\n \n # reference plane styles lookup\n referenceplanestyles = styles.SimpleStylesLookup(**input_or_default(config, ['styles', 'referenceplanes']))\n\n # solid styles lookup\n solidstyles = styles.SimpleStylesLookup(**input_or_default(config, ['styles', 'solids']))\n\n # translate CPT to lithology if needed\n if result.get('translate_cpt', False):\n ruletype = result.get('cpt_classifier') or 'isbt'\n table = config['cpt_classification']\n lithologyclassifier = LithologyClassifier(table, ruletype=ruletype)\n boreholes = (\n b.to_lithology(lithologyclassifier, admixclassifier)\n for b in boreholes\n )\n\n # classify sandmedian if needed\n if result.get('classify_sandmedian', False):\n bins = config['sandmedianbins']\n sandmedianclassifier = SandmedianClassifier(bins)\n boreholes = (\n b.update_sandmedianclass(sandmedianclassifier) for b in boreholes\n )\n\n # simplify if needed\n if result.get('simplify'):\n min_thickness = result.get('min_thickness')\n by_legend = lambda s: 
{'record': segmentstyles.lookup(s)}\n\n boreholes = (\n b.simplified(min_thickness=min_thickness, by=by_legend) if b.format in result.get('simplify') \n else b\n for b in boreholes\n )\n\n # read points\n point_sources = datasources.get('points') or []\n points = points_from_sources(point_sources)\n\n # surfaces\n surfaces = datasources.get('surfaces') or []\n \n # reference planes\n refplanes = datasources.get('referenceplanes') or []\n\n # solids\n solids = datasources.get('solids') or []\n\n # regis\n regismodel = datasources.get('regismodel')\n if regismodel is not None:\n regismodel = GroundLayerModel.from_folder(\n folder=regismodel['folder'],\n indexfile=regismodel['indexfile'],\n fieldnames=regismodel['fieldnames'],\n delimiter=regismodel.get('delimiter') or ',',\n res=regismodel.get('res', 10.),\n default=config['cross_section_plot']['regis_style'],\n name='Regis',\n )\n\n # sort regis by layer number\n regismodel.sort()\n\n # filter missing coordinates and less than minimal depth\n boreholes = [\n b for b in boreholes\n if\n (b.x is not None) and\n (b.y is not None) and\n (b.z is not None) and\n (b.depth is not None) and\n (b.depth >= min_depth)\n ]\n\n points = [\n p for p in points\n if\n ((p.top is not None) or (p.base is not None))\n ]\n \n if points_of_interest is not None:\n poi = []\n for row in shapefiles.read(points_of_interest['file']):\n poi.append(PointsOfInterest(row, \n row['properties'][points_of_interest.get('labelfield')],\n points_of_interest.get('ylim'),\n )\n )\n else:\n poi = None\n\n # default labels\n defaultlabels = iter(config['defaultlabels'])\n if windlabels is None:\n windlabels = config['defaultwindlabels']\n if winddirs is None:\n winddirs = config['defaultwinddirs']\n\n # selected set\n selected = cross_section_lines.get('selected')\n if selected is not None:\n selected = set(selected)\n\n css = []\n for row in shapefiles.read(cross_section_lines['file']):\n # get label\n if cross_section_lines.get('labelfield') is not None:\n label = row['properties'][cross_section_lines['labelfield']]\n else:\n label = next(defaultlabels)\n\n if (selected is not None) and (label not in selected):\n log.warning('skipping {label:}'.format(label=label))\n continue\n \n if cross_section_lines.get('titlefield') is not None:\n title = row['properties'][cross_section_lines['titlefield']]\n else:\n title = None\n \n if cross_section_lines.get('labeloption') is not None:\n label_option = cross_section_lines['labeloption']\n else:\n label_option = config['defaultlabeloption'] \n\n # log message\n log.info('cross-section {label:}'.format(label=label))\n\n # define cross-section\n cs = cross_section.CrossSection(\n geometry=row['geometry'],\n label=label,\n title=title,\n windlabels=windlabels,\n winddirs=winddirs,\n buffer_distance=buffer_distance,\n )\n\n # add boreholes to cross-section and optionally filter points too close to eachother\n cs.add_boreholes(boreholes)\n if result.get('min_borehole_dist') is not None:\n cs.filter_close_boreholes(result.get('min_borehole_dist'))\n\n # add points to cross_section\n cs.add_points(points)\n \n # add points of interest\n cs.add_pois(poi)\n\n # add surfaces to cross-section \n for surface in surfaces:\n cs.add_surface(Surface(\n name=surface['name'],\n surfacefile=surface['file'],\n res=surface['res'],\n stylekey=surface.get('style') or 'default',\n ))\n \n # add reference planes to cross-section and optionally find and pass \n # the Surface instance that the reference plane is tied to.\n for refplane in refplanes: \n tied = 
refplane.get('tied')\n if tied is not None:\n tied_surface = cs.surfaces[[s.name for s in cs.surfaces].index(tied)] \n else:\n tied_surface=None\n \n cs.add_refplane(RefPlane(\n name=refplane['name'],\n value=refplane['value'],\n tied_surface=tied_surface,\n stylekey=refplane.get('style') or 'default',\n ))\n\n # add solids to cross-section\n for solid in solids:\n cs.add_solid(Solid(\n name=solid['name'],\n topfile=solid['topfile'],\n basefile=solid['basefile'],\n res=solid['res'],\n stylekey=solid('style') or 'default',\n ))\n\n # add regis solids to cross-section\n solidstyles_with_regis = solidstyles.copy(deep=True)\n if regismodel is not None:\n # get coordinates along cross-section line\n _, coords = zip(*cs.discretize(regismodel.res))\n\n # add solids to cross-section\n for number, solid in regismodel.solids:\n if not regismodel.solid_has_values(solid, coords, ylim):\n continue\n cs.add_solid(solid)\n solidstyles_with_regis.add(\n key=solid.name,\n label=solid.name,\n record=regismodel.styles.get(solid.name) or {},\n )\n\n # definest styles lookup\n plotting_styles = {\n 'segments': segmentstyles,\n 'verticals': verticalstyles,\n 'surfaces': surfacestyles,\n 'referenceplanes': referenceplanestyles,\n 'solids': solidstyles_with_regis,\n }\n\n # define plot\n plt = plotting.CrossSectionPlot(\n cross_section=cs,\n config=config['cross_section_plot'],\n styles=plotting_styles,\n xtickstep=xtickstep,\n ylim=ylim,\n xlabel=xlabel,\n ylabel=ylabel,\n dist_txt=dist_txt,\n label_option=label_option,\n metadata=metadata,\n legend_ncol=int(regismodel is not None) + 1,\n )\n\n # plot and save to PNG file\n if title:\n file_label = title\n else:\n file_label = label \n \n imagefilename = config['image_filename_format'].format(label=file_label)\n imagefile = folder / imagefilename\n log.info('saving {f.name:}'.format(f=imagefile))\n plt.to_image(str(imagefile))\n\n # save to CSV file\n csvfilename = config['csv_filename_format'].format(label=file_label)\n csvfile = folder / csvfilename\n log.info('saving {f.name:}'.format(f=csvfile))\n extra_fields = result.get('extra_fields') or {}\n extra_fields = {k: tuple(v) for k, v in extra_fields.items()}\n cross_section_to_csv(cs, str(csvfile),\n extra_fields=extra_fields,\n )\n\n # collect cross-sections\n css.append(cs)\n\n # export endpoints\n endpointsfile = folder / 'endpoints.shp'\n shapefiles.export_endpoints(str(endpointsfile), css,\n **config['shapefile'],\n )\n\n # export projection lines\n projectionlinesfile = folder / 'projectionlines.shp'\n shapefiles.export_projectionlines(str(projectionlinesfile), css,\n **config['shapefile'],\n )\n","sub_path":"xsboringen/scripts/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":11604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"315725036","text":"import clr\nclr.AddReferenceToFile(\"Armando.exe\")\n\nimport sys\nimport Armando\n\nclass Interrupt(Exception):\n def __init__(self):\n self.cause = None\n\nclass Environment(object): \n def __new__(cls):\n object.__new__(cls)\n moveHandler = Armando.CustomMoveHandler(Interrupt())\n return moveHandler.Env\n\nclass Store(object): \n def __new__(cls, env, capacity = sys.maxint):\n object.__new__(cls)\n return env.new_store(capacity)","sub_path":"Listati/Python_SimPy_Layer.py","file_name":"Python_SimPy_Layer.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"518949010","text":"# Project home: 
https://github.com/mchobby/esp8266-upy/tree/master/ili934x\n# In this sample we will:\n# * See the screen axes (in related image)\n# * Draw a hline (optimized drawing)\n#\nfrom machine import SPI,Pin\nfrom ili934x import *\n\n# PYBStick config (idem with PYBStick-Feather-Face)\nspi = SPI( 1, baudrate=40000000 )\ncs_pin = Pin(\"S15\")\ndc_pin = Pin(\"S13\")\nrst_pin = None\n\n# Raspberry-Pi Pico\n# spi = SPI( 0 )\n# cs_pin = Pin(5) # GP5\n# dc_pin = Pin(3) # GP3\n# rst_pin = None\n\n# r in 0..3 is rotation, r in 4..7 = rotation+miroring\n# Use 3 for landscape mode\nlcd = ILI9341( spi, cs=cs_pin, dc=dc_pin, rst=rst_pin, w=320, h=240, r=0)\nlcd.erase()\n\n# Correct positionning\nlcd.pixel( 80, 130, YELLOW ) # x=80, y=130\nlcd.hline( 81, 130, 20, BLUE )\n\n# Half width of screen\nlcd.hline( 0, 160, 120, GREEN )\n\nlcd.hline( 0, 170, 240, PURPLE )\n","sub_path":"ili934x/examples/test_hlines.py","file_name":"test_hlines.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"322955165","text":"#!/usr/bin/env python\n\n#\n#\n#\n#\n#\n\n#\n# IMPORT SOURCES:\n#\n#\n\n#\n# Get DisProt.\n#\n\n# PRE-CODE\nimport faulthandler\nfaulthandler.enable()\n\n# IMPORTS\n\n# Imports for recognizing modules.\nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../../..\"))\n\n# Import modules.\nimport gnomics.objects.compound\n\n# Other imports.\nimport json\nimport pubchempy as pubchem\nimport requests\nimport urllib.error\nimport urllib.parse\nimport urllib.request\n\n# MAIN\ndef main():\n disprot_unit_tests(\"P24592\", \"IBP6_HUMAN\")\n \n# Get DisProt ID.\ndef get_disprot_id(prot):\n disprot_array = []\n \n for ident in prot.identifiers:\n if ident[\"identifier_type\"].lower() in [\"disprot\", \"disprot id\", \"disprot identifier\"]:\n disprot_array.append(ident[\"identifier\"])\n \n if disprot_array:\n return disprot_array\n \n for ident in prot.identifiers:\n if ident[\"identifier_type\"].lower() in [\"uniprotkb id\", \"uniprotkb identifier\", \"uniprot id\", \"uniprot identifier\"]:\n \n url = \"http://www.uniprot.org/uploadlists/\"\n params = {\n \"from\": \"ID\",\n \"to\": \"DISPROT_ID\",\n \"format\": \"tab\",\n \"query\": ident[\"identifier\"],\n }\n \n data = urllib.parse.urlencode(params)\n data = data.encode(\"utf-8\")\n request = urllib.request.Request(url, data)\n contact = \"\"\n request.add_header(\"User-Agent\", \"Python %s\" % contact)\n response = urllib.request.urlopen(request)\n page = response.read(200000).decode(\"utf-8\")\n \n newline_sp = page.split(\"\\n\")\n id_from = newline_sp[0].split(\"\\t\")[0].strip()\n id_to = newline_sp[0].split(\"\\t\")[1].strip()\n orig_id = newline_sp[1].split(\"\\t\")[0].strip()\n new_id = newline_sp[1].split(\"\\t\")[1].strip()\n if new_id not in disprot_array:\n disprot_array.append(new_id)\n \n elif ident[\"identifier_type\"].lower() in [\"acc\", \"uniprot ac\", \"uniprot acc\", \"uniprot accession\", \"uniprotkb ac\", \"uniprotkb acc\", \"uniprotkb accession\"]:\n \n url = \"http://www.uniprot.org/uploadlists/\"\n params = {\n \"from\": \"ACC\",\n \"to\": \"DISPROT_ID\",\n \"format\": \"tab\",\n \"query\": ident[\"identifier\"],\n }\n \n data = urllib.parse.urlencode(params)\n data = data.encode(\"utf-8\")\n request = urllib.request.Request(url, data)\n contact = \"\"\n request.add_header(\"User-Agent\", \"Python %s\" % contact)\n response = urllib.request.urlopen(request)\n page = response.read(200000).decode(\"utf-8\")\n \n 
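# The uploadlists response is expected to be a small tab-separated table:\n            # a From/To header row followed by one mapping row per query, e.g.\n            # P24592 mapped to a DisProt accession (illustrative only, not a\n            # verified mapping). The parsing below therefore takes the header\n            # from line 0 and the single mapping from line 1.\n            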
newline_sp = page.split(\"\\n\")\n id_from = newline_sp[0].split(\"\\t\")[0].strip()\n id_to = newline_sp[0].split(\"\\t\")[1].strip()\n orig_id = newline_sp[1].split(\"\\t\")[0].strip()\n new_id = newline_sp[1].split(\"\\t\")[1].strip()\n if new_id not in disprot_array:\n disprot_array.append(new_id)\n \n return disprot_array\n \n# UNIT TESTS\ndef disprot_unit_tests(uniprot_kb_ac, uniprot_kb_id):\n uniprot_kb_ac_prot = gnomics.objects.protein.Protein(identifier = uniprot_kb_ac, language = None, identifier_type = \"UniProt accession\", source = \"UniProt\", taxon = \"Homo sapiens\")\n print(\"Getting DisProt ID from UniProtKB accession (%s):\" % uniprot_kb_ac)\n for iden in get_disprot_id(uniprot_kb_ac_prot):\n print(\"- \" + str(iden))\n \n uniprot_kb_id_prot = gnomics.objects.protein.Protein(identifier = uniprot_kb_id, language = None, identifier_type = \"UniProt identifier\", source = \"UniProt\", taxon = \"Homo sapiens\")\n print(\"\\nGetting DisProt ID from UniProtKB identifier (%s):\" % uniprot_kb_id)\n for iden in get_disprot_id(uniprot_kb_id_prot):\n print(\"- \" + str(iden))\n \n# MAIN\nif __name__ == \"__main__\": main()","sub_path":"gnomics/objects/protein_files/disprot.py","file_name":"disprot.py","file_ext":"py","file_size_in_byte":4054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"521073704","text":"import numpy as np\nimport cv2 as cv\nimport glob\nimport glimpse\nimport glimpse.convert as gc\nfrom glimpse.helpers import merge_dicts\nimport matplotlib.pyplot as plt\nimport sys\nimport os\ndef parse_camera_matrix(x):\n \"\"\"\n Return fx, fy, cx, and cy from camera matrix.\n Arguments:\n x (array-like): Camera matrix [[fx 0 cx], [0 fy cy], [0 0 1]]\n \n Returns:\n dict: fx, fy, cx, and cy\n \"\"\"\n x = np.asarray(x)\n return {'fx': x[0, 0], 'fy': x[1, 1], 'cx': x[0, 2], 'cy': x[1, 2]}\n\n\ndef parse_distortion_coefficients(x):\n\n x = np.asarray(x)\n labels = ('k1', 'k2', 'p1', 'p2', 'k3', 'k4', 'k5', 'k6')\n return {key: x[i] if i < len(x) else 0 for i, key in enumerate(labels)}\n\n# termination criteria\ncriteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)\nchessboard_flags = cv.CALIB_CB_ADAPTIVE_THRESH + cv.CALIB_CB_FAST_CHECK + cv.CALIB_CB_NORMALIZE_IMAGE\n# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\ndimx = 6\ndimy = 9\nedgesizeMeters = 0.0204216 #meters\nobjp = np.zeros((dimx*dimy,3), np.float32)\nobjp[:,:2] = np.mgrid[0:dimx,0:dimy].T.reshape(-1,2)*edgesizeMeters\n# Arrays to store object points and image points from all the images.\nobjpoints = [] # 3d point in real world space\nimgpoints = [] # 2d points in image plane.\ndir_ = sys.argv[1]\nimages = glob.glob(os.path.join(dir_,'*.JPG'))\nprint(\"Found \",len(images),\" Images\")\nprint(\"Dims: {} {}\".format(dimx,dimy))\ncount = 0\nfor fname in images:\n print(fname,\"\\n\")\n img = cv.imread(fname)\n gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)\n # Find the chess board corners\n ret, corners = cv.findChessboardCorners(gray, (dimx,dimy),flags=chessboard_flags)\n # If found, add object points, image points (after refining them)\n print(\"Corner status: \",ret)\n keep = False\n if ret == True:\n count += 1\n cv.drawChessboardCorners(gray, (dimx, dimy), corners, ret)\n result_name = 'board'+\"_\"+fname\n cv.imwrite(result_name, gray)\n objpoints.append(objp)\n corners = np.squeeze(corners)\n cv.cornerSubPix(gray,corners, (3,3), (-1,-1), criteria)\n imgpoints.append(corners)\nprint( \"{}/{} Good Images 
Used\".format(count,len(images)) )\n \nret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)\nprint(\"Camera Matrix: \",mtx)\nprint(\"Distortion Coefficients: \",dist)\ncam = glimpse.Image(images[0],exif=glimpse.Exif(images[0]),cam=dict(sensorsz=(35.9,24))).cam\ncam_matdict = parse_camera_matrix(mtx)\ncam_distcoefs = parse_distortion_coefficients(np.squeeze(dist))\ncammodel = merge_dicts(cam_matdict,cam_distcoefs)\ncammodel = merge_dicts(cam.as_dict(),cammodel)\ncammodel[\"f\"][0] = cammodel[\"fx\"]\ncammodel[\"f\"][1] = cammodel[\"fy\"]\ncammodel.pop(\"fx\")\ncammodel.pop(\"fy\")\ncammodel[\"c\"][0] = cammodel[\"cx\"] \ncammodel[\"c\"][1] = cammodel[\"cy\"] \ncammodel[\"p\"][0] = cammodel[\"p1\"]\ncammodel[\"p\"][1] = cammodel[\"p2\"]\ncammodel[\"k\"][0] = cammodel[\"k1\"]\ncammodel[\"k\"][1] = cammodel[\"k2\"] \ncammodel[\"k\"][2] = cammodel[\"k3\"]\ncammodel.pop(\"cy\")\ncammodel.pop(\"cx\")\ncammodel.pop(\"p1\")\ncammodel.pop(\"p2\")\ncammodel.pop(\"k1\")\ncammodel.pop(\"k2\")\ncammodel.pop(\"k3\")\ncammodel.pop(\"k4\")\ncammodel.pop(\"k5\")\ncammodel.pop(\"k6\")\ncamera = glimpse.Camera(**cammodel)\ncamera.write(\"intrinsicmodel.json\")\n\n","sub_path":"scripts/optimization/calibrate_cam.py","file_name":"calibrate_cam.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"246902290","text":"import os\n\nROOT_PATH = os.getcwd()\n\nDEFAULT_TEXT_STYLE = 'gru@bow_filterstop@word2vec_filterstop'\nDEFAULT_IMG_FEATURE = 'pyresnet152-pool5os'\n\nDEFAULT_BOW_VOCAB = 'word_vocab_5.txt'\nDEFAULT_RNN_VOCAB = 'word_vocab_5.txt'\n\nDEFAULT_CORPUS = 'flickr'\nDEFAULT_WORD2VEC = 'vec500flickr30m'\n","sub_path":"basic/constant.py","file_name":"constant.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"111359442","text":"#!/usr/bin/env python\n# encoding: utf-8\n# qiushengming-minnie\nimport random\nimport datetime as dtime\n\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport pandas as pd\n\nfrom minnie.common import moracle\nfrom datetime import datetime\nfrom matplotlib.ticker import MultipleLocator\n\nplt.rcParams['font.sans-serif'] = ['SimHei'] # 用来正常显示中文标签\nplt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号\ncursor = moracle.OralceCursor()\n\n\ndef getFundName(code):\n \"\"\"\n 通过基金代码返回基金名称\n :param code:\n :return:\n \"\"\"\n sql = 'SELECT FUND_NAME FROM FUND_OPENFUNDNETVALUE WHERE FUND_CODE = :fundCode'\n return cursor.fechall(sql, {'fundCode': code})[0][0]\n\n\ndef getData(code, _minDate=None, maxDate=None):\n \"\"\"\n 获取基金时间段内的每日净值数据\n :param code: 基金编码\n :param _minDate: is Note为max时间的前12周的日期\n :param maxDate: is Note为当天日期\n :return:\n \"\"\"\n if maxDate is None:\n maxDate = datetime.now().strftime('%Y-%m-%d')\n\n if _minDate is None:\n delta = dtime.timedelta(weeks=12)\n _minDate = (datetime.strptime(maxDate, '%Y-%m-%d').date() - delta).strftime('%Y-%m-%d')\n\n data = cursor.fechall(\n \"SELECT NAV,NC,FUND_DATE,FUND_CODE,NVL(GROWTHRATE, '0') GROWTHRATE FROM FUND_NET_VALUE_HISTORY WHERE FUND_CODE = :fundCode AND TO_DATE(FUND_DATE, 'yyyy-MM-dd') > TO_DATE(:minDate, 'yyyy-MM-dd') AND TO_DATE(FUND_DATE, 'yyyy-MM-dd') < TO_DATE(:maxDate, 'yyyy-MM-dd') ORDER BY FUND_DATE\",\n {'fundCode': code, 'minDate': _minDate, 'maxDate': maxDate}\n )\n\n if not len(data):\n return [], [], ''\n\n # 净值\n nav = []\n # 累计净值\n nc = []\n # 日期\n date 
= []\n # 涨幅\n ups_downs = []\n for d in data:\n nav.append(float(d[0]))\n # nc.append(float(d[1]))\n date.append(datetime.strptime(d[2], '%Y-%m-%d').date())\n ups_downs.append(float(str(d[4]).replace('%', '')))\n\n return nav, date, getFundName(code), ups_downs\n\n\ndef showDateView(_codes, _minDate=None, maxDate=None):\n \"\"\"\n 数据可视化展示\n :param maxDate:\n :param _minDate:\n :param _codes: 基金编码列表\n :return:\n \"\"\"\n if type(_codes) is not list:\n raise TypeError('类型错误')\n\n plt.figure()\n\n # x轴的取值范围\n # plt.xlim((-2, 4))\n # y轴的取值范围\n plt.ylim((-2, 4))\n\n # x轴的描述\n plt.xlabel(u'交易日期')\n # y轴的描述\n plt.ylabel(u'净值(RMB)')\n\n # plt.xticks(pd.date_range(data.index[0], data.index[-1], freq='1min')) # 时间间隔\n # plt.xticks(rotation=90)\n\n ax = plt.gca()\n # 设置x轴显示格式及其间距\n ax.xaxis.set_major_formatter(mdates.DateFormatter('%m%d')) # %Y%m\n ax.xaxis.set_major_locator(mdates.DayLocator())\n # 设置y轴显示格式及其间距\n ax.yaxis.set_major_locator(MultipleLocator(0.5))\n ax.spines['right'].set_color('none')\n ax.spines['top'].set_color('none')\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n ax.spines['bottom'].set_position(('data', 0))\n # ax.spines['left'].set_position(('data', 0))\n \"\"\"\n plot方法接收需要绘制图形的数据,根据接收数据进行绘制图像。\n label-标签,图例标识\n color-绘制线条的颜色,颜色可以用CSS的颜色代码\n linewidth-线条的粗细\n\n \"\"\"\n for code in _codes:\n nav, date, label, ups_downs = getData(code, _minDate, maxDate)\n if nav.__len__() <= 0:\n continue\n # plt.plot(date, nav, label=label)\n plt.plot(date, ups_downs, label=label + '涨幅')\n # plt.plot(date, nc, label=u'累计净值')\n\n # plt.gcf().autofmt_xdate() # 自动旋转日期标记\n # 设置标题\n plt.title(minDate + '~' + maxDate + u'基金趋势图')\n \"\"\"\n 展示图例简介 upper left\n handles,label联合使用,修改标签的名称\n \"\"\"\n plt.legend(loc='best')\n plt.show()\n\n\nif __name__ == '__main__':\n # ,'161725', '070032', '110022', '000457'\n codes = ['340008', '161725', '070032', '110022', '000457']\n minDate = '2017-12-01'\n maxDate = '2018-01-01'\n showDateView(codes, minDate, maxDate)\n\n\"\"\"\n 1.怎么通过数据分析出优质基金;\n 2.通过昨日数据,预测明日的涨跌情况;\n 3.那些基金的涨幅受到大盘的涨幅的影响程度;\n\"\"\"\n","sub_path":"python/no_work/utils/mmatplotlib.py","file_name":"mmatplotlib.py","file_ext":"py","file_size_in_byte":4486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"551138744","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [ \n path(\"schedule\",views.schedule_view, name=\"schedule\"),\n path(\"matchdatasubmission\",views.matchdatasubmission,name=\"matchdatasubmission\"),\n path(\"matchinput\",views.matchinput_view,name=\"matchinput\"),\n path(\"tools/replayanalyzersubmission\",views.replayanalyzer_view,name=\"replayanalyzersubmission\"),\n path(\"replayanalyzer\",views.replayanalyzer,name=\"replayanalyzer\"),\n]","sub_path":"schedule/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"378581025","text":"# This program will find the 13 adjacent digits in series.txt which result in\n# the largest possible product\n# Reference: https://projecteuler.net/problem=8\n\nfrom prettytable import PrettyTable\n\ndef main():\n # Open series.txt and read the digits into an array\n digits = []\n with open(\"series.txt\", \"r\") as f:\n for line in f:\n line = line.strip()\n for c in line:\n digits.append(int(c))\n\n # Find all blocks without a zero\n allBlocks = []\n ind = 0\n while (ind < len(digits)):\n block = [digits[ind:ind+13], ind]\n if (not 0 in block[0]):\n allBlocks.append(block)\n ind += 1\n\n # Format a table to display the results\n table = PrettyTable([\"SERIES\", \"POSITION\", \"PRODUCT\", \"COMMENTS\"])\n table.align[\"PRODUCT\"] = \"l\"\n\n # Find the maximum product\n prodMax = 0\n prodInd = 0\n ind = 0\n for block in allBlocks:\n total = 1\n xout = \"\"\n comments = \"\"\n\n for d in block[0]:\n total *= d\n xout += str(d)\n\n if (total > prodMax):\n prodInd = ind\n prodMax = total\n comments = \"* New Max *\"\n\n out = [str(xout), str(block[1]), str(total), comments]\n table.add_row(out)\n ind += 1\n\n print(table)\n print (\"Series: \" + str(allBlocks[prodInd][0]) + \" | Index: \" + str(allBlocks[prodInd][1]) + \" | Product: \" + str(prodMax))\n\nmain()\n","sub_path":"largest-product-in-series/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"63914655","text":"from django.urls import path\nfrom factories import views\n\n\napp_name = 'factories'\n\n\nurlpatterns = [\n path('add/', views.add_factory, name='add_factory'),\n path('update//', views.update_factory, name='update_factory'),\n path('delete//', views.delete_factory, name='delete_factory'),\n]","sub_path":"factories/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"50663481","text":"import sys, getopt\nimport numpy as np \nimport matplotlib.pyplot as plt\n\ndef dv_distr_plotter(argv):\n# dv_distr_plotter(a=0, b=2, N_vals=30, title='', distr_type='gauss', \n# pfname='distribution_plot.png'):\n#\n\n try:\n opts, args = getopt.getopt(argv[1:], \"a:b:N:title:d:p\",\n [\"a=\", \"b=\", \"N=\", \"title=\", \"d=\"\n , \"p=\"])\n print(opts)\n print(args)\n except:\n print('Input variable syntax seems wrong.')\n sys.exit(2)\n \n print('Input variables are: ')\n for opt, arg in opts:\n print(opt)\n if opt in (\"-a\", \"--a\"):\n arg_a = arg\n print(arg_a)\n elif opt in (\"-b\", \"--b\"):\n arg_b = arg\n print(arg_b)\n elif opt in (\"-N\", \"--N\"):\n arg_N_vals = arg\n print(arg_N_vals)\n elif opt in (\"-title\", \"--title\"):\n arg_title = arg\n print(arg_title)\n elif opt in (\"-d\", \"--d\"):\n arg_distr_type = arg\n print(arg_distr_type)\n elif opt in 
(\"-p\", \"--p\"):\n arg_pfname = arg\n print(arg_pfname)\n\n # default values\n a = 0\n b = 2\n N_vals = 500\n title = 'dummy distribution'\n distr_type = 'gauss'\n pfname = 'dummy_distr_plot'\n\n if 'arg_a' not in locals(): \n arg_a = a # or some other default value.\n else:\n arg_a = float(arg_a)\n \n if 'arg_b' not in locals(): \n arg_b = b # or some other default value.\n else:\n arg_b = float(arg_b)\n \n if 'arg_N_vals' not in locals(): \n arg_N_vals = N_vals # or some other default value.\n else:\n arg_N_vals = int(arg_N_vals)\n\n if 'arg_title' not in locals(): \n arg_title = title # or some other default value.\n\n if 'arg_distr_type' not in locals(): \n arg_distr_type = distr_type # or some other default value.\n\n if 'arg_pfname' not in locals(): \n arg_pfname = pfname # or some other default value.\n \n\n print('Distribution plotting funtion')\n if arg_distr_type == 'gauss':\n print('Default or not. You chose the gauss distribution')\n print('Therefore -> ' + 'a=mu : ' + str(arg_a) + \n ' & b=sigma : ' + str(arg_b))\n distr_data = np.random.normal(arg_a, arg_b, arg_N_vals)\n elif arg_distr_type == 'uniform':\n print('You chose the uniform distribution')\n print('Therefore -> ' + 'a=mu : ' + str(arg_a) +\n ' & b=sigma : ' + str(arg_b))\n distr_data = np.random.uniform(arg_a, arg_b, arg_N_vals)\n\n elif arg_distr_type == 'lognormal':\n print('Default or not. You chose the lognormal distribution')\n print('Therefore -> ' + 'a=alpha : ' + str(arg_a) +\n ' & b=beta : ' + str(arg_b))\n distr_data = np.random.lognormal(arg_a, arg_b, arg_N_vals)\n\n\n plt.cla()\n plt.hist(distr_data, bins=150)\n plt.ylabel('Probability')\n plt.title(arg_title)\n plot_fname = arg_pfname + '.png'\n print(plot_fname)\n plt.savefig(plot_fname, dpi=150)\n\nif __name__ == '__main__':\n # Map command line arguments to function arguments.\n print('Function input: -a: int(), -b: int(), -N=int(), --title=str(), d=str(''gauss''), p=filepath / str()')\n dv_distr_plotter(sys.argv)\n","sub_path":"dv_distr_plotter.py","file_name":"dv_distr_plotter.py","file_ext":"py","file_size_in_byte":3369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"116768252","text":"# coding: utf-8\nfrom __future__ import unicode_literals\nimport os\nimport time\nfrom termcolor import colored\n\n\nclass BaseScene(object):\n # prints per seconds\n pps = 1200\n rows = 23\n cols = 80\n blank_char = ' '\n char = ' '\n color = None\n bg_color = None\n playing = False\n objects = []\n actions = []\n\n def __init__(self, **kwargs):\n self.objects = []\n self.actions = []\n\n for key, value in kwargs.items():\n if key not in ('objects', 'actions') and hasattr(self, key):\n setattr(self, key, value)\n else:\n raise Exception('Key \"{}\" not found'.format(key))\n\n def add_object(self, obj):\n obj.scene = self\n self.objects.append(obj)\n\n def remove_object(self, obj):\n self.objects.remove(obj)\n\n def add_action(self, action_class, *args, **kwargs):\n auto_start = True\n if kwargs.get('_auto_start') is not None:\n auto_start = kwargs.pop('_auto_start')\n\n kwargs['scene'] = self\n action = action_class(*args, **kwargs)\n self.actions.append(action)\n\n if auto_start:\n action.start()\n\n def remove_action(self, action):\n self.actions.remove(action)\n\n def clean(self):\n os.system('cls' if os.name == 'nt' else 'clear')\n\n def get_terminal_size(self):\n # TODO: compatibility\n row, col = os.popen('stty size', 'r').read().split()\n row = int(row)\n col = int(col)\n return row, col\n\n def 
render(self):\n screen_len = self.rows * self.cols\n if self.color or self.bg_color:\n screen = [colored(self.char, self.color, self.bg_color)] * screen_len\n else:\n screen = [self.char] * screen_len\n\n total_rows, total_cols = self.get_terminal_size()\n\n for obj in self.objects:\n lines = str(obj).split('\\n')\n for i, text in enumerate(lines):\n y = obj.y + i\n for j, char in enumerate(text):\n x = obj.x + j\n\n if obj.color:\n char = colored(char, obj.color, obj.bg_color)\n\n position = self.cols * y + x\n if position >= 0 and position < screen_len:\n screen[position] = char\n\n to_print = ''\n for i, char in enumerate(screen, start=1):\n to_print += char\n if i % self.cols == 0 and total_cols > self.cols:\n to_print += self.blank_char * (total_cols - self.cols)\n self.clean()\n print(to_print)\n\n def start(self):\n first = True\n self.playing = True\n while self.playing:\n if not first:\n time.sleep(0.05)\n else:\n first = False\n if self.playing:\n self.render()\n\n def stop(self):\n self.playing = False\n\n for action in self.actions:\n action.stop()\n\n #self.clean()\n","sub_path":"pygamii/scene.py","file_name":"scene.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"182730080","text":"__author__ = 'jameskreft'\r\n\r\nimport requests\r\nimport feedparser\r\nfrom bs4 import BeautifulSoup\r\nimport re\r\nfrom operator import itemgetter\r\nfrom pubs_ui import app\r\nimport json\r\nfrom urlparse import urljoin\r\nfrom copy import deepcopy\r\n\r\n\r\n#should requests verify the certificates for ssl connections\r\nverify_cert = app.config['VERIFY_CERT']\r\n\r\ndef pubdetails(pubdata):\r\n \"\"\"\r\n build the ordered list to make the 'Publications details' box\r\n\r\n :param pubdata: the data pulled from the pubs warehouse web service\r\n :return: pubdata with an additional \"details\" element\r\n \"\"\"\r\n\r\n pubdata['details'] = []\r\n #details list element has len of 2 or 3. If 2, the item is coming back as a simple Key:value object, but if three\r\n # there are either lists or dicts. 
the first item in the list is the param in pubdata, the 2nd or 3rd is the display\r\n # descriptor and the second if it exists is the secondary key needed to get the text.\r\n detailslist = [\r\n ['publicationType', 'text', 'Publication type:'],\r\n ['publicationSubtype', 'text', 'Publication Subtype:'],\r\n ['seriesName', 'Series name:'],\r\n ['seriesNumber', 'Series number:'],\r\n ['subseriesTitle', 'Subseries'],\r\n ['chapter', 'Chapter:'],\r\n ['subchapterNumber', 'Sub-chapter:'],\r\n ['issn', 'online', 'ISSN (online):'],\r\n ['issn', 'print', 'ISSN (print):'],\r\n ['isbn', 'ISBN:'],\r\n ['doi', 'DOI:'],\r\n ['edition', 'Edition:'],\r\n ['volume', 'Volume'],\r\n ['issue', 'Issue:'],\r\n ['publicationYear', 'Year Published:'],\r\n ['language', 'Language:'],\r\n ['publisher', 'Publisher:'],\r\n ['publisherLocation', 'Publisher location:'],\r\n ['costCenters', 'text', 'Contributing office(s):'],\r\n ['productDescription', 'Description:'],\r\n ['numberofPages', 'Number of pages:'],\r\n ['largerWorkType', 'text', 'Publication type:'],\r\n ['largerWorkSubtype', 'text', 'Publication Subtype:'],\r\n ['largerWorkTitle', 'text', 'Larger Work Title:'],\r\n ['startPage', 'Start page:'],\r\n ['endPage', 'End page:'],\r\n ['temporalStart', 'Time Range Start:'],\r\n ['temporalEnd', 'Time Range End:'],\r\n ['conferenceTitle', 'Conference Title:'],\r\n ['conferenceLocation', 'Conference Location:'],\r\n ['conferenceDate', 'Conference Date:'],\r\n ['country', 'Country:'],\r\n ['state', 'State:'],\r\n ['county', 'County:'],\r\n ['city', 'City:'],\r\n ['otherGeospatial', 'Other Geospatial:'],\r\n ['datum', 'Datum:'],\r\n ['projection', 'Projection:'],\r\n ['scale', 'Scale:'],\r\n ['onlineOnly', 'Online Only (Y/N):'],\r\n ['additionalOnlineFiles', 'Additional Online Files(Y/N):']\r\n\r\n ]\r\n for detail in detailslist:\r\n\r\n if len(detail) == 3:\r\n #if the detail exists and is a dict with a couple key:value pairs, get the right value\r\n if pubdata.get(detail[0]) is not None and isinstance(pubdata.get(detail[0]), dict):\r\n pubdata['details'].append({detail[2]: pubdata[detail[0]].get(detail[1])})\r\n #if the thing is a list of dicts and if there is something in the list, concatenate the values into a string\r\n elif pubdata.get(detail[0]) is not None and isinstance(pubdata.get(detail[0]), list) \\\r\n and len(pubdata.get(detail[0])) > 0:\r\n dd = []\r\n for det in pubdata.get(detail[0]):\r\n dd.append(det.get(detail[1]))\r\n dd = ', '.join(dd)\r\n pubdata['details'].append({detail[2]: dd})\r\n elif len(detail) == 2 and pubdata.get(detail[0]) is not None and len(pubdata.get(detail[0])) > 0:\r\n pubdata['details'].append({detail[1]: pubdata.get(detail[0])})\r\n return pubdata\r\n\r\n\r\ndef create_display_links(pubdata):\r\n \"\"\"\r\n restructures links from the API so that they are easy to display in a jinja template\r\n :param pubdata:\r\n :return: pubdata with new displayLinks array\r\n \"\"\"\r\n display_links = {\r\n 'Abstract': [],\r\n 'Additional Report Piece': [],\r\n 'Appendix': [],\r\n 'Application Site': [],\r\n 'Authors Website': [],\r\n 'Chapter': [],\r\n 'Companion Files': [],\r\n 'Cover': [],\r\n 'Database': [],\r\n 'Digital Object Identifier': [],\r\n 'Document': [],\r\n 'Errata': [],\r\n 'Illustration': [],\r\n 'Image': [],\r\n 'Index Page': [],\r\n 'Metadata': [],\r\n 'Plate': [],\r\n 'Project Site': [],\r\n 'Raw Data': [],\r\n 'Read Me': [],\r\n 'Referenced Work': [],\r\n 'Related Work': [],\r\n 'Spatial Data': [],\r\n 'Thumbnail': [],\r\n 'Version History': []\r\n }\r\n links = 
pubdata.get(\"links\")\r\n for linktype in display_links:\r\n rankcounter = 1\r\n for link in links:\r\n if link['type']['text'] == linktype:\r\n if link.get('rank') is None:\r\n link['rank'] = rankcounter\r\n rankcounter += 1\r\n display_links[linktype].append(link)\r\n display_links = manipulate_plate_links(display_links)\r\n pubdata[\"displayLinks\"] = display_links\r\n return pubdata\r\n\r\n\r\ndef manipulate_plate_links(display_links):\r\n \"\"\"\r\n This function rejiggers plate link displays for plate links that are named regularly but do not have display text or\r\n a proper order\r\n :param display_links:\r\n :return: display links with rejiggered plate link order\r\n \"\"\"\r\n #only do something if there are links in the plate links section\r\n if len(display_links.get(\"Plate\")) > 0:\r\n for link in display_links[\"Plate\"]:\r\n url = link[\"url\"]\r\n file_name = url.split(\"/\")[-1].split(\".\")\r\n text = file_name[0]\r\n if link.get(\"text\") is None:\r\n if len(file_name[0].title().split('-')) > 1:\r\n try:\r\n text = file_name[0].title().split('-')\r\n text[1] = int(text[1])\r\n except (ValueError, IndexError):\r\n text = file_name[0].title().split('-')\r\n if len(file_name[0].split(\"_\")) > 1:\r\n try:\r\n text = file_name[0].split(\"_\")[-1]\r\n text = re.split('(\\d+)', text)[0:2]\r\n text[1] = int(text[1])\r\n except (ValueError, IndexError):\r\n try:\r\n text = file_name[0].split(\"_\")[0]\r\n text = re.split('(\\d+)', text)[0:2]\r\n text[1] = int(text[1])\r\n except (ValueError, IndexError):\r\n text = file_name[0].split(\"_\")\r\n\r\n link[\"text\"] = text\r\n if link.get('linkFileType') is None:\r\n link['linkFileType'] = {'text': file_name[1]}\r\n display_links[\"Plate\"] = sorted(display_links[\"Plate\"], key=itemgetter('text'))\r\n rankcounter = 1\r\n for link in display_links[\"Plate\"]:\r\n link['rank'] = rankcounter\r\n rankcounter += 1\r\n link['text'][1] = str(link['text'][1])\r\n link['text'] = \" \".join(link['text']).title()\r\n return display_links\r\n\r\n\r\ndef pull_feed(feed_url):\r\n \"\"\"\r\n pull page data from a my.usgs.gov confluence wiki feed\r\n :param feed_url: the url of the feed, created in confluence feed builder\r\n :return: the html of the page itself, stripped of header and footer\r\n \"\"\"\r\n feed = feedparser.parse(feed_url)\r\n\r\n # Process html to remove unwanted mark-up and fix links\r\n post = ''\r\n if len(feed['entries']) > 0:\r\n soup = BeautifulSoup(feed['entries'][0].summary)\r\n\r\n # Remove edited by paragraph\r\n soup.p.extract()\r\n\r\n # Remove final div in the feed\r\n feed_div = soup.find('div', class_='feed')\r\n children_divs = feed_div.findAll('div')\r\n children_divs[len(children_divs) - 1].extract()\r\n\r\n # Translate any in page links to use relative URL\r\n base = feed['entries'][0].summary_detail.base\r\n links = feed_div.select('a[href^=\"' + base + '\"]')\r\n for link in links:\r\n link['href'] = link['href'].replace(base, '')\r\n post = unicode(soup)\r\n\r\n return post\r\n\r\n\r\ndef supersedes(supersedes_url, index_id):\r\n \"\"\"\r\n pull supersede info for a pub from legacy \"extras\" endpoint\r\n :param supersedes_url:url for extras endpoint\r\n :param index_id: index_id of pub\r\n :return: dict of relevant supersede info\r\n \"\"\"\r\n\r\n supersede_array = requests.get(supersedes_url,\r\n params={'prod_id': index_id}, verify=verify_cert).json()['modsCollection']['mods'][0]['relatedItem'][0]\r\n #TODO: deal with pubs with more than one relationship\r\n return {'type': supersede_array['@type'], 
'index_id': supersede_array['identifier']['#text'],\r\n            'title': supersede_array['titleInfo']['title']}\r\n\r\n\r\ndef getbrowsecontent(browseurl, browsereplace):\r\n    \"\"\"\r\n    Gets the content of the legacy browse interface so that it can be used without extension.\r\n    :param browseurl: url of legacy browse interface\r\n    :param browsereplace: string substituted for \"browse\" in the page's links\r\n    :return: html content of links, breadcrumb, and title\r\n    \"\"\"\r\n    app.logger.info('The get_browse_content function is being called')\r\n    content = requests.get(browseurl, verify=verify_cert)\r\n    app.logger.info(str(content.status_code) + \" \" + str(content.url))\r\n    soup = BeautifulSoup(content.text)\r\n    for a in soup.findAll('a'):\r\n        a['href'] = a['href'].replace(\"browse\", browsereplace)\r\n    browse_content = {'links': soup.find('div', {\"id\": \"pubs-browse-links\"}).contents}\r\n    browse_content['breadcrumbs'] = soup.find('div', {\"id\": \"pubs-browse-breadcrumbs\"}).contents\r\n    browse_content['header'] = soup.find('div', {\"id\": \"pubs-browse-header\"}).contents\r\n\r\n    return browse_content\r\n\r\n\r\nclass SearchPublications(object):\r\n    \r\n    \"\"\"\r\n    Methods for executing various types\r\n    of searches against the backend\r\n    Pubs API.\r\n    \r\n    :param str search_url: URL without any search parameters appended\r\n    \"\"\"\r\n    \r\n    def __init__(self, search_url):\r\n        self.search_url = search_url\r\n    \r\n    def get_pubs_search_results(self, params=None):\r\n        \"\"\"\r\n        Searches the Pubs API with the specified query parameters\r\n        \r\n        :param dict params: dictionary of the form {'key1': 'value1', 'key2': 'value2'}\r\n        :return: query results (or None) and response status code.\r\n        :rtype: tuple\r\n        \"\"\"\r\n        search_result_obj = requests.get(url=self.search_url, params=params, verify=verify_cert)\r\n        try:\r\n            search_result_json = search_result_obj.json()\r\n            for record in search_result_json['records']:\r\n                if record.get(\"authors\") is not None:\r\n                    contributor_lists(record)\r\n            error_response = None\r\n        except (ValueError, TypeError):\r\n            search_result_json = None\r\n            error_response = search_result_obj.text\r\n        resp_status_code = search_result_obj.status_code\r\n        return search_result_json, resp_status_code\r\n\r\n\r\ndef contributor_lists(record):\r\n    contributor_types = ['authors', 'editors']\r\n    for contributor_type in contributor_types:\r\n        if record.get(contributor_type) is not None:\r\n            record[contributor_type + \"List\"] = make_contributor_list(record[contributor_type])\r\n            record[contributor_type + \"ListTyped\"] = concatenate_contributor_names(record[contributor_type])\r\n    return record\r\n\r\n\r\ndef make_contributor_list(contributors):\r\n    \"\"\"\r\n    Makes a list of names for contributors regardless of type that is easy to join in jinja. 
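A minimal usage sketch of `SearchPublications` — the endpoint URL below is a placeholder, not taken from the source, and the sketch assumes the module-level `requests`/`verify_cert` setup this file already relies on:

```python
# Hypothetical search endpoint -- substitute the real Pubs API URL.
searcher = SearchPublications('https://pubs.er.usgs.gov/pubs-services/publication')

# params pass straight through to requests.get(); any valid query args work here.
results, status = searcher.get_pubs_search_results({'q': 'groundwater', 'page_size': 5})

if status == 200 and results is not None:
    for record in results['records']:
        print(record.get('title'))
```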
Useful when you need\r\n    a list of names and don't have to do all of the semantic jiggery-pokery that one needs for names otherwise.\r\n\r\n    :param list contributors: a list of dicts of a contributor type (authors, editors, etc)\r\n    :return: list of concatenated contributor names (given family suffix) or corporate names\r\n    :rtype: list\r\n    \"\"\"\r\n    #turn the list of dicts into a smaller, sorted list of dicts\r\n    typed_contributor_list = concatenate_contributor_names(contributors)\r\n    #only grab the \"text\" portion of each dict, put it into its own list.\r\n    contributor_list = []\r\n    for contributor in typed_contributor_list:\r\n        contributor_list.append(contributor[\"text\"])\r\n    return contributor_list\r\n\r\n\r\ndef concatenate_contributor_names(contributors):\r\n    \"\"\"\r\n    Turns a list of dicts with a lot of split-out contributor information into a simpler list of\r\n    {\"type\": ..., \"text\": ...} dicts\r\n\r\n    :param list contributors: a list of dicts of a contributor type (authors, editors, etc)\r\n    :return: list of dicts holding each contributor's type and concatenated name\r\n    \"\"\"\r\n    #Sort the contributors by the rank that comes out of the web service - rank is something that will always be there\r\n    # (it is fundamental to the pubs data model, so we don't have to deal with it not being there)\r\n    sorted_contributors = sorted(contributors, key=itemgetter('rank'))\r\n    #empty list to build the names\r\n    contributor_list = []\r\n    for contributor in sorted_contributors:\r\n        #test for the boolean \"corporation\" flag for each contributor\r\n        if contributor['corporation'] is False:\r\n            #list to set up join\r\n            contributor_name_list = []\r\n            #add parts of name to the list if they exist and aren't empty strings\r\n            if contributor.get(\"given\") is not None and len(contributor.get(\"given\")) > 0:\r\n                contributor_name_list.append(contributor['given'])\r\n            if contributor.get(\"family\") is not None and len(contributor.get(\"family\")) > 0:\r\n                contributor_name_list.append(contributor['family'])\r\n            if contributor.get(\"suffix\") is not None and len(contributor.get(\"suffix\")) > 0:\r\n                contributor_name_list.append(contributor['suffix'])\r\n            contributor_dict = {\"type\": 'person', \"text\": \" \".join(contributor_name_list)}\r\n        #corporate authors - the other side of the boolean\r\n        elif contributor['corporation'] is True:\r\n            contributor_dict = {\"type\": 'corporation', \"text\": contributor.get('organization')}\r\n        contributor_list.append(contributor_dict)\r\n    return contributor_list\r\n\r\ndef jsonify_geojson(record):\r\n    \"\"\"\r\n    turns the stringified geojson into actual json\r\n    :param record: the publication record to process\r\n    :return: record with parsed geojson in geographicExtents\r\n    \"\"\"\r\n    geojson = record.get('geographicExtents')\r\n    if geojson is not None:\r\n        try:\r\n            geojson = json.loads(geojson)\r\n            record['geographicExtents'] = geojson\r\n        except Exception as e:\r\n            app.logger.info(\"Prod ID \" + str(record['id']) + \" geographicExtents json parse error: \" + str(e))\r\n            del record['geographicExtents']\r\n    return record\r\n\r\ndef preceding_and_superseding(context_id, supersedes_service_url):\r\n    \"\"\"\r\n    Obtains supersede info for the context publication from an external (legacy) \r\n    service, and converts that info into an unambiguous form. Note that, \r\n    although the service endpoint is parameterized, that's only a convenience\r\n    for exercising and testing this operation. 
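For concreteness, a small worked example of the two contributor helpers above, using invented sample data (a real record would carry many more fields):

```python
contributors = [
    {'rank': 2, 'corporation': True, 'organization': 'U.S. Geological Survey'},
    {'rank': 1, 'corporation': False, 'given': 'Jane', 'family': 'Doe', 'suffix': 'Jr.'},
]

# Sorted by rank, then flattened into typed name dicts:
concatenate_contributor_names(contributors)
# -> [{'type': 'person', 'text': 'Jane Doe Jr.'},
#     {'type': 'corporation', 'text': 'U.S. Geological Survey'}]

# ...and the plain-string version used for easy joining in templates:
make_contributor_list(contributors)
# -> ['Jane Doe Jr.', 'U.S. Geological Survey']
```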
This function contains \r\n    hard-wired assumptions about \r\n    - how the context_id is included in the service call;\r\n    - the structure and semantics of the legacy service's return value.\r\n\r\n    This function will therefore need to be changed if the supersedes service \r\n    definition changes.\r\n\r\n    :param context_id: prod_id of context publication\r\n    :param supersedes_service_url: url for supersede information service\r\n    :return: dict containing three items:\r\n        'predecessors': related items (list-valued) that the context pub supersedes\r\n        'context_item': the index (prod) ID of the context pub. Included as \r\n            confirmation only; identical to the 'context_id' param.\r\n        'successors': related items (list-valued) that supersede the context pub\r\n    \"\"\"\r\n    response = requests.get(supersedes_service_url, params={'prod_id': context_id})\r\n    related = response.json()['modsCollection']['mods'][0]['relatedItem']\r\n\r\n    # REMARKS ABOUT SERVICE RETURNED VALUE ASSUMPTIONS\r\n    #\r\n    # The service returns JSON, which is converted into Python structures.\r\n    #\r\n    # Note that, despite the structure of the response, the \"mods\" array will\r\n    # have at most only one contained element.\r\n    #\r\n    # Concerning the sense of the terminology, the occurrence of \r\n    # '\"@type\": \"succeeding\"' or '\"@type\": \"preceding\"' refers to the \r\n    # relationship of the linked pub TO the context pub. \r\n    #\r\n    # To put it another way, the \"@type\" relationship descriptor assumes \r\n    # that the linked pub is the SUBJECT, and the context pub is the OBJECT. \r\n    # This can be subtly confusing for those of us who have absorbed the RDF \r\n    # conventions about framing the predicate from the viewpoint of the subject.\r\n    # \r\n    # Just think of the @type as saying \"This linked pub is ___ the context pub.\"\r\n\r\n    predecessors = []\r\n    successors = []\r\n\r\n    for item in related:\r\n        item_summary_info = {'index_id': item['identifier']['#text'],\r\n                             'title': item['titleInfo']['title'], 'date': item['originInfo']['dateIssued']}\r\n\r\n        if item['@type'] == 'preceding':\r\n            predecessors.append(item_summary_info)\r\n        elif item['@type'] == 'succeeding':\r\n            successors.append(item_summary_info)\r\n\r\n    return {'predecessors': predecessors, 'context_item': context_id, 'successors': successors}\r\n\r\n\r\ndef make_relationship_graph(context_pub_dict, related_pub_dict, related_pub_relation):\r\n    \"\"\"\r\n    Creates an \"@graph\" item for inclusion in the \"relationship\" element. This\r\n    function exists to isolate the creation of the @graph element from external\r\n    code. 
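The dict returned by `preceding_and_superseding()` then looks like this (index IDs, titles, and dates invented for illustration):

```python
{
    'predecessors': [
        {'index_id': 'ofr20151001', 'title': 'Earlier report', 'date': '2015'}
    ],
    'context_item': 'sir20165099',  # echoes the context_id argument back
    'successors': []                # empty when nothing supersedes the context pub
}
```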
It will need to be modified if the desired return format changes, or \r\n    if the assumed format of the parameters changes.\r\n    \r\n    The graph makes safe copies of its dict params.\r\n\r\n    :param context_pub_dict: the graph's basic representation of the context publication\r\n    :param related_pub_dict: the graph's basic representation of the related publication\r\n    :param related_pub_relation: description of the related publication's relation to the\r\n        context publication.\r\n    :returns: a dictionary with one item: key=\"@graph\", value=a list containing \r\n        safe copies of the context publication and related publication, both in \r\n        @graph member form.\r\n    \"\"\"\r\n    # necessary to make a deep, rather than shallow, copy - we do\r\n    # not want to make any changes to the parameter.\r\n    return_context_pub_dict = deepcopy(context_pub_dict)\r\n    return_related_pub_dict = deepcopy(related_pub_dict)\r\n\r\n    related_pub_url = related_pub_dict['@id']\r\n\r\n    # relationship type is stashed in context_pub_dict: the \"subject\", if we can\r\n    # safely call it that. However, it points to the related item. (NOTE:\r\n    # this should be revisited. It's a confusing way to represent\r\n    # a predicate.)\r\n    if related_pub_relation == 'successor':\r\n        # context pub is replaced by related pub, so we describe the context pub as\r\n        return_context_pub_dict['rdaw:replacedByWork'] = related_pub_url\r\n\r\n    elif related_pub_relation == 'predecessor':\r\n        # context pub replaces related pub, so we describe the context pub as\r\n        return_context_pub_dict['rdaw:replacementOfWork'] = related_pub_url\r\n\r\n    return {'@graph': [return_context_pub_dict, return_related_pub_dict]}\r\n\r\n\r\ndef apply_preceding_and_superseding(context_pubdata, supersedes_service_url, pubs_base_url):\r\n    \"\"\"\r\n    Accepts publication data JSON for the desired context publication,\r\n    extracts the context publication's index_id, and calls preceding_and_superseding()\r\n    for that index_id. If the current publication supersedes, and/or\r\n    is superseded by, any other publications, inserts summary info about \r\n    those pubs into the passed context_pubdata. \r\n    This function delegates formulation of @graph items to \r\n    make_relationship_graph().\r\n\r\n    :param context_pubdata: the Python decode of the JSON representation of the \r\n        context publication\r\n    :param supersedes_service_url: the endpoint of the service from which info about\r\n        related items should be obtained\r\n    :param pubs_base_url: the url needed to compose a publication URL given \r\n        a known prod_id\r\n    \"\"\"\r\n    return_pubdata = deepcopy(context_pubdata)\r\n    index_id = context_pubdata['indexId']\r\n    pub_url = urljoin(pubs_base_url, index_id)\r\n\r\n    # this LITERAL is probably OK for this particular use. However, it\r\n    # needs to be exported to a configuration.\r\n    pub_type = 'rdac:Work'\r\n    \r\n    # obtain predecessor and successor related items\r\n    pre_super = preceding_and_superseding(index_id, supersedes_service_url)\r\n\r\n    if pre_super['predecessors'] or pre_super['successors']:\r\n\r\n        # ensure 'relationships' is set up\r\n        if 'relationships' not in return_pubdata:\r\n            return_pubdata['relationships'] = {}\r\n        if '@context' not in return_pubdata['relationships']:\r\n            return_pubdata['relationships']['@context'] = {}\r\n\r\n        # JSON - Python conversion: the JSON appears to have \r\n        # multiple named elements with the same name ('@graph'). \r\n        # should use a list of dictionaries,\r\n        # rather than a dictionary, to represent these named items\r\n        # robustly in Python. 
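A sketch of what `make_relationship_graph` (defined above) produces for a predecessor relation, with placeholder pubs:

```python
context_pub = {'@id': 'https://pubs.er.usgs.gov/publication/sir20165099',
               '@type': 'rdac:Work', 'dc:title': 'New report'}
related_pub = {'@id': 'https://pubs.er.usgs.gov/publication/ofr20151001',
               '@type': 'rdac:Work', 'dc:title': 'Old report'}

graph = make_relationship_graph(context_pub, related_pub, 'predecessor')
# graph['@graph'][0] is a copy of context_pub plus
#     'rdaw:replacementOfWork': 'https://pubs.er.usgs.gov/publication/ofr20151001'
# graph['@graph'][1] is an unchanged copy of related_pub;
# deepcopy() guarantees neither input dict is mutated.
```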
This may turn out to be an issue,\r\n        # since we need to name this list rather than letting it remain\r\n        # anonymous.\r\n\r\n        if 'graphs' not in return_pubdata['relationships']:\r\n            return_pubdata['relationships']['graphs'] = []\r\n\r\n        return_pubdata['relationships']['@context']['dc'] = 'http://purl.org/dc/elements/1.1/'\r\n        return_pubdata['relationships']['@context']['xsd'] = 'http://www.w3.org/2001/XMLSchema#'\r\n        return_pubdata['relationships']['@context']['rdac'] = 'http://rdaregistry.info/Elements/c/'\r\n        return_pubdata['relationships']['@context']['rdaw'] = 'http://rdaregistry.info/Elements/w/'\r\n\r\n        return_pubdata['relationships']['@context']['rdaw:replacedByWork'] = {'@type': '@id'}\r\n        return_pubdata['relationships']['@context']['rdaw:replacementOfWork'] = {'@type': '@id'}\r\n\r\n        # make parameter for context publication\r\n        this_pub = {\r\n            '@id': pub_url,\r\n            '@type': pub_type,\r\n            'dc:title': return_pubdata['title']\r\n        }\r\n\r\n        # add any linked data for superseding another publication\r\n        for item in pre_super['predecessors']:\r\n            related_pub = {\r\n                '@id': urljoin(pubs_base_url, item['index_id']),\r\n                '@type': pub_type,\r\n                'dc:title': item['title']\r\n            }\r\n            if item['date']:\r\n                related_pub['dc:date'] = item['date']\r\n\r\n            return_pubdata['relationships']['graphs'].append(\r\n                make_relationship_graph(this_pub, related_pub, 'predecessor'))\r\n\r\n        # add any linked data for being superseded by another publication\r\n        for item in pre_super['successors']:\r\n            related_pub = {\r\n                '@id': urljoin(pubs_base_url, item['index_id']),\r\n                '@type': pub_type,\r\n                'dc:title': item['title']\r\n            }\r\n            if item['date']:\r\n                related_pub['dc:date'] = item['date']\r\n\r\n            return_pubdata['relationships']['graphs'].append(\r\n                make_relationship_graph(this_pub, related_pub, 'successor'))\r\n\r\n    return return_pubdata\r\n\r\n\r\ndef add_supersede_pubs(context_pubdata):\r\n    \"\"\"\r\n    Obtains superseding/superseded pubs info for a \"context\" pub from an \r\n    external (legacy) endpoint. 
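End to end, `apply_preceding_and_superseding` can be exercised like this (the URLs here are placeholders; `add_supersede_pubs` below hard-codes the real legacy endpoints):

```python
pubdata = {'indexId': 'sir20165099', 'title': 'New report'}

enriched = apply_preceding_and_superseding(
    pubdata,
    supersedes_service_url='http://example.gov/service/citation/json/extras',  # placeholder
    pubs_base_url='http://example.gov/publication/')                           # placeholder

# When the legacy service reports related items, the result gains a JSON-LD
# style block:
#   enriched['relationships']['@context']  -> namespace prefixes (dc, rdaw, ...)
#   enriched['relationships']['graphs']    -> one {'@graph': [...]} per relation
```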
Inserts that info into a copy of the\r\n \"context_pubdata\" parameter.\r\n\r\n :param context_pubreturn: the decoded JSON describing the context pub\r\n :return: a copy of the \"context_pubdata\" parameter with all obtained\r\n supersede information inserted in the \"@context\" item.\r\n \"\"\"\r\n\r\n supersedes_service_url = 'http://pubs.er.usgs.gov/service/citation/json/extras' \r\n pubs_base_url = 'http://pubs.er.usgs.gov/publication/'\r\n\r\n\r\n return_pubdata = apply_preceding_and_superseding(context_pubdata, supersedes_service_url, pubs_base_url)\r\n\r\n return return_pubdata\r\n\r\n\r\n","sub_path":"pubs_ui/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":24784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"89852096","text":"def combinations(arr, data, start,end, index, r):\r\n if (index == r):\r\n \tcombis.append([i for i in data])\r\n \treturn\r\n i = start\r\n while(i <= end and end - i + 1 >= r - index):\r\n data[index] = arr[i]\r\n combinations(arr, data, i + 1,end, index + 1, r)\r\n i += 1\r\ndef min_max_diff(combin):\r\n\tl=[i[1] for i in combin]\r\n\tmi=min(l)\r\n\tma=max(l)\r\n\treturn ma-mi\r\n\r\ninputf=open('sample_input.txt','r+')\r\ngoodies=[]\r\nf=0\r\nfor line in inputf:\r\n\tif line.strip()=='':\r\n\t\tcontinue\r\n\tif line.startswith(\"Number of employees:\"):\r\n\t\tno_of_emp=int(line.strip().split()[-1])\r\n\tif line.startswith(\"Goodies and Prices:\"):\r\n\t\tf=1\r\n\t\tcontinue\r\n\tif f==1:\r\n\t\tl=line.strip().split(': ')\r\n\t\tgoodies.append([l[0],int(l[1])])\r\ncombis=[]\r\ndata=[0]*no_of_emp\r\ncombinations(goodies,data,0,len(goodies)-1,0,no_of_emp)\r\nmi=2**32-1\r\nresult=0\r\nfor combin in combis:\r\n\trmin=min_max_diff(combin)\r\n\tif rmin 1: \n hex_dump(sys.argv[1])\n else:\n print(\"Provide a C file!\")","sub_path":"203_Python_LABS/Lab2/m2p4.py","file_name":"m2p4.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"313612761","text":"a=0\nn = int(input(\"Enter length: \"))\narr=[]\nfor i in range(n):\n d=int(input(\"Enter element : \"))\n arr.append(d)\nfor i in range(0,n-1):\n if arr[i]>arr[i+1]:\n a=1\n break\nif a==1:\n print(\"Your array\",arr,\"is not sorted.\")\nelse:\n print(\"Your array\", arr, \"is sorted.\")\n","sub_path":"program6.py","file_name":"program6.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"529082123","text":"import copy\n\nk,x=[],[]\na=int(input())\nfor i in range(2,2*a+1):\n\n if iGitHub Zen\n# v1.0.0\n# Josh\n# andjosh\n# GitHub zen in your menu bar!\n# python\n# http://i.imgur.com/U4OHxDm.png\n\nimport urllib2\nimport os\n\napi_key = os.getenv('GITHUB_TOKEN', 'Enter your GitHub.com Personal Access Token here...')\nurl = 'https://api.github.com/zen'\n\nrequest = urllib2.Request( url, headers = { 'Authorization': 'token ' + api_key } )\nresponse = urllib2.urlopen( request )\nprint ( '%s' % (response.read())).encode( 'utf-8' )\n","sub_path":"Lifestyle/githubzen.1m.py","file_name":"githubzen.1m.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"393083882","text":"from LightPipes import *\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport math\nimport numpy as np\n\n#Convert rgb to gray with weighted average of rgb pixels:\ndef 
rgb2gray(rgb):\n return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])\n\n#read image from disk and check if it is square:\nimg=mpimg.imread('arrow.png')\nimg=rgb2gray(img)\ndata = np.asarray( img, dtype='uint8' )\nNr=data.shape[0]\nNc=data.shape[1]\nif Nc != Nr:\n print ('image must be square')\n exit()\nelse:\n N=Nc\n\nsize=25*mm\nwavelength=1*um\n\nR=6*mm\nxs=2*mm\nys=0*mm\n\nF=Begin(size,wavelength,N)\nF=GaussAperture(R,xs,ys,1,F)\nF=MultIntensity(img,F)\nI=Intensity(0,F)\n\nplt.imshow(I,cmap='gray'); plt.axis('off'); plt.title('Intensity')\nplt.show()\n\n\n","sub_path":"sphinx-sources/Examples/tests/subintphase2.py","file_name":"subintphase2.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"431829168","text":"from __future__ import division\nimport torch\nfrom torch.autograd import Variable\nfrom torch.utils import data\n# from resnet import FCN\n# from upsample import FCN\n# from jg_upsample import FCN\nfrom jg_concatener import FCN\n# from gcn import FCN\nfrom datasets import VOCDataSet, sunrgbd_rgb\nfrom loss import CrossEntropy2d, CrossEntropyLoss2d\nfrom visualize import LinePlotter\nfrom transform import ReLabel, ToLabel, ToSP, Scale\nfrom torchvision.transforms import Compose, CenterCrop, Normalize, ToTensor\nfrom tqdm import * #tqdm\nfrom PIL import Image\nimport numpy as np\nimport getpass\n\nnum_classes = 38\nbatch_size=1\n\ninput_transform = Compose([\n Scale((224, 224), Image.BILINEAR),\n ToTensor(),\n Normalize([.485, .456, .406], [.229, .224, .225]),\n\n])\ntarget_transform = Compose([\n Scale((224, 224), Image.NEAREST),\n ToSP(224),\n ToLabel(),\n ReLabel(255, num_classes),\n])\n\n\n\ndef main():\n\n dataloader = data.DataLoader(sunrgbd_rgb(split='train', img_transform=input_transform,label_transform=target_transform),batch_size=batch_size, shuffle=True, pin_memory=True)\n model = FCN(num_classes)\n # for param in model.parameters():\n # param.requires_grad = False\n print(model)\n\n if torch.cuda.is_available():\n # model = FCN(38) #torch.nn.DataParallel(FCN(22))\n model.cuda()\n else:\n print('Cuda not available.')\n\n model.train()\n tq_bar = tqdm(enumerate(dataloader),total=len(dataloader),ncols=80,desc='Training')\n for batch_id, (images, labels_group) in tq_bar:\n # if i>25:\n # break\n if torch.cuda.is_available():\n images = [Variable(image.cuda()) for image in images]\n labels_group = [labels for labels in labels_group]\n else:\n print('Cuda not available')\n images = [Variable(image) for image in images]\n labels_group = [labels for labels in labels_group]\n\n\n for img, labels in zip(images, labels_group):\n outputs = model(img)\n net_batch_size = outputs[0].size(0)\n if torch.cuda.is_available():\n labels = [Variable(label.cuda()) for label in labels]\n else:\n labels = [Variable(label) for label in labels]\n # break\nif __name__ == '__main__':\n main()\n","sub_path":"architecte.py","file_name":"architecte.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"558785087","text":"import numpy as np\n\nimport warnings\nwarnings.simplefilter(action=\"ignore\", category=FutureWarning)\n\nfrom keras.models import Sequential\nfrom keras.layers import Embedding, LSTM, Dense\nfrom keras.utils import to_categorical\nfrom keras.callbacks import EarlyStopping\n\n# Renvoie les données sous la forme suivante\n# X.shape = (end-start-last_mn, last_mn, 4)\n# Y.shape = (end-start-last_mn, 
last_mn)\n# X[n][0] = [open, high, low, close]\n# Y[n] = 1 si augmentation, 0 si diminution\ndef prepare_data(path, start, end, last_mn=120):\n X = []\n Y = []\n\n with open(path, 'r') as file:\n lines = file.readlines()\n for i in range(start, end-last_mn):\n x = []\n for j in range(i, i+last_mn):\n data = lines[j].replace(',', '.').split(';')\n xj = [float(data[1]), float(data[2]), float(data[3]), float(data[4])]\n x.append(xj)\n if float(lines[i+last_mn].replace(',', '.').split(';')[4]) - float(lines[i+last_mn-1].replace(',', '.').split(';')[4]) >= 0:\n y = 1\n else:\n y = 0\n X.append(x)\n Y.append(y)\n\n X = np.asarray(X)\n Y = np.asarray(Y)\n return X, Y\n\ndef load_dataset():\n trainX, trainY = prepare_data(\"/home/tom/Téléchargements/TP Python/ForexData/data_2019.csv\", 0, 10000)\n testX, testY = prepare_data(\"/home/tom/Téléchargements/TP Python/ForexData/data_2019.csv\", 10000, 15000)\n\n print(f'nb augmentation : {np.count_nonzero(trainY == 1)}')\n print(f'nb diminutions : {np.count_nonzero(trainY == 0)}')\n\n trainY = to_categorical(trainY)\n testY = to_categorical(testY)\n\n print(f'Dataset chargé!')\n print(f'trainX: {trainX.shape}, trainY: {trainY.shape}, testX: {testX.shape}, testY: {testY.shape}')\n\n return trainX, trainY, testX, testY\n\ndef buildLSTM():\n model = Sequential()\n\n model.add(LSTM(4, input_shape=(120, 4), dropout=0.5))\n model.add(Dense(outputs, activation='softmax'))\n\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n return model\n# ---------------------------------------\ntrainX, trainY, testX, testY = load_dataset()\n\ntimesteps = trainX.shape[1]\nfeatures = trainX.shape[2]\noutputs = trainY.shape[1]\n\nprint(f'timesteps = {timesteps}')\nprint(f'features = {features}')\nprint(f'outputs = {outputs}')\n\nmodel = buildLSTM()\nmodel.summary()\n\nearly = EarlyStopping(monitor='accuracy', min_delta=0, patience=3, verbose=1, mode='auto')\n\nmodel.fit(trainX, trainY, epochs=10, batch_size=1, verbose=1, callbacks=[early])\nprint(\"[!] 
Training done!\")\n\n_, accuracy = model.evaluate(testX, testY, verbose=1)\nprint(f'Test accurracy : {accuracy:.3f}')\n","sub_path":"tp9_lstm.py","file_name":"tp9_lstm.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"490447789","text":"from pythonds.basic import Stack\r\n\r\ns = Stack()\r\ndef tostr(num,base):\r\n strconv = \"0123456789ABCDEF\"\r\n while num > 0:\r\n if num < base:\r\n s.push(strconv[num])\r\n else:\r\n s.push(strconv[num%base])\r\n num = num // base\r\n out = \"\"\r\n while not s.isEmpty():\r\n out = out + str(s.pop())\r\n\r\n return out\r\n\r\n\r\n\r\nprint(tostr(999999,2))","sub_path":"data-structures_algorithms/stackframes.py","file_name":"stackframes.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"59050101","text":"#!/usr/bin/python\n#!/usr/bin/pypy\n# this also works in python3\n'''\nlibev ctypes helloworld test1\n\"touch /tmp/hi\"\nrun this script\nrun a few more times \"touch /tmp/hi\"\n'''\n\nimport os, sys, time, ctypes\nimport libev as ev\nprint('libev version: %s.%s' %(ev.version_major(), ev.version_minor()))\n\nassert ev.EVBACKEND_POLL == 2\nmain = ev.default_loop( ev.EVBACKEND_POLL )\n\nwatcher = ev.ev_stat()\ndef mycallback( loop, watcher, revents ):\n\tprint(loop, watcher, revents)\n\nev.init( watcher, mycallback )\nev.stat_init( watcher, mycallback, '/tmp/hi' )\nptr = ctypes.pointer( watcher )\nev.stat_start( main, ptr )\n\nmain.run()\n\nprint('watchers', main.pending_count())\n\nmain.suspend()\nmain.resume()\n\nev.stat_stop( main, ptr )\n\nmain.loop_destroy()\n\nprint('test done')\n","sub_path":"examples/libev-helloworld.py","file_name":"libev-helloworld.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"143437936","text":"# \"Graph Partitions\" data points (Tree)\n# Less nodes = less partitions\n# Would expect outlier data pts to have less complex partitions\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import IsolationForest\n\n\ndef randomState(n, size):\n '''\n n: int --> Random State integer\n size: list --> Ex [20,2] --> Shape of data\n '''\n for i in size:\n print(i)\n return np.random.RandomState(n), size\n\n\ndef trainingData(rng, size):\n # Generating training data (regular observations)\n dat = .3 * rng.randn(size[0]*5, size[1])\n dat_train = np.r_[dat + size[1], dat - size[1]]\n \n return dat_train\n\n\ndef testingData(rng, size):\n # Generating regular observations\n dat = .3 * rng.randn(size[0], size[1])\n dat_test = np.r_[dat + size[1], dat - size[1]]\n\n return dat_test\n\n\ndef outlierData(rng, size):\n dat_outliers = rng.uniform(low=-4, high=4, size=tuple(size))\n\n return dat_outliers\n\ndef main():\n rng, size = randomState(42, [20,2])\n training = trainingData(rng, size)\n testing = testingData(rng, size)\n outliers = outlierData(rng, size)\n\n fit = IsolationForest(behaviour='new', max_samples=100,\n random_state=rng, contamination='auto')\n fit.fit(training)\n\n pred_train = fit.predict(training)\n pred_test = fit.predict(testing)\n pred_outliers = fit.predict(outliers)\n\n\n print(pred_outliers)\n\n\n \n\n\nif __name__ == '__main__':\n 
main()","sub_path":"anomalydetection/isolationforest.py","file_name":"isolationforest.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"73051473","text":"from setuptools import setup\nfrom codecs import open\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\n\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='bdr_replication',\n version='0.1',\n description='A wrapper around BDR Replications',\n long_description=long_description,\n\n url='https://github.com/sebinjohn/cloudera_bdr',\n author='Sebin John',\n author_email='sebin.john.sebin@gmail.com',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7'\n ],\n keywords='cloudera bdr replication manager',\n py_modules=['replications'],\n install_requires=[\n 'cm-api>=3',\n 'requests'\n ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"638010590","text":"import sys\nsys.path.append(\"../..\")\n\nimport numpy as np\nimport torch\nimport matplotlib.pyplot as plt\n\nfrom src import kernels,utilstorch,gpobject\n\n\ndata = np.load(\"../datasets/process2b.npz\")\n\nT = data['T'][:200]\nY = data['Y'][:200]\nZ = data['Z'][:200]\n\n\ndef set_data(T,Y,Z,p=0.7):\n ndata = len(T)\n nprevious = 100\n npred = ndata - nprevious\n ind_list = np.arange(nprevious,dtype=int)\n inds_training = np.random.choice(ind_list,size=int(nprevious*(1-p)),\n replace=False)\n ntraining = len(inds_training)\n# inds_test = [i for i in ind_list if i not in inds_training]\n xtrain = T[inds_training]\n ytrain = Y[inds_training]\n ztrain = Z[inds_training]\n prep1 = np.vstack([np.zeros((ntraining,1)),\n np.ones((ntraining,1)),\n np.zeros((npred,1))])\n prep2 = np.vstack([np.tile(xtrain.reshape(-1,1),[2,1]),\n T[nprevious:].reshape(-1,1)])\n X = np.hstack([prep1,prep2])\n Y = np.vstack([ytrain.reshape(-1,1),ztrain.reshape(-1,1),\n Y[nprevious:].reshape(-1,1)])\n# xtest = T[inds_test]\n# ytest = T[inds_test]\n return X,Y,ntraining\n \nxtrain,ytrain,ntraining = set_data(T,Y,Z)\nkernel = kernels.TensorProd(kernels.SphericalCorr(2),\n kernels.IsoMatern12(dim=1))\nnoisekernel = kernels.IIDNoiseKernel()\nhparams = [1.0,1.0,np.pi/3,10.0,1e-2]\npositives = [True,True,False,True,True]\n#Kernel testing\n#xkern,ykern = torch.tensor(xtrain)\ngp = gpobject.GPObject(kernel,noisekernel,hparams,(xtrain,ytrain))\ngp = gp.optimize(positives)\nprint([p.item() for p in gp.hparams])\n#Prediction\nYpred = []\nZpred = []\nfor t in T:\n ypred = gp.predict([[0.0,np.float(t)]])[0][0,0]\n zpred = gp.predict([[1.0,np.float(t)]])[0][0,0]\n Ypred.append(ypred)\n Zpred.append(zpred)\nplt.figure()\nplt.plot(T,Y,'b')\n#plt.plot(T,xtrain[:ntraining,1],'go')\nplt.plot(T,Ypred,'r')\nplt.figure()\nplt.plot(T,Z,'b')\n#plt.plot(T,xtrain[ntraining:,1],'go')\nplt.plot(T,Zpred,'r')","sub_path":"tests/gptorch/gpregression_mo_pred.py","file_name":"gpregression_mo_pred.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"478723668","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf 
import csrf_exempt\nfrom .models import Page, Category, Text, Revisions, Organization\nfrom django.template import Template, Context\nfrom django.shortcuts import redirect\nfrom django.db.models import Case, Value, When \nfrom pathlib import Path\nimport markdown2\n\ndef index(request, **args):\n \n # default org\n if Organization.objects.filter(slug = 'bim').exists():\n o = Organization.objects.get(slug = 'bim')\n else:\n o = create_new_org('BIM', '0.0.0.0')\n\n # try to get org from args\n if 'org' in args:\n # verify org\n if Organization.objects.filter(slug = args['org']).exists():\n o = Organization.objects.get(slug = args['org'])\n\n # if org does not exist then redirect with default org\n else:\n o = Organization.objects.get(slug = 'bim')\n resp = redirect('documentation:index',org=o.slug)\n resp.set_cookie('org',o.slug)\n return resp\n\n # try to get org from cookies\n elif 'org' in request.COOKIES:\n # verify org\n if Organization.objects.filter(slug = request.COOKIES['org']).exists():\n o = Organization.objects.get(slug = request.COOKIES['org'])\n\n # if org does not exist then redirect with default org\n else:\n o = Organization.objects.get(slug = 'bim')\n resp = redirect('documentation:index',org=o.slug)\n resp.set_cookie('org',o.slug)\n return resp\n \n # if no arg is sent do a redirect with correct org to keep urls clean\n else:\n resp = redirect('documentation:index',org=o.slug)\n resp.set_cookie('org',o.slug)\n return resp\n\n \n # get default home page\n c = Category.objects.get(slug='special', org_id = o)\n\n # only return pages that are not deleted\n p = Page.objects.get(slug='home', cat_id = c)\n\n # try to get page from args if possible. otherwise we stay with default.\n if 'cat' in args:\n if Category.objects.filter(org_id = o, slug = args['cat']).exists():\n c = Category.objects.get(slug=args['cat'], org_id = o)\n\n if 'page' in args:\n if Page.objects.filter(cat_id = c, slug=args['page']).exists():\n p = Page.objects.get(cat_id = c, slug=args['page'])\n\n else:\n resp = redirect('documentation:index',org=o.slug)\n resp.set_cookie('org',o.slug)\n return resp\n else:\n resp = redirect('documentation:index',org=o.slug)\n resp.set_cookie('org',o.slug)\n return resp\n else:\n resp = redirect('documentation:index',org=o.slug)\n resp.set_cookie('org',o.slug)\n return resp\n\n \n # if this is a history request, get that rev\n if 'rev' in args:\n if Revisions.objects.filter(page_id=p, id=args['rev']).exists():\n r = Revisions.objects.get(page_id=p, id=args['rev'])\n else:\n r = Revisions.objects.filter(page_id=p).latest('text_id')\n resp = redirect('documentation:index',org=o.slug, cat=c.slug, page=p.slug)\n resp.set_cookie('org',o.slug)\n return resp\n\n # else get current rev\n else:\n r = Revisions.objects.filter(page_id=p).latest('text_id')\n\n\n next_r = Revisions.objects.filter(page_id=p,id__gt=r.id)\n last_r = Revisions.objects.filter(page_id=p,id__lt=r.id)\n #print(next_r[0].id)\n #print(last_r[0].id)\n\n all_r = Revisions.objects.filter(page_id=p).order_by('-id')\n\n # get page list\n organizations = Organization.objects.all()\n categorys = Category.objects.filter(page__in = Page.objects.filter(deleted=None), org_id = o).distinct()\n pages = Page.objects.filter(cat_id__in=categorys, deleted = None).select_related()\n \n edits = Revisions.objects.all().count()\n\n if request.META['PATH_INFO'].split('/')[-1] == 'history':\n page_html = 'documentation/history.html'\n else:\n page_html = 'documentation/documentation.html'\n all_r = all_r[1:]\n\n \n resp = 
render(request, page_html,{\n 'title':\"docs | \" + o.name,\n #'data':\"test\"\n 'pages':pages,\n 'page_text':r,\n 'text':markdown2.markdown(r.text_id.text, extras=[\"fenced-code-blocks\",\"cuddled-lists\",\"tables\"]),\n 'revisions':all_r,\n 'orgs':organizations,\n 'cats':categorys,\n 'edits':edits,\n 'next_revision':next_r,\n 'last_revision':last_r,\n 'site':'docs',\n })\n\n resp.set_cookie('org',o.slug)\n\n return resp\n\n\n# used to update pages\n@csrf_exempt\ndef update(request):\n if request.method == 'POST':\n\n org = request.POST.get('org')\n page = request.POST.get('page')\n category = request.POST.get('category')\n text = request.POST.get('text')\n print(request.META['HTTP_REFERER'])\n print(org)\n print(page)\n print(category)\n print(text)\n\n # get remote host\n try:\n remote_host = request.META['HTTP_X_FORWARDED_FOR'].split(\",\")[0]\n\n except:\n try:\n remote_host = request.META['REMOTE_ADDR']\n \n except:\n remote_host = \"\"\n\n # get org\n if not Organization.objects.filter(name = org).exists():\n o = create_new_org(org, remote_host)\n\n else:\n o = Organization.objects.get(name=org)\n\n # get category\n if not Category.objects.filter(name = category,org_id = o).exists():\n c = Category(name=category,org_id = o)\n c.save()\n\n else:\n c = Category.objects.get(name=category,org_id = o)\n\n # get page\n if not Page.objects.filter(name=page,cat_id=c).exists():\n p = Page(name=page, cat_id=c)\n p.save()\n\n else:\n p = Page.objects.get(name=page, cat_id=c) \n \n\n \n # create revision and text\n t = Text(text=text)\n t.save()\n Revisions(page_id=p,text_id=t,updated_by=remote_host).save()\n\n if request.META['HTTP_REFERER'] is not None:\n return redirect('documentation:index',org=o.slug, cat=c.slug, page=p.slug) \n return HttpResponse(\"success\")\n else:\n return redirect('documentation:index')\n\n\ndef delete(request,org,page,cat):\n if request.method == 'POST':\n if request.POST.get('password') == 'adm1n':\n\n # get page\n o = Organization.objects.get(slug=org)\n c = Category.objects.get(slug=cat, org_id = o)\n\n # toggle status of delete field\n p = Page.objects.filter(slug=page,cat_id = c).update(deleted=Case(\n When(deleted=True, then=None),\n When(deleted=None, then=True),\n default=None))\n print(Page.objects.filter(slug=page,cat_id = c).values('deleted'))\n return redirect('documentation:index', org=org) \n\n return redirect('documentation:index', org=org, page=page, cat=cat)\n\n\ndef lock(request,org,page,cat):\n\n if request.method == 'POST':\n if request.POST.get('password') == 'adm1n':\n\n # get page\n o = Organization.objects.get(slug=org)\n c = Category.objects.get(slug=cat, org_id = o)\n\n # toggle status of delete field\n p = Page.objects.filter(slug=page,cat_id = c).update(locked=Case(\n When(locked=True, then=None),\n When(locked=None, then=True),\n default=None))\n print(Page.objects.filter(slug=page,cat_id = c).values('locked'))\n \n return redirect('documentation:index', org=org, page=page, cat=cat)\n\ndef create_new_org(org, remote_host):\n\n home_text = \"Welcome home.\"\n help_text = \"Sorry, I can't help you.\"\n statistics = \"Nothing added yet.\"\n\n\n # create org\n o = Organization(name=org)\n o.save()\n\n print(o.name)\n # create category\n c = Category(name='Special',org_id = o)\n c.save()\n\n # create pages\n\n # Home\n p = Page(name='Home', cat_id=c, locked=True)\n p.save()\n t = Text(text=home_text)\n t.save()\n Revisions(page_id=p,text_id=t,updated_by=remote_host).save()\n\n # Help\n p = Page(name='Help', cat_id=c, locked=True)\n p.save()\n 
t = Text(text=help_text)\n t.save()\n Revisions(page_id=p,text_id=t,updated_by=remote_host).save()\n\n # Stats\n p = Page(name='Stats', cat_id=c, locked=True)\n p.save()\n t = Text(text=statistics)\n t.save()\n Revisions(page_id=p,text_id=t,updated_by=remote_host).save()\n\n return o\n\ndef handle_uploaded_file(f):\n\n print(Path(__file__).parents[1].joinpath('static','uploads',str(f)).resolve())\n\n with open(Path(__file__).parents[1].joinpath('static','uploads',str(f)).resolve(), 'wb') as destination:\n\n# with open(static.url('uploads/Screenshot20Nov2019-44-18.png'), 'wb') as destination:\n for chunk in f.chunks():\n destination.write(chunk)\n\n@csrf_exempt\ndef upload(request):\n if request.method == 'POST':\n \n print(request.FILES['file'])\n\n handle_uploaded_file(request.FILES['file'])\n return HttpResponse(\"success\")\n\n return render(request, 'documentation/upload.html')\n\n\ndef edit(request,org,page,cat):\n\n o = Organization.objects.get(slug=org)\n c = Category.objects.get(slug=cat, org_id = o)\n p = Page.objects.get(slug=page,cat_id = c)\n\n\n if request.method == 'GET':\n\n r = Revisions.objects.filter(page_id=p).latest('text_id') \n \n resp = render(request, 'documentation/edit.html',{\n 'title':\"docs | \" + o.name,\n 'page_text':r,\n })\n\n return HttpResponse(resp)\n \n if request.method == 'POST':\n # get remote host\n try:\n remote_host = request.META['HTTP_X_FORWARDED_FOR'].split(\",\")[0]\n\n except:\n try:\n remote_host = request.META['REMOTE_ADDR']\n \n except:\n remote_host = \"\"\n\n # change org\n if request.POST.get('org') != o.name:\n if not Organization.objects.filter(name = request.POST.get('org')).exists():\n o = create_new_org(request.POST.get('org'), remote_host)\n\n else:\n o = Organization.objects.get(name= request.POST.get('org'))\n \n # change cat\n # change page category.. create if not existing \n if not Category.objects.filter(name = request.POST.get('category'),org_id = o).exists():\n print(\"creating cat\")\n c = Category(name=request.POST.get('category'),org_id = o)\n c.save()\n\n else:\n print(\"exists\")\n c = Category.objects.get(name=request.POST.get('category'),org_id = o)\n\n p.cat_id = c\n p.save()\n\n # change page name\n if request.POST.get('page') != p.name:\n p.name = request.POST.get('page')\n p.save() \n\n # create revision and text\n text = request.POST.get('text')\n print(text)\n t = Text(text=text)\n t.save()\n Revisions(page_id=p,text_id=t,updated_by=remote_host).save()\n\n r = Revisions.objects.filter(page_id=p).latest('text_id') \n\n \n return redirect('documentation:index', org=o.slug, page=p.slug, cat=c.slug)\n\n return redirect('documentation:index')\n","sub_path":"documentation/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"421033722","text":"import random\nkeep_playing = \"yes\"\nwhile keep_playing.lower() == \"yes\":\n magic_number = random.randint(1,20)\n guess = -1\n number_of_guesses = 0\n\n print(\"Please guess between 1 and 100! \")\n\n while guess != magic_number:\n guess = int(input(\"What is your guess? \"))\n number_of_guesses = number_of_guesses + 1\n if guess > magic_number:\n print(\"Lower\")\n elif guess < magic_number:\n print(\"Higher \") \n\n print(\"Correct Answer!\")\n print(f\"You guessed {number_of_guesses} times!\")\n\n keep_playing = input(\"Play Again? \")\n while keep_playing.lower() != \"yes\" and keep_playing.lower() != \"no\":\n keep_playing = input(\"Play Again? 
\")\n\nprint(\"thanks for playing! have a good day. \")\n","sub_path":"guessing_game.py","file_name":"guessing_game.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"591428577","text":"\"\"\" Test cases for nested lists file.\n\"\"\"\nimport unittest\nimport nested_lists\n\n\nclass TestNestedLists(unittest.TestCase):\n \"\"\" Test cases for nested list function.\n \"\"\"\n def test_remove_nested_loop_dict(self):\n \"\"\" Test case for nested lists passing a dictionary.\n \"\"\"\n expected = {}\n result = nested_lists.remove_nested_loop(expected)\n self.assertFalse(result)\n\n def test_remove_nested_loop_list(self):\n \"\"\" Test case for nested lists passing a simple list.\n \"\"\"\n expected = [1, 2, 3, 4]\n result = nested_lists.remove_nested_loop(expected)\n self.assertEqual(result, expected)\n\n def test_remove_nested_loop_nested_list(self):\n \"\"\" Test case for nested lists passing a nested list.\n \"\"\"\n expected = [1, 2, [3, 4], 5]\n result = nested_lists.remove_nested_loop(expected)\n expected = [1, 2, 3, 4, 5]\n self.assertEqual(result, expected)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test_nested_list.py","file_name":"test_nested_list.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"306870175","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport pdb\nfrom data import cfg\n\nMAX_LENGTH = 500\n\n\ndef init_lstm_wt(lstm):\n for names in lstm._all_weights:\n for name in names:\n if name.startswith('weight_'):\n wt = getattr(lstm, name)\n wt.data.uniform_(-cfg.RAND_UNIF_INIT_MAG, cfg.RAND_UNIF_INIT_MAG)\n elif name.startswith('bias_'):\n # set forget bias to 1\n bias = getattr(lstm, name)\n n = bias.size(0)\n start, end = n // 4, n // 2\n bias.data.fill_(0.)\n bias.data[start:end].fill_(1.)\n\ndef init_linear_wt(linear):\n linear.weight.data.normal_(std=cfg.TRUNC_NORM_STD)\n if linear.bias is not None:\n linear.bias.data.normal_(std=cfg.TRUNC_NORM_STD)\n\n\nclass AttnDecoderRNN(nn.Module):\n def __init__(self, weights, hidden_size, output_size, num_layers=1, dropout_p=0.1, max_length=MAX_LENGTH):\n super(AttnDecoderRNN, self).__init__()\n self.hidden_size = hidden_size\n self.output_size = output_size\n self.dropout_p = dropout_p\n self.max_length = max_length\n self.num_layers = num_layers\n\n # self.embedding = nn.Embedding(self.output_size, self.hidden_size)\n self.embedding = nn.Embedding.from_pretrained(weights)\n self.attn = nn.Linear(self.hidden_size + 100, self.max_length)\n self.attn_combine = nn.Linear(self.hidden_size + 100, self.hidden_size)\n self.dropout = nn.Dropout(self.dropout_p)\n self.gru = nn.GRU(self.hidden_size, self.hidden_size, num_layers=num_layers)\n self.out = nn.Linear(self.hidden_size, self.output_size)\n\n def forward(self, input, hidden, encoder_outputs):\n embedded = self.embedding(input).view(1, 1, -1)\n embedded = self.dropout(embedded)\n\n # TODO: recalculate weight, include coverage\n attn_weights = F.softmax(\n self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)\n\n attn_applied = torch.bmm(attn_weights.unsqueeze(0),\n encoder_outputs.unsqueeze(0))\n\n # c_t and y_t-1\n output = torch.cat((embedded[0], attn_applied[0]), 1)\n output = self.attn_combine(output).unsqueeze(0)\n\n output = F.relu(output)\n output, hidden = self.gru(output, hidden)\n\n output = 
F.log_softmax(self.out(output[0]), dim=1)\n return output, hidden, attn_weights\n\n\n\n def initHidden(self):\n return torch.zeros(self.num_layers, 1, self.hidden_size, device=device)\n\n\nclass Attention(nn.Module):\n def __init__(self):\n super(Attention, self).__init__()\n # attention\n if cfg.IS_COVERAGE:\n self.W_c = nn.Linear(1, cfg.HIDDEN_SIZE * 2, bias=False)\n if cfg.LSTM:\n self.decode_proj = nn.Linear(cfg.HIDDEN_SIZE * 2, cfg.HIDDEN_SIZE * 2)\n else:\n self.decode_proj = nn.Linear(cfg.HIDDEN_SIZE, cfg.HIDDEN_SIZE * 2)\n \n self.v = nn.Linear(cfg.HIDDEN_SIZE * 2, 1, bias=False)\n\n self.W_h = nn.Linear(cfg.HIDDEN_SIZE * 2, cfg.HIDDEN_SIZE * 2, bias=False)\n\n def forward(self, decoder_hidden_hat, encoder_outputs, coverage, input_mask):\n b, t_k, n = list(encoder_outputs.size())\n\n encoder_feature = encoder_outputs.view(-1, 2 * cfg.HIDDEN_SIZE) # B * t_k x 2*hidden_dim\n encoder_feature = self.W_h(encoder_feature)\n\n dec_fea = self.decode_proj(decoder_hidden_hat) # B x 2*hidden_dim\n dec_fea_expanded = dec_fea.unsqueeze(1).expand(b, t_k, n).contiguous() # B x t_k x 2*hidden_dim\n dec_fea_expanded = dec_fea_expanded.view(-1, n) # B * t_k x 2*hidden_dim\n\n att_features = encoder_feature + dec_fea_expanded # B * t_k x 2*hidden_dim\n if cfg.IS_COVERAGE:\n coverage_input = coverage.view(-1, 1) # B * t_k x 1\n coverage_feature = self.W_c(coverage_input) # B * t_k x 2*hidden_dim\n att_features = att_features + coverage_feature\n\n e = F.tanh(att_features) # B * t_k x 2*hidden_dim\n scores = self.v(e) # B * t_k x 1\n scores = scores.view(-1, t_k).masked_fill(input_mask==0, -1e10) # B x t_k\n\n attn_dist_ = F.softmax(scores, dim=1) # B x t_k\n normalization_factor = attn_dist_.sum(1, keepdim=True)\n attn_dist = attn_dist_ / normalization_factor\n\n attn_dist = attn_dist.unsqueeze(1) # B x 1 x t_k\n c_t = torch.bmm(attn_dist, encoder_outputs) # B x 1 x n\n c_t = c_t.view(-1, cfg.HIDDEN_SIZE * 2) # B x 2*hidden_dim\n\n attn_dist = attn_dist.view(-1, t_k) # B x t_k\n\n if cfg.IS_COVERAGE:\n coverage = coverage.view(-1, t_k)\n coverage = coverage + attn_dist\n\n return c_t, attn_dist, coverage\n\n\nclass ReduceState(nn.Module):\n def __init__(self):\n super(ReduceState, self).__init__()\n\n self.reduce_h = nn.Linear(cfg.HIDDEN_SIZE * 2, cfg.HIDDEN_SIZE)\n init_linear_wt(self.reduce_h)\n self.reduce_c = nn.Linear(cfg.HIDDEN_SIZE* 2, cfg.HIDDEN_SIZE)\n init_linear_wt(self.reduce_c)\n\n def forward(self, hidden):\n if cfg.LSTM:\n h, c = hidden # h, c dim = 2 x b x hidden_dim\n h_in = h.transpose(0, 1).contiguous().view(-1, cfg.HIDDEN_SIZE * 2)\n hidden_reduced_h = F.relu(self.reduce_h(h_in))\n c_in = c.transpose(0, 1).contiguous().view(-1, cfg.HIDDEN_SIZE * 2)\n hidden_reduced_c = F.relu(self.reduce_c(c_in))\n return hidden_reduced_h.unsqueeze(0), hidden_reduced_c.unsqueeze(0)\n else:\n h = hidden\n h_in = h.transpose(0, 1).contiguous().view(-1, cfg.HIDDEN_SIZE * 2)\n hidden_reduced_h = F.relu(self.reduce_h(h_in))\n return hidden_reduced_h.unsqueeze(0)# , hidden_reduced_c.unsqueeze(0) # h, c dim = 1 x b x hidden_dim\n\n\nclass AttnDecoderRNN_full(nn.Module):\n def __init__(self, weights):\n super(AttnDecoderRNN_full, self).__init__()\n self.reduce_state = ReduceState()\n self.attention_network = Attention()\n # decoder\n self.embedding = nn.Embedding.from_pretrained(weights)\n # init_wt_normal(self.embedding.weight)\n\n self.x_context = nn.Linear(cfg.HIDDEN_SIZE * 2 + cfg.EMBEDDING_SIZE, cfg.EMBEDDING_SIZE)\n\n if cfg.LSTM:\n self.rnn = nn.LSTM(cfg.EMBEDDING_SIZE, cfg.HIDDEN_SIZE, 
num_layers=1, batch_first=True)\n init_lstm_wt(self.rnn)\n if cfg.POINTER_GEN:\n self.p_gen_linear = nn.Linear(cfg.HIDDEN_SIZE * 4 + cfg.EMBEDDING_SIZE, 1) # was 4 if with c\n else:\n self.rnn = nn.GRU(cfg.EMBEDDING_SIZE, cfg.HIDDEN_SIZE, num_layers=1, batch_first=True)\n if cfg.POINTER_GEN:\n self.p_gen_linear = nn.Linear(cfg.HIDDEN_SIZE * 3 + cfg.EMBEDDING_SIZE, 1) # was 4 if with c\n\n # p_vocab\n self.out1 = nn.Linear(cfg.HIDDEN_SIZE * 3, cfg.HIDDEN_SIZE)\n self.out2 = nn.Linear(cfg.HIDDEN_SIZE, cfg.VOCAB_SIZE+3)\n init_linear_wt(self.out2)\n\n def forward(self, encoder_outputs, decoder_input, decoder_hidden,\n c_t_1, coverage, input_idx, step, input_mask):\n\n if not self.training and step == 0:\n if cfg.LSTM:\n h_decoder, c_decoder = decoder_hidden\n decoder_hidden_hat = torch.cat((h_decoder.view(-1, cfg.HIDDEN_SIZE),\n c_decoder.view(-1, cfg.HIDDEN_SIZE)), 1) # B x 2*hidden_dim\n else:\n decoder_hidden_hat = decoder_hidden\n\n c_t, _, coverage_next = self.attention_network(decoder_hidden_hat, encoder_outputs, coverage, input_mask)\n\n coverage = coverage_next\n\n embd = self.embedding(decoder_input)\n x = self.x_context(torch.cat((c_t_1, embd.view(c_t_1.shape[0], -1)), 1))\n decoder_output, decoder_hidden = self.rnn(x.unsqueeze(1), decoder_hidden)\n\n if cfg.LSTM:\n h_decoder, c_decoder = decoder_hidden\n decoder_hidden_hat = torch.cat((h_decoder.view(-1, cfg.HIDDEN_SIZE),\n c_decoder.view(-1, cfg.HIDDEN_SIZE)), 1) # B x 2*hidden_dim\n else:\n h_decoder = decoder_hidden\n decoder_hidden_hat = h_decoder.view(-1, cfg.HIDDEN_SIZE)\n\n c_t, attn_dist, coverage_next = self.attention_network(decoder_hidden_hat, encoder_outputs, coverage, input_mask)\n\n if self.training or step > 0:\n coverage = coverage_next\n\n p_gen = None\n if cfg.POINTER_GEN:\n p_gen_input = torch.cat((c_t, decoder_hidden_hat, x), 1) # lstm: B x (2*2*hidden_dim + emb_dim) gru: B x (2*2*hidden_dim + emb_dim)\n p_gen = self.p_gen_linear(p_gen_input)\n p_gen = F.sigmoid(p_gen)\n\n output = torch.cat((decoder_output.view(-1, cfg.HIDDEN_SIZE), c_t), 1) # B x hidden_dim * 3\n output = self.out1(output) # B x hidden_dim\n\n #output = F.relu(output)\n\n output = self.out2(output) # B x vocab_size\n vocab_dist = F.softmax(output, dim=1)\n\n if cfg.POINTER_GEN:\n vocab_dist_ = p_gen * vocab_dist\n attn_dist_ = (1 - p_gen) * attn_dist\n\n final_dist = vocab_dist_.scatter_add(1, input_idx, attn_dist_)\n # final_dist = vocab_dist_ + attn_dist_\n else:\n final_dist = vocab_dist\n\n return final_dist, decoder_hidden, c_t, attn_dist, p_gen, coverage\n","sub_path":"models/AttnDecoderRNN_bi.py","file_name":"AttnDecoderRNN_bi.py","file_ext":"py","file_size_in_byte":9429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"346609599","text":"import time\nimport datetime\nfrom flask import Flask, request\nfrom flask_pymongo import PyMongo\nfrom flask_cors import CORS, cross_origin\n\napp = Flask(__name__)\napp.config['MONGO_URI'] = 'mongodb://exceed_group03:n9tvpt6s@158.108.182.0:2255/exceed_group03'\n\ncors = CORS(app)\nmongo = PyMongo(app)\n\n#myCollection = mongo.db.user\n\n@app.route('/test', methods=['GET'])\n@cross_origin()\ndef test():\n now = datetime.datetime.now()\n print(now)\n print(now.strftime(\"%A\"))\n return {}\n\n#user\n@app.route('/create_user', methods=['POST'])\n@cross_origin()\ndef insert_one():\n myCollection = mongo.db.user\n data = request.json\n ts = time.time()\n \n last_user = myCollection.find_one(sort=[(\"user_id\", -1)])\n #last_id = (last_user == None) ? 
1 : last_user['user_id']\n\n if(last_user == None):\n last_id = 0\n else:\n last_id = last_user[\"user_id\"]\n #print(last_id)\n data[\"last_update_timestamp\"] = ts\n data[\"user_id\"] = last_id + 1\n\n myCollection.insert_one(data)\n return {'result': 'Create succesfully'}\n\n@app.route('/update_status', methods=['POST'])\n@cross_origin()\ndef update():\n data = request.json\n Id = request.args.get('user_id')\n \n ts = time.time()\n filt = {\"user_id\": int(Id)}\n update_status = {\"$set\": {\n 'status': data['status'],\n 'last_update_timestamp': ts\n }}\n #print(filt, update_status)\n myCollection = mongo.db.user\n myCollection.update_one(filt, update_status)\n return {'result': 'update succesfully'}\n\n@app.route('/response', methods=['GET'])\n@cross_origin()\ndef response():\n myCollection = mongo.db.messages\n ID = request.args.get(\"user_id\")\n filt = {\"user_id\": int(ID), \"type\": \"live\" }\n\n query = myCollection.find_one(filt)\n output = {}\n output[\"message\"] = query[\"message\"]\n output[\"response\"] = query[\"response\"]\n output[\"sent\"] = query[\"sent\"]\n return output\n\n@app.route('/all_user', methods=['GET'])\n@cross_origin()\ndef find_all():\n myCollection = mongo.db.user\n all_user = myCollection.find()\n \n output = []\n\n for ele in all_user:\n output.append({\n \"user_id\": ele[\"user_id\"],\n \"name\": ele[\"name\"],\n \"age\": ele[\"age\"],\n \"address\": ele[\"address\"],\n \"status\": ele[\"status\"],\n \"last_update_timestamp\": ele[\"last_update_timestamp\"]\n })\n\n return {'result': output}\n\n#message\n@app.route('/new_msg', methods=['POST'])\n@cross_origin()\ndef new_msg():\n myCollection = mongo.db.messages\n user_id = request.args.get(\"user_id\")\n data = request.json\n filt = {\"user_id\": int(user_id), \"type\": \"live\"}\n query = myCollection.find_one(filt)\n if(query):\n update_msg = { \"$set\": {\n \"message\": data[\"message\"],\n \"sent\": False,\n \"response\": None\n }}\n\n myCollection.update_one(filt,update_msg)\n return {\"result\": \"Update successfully\"}\n else:\n last_msg = myCollection.find_one(sort=[(\"msg_id\", -1)])\n \n if(last_msg == None):\n last_id = 0\n else:\n last_id = last_msg[\"msg_id\"]\n\n data[\"user_id\"] = int(user_id)\n data[\"msg_id\"] = last_id + 1\n data[\"hasResponse\"] = True\n data[\"response\"] = None\n data[\"sent\"] = False\n data[\"type\"] = \"live\"\n myCollection.insert_one(data)\n return{'result': 'Create successfully'}\n \n@app.route('/get_live', methods=['GET'])\n@cross_origin()\ndef get_live():\n myCollection = mongo.db.messages\n user_id = request.args.get(\"user_id\")\n\n filt = {\"user_id\": int(user_id), \"type\": \"live\", \"sent\": False}\n\n query = myCollection.find_one(filt)\n output = {}\n if (query):\n output[\"message\"] = query[\"message\"]\n output[\"msg_id\"] = query[\"msg_id\"]\n \n update_sent = { \"$set\": {\n 'sent': True\n }}\n myCollection.update_one(filt,update_sent)\n\n return output\n\n@app.route('/get_schedule', methods=['GET'])\n@cross_origin()\ndef get_schedule():\n myCollection = mongo.db.messages\n user_id = request.args.get(\"user_id\")\n \n now = datetime.datetime.now()\n now_hour = now.hour + 7\n now_min = now.minute\n now_day = now.strftime(\"%A\")\n if(now_hour >= 24):\n now_hour = now_hour - 24\n if(now_day == \"Monday\"):\n now_day = \"Tuesday\"\n elif(now_day == \"Tuesday\"):\n now_day = \"Wednesday\"\n elif(now_day == \"Wednesday\"):\n now_day = \"Thursday\"\n elif(now_day == \"Thursday\"):\n now_day = \"Friday\"\n elif(now_day == \"Friday\"):\n now_day = 
\"Saturday\"\n elif(now_day == \"Saturday\"):\n now_day = \"Sunday\"\n elif(now_day == \"Sunday\"):\n now_day = \"Monday\"\n \n #now_day = \"Friday\"\n\n filt = {\"user_id\": int(user_id), \"type\": \"schedule\", \"day\": now_day, \"hour\": now_hour, \"minute\": now_min}\n \n query = myCollection.find(filt)\n\n output = []\n for ele in query:\n output.append({\n \"message\": ele[\"message\"],\n \"msg_id\": ele[\"msg_id\"]\n })\n \n \n return {'result': output }\n\n@app.route('/create_schedule', methods=['POST'])\n@cross_origin()\ndef input_schedule():\n myCollection = mongo.db.messages\n data = request.json\n user_id = request.args.get(\"user_id\")\n last_msg = myCollection.find_one(sort=[(\"msg_id\", -1)])\n \n if(last_msg == None):\n last_id = 0\n else:\n last_id = last_msg[\"msg_id\"]\n \n data[\"user_id\"] = int(user_id)\n data[\"msg_id\"] = last_id + 1\n data[\"hasResponse\"] = False\n data[\"response\"] = None\n \n #now = datetime.datetime.now()\n \n #print(now.strftime(\"%A\"), now.hour, now.minute, now.second)\n myCollection.insert_one(data) \n return { 'result': \"Create successful\"}\n\n@app.route('/delete_schedule', methods=['DELETE'])\n@cross_origin()\ndef delete_schedule():\n myCollection = mongo.db.messages\n msg_id = request.args.get(\"msg_id\")\n filt = {\"msg_id\": int(msg_id), \"type\": \"schedule\"}\n myCollection.delete_one(filt)\n \n return {'result': 'Delete successfully'}\n\n\n@app.route('/msg', methods=['GET'])\n@cross_origin()\ndef get_msg():\n myCollection = mongo.db.messages\n ID = request.args.get(\"user_id\")\n filt = {\"user_id\": int(ID) }\n\n query = myCollection.find(filt)\n\n output = []\n for ele in query:\n if(ele[\"type\"] == \"live\"):\n output.append({\n \"message\": ele[\"message\"],\n \"msg_id\": ele[\"msg_id\"],\n \"type\": ele[\"type\"]\n })\n else:\n output.append({\n \"message\": ele[\"message\"],\n \"msg_id\": ele[\"msg_id\"],\n \"type\": ele[\"type\"],\n \"day\": ele[\"day\"], \n \"hour\": ele[\"hour\"], \n \"minute\": ele[\"minute\"], \n \"second\": ele[\"second\"]\n })\n \n return {\"result\": output}\n\n#secure\n@app.route('/create_secure', methods=['POST'])\n@cross_origin()\ndef input_secure():\n myCollection = mongo.db.gas_gyro\n data = request.json\n myCollection.insert_one(data)\n return {'result': 'Create successfully'}\n\n@app.route('/update_gyro', methods=['POST'])\n@cross_origin()\ndef update_gyro():\n data = request.json\n ID = request.args.get('user_id')\n filt = {\"user_id\": int(ID)}\n update_gyro = {\"$set\": {\n \"gyro\": data['gyro'],\n \"timestamp_gyro\": time.time()\n }}\n myCollection = mongo.db.gas_gyro\n myCollection.update_one(filt, update_gyro)\n\n myCollection = mongo.db.user\n update = {\"$set\": {\n \"status\": \"danger\"\n }}\n #print(filt, update)\n myCollection.update_one(filt, update)\n\n return {\"result\": \"update successfully\"}\n \n@app.route('/update_gas', methods=['POST'])\n@cross_origin()\ndef update_gas():\n data = request.json\n ID = request.args.get('user_id')\n filt = {\"user_id\": int(ID)}\n update_gas = {\"$set\": {\n \"gas\": data['gas'],\n \"timestamp_gas\": time.time()\n }}\n myCollection = mongo.db.gas_gyro\n myCollection.update_one(filt, update_gas)\n myCollection = mongo.db.user\n update = {\"$set\": {\n \"status\": \"danger\"\n }}\n myCollection.update_one(filt, update)\n return {\"result\": \"update successfully\"}\n\n@app.route('/get_secure', methods=['GET'])\n@cross_origin()\ndef find_secure():\n myCollection = mongo.db.gas_gyro\n ID = request.args.get(\"user_id\")\n filt = {\"user_id\": 
int(ID)}\n query = myCollection.find_one(filt)\n \n output = {\n \"user_id\": query[\"user_id\"],\n \"gyro\": query[\"gyro\"],\n \"timestamp_gyro\": query[\"timestamp_gyro\"],\n \"gas\": query[\"gas\"],\n \"timestamp_gas\": query[\"timestamp_gas\"]\n }\n\n return {'result': output}\n\n#yer/no\n@app.route('/reply', methods=['POST'])\n@cross_origin()\ndef reply():\n myCollection = mongo.db.messages\n data = request.json\n msg_id = data[\"msg_id\"]\n response = data[\"response\"]\n print(data)\n filt = {\"msg_id\": int(msg_id)}\n\n update = {\"$set\": {\n \"response\": response\n }}\n myCollection.update_one(filt, update)\n return {\"result\": \"Reply successfully\"}\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port='3000', debug=True)\n","sub_path":"Backend/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":9260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"516756981","text":"from barbados.factories.base import BaseFactory\nfrom barbados.text import DisplayName\nfrom barbados.objects.inventory import Inventory\nfrom barbados.objects.inventoryitem import InventoryItem\nfrom barbados.exceptions import ValidationException\nfrom barbados.services.logging import Log\nfrom uuid import uuid4\nfrom barbados.models.inventorymodel import InventoryModel\nfrom barbados.caches.ingredienttree import IngredientTreeCache\n\n\nclass InventoryFactory(BaseFactory):\n _model = InventoryModel\n\n required_keys = {\n 'id': uuid4(),\n 'items': dict(),\n 'implicit_items': dict(),\n }\n\n @staticmethod\n def raw_to_obj(raw):\n raw_inventory = InventoryFactory.sanitize_raw(raw_input=raw, required_keys=InventoryFactory.required_keys)\n\n # Beware the Python dict copying bullshit!\n raw_inventory = InventoryFactory._parse_display_name(raw_inventory)\n raw_inventory = InventoryFactory._parse_items(raw_inventory)\n\n # Build the object\n i = Inventory(**raw_inventory)\n return i\n\n @staticmethod\n def _parse_display_name(raw_input):\n value_key = 'display_name'\n old_value = raw_input.get(value_key)\n\n # Log.info(\"Old value for %s is %s\" % (value_key, old_value))\n if not raw_input:\n new_value = DisplayName('Unnamed Inventory')\n elif type(old_value) is str:\n new_value = DisplayName(old_value)\n else:\n raise ValidationException(\"Bad display name given for inventory (%s)\" % old_value)\n\n # Log.info(\"New value for %s is %s\" % (value_key, new_value))\n raw_input.update({value_key: new_value})\n return raw_input\n\n @staticmethod\n def _parse_items(raw_input):\n value_key = 'items'\n items = raw_input.get(value_key)\n\n # Log.info(\"Old value for %s is %s\" % (value_key, old_value))\n # new_value = []\n for raw_item in items.keys():\n ii = InventoryItem(slug=raw_item)\n # new_value.append(ii)\n items.update({raw_item: ii})\n\n # Log.info(\"New value for %s is %s\" % (value_key, new_value))\n raw_input.update({value_key: items})\n return raw_input\n\n @classmethod\n def produce_obj(cls, session, id, expand=False):\n i = super().produce_obj(session=session, id=id)\n if expand:\n tree = IngredientTreeCache.retrieve()\n i.expand(tree)\n\n return i\n","sub_path":"barbados/factories/inventoryfactory.py","file_name":"inventoryfactory.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"610357230","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\n\nkeyword_list = []\n\n# input keyword 잘라내고 리스트에 넣기\nfor 
each in sys.argv[1].split('_'):\n keyword_list.append(each)\n\nlen_keyword = len(keyword_list)\n\n# load each syllabus txt file\ntry:\n sDirectory = os.listdir(\"/root/tokens/\")\nexcept Exception as ex:\n print(ex)\n\nf = open(\"/root/tokens/\" + sDirectory[0], 'r', encoding='utf-8')\n\ntry:\n target = f.read()\nexcept Exception as ex3:\n print(ex3)\n\ntoken_dic_list = []\n\n# Make Dic\nfor file in sDirectory:\n f = open(\"/root/tokens/\" + file, 'r', encoding='utf-8')\n target = f.read()\n token_dic = {}\n lines = target.split('\\n')\n for line in lines:\n if line != '':\n token_KV = line.split(' ')\n token_dic[token_KV[0]] = int(token_KV[1])\n token_dic['$code$'] = file.split('.')[0]\n token_dic_list.append(token_dic)\n\n# filterd Dictionary List\n# Key : 일치하는 키워드의 개수 / Value : 일치하는 키워드의 개수가 Key 인 강의들의 리스트\nkey_freq_dic = {}\n# 초기화\nfor i in range(0, len_keyword):\n key_freq_dic[i] = []\n\nfor dic in token_dic_list:\n # 일치하는 키워드의 개수 파악\n cnt = 0\n freq = 0\n for keyword in keyword_list:\n if keyword in dic.keys():\n cnt = cnt + 1\n freq = freq + dic.get(keyword) # 일치 하는 키워드의 빈도 수를 누적\n # 일치하는 키워드가 하나라도 있는 경우에만 해당 리스트에 append\n if cnt > 0:\n dic['$freq$'] = freq # 누적 변수 덧붙이기\n key_freq_dic[cnt-1].append(dic)\n # print(str(cnt) + ' / ' + str(dic.get('$freq$')))\n\n# 키워드 일치 수 그룹 내에서 빈도수로 정렬\nfor i in range(0, len_keyword):\n # 누적 변수로 내림차순 정렬\n sorted_dic = sorted(key_freq_dic.get(i), key=lambda t : t['$freq$'], reverse=True)\n # 정렬 된 리스트로 업데이트\n key_freq_dic[i] = sorted_dic\n\nfor i in range(0, len_keyword):\n for entry in key_freq_dic.get(len_keyword-1 - i):\n print(entry['$code$'], end=',')","sub_path":"python/filter_test.py","file_name":"filter_test.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"277880965","text":"\n\nfrom xai.brain.wordbase.nouns._antenna import _ANTENNA\n\n#calss header\nclass _ANTENNAE(_ANTENNA, ):\n\tdef __init__(self,): \n\t\t_ANTENNA.__init__(self)\n\t\tself.name = \"ANTENNAE\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"antenna\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_antennae.py","file_name":"_antennae.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"265490396","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import Ridge\nfrom sklearn.linear_model import Lasso\n\ndef get_data_points():\n df = pd.read_csv(\"clean_data.csv\", sep='\\t')\n df = df.dropna(how='any', axis=0)\n x_points = df[['Distance', 'Car','BuildingArea', 'YearBuilt']]\n y_points = df[['Price']]\n return np.array(x_points), np.array(y_points)\n\ndef ridge_regression():\n # Obtain training data\n x, y = get_data_points()\n x_train = x\n y_train = y\n\n ridge = Ridge(alpha=1)\n ridge_mod = ridge.fit(x_train, y_train) \n ridge_pred = ridge_mod.predict(x)\n\n print(\"======================= Ridge Regression =================\")\n print(\"Standard MSE(mean square error): %f\" % np.sqrt(np.mean((ridge_pred - y) ** 2)))\n print(\"MSE: %f\" % np.mean((ridge_pred - y) ** 2))\n\ndef lasso_regression():\n # Obtain training data\n x, y = get_data_points()\n x_train = x\n y_train = y\n\n lasso = Lasso(alpha=1)\n lasso_mod = lasso.fit(x_train, y_train) \n lasso_pred = lasso_mod.predict(x)\n\n print(\"======================= Lasso Regression =================\")\n print(\"Standard MSE(mean square error): %f\" % np.sqrt(np.mean((lasso_pred - y) ** 2)))\n 
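# A minimal illustrative sketch (not from the original script; toy arrays are
# assumed for demonstration): the value printed above as "Standard MSE" is
# sqrt(mean squared error), i.e. the RMSE. Both metrics via scikit-learn:
import numpy as np
from sklearn.metrics import mean_squared_error

y_true = np.array([3.0, 2.5, 4.0])
y_pred = np.array([2.8, 2.7, 4.2])
mse = mean_squared_error(y_true, y_pred)  # mean((y_true - y_pred) ** 2)
rmse = np.sqrt(mse)                       # the quantity labelled "Standard MSE"
print("MSE: %f, RMSE: %f" % (mse, rmse))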
print(\"MSE: %f\" % np.mean((lasso_pred - y) ** 2))\n\nridge_regression()\nlasso_regression()\n","sub_path":"src/preliminary/4.Training.py","file_name":"4.Training.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"495475163","text":"# -*- coding: utf-8 -*-\n\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\n# ---\nimport sys\n# ---\nfrom package.ui.ui_main_window import Ui_MainWindow\n# ---\nfrom package.numbers_item_model import NumbersItemModel\nfrom package.numbers_delegate import NumbersDelegate\nfrom package.number_dialog import NumberDialog\n\nclass MainWindow(QMainWindow, Ui_MainWindow):\n def __init__(self, parent=None):\n QMainWindow.__init__(self, parent)\n self.setupUi(self)\n # ---\n self.app = QApplication.instance()\n self.numbersModel = NumbersItemModel()\n numbersDelegate = NumbersDelegate(self)\n self.numbersTableView.setItemDelegate(numbersDelegate)\n self.numbersTableView.setModel(self.numbersModel)\n self.numbersTableView.activated.connect(self.numberActivated)\n # ---\n self.actionNewNumber.triggered.connect(self.actionNewNumber_triggered)\n\n\n def actionNewNumber_triggered(self):\n numberDialog = NumberDialog(self)\n numberDialog.newNumber()\n numberDialog.exec_()\n self.numbersModel.reload()\n\n\n def numberActivated(self, index):\n numberDialog = NumberDialog(self)\n number_id = self.numbersModel.getNumberIdByIndex(index)\n activate_date = self.app.dateAfter(index.column())\n numberDialog.setNumber(number_id)\n numberDialog.setDate(activate_date)\n numberDialog.exec_()\n self.numbersModel.reload()\n\n","sub_path":"package/main_window.py","file_name":"main_window.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"343229638","text":"import requests\nfrom bs4 import BeautifulSoup\nimport json\nfrom Article import Article\n\nbaseUrl = \"https://vietnamnet.vn/\"\ndef GetNews(limit_news = 20):\n s = requests.Session()\n response = s.get(baseUrl) # Thực hiện Get request\n soup = BeautifulSoup(response.content, 'html.parser') # Đưa vào biến soup chuẩn bị bóc tách dữ liệu\n article= soup.select(\"article.item-news\", limit=limit_news) # Tách dữ liệu phần thẻ article ra\n\n listArticle = []\n for element in article:\n title = element.select(\"span.m-t-5 d-b> a\") # Lấy phần thẻ chứa title\n description = element.select(\"div.lead m-t-5 > div\") # Lấy phần thẻ chứa description\n for x in range(len(title)): # serialize object này lại thành json để lấy dữ liệu dễ dàng hơn\n listArticle.append(json.dumps(Article(title[x]['title'], title[x]['href'], description[x].text).__dict__, ensure_ascii=False))\n return listArticle\n","sub_path":"New2.py","file_name":"New2.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"19355399","text":"class Solution:\n def isMatch(self, s: str, p: str) -> bool:\n \"\"\"\n https://leetcode.com/problems/wildcard-matching/\n Time Complexity - O(mn)\n Space Complexity - O(mn)\n 'm' and 'n' is the length of input strings.\n \"\"\"\n if s is None or p is None:\n return False\n s_len = len(s)\n p_len = len(p)\n dp = [[False for _ in range(p_len + 1)] for _ in range(s_len + 1)]\n\n # empty string matched with empty string\n dp[0][0] = True\n # char with empty string\n for r in range(1, s_len + 1):\n dp[r][0] = False\n # empty string with pattern string\n for 
c in range(1, p_len + 1):\n if p[c - 1] == '*':\n dp[0][c] = dp[0][c - 1]\n\n for r in range(1, s_len + 1):\n for c in range(1, p_len + 1):\n # if cur_char in both string match or if cur_pattern char is '?'\n # extend the string\n if (s[r - 1] == p[c - 1] or p[c - 1] == '?') and dp[r - 1][c - 1]:\n dp[r][c] = True\n # if cur_pattern char is *\n # extend from prev string (taking * into account) or\n # ignore '*'\n elif p[c - 1] == '*' and (dp[r - 1][c] or dp[r][c - 1]):\n dp[r][c] = True\n return dp[s_len][p_len]\n\n\nif __name__ == '__main__':\n print(Solution().isMatch(\"aa\", \"a\"))\n print(Solution().isMatch(\"adceb\", \"*a*b\"))\n print(Solution().isMatch(\"aa\", \"*\"))\n","sub_path":"44_wildcard_matching.py","file_name":"44_wildcard_matching.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"545401752","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n\"\"\"\n dishes = [[\"Salad\", \"Tomato\", \"Cucumber\", \"Salad\", \"Sauce\"],\n [\"Pizza\", \"Tomato\", \"Sausage\", \"Sauce\", \"Dough\"],\n [\"Quesadilla\", \"Chicken\", \"Cheese\", \"Sauce\"],\n [\"Sandwich\", \"Salad\", \"Bread\", \"Tomato\", \"Cheese\"]]\n\noutput = [[\"Cheese\", \"Quesadilla\", \"Sandwich\"],\n [\"Chicken\", \"Chicken Curry\", \"Quesadilla\"],\n [\"Nuts\", \"Fried Rice\", \"Salad\"],\n [\"Onions\", \"Fried Rice\", \"Pasta\"]]\n\nconstrain - not big data\n\"\"\"\n\nfrom collections import defaultdict\ndef foo(dishes):\n ingredients = defaultdict(list)\n for dish in dishes:\n for i in dish[1:]:\n ingredients[i].append(dish[0])\n sorted_ingredients = []\n for i in sorted(ingredients):\n if len(ingredients[i]) > 1: \n temp = sorted(ingredients[i])\n temp.insert(0, i)\n sorted_ingredients.append(temp)\n return sorted_ingredients\n \n\n\nimport unittest\nclass FooTestCase(unittest.TestCase):\n def test_foo(self):\n dishes = [[\"Salad\", \"Tomato\", \"Cucumber\", \"Salad\", \"Sauce\"],\n [\"Pizza\", \"Tomato\", \"Sausage\", \"Sauce\", \"Dough\"],\n [\"Quesadilla\", \"Chicken\", \"Cheese\", \"Sauce\"],\n [\"Sandwich\", \"Salad\", \"Bread\", \"Tomato\", \"Cheese\"]]\n output = [[\"Cheese\", \"Quesadilla\", \"Sandwich\"],\n [\"Salad\", \"Salad\", \"Sandwich\"],\n [\"Sauce\", \"Pizza\", \"Quesadilla\", \"Salad\"],\n [\"Tomato\", \"Pizza\", \"Salad\", \"Sandwich\"]]\n self.assertEqual(foo(dishes), output)\n\n\nif __name__ == '__main__':\n unittest.main()\n\n","sub_path":"coding-exercise/python/grouping-dishes.py","file_name":"grouping-dishes.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"602068282","text":"from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error\nfrom sklearn import decomposition, preprocessing\nfrom sklearn.model_selection import KFold, LeaveOneOut, train_test_split\n\nimport tensorflow as tf\nimport keras as K\nimport tensorflow as tf\nfrom keras import backend\nfrom keras.backend.tensorflow_backend import set_session\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.callbacks import EarlyStopping, History\nfrom keras.utils import np_utils\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt \nimport os, csv, time, shutil\nimport cv_data, settings\nimport ast\nimport gc\nimport seaborn as sns\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n#plots train and validation errors as a function of training steps\ndef 
plot_model_history(model_history):\n fig, axs = plt.subplots(1,2,figsize=(15,5))\n # summarize history for accuracy\n axs[0].plot(range(1,len(model_history.history['loss'])+1),model_history.history['loss'])\n axs[0].plot(range(1,len(model_history.history['val_loss'])+1),model_history.history['val_loss'])\n axs[0].set_title('Model Accuracy')\n axs[0].set_ylabel('MSE')\n axs[0].set_xlabel('Epoch')\n axs[0].set_ylim(0,1000)\n axs[0].set_xticks(np.arange(1,len(model_history.history['loss'])+1),len(model_history.history['loss'])/10)\n axs[0].legend(['train', 'val'], loc='best')\n plt.show()\n\n\ndef averageOverLastN(a, n=3):\n return np.mean(a[-n:])\n\ndef main():\n section = 'Settings'\n args = dict.fromkeys(settings.args_keys)\n\n #read hyperparameters from Settings.ini\n for setting in settings.args_keys:\n value = settings.get_setting(settings.path, section, setting)\n args[setting] = ast.literal_eval(value)\n \n print(args)\n\n tf.reset_default_graph()\n start_time = time.time()\n \n\n #initialize statistics to be collected\n mse_test = []\n mae_test = []\n mae_std_test =[]\n preds = []\n r2_test = []\n val_average_over_last_n = []\n tr_average_over_last_n = []\n\n \n (XXX, yyy) = cv_data.load_data()\n XXt, X, yyt, y = train_test_split(XXX, yyy, test_size=args['train_percent'], random_state=42)\n\n #hartree to meV conversion of y values\n y *= args['reorg_norm_factor']\n yyt *= args['reorg_norm_factor']\n\n cv_split = []\n \n #for stratified split\n if(args['stratified']):\n min = 0\n max = len(X)\n for i in range(args['cv']):\n test_ind=np.arange(min,max,args['cv'])\n train_ind = np.arange(0,max)\n mask = np.ones(max,dtype=bool)\n mask[test_ind] = False\n train_ind = train_ind[mask]\n cv_split.append([train_ind,test_ind])\n min += 1\n print('*** Done stratification ***') \n else:\n kf = KFold(n_splits=args['cv'], shuffle=True, random_state=None)\n cv_split=kf.split(X)\n print('*** Done K-Fold ***') \n \n #k-fold cross validation loop\n foldNumber = 1\n for train_index, test_index in cv_split:\n start_time_fold = time.time()\n print('\\n --- %s. 
Fold started --- ' % (foldNumber))\n \n X_train, X_test = X.iloc[train_index], X.iloc[test_index]\n y_train, y_test = y.iloc[train_index], y.iloc[test_index]\n \n\n if args['normalization']:\n max_abs_scaler = preprocessing.MaxAbsScaler()\n X_train_maxabs = max_abs_scaler.fit_transform(X_train.values)\n X_test_maxabs = max_abs_scaler.transform(X_test.values)\n X_train2 = pd.DataFrame(X_train_maxabs, columns=X_train.columns, index=X_train.index)\n X_test2 = pd.DataFrame(X_test_maxabs, columns=X_test.columns, index=X_test.index) \n X_train = X_train2\n X_test = X_test2\n print('*** Done normalizing ***') \n\n if args['pca']:\n pca = decomposition.PCA(n_components=args['pcaVec'])\n pca.fit(X_train)\n columns = ['pca_%i' % i for i in range(args['pcaVec'])]\n X_train2 = pd.DataFrame(pca.transform(X_train), columns=columns, index=X_train.index)\n X_test2 = pd.DataFrame(pca.transform(X_test), columns=columns, index=X_test.index)\n X_train = X_train2\n X_test = X_test2\n print('*** Done PCA ***') \n \n \n\n config = tf.ConfigProto(\n allow_soft_placement=True,\n gpu_options = tf.GPUOptions(allow_growth=True))\n set_session(tf.Session(config=config))\n\n print(\"X_train shape:\", X_train.shape[1])\n\n #build the neural network\n model = Sequential()\n layers=args['hidden_units']\n for i in range(len(layers)):\n if i==0: #first hidden layer\n model.add(Dense(layers[i], input_shape=(X_train.shape[1],), activation='relu', \n kernel_initializer='he_normal'))\n model.add(Dropout(args['dropout']))\n elif i==len(layers)-1: #output layer\n model.add(Dense(layers[i], activation='linear', kernel_initializer=\"he_normal\"))\n else:\n model.add(Dense(layers[i], activation='relu', kernel_initializer=\"he_normal\"))\n model.add(Dropout(args['dropout']))\n\n model.compile(loss='mean_squared_error', optimizer=K.optimizers.Adam(lr=args['learning_rate'], beta_1=0.9, beta_2=0.999, epsilon=0.1, decay=0.0, amsgrad=False))\n #model.compile(loss='mean_squared_error', optimizer=K.optimizers.RMSprop(lr=args['learning_rate'], rho=0.9, epsilon=None, decay=0.0))\n #model.compile(loss='mean_squared_error', optimizer= K.optimizers.SGD(lr=args['learning_rate'], momentum=0.9, decay=0.0, nesterov=False))\n \n history = History()\n \n hist = model.fit(X_train, y_train, epochs=args['train_steps'], verbose=1, shuffle=True, batch_size=args['batch_size'], validation_data=(X_test, y_test))\n val_loss = hist.history['val_loss']\n loss = hist.history['loss']\n \n \n #average over last 20 validation loss values\n average_over = 20\n mov_av_val = averageOverLastN(np.sqrt(np.array(val_loss)), average_over) # moving average of RMSE\n val_average_over_last_n.append(mov_av_val)\n \n #average over last 20 train loss values\n mov_av_tr = averageOverLastN(np.sqrt(np.array(loss)), average_over)\n tr_average_over_last_n.append(mov_av_tr)\n \n \n print('*** Done training ***') \n\n # Evaluate how the model performs on data it has not yet seen.\n\n y_hat=model.predict(X_test, batch_size=args['batch_size'])\n y_train_hat=model.predict(X_train, batch_size=args['batch_size'])\n print('*** Done predictions ***')\n \n predictions = list(p[0] for p in y_hat) \n pred_train = list(p[0] for p in y_train_hat) \n\n error = mean_squared_error(y_test, predictions)\n mae = mean_absolute_error(y_test, predictions)\n mae_std = np.std(np.abs(y_test-predictions))\n r2 = r2_score(y_test, predictions)\n \n mse_test.append(error)\n r2_test.append(r2)\n mae_test.append(mae)\n mae_std_test.append(mae_std)\n \n\n print('\\nTest MSE: ', error)\n print('\\nR2 Score: ', r2)\n 
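# A minimal self-contained sketch (sizes are assumptions for illustration,
# not the original data) of the strided "stratified" split built earlier in
# main(): fold k takes every cv-th sample, starting at offset k, as its test set.
import numpy as np

n_samples, cv = 10, 5
folds = []
for offset in range(cv):
    test_ind = np.arange(offset, n_samples, cv)
    mask = np.ones(n_samples, dtype=bool)
    mask[test_ind] = False
    folds.append((np.arange(n_samples)[mask], test_ind))
for train_ind, test_ind in folds:
    print("train:", train_ind, "test:", test_ind)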
\n hrs,mins,sec = calculate_time(start_time_fold,time.time())\n print('\\n --- %s. Fold Time: %s h, %s min %s sec ---' % (foldNumber,hrs,mins,sec))\n\n model.reset_states()\n K.backend.clear_session()\n gc.collect()\n \n foldNumber += 1\n #endcvfor\n\n \n\n #average over cv folds\n cv_last_val_rmse = np.mean(np.sqrt(mse_test))\n cv_last_val_std_rmse = np.std(np.sqrt(mse_test))\n cv_last_val_mae = np.mean(mae_test)\n cv_last_val_std_mae = np.std(mae_test)\n cv_average_tr_rloss = np.mean(tr_average_over_last_n)\n cv_r2 = sum(r2_test)/float(len(r2_test)) \n cv_average_val_rloss = np.mean(val_average_over_last_n)\n \n\n print('all the mse_test: ',mse_test)\n print('all the rmse_test: ', np.sqrt(mse_test))\n print('all the mae_test: ',mae_test)\n print('all the mae_std_test', mae_std_test)\n print('all the r2_test',r2_test)\n print('\\n Average RMSE Train Loss :', cv_average_tr_rloss)\n print('Average R2 on Test:',cv_r2)\n print('\\n Average Validation RMSE Loss on Test:', cv_average_val_rloss)\n \n\n \n hrs,mins,sec = calculate_time(start_time,time.time()) \n print('\\n --- Total Time: %s h, %s min %s sec ---' % (hrs,mins,sec))\n\n\n #print statistics to csv file\n header = ['Last_R2','Last_Val_RMSE', 'Last_Val_Std_RMSE', 'Last_Val_MAE', 'Last_Val_Std_MAE', 'Average_Val_RMSE', 'Average_TR_RMSE','DataSize']\n fields = [float(cv_r2), float(cv_last_val_rmse), float(cv_last_val_std_rmse), float(cv_last_val_mae), float(cv_last_val_std_mae), float(cv_average_val_rloss), float(cv_average_tr_rloss), len(X)]\n for key in settings.args_keys: \n header.append(key)\n fields.append(settings.get_setting(settings.path,section,key))\n\n \n filename = settings.get_setting(settings.path,section,'csv_output_file')\n file_exists = os.path.isfile(filename)\n with open(filename, 'a', newline='') as csvfile:\n writer = csv.writer(csvfile)\n if not file_exists:\n writer.writerow(header);\n writer.writerow(fields);\n print('*** Done writing csv ***') \n\n# Calculate elapsed time\ndef calculate_time(s,f):\n sec = f - s\n hrs = int(sec / 3600)\n sec -= 3600*hrs\n mins = int(sec / 60)\n sec -= 60*mins\n return hrs,mins,sec\n \ndef savefig(y, preds, fields='None', show=False):\n plt.plot(y, preds, '.')\n if(show):\n plt.show()\n plt.savefig('{0}.png'.format(fields))\n print('*** Done saving plt ***') \n\ndef from_dataset(ds):\n return lambda: ds.make_one_shot_iterator().get_next()\n\n","sub_path":"dnn_osc.py","file_name":"dnn_osc.py","file_ext":"py","file_size_in_byte":9166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"324167967","text":"import numpy as np\nfrom chainercv.transforms import scale, resize_contain\n\ndef resize_with_pad(img, size, fill=255):\n\t\"\"\"\t\n\tArgs:\n\t\timg (np.array: (H, W, C)): PIL format image.\n\t\tsize (tuple of int: (H, W)): The size of resized image.\n\t\tfill (int): The pixel color value for padding.\n\tReturn:\n\t\timg (np.array: (H, W, C)): resized PIL format image\n\t\"\"\"\n\t# PIL -> chainer (C, H, W)\n\timg = img.transpose(2,0,1)\n\timg = scale(img, min(size), fit_short=False)\n\timg = resize_contain(img, size, fill=fill)\n\timg = img.transpose(1,2,0)\n\treturn img\n\ndef make_grid_image(imgs, whole_size=None, fill=255):\n\t\"\"\"\n\tArgs:\n\t\timgs (list of np.array: (H, W, C)): PIL format images\n\t\twhole_size (tuple of int: (H, W)): The size of grid image.\n\t\t\tIf this is `None`, no resizing each image.\n\t\tfill (int): The pixel color value for padding.\n\tReturn:\n\t\tgrid_img (np.array: (H, W, C)): Tiled given 
images as a grid,\n\t\t\tsuch that, \n\t\t\t\t\t\t\t\t\t [1][2][3]\n\t\t\t[1][2][3][4][5][6][7] -> [4][5][6]\n\t\t\t\t\t\t\t\t\t [7][ ][ ]\n\t\"\"\"\n\n\t# n: the number of images per edge\n\tn = np.ceil(np.sqrt(len(imgs)))\n\tif whole_size is None:\n\t\th, w, _ = imgs[0].shape\n\telse:\n\t\th = min(int(whole_size[0]/n), 60)\n\t\tw = min(int(whole_size[1]/n), 60)\n\n\timgs = [resize_with_pad(img, (h,w), fill=fill) for img in imgs]\n\n\tpads = [fill * np.ones((h,w,3)) for _ in range(int(n**2-len(imgs)))]\n\timgs = np.array(imgs+pads)\n\n\tgrid_img = np.concatenate([\n\t np.concatenate(row, axis=1)\n\t for row in np.split(imgs, int(n))\n\t], axis=0)\n\n\treturn grid_img\n\n#def tile_images_horizontal","sub_path":"scripts/transforms.py","file_name":"transforms.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"571487408","text":"from tools.zhuanhuan import *\nfrom funs_domain import *\nfrom func_http import *\nfrom funs_signal import *\nimport sys ,os\nsys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))\nf=open(\"../file/af4bebd0-54ca-ffca-ca7c-25c7814574cd.txt\")\ndata=linesStr_to_lineDic(f.read())\nsignal_dic={}\nqr_0_host={}\nqr_1_ip={}\ndomain_dic={}\nhttp_dic={}\n\nfor dic in data :\n client = dic.get(\"sip\") + \":\" + dic.get(\"sport\")\n server = dic.get(\"dip\") + \":\" + dic.get(\"dport\")\n if dic.get(\"qr\") == \"0\":\n domain_qr0(dic,qr_0_host)\n elif dic.get(\"qr\") == \"1\" and dic.get(\"ip\"):\n domain_qr1(dic,qr_0_host,qr_1_ip)\n elif dic.get(\"flag\")== '40962':\n domain_40962(dic,client,server,qr_1_ip,domain_dic)\n elif dic.get(\"flag\")==\"20496\":\n domain_40296(dic,client,server,domain_dic)\n # 以下处理信令\n elif dic.get(\"type\") =='udp':\n signal_upd(dic,client,server,signal_dic) #处理updtype==udp的信令\n elif dic.get(\"flag\") == \"20504\":\n signal_tcp_20504(dic,client,server,signal_dic)\n http_20504(dic,client,server,http_dic)\n elif dic.get(\"flag\") == '20496':\n signal_tcp_20496(dic,client,server,signal_dic)\n http_20496(dic,client,server,signal_dic)\n\n#过滤掉没有有通过 DNS 发起tcp 连接的 dmain-info\n##为每个ip 添加cdn信息\nfor k,v in qr_1_ip.items():\n tcpData=list(filter( lambda ipinfo:k in ipinfo,domain_dic)) #过滤出有通过 DNS 发起tcp 连接的 TCP-info\n for ip in tcpData:\n domain_dic[ip][\"cdn\"]=v\ndomain_dic={ip:info for ip,info in domain_dic.items() if info.get(\"cdn\") }\ndb_list=domian_dic_to_db_dic(domain_dic,\"af4bebd0-54ca-ffca-ca7c-25c7814574cd\")\nfrom DB.tables import Domain\n# print(db_list)\n# print(Domain().query(\"domain,ip,dnsreplydelay,tcpbuilddelay,dns_suc_rate,tcp_suc_rate\"))\nDomain().insert_data(db_list)","sub_path":"src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"68960993","text":"import pdb\nimport numpy as np\nimport cv2\nimport requests\nfrom flask import json\n\nclass CompetitionEnvironment:\n def __init__(self, map_size = 12):\n self.team_id = 'KOPB'\n # self.url = 'http://10.2.5.64/test'\n self.url = 'http://10.2.5.64/competition'\n self.map_size = map_size\n self.reset()\n\n def reset(self):\n # set the vars below\n self.step = 0\n self.score = 0\n\n while True:\n data = requests.post(self.url, data=json.dumps({'name':self.team_id})).json()\n if(data['msg']=='OK'):\n break\n else:\n print('connect err')\n self.env_id = data['id']\n self.parse_state(data['state'])\n\n def parse_state(self, state_data):\n self.posx = 
state_data['ai']['x']\n self.posy = state_data['ai']['y']\n self.map = np.zeros((self.map_size,self.map_size),'int')\n\n self.obs_cnt = len(state_data['walls'])\n for obs in state_data['walls']:\n self.map[obs['x'],obs['y']] = -1\n\n self.pkg_cnt = len(state_data['jobs'])\n for pkg in state_data['jobs']:\n self.map[pkg['x'],pkg['y']] = pkg['value']\n\n def restart(self):\n # set the vars below\n self.reset()\n\n def save(self, file):\n open(file,'w').write(' '.join([str(x) for x in [self.map_size]+self.map.ravel().tolist()]))\n\n # move dir (0,1,2,3) => (up, down, left, right)\n def move(self, movdir):\n direction = list('UDLR')\n while True:\n data = requests.post(self.url+'/'+self.env_id+'/move', data=json.dumps({'direction':direction[movdir]})).json()\n if data['msg'] == 'OK':\n break\n else:\n print('mov err')\n self.env_id = data['id']\n self.parse_state(data['state'])\n reward = data['reward']\n done = data['done']\n self.step += 1\n self.score+=reward\n return done, reward\n\n def get_show_map(self, K=20):\n r,c = self.map.shape[:2]\n drawimg = cv2.cvtColor(np.ones((r*K,c*K),'uint8')+254, cv2.COLOR_GRAY2BGR)\n for i in range(c):\n drawimg = cv2.line(drawimg, (0,i*K), (r*K,i*K), (250,0,0), 1)\n for i in range(r):\n drawimg = cv2.line(drawimg, (i*K,0), (i*K,c*K), (250,0,0), 1)\n\n for i in range(r):\n for j in range(c):\n if self.map[i,j]>0:\n drawimg[i*K:i*K+K, j*K:j*K+K] = (0, min(170*self.map[i,j]/20 + 80,255), 0)\n drawimg = cv2.putText(drawimg, '%d'%self.map[i,j], (int((j+0.1)*K),int((i+0.8)*K)), cv2.FONT_HERSHEY_SIMPLEX, K*0.02, (255,0,0), 2)\n elif self.map[i,j]<0:\n drawimg[i*K:i*K+K, j*K:j*K+K] = (50, 50, 50)\n \n drawimg = cv2.circle(drawimg, (int((self.posy+0.5)*K),int((self.posx+0.5)*K)), int(K/3), (0,0,255), -1)\n \n status_bar = np.ones((30,c*K,3),'uint8')+128\n status_bar = cv2.putText(status_bar, 'step:%d score:%d'%(self.step,self.score), (0,17), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255,255,0), 2)\n return np.vstack([drawimg,status_bar])\n\n def watch(self):\n cv2.imshow('map', self.get_show_map())\n\nif __name__ == '__main__':\n for epoch in range(100):\n env = CompetitionEnvironment()\n while env.step<=500:\n env.watch()\n key = cv2.waitKey(100)\n if key == 27:\n exit()\n env.move(np.random.choice([0,1,2,3]))\n","sub_path":"pacman/compitition_env.py","file_name":"compitition_env.py","file_ext":"py","file_size_in_byte":3488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"509445159","text":"from selection import insertion_sort\n\ndef merge_lists(left_list, right_list):\n left_pointer = 0\n right_pointer = 0\n sorted_list = []\n\n # keep adding to the final list until its length is the same as that of\n # the original list\n while len(sorted_list) != len(left_list) + len(right_list):\n # look at two items, one from each list\n left_item = left_list[left_pointer]\n right_item = right_list[right_pointer]\n\n # add the smaller of the two items we are looking at, and move forward\n if left_item < right_item:\n sorted_list.append(left_item)\n left_pointer += 1\n else:\n sorted_list.append(right_item)\n right_pointer += 1\n\n # if we've passed the end of either list, then extend the sorted list\n # with what remains in the other\n if right_pointer >= len(right_list):\n sorted_list.extend(left_list[left_pointer:])\n break\n if left_pointer >= len(left_list):\n sorted_list.extend(right_list[right_pointer:])\n break\n\n return sorted_list\n\ndef divide_and_combine(list_to_sort):\n # find the middle of the list and split it into 
halves\n mid = len(list_to_sort) // 2\n left_list = list_to_sort[:mid]\n right_list = list_to_sort[mid:]\n\n # sort each half of the original list\n insertion_sort(left_list)\n insertion_sort(right_list)\n\n # merge the two sorted lists and return the result\n return merge_lists(left_list, right_list)\n\n\n\ndef merge_sort(list_to_sort):\n # base case: a list with 0 or 1 elements is already sorted, so return it\n if len(list_to_sort) <= 1:\n return list_to_sort\n\n # divide\n mid = len(list_to_sort) // 2 # find the middle of the list\n left_list = list_to_sort[:mid] # and split it into halves\n right_list = list_to_sort[mid:]\n\n # conquer\n left_list = merge_sort(left_list)\n right_list = merge_sort(right_list)\n\n # combine\n return merge_lists(left_list, right_list)\n\n\nif __name__ == '__main__':\n ls = [8, 3, 4, 9, 2]\n print(\"unsorted: \" + str(ls))\n print(\"sorted: \" + str(divide_and_combine(ls)))\n ls = [8, 3, 4, 9, 2, 12, 2, 3, 65, 9, 12, 1, 2, 1, 4, 0, 235, 2, 6, 5, 44]\n print(\"unsorted: \" + str(ls))\n print(\"sorted with merge sort: \" + str(merge_sort(ls)))\n","sub_path":"source/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"524976894","text":"from cassandra import ConsistencyLevel\r\nfrom cassandra.cluster import Cluster\r\nfrom cassandra.query import BatchStatement\r\n\r\n\r\nif __name__ == \"__main__\":\r\n cluster = Cluster(['127.0.0.1'], port=9042)\r\n session = cluster.connect('plain', wait_for_all_pools=True)\r\n session.execute('USE plain')\r\n session.execute(\r\n \"BEGIN BATCH \"\r\n \"UPDATE plain.Users SET username = 'artem' WHERE id = 1;\"\r\n \"UPDATE plain.UserDocuments SET username = 'artem' WHERE user_id = 1;\"\r\n \"APPLY BATCH;\")","sub_path":"batch.py","file_name":"batch.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"171354530","text":"# -*- coding: utf-8 -*-\n\n\ndef input_(x: int, y: int, ex: str) -> bool:\n \"\"\"\n >>> input_(1, 4, 'x**3 + x**2 + x + 1')\n True\n \"\"\"\n return eval(ex) == y\n\n\nif __name__ == '__main__':\n (x, y), ex = map(int, input().split()), input()\n print(input_(x, y, ex))\n","sub_path":"python/easy/built-ins/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"118217213","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport pdb\nfrom torch.nn.utils.rnn import pad_packed_sequence, pad_sequence, pack_padded_sequence\n\nclass LSTM_Audio(nn.Module):\n def __init__(self, hidden_dim, num_layers, device,dropout_rate=0 ,bidirectional=False):\n super(LSTM_Audio, self).__init__()\n self.device = device\n self.num_features = 39\n self.hidden_dim = hidden_dim\n self.num_layers = num_layers\n self.dropout_rate = dropout_rate\n self.bidirectional = bidirectional\n self.lstm = nn.LSTM(self.num_features, self.hidden_dim, self.num_layers, batch_first=True,\n dropout=self.dropout_rate, bidirectional=self.bidirectional).to(self.device)\n\n def forward(self, input):\n input = input.to(self.device)\n out, hn = self.lstm(input)\n return out\nclass LFLB(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size_cnn, stride_cnn, padding_cnn, padding_pool,kernel_size_pool, stride_pool, device):\n super(LFLB, self).__init__()\n self.device = device\n 
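# A small worked example (values are assumptions for illustration) of the
# output-size formula floor((x + 2*p - k) / s) + 1 that the cnn_shape helper
# later in this file applies, once for the convolution and once for pooling:
def conv_out(x, k, s, p):
    return (x + 2 * p - k) // s + 1

x = 128                                           # input height or width
after_conv = conv_out(x, k=3, s=1, p=1)           # 128: "same" padding keeps the size
after_pool = conv_out(after_conv, k=2, s=2, p=0)  # 64: pooling halves it
print(after_conv, after_pool)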
self.in_channels = in_channels\n self.out_channels = out_channels\n self.kernel_size_cnn = kernel_size_cnn\n self.stride_cnn = stride_cnn\n self.padding_cnn = padding_cnn\n self.padding_pool=padding_pool\n self.kernel_size_pool = kernel_size_pool\n self.stride_pool = stride_pool\n\n self.cnn = nn.Conv2d(self.in_channels, self.out_channels, self.kernel_size_cnn, stride=self.stride_cnn, padding=self.padding_cnn).to(self.device)\n self.batch = nn.BatchNorm2d(self.out_channels)\n self.max_pool = nn.MaxPool2d(self.kernel_size_pool, stride=self.stride_pool,padding=self.padding_pool)\n self.relu = nn.ReLU()\n\n def forward(self,input):\n input=input.to(self.device)\n out=self.cnn(input)\n out=self.batch(out)\n out=self.relu(out)\n out=self.max_pool(out)\n return out\n\n\nclass SpectrogramModel(nn.Module):\n\n def valid_cnn(self,x,k):\n return torch.floor(x-k+1)\n\n def valid_max(self,x,k,s):\n return torch.floor((x-k)/s+1)\n def cnn_shape(self,x,kc,sc,pc,km,sm,pm):\n temp=int((x+2*pc-kc)/sc+1)\n temp=int((temp+2*pm-km)/sm+1)\n return temp\n\n def __init__(self, in_channels, out_channels, kernel_size_cnn, stride_cnn, kernel_size_pool, stride_pool, \n hidden_dim, num_layers, dropout_rate, num_labels, batch_size, \n hidden_dim_lstm,num_layers_lstm,device, nfft,bidirectional=False):\n super(SpectrogramModel, self).__init__()\n self.device = device\n self.in_channels = [in_channels]+out_channels\n self.out_channels = out_channels\n self.kernel_size_cnn = kernel_size_cnn\n self.stride_cnn = stride_cnn\n self.padding_cnn =[(int((self.kernel_size_cnn[i][0]-1)/2),int((self.kernel_size_cnn[i][1]-1)/2)) for i in range(len(out_channels))]\n self.kernel_size_pool = kernel_size_pool\n self.padding_pool=[(int((self.kernel_size_pool[i][0]-1)/2),int((self.kernel_size_pool[i][1]-1)/2)) for i in range(len(out_channels))]\n self.stride_pool = stride_pool\n\n# lstm\n self.hidden_dim = hidden_dim\n self.num_layers = num_layers\n self.dropout_rate = dropout_rate\n self.num_labels = num_labels\n self.batch_size = batch_size\n self.bidirectional = bidirectional\n self.num_directions = 1 + self.bidirectional\n self.hidden_dim_lstm=hidden_dim_lstm\n\n# data shape\n self.nfft=nfft\n strideF=self.nfft//4\n\n# for putting all cells together\n self._all_layers = []\n self.num_layers_cnn=len(out_channels)\n for i in range(self.num_layers_cnn):\n name = 'cell{}'.format(i)\n cell=LFLB(self.in_channels[i], self.out_channels[i], self.kernel_size_cnn[i], self.stride_cnn[i], \n self.padding_cnn[i], self.padding_pool[i],self.kernel_size_pool[i], self.stride_pool[i], self.device)\n setattr(self, name, cell)\n self._all_layers.append(cell)\n strideF=self.cnn_shape(strideF,self.kernel_size_cnn[i][0],self.stride_cnn[i][0],self.padding_cnn[i][0],\n self.kernel_size_pool[i][0],self.stride_pool[i],self.padding_pool[i][0])\n \n self.lstm = nn.LSTM(self.out_channels[-1]*strideF, self.hidden_dim_lstm, self.num_layers, batch_first=True,\n dropout=self.dropout_rate, bidirectional=self.bidirectional).to(self.device)\n self.classification_hand = nn.Linear(self.hidden_dim, self.num_labels).to(self.device)\n self.classification_raw=nn.Linear(self.hidden_dim_lstm*self.num_directions, self.num_labels).to(self.device)\n #self.classification=nn.Linear(self.hidden_dim_lstm*self.num_directions+self.hidden_dim*2, self.num_labels).to(self.device)\n\n self.LSTM_Audio=LSTM_Audio(hidden_dim,num_layers,self.device,bidirectional=False)\n self.weight= nn.Parameter(torch.FloatTensor([0]),requires_grad=False)\n\n\n def forward(self, input_lstm,input, 
target,seq_length,seq_length_spec):\n x = input.to(self.device)\n target = target.to(self.device)\n for i in range(self.num_layers_cnn):\n name = 'cell{}'.format(i)\n #seq_length_spec=self.valid_max(self.valid_cnn(seq_length_spec,self.kernel_size_cnn[i]),self.kernel_size_pool[i],self.stride_pool[i])\n x=getattr(self,name)(x)\n temp=[]\n '''\n for s in seq_length_spec:\n if s==0 or s==1:\n temp.append(2)\n else:\n temp.append(s)\n seq_length_spec=torch.Tensor(temp)\n '''\n out = torch.flatten(x,start_dim=1,end_dim=2).permute(0,2,1)\n out, hn = self.lstm(out)\n out=out.permute(0,2,1)\n out_lstm=self.LSTM_Audio(input_lstm).permute(0,2,1)\n '''\n temp1=[torch.unsqueeze(torch.mean(out[k,:,:int(s.item())],dim=1),dim=0) for k,s in enumerate(seq_length_spec)]\n '''\n temp=[torch.unsqueeze(torch.mean(out_lstm[k,:,:int(s.item())],dim=1),dim=0) for k,s in enumerate(seq_length)]\n out=torch.mean(out,dim=2)\n #temp=torch.mean(out_lstm,dim=2)\n out_lstm=torch.cat(temp,dim=0)\n #out=torch.cat(temp1,dim=0)\n p=torch.exp(10*self.weight)/(1+torch.exp(10*self.weight))\n #out=torch.cat([p*out,(1-p)*out_lstm],dim=1)\n #out=torch.cat([out,out_lstm],dim=1)\n out=self.classification_raw(out)\n out_lstm=self.classification_hand(out_lstm)\n out_final=p*out+(1-p)*out_lstm\n #out_final=out\n #out=self.classification(out)\n target_index = torch.argmax(target, dim=1).to(self.device)\n correct_batch=torch.sum(target_index==torch.argmax(out_final,dim=1))\n losses_batch_raw=F.cross_entropy(out,torch.max(target,1)[1])\n losses_batch_hand=F.cross_entropy(out_lstm,torch.max(target,1)[1])\n losses_batch=p*losses_batch_raw+(1-p)*losses_batch_hand\n #losses_batch=losses_batch_raw\n #losses_batch=F.cross_entropy(out,torch.max(target,1)[1])\n\n\n correct_batch=torch.unsqueeze(correct_batch,dim=0)\n losses_batch=torch.unsqueeze(losses_batch, dim=0)\n if torch.isnan(losses_batch):\n pdb.set_trace()\n return losses_batch,correct_batch\n","sub_path":"src/speech/model_joint_spec_full_2d.py","file_name":"model_joint_spec_full_2d.py","file_ext":"py","file_size_in_byte":7383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"558314892","text":"# Deep Deterministic Policy Gradient (DDPG) for Bipedal Walker v3\n\n# Refer to the following for guidance:\n# https://arxiv.org/abs/1509.02971\n# https://towardsdatascience.com/td3-learning-to-run-with-ai-40dfc512f93\n# https://github.com/sweetice/Deep-reinforcement-learning-with-pytorch\n# https://github.com/udacity/deep-reinforcement-learning/tree/master/ddpg-bipedal\n\n# Imports\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\n# Fan init from https://github.com/udacity/deep-reinforcement-learning for network initialisation\ndef hidden_init(layer):\n fan_in = layer.weight.data.size()[0]\n lim = 1. 
/ np.sqrt(fan_in)\n return (-lim, lim)\n\n\n# Actor network\nclass Actor(nn.Module):\n \n def __init__(self, n_state, n_action, hidden1, hidden2):\n \n \"\"\" Creates the actor network.\n \n (params):\n n_state (int): The size of the state space\n n_action (int): The size of the action space\n hidden1 (int): The size of the first hidden layer\n hidden2 (int): The size of the first hidden layer\n \"\"\"\n \n super(Actor, self).__init__()\n self.fc1 = nn.Linear(n_state, hidden1)\n self.fc2 = nn.Linear(hidden1, hidden2)\n self.out = nn.Linear(hidden2, n_action)\n self.reset_params()\n \n def forward(self, x):\n \n \"\"\" Predicts actions from states \"\"\"\n \n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.tanh(self.out(x))\n return x\n \n def reset_params(self):\n \n \"\"\" Performs fan initialisation for the network parameters \"\"\" \n \n epsilon = 3e-3\n self.fc1.weight.data.uniform_(*hidden_init(self.fc1))\n self.fc2.weight.data.uniform_(*hidden_init(self.fc2))\n self.out.weight.data.uniform_(-epsilon, epsilon)\n \n# Critic network\nclass Critic(nn.Module):\n \n def __init__(self, n_state, n_action, state_hidden, hidden1, hidden2):\n \n \"\"\" Creates the actor network.\n \n (params):\n n_state (int): The size of the state space\n n_action (int): The size of the action space\n state_hidden (int): The number of units in the layer between states\n and concatination\n hidden (list of ints): The size of the hidden layers \"\"\"\n \n super(Critic, self).__init__()\n self.process = nn.Linear(n_state, state_hidden)\n self.fc1 = nn.Linear(state_hidden+n_action, hidden1)\n self.fc2 = nn.Linear(hidden1, hidden2)\n self.out = nn.Linear(hidden2, 1)\n self.reset_params()\n \n def forward(self, states, actions):\n \n \"\"\" Predicts Q values from states and actions \"\"\"\n \n state_process = F.relu(self.process(states))\n x = torch.cat((state_process, actions), dim=1)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n return self.out(x)\n return x\n \n def reset_params(self):\n \n \"\"\" Performs fan initialisation for the network parameters \"\"\"\n \n epsilon = 3e-3\n self.process.weight.data.uniform_(*hidden_init(self.process))\n self.fc1.weight.data.uniform_(*hidden_init(self.fc1))\n self.fc2.weight.data.uniform_(*hidden_init(self.fc2))\n self.out.weight.data.uniform_(-epsilon, epsilon)\n","sub_path":"working/AC_torch.py","file_name":"AC_torch.py","file_ext":"py","file_size_in_byte":3418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"319741198","text":"import time\nimport math\nimport pandas as pd\nimport numpy as np\n\n\nCITY_DATA = {'chicago': 'chicago.csv',\n 'new york city': 'new_york_city.csv',\n 'washington': 'washington.csv'}\n\ndef get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n print('_.~\"~.'*10)\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n # get user input for city (chicago, new york city, washington). 
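# A minimal sketch (hypothetical helper; prompt text and option set are
# assumptions, not from the original script) of the validated-input loop
# pattern this function uses for every prompt: keep asking until the answer
# is in an allowed set.
def ask_choice(prompt, options):
    answer = ""
    while answer not in options:
        answer = input(prompt).strip().lower()
        if answer not in options:
            print("Please enter one of: " + ", ".join(sorted(options)))
    return answer

# Example: city = ask_choice('Type "C", "N" or "W": ', {"c", "n", "w"})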
HINT: Use a while loop to handle invalid inputs\n\n print(\"Let's get started: Which city's data would you like to explore?\\n\")\n city = \"\"\n while city == \"\":\n # catching weird errors\n try:\n city = input('Type \"C\" for Chicago, \"N\" for New York City or \"W\"\\\n for Washington.\\n').lower()\n except:\n print('Looks like something went wrong. Enter only a single letter please\\n')\n continue\n # check whether the answer is acceptable\n if city in [\"c\", \"n\", \"w\"]:\n cities = {\"c\": \"Chicago\", \"n\": \"New York City\", \"w\":\"Washington\"}\n print(\"Awesome you chose {}, what a nice city!\".format(cities[city]))\n city = cities[city].lower()\n else:\n print(\"You seem to have entered something else, please try again, with a single letter\\n\")\n city = \"\"\n\n # get user input for month (all, january, february, ... , june)\n print(\"\\n\")\n print(\"-\"*40)\n print(\"Now that we have a city lets look at the months\\n\")\n month = \"\"\n # dictionary to translate the numbers\n months = {1:'January', 2:'February', 3:'March', 4:'April', 5:'May', 6:'June', 7:\"July\", 8:\"August\", 9:\"September\",10:\"Oktober\",11:\"November\", 12:\"December\"}\n print('Please type a number from 1 to 6 depending on which month you want to look at. There is sadly no data from July to December\\n\\\nIf you want to look at all months enter \"all\"\\n')\n while month == \"\":\n month = input()\n if month ==\"all\":\n print(\"Ok, let's look at all months.\")\n break\n try:\n if int(month) <= 6 and int(month)>0:\n print(\"Ok, lets look only at {}.\\n\".format(months[int(month)]))\n else:\n print(\"Please enter a number between 1 and 6 or 'all'\\n\")\n month = \"\"\n except:\n print(\"Looks like that wasn't a number or the word 'all'. Please try again.\")\n month = \"\"\n\n\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n print(\"\\n\")\n print(\"-\"*40)\n print(\"Now that we have a month lets look at the days\\n\")\n day = \"\"\n while day == \"\":\n days = [\"Mo\", \"Tu\", \"We\", \"Th\", \"Fr\", \"Sa\", \"Su\"]\n days_verb = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]\n week = dict(zip(days, days_verb))\n\n day = input('Please enter the first two letters of the weekday you want to look at.\\n\\\nIf you want to look at all days enter \"all\"\\n')\n if day == \"all\":\n print(\"Ok, let's look at all days.\")\n return city, month, day\n break\n try:\n if day.title() in days:\n print(\"Ok, lets look only at {}.\\n\".format(week[day.title()]))\n else:\n print(\"Mmh that doesn't look like a day to me. 
:/\\n\")\n day = \"\"\n except:\n print(\"\\nWhoops, I'm expecting two letters of a weekday or 'all'.\")\n day = \"\"\n\n print('-'*40)\n\n return city, month, week[day.title()]\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df[\"Start Time\"])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df[\"Start Time\"].dt.month\n df[\"day_of_week\"] = df[\"Start Time\"].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n #months = ['january', 'february', 'march', 'april', 'may', 'june']\n #month = months.index(month.lower())+1\n #print(month)\n # filter by month to create the new dataframe\n\n df = df[df[\"month\"] == int(month)]\n\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df[\"day_of_week\"] == day.title()]\n\n # Get a column with the start time\n df['Start Time'] = pd.to_datetime(df[\"Start Time\"], dayfirst = True)\n # extract hour from the Start Time column to create an hour column\n df['hour'] =df[\"Start Time\"].dt.hour\n\n return df\n\n\ndef time_stats(df, month, day):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n if month == \"all\":\n print(\"Calculating the most common month:\")\n common_month = df.month.value_counts().idxmax()\n months = {1:'January', 2:'February', 3:'March', 4:'April', 5:'May', 6:'June', 7:\"July\", 8:\"August\", 9:\"September\",10:\"Oktober\",11:\"November\", 12:\"December\"}\n print(\"~~~~~~~\")\n print(\"The most common month is {}\".format(months[common_month]))\n print(\"~~~~~~~\\n\")\n else:\n print(\"Calculation of month is skipped because 'all' wasn't selected.\\n\")\n\n\n\n # display the most common day of week\n if day == \"all\":\n print(\"Calculating the most common day:\")\n common_day = df.day_of_week.value_counts().idxmax()\n print(\"~~~~~~~\")\n print(\"The most common day is {}\".format(common_day))\n print(\"~~~~~~~\\n\")\n else:\n print(\"Calculation of day is skipped because 'all' wasn't selected.\\n\")\n\n # display the most common start hour\n print(\"Calculating the most common hour:\")\n # convert the Start Time column to datetime\n\n # find the most common hour (from 0 to 23)\n\n try:\n common_hour = df.hour.value_counts().idxmax()\n print(\"~~~~~~~\")\n print(\"The most common hour is {}\".format(common_hour))\n print(\"~~~~~~~\\n\")\n except:\n print(\"There are no rentals for your filters.\")\n\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n station= 
df[\"Start Station\"].value_counts().idxmax()\n visits = df[\"Start Station\"].value_counts().max()\n print(\"~~~~~~~\")\n print(\"The most commonly used start station is {} with {} visits\".format(station, visits))\n\n # display most commonly used end station\n end_station= df[\"End Station\"].value_counts().idxmax()\n end_visits = df[\"End Station\"].value_counts().max()\n print(\"The most commonly used end station is {} with {} visits\".format(end_station, end_visits))\n\n # display most frequent combination of start station and end station trip\n #Create a column with start and end stations\n df[\"Combination\"] = df[\"Start Station\"] + \" to \"+ df[\"End Station\"]\n comb_station= df[\"Combination\"].value_counts().idxmax()\n comb_visits = df[\"Combination\"].value_counts().max()\n print(\"The most commonly combination of start and end station is {} with {} visits\".format(comb_station, comb_visits))\n print(\"~~~~~~~\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\ndef round_half_up(n, decimals=0):\n multiplier = 10 ** decimals\n return math.floor(n*multiplier + 0.5) / multiplier\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n tot_time = df[\"Trip Duration\"].sum()/3600\n tot_time = round_half_up(tot_time, 2)\n print(\"~~~~~~~\")\n print(\"The total travel time is {} hours\".format(tot_time))\n # display mean travel time\n travel_count = len(df[\"Trip Duration\"])\n #Calculating average and converting back to minutes\n mean = round_half_up(tot_time*60/travel_count,2)\n print(\"The average travel time is {} minutes\".format(mean))\n print(\"~~~~~~~\")\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef user_stats(df):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n types = df[\"User Type\"].unique()\n print(\"~~~~~~~\")\n print(\"There are {} user types:\\n\".format(len(types)))\n for type in types:\n amount_type = len(df[df[\"User Type\"]==type])\n print(\" {} {}\".format(amount_type, type))\n print(\"~~~~~~~\")\n # Display counts of gender\n try:\n genders = df[\"Gender\"].unique()\n print(\"There are {} genders:\\n\".format(len(genders)))\n for gender in genders:\n amount_gender = len(df[df[\"Gender\"]==gender])\n print(\" {} {}\".format(amount_gender, gender))\n print(\"~~~~~~~\")\n except:\n print(\"No available data on genders for the chosen city.\")\n print(\"~~~~~~~\")\n # Display earliest, most recent, and most common year of birth\n try:\n min_birth = int(df[\"Birth Year\"].min())\n max_birth = int(df[\"Birth Year\"].max())\n com_birth = int(df[\"Birth Year\"].value_counts().idxmax())\n print(\"The oldest customer was born in {} the youngest customer was born in {} and the most common birth year is {}.\".format(min_birth, max_birth, com_birth))\n except:\n print(\"No available data on birth years for the chosen city.\")\n print(\"~~~~~~~\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\ndef show_me_what_you_got(df):\n \"\"\" Displays 5 rows of raw data each time the user prompts the program\n to do so \"\"\"\n answer = input(\"Would you like to see some raw data? 
(y/n)\\n\")\n start=0\n while str(answer.lower()).startswith(\"y\"):\n stop=start+5\n print(df.iloc[start:stop])\n start=stop\n answer = input(\"Do you want to see more raw data? (y/n)\\n\")\n\n\n\n\ndef main():\n while True:\n city, month, day = get_filters()\n df = load_data(city, month, day)\n\n time_stats(df, month, day)\n time.sleep(1)\n station_stats(df)\n time.sleep(1)\n trip_duration_stats(df)\n time.sleep(1)\n user_stats(df)\n time.sleep(1)\n show_me_what_you_got(df)\n\n restart = input('\\nWould you like to restart? Enter yes or no.\\n')\n if restart.lower() != 'yes':\n print('_.~\"~.'*10)\n break\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"bikeshare_2.py","file_name":"bikeshare_2.py","file_ext":"py","file_size_in_byte":11541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"244450527","text":"# Author: Denis Nguyen\n# Created: 06/24/2017\n\n'''\nBy listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, w\ne can see that the 6th prime is 13.\n\nWhat is the 10 001st prime number?\n'''\n\n# Create function to find primes\ndef isPrime(number):\n\t# Test if number is divisible by other numbers other than 1 \n\t# and itself\n\tfor i in range(2, number):\n\t\tif number%i == 0:\n\t\t\treturn False\n\treturn True\n\n# Initiate variables\nlimit = 10001\ncount = 0\ni = 1\nwhile count != limit:\n\ti += 1\n\tif isPrime(i):\n\t\tcount += 1\nprint(i)\n# Answer = 104743","sub_path":"7. 10001st prime.py","file_name":"7. 10001st prime.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"640002337","text":"from decimal import Decimal\n\nfrom django.contrib.gis import admin\nfrom django.contrib.humanize.templatetags.humanize import intcomma\nfrom django.db.models import Max\nfrom django.template import Template, Context\nfrom django.template.defaultfilters import floatformat\nfrom django.utils.dateparse import parse_datetime\nfrom django.utils.safestring import mark_safe\n\nfrom .models import Calibration, Entry\n\n\nclass MonitorAdmin(admin.OSMGeoAdmin):\n list_display = ['name', 'county', 'is_sjvair', 'is_hidden', 'last_updated']\n list_editable = ['is_sjvair', 'is_hidden']\n list_filter = ['is_sjvair', 'is_hidden', 'county']\n fields = ['name', 'county', 'is_hidden', 'is_sjvair', 'location', 'position']\n\n change_form_template = 'admin/monitors/change_form.html'\n\n def get_form(self, request, obj=None, **kwargs):\n form = super().get_form(request, obj, **kwargs)\n if 'pm25_calibration_formula' in form.base_fields:\n form.base_fields['pm25_calibration_formula'].help_text = mark_safe(Template('''\n
Available operators, constants, and functions.\n Available variables:\n {% for env in environment %}\n • {{ env }}\n {% endfor %}
\n ''').render(Context({'environment': Entry.ENVIRONMENT})))\n return form\n\n def last_updated(self, instance):\n if instance.latest:\n return parse_datetime(instance.latest['timestamp'])\n return ''\n\n\n@admin.register(Calibration)\nclass CalibrationAdmin(admin.ModelAdmin):\n list_display = ('monitor_type', 'county', 'modified', 'get_pm25_formula')\n list_filter = ('monitor_type', 'county')\n\n def get_pm25_formula(self, instance):\n if instance.pm25_formula:\n return mark_safe(f'{instance.pm25_formula}')\n return '-'\n get_pm25_formula.short_description = 'PM2.5 Formula'\n","sub_path":"camp/apps/monitors/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"456975493","text":"\"\"\"Constants File\"\"\"\n\nNON_AGGR_FORM = 0 # Form: F(X1), ..., F(Xb), X1 != Xb, X1 != Xi, ..., Xi != Xb\nAGGR_FORM1 = 1 # Form: b <= #count{ X : f(X) }\nAGGR_FORM2 = 2 # Form: not #count{ Y : f(Y) } < b\nAGGR_FORM3 = 3 # Form: not b - 1 = #count{ Y : f(Y) }, ..., not 0 = #count{ Y : f(Y) }\n\nFORM_TRANSLATION = {\n NON_AGGR_FORM: \"Non-aggregate\",\n AGGR_FORM1: \"Inequality Aggregate\",\n AGGR_FORM2: \"Negated Inequality Aggregate\",\n AGGR_FORM3: \"Negated Equality Aggregate\"\n}\n\n# Useful for pretty printing in print_valid_output_forms method\nFORM_TRANSLATION_PADDING = {\n NON_AGGR_FORM: \" \",\n AGGR_FORM1: \" \",\n AGGR_FORM2: \"\",\n AGGR_FORM3: \" \"\n}\n\nLOCATION = { # Custom 'Location' value for aagg-created AST objects, which do not correspond to a real file location\n 'begin': {'column': 'inserted-by-aagg', 'line': 'inserted-by-aagg', 'filename': ''},\n 'end': {'column': 'inserted-by-aagg', 'line': 'inserted-by-aagg', 'filename': ''}\n}\n","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"187964004","text":"num=600851475143\ni=2\nx=1000000\nls=[]\nwhile i 'ASTObject':\n \"\"\"Fully process an expression into an AST,\n including optimizations.\n \n :param expr: string to be compiled.\n :return: Generated AST object.\n \"\"\"\n ast = compile_expr(prepare(expr))\n apply_coercion(ast)\n optimize(ast)\n return ast\n\n\ndef prepare(expr: str) -> str:\n \"\"\"Prepare a string for actual parsing by the compiler.\n Unprepared strings will generally not be compiled due to\n some characters the compiler doesn't deal with and some\n checks the compiler doesn't perform.\n \n :param expr: Expression to be compiled. 
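# A hypothetical usage sketch (assumes this module is importable as
# `compiler`; the expression string is illustrative). It simply mirrors
# full_compile() step by step:
#
#   import compiler
#   src = compiler.prepare("1 + 2 * x")
#   ast = compiler.compile_expr(src)
#   compiler.apply_coercion(ast)
#   ast = compiler.optimize(ast)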
Should be a str object\n :return: Prepared string\n \"\"\"\n return _prepare(expr)\n\n\ndef compile_expr(expr: str) -> 'ASTObject':\n \"\"\"Compile given expression into an AST.\n The string should be prepared by the 'prepare'\n function.\n \n :param expr: String to be compiled.\n :return: Generated AST object.\n \"\"\"\n return _compile(expr)\n\n\ndef apply_coercion(ast: 'ASTObject', *, cleanup=False) -> 'ASTObject':\n _apply_coercion(ast, cleanup)\n return ast\n\n\ndef optimize(ast: 'ASTObject', *, cleanup=True) ->'ASTObject':\n result = _optimize(ast, cleanup)\n if cleanup:\n _cleanup(result)\n return result\n\n\n##############################################################################\n##############################################################################\n# Some small private helper functions\n##############################################################################\n\n\ndef _get_group_id(expr: str) -> str:\n \"\"\"Return a unique identifier to mark something in the given expression.\n\n :param expr: String for which an id should be created\n :return: id\n \"\"\"\n uuid = ''\n while uuid in expr:\n uuid += random.choice(string.ascii_letters)\n return uuid\n\n\ndef _get_class(op):\n \"\"\"Convert the name of an operator into a class name\n \"\"\"\n opname = syntax.symbols[op]\n name = ''.join(part.capitalize() for part in opname.split('_'))\n return globals()[name]\n\n\ndef _pprint(ast, indent=0):\n \"\"\"Get the pprint representation of an AST object.\n \"\"\"\n return ast.__pprint__(indent)\n\n\ndef _walk_ast(ast, level=0) -> collections.defaultdict:\n \"\"\"Generate a dict containing the various levels\n in the AST.\n \n :param ast:\n :param level:\n :param reverse:\n :return:\n \"\"\"\n nodes = collections.defaultdict(list)\n nodes[level].append(ast)\n for attr in ('first', 'second'):\n if hasattr(ast, attr):\n result = _walk_ast(getattr(ast, attr), level+1)\n for key, value in result.items():\n nodes[key].extend(value)\n return nodes\n\n\n##############################################################################\n##############################################################################\n# Stuff that does the heavy lifting\n##############################################################################\n\n\ndef _prepare(expr: str) -> str:\n if re.search(r'\\d+\\s+\\d+', expr) is not None:\n raise SyntaxError()\n logic = syntax.logic.values()\n varname = syntax.constructs['varname']\n pattern = varname + r'\\s' + varname\n for match in re.findall(pattern, expr):\n first, second = match.split()\n if first not in logic and second not in logic:\n raise SyntaxError()\n for char in string.whitespace:\n expr = expr.replace(char, '')\n opened = expr.count(syntax.structure['group_open'])\n closed = expr.count(syntax.structure['group_close'])\n if opened != closed:\n raise SyntaxError('Unbalanced brackets')\n return expr\n\n_prepare.__doc__ = prepare.__doc__\n\n\ntypes = {}\n\n\ndef _apply_coercion(ast, cleanup=False):\n layers = _walk_ast(ast)\n for key in sorted(layers, reverse=True):\n for node in layers[key]:\n if hasattr(node, 'type_code'):\n node.TYPE = node.type_code\n elif isinstance(node, LoadName):\n try:\n node.TYPE = lan_ast.types[node.varname]\n except KeyError:\n raise NameError('Undefined variable: {}'.format(\n node.varname))\n elif isinstance(node, Operator):\n if isinstance(node, AdvancedOperator):\n coerce_1, coerce_2, node.TYPE = syntax.coercion_table[(\n type(node).__name__.upper(),\n node.first.TYPE,\n node.second.TYPE)]\n if coerce_1 != 
node.first.TYPE:\n node.coerce_1 = bytecode.coerce_codes[coerce_1]\n else:\n node.coerce_1 = bytecode.coerce_codes['NULL']\n if coerce_2 != node.second.TYPE:\n node.coerce_2 = bytecode.coerce_codes[coerce_2]\n else:\n node.coerce_2 = bytecode.coerce_codes['NULL']\n else:\n node.TYPE = 'INT'\n if cleanup:\n _cleanup(ast)\n return ast\n\n\ndef _optimize(ast, cleanup=True):\n layers = _walk_ast(ast)\n if len(layers) == 1:\n if cleanup:\n _cleanup(ast)\n return ast \n for key in sorted(layers, reverse=True):\n for node in layers[key]:\n #if isinstance(node, Literal)):\n # node.OPTIMIZE = True\n if isinstance(node, Operator):\n result1 = (isinstance(node.first, Literal) or\n getattr(node.first, 'OPTIMIZE', False))\n result2 = (isinstance(node.second, Literal) or\n getattr(node.second, 'OPTIMIZE', False))\n if result1 and result2:\n node.OPTIMIZE = True\n else:\n node.OPTIMIZE = False\n else:\n node.OPTIMIZE = False\n for key in sorted(layers):\n for node in layers[key]:\n if node.OPTIMIZE:\n literal = globals()['{}Literal'.format(node.TYPE.capitalize())]\n if key == 0:\n return literal(\n str(runtime.execute_statement(node.encode())))\n for parent in layers[key-1]:\n if parent.first is node:\n parent.first = literal(\n str(runtime.execute_statement(node.encode())))\n break\n elif parent.second is node:\n parent.second = literal(\n str(runtime.execute_statement(node.encode())))\n if cleanup:\n _cleanup(ast)\n return ast\n\n\ndef _cleanup(ast):\n for layer in _walk_ast(ast).values():\n for node in layer:\n try:\n del node.OPTIMIZE\n except AttributeError:\n pass\n try:\n del node.TYPE\n except AttributeError:\n pass\n\n\n##############################################################################\n##############################################################################\n# Functions for compiling\n##############################################################################\n\n\ndef _compile(expr: str) -> 'ASTObject':\n \"\"\"Function to dispatch compiling work to\n specialized compiling functions\n :param expr: Expression to compile in string form\n :return: Created AST tree\n \"\"\"\n logic = syntax.logic.copy()\n op_not = logic['not']\n del logic['not']\n for op in logic:\n if expr.startswith(op) or expr.endswith(op):\n raise SyntaxError('Missing expression')\n if any(op in expr for op in logic.values()):\n parts = tuple(\n reversed(tuple(\n lexer.TokenStream(expr).parts(*logic.values()))))\n root = current = None\n while True:\n if len(parts) == 1:\n node = _compile(parts[0])\n if root is None:\n root = node\n current.first = node\n return root\n second, op, *parts = parts\n cls = _get_class(op)\n node = cls(second=_compile(second))\n if root is None:\n root = node\n if current is not None:\n current.first = node\n current = node\n if op_not in expr:\n index = expr.find(op_not)\n expression = expr[index+len(op_not):]\n tree = Not(_compile(expression[len(op_not):]))\n uuid = _get_group_id(expr)\n expr = expr[:index] + uuid\n ast = _compile(expr)\n if isinstance(ast, LoadName):\n return tree\n if not ast.substitute(uuid, tree):\n raise ValueError('Failed substitution')\n return ast\n add = syntax.operators['addition']\n sub = syntax.operators['subtraction']\n if expr.startswith(sub):\n expr = '0' + expr\n elif expr.startswith(add):\n expr = expr[1:]\n if expr[0] in syntax.ops.values():\n raise SyntaxError('Missing operand')\n combinations = ((add+add, add), (add+sub, sub),\n (sub+add, sub), (sub+sub, add))\n while True:\n if all(char not in expr for char, _ in combinations):\n break\n 
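# Collapse doubled signs until none remain: '++' -> '+', '+-' -> '-', '-+' -> '-', '--' -> '+'.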
for match, replace in combinations:\n expr = expr.replace(match, replace)\n if syntax.structure['group_open'] in expr:\n return _compile_advanced_expression(expr)\n for operators in syntax.precedence[-2:]:\n for operator in operators:\n op = syntax.operators[operator.lower()]\n expr = expr.replace(op+add, op)\n if (op + sub) in expr:\n return _compile_expression(expr)\n return _compile_simple_expression(expr)\n\n\ndef _compile_advanced_expression(expr: str) -> 'ASTObject':\n \"\"\"Compile an expression containing brackets,\n either for grouping or for a function call.\n \"\"\"\n group_open = syntax.structure['group_open']\n group_close = syntax.structure['group_close']\n groups = {}\n while True:\n result = re.search(syntax.grammar['function_call'], expr)\n if result is None:\n break\n stop = result.end()\n func_name = result.group('name')\n arg_list = result.group('args')\n opened = 0\n closed = 0\n for i, char in enumerate(result.group()):\n if char == group_open:\n opened += 1\n elif char == group_close:\n closed += 1\n if opened > 0 and opened == closed:\n stop = result.start() + i + 1\n arg_list = result.group()[len(func_name):i+1]\n break\n arg_list = arg_list[1:-1].split(',')\n pos_args = []\n kw_args = []\n found_kw = False\n match_kw = (syntax.constructs['varname'] + '=' +\n syntax.constructs['expr'])\n for arg in arg_list:\n if re.search(match_kw, arg) is not None:\n name, expression = arg.split('=', maxsplit=1)\n kw_args.append((name, process(expression)))\n found_kw = True\n else:\n if found_kw:\n msg = 'Positional argument following keyword argument'\n raise SyntaxError(msg)\n pos_args.append(process(arg))\n uuid = _get_group_id(expr)\n expr = expr[:result.start()] + uuid + expr[stop:]\n groups[uuid] = FunctionCall(func_name, pos_args, kw_args)\n cap = 0\n while group_open in expr:\n cap += 1\n if cap == 20:\n raise ValueError('cap reached')\n opened = 0\n closed = 0\n group = ''\n start, stop = 0, 0\n for index, char in enumerate(expr):\n if char == group_open:\n opened += 1\n if opened == 1:\n start = index\n if opened > 1:\n group += char\n elif char == group_close:\n closed += 1\n if opened > 1 and opened != closed:\n group += char\n elif opened > 0:\n group += char\n if closed > opened:\n raise SyntaxError('Closing bracket before opening bracket')\n elif opened > 0 and opened == closed:\n stop = index\n break\n uuid = _get_group_id(expr)\n expr = expr[:start] + uuid + expr[stop+1:]\n if group_open in group:\n groups[uuid] = _compile_advanced_expression(group)\n else:\n groups[uuid] = _compile_expression(group)\n ast = _compile_expression(expr)\n for uuid, tree in groups.items():\n if isinstance(ast, LoadName):\n if ast.varname == uuid:\n ast = tree\n elif not ast.substitute(uuid, tree):\n raise ValueError('Substitution failed')\n return ast\n\n\ndef _compile_expression(expr: str) -> 'ASTObject':\n \"\"\"Compile an expression with possible negative right\n operands.\n :param expr: expression to be compiled\n :return: Created tree\n \"\"\"\n # We'll only have to handle a negative right operand\n # for the two highest priority groups of operators.\n # a/-b = a/(0-b)\n # a / -b^3*5 = a / (0-b^3) * 5\n # a / -b+3 = a / (0-b) + 3\n\n # a / b / c should be interpreted as (a/b)/c\n # a^b^c should be interpreted as a^(b^c)\n sub = syntax.operators['subtraction']\n groups = {}\n for group in syntax.precedence[-2:]:\n for op in group:\n op = syntax.operators[op.lower()]\n while True:\n index = expr.find(op + sub)\n if index == -1:\n break\n group = sub\n index += 1\n for i, char in 
enumerate(expr[index+1:], index):\n if char in syntax.ops.values():\n break\n group += char\n uuid = _get_group_id(expr)\n expr = expr[:index] + uuid + expr[i+2:]\n groups[uuid] = _compile_simple_expression('0'+group)\n ast = _compile_simple_expression(expr)\n for uuid, tree in groups.items():\n if isinstance(ast, LoadName):\n if ast.varname == uuid:\n ast = tree\n elif not ast.substitute(uuid, tree):\n raise ValueError('Substitution failed')\n return ast\n\n\n_operators = syntax.precedence\n\n\ndef _compile_simple_expression(expr: str, ops=_operators) -> 'ASTObject':\n if sum(expr.count(op) for op in syntax.ops.values()) == 0:\n for name, pattern in syntax.literals.items():\n result = re.search(pattern, expr)\n if result is not None and result.group() == expr:\n return globals()[name.capitalize()+'Literal'](expr)\n result = re.search(syntax.constructs['varname'], expr)\n if result is not None and result.group() == expr:\n return LoadName(expr)\n raise SyntaxError(expr)\n elif len(ops) == 1:\n # Chained powers are interpreted in reverse order,\n # so the implementation for parsing them is quite different.\n # Although both algorithms are almost identical,\n # it was easier to write an alternate implementation to\n # prevent hard-to-follow, kind of hacky code,\n # or code with a lot of if statements.\n stream = lexer.TokenStream(expr)\n root = prev = current = None\n split_ops = [syntax.ops[op.lower()] for op in ops[0]]\n parts = list(stream.parts(*split_ops))\n while True:\n if len(parts) == 1:\n current.second = _compile_simple_expression(parts[0], ())\n return root\n first, op, *parts = parts\n if not parts:\n break\n cls = _get_class(op)\n current = cls(first=_compile_simple_expression(first, ()))\n if root is None:\n root = current\n if prev is not None:\n prev.second = current\n prev = current\n return root\n else:\n stream = lexer.TokenStream(expr)\n root = prev = current = None\n split_ops = [syntax.ops[op.lower()] for op in ops[0]]\n if sum(op in expr for op in split_ops) == 0:\n return _compile_simple_expression(expr, ops[1:])\n parts = list(reversed(list(stream.parts(*split_ops))))\n while True:\n if len(parts) == 1:\n current.first = _compile_simple_expression(parts[0], ops[1:])\n return root\n second, op, *parts = parts\n cls = _get_class(op)\n current = cls(second=_compile_simple_expression(second, ops[1:]))\n if root is None:\n root = current\n if prev is not None:\n prev.first = current\n prev = current\n return root\n\n\n##############################################################################\n##############################################################################\n# Basic AST object\n##############################################################################\n\n\nclass ASTObject(object):\n \"\"\"Base class for all AST object.\n Implements substitution protocol.\"\"\"\n\n _offset = 0\n\n fields = ()\n\n def __init__(self):\n self._children = {}\n\n def __repr__(self):\n attributes = [repr(self._children[attr]) for attr in self.fields]\n return '{}({})'.format(type(self).__name__, ', '.join(attributes))\n\n def __pprint__(self, indent=0):\n return '{}{}({})'.format(\n indent*'\\t', type(self).__name__,\n repr(self._children[self.fields[0]]))\n\n # TODO: Since there is a new substitution protocol,\n # TODO: the following two methods (__getattr__ and __setattr__),\n # TODO: might not be needed anymore.\n\n def __setattr__(self, attr, value):\n if attr in self.fields:\n self._children[attr] = value\n else:\n super().__setattr__(attr, value)\n\n def 
__getattr__(self, attr):\n if attr in self.fields:\n return self._children[attr]\n raise AttributeError('{} object has no such attribute \"{}\"'.format(\n type(self).__name__, attr))\n\n def substitute(self, name, tree):\n \"\"\"Find a LoadName with .varname attr 'name'\n node and replaces it with 'tree'.\n\n :param name: (str): 'name' attribute of LoadName object.\n :param tree: (ASTObject): tree to replace old LoadName object.\n :return: Boolean indicating if substitution was successful.\n :rtype: bool\n \"\"\"\n for child_name, child in self._children.items():\n if isinstance(child, LoadName):\n if child.varname == name:\n break\n else:\n for child in self._children.values():\n if child.substitute(name, tree):\n return True\n return False\n self._children[child_name] = tree\n return True\n\n##############################################################################\n##############################################################################\n# Specialized AST base-classes\n##############################################################################\n\n\nclass Operator(ASTObject):\n \"\"\"Base class for all comparison/operator AST nodes.\n\n :param first: Left operand\n :param second: Right operand\n \"\"\"\n\n fields = ('first', 'second')\n\n def __init__(self, first=None, second=None):\n super().__init__()\n self.first = first\n self.second = second\n\n def __repr__(self):\n attributes = [repr(self._children[attr]) for attr in self.fields]\n return '{}({})'.format(type(self).__name__, ', '.join(attributes))\n\n def __pprint__(self, indent=0):\n children = (_pprint(self._children[attr], indent+1)\n for attr in self.fields)\n return '{}{}(\\n{}\\n{})'.format(\n indent*'\\t', type(self).__name__,\n '\\n'.join(children), '\\t'*indent)\n\n def encode(self):\n if self.first is None or self.second is None:\n raise ValueError('{} object is missing arguments'.format(\n type(self).__name__))\n return b''.join([self.code_id[0],\n self.first.encode(), self.second.encode()])\n\n\nclass AdvancedOperator(Operator):\n \"\"\"Operator which encodes its own coercion types.\n\n :param first: Left operand\n :param second: Right operand\n :param coerce_1: Type to coerce left operand to before\n performing the operation.\n :param coerce_2: Type to coerce right operand to before\n performing the operation.\n \"\"\"\n\n def __init__(self, first=None, second=None, coerce_1=0, coerce_2=0):\n super().__init__(first, second)\n self.coerce_1 = coerce_1\n self.coerce_2 = coerce_2\n\n def __pprint__(self, indent=0):\n children = (_pprint(self._children[attr], indent+1)\n for attr in self.fields)\n return '{}{}(\\n{}\\n{})'.format(\n indent*'\\t', type(self).__name__,\n '\\n'.join(children), '\\t'*indent)\n\n def encode(self):\n attributes = (self.first, self.second, self.coerce_1, self.coerce_2)\n if any(attr is None for attr in attributes):\n raise ValueError('{} object is missing arguments'.format(\n type(self).__name__))\n return b''.join([self.code_id[0],\n struct.pack('>BB', self.coerce_1, self.coerce_2),\n self.first.encode(), self.second.encode()])\n\n\nclass Literal(ASTObject):\n \"\"\"Literal object.\n\n :param value: (str): String representation of the literal.\n \"\"\"\n\n def __init__(self, value):\n super().__init__()\n self.value = value\n\n def __repr__(self):\n return '{}({})'.format(type(self).__name__, self.value)\n\n def __pprint__(self, indent=0):\n return '\\t'*indent + repr(self)\n\n def encode(self):\n encoded = self.value.encode('ascii')\n return b''.join([self.code_id[0],\n 
struct.pack('>B', len(encoded)), encoded])\n\n\n##############################################################################\n##############################################################################\n# Easy AST classes\n##############################################################################\n\n\n# Comparison nodes\n\nclass Equal(Operator):\n code_id = bytecode.names['EQUAL']\n\nclass NotEqual(Operator):\n code_id = bytecode.names['NOT_EQUAL']\n\nclass Smaller(Operator):\n code_id = bytecode.names['SMALLER']\n\nclass SmallerOrEqual(Operator):\n code_id = bytecode.names['SMALLER_OR_EQUAL']\n\nclass Greater(Operator):\n code_id = bytecode.names['GREATER']\n\nclass GreaterOrEqual(Operator):\n code_id = bytecode.names['GREATER_OR_EQUAL']\n\n\n# Logical operators\nclass And(Operator):\n code_id = bytecode.names['AND']\n\nclass Or(Operator):\n code_id = bytecode.names['OR']\n\n\n# Regular operators\n\nclass Addition(AdvancedOperator):\n code_id = bytecode.names['ADDITION']\n\nclass Subtraction(AdvancedOperator):\n code_id = bytecode.names['SUBTRACTION']\n\nclass Multiplication(AdvancedOperator):\n code_id = bytecode.names['MULTIPLICATION']\n\nclass Division(AdvancedOperator):\n code_id = bytecode.names['DIVISION']\n\nclass Floor(AdvancedOperator):\n code_id = bytecode.names['FLOOR']\n\nclass Ceil(AdvancedOperator):\n code_id = bytecode.names['CEIL']\n\nclass Mod(AdvancedOperator):\n code_id = bytecode.names['MOD']\n\nclass Power(AdvancedOperator):\n code_id = bytecode.names['POWER']\n\n\n# Literals\n\nclass IntLiteral(Literal):\n code_id = bytecode.names['INT_LITERAL']\n type_code = TYPE = 'INT'\n\nclass FloatLiteral(Literal):\n code_id = bytecode.names['FLOAT_LITERAL']\n type_code = TYPE = 'FLOAT'\n\nclass FractionLiteral(Literal):\n code_id = bytecode.names['FRACTION_LITERAL']\n type_code = TYPE = 'FRACTION'\n\n\n##############################################################################\n##############################################################################\n# More specialized AST classes\n##############################################################################\n\n\nclass Not(ASTObject):\n\n code_id = bytecode.names['NOT']\n\n def __init__(self, expr):\n super().__init__()\n self.expr = expr\n\n def __pprint__(self, indent=0):\n return '{}{}(\\n{}{})'.format(\n indent*'\\t', type(self).__name__,\n _pprint(self.expr, indent+1), indent*'\\t'\n )\n\n def encode(self):\n return self.code_id[0] + self.expr.encode()\n\n\nclass LoadName(ASTObject):\n \"\"\"Variable/name loading object\n\n :param varname: (str): Name of variable to be loaded.\n \"\"\"\n\n code_id = bytecode.names['LOAD_NAME']\n\n def __init__(self, varname):\n super().__init__()\n self.varname = varname\n\n def __repr__(self):\n return '{}({})'.format(type(self).__name__, self.varname)\n\n def __pprint__(self, indent=0):\n return '\\t'*indent + repr(self)\n\n def encode(self):\n encoded = self.varname.encode('utf8')\n return b''.join([self.code_id[0],\n struct.pack('>B', len(encoded)), encoded])\n\n\nclass FunctionCall(ASTObject):\n \"\"\"AST node representing a function call\"\"\"\n\n code_id = bytecode.names['CALL_OBJECT']\n\n # encoded as:\n # OP_CODE + UBYTE:NAME_LEN + NAME + UBYTE:POS_ARG_COUNT +\n # POS_ARG_EXPRESSION_LIST + UBYTE:KW_ARG_COUNT +\n # GROUPS OF(UBYTE:NAME_LEN + NAME + EXPRESSION)\n\n def __init__(self, func_name, pos_args, kw_args):\n super().__init__()\n self.func = func_name\n self.args = pos_args\n self.kwargs = kw_args\n\n def __repr__(self):\n return '{}({}, {}, 
{})'.format(type(self).__name__,\n self.func, self.args, self.kwargs)\n\n def __pprint__(self, indent=0):\n return '{0}{1}(\\n{5}{2}\\n{3}\\n{4}\\n{0})'.format(\n indent*'\\t',\n type(self).__name__,\n self.func,\n '\\n'.join(_pprint(arg, indent+1) for arg in self.args),\n '\\n'.join('{}{} = {}'.format(\n (indent+1)*'\\t', name, _pprint(arg))\n for name, arg in self.kwargs),\n (indent+1)*'\\t'\n )\n\n def encode(self):\n args = bytearray().join([\n struct.pack('>B', len(self.args)),\n b''.join(arg.encode() for arg in self.args),\n struct.pack('>B', len(self.kwargs))\n ])\n for name, expr in self.kwargs:\n encoded_name = name.encode('utf8')\n args += b''.join([\n struct.pack('>B', len(encoded_name)),\n encoded_name,\n expr.encode()\n ])\n encoded_name = self.func.encode('utf8')\n return b''.join([\n self.code_id[0],\n struct.pack('>B', len(encoded_name)),\n encoded_name,\n args\n ])\n\n\n# Import this as last, to avoid circular import problems\nfrom . import lan_ast\n","sub_path":"compiler/expr_ast.py","file_name":"expr_ast.py","file_ext":"py","file_size_in_byte":28187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"552513544","text":"# --------\r\n# Imports\r\n# --------\r\n\r\nfrom os import environ\r\n\r\nfrom flask import Flask, render_template, request, redirect, url_for, flash , jsonify, Response,g\r\nfrom flask_oidc import OpenIDConnect\r\nfrom okta import UsersClient\r\nfrom sqlalchemy import create_engine\r\nfrom sqlalchemy.orm import sessionmaker\r\nfrom database_setup import Base, UserInfo, BookStore, BookList\r\nfrom sqlalchemy.pool import SingletonThreadPool\r\nfrom flask import session as login_session\r\nimport random, string\r\nfrom oauth2client.client import flow_from_clientsecrets\r\nfrom oauth2client.client import FlowExchangeError\r\nimport httplib2\r\nimport json\r\nfrom flask import make_response \r\nimport requests\r\n\r\n#Refrence : https://developer.okta.com/blog/2018/07/23/build-a-simple-crud-app-with-flask-and-python\r\n# Flask instance\r\napp = Flask(__name__)\r\n\r\n# Client_id\r\nenviron[\"OKTA_ORG_URL\"] = \"https://dev-809078.oktapreview.com\"\r\nenviron[\"OKTA_AUTH_TOKEN\"]= \"00WjYmYkheuVGAy-Hul85b80VdTJkATDIvMaDxkih9\"\r\nenviron[\"SECRET_KEY\"]=\"notsecret\"\r\n\r\n# secret credentials for Okta connection\r\napp.config[\"OIDC_CLIENT_SECRETS\"] = \"openidconnect_secrets.json\"\r\napp.config[\"OIDC_COOKIE_SECURE\"] = False\r\napp.config[\"OIDC_CALLBACK_ROUTE\"] = \"/oidc/callback\"\r\napp.config[\"OIDC_SCOPES\"] = [\"openid\", \"email\", \"profile\"]\r\napp.config[\"SECRET_KEY\"] = environ.get(\"SECRET_KEY\")\r\napp.config[\"OIDC_ID_TOKEN_COOKIE_NAME\"] = \"oidc_token\"\r\n\r\n# instantiate OpenID client to handle user session\r\noidc = OpenIDConnect(app)\r\n# Okta client will determine if a user has an appropriate account\r\nokta_client = UsersClient(environ.get(\"OKTA_ORG_URL\"),\r\n environ.get(\"OKTA_AUTH_TOKEN\"))\r\n\r\n\r\n\r\n# Connet to database\r\nengine = create_engine('sqlite:///bookstore.db?check_same_thread=False')\r\nBase.metadata.bind = engine\r\n# Create session\r\nDBSession = sessionmaker(bind = engine)\r\nsession = DBSession()\r\n\r\n@app.before_request\r\ndef before_request():\r\n if oidc.user_loggedin:\r\n g.user = okta_client.get_user(oidc.user_getfield(\"sub\"))\r\n else:\r\n g.user = None\r\n\r\n\r\n@app.route(\"/login\")\r\n#@oidc.require_login\r\ndef showLogin():\r\n user = session.query(User).filter_by(\r\n email=oidc.user_getfield('email')).first()\r\n if not user:\r\n u = 
User(\r\n name=oidc.user_getfield('name'),\r\n email=oidc.user_getfield('email')\r\n )\r\n session.add(u)\r\n session.commit()\r\n session.close()\r\n return redirect(url_for(\".showBookStores\"))\r\n\r\n\r\n@app.route(\"/logout\")\r\ndef logout():\r\n oidc.logout()\r\n return redirect(url_for(\".showBookStores\"))\r\n \r\n# CREATE USER SESSION\r\ndef createUser(login_session):\r\n newUser = User(name=login_session['username'], email=login_session['email'])\r\n session.add(newUser)\r\n session.commit()\r\n user = session.query(User).filter_by(email=login_session['email']).one()\r\n return user.id\r\n\r\n\r\n# JSON API's\r\n@app.route('/Bookstore/JSON')\r\ndef showBookStoresJSON():\r\n BookStores = session.query(BookStore).all()\r\n return jsonify(BookStore=[i.serialize for i in BookStores])\r\n\r\n\r\n@app.route('/Bookstore//bookslist/JSON')\r\ndef showBookListJSON(bookstore_id):\r\n bookstore = session.query(BookStore).filter_by(id=bookstore_id).one()\r\n books = session.query(BookList).filter_by(bookstore_id= bookstore_id).all()\r\n return jsonify(BookList= [i.serialize for i in books])\r\n\r\n\r\n#-------\r\n# Flask Rounting\r\n#-------\r\n\r\n# maing page\r\n############ WORKS#########\r\n@app.route('/')\r\n@app.route('/bookstore/')\r\n@oidc.require_login\r\ndef showBookStores():\r\n user = session.query(User).filter_by(email=g.oidc_id_token['email']).first()\r\n if user:\r\n BookStores = session.query(BookStore).filter_by(user_id=user.id).all()\r\n return render_template('BookStore.html',BookStores=BookStores)\r\n else:\r\n return redirect(url_for('showLogin'))\r\n# add new bookstore\r\n############ WORKS ##########\r\n@app.route('/bookstore/new',methods=['GET','POST'])\r\n@oidc.require_login\r\ndef newBookStore(): \r\n if request.method == 'POST':\r\n user = session.query(User).filter_by(email=g.oidc_id_token['email']).one()\r\n newBookStore = BookStore(name=request.form['name'],user_id=user.id)\r\n session.add(newBookStore)\r\n session.commit()\r\n flash(\"new bookstore was added!\")\r\n return redirect(url_for('showBookStores'))\r\n else:\r\n return render_template('NewBookStore.html')\r\n#edite bookstore\r\n######### WORKS#######\r\n@app.route('/bookstore//edite',methods=['GET', 'POST'])\r\n@oidc.require_login\r\ndef editBookstore(bookstore_id):\r\n user = session.query(User).filter_by(email=g.oidc_id_token['email']).one()\r\n if user.email not in g.oidc_id_token['email']:\r\n return \"\"\r\n \r\n store = session.query(BookStore).filter_by(id=bookstore_id).first()\r\n if user.id != store.user_id:\r\n return \"\"\r\n \r\n if request.method == 'POST': \r\n store.name = request.form['name']\r\n session.add(store)\r\n flash(\"Bookstore Sucssfully Edited %s\" %store.name)\r\n session.commit()\r\n return redirect(url_for('showBookStores'))\r\n else:\r\n return render_template('EditeBookStore.html',bookstore_id = bookstore_id, bookstore=store)\r\n#delete booksotore\r\n###### WORKS ######\r\n@app.route('/bookstore//delete',methods=['GET', 'POST'])\r\n@oidc.require_login\r\ndef deleteBookstore(bookstore_id):\r\n user = session.query(User).filter_by(email=g.oidc_id_token['email']).one()\r\n if user.email not in g.oidc_id_token['email']:\r\n return \"\"\r\n \r\n store = session.query(BookStore).filter_by(id=bookstore_id).first()\r\n if user.id != store.user_id:\r\n return \"\"\r\n \r\n if request.method == 'POST': \r\n session.delete(store)\r\n books = session.query(BookList).filter_by(user_id = user.id).all()\r\n for i in books:\r\n session.delete(i)\r\n session.commit()\r\n 
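# Note: the cleanup above deletes every BookList row owned by the user (filter_by(user_id=...)), not only the books shelved in this store; filtering by bookstore_id would scope the delete to the removed store.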
flash(\"Bookstore Sucssfully Edited %s\" %store.name)\r\n session.commit()\r\n return redirect(url_for('showBookStores'))\r\n else:\r\n return render_template('DeleteBookStore.html',bookstore_id = bookstore_id, bookstore=store)\r\n\r\n#list books in bookstore \r\n############ WORKS#########\r\n@app.route('/bookstore//')\r\n@app.route('/bookstore//bookslist')\r\ndef showBookList(bookstore_id):\r\n user = session.query(User).filter_by(email=g.oidc_id_token['email']).one()\r\n if user.email not in g.oidc_id_token['email']:\r\n return \"\"\r\n \r\n store = session.query(BookStore).filter_by(id=bookstore_id).first()\r\n if user.id != store.user_id:\r\n return \"\"\r\n \r\n bookstore = session.query(BookStore).filter_by(id=bookstore_id).one()\r\n books = session.query(BookList).filter_by(bookstore_id= bookstore_id).all()\r\n # print BookList\r\n return render_template('BookList.html',bookstore=bookstore,books=books)\r\n\r\n#Add new book\r\n####### Kinda of WORKS######\r\n\r\n@app.route('/bookstore//new/',methods=['GET','POST'])\r\n@oidc.require_login\r\ndef newBookItem(bookstore_id):\r\n user = session.query(User).filter_by(email=g.oidc_id_token['email']).one()\r\n if user.email not in g.oidc_id_token['email']:\r\n return \"\"\r\n \r\n store = session.query(BookStore).filter_by(id=bookstore_id).first()\r\n if user.id != store.user_id:\r\n return \"\"\r\n if request.method == 'POST':\r\n newBook = BookList(book_name=request.form['book'],\r\n author_name=request.form['author'],\r\n description= request.form['description'],\r\n book_category= request.form['catogory'],\r\n bookstore_id=bookstore_id, user_id=user.id)\r\n session.add(newBook)\r\n session.commit()\r\n flash(\"new book was addedd!\")\r\n return redirect(url_for('showBookList',bookstore_id=bookstore_id))\r\n else:\r\n return render_template('NewBook.html',bookstore_id=bookstore_id)\r\n# if I add the login if return render_template('NewBook.html',bookstore = bookstore)\r\n\r\n#To edite the book detailes\r\n######### WORKS ###########\r\n@app.route('/bookstore//list//edit',methods=['GET','POST'])\r\n@oidc.require_login\r\ndef editListItem(bookstore_id,booklist_id):\r\n user = session.query(User).filter_by(email=g.oidc_id_token['email']).one()\r\n if user.email not in g.oidc_id_token['email']:\r\n return \"\"\r\n \r\n editebook = session.query(BookList).filter_by(id=booklist_id).one()\r\n if user.id != editebook.user_id:\r\n return \"\"\r\n if request.method == 'POST':\r\n if request.form['book']:\r\n editebook.book_name=request.form['book']\r\n if request.form['author']:\r\n editebook.author_name=request.form['author']\r\n if request.form['description']:\r\n editebook.description= request.form['description']\r\n if request.form['catogory']:\r\n editebook.book_category= request.form['catogory']\r\n session.add(editebook)\r\n session.commit()\r\n flash(\"Book had been edited\")\r\n return redirect(url_for('showBookList', bookstore_id=bookstore_id))\r\n else:\r\n return render_template('EditeBook.html',booklist_id=booklist_id,bookstore_id = bookstore_id, booklist=editebook)\r\n#Delete Book \r\n######### WORKS ########\r\n@app.route('/bookstore//list//delete',methods=['GET','POST'])\r\n@oidc.require_login\r\ndef deleteListeItem(bookstore_id,booklist_id):\r\n user = session.query(User).filter_by(email=g.oidc_id_token['email']).one()\r\n if user.email not in g.oidc_id_token['email']:\r\n return \"\"\r\n\r\n booktodelete = session.query(BookList).filter_by(id=booklist_id).one()\r\n if user.id != booktodelete.user_id:\r\n return \"\"\r\n \r\n if 
request.method == 'POST':\r\n session.delete(booktodelete)\r\n session.commit()\r\n return redirect(url_for('showBookList', bookstore_id=bookstore_id))\r\n else:\r\n return render_template('DeleteBook.html',booklist_id=booklist_id,bookstore_id = bookstore_id, booklist=booktodelete)\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run()\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"170412198","text":"#6\nprint('Задание 6. В данном трехзначном числе переставьте цифры так, чтобы новое число оказалось наибольшим из возможных.')\na = float (input ('Введите трёхзначное число: '))\n\nwhile (-100<a<100 or a>999 or a<-999):\n print ('Вы ввели не трёхзначное число')\n a = float (input ('Введите ТРЁХЗНАЧНОЕ число: '))\nif (a<0):\n a = -a\nif (a>99 and a<1000):\n b = ((a % 1000) // 100)\n b1 = ((a % 100) // 10)\n b2 = (a % 10) \n\n if (b>b2 and b>b1):\n if (b1>b2):\n b = b * 100\n b1 = b1 * 10\n b2 = b2 \n elif (b2>b1 or b2==b1):\n b = b * 100\n b1 = b1\n b2 = b2 * 10\n elif(b1>b2 and b1>b):\n if (b2>b):\n b = b\n b1 = b1 * 100\n b2 = b2 * 10\n elif (b>b2 or b==b2):\n b = b\n b1 = b1 * 100\n b2 = b2 * 10\n \n elif(b2>b1 and b2>b):\n if (b1>b): \n b = b\n b1 = b1 * 10\n b2 = b2 * 100\n elif (b>b1 or b1==b):\n b = b * 10\n b1 = b1\n b2 = b2 * 100\n else:\n b = b\n b1 = b1 * 10\n b2= b2 * 100\n res = b + b1 + b2\n print ('Ответ:') \n print (res)\ninput ('')\n","sub_path":"files/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"267215947","text":"from flask import Blueprint,flash,redirect,url_for, jsonify\nfrom flask_login import login_required,current_user\nfrom wtforms.ext import sqlalchemy\n\nfrom app import db\nfrom app.models import Post\n\n\n'''\n封装了所有与博文相关的视图函数\n'''\n\n# 定义蓝图\npostbp = Blueprint('postbp', __name__)\n\n@postbp.route('/showall/')\ndef showall():\n return 'showall'\\\n\n@postbp.route('/post/')\n@login_required\ndef post():\n flash('发布成功!')\n return redirect(url_for('mainbp.index'))\n\n# 切换博文的收藏状态\n@postbp.route('/switch_collect//')\n@login_required\ndef switch_collect(pid):\n # 根据id找出博文\n post = Post.query.get(pid)\n\n # 切换收藏状态\n if current_user.is_collected(pid):\n\n # 如果已收藏就remove掉\n current_user.collections.remove(post)\n # 已经配置了自动提交\n # db.session.add(current_user)\n\n # 给前端返回json\n return jsonify({'result':False})\n else:\n # 如果未收藏就添加到收藏中\n current_user.collections.append(post)\n # 已经配置了自动提交\n # db.session.add(current_user)\n\n # 给前端返回json\n return jsonify({'result': True})\n\n\n\n","sub_path":"app/views/posts.py","file_name":"posts.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"147774348","text":"class Solution:\n def pacificAtlantic(self, heights: List[List[int]]) -> List[List[int]]:\n row = len(heights)\n col = len(heights[0])\n exploreA = [[False for i in range(col)] for j in range(row)]\n exploreB = [[False for i in range(col)] for j in range(row)]\n\n directions = ([0, 1], [0, -1], [1, 0], [-1, 0])\n\n def search(i, j, explore, h):\n if not explore[i][j] and heights[i][j] < h:\n explore[i][j] = False\n return\n explore[i][j] = True\n for ii, jj in directions:\n new_i, new_j = i+ii, j+jj\n if 0 <= new_i < row and 0 <= new_j < col and not explore[new_i][new_j]:\n search(new_i, new_j, explore, heights[i][j])\n\n for i in 
range(row):\n search(i, 0, exploreA, -1)\n search(i, col-1, exploreB, -1)\n for j in range(col):\n search(0, j, exploreA, -1)\n search(row-1, j, exploreB, -1)\n\n res = []\n for i in range(row):\n for j in range(col):\n if exploreA[i][j] and exploreB[i][j]:\n res.append([i, j])\n return res\n","sub_path":"recursion/417_Pacific_Atlantic_Water_Flow.py","file_name":"417_Pacific_Atlantic_Water_Flow.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"115033690","text":"import os\nimport util_fasta\nfrom Bio.SeqUtils import GC\nimport csv\nfrom sklearn.externals.joblib import dump, load\n#import random\n#import string\n#import shutil\n\ndef generar_modelo_CPAT(archivo_lncRNA, archivo_PCT, archivo_CDS, carpeta_cpat):\n _generar_modelo_CPAT_hexamer(archivo_lncRNA, archivo_CDS, carpeta_cpat)\n _generar_modelo_CPAT_logit(archivo_lncRNA, archivo_PCT, carpeta_cpat)\n\ndef _generar_modelo_CPAT_hexamer(archivo_lncRNA, archivo_CDS, carpeta_cpat):\n script = \"~/anaconda3/bin/make_hexamer_tab.py\"\n fasta_cds = \"'\" + archivo_CDS + \"'\" \n fasta_lncRNA = \"'\" + archivo_lncRNA + \"'\"\n salida = \"'\" + carpeta_cpat + \"/hexamer.tsv\" + \"'\"\n comando = \"{} -c {} -n {} > {}\".format(script, fasta_cds, fasta_lncRNA, salida)\n os.system(comando)\n \ndef _generar_modelo_CPAT_logit(archivo_lncRNA, archivo_PCT, carpeta_cpat):\n script = \"~/anaconda3/bin/make_logitModel.py\"\n hexamer = \"'\" + carpeta_cpat + \"/hexamer.tsv\" + \"'\"\n fasta_pct = \"'\" + archivo_PCT + \"'\" \n fasta_lncRNA = \"'\" + archivo_lncRNA + \"'\"\n salida = \"'\" + carpeta_cpat + \"/fold\" + \"'\"\n comando = \"{} -x {} -c {} -n {} -o {}\".format(script, hexamer, fasta_pct, fasta_lncRNA, salida)\n os.system(comando)\n \ndef ejecutar_diamond(archivo_entrada, diamond_db, archivo_salida):\n script = \"~/anaconda3/bin/diamond\"\n diamond_bd = \"'\" + diamond_db + \"'\"\n salida = \"'\" + archivo_salida + \"'\"\n comando = \"{} blastx -d {} -q {} -o {} -k 5 --gapopen 11 --gapextend 1 --more-sensitive -f 6 qseqid pident length qframe qstart qend sstart send evalue bitscore\".format(script, diamond_bd, archivo_entrada, salida)\n os.system(comando)\n\ndef ejecutar_cpat(archivo_entrada, carpeta_cpat, archivo_salida):\n script = \"~/anaconda3/bin/cpat.py\"\n logit = \"'\" + carpeta_cpat + \"/fold.logit.RData\" + \"'\"\n hexamer = \"'\" + carpeta_cpat + \"/hexamer.tsv\" + \"'\"\n salida = \"'\" + archivo_salida + \"'\"\n comando = \"{} -g {} -d {} -x {} -o {}\".format(script, archivo_entrada, logit, hexamer, salida)\n os.system(comando)\n \ndef generar_features_base(archivo_entrada, archivo_cpat, archivo_diamond, archivo_salida):\n transcritos = util_fasta.leer_fasta(archivo_entrada)\n transcript_dict = {}\n for k in transcritos.keys():\n transcript_dict[k.strip().upper()] = {\n \"length\" : len(transcritos[k]),\n \"gc\" : GC(transcritos[k]),\n \"orf_length\" : 0,\n \"orf_coverage\" : float(0),\n \"hexamer_score\" : float(0),\n \"fickett_score\" : float(0),\n \"identity\" : float(0),\n \"align_length\" : float(0),\n \"align_perc_len\" : float(0),\n \"align_perc_orf\" : float(0)\n }\n \n #adaptado de https://github.com/gbgolding/crema/blob/master/bin/featuresetup_module.py\n with open(archivo_cpat, \"r\") as f:\n cpat_reader = csv.reader(f, delimiter=(\"\\t\"))\n next(cpat_reader, None) # skip header\n for row in cpat_reader:\n cod_secuencia = row[0]\n transcript_dict[cod_secuencia][\"orf_length\"] = float(row[2])\n 
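# Columns of the tab-separated CPAT report, as indexed here and below: row[0] = sequence id, row[2] = ORF size, row[3] = Fickett score, row[4] = hexamer score.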
transcript_dict[cod_secuencia][\"orf_coverage\"] = float(row[2])/float(transcript_dict[cod_secuencia][\"length\"])\n transcript_dict[cod_secuencia][\"fickett_score\"] = float(row[3])\n transcript_dict[cod_secuencia][\"hexamer_score\"] = float(row[4])\n \n with open(archivo_diamond, \"r\") as f:\n tab_reader = csv.reader(f, delimiter=(\"\\t\"))\n line_1 = next(tab_reader)\n first = line_1[0].upper()\n score = [float(line_1[9])]\n with_len = [[first, float(line_1[1]), float(line_1[2]), float(line_1[3]), float(line_1[9])]] # name identity length frame score\n for row in tab_reader:\n if row[0].upper() == first:\n score.append(float(row[9]))\n with_len.append([row[0].upper(), float(row[1]), float(row[2]), float(row[3]), float(row[9])])\n else:\n transcript_dict[first][\"identity\"] = float(0)\n transcript_dict[first][\"align_length\"] = float(0)\n max_value = max(score)\n max_index = score.index(max_value)\n max_len_ident = with_len[max_index]\n if max_len_ident[3] > 0:\n transcript_dict[first][\"identity\"] = float(max_len_ident[1])\n transcript_dict[first][\"align_length\"] = float(max_len_ident[2])\n transcript_dict[first][\"align_perc_len\"] = float(transcript_dict[first][\"align_length\"]/transcript_dict[first][\"length\"])\n transcript_dict[first][\"align_perc_orf\"] = (0 if transcript_dict[first][\"orf_length\"] == 0 else float(transcript_dict[first][\"align_length\"]/transcript_dict[first][\"orf_length\"]))\n score = [float(row[9])]\n first = row[0].upper()\n with_len = [[first, float(row[1]), float(row[2]), float(row[3]), float(row[9])]]\n transcript_dict[first][\"identity\"] = float(0)\n transcript_dict[first][\"align_length\"] = float(0)\n max_value = max(score)\n max_index = score.index(max_value)\n max_len_ident = with_len[max_index]\n if max_len_ident[3] > 0:\n transcript_dict[first][\"identity\"] = float(max_len_ident[1])\n transcript_dict[first][\"align_length\"] = float(max_len_ident[2])\n #fin de código adaptado de https://github.com/gbgolding/crema/blob/master/bin/featuresetup_module.py\n \n dump(transcript_dict, archivo_salida)\n\ndef generar_features(archivo_entrada, features_base, archivo_cpat, archivo_salida):\n transcritos = util_fasta.leer_fasta(archivo_entrada)\n features_globales = load(features_base)\n transcript_dict = {}\n for k in transcritos.keys():\n transcript_dict[k.strip().upper()] = {\n \"length\" : features_globales[k.strip().upper()][\"length\"],\n \"gc\" : features_globales[k.strip().upper()][\"gc\"],\n \"orf_length\" : features_globales[k.strip().upper()][\"orf_length\"],\n \"orf_coverage\" : features_globales[k.strip().upper()][\"orf_coverage\"],\n \"hexamer_score\" : float(0),\n \"fickett_score\" : float(0),\n \"identity\" : features_globales[k.strip().upper()][\"identity\"],\n \"align_length\" : features_globales[k.strip().upper()][\"align_length\"],\n \"align_perc_len\" : features_globales[k.strip().upper()][\"align_perc_len\"],\n \"align_perc_orf\" : features_globales[k.strip().upper()][\"align_perc_orf\"]\n }\n \n with open(archivo_cpat, \"r\") as f:\n cpat_reader = csv.reader(f, delimiter=(\"\\t\"))\n next(cpat_reader, None) # skip header\n for row in cpat_reader:\n cod_secuencia = row[0].strip().upper()\n if cod_secuencia in transcript_dict:\n transcript_dict[cod_secuencia][\"fickett_score\"] = float(row[3])\n transcript_dict[cod_secuencia][\"hexamer_score\"] = float(row[4])\n \n dump(transcript_dict, archivo_salida)\n","sub_path":"Semana 
09/libs/util_caracteristicas.py","file_name":"util_caracteristicas.py","file_ext":"py","file_size_in_byte":6980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"646265949","text":"\"\"\"SU(N) stuff.\"\"\"\n\nimport numpy as np\n\nfrom sympy import simplify, symbols, ask, Q\nfrom sympy import zeros, eye, sqrt, exp, sin, cos, atan2, re, im, diag, I, Poly, Matrix, flatten, Rational, solve, linsolve\n#from sympy.algebras.quaternion import Quaternion\n#from sympy.physics.matrices import msigma\n\ndef killing(x, y):\n \"\"\"Killing inner product in Lie algebras.\"\"\"\n\n return -Rational(1,2)*(x*y).trace()\n\ndef expand_in_basis(m, basis, real=True):\n \"\"\"Expand matrix in basis.\"\"\"\n\n cs = symbols('c0:{}'.format(len(basis)), real=real)\n eqs = flatten(m-sum(map(lambda c, e: c*e, cs, basis), np.full(m.shape, 0)))\n sol = linsolve(eqs, cs)\n\n if len(sol.args)==1:\n return sol.args[0]\n else:\n raise Exception('No valid solution: {}'.format(sol))\n\nclass LieAlgebra:\n \"\"\"Base class, for subclassing only.\"\"\"\n\n def __init__(self):\n self._generators = None\n\n @property\n def generators(self):\n return self._generators\n\n @generators.setter\n def generators(self, new):\n self._generators = new\n\n @property\n def structure(self):\n \"\"\"Structure constants.\"\"\"\n\n f = np.full((self.dim, self.dim, self.dim), None)\n\n for i,g in enumerate(self.generators):\n for j,h in enumerate(self.generators):\n f[i,j,:] = expand_in_basis(g*h-h*g, self.generators, real=True)\n\n return f.astype(np.complex)\n\n @property\n def killing(self):\n \"\"\"Killing form in coordinates given by generators.\"\"\"\n\n fs = np.array(self.structure) #.astype(np.float)\n\n return np.einsum('kil,ljk->ij', fs, fs)\n\n def adj(self, m):\n \"\"\"Find adjoint representation of matrix m.\"\"\"\n\n comm = lambda x,y: x*y-y*x\n cs = symbols('l0:{}'.format(self.dim), real=True)\n\n x = zeros(self.N)\n for c, e in zip(cs, self.generators):\n x += c*e\n\n y = comm(m,x)\n print(y)\n coords = expand_in_basis(y, self.generators, real=True)\n\n return Matrix([[coords[i].subs({l: 1 if l==c else 0 for l in cs}) for c in cs] for i, _ in enumerate(cs)])\n\n @property\n def roots(self):\n \"\"\"Root system.\"\"\"\n\n hs = [self.adj(g) for g in self.generators if g.is_diagonal()]\n\n return [Matrix(v) for v in filter(any, Matrix([[h.tolist()[i][i] for i in range(self.dim)] for h in hs]).T.tolist())]\n\n @property\n def _roots(self):\n \"\"\"Get root system.\"\"\"\n\n # TODO I'm using a dirty trick - computing commutators of generators\n # What I should do in general is get the adjoint representation\n # of the elements in the cartan subalgebra and diagonalise\n\n root = []\n gs = self.generators\n hs = [g for g in gs if g.is_diagonal()]\n xs = [g for g in gs if not g.is_diagonal()]\n\n for i, x in enumerate(gs):\n\n if x in hs:\n continue\n\n r = []\n\n for h in hs:\n comm = expand_in_basis(h*x-x*h, gs, real=False)\n print(comm)\n rr = [c for j,c in enumerate(comm) if c!=0 or j==i]\n if len(rr)==1:\n r += [rr[0]]\n else:\n break\n\n root += [Matrix(r)] if any(r) else []\n\n return root\n\nclass SL(LieAlgebra):\n \"\"\"Special linear Lie algebra.\"\"\"\n\n def __init__(self, N):\n \"\"\"SL(N)\"\"\"\n\n super(SL, self).__init__()\n\n self.N = N\n self.dim = self.N**2-1\n\n self._generators = self._getDefaultGenerators()\n\n def _getDefaultGenerators(self):\n \"\"\"Standard basis.\"\"\"\n\n gens = [diag(*[int((-1)**(i-j)*int(i==j or i==j+1)) for i in range(self.N)]) for j in 
range(self.N-1)]\n\n for i in range(self.N):\n for j in range(self.N):\n if i!=j:\n eij = Matrix([[int(k==i and l==j) for k in range(self.N)] for l in range(self.N)])\n gens += [eij]\n\n return gens\n\n def __repr__(self):\n return 'sl({})'.format(self.N)\n\nclass SU(LieAlgebra):\n \"\"\"Special unitary Lie algebra.\"\"\"\n\n def __init__(self, N):\n \"\"\"SU(N)\"\"\"\n\n super(SU, self).__init__()\n\n self.N = N\n self.dim = self.N**2-1\n\n self._generators = self._getDefaultGenerators()\n\n def _getDefaultGenerators(self):\n \"\"\"Standard basis.\"\"\"\n\n gens = [I*diag(*[int((-1)**(i-j)*int(i==j or i==j+1)) for i in range(self.N)]) for j in range(self.N-1)]\n\n for i in range(self.N):\n for j in range(i,self.N):\n if i!=j:\n eij = Matrix([[int(k==i and l==j) for k in range(self.N)] for l in range(self.N)])\n gens += [eij.T-eij]\n gens += [I*(eij+eij.T)]\n\n return gens\n\n def __repr__(self):\n return 'su({})'.format(self.N)\n\ndef GellMann(anti=True):\n \"\"\"The (anti-)Hermitian Gell-Mann matrices.\"\"\"\n\n l1 = Matrix([\n [0, 1, 0],\n [1, 0, 0],\n [0, 0, 0]\n ])\n\n l2 = Matrix([\n [0, -I, 0],\n [I, 0, 0],\n [0, 0, 0]\n ])\n\n l3 = Matrix([\n [1, 0, 0],\n [0, -1, 0],\n [0, 0, 0]\n ])\n\n l4 = Matrix([\n [0, 0, 1],\n [0, 0, 0],\n [1, 0, 0]\n ])\n\n l5 = Matrix([\n [0, 0, -I],\n [0, 0, 0],\n [I, 0, 0]\n ])\n\n l6 = Matrix([\n [0, 0, 0],\n [0, 0, 1],\n [0, 1, 0]\n ])\n\n l7 = Matrix([\n [0, 0, 0],\n [0, 0, -I],\n [0, I, 0]\n ])\n\n l8 = Matrix([\n [1, 0, 0],\n [0, 1, 0],\n [0, 0, -2]\n ])\n\n ls = [l1, l2, l3, l4, l5, l6, l7, sqrt(3)*Rational(1,3)*l8]\n\n ls = [sqrt(12)*Rational(1,12)*l for l in ls]\n\n if anti:\n ls = [I*l for l in ls]\n\n return ls\n","sub_path":"h4/sun.py","file_name":"sun.py","file_ext":"py","file_size_in_byte":5769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"605499542","text":"class Solution(object):\n def calculateMinimumHP(self, dungeon):\n \"\"\"\n :type dungeon: List[List[int]]\n :rtype: int\n \"\"\"\n # dp[i][j] is the min blood level from dungeon[i][j] to dungeon[-1][-1] BEFORE entering the room\n m, n = len(dungeon), len(dungeon[0])\n dp = [ [ float('inf') for _ in xrange(n)] for _ in xrange(m)]\n dp[-1][-1] = 1 if dungeon[-1][-1] >= 0 else -dungeon[-1][-1] + 1 # make sure prince is alive there\n for i in xrange(m-2, -1, -1):\n dp[i][n-1] = max(dp[i+1][n-1] - dungeon[i][n-1], 1) # take care dp[i+1][n-1] - dungeon[i][n-1] could be neg, so make 1 as the baseline\n for j in xrange(n-2, -1, -1):\n dp[m-1][j] = max(dp[m-1][j+1] - dungeon[m-1][j], 1)\n for i in xrange(m-2, -1, -1):\n for j in xrange(n-2, -1, -1):\n dp[i][j] = max(min(dp[i+1][j], dp[i][j+1]) - dungeon[i][j], 1)\n return dp[0][0]","sub_path":"174_dugeon_game/prac.py","file_name":"prac.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"57417601","text":"import pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup \nimport numpy as np\nimport os\nimport datetime as dt\nfrom JapanMeteorologicalAgency_dataset import get_JMA_placelist\n\n\ndef get_WN_Forecast(latitude,longitude):\n\n base_url = \"https://weathernews.jp/onebox/%s/%s/\"\n\n r = requests.get(base_url%(latitude,longitude))\n r.encoding = r.apparent_encoding\n\n soup = BeautifulSoup(r.text,features=\"lxml\")\n main = soup.findAll('div',class_='weather-day')\n\n df = []\n for i in main:\n s = BeautifulSoup(str(i),features=\"lxml\")\n\n #date #####################\n date = 
s.findAll('div',class_='weather-day__day')[0]\n date = ( BeautifulSoup(str(date),features=\"lxml\").findAll('p') )[0]\n date = str(date.text).strip() # keep only the tag's inner text\n date = date.split(\", \")[-1]\n month,day = date.split(\" \")\n\n\n #header #####################\n head = s.findAll('div',class_='weather-day__head')[0]\n head = ( BeautifulSoup(str(head),features=\"lxml\").findAll('p') )\n head = [str(h.text) for h in head]\n print(head)\n\n\n #body #####################\n body = s.findAll('div',class_='weather-day__body')[0]\n body = ( BeautifulSoup(str(body),features=\"lxml\").findAll(\"div\",'weather-day__item') )\n body_table = []\n for row in body:\n tmp = row.findAll('p')\n tmp = [str(a.text).strip() for a in tmp] # drop the tag markup, keep each cell's text\n\n r = []\n r.append( tmp[0] )\n r.append( tmp[1] )\n r.append( tmp[2].replace(\"mm/h\",\"\") )\n r.append( tmp[3].replace(\"°F\",\"\") )\n r.append( tmp[4].replace(\"
\",\"\") )\n\n body_table.append(r)\n\n tmp = pd.DataFrame(columns=head,data=body_table)\n tmp[\"Month\"] = month\n tmp[\"Day\"] = day\n\n df.append(tmp)\n\n df = pd.concat(df)\n df = df[[\"Month\",\"Day\",\"Hour\",\"Precip.\",\"Temp.\",\"Wind\"]]\n df = df.rename(columns={\"Precip.\":\"降水量(mm/h)\",\"Temp.\":\"気温(F)\",\"Wind\":\"風\"})\n \n return df\n\ndef get_WN_forecast_4_Allplace(place_list,out_dir):\n \n for idx,place in place_list.iterrows():\n \n dir_name = place[\"pref_name\"]+\"_\"+place[\"block_name\"]\n dir_name = os.path.join(out_dir,dir_name)\n\n if not(os.path.exists(dir_name)):\n os.makedirs(dir_name)\n \n \n latitude = place[\"latitude\"]\n longitude = place[\"longitude\"]\n now = dt.datetime.now()\n \n print(\">> \",place[\"pref_name\"],\"-\",place[\"block_name\"],\" date:\",now)\n try:\n df = get_WN_Forecast(latitude, longitude)\n \n filename = \"%04d-%02d-%02d_%02d.csv\"%(now.year,now.month,now.day,now.hour)\n df.to_csv(os.path.join(dir_name,filename),\n index=False,\n encoding=\"shift-jis\")\n except:\n df = None\n pass\n \n print(df)\n \n\n\nif __name__ == \"__main__\":\n \n # 場所のリスト\n print(\"getting place list...\")\n place_list = get_JMA_placelist()\n out_dir = \"D:\\weather_data\\WN_forecast\"\n \n get_WN_forecast_4_Allplace(place_list,out_dir)\n","sub_path":"weather_data/WeatherNews_dataset.py","file_name":"WeatherNews_dataset.py","file_ext":"py","file_size_in_byte":3496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"379628824","text":"'''\n012 Faça um algorito que lia o preço de um produto e mostre sua novo preço com 5% de desconto\n'''\n\nprodunome = input('Que produto deseja ver com o desconto? ')\nprodu = float(input('Qual é o valor altual do {}?'.format(produnome)))\n\nproducomdesc = produ - (produ*0.05)\nprint('o {} com o desconto de 5% fica por {:.2f}'.format(produnome,producomdesc))\n\n","sub_path":"ex/ex 012.py","file_name":"ex 012.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"470811476","text":"# -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-\n\nVERSION='0.1'\nAPPNAME='ConcurrentSet'\n\nfrom waflib import Build, Utils, Logs\n\ndef options(opt):\n opt.load(['compiler_c', 'gnu_dirs'])\n\n opt.add_option('--with-tests', action='store_true', dest='with_tests', help='''build unit tests''')\n opt.add_option('--debug', action='store_true', dest='debug', help='''debugging mode''')\n\ndef configure(conf):\n conf.load(['compiler_c', 'gnu_dirs'])\n\n conf.env.append_value('CFLAGS', ['-std=gnu99'])\n\n conf.check(lib=['pthread'], uselib_store='PTHREAD')\n\n if conf.options.debug:\n conf.define('_DEBUG', 1)\n conf.env.append_value('CFLAGS', ['-O0', '-Wall', '-Werror', '-g3'])\n else:\n conf.env.append_value('CFLAGS', ['-O3', '-g'])\n\n if conf.options.with_tests:\n conf.env['WITH_TESTS'] = True\n conf.check_cc(lib='cunit', include='CUnit/CUnit.h', uselib_store='CUNIT', mandatory=True)\n\ndef build(bld):\n bld(target='concurrent-set',\n features='c cstlib',\n source=bld.path.ant_glob(['src/*.c', 'cityhash-c/city.c']),\n use='PTHREAD',\n includes='src',\n install_path='${LIBDIR}')\n\n bld.install_files('${INCLUDEDIR}', 'src/concurrent-set.h', cwd='src')\n\n if bld.env['WITH_TESTS']:\n bld(target='unit-tests',\n features='c cprogram',\n source=bld.path.ant_glob(['tests/*.c']),\n use='concurrent-set CUNIT',\n includes='src',\n 
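# install_path=None below keeps the unit-test binary out of 'waf install'; only the library and header get installed.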
install_path=None)\n","sub_path":"wscript","file_name":"wscript","file_ext":"","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"413766069","text":"# BINTERFACE.py\n# TIM TRIMBLE\n# SPRING 2016\n\nimport pygame, sys\nfrom pygame.locals import *\nfrom Die_Class import Die\n\nclass bInterface(object):\n\n def __init__(self, blist):\n self.BLIST = blist\n self.BLIST[0].active = True\n self.BLIST[1].active = False\n self.BLIST[2].active = False\n self.DIESURF = pygame.Surface((210, 100), flags=SRCALPHA, depth=32)\n Dice1 = Die(100, self.DIESURF, (0, 0))\n Dice2 = Die(100, self.DIESURF ,(105, 0))\n self.DLIST = [Dice1, Dice2]\n self.PLAYERTURN = 1\n self.HASMOVED = True\n\n def ifaceDown(self,mousexy): # for when mouse is clicked down\n if self.BLIST[0].clicked(mousexy):\n self.BLIST[0].hilighted = True\n self.HASMOVED = True\n elif self.BLIST[1].clicked(mousexy):\n self.BLIST[1].hilighted = True\n if self.BLIST[1].active == True:\n self.HASMOVED = False\n elif self.BLIST[2].clicked(mousexy):\n self.BLIST[2].hilighted = True\n self.HASMOVED = True\n self.HASMOVED = True\n return self.HASMOVED\n\n def ifaceUP(self, mousexy): # This function uses a player input which has to change before being inputed into this function\n if self.BLIST[0].clicked(mousexy):\n self.BLIST[0].hilighted = False\n self.BLIST[0].active = False\n self.BLIST[1].active = True\n\n elif self.BLIST[1].clicked(mousexy):\n self.BLIST[1].hilighted = False\n self.RollDice()\n self.BLIST[1].active = False\n self.BLIST[2].active = True\n elif self.BLIST[2].clicked(mousexy):\n self.BLIST[2].hilighted = False\n self.BLIST[2].active = False\n self.BLIST[0].active = True\n if self.PLAYERTURN == 1:\n self.PLAYERTURN = 2\n elif self.PLAYERTURN == 2:\n self.PLAYERTURN = 1\n return self.PLAYERTURN\n\n def DisplayDiceandButtons(self, surf, dpos,): # to keep the Main function small\n for b in self.BLIST:\n b.display()\n for d in self.DLIST:\n d.displayDie()\n surf.blit(self.DIESURF, dpos)\n\n def RollDice(self):\n for die in self.DLIST:\n die.setRandValue()\n\n def WhereCanMove(self, pList, player):\n for piece in pList:\n if ((abs(piece.GRIDPOS[0]-player.LOCATION.GRIDPOS[0]) == 1 and piece.GRIDPOS[1]-player.LOCATION.GRIDPOS[1] == 0) or\n (abs(piece.GRIDPOS[1]-player.LOCATION.GRIDPOS[1]) == 1 and piece.GRIDPOS[0]-player.LOCATION.GRIDPOS[0]==0)):\n piece.ACTIVE = True\n else:\n piece.ACTIVE = False\n\n def scoreDice(self):\n dvalue = self.DLIST[0].Value + self.DLIST[1].Value\n return dvalue\n\n def TextScore(self, diceScore, tiletype, surf, pos1, pos2):\n textFont1 = pygame.font.SysFont('timesnewroman.ttf', 36)\n textFont2 = pygame.font.SysFont('timesnewroman.ttf', 36)\n dice_score_font = textFont1.render('Dice Score: '+str(diceScore), True,(0,0,0),None)\n tile_font = textFont2.render(tiletype, True, (0,0,0), None)\n if tiletype == 'MOUNTAIN':\n roll = '6, 7, or 8'\n elif tiletype == 'FARM':\n roll = '7 or less'\n elif tiletype == 'FOREST':\n roll = '8 or greater'\n elif tiletype == 'LAKE':\n roll = '2, 3, 11, 12'\n elif tiletype == 'FLATLAND':\n roll = '4 or greater'\n elif tiletype == 'TREASUREP1' or tiletype == 'TREASUREP2' or tiletype == 'START1' or tiletype == 'START2':\n roll = 'anything'\n toMove = textFont2.render('Roll needed: '+roll, True, (0,0,0), None)\n surf.blit(dice_score_font, pos1)\n surf.blit(tile_font, pos2)\n surf.blit(toMove, (pos2[0], pos2[1] + 
50))\n","sub_path":"BINTERFACE.py","file_name":"BINTERFACE.py","file_ext":"py","file_size_in_byte":3851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"55577569","text":"class Solution(object):\n\tdef jump(self, nums):\n\t\t\"\"\"\n\t\t:type nums: List[int]\n\t\t:rtype: int\n\t\t\"\"\"\n\t\tso_far = 0\n\t\tcur = 0\n\t\tcount = 0\n\t\tfor i in range(len(nums)):\n\t\t\tif i <= so_far and i + nums[i] > so_far:\n\t\t\t\tso_far = nums[i] + i\n\n\t\t\tif i == cur and cur != len(nums) - 1:\n\t\t\t\tcount += 1\n\t\t\t\tcur = so_far\n\n\t\treturn count","sub_path":"LPractice/45. Jump Game II.py","file_name":"45. Jump Game II.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"152212339","text":"\"\"\"\nCommon utility functions\n\nTODO:\n need more cleanup\n\"\"\"\nfrom __future__ import print_function\nimport os\nimport sys\nimport functools\nimport traceback\nimport argparse\nimport random\nimport inspect\nimport collections\nimport itertools\nimport copy\nfrom contextlib import contextmanager\n\n\nNaN = float('nan')\nInf = float('inf')\n\n\ndef is_python3():\n return sys.version_info.major >= 3\n\n\nif is_python3():\n long = int\n iteritems = dict.items\nelse:\n iteritems = dict.iteritems\n\n\ndef is_sequence(obj):\n \"\"\"\n Returns:\n True if the sequence is a collections.Sequence and not a string.\n \"\"\"\n return (isinstance(obj, collections.Sequence)\n and not isinstance(obj, str))\n\n\ndef is_list_len(obj, length):\n \"\"\"\n For argument sanity check: assert if the arg is a list/tuple of `len`.\n \"\"\"\n return isinstance(obj, (tuple, list)) and len(obj) == length\n\n\ndef is_iterable(obj):\n \"\"\"\n Returns: True if obj is iterable.\n \"\"\"\n return hasattr(obj, '__iter__')\n\n\nfrom keyword import iskeyword\n\ndef is_variable_name(name):\n return name.isidentifier() and not iskeyword(name)\n\n\ndef iter_last(iterable):\n \"\"\"\n For processing the last element differently\n Yields: (is_last=bool, element)\n \"\"\"\n length = len(iterable)\n return ((i == length-1, x) for i, x in enumerate(iterable))\n\n\ndef raise_(ex):\n \"\"\"\n Workaround to use `raise` inside a lambda\n \"\"\"\n raise ex\n\n\ndef product(iterable, initial=1.0):\n p = initial\n for x in iterable:\n p = initial * p\n return p\n\n\ndef import_parent_dir():\n \"\"\"\n Import scripts from parent directory.\n \"\"\"\n sys.path.insert(0, '..')\n \n\ndef pop_head(array, n):\n \"\"\"\n pop n items from the head of the list. Input will be modified.\n \"\"\"\n ans = array[:n]\n del array[:n]\n return ans\n\n\ndef pop_tail(array, n):\n \"\"\"\n pop n items from the tail of the list. Input will be modified.\n \"\"\"\n ans = array[-n:]\n del array[-n:]\n return ans\n\n\ndef list_dim(lis):\n \"\"\"\n Return: dimensions of deep nested sequences. Only consider the \n dim of the first element. Strings are not sequences. 
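Illustrative doctest added by the editor, matching the recursion below:\n\n    >>> list_dim([[1, 2, 3], [4, 5, 6]])\n    [2, 3]\n    >>> list_dim('abc')\n    []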
\n \"\"\"\n if is_sequence(lis):\n if len(lis) == 0:\n inner_dims = []\n else:\n inner_dims = list_dim(lis[0])\n return [len(lis)] + inner_dims\n else:\n return []\n\n\ndef enlist(obj, N=1, check=False):\n \"\"\"\n Useful for repeating default option multiple times\n Args:\n check: if True, check len(obj) == N\n Returns:\n If obj is a list, return after checking\n Otherwise, repeat it into N-element list\n \"\"\"\n if isinstance(obj, list):\n if check and len(obj) != N:\n raise ValueError('length != {}'.format(N))\n else:\n return obj\n else:\n return [obj] * N\n\n\ndef filter2(array, include_filter=None, exclude_filter=None):\n \"\"\"\n Include an element if:\n 1. include_filter is None\n 2. include_filter is a list and contains the element\n 3. include_filter is a lambda and returns True on the element\n AND then exclude an element if:\n 1. exclude_filter is not None\n 2. exclude_filter is a list and contains the element\n 3. exclude_filter is a lambda and returns True on the element\n \"\"\"\n ans = []\n for a in array:\n if ((include_filter is None \n or is_sequence(include_filter) \n and a in include_filter \n or callable(include_filter) \n and include_filter(a))\n and (exclude_filter is None\n or is_sequence(exclude_filter) \n and not a in exclude_filter \n or callable(exclude_filter) \n and not exclude_filter(a))):\n ans.append(a)\n return ans\n\n\ndef flatten2d(list2d):\n return list(itertools.chain.from_iterable(list2d))\n\n\ndef fformat(float_num, precision):\n \"\"\"\n https://stackoverflow.com/a/44702621/3453033\n \"\"\"\n assert isinstance(precision, int) and precision > 0\n return '{{:.{}f}}'.format(precision).format(float_num).rstrip('0').rstrip('.')\n\n\n# ======================== Dict operations ========================\nclass AttributeDict(dict):\n \"\"\"\n vars() will return the (hacked) internal dictionary\n Always use dict() to convert if you want to manipulate it as a regular dict\n Fully supports pickle\n \"\"\"\n __getattr__ = dict.__getitem__\n __setattr__ = dict.__setitem__\n __delattr__ = dict.__delitem__\n\n def __getattribute__(self, name):\n # hack to enable vars()\n if name == '__dict__':\n return self\n else:\n return object.__getattribute__(self, name)\n\n def __getstate__(self):\n return dict(self)\n\n def __setstate__(self, states):\n self.update(states)\n\n def __deepcopy__(self, memo=None):\n return AttributeDict(copy.deepcopy(dict(self), memo))\n\n def __hash__(self):\n return hash(tuple(self.items()))\n\n\ndef to_attribute_dict(dic):\n \"\"\"\n Recursively turn a nested dict to AttributeDict\n \"\"\"\n dic = AttributeDict(dic)\n for k, v in dic.items():\n if isinstance(v, dict):\n dic[k] = to_attribute_dict(v)\n return dic\n\n\ndef assert_has_keys(D, keys):\n \"\"\"\n Assert that a dictionary must have all the keys\n \"\"\"\n for k in keys:\n assert k in D, \\\n 'Missing `{}`. 
All keys {} must be in the dictionary'.format(k, keys)\n\n\ndef json_map(obj, fn):\n \"\"\"\n Map a function to all elements in the JSON dict recursively.\n\n Args:\n obj: nested dict\n fn: function that takes an element and returns its new value\n \"\"\"\n if isinstance(obj, dict):\n return {k: json_map(v, fn) for k, v in obj.items()}\n elif isinstance(obj, (tuple, list)):\n return [json_map(v, fn) for v in obj]\n else:\n return fn(obj)\n\n\ndef merge_dicts(*dicts):\n \"\"\"\n Given any number of dicts, shallow copy and merge into a new dict,\n precedence goes to key value pairs in latter dicts.\n \"\"\"\n result = {}\n for dic in dicts:\n result.update(dic)\n return result\n\n\ndef merge_attr_dicts(*dicts):\n \"\"\"\n Returns: merged AttributeDict\n \"\"\"\n return AttributeDict(merge_dicts(*dicts))\n\n\ndef reverse_dict(dic):\n \"\"\"\n Convert {key: value} to {value: key}\n \"\"\"\n return dict((value, key) for key, value in iteritems(dic))\n\n\ndef include_keys(include, d):\n \"\"\"\n Pick out the `include` keys from a dict\n\n Args:\n include: list or set of keys to be included\n d: raw dict that might have irrelevant keys\n \"\"\"\n assert is_sequence(include)\n return {k: v for k, v in d.items() if k in set(include)}\n\n\ndef exclude_keys(exclude, d):\n \"\"\"\n Remove the `exclude` keys from a kwargs dict.\n\n Args:\n exclude: list or set of keys to be excluded\n d: raw dict that might have irrelevant keys\n \"\"\"\n assert is_sequence(exclude)\n return {k: v for k, v in d.items() if k not in set(exclude)}\n\n\ndef _trace_key(dict_trace, key):\n return 'key \"{}\" '.format('/'.join(dict_trace + [key]))\n\ndef _has_required(config):\n for key, val in config.items():\n if val == 'REQUIRED':\n return True\n elif isinstance(val, dict):\n if _has_required(val):\n return True\n return False\n\ndef _fill_default_config(config, default_config, dict_trace):\n for key, default_value in default_config.items():\n if key not in config:\n if default_value == 'REQUIRED':\n raise KeyError(_trace_key(dict_trace, key) + 'is a required config')\n elif isinstance(default_value, dict):\n if _has_required(default_value):\n raise ValueError(_trace_key(dict_trace, key) + 'missing. 
'\n 'Its sub-dict has a required config')\n config[key] = default_value\n else:\n value = config[key]\n if isinstance(value, dict) and not isinstance(default_value, dict):\n raise ValueError(_trace_key(dict_trace, key)\n + 'must be a single value instead of a sub-dict')\n if isinstance(default_value, dict):\n if not isinstance(value, dict):\n raise ValueError(_trace_key(dict_trace, key)\n + 'must have a sub-dict instead of a single value')\n config[key] = _fill_default_config(value, default_value,\n dict_trace + [key])\n if value == default_value == 'REQUIRED':\n raise ValueError(_trace_key(dict_trace, key) + ' is required.')\n return config\n\n\ndef fill_default_config(config, default_config):\n \"\"\"\n Special: denote the value as 'REQUIRED' (all-caps) in default_config to enforce\n\n Returns:\n AttributeDict\n `config` filled by default values if certain keys are unspecified\n \"\"\"\n return to_attribute_dict(_fill_default_config(config, default_config, []))\n\n\n# ======================== Args process ========================\n# more advanced inspection utils are in `ml_utils.meta.inspect_utils`\n\ndef pack_args(args):\n \"\"\"\n Process `*args` variable-length positional args\n Enable to function to accept either unpacked or packed args\n\n def f1(*args):\n args = pack_args(args)\n\n Now f1 can be used in all the following ways:\n - f1(a, b, c)\n - f1([a, b, c])\n - f1(*[a, b, c])\n - f1([a,b,c],[a2,b2,c2]) -> will raise error\n \"\"\"\n assert is_sequence(args)\n if args and is_sequence(args[0]):\n assert len(args) == 1, 'only 1 iterable allowed in varargs'\n return args[0]\n else:\n return args\n\n\ndef parse_nums(spec):\n \"\"\"\n 2-7: a range of ids, both ends inclusive\n 2,5,9: comma separated\n 1-100^42-45: exclude a range of numbers\n 1-100^42^52^62: exclude a few numbers\n 1-6,3-8,100: mixture of range and commas\n 1-100^42^55,301,305,400-500^420-430: arbitrarily complicated ranges\n \"\"\"\n spec = spec.strip()\n try:\n if ',' in spec:\n nums = []\n for sub in spec.split(','):\n nums.extend(parse_nums(sub))\n # remove duplicates\n return sorted(list(set(nums)))\n elif '^' in spec:\n # everything after the first ^ are to be excluded\n subs = spec.split('^')\n assert len(subs) >= 2, '[initial]^[exclude]'\n include = set(parse_nums(subs[0]))\n for sub in subs[1:]:\n include -= set(parse_nums(sub))\n return sorted(list(include))\n elif '-' in spec:\n ends = list(map(int, filter(None, spec.split('-'))))\n assert len(ends) == 2, 'range `x-y` must have exactly one hyphen: '+spec\n assert ends[0] <= ends[1], 'end point must >= start point: '+spec\n return list(range(ends[0], ends[1]+1))\n else:\n return [int(spec)]\n except ValueError:\n raise ValueError('Invalid number spec. Example: 1-100^42^55,200-300,309')\n\n\n# ======================== ArgParser ========================\nclass ArgParser(object):\n def __init__(self, **kwargs):\n \"\"\"\n The following options are pre-configured\n --verbosity, or -vvv (number of v's indicate the level of verbosity)\n --debug: turn on debugging mode\n \"\"\"\n kwargs['formatter_class'] = ArgParser._SingleMetavarFormatter\n self.parser = argparse.ArgumentParser(**kwargs)\n self.parser.add_argument('--verbose', '-v', action='count', default=-1,\n help='can repeat, e.g. -vvv for level 3 verbosity')\n self.parser.add_argument('--debug', action='store_true', default=False,\n help='Turn on debugging mode. 
')\n\n def add(self, *args, **kwargs):\n default = kwargs.get('default')\n dtype = kwargs.get('type')\n if dtype is None:\n if default is None:\n dtype = str\n else:\n dtype = type(default)\n typename = dtype.__name__\n if 'metavar' not in kwargs:\n # metavar: display --foo in help string\n if 'choices' in kwargs:\n choices = kwargs['choices']\n choices_str = '/'.join(['{}']*len(choices)).format(*choices)\n kwargs['metavar'] = '<{}: {}>'.format(typename, choices_str)\n elif 'nargs' in kwargs:\n # better formatting handled in _SingleMetavarFormatter\n kwargs['metavar'] = '{}'.format(typename)\n elif not kwargs.get('action'):\n # if 'store_true', then no metavar needed\n # list of actions: https://docs.python.org/3/library/argparse.html#action\n default_str = '={}'.format(default) if default else ''\n kwargs['metavar'] = '<{}{}>'.format(typename, default_str)\n self.parser.add_argument(*args, **kwargs)\n\n\n def add_boolean_flag(self, name, default=False, pair=True, help=None):\n \"\"\"Add a boolean flag to argparse parser.\n\n Args:\n parser: argparse.Parser\n parser to add the flag to\n name: str\n -- will enable the flag, while --no- will disable it\n default: bool or None\n default value of the flag\n pair:\n True to add both --myflag and --no-myflag\n help: str\n help string for the flag\n \"\"\"\n self.parser.add_argument(\"--\" + name,\n action=\"store_true\", default=default, help=help)\n if pair:\n self.parser.add_argument(\"--no-\" + name,\n action=\"store_false\", dest=name)\n\n # aliases\n add_argument = add\n def parse(self, *args, **kwargs):\n return self.parser.parse_args(*args, **kwargs)\n\n def __getattr__(self, attr):\n \"delegate any other methods to the underlying parser\"\n if attr in dir(self):\n return object.__getattribute__(self, attr)\n else:\n return getattr(self.parser, attr)\n\n class _SingleMetavarFormatter(argparse.HelpFormatter):\n \"Helper for better metavar display in ArgParser\"\n def _format_action_invocation(self, action):\n if not action.option_strings:\n metavar, = self._metavar_formatter(action, action.dest)(1)\n return metavar\n else:\n parts = []\n # if the Optional doesn't take a value, format is `-s, --long`\n if action.nargs == 0:\n parts.extend(action.option_strings)\n # if the Optional takes a value, format is:\n # -s , --long\n else:\n default = action.dest.upper()\n args_string = self._format_args(action, default)\n ## THIS IS THE PART REPLACED\n # for option_string in action.option_strings:\n # parts.append('%s %s' % (option_string, args_string))\n parts.extend(action.option_strings)\n # treat nargs different\n if action.nargs and action.default:\n parts[-1] += ' default={}'.format(action.default)\n parts[0] += ' ' + args_string\n return ', '.join(parts)\n\n\n# ==================== Exceptions handling ====================\n@contextmanager\ndef errorsafe(safe_handler, exclude):\n '''\n Safe-handle all errors except for those explicitly excluded\n Args:\n safe_handler: function that takes e and does safe handling\n if None, do nothing\n excluded: dict mapping ExceptionType to function that handles e of that type\n '''\n excluded_excs = tuple(exclude.keys())\n try:\n yield\n except excluded_excs as e:\n exclude[type(e)](e)\n except Exception as e:\n if safe_handler:\n safe_handler(e)\n \n\ndef errorsafe_mode(safe_handle_mode='traceback', \n exclude_excs=None, \n exclude_handle_mode='exit'):\n '''\n Safe-handle all errors except for those explicitly excluded\n \n Args:\n safe_handler_mode: \n - 'traceback': print raised exception's full 
traceback\n - 'print:{format-str-with-err}': print formatted error message\n e.g. 'print:oh no {err}!'\n - 'pass': do nothing\n excluded_excs: list of excluded ExceptionType(s)\n exclude_handle_mode: \n - 'exit': sys.exit(1) terminate the program\n - 'print:{format-str-with-err}': print formatted error message and exit\n e.g. 'print+exit:oh no {err}!'\n - 'raise': raise the exception as-is\n \n Returns:\n context manager.\n '''\n def _errmsg_format(mode):\n errmsg = mode[len('print:'):]\n if errmsg:\n return errmsg\n else: # empty string\n return '{err}'\n \n mode = safe_handle_mode\n if mode == 'traceback':\n def safe_handler(e):\n traceback.print_exc()\n elif mode.startswith('print'):\n errmsg_safe = _errmsg_format(mode)\n def safe_handler(e):\n print(errmsg_safe.format(err=str(e)))\n elif mode == 'pass':\n def safe_handler(e): pass\n else:\n raise NotImplementedError('Safe handler mode {} is not implemented'\n .format(mode))\n \n if exclude_excs is None:\n exclude_excs = []\n \n mode = exclude_handle_mode\n if mode == 'exit':\n def exc_handler(e):\n sys.exit(1)\n elif mode.startswith('print'):\n # MUST have a different name than above errmsg_safe, otherwise closure bug\n errmsg_exc = _errmsg_format(mode)\n def exc_handler(e):\n print(errmsg_exc.format(err=str(e)))\n sys.exit(1)\n elif mode == 'raise':\n def exc_handler(e):\n raise e\n else:\n raise NotImplementedError('Excluded handler mode {} is not implemented'\n .format(mode))\n \n return errorsafe(safe_handler=safe_handler, \n exclude={exc : exc_handler for exc in exclude_excs})\n\n\n# ======================== DEPRECATED ========================\n# No need for this in python 3\ndef enum(*sequential, **named):\n \"\"\"\n Numbers = enum('ZERO', 'ONE', 'TWO')\n Numbers.to_str(Numbers.ZERO) -> 'ZERO'\n Numbers.to_enum('ZERO') -> Numbers.ZERO\n Numbers.size == 3\n for n in Numbers():\n # 1, 2, 3\n \"\"\"\n enum_dict = dict(zip(sequential, range(len(sequential))), **named)\n rev_dict = reverse_dict(enum_dict)\n enum_class = type('Enum', (), enum_dict)\n enum_class.to_str = staticmethod(lambda value: rev_dict[value])\n enum_class.to_enum = staticmethod(lambda key: enum_dict[key])\n enum_class.size = len(enum_dict)\n enum_class.__iter__ = lambda self: (i for i in range(len(enum_dict)))\n return enum_class\n\n","sub_path":"ml_utils/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":19045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"149315062","text":"from alerts.parsing import Parser\nfrom multiprocessing import Process\n\nPARSE_WORKERS = 2\n\ndef parser():\n parser = Parser('main_alert_queue')\n parser.start('main_parse_queue')\n\nparser()\n\n\n#for i in range(PARSE_WORKERS):\n# p = Process(target=parser)\n# p.start()","sub_path":"parsing.py","file_name":"parsing.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"200925065","text":"#coding:utf-8\r\n\r\n\"\"\"\r\n\\u6570\\u636E\\u5E93\\u8FDE\\u63A5\\u7BA1\\u7406\r\n\"\"\"\r\n\r\n__author__ = \"liangxiaokai@21cn.com\"\r\n__version__ = \"1.0\"\r\n__date__ = \"2011/04/14\"\r\n__copyright__ = \"Copyright (c) 2011\"\r\n__license__ = \"Python\"\r\n\r\nfrom db.connect import *\r\n\r\nfrom sqlalchemy import Table,Column,func\r\nfrom sqlalchemy.types import *\r\nfrom sqlalchemy.orm import Mapper\r\n\r\ntab_flow_items = Table(\"flow_items\", metadata,\r\n Column(\"id\",Integer, primary_key=True),\r\n 
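# editor's note: each Column(name, type) entry below declares one field of the flow_items table; String(n) is SQLAlchemy's VARCHAR(n).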
Column(\"icon\",Integer),\r\n Column(\"name\",String(255)),\r\n Column(\"sn\",Integer),\r\n Column(\"sn_name\",String(255)),\r\n Column(\"GSM\",String(20)),\r\n Column(\"description\",String(140)),\r\n Column(\"card\",Integer),\r\n Column(\"stack\",Integer),\r\n Column(\"used\",Integer),\r\n Column(\"create_time\", DateTime),\r\n Column(\"sort\", Integer),\r\n )\r\n \r\n\r\n \r\nclass TFlowItems(TableObject):\r\n def __init__(self):\r\n TableObject.__init__(self)\r\n\r\n def __repr__(self):\r\n return 'id=%d,name=%s' % (self.id, self.name)\r\n \r\nmapper_flow_items = Mapper(TFlowItems,tab_flow_items)\r\n\r\nif __name__==\"__main__\":\r\n pass","sub_path":"server/db/flow_items.py","file_name":"flow_items.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"595690810","text":"import json\r\nimport numpy as np\r\nfrom gensim.models.word2vec import Word2Vec \r\nfrom gensim.test.utils import common_texts\r\nfrom gensim.models import KeyedVectors\r\nfrom gensim.models import Word2Vec\r\nimport torch.nn as nn\r\n\r\nEMBED_SIZE = 250\r\ndef Create_Embed_layer() :\r\n\tmodel = Word2Vec.load('word2vec_min7.model')\r\n\tword2idx = {\"\": 0,\"\": 1 } # 初始化 [word : token] 字典,后期 tokenize 语料库就是用该词典。\r\n\tvocab_list = [(k, model.wv[k]) for k, v in model.wv.vocab.items()]\r\n\t#print(len(vocab_list))\r\n\r\n\t# 存储所有 word2vec 中所有向量的数组,留意其中多一位,词向量全为 0, 用于 padding\r\n\tembeddings_matrix = np.zeros((len(model.wv.vocab.items()) + 2, model.vector_size))\r\n\tfor i in range(len(vocab_list)):\r\n\t word = vocab_list[i][0]\r\n\t word2idx[word] = i + 2\r\n\t embeddings_matrix[i + 2] = vocab_list[i][1]\r\n\tembeddings_matrix[1] = np.random.rand(EMBED_SIZE)\r\n\r\n\tembedding_layer = nn.Embedding(len(embeddings_matrix),EMBED_SIZE)\r\n\treturn embedding_layer\r\n\r\n\r\nmodel = Word2Vec.load('word2vec_min7.model')\r\nword2idx = {\"\": 0, \"\": 1} # 初始化 [word : idx] 字典,后期 tokenize 语料库就是用该词典。\r\nidx2word = {0: \"\", 1: \"\"} # 初始化 [idx : word] 字典\r\n\r\nvocab_list = [(k, model.wv[k]) for k, v in model.wv.vocab.items()]\r\nprint(len(vocab_list)+ 2)\r\n# pad_idx = 0, unk_idx = 1, bos_idx = 2, eos_idx = 3\r\n\r\n# 存储所有 word2vec 中所有向量的数组,留意其中多一位,词向量全为 0, 用于 padding\r\nfor i in range(len(vocab_list)):\r\n\tword = vocab_list[i][0]\r\n\tidx2word[i+2] = word\r\n\tword2idx[word] = i + 2\r\n\r\n# 把eos的idx換到3\r\norg_eos_idx = word2idx['']\r\norg_3_word = idx2word[3]\r\nword2idx[''] = 3\r\nword2idx[org_3_word] = org_eos_idx\r\nidx2word[3] = ''\r\nidx2word[org_eos_idx] = org_3_word\r\n\r\njson1 = json.dumps(idx2word)\r\nf = open(\"idx2word_min7.json\",\"w\")\r\nf.write(json1)\r\nf.close()\r\n\r\njson2 = json.dumps(word2idx)\r\nf = open(\"word2idx_min7.json\",\"w\")\r\nf.write(json2)\r\nf.close()\r\n","sub_path":"hw2-2/hw2_2/hw2-2/dict.py","file_name":"dict.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"244155431","text":"import logging\nimport datetime\n\nclass Log:\n '''日志'''\n\n def __init__(self):\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n datefmt='%a, %d %b %Y %H:%M:%S',\n filename='taxRobotBj.log',\n filemode='a', encode=\"utf-8\", encoding=\"UTF-8\", )\n self.book_name = ''\n\n def set_book_name(self, book_name):\n self.book_name = book_name\n\n def debug(self, message):\n if type(message) is not str :\n message = str(message)\n time = 
datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n print(time + \":\" + self.book_name + \":\" + message)\n logging.debug(time + \":\" + self.book_name + \":\" + message)","sub_path":"util/Log.py","file_name":"Log.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"202225796","text":"import re\nimport random\nfrom userbot import bot\nfrom userbot.utils import admin_cmd\n\nIF_EMOJI = re.compile(\n \"[\"\n \"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)\n \"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\n \"\\U0001F600-\\U0001F64F\" # emoticons\n \"\\U0001F680-\\U0001F6FF\" # transport & map symbols\n \"\\U0001F700-\\U0001F77F\" # alchemical symbols\n \"\\U0001F780-\\U0001F7FF\" # Geometric Shapes Extended\n \"\\U0001F800-\\U0001F8FF\" # Supplemental Arrows-C\n \"\\U0001F900-\\U0001F9FF\" # Supplemental Symbols and Pictographs\n \"\\U0001FA00-\\U0001FA6F\" # Chess Symbols\n \"\\U0001FA70-\\U0001FAFF\" # Symbols and Pictographs Extended-A\n \"\\U00002702-\\U000027B0\" # Dingbats \n \"]+\")\n\ndef deEmojify(inputString: str) -> str:\n \"\"\"Remove emojis and other non-safe characters from string\"\"\"\n return re.sub(IF_EMOJI, '', inputString)\n\n\n@borg.on(admin_cmd(pattern=\"uta(?: |$)(.*)\"))\n\nasync def nope(doit):\n ok = doit.pattern_match.group(1)\n if not ok:\n if doit.is_reply:\n what = (await doit.get_reply_message()).message\n else:\n await doit.edit(\"`Sir please give some query to search and download it for you..!`\")\n return\n sticcers = await bot.inline_query(\n \"Lybot\", f\"{(deEmojify(ok))}\")\n await sticcers[0].click(doit.chat_id,\n reply_to=doit.reply_to_msg_id,\n silent=True if doit.is_reply else False,\n hide_via=True)\n await doit.delete()\n\nimport asyncio\nimport os\nfrom pathlib import Path\nfrom telethon.errors.rpcerrorlist import YouBlockedUserError\nfrom userbot.utils import admin_cmd, edit_or_reply\n\nSEARCH_STRING = \"Ok weit, searching....\"\nNOT_FOUND_STRING = \"Sorry !I am unable to find any results to your query\"\nSENDING_STRING = \"Ok I found something related to that.....\"\nBOT_BLOCKED_STRING = \"Please unblock @utubebot and try again\"\n\n@bot.on(admin_cmd(pattern=\"ut (.*)\"))\nasync def fetcher(event):\n if event.fwd_from:\n return\n song = event.pattern_match.group(1)\n chat = \"@utubebot\"\n event = await event.edit(SEARCH_STRING, parse_mode=\"html\")\n async with event.client.conversation(chat) as conv:\n try:\n purgeflag = await conv.send_message(\"/start\")\n await conv.get_response()\n await conv.send_message(song)\n ok = await conv.get_response()\n while ok.edit_hide != True:\n await asyncio.sleep(0.1)\n ok = await event.client.get_messages(chat, ids=ok.id)\n baka = await event.client.get_messages(chat)\n if baka[0].message.startswith(\n (\"Sorry I found nothing..\")\n ):\n await delete_messages(event, chat, purgeflag)\n return await edit_delete(\n event, NOT_FOUND_STRING, parse_mode=\"html\", time=5\n )\n await event.edit(SENDING_STRING, parse_mode=\"html\")\n await baka[0].click(0)\n music = await conv.get_response()\n await event.client.send_read_acknowledge(conv.chat_id)\n except YouBlockedUserError:\n await event.edit(BOT_BLOCKED_STRING, parse_mode=\"html\")\n return\n await event.client.send_file(\n event.chat_id,\n music,\n caption=f\"==> {song}\",\n parse_mode=\"html\",\n )\n await event.delete()\n await delete_messages(event, chat, purgeflag)\n\n@borg.on(admin_cmd(pattern=\"utv(?: |$)(.*)\"))\n\nasync def nope(doit):\n ok = 
doit.pattern_match.group(1)\n if not ok:\n if doit.is_reply:\n what = (await doit.get_reply_message()).message\n else:\n await doit.edit(\"`Please give some query to search..!`\")\n return\n sticcers = await bot.inline_query(\n \"vid\", f\"{(deEmojify(ok))}\")\n await sticcers[0].click(doit.chat_id,\n reply_to=doit.reply_to_msg_id,\n silent=True if doit.is_reply else False,\n hide_via=True)\n await doit.delete()\n","sub_path":"userbot/plugins/utube.py","file_name":"utube.py","file_ext":"py","file_size_in_byte":4161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"60580340","text":"limit = 10 ** 6 + 1\nprimes = [False, False] + [True] * limit\np = 2\nwhile p * p <= limit:\n for i in range(p*2, limit, p):\n primes[i] = False\n p += 1\nprime_number = [i for i, v in enumerate(primes) if v]\nprime_factor = [0] * limit\nfor prime in prime_number:\n for j in range(prime, limit, prime):\n prime_factor[j] += 1\n\nfor _ in range(int(input())):\n n, m = [int(x) for x in input().split()]\n print(sum(prime_factor[n:m]))\n\n\n\n\n\n","sub_path":"codechef/VEGETA.py","file_name":"VEGETA.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"249522354","text":"#!/usr/bin/env python3\n\n\"\"\"\nInput: txt file containing a DNA string s\n\nOutput: txt file containing the reverse complement of the \nDNA string\n\nUsage (via command line): python \n\"\"\"\n\nimport sys\n\ndef complementDNA(s):\n\t\"\"\"\n\tThis creates the reverse complement of the input string by \n\treversing the symbols of s, then taking the complement of \n\teach symbol (A - T, G - C).\n\t\"\"\"\n\n\treverse = s[::-1]\n\tcomplement = str.maketrans(\"GATC\", \"CTAG\")\n\treturn reverse.translate(complement)\n\n\ndataset = open(sys.argv[1], \"r\")\ns = dataset.read()\nwith open(\"result_\" + sys.argv[1], \"w\") as f:\n\tprint(complementDNA(s), file = f)\n\tf.close()\ndataset.close()\n","sub_path":"Rosalind/Bioinformatics_Stronghold/complementDNA.py","file_name":"complementDNA.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"227769670","text":"\"\"\"\nThis file demonstrates writing tests using the unittest module. 
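(Editor's note: despite this stock Django header, the suite below pairs pytest's django_db marker with Django's TestCase, so it also runs under pytest.)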
These will pass\nwhen you run \"manage.py test\".\n\"\"\"\n\nimport io\nimport urllib.parse\nfrom http.cookies import SimpleCookie\n\nimport pytest\nfrom django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.test import TestCase\nfrom django.urls import reverse\n\nfrom geonode.documents.models import Document\nfrom ihp.survey.models import Survey, SurveyConfiguration\nfrom ihp.survey.templatetags.get_survey_route import get_survey_route\n\n\n@pytest.mark.django_db\nclass TestSurveyView(TestCase):\n def setUp(self):\n \"\"\"\n Setup tests to for user survey route forms\n \"\"\"\n self.imgfile = io.BytesIO(\n b'GIF87a\\x01\\x00\\x01\\x00\\x80\\x01\\x00\\x00\\x00\\x00ccc,\\x00'\n b'\\x00\\x00\\x00\\x01\\x00\\x01\\x00\\x00\\x02\\x02D\\x01\\x00;')\n gif_file = SimpleUploadedFile(\n 'test_img_file.gif',\n self.imgfile.read(),\n 'image/gif')\n user = get_user_model().objects.create(username='Pogba')\n self.download_resource = Document.objects.create(\n doc_file=gif_file,\n owner=user,\n title='theimg')\n\n # get survey route\n self.survey_route = get_survey_route(self.download_resource)\n\n self.survey_config = SurveyConfiguration.load()\n self.survey_config.survey_enabled = True\n self.survey_config.cookie_expiration_time = 25\n self.survey_config.save()\n\n self.survey_form_data = {\n \"name\": \"Sigmon Myers\",\n \"organization\": \"Unesco\",\n \"email\": \"messi@email.com\",\n \"country\": \"DZA\",\n \"reason_for_data_download\": \"Download for research\"\n }\n\n def test_successful_download_with_existing_cookies(self):\n \"\"\"\n Test that a user with survey cookies and has filled in form correctly can download a resource\n \"\"\"\n self.client.cookies = SimpleCookie({\"ihp_dlsurvey\": \"ihp_dlsurvey\"})\n response = self.client.get(\n u\"{}?download_url={}&next={}\".format(\n self.survey_route, urllib.parse.quote(\"http://example.co.uk\"), \"/\")\n )\n self.assertEqual(response.status_code, 302)\n\n def test_form_rendering_with_missing_survey_cookies(self):\n \"\"\"\n Test that a user with with no survey cookies view the download survey form\n \"\"\"\n response = self.client.get(\n u\"{}?download_url={}&next={}\".format(\n self.survey_route, urllib.parse.quote(\"http://example.com\"), \"/\")\n )\n self.assertEqual(response.status_code, 200)\n self.assertIsNotNone(response.context[\"form\"])\n\n def test_survey_form_prefilled_values_for_authenticate_users(self):\n \"\"\"\n Test that an authenticated user receives a form with some fields pre-filled but hidden\n \"\"\"\n user = get_user_model()\n user = user.objects.create_user(username=\"Thiago.Silver\", email=\"Thiago@Silver.com\",\n country=\"DZA\", organization=\"PSG\", password=\"very-secret\")\n\n self.client.login(username=user.username, password=\"very-secret\")\n response = self.client.get(\n u\"{}?download_url={}&next={}\".format(\n self.survey_route, urllib.parse.quote(\"http://example.com\"), \"/\")\n )\n self.assertEqual(\n response.context[\"form\"].fields['name'].initial, user.username)\n self.assertEqual(\n response.context[\"form\"].fields['email'].initial, user.email)\n self.assertEqual(\n response.context[\"form\"].fields['country'].initial, user.country)\n self.assertEqual(\n response.context[\"form\"].fields['organization'].initial, user.organization)\n\n def test_survey_hidden_form_fields_for_authenticate_users(self):\n \"\"\"\n Test that an authenticated user receives a form with some fields hidden\n \"\"\"\n user = 
get_user_model()\n user = user.objects.create_user(username=\"Thiago.Silver\", email=\"Thiago@Silver.com\",\n country=\"DZA\", organization=\"PSG\", password=\"very-secret\")\n\n self.client.login(username=user.username, password=\"very-secret\")\n response = self.client.get(\n u\"{}?download_url={}&next={}\".format(\n self.survey_route, urllib.parse.quote(\"http://example.com\"), \"/\")\n )\n self.assertEqual(\n response.context[\"form\"].fields['name'].widget.__class__, forms.HiddenInput().__class__)\n self.assertEqual(\n response.context[\"form\"].fields['email'].widget.__class__, forms.HiddenInput().__class__)\n self.assertEqual(\n response.context[\"form\"].fields['country'].widget.__class__, forms.HiddenInput().__class__)\n self.assertEqual(\n response.context[\"form\"].fields['organization'].widget.__class__, forms.HiddenInput().__class__)\n\n def test_successfully_survey_submission(self):\n \"\"\"\n Test that successfully submission of the survey form redirects\n to download and sets survey cookies that prevent survey form view till it expires\n \"\"\"\n response = self.client.post(\n u\"{}?download_url={}&next={}\".format(\n self.survey_route, urllib.parse.quote(\"http://example.io\"), \"/\"),\n data=self.survey_form_data\n )\n self.assertEqual(response.status_code, 302)\n\n survey = Survey.objects.get(name=self.survey_form_data[\"name\"])\n self.assertEqual(survey.name, self.survey_form_data[\"name\"])\n self.assertIn(\"ihp_dlsurvey\", response.client.cookies.keys())\n\n def test_failed_survey_submission_with_invalid_email(self):\n \"\"\"\n Test that a user cannot successfully submit a form with an invalid email\n \"\"\"\n self.survey_form_data[\"email\"] = \"invald-email\"\n response = self.client.post(\n u\"{}?download_url={}&next={}\".format(\n self.survey_route, urllib.parse.quote(\"http://example.org\"), \"/\"),\n data=self.survey_form_data\n )\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.context[\"form\"].errors[\"email\"], [\n \"Enter a valid email address.\"])\n\n def test_failed_survey_submission_with_missing_fields(self):\n \"\"\"\n Test that a user cannot successfully submit a form with missing required fields\n \"\"\"\n response = self.client.post(\n u\"{}?download_url={}&next={}\".format(\n self.survey_route, urllib.parse.quote(\"http://example.com\"), \"/\"),\n data={}\n )\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.context[\"form\"].errors[\"name\"], [\n \"This field is required.\"])\n self.assertEqual(response.context[\"form\"].errors[\"email\"], [\n \"This field is required.\"])\n self.assertEqual(response.context[\"form\"].errors[\"reason_for_data_download\"], [\n \"This field is required.\"])\n\n def test_direct_download_when_download_survey_is_disabled(self):\n \"\"\"\n Test that all downloads are direct when survey configuration 'survey_enabled' = False\n i.e. 
survey is disabled\n \"\"\"\n self.survey_config.survey_enabled = False\n self.survey_config.save()\n response = self.client.get(\n u\"{}?download_url={}&next={}\".format(\n self.survey_route, urllib.parse.quote(\"http://example.com\"), \"/\")\n )\n self.assertEqual(response.status_code, 302)\n\n def test_404_returned_for_invalid_survey_route(self):\n \"\"\"\n Test that an invalid survey route returns 404 error\n \"\"\"\n self.survey_config.survey_enabled = False\n self.survey_config.save()\n response = self.client.get(\n u\"survey/unknown/route/{}/?download_url={}&next={}\".format(\n self.download_resource.pk, urllib.parse.quote(\"http://example.com\"), \"/\")\n )\n self.assertEqual(response.status_code, 404)\n\n def test_404_returned_for_unknown_resource_id(self):\n \"\"\"\n Test that a non existing resource returns a 404 error\n \"\"\"\n self.survey_config.survey_enabled = False\n self.survey_config.save()\n response = self.client.get(\n u\"{}?download_url={}&next={}\".format(\n self.survey_route.replace(str(self.download_resource.pk), '-1'),\n urllib.parse.quote(\"http://example.com\"), \"/\")\n )\n self.assertEqual(response.status_code, 404)\n","sub_path":"ihp/survey/tests/test_survey_view.py","file_name":"test_survey_view.py","file_ext":"py","file_size_in_byte":8699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"532082430","text":"import smtplib, ssl\nimport urllib.request\nimport urllib.parse\nimport os\nimport time\nimport telegram_send\n\nLAST_MESSAGES = dict()\ndef send_email (event_id, to_addr, message):\n global LAST_MESSAGES\n last_send_time, last_send_message = LAST_MESSAGES.get(event_id, (0,\"\"))\n if last_send_message == message:\n if time.time() - last_send_time < 60*60: # the same message was sent within an hour\n return\n LAST_MESSAGES[event_id] = (time.time(), message)\n print(\"send email {}:{}\".format(event_id, message))\n smtp_server = \"smtp.gmail.com\"\n port = 587 # For starttls\n sender_email = \"disclosures.ru@gmail.com\"\n with open(\"example.txt\", \"r\") as inp:\n text_cut = inp.read()[1024:1034]\n\n context = ssl.create_default_context()\n\n try:\n server = smtplib.SMTP(smtp_server, port)\n server.starttls(context=context) # Secure the connection\n server.login(sender_email, text_cut)\n server.sendmail(sender_email, to_addr, message)\n except Exception as e:\n print(e)\n finally:\n server.quit()\n\n try:\n telegram_send.send(messages=[message])\n except Exception as e:\n print(e)\n\n\ndef check_ping(hostname):\n response = os.system(\"ping -c 1 \" + hostname)\n # and then check the response...\n return response == 0\n\n\ndef read_morda(url):\n try:\n f = urllib.request.urlopen(url, timeout=30)\n s = f.read().decode('utf-8')\n return len(s) > 100\n except Exception as exp:\n return False\n\ndef check_pdf_converter_server():\n try:\n f = urllib.request.urlopen('http://disclosures.ru:8091/ping', timeout=30)\n s = f.read().decode('utf-8')\n return s == \"yes\"\n except Exception as exp:\n return False\n\n\ndef main():\n url = 'http://disclosures.ru'\n admin_email = \"alexey.sokirko@gmail.com\"\n ping_flag = True\n morda_flag = True\n pdf_conv_srv_flag = True\n last_time_check_morda = 0\n last_time_check_pdf_conv_src = 0\n send_email(\"start\", admin_email, \"disclosures checker start\")\n #assert read_morda(url)\n #assert check_pdf_converter_server()\n\n ping_period = 60*5\n http_read_period = 60 * 30\n #ping_period = 10\n #http_read_period = 20\n while True:\n time.sleep(ping_period)\n if not 
check_ping('google.com'):\n continue\n\n if not check_ping('disclosures.ru'):\n send_email(\"ping\", admin_email, \"disclosures.ru is not reached, ping failed\")\n ping_flag = False\n else:\n if not ping_flag:\n send_email(\"ping\", admin_email, \"disclosures.ru ping succeeded\")\n ping_flag = True\n\n if not morda_flag or time.time() - last_time_check_morda >= http_read_period:\n last_time_check_morda = time.time()\n if read_morda(url):\n if not morda_flag:\n send_email(\"morda\", admin_email, \"disclosures.ru main page access restored\")\n morda_flag = True\n else:\n send_email(\"morda\", admin_email, \"disclosures.ru main page access failed\")\n morda_flag = False\n\n if not pdf_conv_srv_flag or time.time() - last_time_check_pdf_conv_src >= http_read_period:\n last_time_check_pdf_conv_src = time.time()\n if check_pdf_converter_server():\n if not pdf_conv_srv_flag:\n send_email(\"pdf_conv_srv\", admin_email, \"pdf conversion server access restored\")\n pdf_conv_srv_flag = True\n else:\n send_email(\"pdf_conv_srv\", admin_email, \"pdf conversion server access failed\")\n pdf_conv_srv_flag = False\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"tools/disclosures_site/scripts/disclosures.service/check_disclosures_health.py","file_name":"check_disclosures_health.py","file_ext":"py","file_size_in_byte":3790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"113305485","text":"import re \nfrom datetime import datetime,timedelta\nfrom collections import Counter\n\ndef open_parser(filename):\n\twith open(filename) as logfile:\n\t\tpattern = (r''\n r'(\\d+.\\d+.\\d+.\\d+)\\s-\\s-\\s' # IP ??\n r'\\[(.+)\\]\\s' # ??\n r'\"GET\\s(.+)\\s\\w+/.+\"\\s' # ????\n r'(\\d+)\\s' # ???\n r'(\\d+)\\s' # ????\n r'\"(.+)\"\\s' # ???\n r'\"(.+)\"' # ?????\n )\n\t\tparsers = re.findall(pattern,logfile.read())\n\treturn parsers\n\ndef main():\n\t# logs = open_parser('/home/shiyanlou/Code/nginx.log')\n\tlogs = open_parser('nginx.log')\n\tmax_ip_count = 0\n\tip_count = {}\n\tip_dict = {}\n\tmax_ip_count = 0\n\n\turl_count_404 = {}\n\turl_dict = {}\n\tmax_url_count_404 = 0\n\n\n\tfor log in logs:\n\t\tip,_time,url,status,length,header,client = log\n\t\tutc_date = datetime.strptime(_time,'%d/%b/%Y:%H:%M:%S +0800')\n\t\tlocal_date = utc_date\n\t\tlocal_date_str = local_date.strftime('%Y%m%d')\n\t\tif local_date_str == '20170111':\n\t\t\tif ip not in ip_count:\n\t\t\t\tthis_ip_count = 1\n\t\t\telse:\n\t\t\t\tthis_ip_count = ip_count[ip] + 1\n\t\t\tip_count[ip] = this_ip_count\n\n\t\t\tif this_ip_count > max_ip_count:\n\t\t\t \tmax_ip_count = this_ip_count\n\t\t\t \tip_dict = {ip:this_ip_count}\n\t\t\telif this_ip_count == max_ip_count and ip not in ip_dict:\n\t\t\t \tip_dict[ip] = this_ip_count\n\n\t\tif status == '404':\n\t\t\tif url not in url_count_404:\n\t\t\t\tthis_url_count = 1\n\t\t\telse:\n\t\t\t\tthis_url_count = url_count_404[url] +1\n\t\t\turl_count_404[url] = this_url_count\n\n\t\t\tif this_url_count > max_url_count_404:\n\t\t\t\tmax_url_count_404 = this_url_count\n\t\t\t\turl_dict = {url:this_url_count}\n\t\t\telif this_url_count == max_url_count_404 and url not in url_dict:\n\t\t\t\turl_dict[url] = this_url_count\n\n\t# url_map = [ log[2] for log in logs if log[3] == '404']\n\t# print(Counter(url_map))\n\n\treturn ip_dict, url_dict\n\nif __name__ == '__main__':\n\t# main()\n\tip_dict, url_dict = 
main()\n\tprint(ip_dict,url_dict)","sub_path":"challenge8/challenge.py","file_name":"challenge.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"538437573","text":"#!/usr/bin/env python\nimport socketserver as SocketServer\nimport settings\nimport os, time\nfrom datetime import datetime\nimport pytz\n\nfrom config.database import Database\n\nmongo = Database().mongo\n\nclass SyslogUDPHandler(SocketServer.BaseRequestHandler):\n\tdef handle(self):\n\t\tdata = bytes.decode(self.request[0].strip())\n\t\tsocket = self.request[1]\n\n\t\t#collection_index = date_now = datetime.now().strftime(\"%d-%m-%Y\")\n\t\tdate_now = datetime.now().strftime(\"%d-%m-%Y %H:%M:%S\")\n\t\t\n\t\torigin = self.client_address[0]\n\t\tlevel = data[:data.index(' ')]\n\t\tmessage = data[data.index(' '):]\n\n\t\tprint(str(date_now) + \" \" + origin + \": \" + data)\n\t\tmongo[\"Messages\"].insert_one({ \"date\": datetime.now(), \"origin\": origin, \"message\": message, \"level\": level })\n\nif __name__ == \"__main__\":\n\ttry:\n\t\tHOST, PORT = str(os.getenv(\"SYSLOG_HOST\")), int(os.getenv(\"SYSLOG_PORT\"))\n\t\tserver = SocketServer.UDPServer((HOST, PORT), SyslogUDPHandler)\n\t\tprint(\"Listening on \" + str(HOST) + \":\" + str(PORT))\n\t\tserver.serve_forever(poll_interval=0.5)\n\n\texcept (IOError, SystemExit):\n\t\traise\n\texcept KeyboardInterrupt:\n\t\tprint (\"Crtl+C Pressed. Shutting down.\")","sub_path":"syslog-infra/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"501540961","text":"class Node(object):\n def __init__(self, data, left=None, right=None):\n self.data = data\n self.left_child = left\n self.right_child = right\n\n def insert_left(self, data):\n if self.left_child:\n self.left_child.insert_left(data)\n else:\n self.left_child = Node(data)\n\n def insert_right(self, data):\n if self.right_child:\n self.right_child.insert_right(data)\n else:\n self.right_child = Node(data)\n\n def preorder(self):\n print(str(self.data), end=' ')\n if self.left_child:\n self.left_child.preorder()\n if self.right_child:\n self.right_child.preorder()\n\n def inorder(self):\n if self.left_child:\n self.left_child.inorder()\n print(str(self.data), end=' ')\n if self.right_child:\n self.right_child.inorder()\n \n def postorder(self):\n if self.left_child:\n self.left_child.postorder()\n if self.right_child:\n self.right_child.postorder()\n print(str(self.data), end=' ')\n\n\ndef insert(node, v, index):\n left_index, right_index = v[index][1], v[index][2]\n if left_index >= 0:\n left_node = Node(v[left_index][0])\n insert(left_node, v, left_index)\n node.left_child = left_node\n if right_index >= 0:\n right_node = Node(v[right_index][0])\n insert(right_node, v, right_index)\n node.right_child = right_node\n\n\nclass Tree(object):\n def __init__(self, v):\n self.root = Node(v[0][0])\n insert(self.root, v, 0)\n\n def preorder(self):\n self.root.preorder()\n\n def inorder(self):\n self.root.inorder()\n\n def postorder(self):\n self.root.postorder()\n\n\ndef main():\n n = int(input())\n vector = []\n for _ in range(n):\n vector.append(tuple(map(int, input().split())))\n tree = Tree(vector)\n tree.inorder()\n print()\n tree.preorder()\n print()\n tree.postorder()\n print()\n\n\nif __name__ == '__main__':\n 
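# editor's note: main() reads n rows of (value, left_index, right_index), builds the tree rooted at row 0, then prints the inorder, preorder and postorder traversals in that order.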
main()\n","sub_path":"stepic/tree_traversal.py","file_name":"tree_traversal.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"11187571","text":"import socket\nimport time\n\nhost = \"127.0.0.1\" # правильней писать так(если поменяется адресс)\nport = 9090\n\n# список в котором хранятся адреса клиента(для того чтобы при отправке сообщения - оно приходило\n# только другим, а ему не дублировалось)\nclients = []\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # объявление протокола\ns.bind((host, port)) # создание порта\n\nprint(\"[ Server Started ]\")\n\nwhile True:\n try:\n data, addr = s.recvfrom(1024) # данные сообщения, адресс отправителя и в байтах максимальный размер сообщения\n\n if addr not in clients: # если адреса нет в списке клиентов(новый клиент)\n clients.append(addr) # добавить адрес\n\n # переменная нужна для того чтобы отобразить текущее время(когда сообщение было отправлено)\n itsatime = time.strftime(\"%Y-%m-%d-%H.%M.%S\", time.localtime())\n\n # ip адресс отправителя, личный адресс и время(отправки, выхода...)\n print(\"[\" + addr[0] + \"]=[\" + str(addr[1]) + \"]=[\" + itsatime + \"]/\", end=\"\")\n print(data.decode(\"utf-8\")) # декодирование сообщения которое отправил пользователь\n\n for client in clients: # если адрес не равняется тому кто отпрвляет\n if addr != client: # сообщение - тогда отправляем\n s.sendto(data, client)\n except:\n print(\"\\n[ Server Stopped ]\")\n break\n\ns.close()\n","sub_path":"Sockety/Chat/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"28996857","text":"\"\"\" \nDate Created : 9/2/2021\nDate Modified: 12/1/2021\nAuthor: Sarah Litz, Ryan Cameron\nDescription: This file contains the class definition for CANBus, the object containing the methods to read data coming in through a CAN-bus line on the raspberry pi and send/store it for reference.\nProperty of Donaldson Lab, University of Colorado Boulder, PI Zoe Donaldson\nhttp://www.zdonaldsonlab.com/\n\"\"\"\n\ntry: \n import can \n from can.interfaces.serial.serial_can import *\nexcept Exception as e: \n print(e)\n can = None \ntry: \n import serial \nexcept Exception as e: \n print(e)\n serial = None \nimport queue\nimport os\nimport threading\nimport asyncio \nimport time\n\nclass CANBus: \n \"\"\" class for recieving data from RFIDs\"\"\"\n\n def __init__(self, isserial=False): \n \n try: \n self.isSimulation = self.config_canbus(isserial)\n except OSError as e: \n print(e)\n self.isSimulation = True # simulating CANBus\n\n self.shared_rfidQ = queue.Queue() # queue shared among the rfids \n\n self.watch_RFIDs = [] # ModeABC adds any non-simulated rfids to this list so CANBus can perform quick cleaning and trash unimportant messages that it recieves\n\n self.active = True \n\n def config_canbus(self, isserial):\n \"\"\"\n [summary] Attempts connecting to the CAN bus and creating a Bus wrapper (provided by python-can) that provides utilities for simple recieving and sending of data on the CAN Bus. 
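(Editor's note: the SocketCAN branch below uses python-can's standard constructor, can.interface.Bus(channel='can0', bustype='socketcan'); no behaviour beyond the code shown is assumed.)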
\n If connection to the hardware fails, the CANBus will automatically be simulated.\n Args: \n isserial (Boolean) : if True, sets up a serial method of communication, otherwise sets up a parallel method of communication (allowing for serveral bits at a time to be transmitted)\n \"\"\"\n\n if can is None or serial is None: \n # print('cannot setup CAN Bus without the can and serial module')\n return True \n\n print('initializing can bus')\n try: \n os.system('sudo /sbin/ip link set can0 up type can bitrate 125000')\n print(\"CANBus init ok\")\n except: \n raise OSError('CANBus init failed')\n return True \n \n # Check if its a serial bus\n if isserial:\n print(\"Seting up serial bus...\")\n self.bus = SerialBus(channel = \"/dev/tty1\")\n print(\"Serial bus created\")\n else:\n print(\"Setting up CAN bus...\")\n self.bus = can.interface.Bus(channel = \"can0\", bustype='socketcan') # Should auto configure the bus, make sure that this is interface.Bus NOT \n print(\"Bus created\")\n \n\n print('CANBus Created')\n print('CAN :', self.bus)\n attrs = vars(self.bus)\n print(', '.join(\"%s: %s\" % item for item in attrs.items()))\n return False\n\n def listen(self):\n \"\"\" Called from the rfidListener method in ModeABC\n This function runs __listen() on its own thread which will handle incoming data recieved on the CAN Bus. \n \"\"\"\n # Creates notifier on its own thread and returns immediately so rfidListener can continue running\n self.active = True \n notiThread = threading.Thread(target=self.__listen)\n notiThread.name = 'CANBus.__listen'\n notiThread.start()\n \n def stop_listen(self): \n \"\"\" Causes the __listen thread to break out of its loop. Stops the Bus Notifier object so data will not be recieved. \"\"\"\n self.active = False \n\n def __listen(self):\n \"\"\"Called by the listen() method. Runs on its own thread. Activated and Deactivated at the same time a Mode is activated/deactivated. \n Creates a Listener object that waits for data sent on the CAN Bus and places incoming data on the shared_rfidQ. \n \"\"\"\n\n # activated when a mode is activated \n if not self.active: return \n\n if self.isSimulation: \n if len(self.watch_RFIDs)>0: \n raise Exception(f'(CANBus.py, SimulatedMessageListener) Must simulate all RFIDs because can bus connection was not successful.')\n time.sleep(20)\n return \n\n\n class MessageListener(can.Listener): \n \"\"\" Inherits from python-can Listener Class. Overrides on_message_recieved method to define how incoming data is handled.\"\"\"\n def __init__(self, shared_rfidQ, watch_RFIDs): \n super().__init__(self)\n self.shared_rfidQ = shared_rfidQ\n self.watch_RFIDs = watch_RFIDs\n\n \n def on_message_received(self,msg): \n # registered as a listener. 
when a message is recieved, this fnctn gets called \n\n #\n # FILTER MESSAGES HERE ( trash messages that are for an rfid we are not tracking )\n #\n if msg.arbitration_id not in self.watch_RFIDs: \n # do nothing \n return \n\n # convert msg data to a vole_id \n\n\n # format for shared_rfidQ || Tuple: ( vole_id, rfid_id, timestamp )\n print('CANBus Pinged: ', (msg.data.hex(), msg.arbitration_id, msg.timestamp), '\\n')\n formatted_msg = (msg.data.hex(), msg.arbitration_id, msg.timestamp)\n \n # REFORMAT MESSAGES FOR THE SHARED_RFIDQ HERE \n self.shared_rfidQ.put(formatted_msg)\n return \n\n print(\"Listening...\")\n\n # Create Notifier \n listener = MessageListener(self.shared_rfidQ, self.watch_RFIDs)\n notifier = can.Notifier(bus=self.bus,listeners=[listener]) # listeners are the callback functions!\n\n while self.active: \n # deactivated when a mode is deactivated\n time.sleep(0.5)\n\n notifier.stop() # cleanup \n\n \n\n\n","sub_path":"Control/Classes/CANBus.py","file_name":"CANBus.py","file_ext":"py","file_size_in_byte":5840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"431714922","text":"'''\n\nName :- Siddharth Nahar\nEntry No :- 2016csb1043\nDate :- 11/9/18\nPurpose :- Extract Features data and create vectors\n\n'''\nimport numpy as np\nfrom numpy.linalg import inv\nimport math\nimport random\nimport matplotlib.pyplot as plt\n\n#-----------------------------------------------------------\n\n#Function to Plot data of X,Y classes\ndef plot(X,Y):\n\n\tfig = plt.figure(1)\n\tcountx = 0\n\tcounty = 0\n\tfor i in range(0,X.shape[0]):\n\n\t\tif(Y[i] == 1):\n\t\t\tif(countx == 0):\n\t\t\t\tplt.plot(X[i][0],X[i][1],'bs',label = \"Positve\")\n\t\t\telse:\n\t\t\t\tplt.plot(X[i][0],X[i][1],'bs')\n\n\t\t\tcountx += 1\n\t\telse:\n\t\t\tif(county == 0):\n\t\t\t\tplt.plot(X[i][0],X[i][1],'r^',label = \"Negative\")\n\t\t\telse:\n\t\t\t\tplt.plot(X[i][0],X[i][1],'r^')\n\t\t\tcounty += 1\n\n\tplt.legend(loc = \"upper right\")\n\t#plt.savefig(\"Logistic Regression Data\")\n\t#plt.show()\n\t\n\n#--------------------------------------------------------------\ndef plotDecisionBoundary(X,Y,W):\n\n\n\tplot(X,Y)\n\n\tplot_x = [np.min(X[:,1]),np.max(X[:,1])]\n\n\tplot_x = np.array(plot_x)\n\tplot_y = W[1]*plot_x + W[0]\n\tplot_y = -1*plot_y/W[2]\n\n\t\n\tplt.plot(plot_x,plot_y,'--b')\n\tplt.gca().set_ylim([0,8])\n\tplt.show()\n\n#------------------------------------------------------------\n\n#Function to get Sigmoid Fucntion\ndef sigmoid(Z):\n\n\treturn 1/(1 + np.exp(-1*Z))\n\n#------------------------------------------------------------\n\n#Function for Cost calculation\ndef costFunc(X,Y,W,lamb):\n\n\tf = sigmoid(X@W)\n\n\tcost = (np.log(f)).transpose()@Y + (np.log(1-f)).transpose()@(1-Y)\n\tcost = -1*cost/X.shape[0]\n\n\tsquare = W**2\n\n\tweights = (np.sum(square) - square[0])*lamb\n\tweights = weights/(2*X.shape[0])\n\n\tcost = cost + weights\n\n\treturn cost\n#------------------------------------------------------------\n#Function for Grad\ndef grad(X,Y,W,lamb):\n\n\tf = sigmoid(X@W)\n\tXt = X.transpose()\n\tgr = f-Y\n\n\tgr = Xt@gr\n\tgr = gr + lamb*W\n\n\tgr[0] = gr[0] - lamb*W[0]\n\n\tgr = gr/X.shape[0]\n\n\treturn gr\n\t\n#-------------------------------------------------------------\ndef evalFunc(X,Y,W):\n\n\tf = sigmoid(X@W)\n\n\tfor i in range(0,f.shape[0]):\n\t\tif(f[i] > 0.4):\n\t\t\tf[i] = 1\n\t\telse:\n\t\t\tf[i] = 0\n\n\terror = f - Y\n\n\terr = np.sum(error**2)\n\n\treturn 
err/X.shape[0]\n\n#--------------------------------------------------------------\n#Function for Gradient Descent\n\ndef gradientDescent(X,Y,W,nIter,alpha,lamb):\n\n\tWprev = W\n\tfor i in range(0,nIter):\n\n\t\terror = costFunc(X,Y,W,lamb)\n\n\t\tprint(\"After iteration : \"+str(error) + \" \"+str(i))\n\t\t\n\t\tWnew = Wprev - alpha*grad(X,Y,Wprev,lamb)\n\t\tif(np.sum(np.absolute(Wnew-Wprev)) <= 0.000005):\n\t\t\tbreak\n\n\t\tprint(Wnew)\n\t\tWprev = Wnew\n\n\treturn Wprev\t\n\n#--------------------------------------------------------------\n#Function to Return Inverse of f''\n\ndef hurestic(X,Y,W,lamb):\n\n\tf = sigmoid(X@W)\n\n\tR = np.identity(X.shape[0])\n\tXt = X.transpose()\n\n\tfdash = f*(1-f)\n\n\t#print(fdash.shape)\n\tfor i in range(0,X.shape[0]):\n\t\tR[i][i] = fdash[i]\n\n\tId = np.identity(X.shape[1])\n\n\tId[0][0] = 0\n\n\n\tH = R@X\n\tH = Xt@H\n\n\tH = H + lamb*Id\t\n\n\tH = H/X.shape[0]\n\tH = inv(H)\n\t\n\treturn H\n\n#----------------------------------------------------------------\n#Function for Newton Raphson method\ndef newtonRaphson(X,Y,W,nIter,lamb):\n\n\t#print(Y.shape)\n\tWprev = W\n\tfor i in range(0,nIter):\n\n\t\terror = costFunc(X,Y,W,lamb)\n\t\t#print(\"After iteration : \"+str(error))\n\t\tif(error < 0.05):\n\t\t\tbreak\n\t\tH = hurestic(X,Y,Wprev,lamb)\n\t\n\t\tG = grad(X,Y,Wprev,lamb)\n\t\n\t\tWnew = Wprev - H@G\n\t\tif(np.sum(np.absolute(Wnew-Wprev)) <= 0.000005):\n\t\t\tbreak\n\n\t\tWprev = Wnew\n\n\treturn Wprev\t\t\n\t\t\n\n#--------------------------------------------------------------\n\n#Function to create Polynomial Degrees\n\ndef kernel(X,degree):\n\n\tRows,Cols = X.shape\n\tfeatureVector = list()\n\tfor i in range(0,Rows):\n\n\t\tj = 0\n\t\t\n\t\tvector = list()\n\t\twhile(j <= degree):\n\t\t\tk = j\n\t\t\twhile(k >= 0):\n\t\t\t\tvector.append((X[i][0]**k)*(X[i][1]**(j-k)))\n\t\t\t\tk = k - 1\n\t\t\tj = j + 1\n\n\t\t#print(vector)\n\t\tfeatureVector.append(vector)\n\n\t#print(featureVector)\n\t\n\tXnew = np.array(featureVector)\n\treturn Xnew\n\n#-------------------------------------------------------------\n#Function to Run the Logistic Algorithm\n\ndef run(featureMatrix):\n\n\t\t\n\ttuples = featureMatrix.shape\n\n\t\n\t#Create X vector and Y vector\n\tfeatureMatrixX = featureMatrix[:,0:tuples[1]-1]\n\n\tfeatureMatrixY = featureMatrix[:,-1]\n\n\tplot(featureMatrixX,featureMatrixY)\n\tplt.show()\n\tOnes = np.ones((tuples[0],1),dtype = float)\n\n\tX = np.append(Ones,featureMatrixX,axis = 1)\n\tY = featureMatrixY\t\n\n\tY = np.reshape(Y,(tuples[0],1))\n\n\n\t#W = np.array([-1.453,0.295,0.0453])\n\t#W = np.reshape(W,(tuples[1],1))\n\t#W = np.zeros((tuples[1],1))\t\n\t'''\n\t#Linear Decision Boundary\n\tW = np.random.rand(tuples[1],1)\n\tW = W*0.2 - 0.1\n\t\n\t#Wnew = gradientDescent(X,Y,W,1000,0.002,1)\n\n\t#print(costFunc(X,Y,W,0))\n\t#print(grad(X,Y,W,0))\n\t#Wnew = newtonRaphson(X,Y,W,1000,1)\n\n\t#Wnew = gradientDescent(X,Y,W,10000,0.07,1)\n\t#print(costFunc(X,Y,Wnew,0))\n\t#print(Wnew)\n\t#print(\"After Gradient Descent:- \" + str(evalFunc(X,Y,Wnew)))\t\n\t#print(W.shape)\n\t#plotDecisionBoundary(X[:,1:],Y,Wnew)\n\t'''\n\n\t#---------------------------------------------------------\n\t#Polynomial decision boundary\n\n\tXdash = kernel(X,2)\n\tRow,Cols = Xdash.shape\n\n\tW = np.random.rand(Cols,1)\n\tW = W*0.2 - 0.1\n\n\tWnew = gradientDescent(Xdash,Y,W,1000,0.01,1)\n\n#------------------------------------------------------------\n\nif __name__ == '__main__':\n\n\t#Feature Extraction from linregdata file\n\tlogiRegData = 
open('l2/credit.txt','r')\n\n\tfeatureVector = list()\n\t#Traverse through file and create feature vector\n\n\n\tfor line in logiRegData:\n\n\t\tline = line.split(',')\n\t\t\n\t\tvector = list()\n\n\t\tfor data in line:\n\n\t\t\tval = float(data)\n\t\t\tvector.append(val)\n\n\t\tfeatureVector.append(vector)\n\n\n\tfeatureMatrix = np.array(featureVector)\n\trun(featureMatrix)\n","sub_path":"LinearAndLogisticRegression/logisticRegression.py","file_name":"logisticRegression.py","file_ext":"py","file_size_in_byte":5650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"264643015","text":"from utils.logger import Logger\nfrom utils.data import create_vrm_dataset\nfrom utils.word2vec import load_word2vec_wv\nfrom models.simple_keras import SimpleKeras\n\n# Variables\nmodel_name = \"180914-lstm-128-presumption\"\ninput_fn = '../dataset/vrm/vrm-single-tokenized-v3.json'\nmodel_fn = '../results/GoogleNews-vectors-negative300.bin'\n\n# Initiate Logger\nlogger = Logger(model_name)\n\n# Load word2vec\nlogger.console(\"Load word2vec embedding...\")\nwv = load_word2vec_wv(model_fn)\n\n# Initiate Keras model\nlogger.console(\"Initiate Simple Keras...\")\nsimple_keras = SimpleKeras(model_name, logger)\n\n# Create Training dataset\ndata = create_vrm_dataset(input_fn, wv, logger, target_vrm=['K', 'E', 'D', 'Q'])\n\n# Train keras model\nsimple_keras.train(data, epoch=10, save_per=10)","sub_path":"bin/keras-runner.py","file_name":"keras-runner.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"496695181","text":"from django.urls import path\n\n\nfrom . import views\n\napp_name = \"orgApplication\"\n\nurlpatterns = [\n path('', views.home, name='org'),\n path('',views.OrgDetailView,name='org-detail'),\n path('orgadmin/',views.orgnaziation_redirect,name='org-admin'),\n path(\"orgadmin/reg/\", views.self_org, name='self_org'),\n path(\"orgadmin/orgprofile/\", views.organizationProfile, name='org_profile'),\n path('orgadmin/orgprofile/',views.organizationProfile,name='org_profile_detail'),\n path('orgadmin/orgprofile//edit/',views.edit_org, name='org_edit'),\n \n path(\"orgadmin/orgprofile//orgprojectcreate/\", views.org_project_create_view, name='org-project-create'),\n path(\"org-project/\", views.org_project_view, name='org-project-list'),\n path(\"org-project//edit\", views.org_project_edit, name='org_project_view_edit'),\n path(\"org-project/\", views.org_project_details, name='org_project_view_details'),\n path(\"org-project/public/\", views.org_project_public_details, name='org_project_public_view_details'),\n path(\"org-project//delete\", views.org_project_delete_view, name='org_project_delete'),\n \n \n path(\"search/\", views.search, name='search'),\n path('ajax/load-districts/', views.load_district, name='ajax_load_districts'),\n path('ajax/load-thanas/', views.load_thana, name='ajax_load_thana'),\n path('ajax/load-divisions/', views.load_thana, name='ajax_load_division'),\n path('ajax/get_org_profile_list/', views.get_org_profile_list, name='ajax_get_org_profile_list'),\n\n \n]\n","sub_path":"orgApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"349780702","text":"# OLD SCRIPT\n# This is an old script, please use seobot.py instead for better results.\nimport time\nimport socks\nimport os\nfrom stem import Signal\nfrom 
stem.control import Controller\n\n\nsocks.set_default_proxy(socks.PROXY_TYPE_SOCKS5, \"127.0.0.1\", 9150)\nprint(\"Proxy == Local Host at 127.0.0.1 port 9150.\")\ntime.sleep(1)\n\nsocks_on = socks.socksocket()\nprint(\"Secure Socket Layer initialized with PySocks ...\")\ntime.sleep(1)\n\n\ndef tor_start():\n try:\n os.system('open /Applications/TorBrowser.app')\n except Exception as e:\n print(str(e), 'path fail')\n\n\ndef ip_switch():\n with Controller.from_port(port=9151) as controller:\n controller.authenticate()\n controller.signal(Signal.NEWNYM)\n\n\ntor_start()\ntime.sleep(30)\n\nfor x in range(100):\n ip_switch()\n time.sleep(30)\n\n\nquit()\n","sub_path":"oldseobot.py","file_name":"oldseobot.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"226710556","text":"import argparse\nimport jinja2\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument ('--hostname', help=\"Switch hostname\", required=True)\n\nparser.add_argument ('--loopback', help=\"'loopback port number' 'ip address mask' 'ipv6 address' {'enable'|'disable'} 'description'\", action=\"append\", nargs=6)\n\nparser.add_argument ('--portchannel', help=\"'port-channel port number' 'description' 'switchport trunk encapsulation' 'switchport trunk allowed vlan' 'switchport mode' 'switchport mode {nonegotiate|\"\"}' 'load-interval' 'mtu' {'shutdown'|'no shutdown'} \", action=\"append\", nargs=9)\n\nparser.add_argument ('--fastethernet', help=\"'fast ethernet port number' 'ip vrf forwarding' 'ip address mask | \"\" ' '{'shutdown'|'no shutdown'} \", action=\"append\", nargs=5)\n\nparser.add_argument ('--gigabit', help=\"'GigabitEthernet port number' 'description' 'switchport trunk encapsulation' 'switchport trunk allowed vlan' 'switchport mode' 'switchport mode {nonegotiate|\"\"}' 'load-interval' 'channel-group {number active|passive}' 'media-type' 'spanning-tree' {'shutdown'|'no shutdown'} \", action=\"append\", nargs=12)\n\nparser.add_argument ('--vlan', help=\"'vlan port number' 'description' 'ip address mask' 'ipv6 address' {'enable'|'disable'} {'shutdown'|'no shutdown'}\", action=\"append\", nargs=7)\n\nargs = parser.parse_args()\n\n#Create new list of dictionaries instead of list of lists for LOOPBACK\nlist_of_dict = []\nfor unnamed_list in args.loopback:\n num = 0\n named_list = {}\n for names in 'id','ipaddress','mask','ipv6','ipv6_enable','description':\n named_list[names] = unnamed_list[num]\n num += 1\n list_of_dict.append(named_list)\nargs.loopback = list_of_dict\n\n# Create new list of dictionaries instead of list of lists for PORT-CHANNEL\nlist_of_dict = []\nfor unnamed_list in args.portchannel:\n num = 0\n named_list = {}\n for names in 'id','description','trunk_encapsulation','trunk_allowed_vlan','switchport_mode','switchport_nonegotiate','load_interval','mtu','shutdown':\n named_list[names] = unnamed_list[num]\n num += 1\n list_of_dict.append(named_list)\nargs.portchannel = list_of_dict\n\n#Create new list of dictionaries instead of list of lists for FASTETHERNET\nlist_of_dict = []\nfor unnamed_list in args.fastethernet:\n num = 0\n named_list = {}\n for names in 'id','vrf_forwarding','ipaddress','mask','shutdown':\n named_list[names] = unnamed_list[num]\n num += 1\n list_of_dict.append(named_list)\nargs.fastethernet = list_of_dict\n\n# Create new list of dictionaries instead of list of lists for GIGABITETHERNET\nlist_of_dict = []\nfor unnamed_list in args.gigabit:\n num = 0\n named_list = {}\n for names in 
'id','description','trunk_encapsulation','trunk_allowed_vlan','switchport_mode','switchport_nonegotiate','load_interval','media_type','channel_group','channel_group_mode','spanning_tree','shutdown':\n        named_list[names] = unnamed_list[num]\n        num += 1\n    list_of_dict.append(named_list)\nargs.gigabit = list_of_dict\n\n# Create new list of dictionaries instead of list of lists for VLAN\nlist_of_dict = []\nfor unnamed_list in args.vlan:\n    num = 0\n    named_list = {}\n    for names in 'vlanid','description','ipaddress','mask','ipv6','ipv6_enable','shutdown':\n        named_list[names] = unnamed_list[num]\n        num += 1\n    list_of_dict.append(named_list)\nargs.vlan = list_of_dict\n\n\n#Here jinja2 starts\ntemplateLoader = jinja2.FileSystemLoader(searchpath=\"templates/\")\ntemplateEnv = jinja2.Environment( loader=templateLoader )\nTEMPLATE_FILE = \"cisco.jinja\"\ntemplate = templateEnv.get_template( TEMPLATE_FILE )\ntemplateVars = { \n\t\t'hostname' : args.hostname,\n        'loopback' : args.loopback,\n\t\t'portchannel' : args.portchannel,\n\t\t'fastethernet': args.fastethernet,\n\t\t'gigabit' : args.gigabit,\n \t\t'vlan' : args.vlan\n\t }\noutputText = template.render( templateVars )\n\n#print outputText\n\nf = open('output/cisco', 'w')\nf.write(outputText)\nf.close()\n","sub_path":"cisco.py","file_name":"cisco.py","file_ext":"py","file_size_in_byte":3882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"454059107","text":"import re,requests\nheaders = {\n    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',\n    'origin': 'https://y.qq.com',\n    'referer': 'https://y.qq.com/'\n}\nurl = 'https://c.y.qq.com/lyric/fcgi-bin/fcg_query_lyric_yqq.fcg'\nparams = {\n    'nobase64':'1',\n    'musicid': '5105986',\n    '-': 'jsonp1',\n    'g_tk_new_20200303': '5381',\n    'g_tk': '5381',\n    'loginUin': '0',\n    'hostUin': '0',\n    'format': 'json',\n    'inCharset': 'utf8',\n    'outCharset': 'utf-8',\n    'notice': '0',\n    'platform': 'yqq.json',\n    'needNewCodeo': '0'\n}\nr = requests.get(url,headers=headers,params=params)\njs = r.json()\nmsc_tag = js['lyric']\nfor i in msc_tag.split(' '):\n    lyric = re.sub(\"[A-Za-z0-9\\\\!\\\\%\\\\[\\\\]\\\\,\\\\。\\\\&\\\\#\\\\;]\",\"\",i)\n    if lyric.strip():\n        print(lyric)\n","sub_path":"python/webcrawler/5/msc_lyc.py","file_name":"msc_lyc.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"164283801","text":"###########################################################################\n# \n#  Copyright 2019 Google Inc.\n#\n#  Licensed under the Apache License, Version 2.0 (the \"License\");\n#  you may not use this file except in compliance with the License.\n#  You may obtain a copy of the License at\n#\n#      https://www.apache.org/licenses/LICENSE-2.0\n#\n#  Unless required by applicable law or agreed to in writing, software\n#  distributed under the License is distributed on an \"AS IS\" BASIS,\n#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#  See the License for the specific language governing permissions and\n#  limitations under the License.\n#\n###########################################################################\n\n'''\nConversion Upload Sheets\n\nMove from Sheets to DCM.\n\nSpecify a DCM Account ID, Floodlight Activity ID and Conversion Type.\n\nInclude Sheets url, tab, and range, omit headers in range.\n\nColumns: Ordinal, timestampMicros, encryptedUserId | encryptedUserIdCandidates | gclid | mobileDeviceId\n\nInclude encryption information if using encryptedUserId or encryptedUserIdCandidates.\n\n'''\n\nfrom starthinker_airflow.factory import DAG_Factory\n \nUSER_CONN_ID = \"google_cloud_default\" # The connection to use for user authentication.\nGCP_CONN_ID = \"\" # The connection to use for service authentication.\n\nINPUTS = {\n  'dcm_account': '',\n  'floodlight_activity_id': '',\n  'floodlight_conversion_type': 'encryptedUserId',\n  'encryption_entity_id': '',\n  'encryption_entity_type': 'DCM_ACCOUNT',\n  'encryption_entity_source': 'DATA_TRANSFER',\n  'sheet_url': '',\n  'sheet_tab': '',\n  'sheet_range': '',\n}\n\nTASKS = [\n  {\n    'conversion_upload': {\n      'auth': 'user',\n      'account_id': {\n        'field': {\n          'name': 'dcm_account',\n          'kind': 'string',\n          'order': 0,\n          'default': ''\n        }\n      },\n      'activity_id': {\n        'field': {\n          'name': 'floodlight_activity_id',\n          'kind': 'integer',\n          'order': 1,\n          'default': ''\n        }\n      },\n      'conversion_type': {\n        'field': {\n          'name': 'floodlight_conversion_type',\n          'kind': 'choice',\n          'order': 2,\n          'choices': [\n            'encryptedUserId',\n            'encryptedUserIdCandidates',\n            'gclid',\n            'mobileDeviceId'\n          ],\n          'default': 'encryptedUserId'\n        }\n      },\n      'encryptionInfo': {\n        'encryptionEntityId': {\n          'field': {\n            'name': 'encryption_entity_id',\n            'kind': 'integer',\n            'order': 3,\n            'default': ''\n          }\n        },\n        'encryptionEntityType': {\n          'field': {\n            'name': 'encryption_entity_type',\n            'kind': 'choice',\n            'order': 4,\n            'choices': [\n              'ADWORDS_CUSTOMER',\n              'DBM_ADVERTISER',\n              'DBM_PARTNER',\n              'DCM_ACCOUNT',\n              'DCM_ADVERTISER',\n              'ENCRYPTION_ENTITY_TYPE_UNKNOWN'\n            ],\n            'default': 'DCM_ACCOUNT'\n          }\n        },\n        'encryptionSource': {\n          'field': {\n            'name': 'encryption_entity_source',\n            'kind': 'choice',\n            'order': 5,\n            'choices': [\n              'AD_SERVING',\n              'DATA_TRANSFER',\n              'ENCRYPTION_SCOPE_UNKNOWN'\n            ],\n            'default': 'DATA_TRANSFER'\n          }\n        }\n      },\n      'sheets': {\n        'url': {\n          'field': {\n            'name': 'sheet_url',\n            'kind': 'string',\n            'order': 9,\n            'default': ''\n          }\n        },\n        'tab': {\n          'field': {\n            'name': 'sheet_tab',\n            'kind': 'string',\n            'order': 10,\n            'default': ''\n          }\n        },\n        'range': {\n          'field': {\n            'name': 'sheet_range',\n            'kind': 'string',\n            'order': 11,\n            'default': ''\n          }\n        }\n      }\n    }\n  }\n]\n\nDAG_FACTORY = DAG_Factory('conversion_upload_from_sheets', { 'tasks':TASKS }, INPUTS)\nDAG_FACTORY.apply_credentails(USER_CONN_ID, GCP_CONN_ID)\nDAG = DAG_FACTORY.execute()\n\nif __name__ == \"__main__\":\n  DAG_FACTORY.print_commandline()\n","sub_path":"starthinker_airflow/dags/conversion_upload_from_sheets_dag.py","file_name":"conversion_upload_from_sheets_dag.py","file_ext":"py","file_size_in_byte":4565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"330504536","text":"# Definition for a binary tree node.\n# class TreeNode:\n#\tdef __init__(self, x):\n#\t\tself.val = x\n#\t\tself.left = 
None\n#\t\tself.right = None\n\n\nclass Solution:\n\tdef isSymmetric(self, root):\n\t\tif not root:\n\t\t\treturn True\n\t\treturn self.isSymmetricHelper(root.left, root.right)\n\n\tdef isSymmetricHelper(self, left, right):\n\t\tif not (left or right):\n\t\t\treturn True\n\t\telif (not left and right) or (not right and left):\n\t\t\treturn False\n\t\telse:\n\t\t\treturn left.val == right.val and \\\n\t\t\t\t self.isSymmetricHelper(left.left, right.right) and \\\n\t\t\t\t self.isSymmetricHelper(left.right, right.left)\n\t\t\n\n\t# convert two subtree to arraies, and compare the two array\n\tdef isSymmetric2(self, root):\n\t\tif root == None:\n\t\t\treturn True\n\t\tif root.left == None and root.right == None:\n\t\t\treturn True\n\t\tif root.left == None or root.right == None:\n\t\t\treturn False\n\t\tarr1 = isSymmetricHelper1(root.left)\n\t\tarr2 = isSymmetricHelper2(root.right)\n\t\treturn arr1 == arr2\n\n\ndef isSymmetricHelper1(root):\n\tqueue = [root]\n\tresult = [queue[0].val]\n\twhile queue:\n\t\tif queue[0].left:\n\t\t\tqueue.append(queue[0].left)\n\t\t\tresult.append(queue[0].left.val)\n\t\telse:\n\t\t\tresult.append(\"null\")\n\t\tif queue[0].right:\n\t\t\tqueue.append(queue[0].right)\n\t\t\tresult.append(queue[0].right.val)\n\t\telse:\n\t\t\tresult.append(\"null\")\n\t\tqueue.pop(0)\n\treturn result\n\ndef isSymmetricHelper2(root):\n\tqueue = [root]\n\tresult = [queue[0].val]\n\twhile queue:\n\t\tif queue[0].right:\n\t\t\tqueue.append(queue[0].right)\n\t\t\tresult.append(queue[0].right.val)\n\t\telse:\n\t\t\tresult.append(\"null\")\n\t\tif queue[0].left:\n\t\t\tqueue.append(queue[0].left)\n\t\t\tresult.append(queue[0].left.val)\n\t\telse:\n\t\t\tresult.append(\"null\")\n\t\tqueue.pop(0)\n\treturn result\n\n\"\"\"\ndef stringToTreeNode(input):\n input = input.strip()\n input = input[1:-1]\n if not input:\n return None\n\n inputValues = [s.strip() for s in input.split(',')]\n root = TreeNode(int(inputValues[0]))\n nodeQueue = [root]\n front = 0\n index = 1\n while index < len(inputValues):\n node = nodeQueue[front]\n front = front + 1\n\n item = inputValues[index]\n index = index + 1\n if item != \"null\":\n leftNumber = int(item)\n node.left = TreeNode(leftNumber)\n nodeQueue.append(node.left)\n\n if index >= len(inputValues):\n break\n\n item = inputValues[index]\n index = index + 1\n if item != \"null\":\n rightNumber = int(item)\n node.right = TreeNode(rightNumber)\n nodeQueue.append(node.right)\n return root\n\ndef main():\n import sys\n import io\n def readlines():\n for line in io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8'):\n yield line.strip('\\n')\n\n lines = readlines()\n while True:\n try:\n line = next(lines)\n root = stringToTreeNode(line);\n \n ret = Solution().isSymmetric(root)\n\n out = (ret);\n print(out)\n except StopIteration:\n break\n\nif __name__ == '__main__':\n main()\n\"\"\"","sub_path":"trees/101_Symmetric_Tree.py","file_name":"101_Symmetric_Tree.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"139359361","text":"import numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport math\nimport sys\nfrom imgaug import augmenters as iaa\nimport random\nfrom datetime import datetime\nimport os\n\ndef unpickle(file):\n import pickle\n with open(file, 'rb') as fo:\n cifar_dict = pickle.load(fo, encoding='bytes')\n return cifar_dict\n\n\nmeta = unpickle(r'E:\\University of Windsor\\Machine Learning and Pattern 
Recognition\\Project\\cifar-100-python\\meta')\ntrain = unpickle(r'E:\\University of Windsor\\Machine Learning and Pattern Recognition\\Project\\cifar-100-python\\train')\ntest = unpickle(r'E:\\University of Windsor\\Machine Learning and Pattern Recognition\\Project\\cifar-100-python\\test')\n\n\nClasses = pd.DataFrame(meta[b'fine_label_names'],columns = ['Classes'])\n\n\nX = train[b\"data\"]\n\n\nX = X.reshape(50000, 3, 32, 32).transpose(0,2,3,1).astype(\"uint8\")\n\n\nimg_num = np.random.randint(0,1000)\nplt.figure(figsize=(10,5))\nplt.xticks([])\nplt.yticks([])\nplt.imshow(X[img_num])\nClasses.iloc[train[b'fine_labels'][img_num]]\nplt.show()\n\n\nplt.figure(figsize=(10,5))\nnum_images_row = 4\nnum_images_column = 4\nimg_nums = np.random.randint(0,len(X),num_images_row*num_images_column)\n\nf, axarr = plt.subplots(num_images_row,num_images_column)\n\nfor i in range(0,num_images_row):\n for j in range(0,num_images_column):\n axarr[i,j].imshow(X[img_nums[(i*num_images_column)+j]])\n #axarr[i,j].set_title(str(Classes.iloc[train[b'fine_labels'][img_nums[(i+1)*(j+1)-1]]]).split()[1])\n axarr[i,j].axis('off')\n\nplt.show()\n","sub_path":"DataVisualization.py","file_name":"DataVisualization.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"69453887","text":"import spi\n\ntext = \\\n'''\nPROGRAM Part12;\nVAR\n a : INTEGER;\n\nPROCEDURE P1;\nVAR\n a : REAL;\n k : INTEGER;\n\n PROCEDURE P2;\n VAR\n a, z : INTEGER;\n BEGIN {P2}\n z := 777;\n END; {P2}\n\nBEGIN {P1}\n\nEND; {P1}\n\nBEGIN {Part12}\n a := 10;\nEND. {Part12}\n'''\n\nlexer = spi.Lexer(text)\nparser = spi.Parser(lexer)\ntree = parser.parse()\nsymtab_builder = spi.SymbolTableBuilder()\nsymtab_builder.visit(tree)\nprint(symtab_builder.symtab)\n\ninterpreter = spi.Interpreter(tree)\nresult = interpreter.interpret()\nprint(interpreter.GLOBAL_MEMORY )","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"636814888","text":"# https://leetcode.com/problems/find-all-numbers-disappeared-in-an-array/\n# // Time Complexity : o(n)\n# // Space Complexity : o(1)\n# // Did this code successfully run on Leetcode : yes\n# // Any problem you faced while coding this : None\n#\n#\n# // Your code here along with comments explaining your approach\n\nclass Solution:\n def findDisappearedNumbers(self, nums: List[int]) -> List[int]:\n result = []\n for i in range(len(nums)):\n temp = nums[i]\n if temp < 0:\n temp *=-1\n if nums[temp-1] > 0:\n nums[temp-1]*=-1\n for i in range(len(nums)):\n if nums[i] > 0:\n result.append(i+1)\n return result","sub_path":"solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"75606568","text":"import serial\nimport time\nimport configs\nimport threading\nfrom my_print import my_print\n\nclass GPS(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n self.thread_name = 'GPS'\n self.ser = serial.Serial(\"/dev/ttyAMA0\", 115200, timeout=1)\n self.ser.write('AT+GPS=1\\r'.encode())\n time.sleep(0.2)\n self.send('AT+CGATT=1\\r')\n time.sleep(0.2)\n self.send('AT+CGDCONT=1,\"IP\",\"CMNET\"\\r')\n time.sleep(0.2)\n self.send('AT+CGACT=1,1\\r')\n time.sleep(1)\n self.ser.read(self.ser.inWaiting()).decode()\n # 
my_print(self,self.ser.read(self.ser.inWaiting()).decode())\n        # print('[GPS] - ',self.ser.read(self.ser.inWaiting()).decode())\n        my_print(self,'init ready.')\n        # print('[GPS] - init ready.')\n\n    def run(self) -> None:\n        self.get_location()\n\n    def send(self, str):\n        self.ser.write(str.encode())\n\n    def read_location(self):\n        line_data = self.ser.readline().decode()\n        line_head = line_data.split(',')[0]\n        location = None\n        if len(line_data) != 0 and (line_head == \"$GNRMC\"):\n            location = line_data\n        return location\n\n    def update_location(self, location_N, location_E):\n        url = configs.LOCATION_HOST_URL + '?latitude=' + str(location_N) + '&longitude=' + str(location_E)\n        s = 'AT+HTTPGET=\\\"%s\\\"\\r' % url\n        # print(s)\n        self.send(s)\n\n    def get_location(self):\n        self.send('AT+GPSRD=5\\r')\n        location = None\n        while True:\n            while True:\n                location = self.read_location()\n                if location is not None:\n                    break\n            latitude = location.split(',')[3]\n            longitude = location.split(',')[5]\n            # decimal part of latitude\n            la_mm = float(latitude[2:-1]) / 60\n            lo_mm = float(longitude[3:-1]) / 60\n            new_latitude = float(latitude[0:2]) + la_mm\n            new_longitude = float(longitude[0:3]) + lo_mm\n            my_print(self,new_latitude, new_longitude)\n            self.update_location(new_latitude, new_longitude)\n            my_print(self,'Update success!')\n            time.sleep(20)\n\n\nif __name__ == '__main__':\n    UART_func_class = GPS()\n    UART_func_class.get_location()\n","sub_path":"gps.py","file_name":"gps.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"539213623","text":"class RandomListNode:\n    def __init__(self, x):\n        self.label = x\n        self.next = None\n        self.random = None\n\n\n\nclass Solution:\n    # @param head: A RandomListNode\n    # @return: A RandomListNode\n    def copyRandomList(self, head):\n        # write your code here\n        self.copy_next(head)\n        self.copy_random(head)\n        new_head = self.split_list(head)\n        return new_head\n    \n    def copy_next(self, head):\n        while head:\n            new = RandomListNode(head.label)\n            new.next = head.next\n            head.next = new \n            head = head.next.next \n    \n    def copy_random(self, head):\n        while head:\n            new = head.next \n            if head.random:\n                new.random = head.random.next\n            head = head.next.next \n    \n    def split_list(self, head):\n        if not head:\n            return None\n        \n        dummy = RandomListNode(-1)\n        dummy.next = head.next\n        new = dummy.next\n        head.next = head.next.next\n        head = head.next\n        \n        while head:\n            new.next = head.next\n            head.next = head.next.next\n            head = head.next \n            new = new.next\n        return dummy.next\n    \none = RandomListNode(-1)\ntwo = RandomListNode(1)\none.next = two \n\nSolution().copyRandomList(one)","sub_path":"105 copy list with random pointer.py","file_name":"105 copy list with random pointer.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"253416317","text":"import tensorlayer as tl\nfrom tensorlayer.layers import *\nimport numpy as np\n\nepoch = 50000\nbatch_size = 1\nlearning_rate = 10e-5\nsave_step = 500\n\n\n# def upscale2d(x):\n#     x = x.outputs\n#     size = x.get_shape().as_list()\n#     x = tf.image.resize_nearest_neighbor(x, (size[1]*2, size[2]*2))\n#     x = InputLayer(x, name='upscale_inputs')\n#     tl.layers.set_name_reuse(True)\n#     return x\n\nclass upscale2d(Layer):\n    def __init__(\n        self,\n        layer = None,\n        name = 'upscale2d',\n    ):\n        Layer.__init__(self, name = name)\n        self.inputs = layer.outputs\n        size = self.inputs.get_shape().as_list()\n        self.inputs = 
tf.image.resize_nearest_neighbor(self.inputs,(size[1]*2,size[2]*2))\n self.outputs = self.inputs\n \n self.all_layers = list(layer.all_layers)\n self.all_params = list(layer.all_params)\n self.all_drop = dict(layer.all_drop)\n self.all_layers.extend( [self.outputs] )\n \n\n\n\ndef data_loader(filename):\n filename_queue = tf.train.string_input_producer([filename])\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n features = tf.parse_single_example(serialized_example,\n features={\n 'label': tf.FixedLenFeature([], tf.int64),\n 'img_raw': tf.FixedLenFeature([], tf.string),\n })\n\n img = tf.decode_raw(features['img_raw'], tf.uint8)\n img = tf.reshape(img, [256, 256, 3])\n img = tf.cast(img, tf.float32) * (1. / 255) - 0.5\n label = tf.cast(features['label'], tf.float32)\n label = tf.reshape(label, [1])\n label = tf.cast(label, tf.float32)\n return img, label\n\n\ndef Encoder(Inputs, is_train=True, reuse=None):\n\n '''\n\n Input a 256x256 image and output a 16x16 tensor\n\n '''\n # C16 - C32 - C64 - C128 - C256 - C512 - C512\n # 256x256 - 256x256 - 128x128 - 128x128 - 64x64 - 32x32 - 16x16\n w_init = tf.random_normal_initializer(stddev=0.02)\n b_init = tf.constant_initializer(value=0.01)\n g_init = tf.random_normal_initializer(1., 0.02)\n\n with tf.variable_scope(\"G\", reuse=reuse) as vs:\n tl.layers.set_name_reuse(reuse)\n n = InputLayer(Inputs, name='in_E')\n #256x256\n net_c16 = Conv2d(n, 16, (4, 4), (2, 2), act=tf.nn.relu, padding='SAME', W_init=w_init,\n b_init=b_init, name='g_c1')\n net_bn1 = BatchNormLayer(net_c16, act=tf.nn.relu, gamma_init=g_init, is_train=is_train, name='g_bn1')\n #256x256\n net_c32 = Conv2d(net_bn1, 32, (4, 4), (2, 2), act=tf.nn.relu, padding='SAME', W_init=w_init,\n b_init=b_init, name='g_c2')\n net_bn2 = BatchNormLayer(net_c32, act=tf.identity, gamma_init=g_init, is_train=is_train, name='g_bn2')\n #128x128\n net_c64 = Conv2d(net_bn2, 64, (4, 4), (2, 2), act=tf.nn.relu, padding='SAME', W_init=w_init,\n b_init=b_init, name='g_c3')\n net_bn3 = BatchNormLayer(net_c64, act=tf.identity, gamma_init=g_init, is_train=is_train, name='g_bn3')\n #128x128\n net_c128 = Conv2d(net_bn3, 128, (4, 4), (2, 2), act=tf.nn.relu, padding='SAME', W_init=w_init,\n b_init=b_init, name='g_c4')\n net_bn4 = BatchNormLayer(net_c128, act=tf.identity, gamma_init=g_init, is_train=is_train, name='g_bn4')\n #64x64\n net_c256 = Conv2d(net_bn4, 256, (4, 4), (2, 2), act=tf.nn.relu, padding='SAME', W_init=w_init,\n b_init=b_init, name='g_c5')\n net_bn5 = BatchNormLayer(net_c256, act=tf.identity, gamma_init=g_init, is_train=is_train, name='g_bn5')\n #32x32\n net_c512 = Conv2d(net_bn5, 512, (4, 4), (2, 2), act=tf.nn.relu, padding='SAME', W_init=w_init,\n b_init=b_init, name='g_c6')\n net_bn6 = BatchNormLayer(net_c512, act=tf.identity, gamma_init=g_init, is_train=is_train, name='g_bn6')\n #16x16\n net_c512 = Conv2d(net_bn6, 512, (4, 4), (2, 2), act=tf.nn.relu, padding='SAME', W_init=w_init,\n b_init=b_init, name='g_c7')\n # variables = tf.contrib.framework.get_variables(vs)\n logits = net_c512.outputs\n output = net_c512\n\n return output, logits\n\n\ndef Decoder(inputs, is_train=True, reuse=None):\n\n '''\n\n input a 16x16 tensor and output a 256x256 image\n\n '''\n # C512+2N - C512+2N - C256+2N - C128+2N - C64+2N - C32+2N - C16+2N \n # x_de is a 256x256 image\n w_init = tf.random_normal_initializer(stddev=0.02)\n b_init = tf.constant_initializer(value=0.01)\n g_init = tf.random_normal_initializer(1., 0.02)\n\n with tf.variable_scope(\"D\", reuse=reuse) as vs:\n 
tl.layers.set_name_reuse(reuse)\n n = InputLayer(inputs, name='in_D')\n # 16x16\n net_c1 = Conv2d(n, 512, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init,\n b_init=b_init, name='d_c1')\n net_upsca1 = upscale2d(net_c1,name= 'net_upsca1')\n #32x32\n net_c2 = Conv2d(net_upsca1, 512, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init,\n b_init=b_init, name='d_c2')\n net_upsca2 = upscale2d(net_c2,name= 'net_upsca2')\n # 64x64\n net_c3 = Conv2d(net_upsca2, 256, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init,\n b_init=b_init, name='d_c3')\n net_upsca3 = upscale2d(net_c3,name= 'net_upsca3')\n # 128x128\n net_c4 = Conv2d(net_upsca3, 128, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init,\n b_init=b_init, name='d_c4')\n net_upsca4 = upscale2d(net_c4,name= 'net_upsca4')\n # 128x128\n net_c5 = Conv2d(net_upsca4, 64, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init,\n b_init=b_init, name='d_c5')\n net_upsca5 = upscale2d(net_c5,name= 'net_upsca5')\n # 256x256\n net_c6 = Conv2d(net_upsca5, 32, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init,\n b_init=b_init, name='d_c6')\n net_upsca6 = upscale2d(net_c6,name= 'net_upsca6')\n # 256x256\n net_c7 = Conv2d(net_upsca6, 3, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init,\n b_init=b_init, name='d_c7')\n net_upsca7 = upscale2d(net_c7,name= 'net_upsca7')\n\n # variables = tf.contrib.framework.get_variables(vs)\n logits = net_upsca7.outputs\n output = net_upsca7\n return output, logits\n\n\ndef Discriminator(inputs, reuse=False):\n # Z = C512+2N\n # C512 + FC512 + FC112 + 1\n w_init = tf.random_normal_initializer(stddev=0.02)\n b_init = tf.constant_initializer(value=0.01)\n\n with tf.variable_scope(\"Di\", reuse=reuse) as vs:\n tl.layers.set_name_reuse(reuse)\n n = InputLayer(inputs, name='input_layer')\n net = FlattenLayer(n, name='flatten')\n net_1 = DenseLayer(net, n_units=512, act=tf.identity, W_init=w_init, b_init=b_init, name='dense_layer1')\n net_2 = DenseLayer(net_1, n_units=112, act=tf.identity, W_init=w_init, b_init=b_init, name=\"dense_layer2\")\n net_3 = DenseLayer(net_2, n_units=1, act=tf.identity, W_init=w_init, b_init=b_init, name='dense_layer3')\n\n # variables = tf.contrib.framework.get_variables(vs)\n logits = net_3.outputs\n output = net_3\n return output, logits\n\n\ndef main():\n iter_counter = 0\n real_image_data, label_data = data_loader('test.tfrecords')\n img_batch, label_batch = tf.train.shuffle_batch([real_image_data, label_data],\n batch_size=batch_size,\n capacity=200,\n min_after_dequeue=100\n )\n print(\"img_batch : %s\" % img_batch._shape)\n print(\"label_batch : %s\" % label_batch._shape)\n\n #==============================MODEL=====================================\n\n\n Enc_z, Enc_logits = Encoder(img_batch, is_train=True, reuse=False)\n # Enc_z_real, Enc_logits_real = Encoder(img_batch, is_train=False, reuse=True)\n\n Dis_z_fake, Dis_logits_fake = Discriminator(Enc_logits, reuse=False)\n\n Dec_z, Dec_logits = Decoder(Enc_logits, is_train=True, reuse=False)\n\n Dis_loss_fake = tl.cost.sigmoid_cross_entropy(Dis_logits_fake, label_batch, name='discriminator_loss_fake')\n Dis_loss = Dis_loss_fake\n\n Dec_loss = tl.cost.mean_squared_error(Dec_logits, img_batch)+0.5*tl.cost.sigmoid_cross_entropy(Dis_logits_fake,1-label_batch,name=None)\n\n Dec_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.5).minimize(Dec_loss)\n Dis_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.5).minimize(Dis_loss)\n\n # 
===============================TRAIN=====================================================\n\n with tf.Session() as sess:\n tl.layers.initialize_global_variables(sess)\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n load_enc_params = tl.files.load_npz(name='Enc_model.npz')\n load_dec_params = tl.files.load_npz(name='Dec_model.npz')\n tl.files.assign_params(sess, load_dec_params, Dec_z)\n tl.files.assign_params(sess, load_enc_params, Enc_z)\n\n img = sess.run([Dec_logits])\n img = img[0]\n tl.visualize.save_images(img, [1,1], 'predict.png')\n print('predict done !')\n\n coord.request_stop()\n coord.join(threads)\n sess.close()\n\nif __name__ == '__main__':\n main()\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":9796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"370636884","text":"'''\nAuthor: Ziwei Zheng\nDate: 04/11/2019\n'''\nimport copy\n\n# calculate how many linear conflict there are\ndef linearConflict(currState, goalState):\n count = 0\n resLst = [] #store all the linear conflict nodes\n tLst = [[],[],[]]\n goalLst = [[],[],[]]\n #to generate all initial index number\n for row in range(len(currState[0])):\n for col in range(len(currState)):\n tLst[row].append([row,col])\n\n #to calculate all the goal position in each node\n for row in range(len(currState[0])):\n for col in range(len(currState)):\n target = currState[row][col]\n goalLst[row].append(getGoalPosition(target, goalState)) #append each value's position in goalState into the the temparory list to compare the indexes later\n\n # examine if tj and tk are both in the same line\n # and tj is to the right of tk (cond1)\n # and goal position of tj is to the left of tk (cond2)\n\n # first check each row\n for i in range(3):\n for j in range(3):\n increment = 1\n while (j+increment < 3):\n cond0 = currState[i][j] != 0 and currState[i][j + increment] != 0\n cond1 = tLst[i][j][1] < tLst[i][j + increment][1] # check if tj is to the right of tk\n cond2 = goalLst[i][j][1] > goalLst[i][j + increment][1] # check if goal position of tj is to the left of tk\n cond3 = goalLst[i][j][0] == goalLst[i][j + increment][0] # check if goal position of tj is on the same line\n if (cond0 and cond1 and cond2 and cond3):\n resLst.append((currState[i][j], currState[i][j+increment]))\n count+=1\n increment += 1\n\n # second check each column\n for row in range(3):\n for col in range(3):\n increment = 1\n while (row+increment < 3):\n cond0 = currState[row][col] != 0 and currState[row + increment][col] != 0\n cond1 = tLst[row][col][0] < tLst[row + increment][col][0] # check if tj is to the down of tk\n cond2 = goalLst[row][col][0] > goalLst[row + increment][col][0] # check if goal position of tj is to the up of tk\n cond3 = goalLst[row][col][1] == goalLst[row + increment][col][1] # check if goal position of tj is on the same column\n if (cond0 and cond1 and cond2 and cond3):\n resLst.append((currState[row][col], currState[row+increment][col]))\n count+=1\n increment += 1\n # print(resLst)\n # print(\"Init [row, col]\", tLst)\n # print(\"Goal [row, col]: \",goalLst)\n return count\n\ndef getGoalPosition(target,glState):\n goalPosX = 0\n goalPosY = 0\n while (target != glState[goalPosX][goalPosY]):\n if (goalPosX < 2):\n goalPosX += 1 # increase column index until it reaches to rightmost column index\n else:\n goalPosX = 0 # start over from first column index when reach to the rightmost of last column\n goalPosY = 
(goalPosY + 1) % 3 # increase row index\n return [goalPosX,goalPosY] #return target's Position in Goal State\n\n\n\ndef manhattanSum(currState, glState):\n sum = 0\n #currPos = (0,0) #record current position, will be updated throughout the search\n #isDone = False #false if did not go through all nodes to find manhattan sum\n for row in range(len(currState[0])):\n for col in range(len(currState)):\n # the current value in current State, we want to find its position in goal state\n target = currState[row][col]\n #print(\"Target is\",target,\"\\n\")\n if (target != 0):\n goalPos = getGoalPosition(target, glState)\n sum += (abs(row-goalPos[0]) + abs(col-goalPos[1]))\n #print(\"Now the sum is\",sum,\"\\n\")\n return sum\n\n#convert string to int when read file\ndef strToInt(stateLst):\n print(stateLst)\n for lst in stateLst:\n for i in range(len(lst)):\n lst[i] = int(lst[i])\n\n# convert int to string when output file\ndef display(initTile, currTile, resultTile):\n step1 = \"\"\n step2 = \"\"\n res= \"\"\n pathList = []\n res += initTile.outputFormat()+ \"\\n\"+resultTile.outputFormat()+ \"\\n\" + str(resultTile.pathCost) + \"\\n\" + str(currTile.countGenerated) + \"\\n\"\n\n if resultTile.lastAction != None and resultTile.prevState != None:\n addToPath(resultTile.prevState, pathList)\n pathList.append((resultTile.lastAction, resultTile.prevState))\n\n for tile in pathList:\n step1 += str(tile[0]) + \" \"\n step2 += str(tile[1].funcCost) + \" \"\n res += step1 + \"\\n\" + step2 + \"\\n\"\n return res\n\ndef addToPath(currTile, path):\n if currTile.lastAction == None and currTile.prevState == None:\n return\n else:\n addToPath(currTile.prevState, path)\n path.append((currTile.lastAction, currTile.prevState))\n\nclass eightPuzzle:\n def __init__(self, initialNode, goalState):\n # initialize variables to track A start search process: queue visited nodes, goal state\n self.queueLst = [initialNode]\n self.visitedNodes = []\n self.goalState = goalState\n # initialize variable to count how many nodes generated and how many node has spanned\n self.countGenerated = 1\n self.countSpanned = 0\n\n def AstarSearch(self):\n result = self.queueLst[-1]\n while (result.currState != self.goalState):\n if (len(self.queueLst) == 0):\n return\n # We span the first node in the queue with the smallest function cost\n currNode = self.queueLst.pop()\n childLst = currNode.GenerateChildren()\n self.countSpanned += 1 #update how many nodes generated\n self.visitedNodes.append(currNode)\n # check if each child generated is already visited, if it isn't, add to our queue\n for child in childLst:\n isVisited = False\n for node in self.visitedNodes:\n if child.currState == node.currState:\n # we find the same existing tile in visited node\n isVisited = True\n if not isVisited:\n self.queueLst.append(child)\n self.countGenerated += 1\n self.queueLst.sort(key=lambda x : x.funcCost, reverse=True)\n result=self.queueLst[-1]\n return result\n\nclass state:\n '''Initialize the state variables'''\n def __init__(self, currState, goalState, pathCost, typeH=\"Y\", lastAction=None, prevState=None):\n self.currState = currState\n self.goalState = goalState\n self.pathCost = pathCost\n self.typeInput = typeH\n try:\n if (self.typeInput == \"Y\"):\n self.funcCost = manhattanSum(self.currState, goalState)+ 2 * linearConflict(self.currState, self.goalState) + self.pathCost\n elif (self.typeInput == \"N\"):\n self.funcCost = manhattanSum(self.currState, goalState) + self.pathCost\n except:\n print(\"Invalid Input, please try again\")\n 
self.lastAction = lastAction\n self.prevState = prevState\n '''Movement of the node will result in a new tile'''\n def left(self, targetPos):\n # node generated after left move\n newTile = copy.deepcopy(self.currState)\n newTile[targetPos[0]][targetPos[1]] = newTile[targetPos[0]][targetPos[1] - 1]\n newTile[targetPos[0]][targetPos[1] - 1] = 0\n leftChildNode = state(newTile, self.goalState, self.pathCost + 1, self.typeInput, \"L\", self)\n return leftChildNode\n\n def right(self, targetPos):\n # node generated after right move\n newTile = copy.deepcopy(self.currState)\n newTile[targetPos[0]][targetPos[1]] = newTile[targetPos[0]][targetPos[1] + 1]\n newTile[targetPos[0]][targetPos[1] + 1] = 0\n rightChildNode = state(newTile, self.goalState, self.pathCost + 1, self.typeInput, \"R\", self)\n return rightChildNode\n\n def up(self, targetPos):\n # node generated after up move\n newTile = copy.deepcopy(self.currState)\n newTile[targetPos[0]][targetPos[1]] = newTile[targetPos[0] - 1][targetPos[1]]\n newTile[targetPos[0] - 1][targetPos[1]] = 0\n upChildNode = state(newTile, self.goalState, self.pathCost + 1, self.typeInput, \"U\", self)\n return upChildNode\n\n def down(self, targetPos):\n # node generated after down move\n newTile = copy.deepcopy(self.currState)\n newTile[targetPos[0]][targetPos[1]] = newTile[targetPos[0] + 1][targetPos[1]]\n newTile[targetPos[0] + 1][targetPos[1]] = 0\n DownChildNode = state(newTile, self.goalState, self.pathCost + 1, self.typeInput, \"D\", self)\n return DownChildNode\n\n ''' find zero position in the state'''\n def findZeroPosition(self):\n for row in range(3):\n for col in range(3):\n if self.currState[row][col]== 0:\n return (row, col)\n\n\n ''' the children nodes generated by the spanning node'''\n def GenerateChildren(self):\n targetPos = self.findZeroPosition()\n newChildLst = []\n\n # depending on zero's position, we can move up, down, left, or right.\n if (targetPos[0] > 0):\n newChildNode=self.up(targetPos)\n newChildLst.append(newChildNode)\n\n if (targetPos[0] < 2):\n newChildNode=self.down(targetPos)\n newChildLst.append(newChildNode)\n\n if (targetPos[1] > 0):\n newChildNode=self.left(targetPos)\n newChildLst.append(newChildNode)\n\n if (targetPos[1] < 2):\n newChildNode=self.right(targetPos)\n newChildLst.append(newChildNode)\n\n return newChildLst\n\n # format our output\n def outputFormat(self):\n output = \"\"\n for row in self.currState:\n for val in row:\n output += str(val) + \" \"\n output += \"\\n\"\n return output\n\n\n\ndef main():\n # read inpute file name from user\n userFile = input(\"Please enter your file name: \")\n typeHeuristic = input(\"Would you like to add Linear Conflict? 
\\nPlease Enter (Y/N): \")\n file = open(userFile, \"r\")\n # format the string from input file\n allState = (file.read().rstrip()).split(\"\\n\")\n allState.remove(\"\")\n # initialize list to save initial state and goal state from input file\n initState = []\n goalState = []\n # seperate the string into initial state and goal state\n for rowInd in range(len(allState)):\n # the previous three line of info store into inite state\n if rowInd < 3:\n initState.append(allState[rowInd].split(\" \"))\n # the last three line of info store into goal state\n else:\n goalState.append(allState[rowInd].split(\" \"))\n # convert string into integer in each state\n strToInt(initState)\n strToInt(goalState)\n # store our initial state as our root tile\n rootTile = state(initState,goalState,0,typeHeuristic,None,None)\n problem = eightPuzzle(rootTile, goalState)\n result = problem.AstarSearch()\n resultStr = display(rootTile, problem, result)\n print(resultStr)\n file.close()\n # depending on heuristic type, generate different file name\n if typeHeuristic == \"Y\":\n userFile = \"LC\"+userFile\n if typeHeuristic == \"N\":\n userFile = \"ans\"+userFile\n # write files\n outputFile = open(userFile,\"w\")\n outputFile.write(resultStr)\n outputFile.close()\nmain()","sub_path":"8 Puzzle/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"357873054","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\nPE_0216\n\nInvestigating the primality of numbers of the form 2n^2-1\n\nCreated on Fri Dec 30 09:01:19 2016\n@author: mbh\n\"\"\"\n\nimport time\nimport numpy as np\n \ndef p216(limit):\n t=time.clock()\n primes=primesieve(int(1.5*limit))\n primes=np.array([x for x in primes if x%8==1 or x%8==7])\n az=np.ones(limit,dtype=bool)\n for a in primes:\n b=bsolve(int(a))\n az[a-b::a]=False\n az[a+b::a]=False \n trials=np.nonzero(az)[0][2:]\n print (len(trials)) \n print(time.clock()-t)\n\ndef bsolve(prime):\n \"\"\"returns solution b to 2b^2=1 mod prime\"\"\"\n x=ts(8,prime)\n y=primeLD(prime,-4,1)[1]\n solution= x*y%prime\n if solution>prime/2:\n return prime-solution\n return solution \n \ndef legendre_symbol(a,p):\n return pow(a,(p-1)//2,p)\n \ndef ts(n,p):\n \"\"\"Tonnelli-Shanks algorithm. returns R: R^2=n mod p\"\"\" \n #check first that a solution exists. Return 0 if not.\n# if legendre_symbol(n,p)==-1:\n# return 0\n if p%4==3:\n return pow(n,(p+1)//4,p) \n\n # this means p%4==1... 
\n #find Q,S: Q.2^s=p-1\n Q=p-1\n S=0\n while not Q%2:\n S+=1\n Q//=2 \n \n #find z - lowest quadratic non-residue of p, using Euler's criterion\n z=2\n while pow(z,(p-1)//2,p)==1:\n z+=1\n \n c=pow(z,Q,p)\n R=pow(n,(Q+1)//2,p)\n t=pow(n,Q,p)\n M=S\n while not t%p==1:\n i=1\n while not pow(t,pow(2,i),p)==1:\n i+=1\n b=pow(c,pow(2,M-i-1),p)\n R=b*R%p\n t=t*pow(b,2)%p\n c=pow(b,2,p)\n M=i\n return R\n \ndef isolve(a,b,c):\n \"\"\"solves linear diophantine equation ax +by = c\"\"\"\n q, r = divmod(a,b)\n if r == 0:\n return( [0,c//b] )\n else:\n sol = isolve( b, r, c )\n u = sol[0]\n v = sol[1]\n return( [ int(v), int(u - q*v) ] )\n\ndef primeLD(a,b,c):\n \"\"\"finds a solution to diophantine equation ax+by=c\"\"\"\n q,r=a//b,a%b\n if r==0:\n return 0,c//b\n u,v=primeLD(b,r,c)\n return v,u-q*v \n\ndef primesieve(n):\n \"\"\"return array of primes 2<=p<=n\"\"\"\n sieve=np.ones(n+1,dtype=bool)\n for i in range(2, int((n+1)**0.5+1)):\n if sieve[i]:\n sieve[2*i::i]=False\n return np.nonzero(sieve)[0][2:] \n \ndef isprime(n):\n \"\"\"Returns True if n is prime.\"\"\"\n if n==2 or n==3:\n return True\n if not n%2 or not n%3:\n return False\n i = 5\n w = 2\n while i * i <= n:\n if n % i == 0:\n return False\n i += w\n w = 6 - w\n return True\n \ndef ts2(a,p): \n \"\"\"Tonnelli-Shanks algorithm. returns R: R^2=n mod p\"\"\"\n \"\"\"Implementing Ezra Brown's description of the algorithm\"\"\"\n \n if legendre_symbol(a,p)==-1:\n return 0\n if p%4==3:\n return pow(a,(p+1)//4,p)\n \n s=p-1\n e=0\n while not s%2:\n e+=1\n s//=2 \n \n n=2\n while legendre_symbol(n,p):\n n+=1\n\n x = pow(a, (s + 1) // 2, p)\n b = pow(a, s, p)\n g = pow(n, s, p)\n r = e\n\n while True:\n t = b\n m = 0\n for m in range(r):\n if t == 1:\n break\n t = pow(t, 2, p)\n\n if m == 0:\n return x\n\n gs = pow(g, 2 ** (r - m - 1), p)\n g = (gs * gs) % p\n x = (x * gs) % p\n b = (b * g) % p\n r = m\n\n#http://eli.thegreenplace.net/2009/03/07/computing-modular-square-roots-in-python\n#implements the Tonnell-Shanks algo, from Ezra Brown's paper\n#Brown, E. (1999) ‘Square Roots from 1; 24, 51, 10 to Dan Shanks’, \n#The College MathematicsJournal, 30(2), pp. 82–95.\n \ndef modular_sqrt(a, p):\n \"\"\" Find a quadratic residue (mod p) of 'a'. p\n must be an odd prime.\n\n Solve the congruence of the form:\n x^2 = a (mod p)\n And returns x. Note that p - x is also a root.\n\n 0 is returned is no square root exists for\n these a and p.\n\n The Tonelli-Shanks algorithm is used (except\n for some simple cases in which the solution\n is known from an identity). This algorithm\n runs in polynomial time (unless the\n generalized Riemann hypothesis is false).\n \"\"\"\n # Simple cases\n #\n if legendre_symbol(a, p) != 1:\n return 0\n elif a == 0:\n return 0\n elif p == 2:\n return p\n elif p % 4 == 3:\n return pow(a, (p + 1) // 4, p)\n\n # Partition p-1 to s * 2^e for an odd s (i.e.\n # reduce all the powers of 2 from p-1)\n #\n s = p - 1\n e = 0\n while s % 2 == 0:\n s //= 2\n e += 1\n\n # Find some 'n' with a legendre symbol n|p = -1.\n # Shouldn't take long.\n #\n n = 2\n while legendre_symbol(n, p) != -1:\n n += 1\n\n # Here be dragons!\n # Read the paper \"Square roots from 1; 24, 51,\n # 10 to Dan Shanks\" by Ezra Brown for more\n # information\n #\n\n # x is a guess of the square root that gets better\n # with each iteration.\n # b is the \"fudge factor\" - by how much we're off\n # with the guess. 
The invariant x^2 = ab (mod p)\n # is maintained throughout the loop.\n # g is used for successive powers of n to update\n # both a and b\n # r is the exponent - decreases with each update\n #\n x = pow(a, (s + 1) // 2, p)\n b = pow(a, s, p)\n g = pow(n, s, p)\n r = e\n\n while True:\n t = b\n m = 0\n for m in range(r):\n if t == 1:\n break\n t = pow(t, 2, p)\n\n if m == 0:\n return x\n\n gs = pow(g, 2 ** (r - m - 1), p)\n g = (gs * gs) % p\n x = (x * gs) % p\n b = (b * g) % p\n r = m\n\n\ndef legendre_symbol(a, p):\n \"\"\" Compute the Legendre symbol a|p using\n Euler's criterion. p is a prime, a is\n relatively prime to p (if p divides\n a, then a|p = 0)\n\n Returns 1 if a has a square root modulo\n p, -1 otherwise.\n \"\"\"\n ls = pow(a, (p - 1) // 2, p)\n return -1 if ls == p - 1 else ls\n \ndef prime_factors(n):\n \"\"\"returns the prime factors of n\"\"\" \n i = 2\n factors = []\n while i * i <= n:\n if n % i:\n i += 1\n else:\n n //= i\n factors.append(i)\n if n > 1:\n factors.append(n)\n return factors \n \ndef bsolve2(prime):\n \"\"\"brute force way to find b\"\"\"\n for x in range(prime//2):\n if 2*x**2%prime==1:\n return x\n \ndef qr(n,p):\n for x in range(p):\n if x**2%p==n:\n return True\n return False\n \ndef bsolvetest(limit):\n primes=primesieve(limit)\n primes=np.array([x for x in primes if x%8==1 or x%8==7])\n print(len(primes))\n t=time.clock() \n for p in primes: \n modular_sqrt(8,p)\n print(time.clock()-t)\n t=time.clock() \n for p in primes: \n# ts(8,int(p))\n pass\n print(time.clock()-t)","sub_path":"PE_0216/PE_0216.py","file_name":"PE_0216.py","file_ext":"py","file_size_in_byte":6909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"70312921","text":"import numpy as np\nfrom pad import pad_repeat\n\ndef median(image, rad):\n length = 2*rad + 1\n\n newimage = image.copy()\n height, width = image.shape[:2]\n\n buffer_img = pad_repeat(image, rad)\n\n for i in range(height):\n for j in range(width):\n l = sorted([item for sublist in buffer_img[i:i+length, j:j+length].tolist() for item in sublist])\n newimage[i, j] = l[len(l) // 2]\n\n return newimage\n","sub_path":"median.py","file_name":"median.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"167245626","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\pywebuml\\initialize\\initialize_database.py\n# Compiled at: 2011-03-13 18:12:24\n\"\"\"\nInitialize the database.\n\"\"\"\nimport os, logging\nfrom pywebuml.main import db\nfrom pywebuml.initialize.parsers import utils\nfrom pywebuml.initialize.parsers.exceptions import ParserException\nconsole_handler = logging.StreamHandler()\nLOGGER = logging.getLogger(__name__)\nLOGGER.addHandler(console_handler)\n\nclass ParserExecuter(object):\n \"\"\" Class that check will files should be parsed, and\n if they should execute them to be parsed. 
Also saves the\n    parsed data into the database.\n\n    The attribute `found` has the number of files that should be\n    parsed, the `ignored` has the number of files that were ignored\n    because of several reasons, and `errors` has the number of files\n    that raise an exception while parsing them.\n\n    The ignored files are the ones that have a valid file extension (for\n    example foo.cs) but the parser didn't return any result.\n    \"\"\"\n    IGNORE_FOLDERS = [\n     '.svn',\n     '.hg']\n    VALID_FILE_EXTENSIONS = [\n     'cs',\n     'java']\n\n    def __init__(self):\n        \"\"\" Initialize the counters to 0. Also, creates the database.\n        \"\"\"\n        self.found = 0\n        self.ignored = 0\n        self.error = 0\n        db.create_all()\n\n    def parse(self, base_folder_path):\n        \"\"\" Checks the folder and the files of the `base_folder_path`\n        and parse the files.\n\n        :parameters:\n            `base_folder_path`: str\n                the base folder from where start parsing the files.\n        \"\"\"\n        self.parse_folder(base_folder_path)\n        LOGGER.info('Found %s files, where %s were ignored and %s could not be parsed' % (\n         self.found, self.ignored, self.error))\n\n    def parse_folder(self, folder_name):\n        files = os.listdir(folder_name)\n        files.sort()\n        for name in files:\n            full_path = os.path.join(folder_name, name)\n            if os.path.isdir(full_path):\n                if name not in self.IGNORE_FOLDERS:\n                    self.parse_folder(full_path)\n                else:\n                    LOGGER.debug('Ignoring folder: %s', full_path)\n            else:\n                self.parse_file(full_path)\n\n    def parse_file(self, filename):\n        file_extension = filename.split('.')[(-1)]\n        if file_extension not in self.VALID_FILE_EXTENSIONS:\n            LOGGER.debug('Ignore file: %s', filename)\n            return\n        else:\n            self.found += 1\n            LOGGER.debug('Parsing file: %s', filename)\n            try:\n                parser_instance = utils.get_parser(file_extension)\n                res = parser_instance.parse(filename)\n                if not res:\n                    self.ignored += 1\n                for value in res:\n                    db.session.add(value)\n\n                db.session.commit()\n            except ParserException as e:\n                self.error += 1\n                LOGGER.exception('Problem while parsing file: %s', filename)\n                db.session.rollback()\n            except Exception as e:\n                self.error += 1\n                LOGGER.exception('Unknown exception while parsing file: %s', filename)\n                db.session.rollback()\n\n            return","sub_path":"pycfiles/pywebuml-0.3.0-py2.7/initialize_database.py","file_name":"initialize_database.py","file_ext":"py","file_size_in_byte":3422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"64491960","text":"import itertools\n\ndef make_min_score(X, flavo_prices, option_prices):\n    crossest_score = 1000000000\n    diff_from_X = 100000000\n    for flavo_price in flavo_prices:\n        for options_number in range(len(option_prices) + 1):\n            options_orders = itertools.combinations(option_prices, options_number)\n            for options_order in options_orders:\n                options_sum_price = 0\n                for option_order in options_order:\n                    options_sum_price += int(option_order)\n                order_price = flavo_price + options_sum_price\n                if abs(X - order_price) <= diff_from_X:\n                    if diff_from_X == abs(X - order_price):\n                        if crossest_score > order_price:\n                            crossest_score = order_price\n                    else:\n                        crossest_score = order_price\n                    diff_from_X = abs(X - order_price)\n    return crossest_score\n\nS = int(input())\nanswers = []\n\nfor i in range(S):\n    space = input()\n    X = int(input())\n    N = int(input())\n    flavos = []\n    flavo_prices = []\n    for n in range(N):\n        flavo, flavo_price = input().split()\n        flavos.append(flavo)\n        flavo_prices.append(int(flavo_price))\n    M = int(input())\n    options = []\n    option_prices = []\n    for m in range(M):\n        option, option_price = 
input().split()\n options.append(option)\n option_prices.append(option_price)\n answer = make_min_score(X, flavo_prices, option_prices)\n answers.append(answer)\n\nfor s in range(S):\n print(\"Case #{}: {}\".format(s + 1, answers[s]))\n\n\n\n\n","sub_path":"python/test/gg/q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"102887702","text":"\"\"\"Restaurant rating lister.\"\"\"\nimport sys\nfrom random import choice\nimport os\n\nlist_of_files = os.listdir()\n\nfor index in range(len(list_of_files)):\n print(list_of_files[index], index)\n\ninput_file = int(input(\"Pick the number associated with the file. \"))\ninput_file = list_of_files[input_file]\n\n\n# file_name = sys.argv[1]\nrestaurant_info = {}\n\nfor line in open(input_file):\n line = line.rstrip().split(\":\")\n restaurant_info[line[0]] = line[1]\n\n\ndef get_user_input():\n print(\"If you would like to see all restaurants, type 1.\")\n print(\"If you would like to add a restaurant and its rating, type 2.\")\n print(\"If you would like to update a restaurant, type 3.\")\n print(\"If you want to quit, type 4.\")\n return input()\n\n\ndef test_user_rating(restaurant_name):\n new_rating = input(\"Please give us a rating from 1-5: \")\n\n while new_rating not in \"12345\":\n print(\"Please enter a number between 1 and 5.\")\n new_rating = input()\n\n restaurant_info[restaurant_name] = new_rating\n\n\nuser_input = get_user_input()\n\nwhile user_input != \"4\":\n\n if user_input == \"1\":\n for key in sorted(restaurant_info):\n print(\"{} is rated at {}.\".format(key, restaurant_info[key]))\n\n elif user_input == \"2\":\n new_restaurant = input(\"Please give us a restaurant name: \")\n test_user_rating(new_restaurant)\n\n elif user_input == \"3\":\n user_random = input(\"Do you want to update a random restaurant? 
y/n: \")\n\n if user_random.lower() == \"y\":\n random_restaurant = choice(list(restaurant_info.keys()))\n print(random_restaurant, \"is rated at\",\n restaurant_info[random_restaurant] + \".\")\n test_user_rating(random_restaurant)\n\n elif user_random.lower() == \"n\":\n print(\"These are the restaurants currently rated.\")\n restaurant_keys = list(restaurant_info.keys())\n\n for i in range(len(restaurant_keys)):\n print(restaurant_keys[i], i)\n\n user_number = int(input(\"Please enter the number associated with the restaurant you want to rate.\"))\n\n while user_number < 0 or user_number > len(restaurant_keys):\n print(\"Please enter a valid restaurant number.\")\n user_number = int(input())\n\n test_user_rating(restaurant_keys[user_number])\n\n user_input = get_user_input()\n","sub_path":"week-2/dicts-restaurant-rating/ratings.py","file_name":"ratings.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"513420403","text":"# -*- coding: utf-8 -*-\n\"\"\"\ndisplay_menu function\n\"\"\"\n#---------------------------------------Initialization------------------------------------------------------------\nimport numpy as np\nfrom helpingFunctions import *\n\ndef input_number(prompt):\n #input_number prompts user to input a number and assigns value to variable choice.\n #If it is not the case give error message to user.\n #Input: from user\n #Output: number (float)\n \n \n try:\n choice=float(input(prompt))\n \n \n #If input can be converted in a float print messgae\n except ValueError:\n print(\"Unfortunately this is not a valid input.\\n>>\")\n return 0\n return choice\n\n\ndef display_menu(options):\n #display_menu takes in an array of options and prints the options while assigning to each option a number(start: 1.). 
{"seq_id":"513420403","text":"# -*- coding: utf-8 -*-\n\"\"\"\ndisplay_menu function\n\"\"\"\n#---------------------------------------Initialization------------------------------------------------------------\nimport numpy as np\nfrom helpingFunctions import *\n\ndef input_number(prompt):\n    # input_number prompts the user to input a number and assigns the value to choice.\n    # If the input cannot be converted to a float, an error message is printed and\n    # 0 is returned (0 can never match a menu option, since numbering starts at 1).\n    # Input: from user\n    # Output: number (float)\n    try:\n        choice = float(input(prompt))\n    except ValueError:\n        print(\"Unfortunately this is not a valid input.\\n>>\")\n        return 0\n    return choice\n\n\ndef display_menu(options):\n    # display_menu takes an array of options and prints them, numbering each option\n    # starting at 1. It reads the choice via input_number and prints further guidance\n    # whenever the input is invalid.\n    # Input: from user\n    # Output: number (integer)\n\n    i = 0\n    # displaying the options\n    for element in options:\n        print(i + 1, \".\", element, \"\\n\")\n        i = i + 1\n    choice = -190  # sentinel value that can never equal a valid option number\n\n    # Error check: re-prompt until the input is one of the listed option numbers.\n    while not (np.any(choice == np.arange(len(options)) + 1)):\n        choice = input_number(\"Choose an option\\n>>\")\n\n        if not (np.any(choice == np.arange(len(options)) + 1)):\n            print(\"Please type in a number corresponding to the options\\n\")\n\n    return choice\n","sub_path":"display_menu.py","file_name":"display_menu.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"521915791","text":"import argparse\nimport os\nfrom PIL import Image\nimport numpy as np\nimport utils\nimport tensorflow as tf\n\n\ndef gray_preprocessing(args):\n    images_paths = os.listdir(args.img_dir)\n    session = tf.Session()  # one session for all images, instead of one per image\n    for name in images_paths:\n        image = Image.open(os.path.join(args.img_dir, name))\n        tf_image = tf.convert_to_tensor(np.array(image).astype(np.float32))\n        gray_image = utils.convert_to_gray(tf_image[None])[0]\n        gray = Image.fromarray(np.uint8(session.run(gray_image)))  # grayscale output, not blurred\n        gray.save(os.path.join(args.output_dir, name))\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--img-dir', type=str, required=True)\n    parser.add_argument('--output-dir', type=str, required=True)\n    args = parser.parse_args()\n    if not os.path.exists(args.output_dir):\n        os.makedirs(args.output_dir)\n    gray_preprocessing(args)\n","sub_path":"preprocessing/colorization_preprocessing.py","file_name":"colorization_preprocessing.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"241354547","text":"# -*- encoding=utf8 -*-\n__author__ = \"wanghuajun\"\n\nfrom airtest.core.api import *\n_project_root = os.path.abspath(os.path.join(os.path.abspath(__file__), \"../..\")) + \"/\"\n\neTestCaseState = {\"Function Not Found\": -1,\n                  \"UnInitialized\": 0,\n                  \"Function Configered\": 1,\n                  \"Test Successed\": 2,\n                  \"Test Failed\": 3}\n\nclass TestCase:\n\n    ID = None\n    name = \"Undefined\"\n    args = []\n    test_function = None\n\n    State = eTestCaseState[\"UnInitialized\"]\n\n    Result = None\n    Message = \"\"\n\n    # Initialization\n    def __init__(self, ID=None, name=\"Undefined\", args=[]):\n\n        if ID is None:\n            raise Exception(\"Invalid ID\", ID)\n\n        self.ID = ID\n        self.name = name\n        self.args = args\n        self.test_function = None\n        self.State = eTestCaseState[\"UnInitialized\"]\n\n    # String representation\n    def __str__(self):\n        result = ('ID:%d\\tName:%s\\tArgs:' % (self.ID, self.name)) + self.args.__str__()\n        result += \"\\nFunction: \" + self.test_function.__str__()\n        return result\n\n    # Set the test function\n    def setTestFunction(self, test_function=None):\n\n        self.test_function = test_function\n\n        if test_function is False:\n            self.State = eTestCaseState[\"Function Not Found\"]\n        else:\n            self.State = eTestCaseState[\"Function Configered\"]\n\n    ''' Run the test '''\n    def run_test(self):\n        self.test_function()\n\n    # [Helper] executed when the test starts running\n    def onTestBegin(self):\n        pass\n\n    # [Helper] executed when the test succeeds\n    def onTestSuccess(self):\n        self.State = eTestCaseState[\"Test Successed\"]\n        self.CaseSummarize_print()\n\n    # [Helper] executed when the test fails\n    def onTestFailed(self):\n        self.State = eTestCaseState[\"Test Failed\"]\n        self.CaseSummarize_print()\n\n    ''' Print the test summary '''\n    def CaseSummarize_print(self):\n        print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n        print(self.CaseSummarize_str())\n        print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n\n    ''' Build the test summary string '''\n    def CaseSummarize_str(self):\n        result = self.__str__() + \"\\n\"\n\n        if self.State == eTestCaseState[\"Function Not Found\"]:\n            result += \"Function Not Found\\n\"\n\n        if self.State == eTestCaseState[\"UnInitialized\"]:\n            result += \"UnInitialized\\n\"\n\n        if self.State == eTestCaseState[\"Function Configered\"]:\n            result += \"Function Configered\\n\"\n\n        if self.State == eTestCaseState[\"Test Successed\"]:\n            result += \"\\nTest Successed\\n\"\n            result += self.Message\n\n        if self.State == eTestCaseState[\"Test Failed\"]:\n            result += \"Test Failed\\n\"\n\n        return result\n","sub_path":"TestManager/TestCase.py","file_name":"TestCase.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"613664903","text":"def get_tree_max_id(value, list_id=None):\n    \"\"\"\n    Return the largest node id in the tree.\n    \"\"\"\n    if list_id is None:  # fresh list per call; a mutable default would be shared across calls\n        list_id = []\n\n    if not value:\n        return 0  # the first node id\n\n    if isinstance(value, list):\n        for content in value:  # content -> dict\n            try:\n                children = content['children']\n            except KeyError:\n                # TODO: return a proper error message\n                children = None\n\n            if children:\n                get_tree_max_id(children, list_id)\n\n            list_id.append(content['id'])\n\n    return max(list_id)\n\n\ndef get_file_size(size):\n    \"\"\"Format a byte count as a human-readable size.\n    \"\"\"\n\n    if size >= 1048576:\n        size = str(round(size / 1048576, 2)) + 'MB'\n    elif size >= 1024:\n        size = str(round(size / 1024, 2)) + 'KB'\n    else:\n        size = str(size) + 'Byte'\n\n    return size\n\ndef getNodeIdList(nodeId, treeBody):\n    nodeList = []\n    for each in treeBody:\n        if isinstance(nodeId, list):\n            for eachNode in nodeId:\n                if each['id'] == int(eachNode):\n                    nodeList.append(each['id'])\n                    if len(each['children']) > 0:\n                        nodeList.extend(getAllChildId(each['children']))\n                    break\n                else:\n                    nodeList.extend(getNodeIdList(nodeId, each['children']))\n        else:\n            if each['id'] == int(nodeId):\n                nodeList.append(each['id'])\n                if len(each['children']) > 0:\n                    nodeList.extend(getAllChildId(each['children']))\n                break\n            else:\n                nodeList.extend(getNodeIdList(nodeId, each['children']))\n    return nodeList\n\ndef getAllChildId(treeList):\n    nodeList = []\n    for each in treeList:\n        nodeList.append(each['id'])\n        if isinstance(each['children'], list) and len(each['children']) > 0:\n            nodeList.extend(getAllChildId(each['children']))\n    return nodeList\n","sub_path":"fastrunner/utils/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
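A quick sanity check of the repaired get_tree_max_id from the record above (the tree shape — dicts with 'id' and 'children' keys — is taken from the code; the sample values are mine):

tree = [{'id': 1, 'children': [{'id': 3, 'children': []}]},
        {'id': 2, 'children': []}]
assert get_tree_max_id(tree) == 3
assert get_tree_max_id(tree) == 3  # safe to call twice now that list_id is no longer shared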
{"seq_id":"354406501","text":"from flask import Flask, request, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_marshmallow import Marshmallow\nfrom flask_bootstrap import Bootstrap\nfrom flask_login import UserMixin\nfrom sqlalchemy.orm.session import close_all_sessions\n\napp = Flask(__name__)\ntry:\n    app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:iaoeng@localhost:5432/simglucose'\nexcept:\n    print(\"connection error\")\n\napp.config['SECRET_KEY'] = '8BYkEfBA6O6donzWlSihBXox7C0sKRed'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\nma = Marshmallow(app)\nBootstrap(app)\n\n# close_all_sessions()\n# db.session.close_all()\n# db.drop_all()\n\n\nclass Result(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    patient_id = db.Column(db.String, nullable=False)\n    time = db.Column(db.DateTime, nullable=False)\n    reward = db.Column(db.Float, nullable=False)\n    cgm = db.Column(db.Float)\n    cho = db.Column(db.Float)\n    insulin = db.Column(db.Float)\n    bg = db.Column(db.Float)\n    lbgi = db.Column(db.Float)\n    hbgi = db.Column(db.Float)\n    risk = db.Column(db.Float)\n    experiment_id = db.Column(db.Integer, db.ForeignKey(\n        \"experiment.id\"), nullable=False)\n\n    def __repr__(self):\n        return f\"Results(Time = {self.time}, Patient_ID = {self.patient_id}), BG = {self.bg}, \\\n            CGM = {self.cgm}, CHO = {self.cho}, reward = {self.reward}\\\n            INSULIN = {self.insulin}, LBGI = {self.lbgi}, \\\n            HBGI = {self.hbgi}, RISK = {self.risk}, experiment_id = {self.experiment_id}\"\n\n\n# db.session.query(Result).delete()\n# db.session.commit()\n\n# Schema\n\n\nclass ResultSchema(ma.Schema):\n    class Meta:\n        # 'id' matches the Result model (there is no 'result_id' column), and\n        # 'reward' is included so serialized rows carry it as well.\n        fields = ('id', 'patient_id', 'time', 'reward', 'bg', 'cgm',\n                  'cho', 'lbgi', 'hbgi', 'insulin', 'risk', \"experiment_id\")\n\n\nclass Experiment(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    experiment_name = db.Column(db.String, unique=True, nullable=False)\n    time = db.Column(db.DateTime, nullable=False)\n    results = db.relationship(\"Result\", backref=\"experiment\", lazy=True)\n    user_id = db.Column(db.Integer, db.ForeignKey(\"user.id\"), nullable=False)\n\n\nclass User(UserMixin, db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    username = db.Column(db.String, unique=True, nullable=False)\n    email = db.Column(db.String, unique=True, nullable=False)\n    password = db.Column(db.String, nullable=False)\n    experiment = db.relationship(\"Experiment\", backref=\"user\", lazy=True)\n\n\n# db.create_all()\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"342349819","text":"import mock\nimport unittest\n\nfrom smqtk.exceptions import ReadOnlyError\nfrom smqtk.representation.key_value.postgres import PostgresKeyValueStore\n\n\nif PostgresKeyValueStore.is_usable():\n\n    class TestPostgresKeyValueStore (unittest.TestCase):\n\n        # noinspection PyUnusedLocal\n        # - purposefully not used mock objects\n        @mock.patch('smqtk.utils.postgres.get_connection_pool')\n        def test_remove_readonly(self, m_gcp):\n            \"\"\" Test that we cannot remove from a readonly instance. \"\"\"\n            s = PostgresKeyValueStore(read_only=True)\n\n            self.assertRaises(\n                ReadOnlyError,\n                s.remove, 0\n            )\n\n        # noinspection PyUnusedLocal\n        # - purposefully not used mock objects\n        @mock.patch('smqtk.utils.postgres.get_connection_pool')\n        def test_remove_invalid_key(self, m_gcp):\n            \"\"\"\n            Simulate a missing key and check that it results in a raised\n            KeyError.\n            \"\"\"\n            s = PostgresKeyValueStore()\n\n            # Pretend this store contains nothing.\n            s.has = mock.Mock(return_value=False)\n\n            self.assertRaises(\n                KeyError,\n                s.remove, 0\n            )\n\n        # noinspection PyUnusedLocal\n        # - purposefully not used mock objects\n        @mock.patch('smqtk.utils.postgres.get_connection_pool')\n        def test_remove(self, m_gcp):\n            \"\"\"\n            Simulate removing a value from the store. Checking executions on\n            the mock cursor.\n            \"\"\"\n            expected_key = 'test_remove_key'\n            expected_key_bytea_quoted = \\\n                PostgresKeyValueStore._py_to_bin(expected_key).getquoted()\n\n            # Cut out create table calls.\n            s = PostgresKeyValueStore(create_table=False)\n            # Pretend key exists in index.\n            s.has = mock.Mock(return_value=True)\n\n            # Cursor is created via a context manager (i.e. __enter__())\n            #: :type: mock.Mock\n            mock_execute = s._psql_helper.get_psql_connection().cursor()\\\n                .__enter__().execute\n\n            s.remove(expected_key)\n\n            # Only the removal query should have executed (table creation was skipped).\n            mock_execute.assert_called_once()\n            # Call should have been with provided key as converted to postgres\n            # bytea type.\n            self.assertRegexpMatches(mock_execute.call_args[0][0],\n                                     \"DELETE FROM .+ WHERE .+ LIKE .+\")\n            self.assertEqual(set(mock_execute.call_args[0][1].keys()),\n                             {'key_like'})\n            self.assertEqual(\n                mock_execute.call_args[0][1]['key_like'].getquoted(),\n                expected_key_bytea_quoted\n            )\n\n        # noinspection PyUnusedLocal\n        # - purposefully not used mock objects\n        @mock.patch('smqtk.utils.postgres.get_connection_pool')\n        def test_remove_many_readonly(self, m_gcp):\n            \"\"\"\n            Test failure to remove from a readonly instance.\n            \"\"\"\n            s = PostgresKeyValueStore(read_only=True)\n            self.assertRaises(\n                ReadOnlyError,\n                s.remove_many, [0, 1]\n            )\n\n        # noinspection PyUnusedLocal\n        # - purposefully not used mock objects\n        @mock.patch('smqtk.utils.postgres.get_connection_pool')\n        def test_remove_many_invalid_keys(self, m_gcp):\n            \"\"\"\n            Test failure when one or more provided keys are not present in\n            store.\n            \"\"\"\n            s = PostgresKeyValueStore(create_table=False)\n\n            # Simulate the batch execute returning nothing. This simulates no\n            # rows being found by the first call to the method when checking\n            # for key presence in table.\n            s._check_contained_keys = mock.Mock(return_value={0, 1})\n            # Raw strings so the regex backslashes are not treated as string escapes.\n            PY2_SET_KEY_ERROR_RE = r\"set\\(\\[(?:0|1), (?:0|1)\\]\\)\"\n            PY3_SET_KEY_ERROR_RE = r\"{(?:0|1), (?:0|1)}\"\n            self.assertRaisesRegexp(\n                KeyError, '^(?:{}|{})$'.format(PY2_SET_KEY_ERROR_RE,\n                                               PY3_SET_KEY_ERROR_RE),\n                s.remove_many, [0, 1]\n            )\n\n            # Simulate only one of the keys existing in the table.\n            s._check_contained_keys = mock.Mock(return_value={1})\n            self.assertRaisesRegexp(\n                KeyError, '^1$',\n                s.remove_many, [0, 1]\n            )\n            s._check_contained_keys = mock.Mock(return_value={0})\n            self.assertRaisesRegexp(\n                KeyError, '^0$',\n                s.remove_many, [0, 1]\n            )\n\n        # noinspection PyUnusedLocal\n        # - purposefully not used mock objects\n        @mock.patch('smqtk.utils.postgres.get_connection_pool')\n        # The underlying psycopg2 function used in callback provided to helper.\n        @mock.patch('smqtk.representation.key_value.postgres.psycopg2.extras'\n                    '.execute_batch')\n        def test_remove_many(self, m_psqlExecBatch, m_gcp):\n            \"\"\"\n            Test expected calls to psql `execute_batch` function when removing\n            multiple items.\n            \"\"\"\n            expected_key_1 = 'test_remove_many_key_1'\n            exp_key_1_bytea = PostgresKeyValueStore._py_to_bin(expected_key_1)\n            expected_key_2 = 'test_remove_many_key_2'\n            exp_key_2_bytea = PostgresKeyValueStore._py_to_bin(expected_key_2)\n\n            # Skip table creation calls for simplicity.\n            s = PostgresKeyValueStore(create_table=False)\n\n            # Mock PSQL cursor stuff because we aren't actually connecting to a\n            # database.\n            # - `get_psql_connection` uses `smqtk.utils.postgres.\n            #   get_connection_pool`, so the return is a mock object.\n            # - Cursor is created via a context manager (i.e. __enter__()) when utilized\n            #   in `PsqlConnectionHelper` execute methods.\n            #: :type: mock.Mock\n            mock_cursor = s._psql_helper.get_psql_connection().cursor() \\\n                .__enter__()\n\n            # Mocking `PostgresKeyValueStore` key-check method so as to pretend\n            # that the given keys exist in the database\n            s._check_contained_keys = mock.MagicMock(return_value=set())\n\n            s.remove_many([expected_key_1, expected_key_2])\n\n            # As a result of this call, we expect:\n            # - ``psycopg2.extras.execute_batch`` should have been called once\n            #   when deleting key-value pairs in db (2 < `s._batch_size`)\n            #\n            # We have to break up the equality check of the recorded mock call\n            # arguments because ``psycopg2.Binary`` instances are not directly\n            # comparable.\n\n            m_psqlExecBatch.assert_called_once()\n\n            # Confirm call arguments provided to\n            # ``psycopg2.extras.execute_batch`` are as expected.\n            # -\n            expected_del_q = \"DELETE FROM data_set WHERE key LIKE %(key_like)s\"\n            psqlExecBatch_call_args = m_psqlExecBatch.call_args[0]\n            psqlExecBatch_kwargs = m_psqlExecBatch.call_args[1]\n            self.assertEqual(psqlExecBatch_call_args[0], mock_cursor)\n            self.assertEqual(psqlExecBatch_call_args[1], expected_del_q)\n            # 3rd argument is a list of dictionaries for 'key_like' replacements\n            # - dictionary values are `psycopg2.extensions.Binary` type, which\n            #   are not directly comparable (different instances). Have to\n            #   convert to bytes representation in order to compare.\n            self.assertEqual(len(psqlExecBatch_call_args[2]), 2)\n            self.assertSetEqual(\n                {d['key_like'].getquoted()\n                 for d in psqlExecBatch_call_args[2]},\n                {exp_key_1_bytea.getquoted(), exp_key_2_bytea.getquoted()}\n            )\n            self.assertIn('page_size', psqlExecBatch_kwargs)\n            self.assertEqual(psqlExecBatch_kwargs['page_size'], s._batch_size)\n            self.assertEqual(psqlExecBatch_kwargs['page_size'], 1000)  # default\n","sub_path":"tests/representation/KeyValueStore/test_PostgresKeyValueStore.py","file_name":"test_PostgresKeyValueStore.py","file_ext":"py","file_size_in_byte":8167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"207604294","text":"## recognize card values from img\nimport pytesseract\nfrom PIL import Image, ImageOps\nimport numpy\nimport os\nimport cv2\n\n\n\ndef get_card():\n    im = Image.open(\"C:\\\\Users\\\\alexg\\\\Desktop\\\\test.png\")\n    im = im.convert('LA')\n    im.show()\n    pytesseract.pytesseract.tesseract_cmd = r'C:\\\\Program Files\\\\Tesseract-OCR\\\\tesseract.exe'\n    name1 = pytesseract.image_to_string(im)\n    print(\"done\")\n    return name1\n\n\n# print(get_card())\n\ncap = cv2.VideoCapture(0)\n\nwhile True:\n    # Capture frame-by-frame\n    ret, frame = cap.read()\n\n    # Our operations on the frame come here\n    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n    # Display the resulting frame\n    cv2.imshow('frame', gray)\n    if cv2.waitKey(1) & 0xFF == ord('q'):\n        break\n\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()","sub_path":"test_camera.py","file_name":"test_camera.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
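The test file above repeatedly reaches the cursor through the mocked connection's context manager; the same pattern can be reproduced standalone. A minimal sketch (names are illustrative, not from the test file):

import mock  # or: from unittest import mock

conn = mock.MagicMock()
cursor = conn.cursor().__enter__()  # the object a `with conn.cursor() as cur:` block would yield
cursor.execute("SELECT 1")
cursor.execute.assert_called_once_with("SELECT 1")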
{"seq_id":"74331963","text":"# !/usr/bin/python\n# coding: utf8\n# Time: 2020/2/9 17:42\n# Author: Liam\n# E-mail: luyu.real@qq.com\n# Software: PyCharm\nimport warnings\n\nwarnings.filterwarnings(action=\"ignore\", message=\"^internal gelsd\")  # silence this warning\n\n# Linear regression model\nfrom sklearn import linear_model\n\nX = [[6], [8], [10], [14], [18]]  # pizza diameters\ny = [[7], [9], [13], [17.5], [18]]  # pizza prices\nmodel = linear_model.LinearRegression()  # create a linear regression model object\nmodel.fit(X, y)  # fit the model to the data\na = model.predict([[12]])  # predict the price of a 12-inch pizza\nprint(\"A 12-inch pizza should cost: {:.2f}\".format(a[0][0]))\n\n# Model evaluation\nx_test = [[7], [15], [21], [25], [26]]\ny_test = [[8.3], [16], [23], [28.5], [30]]\nprint(model.predict(x_test))  # prices predicted by our model\nprint(model.score(x_test, y_test))  # compute the R^2 score\n\nimport matplotlib.pyplot as plt  # plotting library\nfrom matplotlib.font_manager import FontProperties  # font utilities\n\n# font = FontProperties(fname=r\"c:\\\\windows\\\\fonts\\\\msyh.ttc\", size=10)  # Windows\nfont = FontProperties(fname=r\"/System/Library/Fonts/STHeiti Medium.ttc\", size=10)  # macOS\n\n\ndef runplt():  # helper that configures the figure\n    plt.title('Pizza price vs. diameter', fontproperties=font)  # set the figure title\n    plt.xlabel('Diameter (inches)', fontproperties=font)  # set the x-axis label\n    plt.ylabel('Price (dollars)', fontproperties=font)  # set the y-axis label\n    plt.axis([0, 25, 0, 25])  # set the axis ranges\n    plt.grid(True)  # show the grid\n    return plt\n\n\nplt = runplt()\nplt.plot(X, y, 'ro')  # plot the data points\nplt.show()  # display the figure\n","sub_path":"AI/cls2_LinearRegression.py","file_name":"cls2_LinearRegression.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"503173280","text":"#! /usr/bin/env python\n#coding=utf-8\nimport logging, os\nimport time\nclass Logger:\n    def addlog(self, testcaseID):\n        filename = time.strftime(\"%Y-%m-%d_%H_%M_%S\", time.localtime(time.time())) + '.log'\n        logging.basicConfig(level=logging.DEBUG,\n                            format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n                            datefmt='%m-%d %H:%M',\n                            filename=r'../log/' + str(filename),\n                            filemode='a')\n        # Define a handler that prints INFO-and-above records to sys.stderr\n        console = logging.StreamHandler()\n        console.setLevel(logging.INFO)\n        # Set the log output format\n        formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\n        console.setFormatter(formatter)\n        # Attach the console handler to the root logger\n        logging.getLogger('').addHandler(console)\n        #logger = logging.getLogger(testcaseID)\n        return filename\n\n\n\"\"\"\nif __name__ == '__main__':\n    logyyx = Logger('../log/yyx.log', logging.ERROR, logging.DEBUG)\n    logyyx.debug('a debug message')\n    logyyx.info('an info message')\n    logyyx.war('a warning message')\n    logyyx.error('an error message')\n    logyyx.cri('a critical message')\n\"\"\"","sub_path":"model/Logger.py","file_name":"Logger.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"169836134","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 12 16:27:31 2021\n\n@author: Joyce & Lucas\n\"\"\"\n\nfrom __future__ import print_function\nimport numpy as np\nimport sympy\n\nfrom scipy.linalg import eigh\nfrom matplotlib import pyplot as plt\n\n\n### System modeling\n\n## Gravity\ng = 9.81\n\n## Dimensions\nL0 = 2.385  # [m] wheelbase\nL1 = (3/5)*1.7  # front wheel to CG distance\nL2 = 1.7 - L1  # rear wheel to CG distance\nL3 = 1.4  # track width\n\n## Masses and inertias\nm_roda = 7  # kg (wheel)\nm_carro = 230  # kg (car body)\n\nIx_carro = 47  # placeholder value\nIz_carro = 20\n\n## Estimated spring constants\nk2a = 150*9.81/0.01  # shock absorber stiffness\nk3a = 150*9.81/0.01  # shock absorber stiffness\nk4a = 150*9.81/0.01  # shock absorber stiffness\nk5a = 150*9.81/0.01  # shock absorber stiffness\nk2b = k2a/8  # tire stiffness\nk3b = k3a/8  # tire stiffness\nk4b = 1.5*k2b  # tire stiffness\nk5b = 1.5*k3b  # tire stiffness\n\n## Damping constants (rough guesses)\nc1 = 0\nc2 = 200\nc2b = c2/5\nc3 = 200\nc3b = c3/5\nc4 = 200\nc4b = c4/5\nc5 = 200\nc5b = c5/5\nc6 = 0\nc7 = 0\nc_ = [c1, c2, c3, c4, c5, c6, c7]\n\n## Amplitudes and velocity of the track excitation\n\nu2 = 0.1\nu3 = 0.1\nu4 = 0.1\nu5 = 0.1\n\nu2_dot = 10\nu3_dot = 10\nu4_dot = 10\nu5_dot = 10\n\n#### Declaring symbolic variables\n\ny1, y2, y3, y4, y5, gama, teta, y1_dot, y2_dot, y3_dot, y4_dot, y5_dot, gama_dot, teta_dot, y1_dot2, y2_dot2, y3_dot2, y4_dot2, y5_dot2, gama_dot2, teta_dot2, t_, = sympy.symbols('y1, y2, y3, y4, y5, gama, teta, y1_dot, y2_dot, y3_dot, y4_dot, y5_dot, gama_dot, teta_dot, y1_dot2, y2_dot2, y3_dot2, y4_dot2, y5_dot2, gama_dot2, teta_dot2, t_')\nvar_dot2 = [y2_dot2, y3_dot2, y4_dot2, y5_dot2, y1_dot2, gama_dot2, teta_dot2]  # \"velocity\" variables for the kinetic-energy derivative\nvar = [y2, y3, y4, y5, y1, gama, teta]  # displacement variables for the potential-energy derivative\nvar_dot = [y2_dot, y3_dot, y4_dot, y5_dot, y1_dot, gama_dot, teta_dot]  # velocity variables for the viscous-dissipation derivative\n\n#### Kinetic, potential and dissipative energies\n\nEc = m_carro*(y1_dot2**2)/2 + Ix_carro*(teta_dot2**2)/2 + Iz_carro*(gama_dot2**2)/2 + m_roda*((y2_dot2**2)/2 + (y3_dot2**2)/2 + (y4_dot2**2)/2 + (y5_dot2**2)/2)\nEp = ((y1 + (gama*L1) + teta*(L3/2) - y3)**2)*k3a/2 + ((y1 - (gama*L2) + teta*(L3/2) - y2)**2)*k2a/2 + ((y1 + (gama*L2) - teta*(L3/2) - y4)**2)*k4a/2 + ((y1 - (gama*L2) - teta*(L3/2) - y5)**2)*k5a/2 + ((y2 - u2)**2)*k2b/2 + ((y3 - u3)**2)*k3b/2 + ((y4 - u4)**2)*k4b/2 + ((y5 - u5)**2)*k5b/2\nEd = c2*(1/2)*(y1_dot - y2_dot - teta_dot*L3/2 + gama_dot*L1)**2 + c3*(1/2)*(y1_dot - y3_dot + teta_dot*L3/2 + gama_dot*L1)**2 + c4*(1/2)*(y1_dot - y4_dot + teta_dot*L3/2 - gama_dot*L2)**2 + c5*(1/2)*(y1_dot - y5_dot - teta_dot*L3/2 - gama_dot*L2)**2 + c2b*(1/2)*(y2_dot - u2_dot)**2 + c3b*(1/2)*(y3_dot - u3_dot)**2 + c4b*(1/2)*(y4_dot - u4_dot)**2 + c5b*(1/2)*(y5_dot - u5_dot)**2\n\n## Dissipative force\n#Q = [- c2*y2_dot, - c3*y3_dot, - c4*y4_dot, - c5*y5_dot, - c1*y1_dot, - c6*gama_dot, - c7*teta_dot]\n\n##### Lagrangian\nL = Ec - Ep\nL = sympy.simplify(L)\n\n#### Derivatives with respect to the displacements\n#### Derivatives with respect to the velocities\n#### Equations of motion for each degree of freedom\ndL_dy = []\ndL_dy_dot = []\n#dtdL = []\ndEd_dy_dot = []\ngraus = []\n\nfor i in range(len(var)):\n    dL_dy.append(0)\n    dL_dy_dot.append(0)\n    dEd_dy_dot.append(0)\n    #dtdL.append(0)\n    graus.append(0)\n    dL_dy[i] = sympy.diff(L, var[i])\n    dL_dy_dot[i] = sympy.diff(L, var_dot2[i])\n    #dtdL[i] = sympy.diff(dL_dy_dot[i], t_)\n    dEd_dy_dot[i] = sympy.diff(Ed, var_dot[i])\n\n    graus[i] = dL_dy_dot[i] - dL_dy[i] + dEd_dy_dot[i]  # equation of motion from which M, K and C are extracted\n\n    print(dEd_dy_dot[i], '\\n')\n\n\n### Assembling the stiffness matrix\nK = []\n\nfor i in range(len(var)):\n    k = []\n    for j in range(len(var)):\n        k.append(0)\n    K.append(k)\n\n\nfor i in range(len(var)):\n    for j in range(len(var)):\n\n        K[i][j] = float(graus[i].coeff(var[j], 1))\n\nfor i in range(len(var)):  # checking symmetry\n    for j in range(len(var)):\n        if K[i][j] == K[j][i]:\n            pass\n        else:\n            print('Error: K matrix is asymmetric')\n            break\nprint('The K matrix is symmetric')\n\n\n\nK = np.reshape(K, (len(var), len(var)))\n\n\n### Assembling the damping matrix\nC = []\n\n## building a generic n x n matrix\nfor i in range(len(var)):\n    c = []\n    for j in range(len(var)):\n        c.append(0)\n    C.append(c)\n\n## filling in the generic matrix\nfor i in range(len(var)):\n    for j in range(len(var)):\n\n        C[i][j] = float(graus[i].coeff(var_dot[j], 1))\n\nfor i in range(len(var)):  # checking symmetry\n    for j in range(len(var)):\n        if C[i][j] == C[j][i]:\n            pass\n        else:\n            print('Error: damping matrix is asymmetric')\n            break\nprint('The C matrix is symmetric')\n\n\n\nC = np.reshape(C, (len(var), len(var)))\n#print(C)\n\n### Assembling the mass matrix\n\nM = []\n\nfor i in range(len(var)):\n    m = []\n    for j in range(len(var)):\n        m.append(0)\n    M.append(m)\n\n\nfor i in range(len(var)):\n    for j in range(len(var)):\n\n        M[i][j] = float(graus[i].coeff(var_dot2[j], 1))\n#print(M)\nfor i in range(len(var)):  # checking symmetry\n    for j in range(len(var)):\n        if M[i][j] == M[j][i]:\n            pass\n        else:\n            print('Error: mass matrix is asymmetric')\n            break\nprint('The M matrix is symmetric')\n\nM = np.reshape(M, (len(var), len(var)))\n\n\n##### Solving the eigenvalue/eigenvector problem for the natural frequencies and mode shapes\n\nwn, B = eigh(K, M)\nwn = np.sqrt(wn**2)  # magnitude of the squared natural frequencies\n# #print(wn)\n# #print(B)\n\n\nfreq_n = np.sqrt(((wn)**2)**(1/2))  # natural frequencies\n\n\n\nmeff = []\nfor i in range(len(var)):  # normalizing the mass matrix by the modal matrix (eigenvectors)\n    meff.append(0)\n    meff[i] = np.dot(np.dot(B[:, i].T, M), B[:, i])\n\nmeffdiag = np.diag(1/np.sqrt(meff))  # diagonal matrix with the effective mass for each degree of freedom\n\nCeff = []\nfor i in range(len(var)):  # normalizing the damping matrix by the modal matrix\n    Ceff.append(0)\n    Ceff[i] = np.dot(np.dot(B[:, i].T, C), B[:, i])  # effective damping vector [2*qsi(i)*wn(i)]\n\nCeffdiag = np.diag(1/np.sqrt(Ceff))\n#B = B*np.diag(1/np.sqrt(meff));\n\nqsi = []\nfor i in range(len(var)):  # damping ratio for each degree of freedom\n    qsi.append(0)\n    qsi[i] = Ceff[i]/(2*freq_n[i])\n\nfreq_d = []\nfor i in range(len(var)):  # damped natural frequency\n    freq_d.append(0)\n    if qsi[i] <= 1:\n        freq_d[i] = freq_n[i]*np.sqrt(1 - qsi[i])\n    else:\n        freq_d[i] = freq_n[i]*np.sqrt(qsi[i] - 1)\n\nmeff = np.diag(meff)\nfor i in range(len(var)):  # checking symmetry\n    for j in range(len(var)):\n        if meff[i][j] == meff[j][i]:\n            pass\n        else:\n            print('Error: meff matrix is asymmetric')\n            break\nprint('The meff matrix is symmetric')\n#### Damping ratio qsi\n\n\n#### Initial conditions\nX = 0.01  # [m]\nwb = 20  # [rad/s]\nr = wb/wn\n\n\n#y0 = np.dot(np.dot(B.T, M), x0)  # initial conditions in modal coordinates\n#y0dot = np.dot(np.dot(B.T, M), v0)\n\nt_inc0 = 0.05  # seconds\nt_end0 = 2  # seconds\nt = np.arange(0, t_end0, t_inc0)\n\ny = []\nphi_x = []\n\nfor i in range(len(var)):  # time response in modal coordinates for each degree of freedom\n    y.append(0)\n    phi_x.append(0)\n    phi_x[i] = np.arctan((2*qsi[i]*r[i]**3)/(1 - (r[i]**2) + (2*qsi[i]*r[i])**2))\n    y[i] = ((X*np.sqrt(1 + (2*qsi[i]*r[i])**2))/np.sqrt((1 - r[i]**2)**2 + (2*qsi[i]*r[i])**2))*np.sin(wb*t - phi_x[i])\n\nx = np.dot(B, y)  # converting the response from modal to physical coordinates\nx1 = x[0][:]\nx2 = x[1][:]\nx3 = x[2][:]\nx4 = x[3][:]\nx5 = x[4][:]\nx6 = x[5][:]\nx7 = x[6][:]\n\nfor i in range(len(var)):\n\n    plt.plot(t, x[i][:])\n\n# # plt.plot(t, x6)\n# # plt.plot(t, x7)\nnp.savetxt('7GDL_ExcitBase2', np.transpose([x1, x2, x3, x4, x5, x6, x7]), fmt='%1.5f')\n\n\n\n","sub_path":"7DOF-Damped-Track-Excitation-Response.py","file_name":"7DOF-Damped-Track-Excitation-Response.py","file_ext":"py","file_size_in_byte":7998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
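The modal analysis in the 7DOF script reduces to scipy.linalg.eigh on the (K, M) pair; a self-contained 2-DOF sketch of that single step (the numbers are illustrative, not taken from the model):

import numpy as np
from scipy.linalg import eigh

M = np.diag([2.0, 1.0])                    # mass matrix
K = np.array([[6.0, -2.0], [-2.0, 4.0]])   # stiffness matrix
lam, modes = eigh(K, M)                    # generalized problem K v = lam M v, lam = wn**2
wn = np.sqrt(lam)                          # natural frequencies in rad/s
print(wn, modes)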
{"seq_id":"624561372","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\nAUTHOR = u'Carlos Gustavo Ruiz'\nSITENAME = u'atmantree.com'\nSITEURL = ''\n\nPATH = 'content'\n\nTIMEZONE = 'America/Caracas'\n\nDEFAULT_LANG = u'en'\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\n# Blogroll\nLINKS = ()\n# LINKS = (('Pelican', 'http://getpelican.com/'),\n#          ('Python.org', 'http://python.org/'),\n#          ('Jinja2', 'http://jinja.pocoo.org/'),\n#          ('You can modify those links in your config file', '#'),)\n\n# Social widget\nSOCIAL = (('Twitter', 'https://twitter.com/atmantree'),\n#          ('Google+',\n#           'https://plus.google.com/+CarlosGustavoRuiz-atmantree/'),\n          ('Tumblr', 'http://atmantree.tumblr.com'),\n          ('LinkedIn', 'http://ve.linkedin.com/in/atmantree/'),\n          ('GitHub', 'http://github.com/atmantree'),\n          ('Bitbucket', 'http://bitbucket.org/atmantree'))\n\n\nDEFAULT_PAGINATION = False\n\n# Uncomment following line if you want document-relative URLs when developing\n#RELATIVE_URLS = True\nDISPLAY_CATEGORIES_ON_MENU = False\n\nARTICLE_URL = 'blog/{date:%Y}/{date:%m}/{date:%d}/{slug}/'\nARTICLE_SAVE_AS = 'blog/{date:%Y}/{date:%m}/{date:%d}/{slug}/index.html'\nPAGE_URL = '{slug}/'\nPAGE_SAVE_AS = '{slug}/index.html'\nCATEGORY_URL = 'category/{slug}/'\nCATEGORY_SAVE_AS = 'category/{slug}/index.html'\nTAG_URL = 'tag/{slug}/'\nTAG_SAVE_AS = 'tag/{slug}/index.html'\nSTATIC_PATHS = ['images','uploads']\n","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"575851387","text":"import re\n\n\nclass Grammar:\n    @staticmethod\n    def parseLine(line):\n        return [element.strip() for element in line.strip().split('=')[1].strip()[1:-1].split(',')]\n\n    @staticmethod\n    def parseConsole(line):\n        return [element.strip() for element in line.strip().split(',')]\n\n    @staticmethod\n    def parseProductions(productions):\n        result = []\n        for rule in productions:\n            [lhs, rhs] = rule.strip().split('->')\n            results = rhs.strip().split('|')\n            for res in results:\n                result.append((lhs.strip(), res.strip()))\n        return result\n\n    @staticmethod\n    def readFromFile(filename):\n        with open(filename) as file:\n            N = Grammar.parseLine(file.readline())\n            E = Grammar.parseLine(file.readline())\n            S = Grammar.parseLine(file.readline())\n            P = Grammar.parseProductions(Grammar.parseLine(''.join([line for line in file])))\n            return Grammar(N, E, P, S)\n\n    @staticmethod\n    def readFromConsole():\n        N = Grammar.parseConsole(input(\"N = \"))\n        E = Grammar.parseConsole(input(\"E = \"))\n        S = Grammar.parseConsole(input(\"S = \"))\n        P = Grammar.parseProductions(Grammar.parseConsole(input(\"P = \")))\n        return Grammar(N, E, P, S)\n\n    def __init__(self, N, E, P, S):\n        self.N = N\n        self.E = E\n        self.P = P\n        self.S = S\n\n    def getProductions(self, symbol):\n        result = []\n        for production in self.P:\n            if production[0] == symbol:\n                result.append(production[1])\n        return result\n\n    def checkForStartingSymbolOnRHS(self):\n        for (lhs, rhs) in self.P:\n            if re.findall(self.S[0], rhs):\n                return False\n        return True\n\n    def checkRegular(self):\n        # A -> a, A -> aB (1)\n        # if S -> epsilon then S does not appear on the rhs of any production (2)\n        # epsilon = e\n        startingSymbol = self.S[0]\n        for rule in self.P:\n            lhs, rhs = rule\n            # A -> a or A -> aB\n            if len(rhs) > 2:\n                print(\"The length of rhs should be at most 2 (a or aB)\")\n                return False\n            # Validate the rhs itself; the previous version checked\n            # getProductions(rhs), which is empty for strings like 'aB',\n            # so nothing was ever actually validated.\n            if re.match(r'^[a-z][A-Z]?$', rhs) is None:\n                print(\"The production should be of form A -> a or A -> aB\")\n                return False\n            if lhs == startingSymbol and rhs == 'e':\n                if self.checkForStartingSymbolOnRHS() is False:\n                    print(\"S should not be in rhs \")\n                    return False\n        return True\n\n    @staticmethod\n    def fromFA(fa):\n        N = fa.Q\n        E = fa.E\n        S = fa.q0\n        P = []\n        if fa.q0[0] in fa.F:\n            P.append((fa.q0[0], 'e'))\n        for transition in fa.D:\n            lhs, state = transition[0]\n            rhs = transition[1].strip()\n            if rhs in fa.F:\n                P.append((lhs, state))\n                P.append((lhs, state.strip() + rhs))\n            if rhs not in fa.F:\n                P.append((lhs, state.strip() + rhs))\n        return Grammar(N, E, P, S)\n","sub_path":"Lab2/Grammar.py","file_name":"Grammar.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
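A quick check of the repaired checkRegular on a right-linear grammar built inline (the sample grammar is mine, not from the lab files):

g = Grammar(N=['S', 'A'], E=['a', 'b'],
            P=[('S', 'aA'), ('A', 'aA'), ('A', 'b')], S=['S'])
print(g.checkRegular())  # expected: True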
{"seq_id":"87527400","text":"import configparser\nfrom datetime import datetime\nimport os\nfrom pyspark.sql import SparkSession\n# from pyspark.sql.functions import udf, col\n# from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format\nfrom pyspark.sql.types import *\nfrom pyspark.sql import functions as F\n\n\nconfig = configparser.ConfigParser()\nconfig.read('dl.cfg')\n\nos.environ['AWS_ACCESS_KEY_ID']=config['AWS']['AWS_ACCESS_KEY_ID']\nos.environ['AWS_SECRET_ACCESS_KEY']=config['AWS']['AWS_SECRET_ACCESS_KEY']\n\n# schema used when reading the song data files\ndef build_song_schema():\n    \"\"\"Build and return a schema to use for the song data.\n    Returns\n        schema: StructType object, a representation of schema and defined fields\n    \"\"\"\n    schema = StructType(\n        [\n            StructField('artist_id', StringType(), True),\n            StructField('artist_latitude', DecimalType(), True),\n            StructField('artist_longitude', DecimalType(), True),\n            StructField('artist_location', StringType(), True),\n            StructField('artist_name', StringType(), True),\n            StructField('duration', DecimalType(), True),\n            StructField('num_songs', IntegerType(), True),\n            StructField('song_id', StringType(), True),\n            StructField('title', StringType(), True),\n            StructField('year', IntegerType(), True)\n        ]\n    )\n    return schema\n\ndef create_spark_session():\n    spark = SparkSession \\\n        .builder \\\n        .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n        .getOrCreate()\n    return spark\n\n\ndef process_song_data(spark, input_data, output_data):\n    \"\"\"\n    Processes a song file. Writes song and artist tables to S3.\n    Arguments:\n        input_data -- input S3 directory with `song` file\n        output_data -- output S3 directory\n    \"\"\"\n\n    print(\"Read song data\")\n    df_song = spark.read.json(input_data+\"song_data/*/*/*/*.json\", schema=build_song_schema())\n\n    # extract columns to create songs table\n    songs_table = df_song[['song_id', 'title', 'artist_id', 'year', 'duration']].drop_duplicates()\n\n\n    print(\"Write...\")\n    # write songs table to parquet files partitioned by year and artist\n    songs_table.write.save(path=output_data+'song_table',\n                           format='parquet',\n                           partitionBy=['year', 'artist_id'],\n                           mode='overwrite')\n\n    # extract columns to create artists table\n    artists_table = df_song[['artist_id', 'artist_name', 'artist_location', 'artist_latitude', 'artist_longitude']].drop_duplicates()\n\n    print(\"Write...\")\n    # write artists table to parquet files\n    artists_table.write.save(path=output_data+'artists_table',\n                             format='parquet',\n                             mode='overwrite')\n\n\ndef process_log_data(spark, input_data, output_data):\n    \"\"\"\n    Processes a log file. Writes time, users and songplay tables to S3.\n    Arguments:\n        input_data -- input S3 directory with `song` and `log` files\n        output_data -- output S3 directory\n    \"\"\"\n\n    print(\"Read log data\")\n    # read log data file\n    df_log_data = spark.read.json(input_data + \"log-data/*/*/*.json\")\n\n    # filter by actions for song plays\n    df_log_data = df_log_data[df_log_data['page']=='NextSong']\n\n    # extract columns for users table\n    users_table = df_log_data[['userId', 'firstName', 'lastName', 'gender', 'level']].drop_duplicates()\n\n\n    print(\"Write...\")\n    # write users table to parquet files\n    users_table.write.save(path=output_data + 'users_table',\n                           format='parquet',\n                           mode='overwrite'\n                           )\n\n    df_log_data = df_log_data.withColumn('timestamp', F.from_unixtime(df_log_data['ts']/1000))\\\n        .withColumn('hour', F.hour(F.col('timestamp')))\\\n        .withColumn('day', F.dayofmonth(F.col('timestamp')))\\\n        .withColumn('month', F.month(F.col('timestamp')))\\\n        .withColumn('year', F.year(F.col('timestamp')))\\\n        .withColumn('weekofyear', F.weekofyear(F.col('timestamp')))\\\n        .withColumn('dayofweek', F.dayofweek(F.col('timestamp')))\n\n    # extract columns to create time table\n    time_table = df_log_data[['timestamp','hour','day','month','year','weekofyear','dayofweek',]].drop_duplicates()\n\n    print(\"Write...\")\n    # write time table to parquet files partitioned by year and month\n    time_table.write.save(path=output_data + 'time_table',\n                          format='parquet',\n                          mode='overwrite',\n                          partitionBy=['year','month'])\n\n    # read in song data to use for songplays table\n    df_song = spark.read.json(input_data + \"song_data/*/*/*/*.json\", schema=build_song_schema())\n\n    # extract columns from joined song and log datasets to create songplays table\n    songplays_table = df_log_data.join(df_song,\n                                       on = (df_song['title'] == df_log_data['song']) & \\\n                                            (df_song['artist_name'] == df_log_data['artist']) & \\\n                                            (df_song['duration'] == df_log_data['length'])\n                                       )\n\n    print(\"Write...\")\n    # write songplays table to parquet files partitioned by year and month\n    songplays_table.write.save(path=output_data + 'songplays_table',\n                               format='parquet',\n                               mode='overwrite',\n                               partitionBy=['year','month'])\n\n\ndef main():\n    spark = create_spark_session()\n\n    print(\"Session created\")\n    process_log_data(spark, \"s3a://udacity-dend/\", 's3a://my-sparkify-data-lake/log_data/')\n    print(\"Log data processed\")\n    process_song_data(spark, \"s3a://udacity-dend/\", 's3a://my-sparkify-data-lake/song_data/')\n    print(\"Song data processed\")\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"Project 4. S3 to S3 with PySpark/etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":6163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
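The partitioned-parquet pattern the ETL relies on can be exercised locally; a minimal sketch with a local session and dummy rows (paths and data are illustrative, not the project's S3 buckets):

from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[1]").getOrCreate()
df = spark.createDataFrame([(2018, "a1", "s1"), (2019, "a2", "s2")],
                           ["year", "artist_id", "song_id"])
df.write.save(path="/tmp/songs", format="parquet",
              partitionBy=["year", "artist_id"], mode="overwrite")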
{"seq_id":"28802331","text":"\"\"\"\nSpy snippets\n============\nYou've been recruited by the team building Spy4Rabbits, a highly advanced search engine used to help fellow agents\ndiscover files and intel needed to continue the operations against Dr. Boolean's evil experiments. The team is known for\nrecruiting only the brightest rabbit engineers, so there's no surprise they brought you on board. While you're elbow\ndeep in some important encryption algorithm, a high-ranking rabbit official requests a nice aesthetic feature for the\ntool called \"Snippet Search.\" While you really wanted to tell him how such a feature is a waste of time in this\nintense, fast-paced spy organization, you also wouldn't mind getting kudos from a leader. How hard could it be, anyway?\nWhen someone makes a search, Spy4Rabbits shows the title of the page. Your commander would also like it to show a short\nsnippet of the page containing the terms that were searched for.\nWrite a function called answer(document, searchTerms) which returns the shortest snippet of the document, containing all\nof the given search terms. The search terms can appear in any order.\nThe length of a snippet is the number of words in the snippet. For example, the length of the snippet \"round fluffy\nrabbit tails\" is 4. (Hey, don't judge your colleagues for what they search in their spare time).\nThe document will be a string consisting only of lower-case letters [a-z] and spaces. Words in the string will be\nseparated by a single space. A word could appear multiple times in the document. searchTerms will be a list of words,\neach word comprised only of lower-case letters [a-z]. All the search terms will be distinct.\nSearch terms must match words exactly, so \"hop\" does not match \"hopping\".\nReturn the first sub-string if multiple sub-strings are shortest. For example, if the document is \"world there hello\nhello where world\" and the search terms are [\"hello\", \"world\"], you must return \"world there hello\".\nThe document will be guaranteed to contain all the search terms.\nThe number of words in the document will be at least one, will not exceed 500, and each word will be 1 to 10 letters\nlong. Repeat words in the document are considered distinct for counting purposes.\nThe number of words in searchTerms will be at least one, will not exceed 100, and each word will not be more than 10\nletters long.\n\"\"\"\n\ndef answer(document, searchTerms):\n    text = document.split()\n    num = []\n    name = []\n\n    for i in range(0, len(text)):\n        num.append([])\n        name.append([])\n        if text[i] in searchTerms:\n            num[i].append(i)\n            name[i].append(text[i])\n\n            for j in range(0, len(num)):\n                if (text[i] not in name[j]) and name[j]:\n                    num[j].append(i)\n                    name[j].append(text[i])\n\n    # Wrap filter() in list() so the results stay indexable under Python 3,\n    # where filter() returns a lazy iterator.\n    num = list(filter(lambda x: len(x) == len(searchTerms), num))\n    name = list(filter(lambda x: len(x) == len(searchTerms), name))\n    shortest_len = num[0][len(num[0]) - 1] - num[0][0]\n    shortest = 0\n\n    for k in range(1, len(num)):\n        if num[k]:\n            length = num[k][len(num[k]) - 1] - num[k][0]\n\n            if length < shortest_len:\n                shortest_len = length\n                shortest = k\n\n\n    result = ' '.join(text[num[shortest][0]:num[shortest][len(searchTerms) - 1] + 1])\n    return result\n","sub_path":"Spy_snippets.py","file_name":"Spy_snippets.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"248834176","text":"import os\nimport copy\nfrom abc import ABC, abstractmethod\nfrom ptflops import get_model_complexity_info\n\nfrom DA2Lite.core.log import get_logger\n\nlogger = get_logger(__name__)\n\nclass TrainerBase(ABC):\n    def __init__(\n        self,\n        cfg_util,\n        prefix,\n        train_loader,\n        test_loader,\n        device\n    ):\n\n        self.cfg = cfg_util.cfg\n\n        self.device = device\n        self.img_shape = self.cfg.DATASET.IMG_SHAPE\n        self.prefix = prefix\n        self.model_name = self.cfg.MODEL.NAME\n        self.dataset_name = self.cfg.DATASET.NAME\n        self.train_loader = train_loader\n        self.test_loader = test_loader\n\n        save_dir = os.path.join(self.cfg.SAVE_DIR, 'models/')\n        if not os.path.exists(save_dir):\n            os.makedirs(save_dir)\n\n        file_name = f'{self.prefix}_{self.dataset_name}_{self.model_name}.pt'\n        self.save_path = os.path.join(save_dir, file_name)\n\n    @abstractmethod\n    def train(self):\n        raise NotImplementedError\n\n    @abstractmethod\n    def test(self):\n        raise NotImplementedError\n\n    @abstractmethod\n    def evaluate(self):\n        pass\n\n    @abstractmethod\n    def build(self):\n        raise NotImplementedError\n\n    def _get_file_size(self, file_path):\n        size = os.path.getsize(file_path)\n        return size\n\n    def model_summary(self, test_acc, test_loss, model):\n        model = copy.deepcopy(model)\n        macs, params = get_model_complexity_info(model=model,\n                                                 input_res=tuple(self.img_shape),\n                                                 print_per_layer_stat=False,\n                                                 as_strings=False,\n                                                 verbose=False)\n\n        file_size = self._get_file_size(self.save_path)\n\n        num_dummy = 60\n        model_name = ' ' + self.prefix + '_model '\n        dummy_line = '-'*num_dummy\n\n        acc = f' Test Accuracy (%) : {round(test_acc*100.0, 2)} %'\n        loss = f' Test loss : {round(test_loss, 4)}'\n        param_num = f' The number of parameters (M) : {round(params * 1e-6, 2)} M'\n        complexity = f' Computational complexity (G) : {round(macs * 1e-9, 2)} G'\n        file_size = f' File size (MB) : {round(file_size / (1024 * 1024), 2)} MB'\n\n        logger.info(f'+{dummy_line}+')\n        logger.info(f'|{model_name.center(num_dummy)}|')\n        logger.info(f'+{dummy_line}+')\n        logger.info(f'|{\" \".ljust(num_dummy)}|')\n        logger.info(f'|{acc.ljust(num_dummy)}|')\n        logger.info(f'|{loss.ljust(num_dummy)}|')\n        logger.info(f'|{param_num.ljust(num_dummy)}|')\n        logger.info(f'|{complexity.ljust(num_dummy)}|')\n        logger.info(f'|{file_size.ljust(num_dummy)}|')\n        logger.info(f'|{\" \".ljust(num_dummy)}|')\n        logger.info(f'+{dummy_line}+\\n')","sub_path":"DA2Lite/trainer/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"219025824","text":"\"\"\"\nGiven three strings s1, s2 and s3, verify whether s3 is formed by an interleaving of s1 and s2.\n\nAn interleaving of two strings s and t is a configuration in which each string is split into n and m non-empty\nsubstrings respectively, such that:\n\ns = s1 + s2 + ... + sn\nt = t1 + t2 + ... + tm\n|n - m| <= 1\nThe interleaving is s1 + t1 + s2 + t2 + s3 + t3 + ... or t1 + s1 + t2 + s2 + t3 + s3 + ...\nNote: a + b denotes the concatenation of strings a and b.\n\n\nExample 1:\n\nInput: s1 = \"aabcc\", s2 = \"dbbca\", s3 = \"aadbbcbcac\"\nOutput: true\nExample 2:\n\nInput: s1 = \"aabcc\", s2 = \"dbbca\", s3 = \"aadbbbaccc\"\nOutput: false\nExample 3:\n\nInput: s1 = \"\", s2 = \"\", s3 = \"\"\nOutput: true\n\n\nConstraints:\n\n0 <= s1.length, s2.length <= 100\n0 <= s3.length <= 200\ns1, s2 and s3 consist of lowercase English letters\n\nSource: LeetCode\nLink: https://leetcode-cn.com/problems/interleaving-string\nCopyright belongs to LeetCode; please credit the source for non-commercial reuse.\n\"\"\"\n\n\ndef isInterleave(s1: str, s2: str, s3: str) -> bool:\n    if len(s1) + len(s2) != len(s3):\n        return False\n    # return self.checkSubStr(s1, s2, s3)\n    dp = [[False for _ in range(len(s2) + 1)] for _ in range(len(s1) + 1)]\n    dp[0][0] = True\n    for i in range(1, len(s1) + 1):\n        dp[i][0] = dp[i - 1][0] and s1[i - 1] == s3[i - 1]\n    for j in range(1, len(s2) + 1):\n        dp[0][j] = dp[0][j - 1] and s2[j - 1] == s3[j - 1]\n\n    for i in range(1, len(s1) + 1):\n        for j in range(1, len(s2) + 1):\n            flag1 = dp[i][j - 1] and s2[j - 1] == s3[i + j - 1]\n            flag2 = dp[i - 1][j] and s1[i - 1] == s3[i + j - 1]\n            dp[i][j] = flag1 or flag2\n    return dp[-1][-1]","sub_path":"97_isInterStr.py","file_name":"97_isInterStr.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"143666851","text":"# ======================================================================================================================\n# Task A1 model class. The class A1 essentially acts as a classifier object. The default SVC arguments are the best\n# found during validation; the chosen final model is the LogisticRegression created with lr=True. Certain functions\n# were used in model selection and are mentioned in the report, but are not run in 'main.py' in the final submission.\n# The A1 class ended up being made quite modular and being used across tasks\n# ======================================================================================================================\nimport time\nfrom sklearn import svm\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import cross_val_score\n\nclass A1:\n\n    # The classifier object. The SVC defaults below match the best SVC parameters from validation, but the final\n    # result used the LogisticRegression with the best parameters found after validation. That model is created when\n    # lr=True is passed to the constructor.\n    def __init__(self, c=0.1, kernel='poly', degree=4, lr=False):\n        self.c = c\n        self.kernel = kernel\n        self.degree = degree\n        if lr:\n            # Best params: {'C': 0.1, 'penalty': 'l2', 'solver': 'newton-cg'}\n            self.classifier = LogisticRegression(penalty='l2', solver='newton-cg', C=0.1, max_iter=4000)\n        else:\n            # Best params: C=0.1, kernel='poly', degree=4\n            self.classifier = svm.SVC(C=self.c, kernel=self.kernel, degree=self.degree)\n\n\n    def train(self, training_images, training_labels, test_images, test_labels):\n        start = time.time()\n        self.classifier.fit(training_images, training_labels)\n        stop = time.time()\n\n        print(f\"Training time: {stop - start:.2f} seconds\")\n\n        return self.test(test_images, test_labels)\n\n    def test(self, test_images, test_labels):\n        pred = self.classifier.predict(test_images)\n\n        return accuracy_score(test_labels, pred)\n\n    def cross_validate(self, validation_images, validation_labels, cv_folds):\n        scores = cross_val_score(self.classifier, validation_images, validation_labels, cv=cv_folds)\n\n        return scores","sub_path":"A1/a1.py","file_name":"a1.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
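The DP table in isInterleave above can be spot-checked directly against the three translated examples:

assert isInterleave("aabcc", "dbbca", "aadbbcbcac") is True
assert isInterleave("aabcc", "dbbca", "aadbbbaccc") is False
assert isInterleave("", "", "") is True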
flee\")\n print(\"> \",)\n user_input = input()\n if user_input == \"1\":\n hero.attack(goblin)\n elif user_input == \"2\":\n pass\n elif user_input == \"3\":\n goblin.print_status()\n elif user_input == \"4\":\n print(\"Goodbye.\")\n break\n else:\n print(\"Invalid input %r\" % user_input)\n\n if goblin.health > 0:\n # Goblin attacks hero\n goblin.attack(hero)\n print(\"The goblin does %d damage to you.\" % goblin.power)\n \n\nmain()\n","sub_path":"rpg-0.py","file_name":"rpg-0.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"585139971","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom .views import HomeView, MovieDetailView\n\nurlpatterns = [\n url(r'^', include('apps.accounts.urls')),\n url(r'^', include('apps.movies.urls')),\n url(r'^$', HomeView.as_view(), name='home'),\n url(r'^movie/', MovieDetailView.as_view(), name='movie'),\n url(r'^admin/', include(admin.site.urls)),\n]\n","sub_path":"website/apps/core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"51600998","text":"#!/usr/bin/python\n\nimport numpy as np\n\nfrom data import features\nfrom data import labels\nimport matplotlib.pyplot as plt\n\n\nclass NeuralNet(object):\n \"\"\"3 layered Neural Network class\n 2 - input layer nodes\n 2 - hidden layer nodes\n 2 - output layer nodes\n \"\"\"\n def __init__(self, features, labels, update_step_size=0.05):\n self.features = features\n self.labels = labels\n self.instances = features.shape[0]\n self.eta = update_step_size\n self.cost = []\n\n def init_weights(self, layer_mapping):\n for key in layer_mapping.keys():\n weight = key.replace(LAYER, WEIGHT)\n val = np.mat(\n np.random.randn(layer_mapping[key].get('out'),\n layer_mapping[key].get('in')))\n exec('self.%s = val' % weight)\n\n @staticmethod\n def sigmoid(z):\n \"\"\"Sigmoid function\"\"\"\n return 1.0/(1.0 + np.exp(-z))\n\n def randomize_data(self, times=2):\n \"\"\"Randomize the data instances\n :param times: number times to shuffle\n :return: returns the shuffled data instances\n \"\"\"\n XY = np.concatenate((self.features, self.labels), axis=1)\n _ = [np.random.shuffle(XY) for i in range(times)]\n self.features, self.labels = np.split(XY, [2], axis=1)\n\n def _add_bias(self, in_):\n return np.mat(np.insert([1.0], 1, np.ravel(in_))).transpose()\n\n def forward(self, in_, weight):\n return self.sigmoid(np.dot(weight, self._add_bias(in_)))\n\n def backward(self, weight, delta_prev, in_):\n in_bias = self._add_bias(in_)\n delta_curr_bias = np.multiply(np.multiply(in_bias,\n (1 - in_bias)),\n np.dot(weight.transpose(),\n delta_prev))\n return delta_curr_bias[1:] # removing the bias term\n\n def update(self, x0, x1, delta1, delta2):\n \"\"\"Update weights based on stochastic gradient descent\n :param x0: input node values\n :param x1: hidden node values\n :param delta1: error in hidden node values\n :param delta2: error in output node values\n :return:\n \"\"\"\n self.weight1 = (self.weight1 -\n self.eta*np.dot(\n delta1, self._add_bias(x0).transpose()))\n self.weight2 = (self.weight2 -\n self.eta*np.dot(\n delta2, self._add_bias(x1).transpose()))\n\n def _feed_forward(self, x0, W1, W2):\n \"\"\"Run forward through the nodes and evaluate the node values\n :param x0: input node values\n :param W1: Layer 1 weights\n :param W2: Layer 2 weights\n :return: returns all 
the nodes values\n \"\"\"\n x1 = self.forward(x0, W1)\n x2 = self.forward(x1, W2)\n return x0, x1, x2\n\n def cost_calculation(self, hypothesis, actual):\n \"\"\"Logistic regression's error equation\n :param hypothesis: estimated value by the algorithm\n :param actual: actual value\n :return: returns the cost during the iteration\n \"\"\"\n error = -1*(np.multiply(actual,\n np.log(hypothesis)).sum() +\n np.multiply(1 - actual,\n np.log(1 - hypothesis)).sum())\n self.cost.append(error)\n\n def _back_propagate(self, x2, x1, y):\n \"\"\"Run backward through the nodes and evaluate the errors\n :param x2: Output node values (estimate)\n :param x1: Hidden node values\n :param y: Output node values (actual)\n :return: returns the errors of the layers\n \"\"\"\n delta2 = x2 - y\n delta1 = self.backward(self.weight2, delta2, x1)\n return delta1, delta2\n\n def train_features(self):\n \"\"\"Stochastic gradient descent for training\"\"\"\n for _iter in range(self.instances):\n x0 = np.mat(self.features[_iter]).transpose()\n y = np.mat(self.labels[_iter]).transpose()\n\n # Feed forward - run through the nodes and\n # evaluate the output nodes values\n x0, x1, x2 = self._feed_forward(x0,\n self.weight1,\n self.weight2)\n\n # Calculate the error\n self.cost_calculation(x2, y)\n\n # Back propagate the error\n delta1, delta2 = self._back_propagate(x2, x1, y)\n\n # Update weight\n self.update(x0, x1, delta1, delta2)\n\n def estimate(self):\n y_estimate = []\n for _iter in range(self.instances):\n x0 = np.mat(self.features[_iter]).transpose()\n y = np.mat(self.labels[_iter]).transpose()\n _, _, x2 = self._feed_forward(x0,\n self.weight1,\n self.weight2)\n x2 = np.ravel(x2)\n y_estimate.append(np.array([1.0, 0.0]) if x2[0] >= x2[1]\n else np.array([0.0, 1.0]))\n return np.array(y_estimate)\n\n\nnumHiddenLayer = 1\nnumHiddenLayerNodes = 2\nnumOutputLayerNodes = 2\n\nNN = NeuralNet(features, labels)\nLAYER = 'layer'\nWEIGHT = 'weight'\nlayer_node_mapping = {\n LAYER + \"1\": {\"in\": NN.features.shape[1] + 1,\n \"out\": numHiddenLayerNodes},\n LAYER + \"2\": {\"in\": numHiddenLayerNodes + 1,\n \"out\": numOutputLayerNodes}\n }\n\nREPEAT = 1000\ncost_all = []\n\nfor experiment in range(REPEAT):\n print(\"Training attempt: %s\" % (experiment + 1))\n NN = NeuralNet(features, labels)\n NN.init_weights(layer_node_mapping)\n NN.randomize_data()\n NN.train_features()\n cost_all.append(np.asarray(NN.cost))\n\ncost_averaged = np.asarray(cost_all).mean(axis=0)\n\ny_estimate = NN.estimate()\nestimate = (y_estimate == NN.labels)[:, 0].astype(float)\naccuracy = estimate.sum()/estimate.__len__()\nprint(\"accuracy = %s\" % accuracy)\n\nplt.close()\nplt.plot(range(NN.instances), cost_averaged)\nplt.ylabel('Averaged Cost')\nplt.xlabel('Iterations')\nplt.title('Averaged Cost over 1000 experiments')\nplt.axis('tight')\nplt.show()\n","sub_path":"NN222.py","file_name":"NN222.py","file_ext":"py","file_size_in_byte":6274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"80812547","text":"from __future__ import print_function\nimport httplib2\nimport os\n\nimport GetMessage\nimport obtainallthreads\nfrom collections import Counter\nimport pandas\n\nfrom apiclient import discovery\nimport oauth2client\nfrom oauth2client import client\nfrom oauth2client import tools\n\n\n\ntry:\n import argparse\n flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\nexcept ImportError:\n flags = None\n\n# If modifying these scopes, delete your previously saved credentials\n# at 
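NeuralNet.sigmoid will overflow np.exp for large negative z; a numerically stable drop-in with the same interface (the name stable_sigmoid is mine, not from NN222.py):

import numpy as np

def stable_sigmoid(z):
    # sigmoid(z) = exp(-log(1 + exp(-z))); logaddexp keeps the log-sum finite
    return np.exp(-np.logaddexp(0.0, -z))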
~/.credentials/gmail-python-quickstart.json because if not the previous ones will keep being used instead of the new ones.\nSCOPES = 'https://www.googleapis.com/auth/gmail.readonly'\nCLIENT_SECRET_FILE = 'client_secrets.json'\nAPPLICATION_NAME = 'Gmail API Python Quickstart'\n\n\ndef get_credentials():\n \"\"\"Gets valid user credentials from storage.\n\n If nothing has been stored, or if the stored credentials are invalid,\n the OAuth2 flow is completed to obtain the new credentials.\n\n Returns:\n Credentials, the obtained credential.\n \"\"\"\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'gmail-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials\n\ndef main():\n \"\"\"Shows different stats obtained from the mails sent by Deliveroo for each drop.\n\n Creates a Gmail API service object and obtain all Threads of the user's mailbox matching the query. After that, scrap the useful data in the mails such as the value of the tip, the name of the restaurant, the delivery Time and then make some statistics with these data.\n\n \"\"\"\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n\n threads = obtainallthreads.ListThreadsMatchingQuery(service, 'me', query='from:(noreply@deliveroo.co.uk) subject:(Order #) after:2016/8/21 before:2016/9/4')\n\n drops = len(threads)\n\n tip = []\n restaurant = []\n deliveryTime = []\n i = 0\n for thread in threads:\n messageId = thread['id']\n message = GetMessage.GetMessageBody(service, 'me', str(messageId))\n tipIndex = message.index(\"Credit Card Tip:\")\n tipLine = message[tipIndex:]\n tipRaw = tipLine.split()\n tip.append(float(tipRaw[3]))\n\n restaurantAddressIndex = message.index(\"Restaurant Address:\")\n restaurantAdressLine = message[restaurantAddressIndex:]\n restaurantIndex = message.index(\"Restaurant:\")\n restaurantLine = message[restaurantIndex:restaurantAddressIndex]\n restaurantRaw = restaurantLine.split()\n restaurantRaw.pop(0)\n restaurantRaw = ' '.join(restaurantRaw)\n restaurant.append(restaurantRaw)\n\n deliveryTimeIndex = message.index(\"Delivery Time:\")\n deliveryTimeLine = message[deliveryTimeIndex:]\n deliveryTimeRaw = deliveryTimeLine.split()\n deliveryTime.append(deliveryTimeRaw[2] + ' ' + deliveryTimeRaw[3])\n\n i += 1\n\n\n tipsTotal = sum(tip)\n\n tableRestaurant = pandas.Series(Counter(restaurant))\n index = restaurant\n tableTipsRestaurant = pandas.Series(0., index=tableRestaurant.keys())\n for i in xrange(len(tip)):\n print(restaurant[i])\n print(tip[i])\n tableTipsRestaurant.ix[restaurant[i]] += tip[i]\n print(\"\\nTips per restaurant:\\n%s \" %tableTipsRestaurant.sort_values())\n\n #print(tableRestaurant.sort_values())\n #print(tableTipsRestaurant.values)\n #print(tableRestaurant.values)\n AvgTableTipsRestaurant = pandas.Series(tableTipsRestaurant.values/tableRestaurant.values, index=tableRestaurant.keys())\n print(\"\\nAverage Tips per 
restaurant:\\n%s \" % AvgTableTipsRestaurant.sort_values())\n\n print(\"\\ndrops: %s \" % drops)\n print(\"\\nOrder Comission: %s \" % (int(drops)*4.0))\n #print(tip)\n print(\"\\nTotal Tips: %s \" % tipsTotal)\n\nif __name__ == '__main__':\n main()\n","sub_path":"Deliveroo_quickstart.py","file_name":"Deliveroo_quickstart.py","file_ext":"py","file_size_in_byte":4531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"401555234","text":"from sklearn import tree\nfrom random import randint\nimport numpy as np\n#from equal import splitteams\n\n\n#this is for 1 point player. Actually this is worst player class\nwin1 = []\ndraw1 = []\nlose1 = []\nage1 = []\ntotal1 = []\ncounter1 = 0\nwhile counter1 <= 50:\n created_win = randint(1, 100)\n created_draw = randint(1, 100)\n created_lose = randint(1, 100)\n created_age = randint(20, 50)\n if created_lose / created_win >= 1.5:\n win1.append(created_win)\n draw1.append(created_draw)\n lose1.append(created_lose)\n age1.append(created_age)\n counter1 += 1\n\nfor i in range(50):\n total1.append([win1[i], draw1[i], lose1[i], age1[i]])\n\n\n\n#this is for 2 point player.\nwin2 = []\ndraw2 = []\nlose2 = []\nage2 = []\ntotal2 = []\ncounter2 = 0\nwhile counter2 <= 50:\n created_win = randint(1, 100)\n created_draw = randint(1, 100)\n created_lose = randint(1, 100)\n created_age = randint(20, 50)\n\n if created_win / created_lose <= 1.25 and created_win / created_lose >= 1.50:\n win2.append(randint(1, 100))\n draw2.append(randint(1, 100))\n lose2.append(randint(1, 100))\n age2.append(created_age)\n\n counter2 += 1\n\nfor i in range(50):\n total2.append([win2[i], draw2[i], lose2[i], age2[i]])\n\nwin3 = []\ndraw3 = []\nlose3 = []\nage3 = []\ntotal3 = []\ncounter3 = 0\nwhile counter3 <= 50:\n created_win = randint(1, 100)\n created_draw = randint(1, 100)\n created_lose = randint(1, 100)\n created_age = randint(20, 50)\n\n if created_lose / created_win >= 0.90 and created_lose / created_win <= 1.10:\n win3.append(created_win)\n draw3.append(created_draw)\n lose3.append(created_lose)\n age3.append(created_age)\n\n counter3 += 1\n\nfor i in range(50):\n total3.append([win3[i], draw3[i], lose3[i], age3[i]])\n\nwin4 = []\ndraw4 = []\nlose4 = []\nage4 = []\ntotal4 = []\ncounter4 = 0\nwhile counter4 <= 50:\n created_win = randint(1, 100)\n created_draw = randint(1, 100)\n created_lose = randint(1, 100)\n created_age = randint(20, 50)\n\n if created_win / created_lose >= 1.25 and created_win / created_lose <= 1.50:\n win4.append(randint(1, 100))\n draw4.append(randint(1, 100))\n lose4.append(randint(1, 100))\n age4.append(created_age)\n\n counter4 += 1\n\nfor i in range(50):\n total4.append([win4[i], draw4[i], lose4[i], age4[i]])\n\nwin5 = []\ndraw5 = []\nlose5 = []\ntotal5 = []\ncounter5 = 0\nage5 = []\nwhile counter5 <= 50:\n created_win = randint(1, 100)\n created_draw = randint(1, 100)\n created_lose = randint(1, 100)\n created_age = randint(20, 50)\n\n if created_win / created_lose >= 1.5:\n win5.append(randint(1, 100))\n draw5.append(randint(1, 100))\n lose5.append(randint(1, 100))\n age5.append(created_age)\n\n counter5 += 1\n\nfor i in range(50):\n total5.append([win5[i], draw5[i], lose5[i], age5[i]])\n\nwin_draw_lose = total1 + total2 + total3 + total4 + total5\n\npoint_label = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 
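# label legend: 1 = worst player class up to 5 = best player class, by the\n# win/lose ratios generated above; each class contributes 50 samples that\n# line up index-for-index with win_draw_lose\n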
\n\nwin = int(input('enter win: '))\ndraw = int(input('enter draw: '))\nlose = int(input('enter lose: '))\nage = int(input('enter age: '))\n\n\nmy_classifier = tree.DecisionTreeClassifier()\n\ntlf = my_classifier.fit(win_draw_lose, point_label)\nprint(tlf.predict([[win, draw, lose, age]]))\n\nplayers = []\n\nfor i in range(50):\n players.append([[total1[i]], point_label[0]])\n\nfor i in range(50):\n players.append([[total2[i]], point_label[50]])\n\nfor i in range(50):\n players.append([[total3[i]], point_label[100]])\n\nfor i in range(50):\n players.append([[total4[i]], point_label[150]])\n\nfor i in range(50):\n players.append([[total5[i]], point_label[200]])\n\n\nf = open(\"sonuc.txt\", \"a+\")\n\nfor item in players:\n f.write('%s\\n' % item)\n\nf.close()\n\n# calling the function from the equal module\n\n#deneme = [3, 5, 2, 4, 1, 8]\n#split_teams(deneme)","sub_path":"firstapp/tek.py","file_name":"tek.py","file_ext":"py","file_size_in_byte":4647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"99477650","text":"# test file to check set behaviour in python\nimport random\nfrom random import randint\npoints = set()\npt1 = (2,2)\npt2 = (2,3)\n\npt3 = (2,2)\npoints.add(pt1)\npoints.add(pt2)\n\nprint(pt3 in points)\nprint(points)\nprint(randint(0, 9))\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"169834339","text":"import redis\nfrom log.logger import RecordLogging\n# fetch the verification code stored in Redis\ndef getverifcode(key):\n try:\n r = redis.Redis(host=\"172.28.20.21\",\n port=6379, db=0,\n encoding='utf-8',\n decode_responses=True,\n socket_connect_timeout=3)\n\n raw_code = r.get(key)\n if raw_code is not None:\n # the stored value looks like J\u0006395132\f, so the\n # six-digit code can be sliced out directly\n return raw_code[3:9]\n else:\n print(\"Failed to fetch the verification code!\")\n except Exception as e:\n logs = RecordLogging()\n logs.logger.error(e)\n\n\n# print(getverifcode('AUTOREGISTER_WEB_REDIS_KEY_18392031414'))","sub_path":"api_requests/data/getVerifcation_code.py","file_name":"getVerifcation_code.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"378644088","text":"# title: maximum-average-pass-ratio\n# detail: https://leetcode.com/submissions/detail/467507689/\n# datetime: Sun Mar 14 12:09:13 2021\n# runtime: 2908 ms\n# memory: 64.1 MB\n\nimport heapq\nfrom typing import List\n\nclass Solution:\n def maxAverageRatio(self, classes: List[List[int]], extraStudents: int) -> float:\n s = 0\n q = []\n # greedy: always give the next extra student to the class with the\n # largest marginal gain (t - p) / (t * (t + 1)); negated keys turn\n # Python's min-heap into a max-heap\n for p, t in classes:\n heapq.heappush(q, (-(t - p) / (t * (t + 1)), p, t))\n for i in range(extraStudents):\n d, p, t = heapq.heappop(q)\n p += 1\n t += 1\n heapq.heappush(q, (-(t - p) / (t * (t + 1)), p, t))\n while q:\n d, p, t = heapq.heappop(q)\n s += p / t\n return s / 
len(classes)","sub_path":"leetcode/maximum-average-pass-ratio/467507689.py","file_name":"467507689.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"572433257","text":"from tkinter import *\n \n \n \nwindow = Tk()\nwindow.title(\"Calculator\")\n \ndisplay = Entry(window, width=33, bg=\"yellow\")\ndisplay.grid(row=0, column=0, columnspan=5)\n \nbuttonText = [\n '7', '8', '9', '/' ,'C',\n '4', '5', '6', '*', ' ',\n '1', '2', '3', '-', ' ',\n '0', '.', '=', '+', '끝' ]\n \n \ndef click(key) : \n if key == '=' :\n result = eval(display.get())\n s = str(result)\n display.insert(END, '='+s)\n elif key == '끝' :\n window.destroy()\n else :\n display.insert(END, key)\n \nrowIndex = 1\ncolIndex = 0\n \nfor button_Text in buttonText :\n def process(t=button_Text) :\n click(t)\n Button(window, text=button_Text, width=5, command=process).grid(row=rowIndex, column=colIndex)\n colIndex += 1\n if colIndex>4 :\n rowIndex += 1\n colIndex = 0\n \n \n \nwindow.mainloop()\n","sub_path":"codingPractice/python/학교 시험 대비/tk_calcu.py","file_name":"tk_calcu.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"508231073","text":"#\n# License: BSD\n# https://raw.github.com/robotics-in-concert/rocon_tools/license/LICENSE\n#\n##############################################################################\n# Imports\n##############################################################################\n\nimport rospy\nimport threading\n\n# Local imports\nfrom .exceptions import ServicePairException\n\n##############################################################################\n# Server Class\n##############################################################################\n\n\nclass ServicePairServer(object):\n '''\n The server side of a pubsub service pair.\n '''\n __slots__ = [\n '_publisher',\n '_subscriber',\n '_callback',\n '_use_threads',\n #'_request_handlers', # initiate, track and execute requests with these { hex string ids : dic of RequestHandler objects (Blocking/NonBlocking) }\n 'ServicePairSpec',\n 'ServicePairRequest',\n 'ServicePairResponse',\n ]\n\n ##########################################################################\n # Initialisation\n ##########################################################################\n\n def __init__(self, name, callback, ServicePairSpec, use_threads=False):\n '''\n @param name : resource name of service pair (e.g. testies for pair topics testies/request, testies/response)\n @type str\n @param callback : function invoked when a request arrives\n @param ServicePairSpec : the pair type (e.g. 
rocon_service_pair_msgs.msg.TestiesPair)\n @type str\n '''\n self._callback = callback\n self._use_threads = use_threads\n try:\n p = ServicePairSpec()\n self.ServicePairSpec = ServicePairSpec\n self.ServicePairRequest = type(p.pair_request)\n self.ServicePairResponse = type(p.pair_response)\n except AttributeError:\n raise ServicePairException(\"Type is not a pair spec: %s\" % str(ServicePairSpec))\n self._subscriber = rospy.Subscriber(name + \"/request\", self.ServicePairRequest, self._internal_callback)\n self._publisher = rospy.Publisher(name + \"/response\", self.ServicePairResponse)\n\n ##########################################################################\n # Public Methods\n ##########################################################################\n\n def reply(self, request_id, msg):\n '''\n @param request_id : the request id to associate with this response.\n @type uuid_msgs.UniqueID\n\n @param msg : the response\n @type ServiceResponse\n '''\n pair_response = self.ServicePairResponse()\n pair_response.id = request_id\n pair_response.response = msg\n self._publisher.publish(pair_response)\n\n ##########################################################################\n # Callbacks\n ##########################################################################\n\n def _internal_callback(self, msg):\n '''\n @param msg : message returned from the server (with pair id etc)\n @type self.ServicePairRequest\n '''\n # Check if it is a blocking call that has requested it.\n if self._use_threads:\n thread = threading.Thread(target=self._callback, args=(msg.id, msg.request))\n thread.start()\n else:\n self._callback(msg.id, msg.request)\n","sub_path":"rocon_python_comms/src/rocon_python_comms/service_pair_server.py","file_name":"service_pair_server.py","file_ext":"py","file_size_in_byte":3467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"332772723","text":"# %%\nimport import_path\nimport numpy as np\nimport pandas as pd\nfrom models.oracle import Oracle\nfrom models.surrogate_teacher import Surrogate\nfrom models.omniscient_teacher import Omniscient\nfrom models.random_teacher import Random\nfrom models.without_teacher import Without_teacher\nfrom utils import predict, predict_by_W, rmse_W, write_np2csv, rmse_w, make_random_mask, predict_wj\nfrom load_data import read_W, read_csv, split_data\nfrom tqdm import tqdm\nfrom sklearn.metrics import roc_auc_score\nimport logging\nimport datetime\n\n# %%\ndf = read_csv('output/weebil_vespula_pm1.csv', header=0)\ntrain_X, test_X, train_y, test_y = split_data(df, False)\neta, lambd, alpha = 1, 2, 0.01\ntraining_epochs, loops = 10, 10\nJ = 10\n# total number of teaching examples to present\ntextbook = 500\n# number of examples used for estimation\ntest_textbook_list = [100]\n# number of examples presented between estimations\nbetween_textbook_list = [1]\n# experiment (run) id\nk = 1\n\nlambds = [1, 2, 3, 4, 5]\nfor lambd in lambds:\n oracle = Oracle(eta=eta, lambd=lambd)\n min_w = oracle.estimate_min_w(\n pd.concat([train_X, test_X]), pd.concat([train_y, test_y]))\n print('{}: {}'.format(min_w, predict(test_X, test_y, min_w)))\n W_init = oracle.make_W_init(J=J)\n W = W_init.copy()\n train_X_ = train_X.copy()\n train_y_ = train_y.copy()\n\n now = datetime.datetime.now()\n now_str = now.strftime('%Y%m%d%H%M%S')\n result_path = 'result/insect_{}_{}_{}'.format(now_str, k, lambd)\n # NOTE: logging.basicConfig only takes effect on its first call, so every\n # iteration after the first keeps logging to the first file\n logging.basicConfig(\n filename='./logs/log_insect_{0:%Y%m%d%H%M%S}_{1}_{2}.log'.format(now, k, lambd), level=logging.DEBUG\n )\n logging.debug(\n './logs/log_{0:%Y%m%d%H%M%S}_{1}_{2}.log'.format(now, k, lambd))\n 
logging.debug('min_w')\n logging.debug(min_w)\n logging.debug('eta,lambd')\n logging.debug([eta, lambd])\n # %%\n\n # %%\n # Omniscient\n logging.debug('Omniscient')\n train_X_ = train_X.copy()\n train_y_ = train_y.copy()\n W = W_init.copy()\n omt = Omniscient(min_w, W, N=train_X_.shape[0], alpha=alpha)\n\n a = np.zeros(1)\n b = np.zeros(J)\n for i in range(textbook):\n a = np.vstack((a, predict_by_W(test_X, test_y, omt.W)))\n b = np.vstack((b, predict_wj(test_X, test_y, omt.W)))\n print(predict_wj(test_X, test_y, omt.W))\n omt.show_textbook(X=train_X_, y=train_y_, N=1, option='min_w')\n logging.debug(predict_by_W(test_X, test_y, omt.W))\n a = a[1:]\n b = b[1:]\n write_np2csv(\n a, '{}_{}.csv'.format(result_path, 'omniscient'))\n write_np2csv(b, '{}_{}_wj.csv'.format(result_path, 'omniscient'))\n print('{}: finished.'.format(k))\n # %%\n # Random\n\n train_X_ = train_X.copy()\n train_y_ = train_y.copy()\n W = W_init.copy()\n\n rat = Random(min_w, W, N=train_X_.shape[0], alpha=alpha)\n logging.debug('Random')\n a = np.zeros(1)\n b = np.zeros(J)\n for i in range(textbook):\n a = np.vstack((a, predict_by_W(test_X, test_y, rat.W)))\n b = np.vstack((b, predict_wj(test_X, test_y, rat.W)))\n\n print(\"{}: {}\".format(i, predict_by_W(test_X, test_y, rat.W)))\n rat.show_textbook(train_X_, y=train_y_, N=1, option='min_w')\n logging.debug(predict_by_W(test_X, test_y, rat.W))\n a = a[1:]\n b = b[1:]\n write_np2csv(a, '{}_{}.csv'.format(result_path, 'random'))\n write_np2csv(b, '{}_{}_wj.csv'.format(result_path, 'random'))\n print('{}: finished.'.format(k))\n\n # %%\n for t_num in test_textbook_list:\n for b_num in between_textbook_list:\n # wot\n logging.debug('without teacher')\n w_init = np.random.normal(loc=0, scale=lambd, size=min_w.shape)\n W = W_init.copy()\n train_X_ = train_X.copy()\n train_y_ = train_y.copy()\n\n wot = Without_teacher(\n w_init, W, N=train_X_.shape[0], eta=eta, lambd=lambd, alpha=alpha)\n\n a = np.zeros(7)\n b = np.zeros(J)\n for i in range(textbook):\n tmp = np.append([], predict(test_X, test_y, min_w))\n tmp = np.append(tmp, predict(test_X, test_y, wot.w_star))\n tmp = np.append(tmp, predict_by_W(test_X, test_y, wot.W))\n tmp = np.append(tmp, predict_by_W(test_X, test_y, wot.W_star))\n tmp = np.append(tmp, rmse_w(wot.w_star, min_w))\n tmp = np.append(tmp, rmse_W(wot.W, wot.W_star))\n tmp = np.append(tmp, roc_auc_score(\n test_y, wot.predict_y(test_X, wot.w_star)))\n a = np.vstack((a, tmp))\n b = np.vstack((b, predict_wj(test_X, test_y, wot.W)))\n print()\n\n if i % b_num == 0:\n masked_textbook, _ = make_random_mask(train_X_, t_num)\n wot.learn(masked_textbook, 10, 10)\n print('{}: {}'.format(i, predict_by_W(test_X, test_y, wot.W)))\n wot.show_textbook(train_X_, y=None, N=1, option='w_star')\n print(predict(test_X, test_y, min_w), roc_auc_score(\n test_y, wot.predict_y(test_X, wot.w_star)))\n logging.debug([predict_by_W(test_X, test_y, wot.W),\n predict_by_W(test_X, test_y, wot.W_star)])\n\n a = a[1:]\n write_np2csv(\n a, '{}_{}_{}_{}.csv'.format(result_path, \"wot\", t_num, b_num))\n b = b[1:]\n write_np2csv(b, '{}_{}_{}_{}_wj.csv'.format(\n result_path, 'wot', t_num, b_num))\n print('t{}b{}: finished.'.format(t_num, b_num))\n\n # %%\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":5498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"579658020","text":"import config\nimport pipeline\nimport validation\n\nconfig.create_directory()\n\ndf = 
pipeline.data_download()\npipeline.variables(df)\nX_train, y_train = pipeline.data_split(df)\npipeline.model_train(X_train, y_train)\nvalidation_object = validation.Validation(model_dir=config.MODEL_DIR, dataset_dir=config.DATASET_DIR)\nvalidation_object.start_validation()\n","sub_path":"scripts/train_pipeline.py","file_name":"train_pipeline.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"642195462","text":"import sys\n\ndef g(s):\n # strip trailing '+' signs, then count sign changes scanning from the right\n while len(s) > 0 and s[-1] == '+':\n s = s[:-1]\n sign_change = 0\n last_sign = '+'\n for i in range(0, len(s)):\n if s[-(i+1)] != last_sign:\n sign_change += 1\n last_sign = s[-(i+1)]\n return sign_change\n\nl = sys.stdin.readline()\nm = int(l.strip())\n\nfor i in range(0, m):\n l = sys.stdin.readline()\n s = l.strip()\n print(\"Case #%d: %d\" % (i + 1, g(s)))\n\n","sub_path":"codes/CodeJamCrawler/16_0_2_neat/16_0_2_thueii_q2.py","file_name":"16_0_2_thueii_q2.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"76190000","text":"#!/usr/bin/python\nimport smbus\nimport numpy as np\n\n\np0 = np.array([[1886],[-3507],[6048.5],[1/3000],[1/3000],[1/3000],[0],[0],[0]]) # calibration parameters: hard-iron offsets (first three entries) then soft-iron scale/cross terms\nbus = smbus.SMBus(1) # 0 = /dev/i2c-0 (port I2C0), 1 = /dev/i2c-1 (port I2C1)\nDEVICE_ADDRESS = 0x1e #7 bit address (will be left shifted to add the read write bit)\nCTRL_REG1=0x20\nCTRL_REG2=0x21\nCTRL_REG3=0x22\nCTRL_REG4=0x23\nCTRL_REG5=0x24\nOUT_X_L = 0b00101000\nOUT_Y_L = 0b00101010\nOUT_Z_L = 0b00101100\n\ndef fp(x,p0):\n # apply the calibration: symmetric scale/cross-term matrix times (reading - offsets)\n I1 = np.array([[p0[3,0],p0[6,0],p0[8,0]],\n [p0[6,0],p0[4,0],p0[7,0]],\n [p0[8,0],p0[7,0],p0[5,0]]])\n I2 = np.array([[p0[0,0]],[p0[1,0]],[p0[2,0]]])\n return I1 @ (x-I2)\n\ndef get_cap():\n bus.write_i2c_block_data(DEVICE_ADDRESS, CTRL_REG4, [0b00000000])\n bus.write_i2c_block_data(DEVICE_ADDRESS, CTRL_REG3, [0b00000000])\n ledout_values_x = bus.read_i2c_block_data(DEVICE_ADDRESS, OUT_X_L,2)\n x = ledout_values_x[0] + ledout_values_x[1]*256\n if x > 32767 : # 2**15 - 1\n x = x - 65536 # 2**16\n ledout_values_y = bus.read_i2c_block_data(DEVICE_ADDRESS, OUT_Y_L,2)\n y = ledout_values_y[0] + ledout_values_y[1] * 256\n if y > 32767:\n y = y - 65536\n ledout_values_z = bus.read_i2c_block_data(DEVICE_ADDRESS, OUT_Z_L,2)\n z = ledout_values_z[0] + ledout_values_z[1] * 256\n if z > 32767:\n z = z - 65536\n N = np.array([[x], [y], [z]])\n N2 = fp(N,p0)\n x2 = N2[0,0]\n y2 = N2[1, 0]\n z2 = N2[2, 0]\n cap_m2 = np.arctan2(y2, x2)\n cap_m2 = cap_m2 * (180 / np.pi)\n if cap_m2<0 :\n cap_m2+=360\n\n\n return cap_m2\n\n\n# print(get_cap())","sub_path":"Récup/S3_JV/fonction_regul_cap.py","file_name":"fonction_regul_cap.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"356598104","text":"# archive: exercise.py\n# author: riizdo\n# date: 16-02-2021\n# description: exercises file \n\nimport pygame\nfrom pygame.locals import *\nimport random\n\n\nclass Exercise():\n def __init__(self, level, id):\n self.level = level\n self.elements = {}\n self.elements['id'] = id\n self.elements['element'] = []\n self.elements['operation'] = []\n\n\n def getExercise(self):\n return self.elements\n\n\n def getElements(self):
\n # the dict is keyed directly; this keeps the original list-wrapped return value\n return [self.elements['element']]\n\n\n def getOperation(self):\n return [self.elements['operation']]\n\n\n def getResult(self):\n return self.elements.get('result')\n\n\n\n\nclass Adds(Exercise):\n def __init__(self, level, id):\n Exercise.__init__(self, level, id)\n self.elements['operation'] = '+'\n\n nElements = self.getNElements()\n\n self.defineElements(nElements)\n self.elements['result'] = self.defineResult()\n\n\n def getNElements(self):\n if self.level < 75:\n return 2\n elif self.level < 90:\n return 3\n else:\n return 4\n\n\n def defineElements(self, nElements):\n if self.level < 10:\n max_value = 10\n elif self.level < 20:\n max_value = 50\n elif self.level < 30:\n max_value = 100\n else:\n max_value = 1000\n\n for i in range(0, nElements):\n n = random.randrange(0, max_value)\n self.elements['element'].append(n)\n\n\n def defineResult(self):\n return sum(self.elements['element'])\n\n","sub_path":"exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"501893305","text":"import pytest\n\nfrom tests.integration import asserts\n\n\ndef test_should_list_mapping_rules(proxy, mapping_rule):\n resource = proxy.mapping_rules.list()\n assert len(resource) > 1\n\n\ndef test_should_create_mapping_rule(mapping_rule, mapping_rule_params):\n asserts.assert_resource(mapping_rule)\n asserts.assert_resource_params(mapping_rule, mapping_rule_params)\n\n\ndef test_should_mapping_rule_endpoint_return_ok(mapping_rule, apicast_http_client):\n response = apicast_http_client.get(path=mapping_rule['pattern'])\n asserts.assert_http_ok(response)\n\n\ndef test_should_fields_be_required(proxy, updated_mapping_rules_params):\n del updated_mapping_rules_params['delta']\n del updated_mapping_rules_params['http_method']\n del updated_mapping_rules_params['metric_id']\n resource = proxy.mapping_rules.create(params=updated_mapping_rules_params, throws=False)\n asserts.assert_errors_contains(resource, ['delta', 'http_method', 'metric_id'])\n\n\ndef test_should_read_mapping_rule(mapping_rule, mapping_rule_params):\n resource = mapping_rule.read()\n asserts.assert_resource(resource)\n asserts.assert_resource_params(resource, mapping_rule_params)\n\n\ndef test_should_update_mapping_rule(proxy, updated_mapping_rules_params, apicast_http_client):\n resource = proxy.mapping_rules.create(params=updated_mapping_rules_params)\n pattern = '/anything/test-foo'\n resource['pattern'] = pattern\n resource.update()\n updated_resource = resource.read()\n assert updated_resource['pattern'] == pattern\n response = apicast_http_client.get(path=pattern)\n asserts.assert_http_ok(response)\n\n\ndef test_should_delete_mapping_rule(proxy, updated_mapping_rules_params):\n resource = proxy.mapping_rules.create(params=updated_mapping_rules_params)\n assert resource.exists()\n resource.delete()\n assert not resource.exists()\n\n\ndef test_stop_processing_mapping_rules_once_first_one_is_met(proxy, updated_mapping_rules_params,\n apicast_http_client):
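\n # Mapping rules are evaluated in order: once the more specific\n # '/anything/search' rule matches, the '/anything/{id}' wildcard rule\n # must not fire for the same request.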
\n params_first = updated_mapping_rules_params.copy()\n params_first['pattern'] = '/anything/search'\n resource_first = proxy.mapping_rules.create(params=params_first)\n assert resource_first.exists()\n\n params_second = updated_mapping_rules_params.copy()\n params_second['pattern'] = '/anything/{id}'\n resource_second = proxy.mapping_rules.create(params=params_second)\n assert resource_second.exists()\n\n response = apicast_http_client.get(path=params_first['pattern'])\n asserts.assert_http_ok(response)\n\n assert params_first['pattern'] in response.url\n","sub_path":"tests/integration/test_integration_mapping_rules.py","file_name":"test_integration_mapping_rules.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"339149620","text":"import multiprocessing as mpi\nimport yaml\nwith open(\"config.yml\",\"r\") as f:\n config = yaml.safe_load(f)\nimport os, sys\nsys.path.append(f\"{config['machine']['codepath']}\")\n\nimport lib\n\n\ndef main():\n comp = sys.argv[1]\n comp = comp.split(\",\")\n times = sys.argv[2]\n times = times.split(\",\")\n\n fdir = []\n for c in comp:\n for seas in times:\n fdir.append([f\"{config['run']['folder']}/{config['run']['name']}/{c}/hist/{seas}\",seas])\n\n regions = config[\"timeseries\"][\"regions\"]\n varlist = lib.mpimods.make_varlist3(fdir,regions)\n\n jobs = []\n for i in range(len(varlist)):\n arg = varlist[i]\n print(arg)\n p = mpi.Process(target=lib.proc.ts, args=(arg[0],arg[2],arg[1],arg[3],))\n jobs.append(p)\n p.start()\n\n # wait for every worker process to finish\n for p in jobs:\n p.join()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"process/all/timeseries.py","file_name":"timeseries.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"336541518","text":"#!
/usr/bin/env python2\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport sys\nfrom distutils.dir_util import copy_tree\n\n\nclass EmPack:\n def __init__(self, system, extensions, emcoredef, fullname=None, platform=None, theme=None, force=False, port=False, readonly=False, rompath=None):\n # Set member variables\n \"\"\"\n\n :type emcoredef: list\n \"\"\"\n self._System = system.lower()\n self._Extensions = extensions\n self._EmulatoreCoreDefinitions = emcoredef\n self._FullName = fullname if fullname is not None else self._System.title()\n self._Platform = platform if platform is not None else self._System\n self._Theme = theme if theme is not None else self._System\n self._Force = force\n self._Port = port\n self._ReadOnly = readonly\n self._RomPath = rompath if rompath is not None else \"\"\n\n self._SingleMode = False\n self._SingleEmulatorBRPackage = \"\"\n self._MultiEmulatorBRPackage = dict()\n self._BRVarsList = list()\n\n if not isinstance(emcoredef, list):\n raise TypeError(\"emcoredef must be a list\")\n\n # Don't use single emulator!\n if len(emcoredef) == 1 and emcoredef[0].count(':') == 0:\n self._SingleMode = True\n self._SingleEmulatorBRPackage = emcoredef[0]\n self._BRVarsList = [emcoredef[0]]\n else:\n self._SingleMode = False\n self._BRVarsList = self.initmultiem(emcoredef)\n\n self._CommandLine = self.setcmdline(system, extensions, emcoredef, fullname, platform, theme, rompath)\n self._SystemUpper = self._System.upper()\n self._PackageName = \"recalbox-romfs-{}\".format(self._System)\n self._PackageDir = \"package/recalbox-romfs/{}\".format(self._PackageName)\n if self._Port:\n self._RomsDir = \"{}/{}/ports/{}\".format(self._PackageDir, 'roms', self._FullName)\n else:\n self._RomsDir = \"{}/{}/{}\".format(self._PackageDir, 'roms', self._System)\n self._Makefile = '{}/{}.mk'.format(self._PackageDir, self._PackageName)\n\n def __str__(self):\n return \"\"\"\n system: {}\n extensions: {}\n fullname: {}\n platform: {}\n theme: {}\n rompath: {}\n singleMode: {}\n single emulator package: {}\n multi emulator packages: {}\n ------\n list of BR package variables: {}\n \"\"\".format(self._System, self._Extensions, self._FullName, self._Platform, self._Theme, self._RomPath, self._SingleMode, self._SingleEmulatorBRPackage, self._MultiEmulatorBRPackage, self._BRVarsList)\n\n def setcmdline(self, system, extensions, emcoredef, fullname=None, platform=None, theme=None, rompath=None):\n cmdline = \"{} \".format(sys.argv[0])\n if self._Force: cmdline += \"--force \"\n if self._Port: cmdline += \"--port \"\n if self._ReadOnly: cmdline += \"--readonly \"\n cmdline += \"--system {} \".format(system)\n cmdline += \"--extension '{}' \".format(extensions)\n cmdline += \"--fullname '{}' \".format(fullname) if fullname is not None else \"\"\n cmdline += \"--platform {} \".format(platform) if platform is not None else \"\"\n cmdline += \"--theme {} \".format(theme) if theme is not None else \"\"\n cmdline += \"--rompath {} \".format(rompath) if rompath is not None else \"\"\n cmdline += \" \".join(emcoredef)\n return cmdline\n\n def initmultiem(self, emcorelist):\n \"\"\"\n Returns the list of BR2_PACKAGE from the command line into a single list\n :type emcorelist: list\n :rtype: list\n \"\"\"\n superpackage = list()\n for item in emcorelist:\n if item.count(':') != 3:\n print(\"{} must follow the pattern priority:emulator:core:BR2_PACKAGE_NAME\".format(item), file=sys.stderr)\n exit(1)\n try:\n # split priority:emulator:core:BR_VAR into dicts\n priority, emulator, core, brvar = item.split(':')\n # print \"{} 
splitted to {} {} {} {}\".format(arg, priority, emulator, core, brVar)\n if emulator not in self._MultiEmulatorBRPackage:\n self._MultiEmulatorBRPackage[emulator] = dict()\n\n self._MultiEmulatorBRPackage[emulator][core] = (brvar, priority)\n if brvar not in superpackage:\n superpackage.append(brvar)\n except:\n raise\n return superpackage\n\n def listpackages(self):\n \"\"\" Appends buildroot package variables to be processed by a ifeq/ifneq command\n :returns: a concatenation of $(VAR1)$(VAR2) etc ... ex: '$(BR2_PACKAGE_LIBRETRO_MAME2003)$(BR2_PACKAGE_LIBRETRO_MAME2000)'\n :rtype: string\n \"\"\"\n cond = '$('\n cond += ')$('.join(self._BRVarsList)\n cond += ')'\n return cond\n\n def generatedefinename(self, typeCmd, emulator='', core=''):\n \"\"\" Generate a define name for .mk files\n The return string looks like CONFIGURE_system_emulator_core_typeCmd , depending on provided arguments\n :param typeCmd: they type of command\n :type typeCmd: string must be START, END or DEF\n :param emulator: optionnal, the emulator name (ex: libretro)\n :type emulator: string\n :param core: optional, the core name (ex: mame2003)\n :type core: string\n :returns: the define name\n :rtype: string\n \"\"\"\n if typeCmd not in ('START', 'END', 'DEF'):\n sys.stderr.write(\"generateDefineName : typeCmd must be START END or DEF\\n\")\n sys.exit(1)\n\n if core and not emulator:\n sys.stderr.write(\"generateDefineName : you must define emulator if you set core\\n\")\n sys.exit(1)\n\n defStr = \"CONFIGURE_{}\".format(self._SystemUpper)\n\n if emulator:\n emulatorUp = emulator.upper()\n defStr += \"_{}\".format(emulatorUp)\n if core:\n coreUp = core.upper()\n defStr += \"_{}\".format(coreUp)\n\n defStr += \"_{}\".format(typeCmd)\n return defStr\n\n def addemulator(self, emulator):\n \"\"\" Returns the starting makefile block of a new emulator section in the es_settings.cfg\n the returned string looks like :\n \n ifneq(brVars,)\n define YYY\n echo -e '\\t\\t\\n' > $(SYSTEM_XML_{})\n \n endef\n\n :returns: a multiline and indented string\n :rtype: string\n \"\"\"\n #emulatorUp = emulator.upper()\n emulatorLo = emulator.lower()\n defineName = self.generatedefinename('START', emulator)\n #returnStr = \"define {}\\n\".format(defineName)\n cond = self.listpackages()\n \n returnStr = \"ifneq ({},)\\n\".format(cond)\n returnStr += \"define {}\\n\".format(defineName)\n returnStr += \"\\t$(call RECALBOX_ROMFS_CALL_START_EMULATOR,$(SYSTEM_XML_{}),{})\\n\".format(self._SystemUpper, emulatorLo)\n returnStr += \"endef\\n\"\n \n return returnStr, defineName\n\n def addcore(self, emulator, core, brVar, priority):\n \"\"\" Returns the starting makefile block of a new emulator section in the es_settings.cfg\n The returned string looks like:\n\n ifeq(brVar,y)\n define XXX\n echo -e '\\t\\t\\tcore' >> $(SYSTEM_XML_system)\n\n endef\n RECALBOX_ROMFS_system_CONFIGURE_CMDS += $(XXX)\n\n :param emulator: optionnal, the emulator name (ex: libretro)\n :type emulator: string\n :param core: the core name\n :type core: string\n :param brVar: a BR2_PACKAGE_ like name\n :type brVar: string\n :param priority: core priority (lower priority choosen first as default)\n :type priority: int\n :returns: a multiline and indented string\n :rtype: string\n \"\"\"\n #emulatorUp = emulator.upper()\n #emulatorLo = emulator.lower()\n #coreUp = core.upper()\n coreLo = core.lower()\n defineName = self.generatedefinename('DEF', emulator, core)\n\n returnStr = \"ifeq ($({}),y)\\n\".format(brVar)\n returnStr += \"define {}\\n\".format(defineName)\n returnStr += 
\"\\t$(call RECALBOX_ROMFS_CALL_ADD_CORE,$(SYSTEM_XML_{}),{},{})\\n\".format( self._SystemUpper,coreLo,priority)\n returnStr += \"endef\\n\"\n returnStr += \"endif\\n\\n\"\n\n return returnStr, defineName\n\n def endemulator(self, emulator):\n \"\"\" Returns the ending makefile block of a new emulator section in the es_settings.cfg\n the returned string looks like :\n\n define XXX\n echo -e '\\t\\t\\n' >> $(SYSTEM_XML_MAME)\n\n endef\n RECALBOX_ROMFS_system_CONFIGURE_CMDS += $(XXX)\n\n :param emulator: optionnal, the emulator name (ex: libretro)\n :type emulator: string\n :returns: a multiline and indented string\n :rtype: string\n \"\"\"\n #emulatorUp = emulator.upper()\n defineName = self.generatedefinename('END', emulator)\n\n returnStr = \"define {}\\n\".format(defineName)\n returnStr += \"\\t$(call RECALBOX_ROMFS_CALL_END_EMULATOR,$(SYSTEM_XML_{}))\\n\".format(self._SystemUpper)\n returnStr += \"endef\\n\"\n returnStr += \"endif\\n\\n\"\n\n return returnStr, defineName\n\n def writemakefile(self):\n print(\"== Creating new package dir structure: {}\".format(self._RomsDir))\n if not os.path.exists(self._RomsDir):\n try:\n os.makedirs(self._RomsDir)\n print(\"{} created !\".format(self._RomsDir))\n except:\n print(\"Failed ... Could not make dir {}\".format(self._RomsDir))\n raise\n else:\n if not self._Force:\n print(\"{} already exists ... Are you sure of what you're doing ? Exiting ...\".format(self._RomsDir))\n sys.exit(1)\n\n if self._SingleMode:\n skeletonFile = 'package/recalbox-romfs/recalbox-romfs_single_emulator.skeleton'\n else:\n if len(self._RomPath) > 0:\n skeletonFile = 'package/recalbox-romfs/recalbox-romfs_multicores.rompath.skeleton'\n else:\n if self._Port:\n skeletonFile = 'package/recalbox-romfs/recalbox-romfs_multicores.ports.skeleton'\n else:\n skeletonFile = 'package/recalbox-romfs/recalbox-romfs_multicores.skeleton'\n\n #prtCmdLine = \" \".join(sys.argv[:]) # Ugly, sadly ... 
Should be improved to reflect quotes\n\n #\n # Replace known patterns\n #\n mkFile = open(skeletonFile, 'r').read()\n #mkFile = mkFile.replace('%COMMAND_LINE%', prtCmdLine)\n mkFile = mkFile.replace('%COMMAND_LINE%', self._CommandLine)\n mkFile = mkFile.replace('%SYSTEM_EXTENSIONS%', self._Extensions)\n mkFile = mkFile.replace('%SYSTEM_UPPER%', self._SystemUpper)\n mkFile = mkFile.replace('%SYSTEM_LOWER%', self._System)\n mkFile = mkFile.replace('%FULLNAME%', self._FullName)\n mkFile = mkFile.replace('%PLATFORM%', self._Platform)\n mkFile = mkFile.replace('%THEME%', self._Theme)\n mkFile = mkFile.replace('%RO%', '1' if self._ReadOnly else '0')\n mkFile = mkFile.replace('%ROMPATH%', self._RomPath)\n\n #\n # patterns : additional actions for multicore systems\n #\n listDefines = list()\n if not self._SingleMode:\n superString = \"\"\n for emulator, cores in self._MultiEmulatorBRPackage.iteritems():\n print(\"Adding emulator {} {}\".format(self._System, emulator))\n definePart, defineName = self.addemulator(emulator)\n superString += definePart\n listDefines.append(defineName)\n for core, (brVar, priority) in cores.iteritems():\n definePart, defineName = self.addcore(emulator, core, brVar, priority)\n superString += definePart\n listDefines.append(defineName)\n\n definePart, defineName = self.endemulator(emulator)\n superString += definePart\n listDefines.append(defineName)\n finalDefines = \"$(\" + \")\\n\\t$(\".join(listDefines) + \")\"\n print(listDefines)\n print(finalDefines)\n mkFile = mkFile.replace('%SUPER_PACKAGE%', self.listpackages())\n mkFile = mkFile.replace('%EMULATORS_AND_CORES%', superString)\n mkFile = mkFile.replace('%EMULATORS_DEFINES%', finalDefines)\n else:\n mkFile = mkFile.replace('%BR2_PACKAGE_NAME%', self._SingleEmulatorBRPackage)\n\n print(\"== Writing {} :\".format(self._Makefile), end=' ')\n with open(self._Makefile, \"w\") as f:\n try:\n f.write(mkFile)\n print(\"OK !\")\n except:\n print(\"Failed ... Couldn't write to {}. Aborting ...\".format(self._Makefile))\n raise\n\n def writeconfigin(self):\n # Time to write the Config.in\n dependsOn = \" \\\\\\n\\t || \".join(self._BRVarsList)\n print(\"== Writing the Config.in: \", end=' ')\n configIn = \"config BR2_PACKAGE_RECALBOX_ROMFS_{}\\n\".format(self._SystemUpper)\n configIn += \"\\tbool \\\"recalbox-romfs-{}\\\"\\n\".format(self._System)\n #~ configIn += \"\\tdefault y\\n\"\n configIn += \"\\tselect BR2_PACKAGE_RECALBOX_ROMS\\n\"\n configIn += \"\\tdepends on {}\\n\".format(dependsOn)\n configIn += \"\\thelp\\n\"\n configIn += \"\\t share_init/roms and xml for {}\\n\".format(self._System)\n\n fileConfigIn = \"{}/Config.in\".format(self._PackageDir)\n try:\n with open(fileConfigIn, \"w\") as f:\n f.write(configIn)\n print(\"OK!\")\n except:\n print(\"KO!\")\n print(\"Couldn't write the {}\".format(fileConfigIn))\n raise\n\n def copyoverlaydir(self):\n # Copy the previous fsoverlay if it exists\n overlaydir = \"board/recalbox/fsoverlay/recalbox/share_init/roms/{}\".format(self._System)\n print(\"== Copy the previous fsoverlay of this system if it exists:\", end=' ')\n if os.path.exists(overlaydir):\n try:\n copy_tree(overlaydir, self._RomsDir)\n # Need to remove the .keep\n print(\"OK !\")\n except:\n print(\"Failed ... Couldn't copy {} to {}. 
Aborting ...\".format(overlaydir, self._PackageDir))\n raise\n else:\n print(\"No overlay, creating default files instead:\", end=' ')\n LISEZ_MOI = \"{}/_lisezmoi.txt\".format(self._RomsDir)\n READ_ME = \"{}/_readme.txt\".format(self._RomsDir)\n try:\n if not os.path.exists(LISEZ_MOI):\n with open(LISEZ_MOI, \"w\") as f:\n f.write(\"Remplir ce fichier\")\n if not os.path.exists(READ_ME):\n with open(READ_ME, \"w\") as f:\n f.write(\"Please fill the file\")\n print(\"OK !\")\n except:\n print(\"Failed ... couldn't create {} or {}\".format(LISEZ_MOI, READ_ME))\n raise\n\n def finalword(self):\n # Ask the user to add himself to recalbox-rom.mk the following lines:\n print(\"\\nNow you will have to edit :\\n\")\n # print \" * package/recalbox-romfs/recalbox-romfs/Config.in and add :\"\n # print \"\\tdepends on BR2_PACKAGE_RECALBOX_ROMFS_{}\\n\\n\".format(SYSTEM_UPPER)\n print(\" * package/recalbox-romfs/recalbox-romfs/recalbox-romfs.mk :\")\n print(\" Mind the tabulation. The shell may have added spaces instead\\n\")\n defLine = \"# System: {}\\n\".format(self._System)\n if not self._SingleMode:\n defLine += \"ifneq ({},)\\n\".format(self.listpackages())\n else:\n defLine += \"ifeq ($({}),y)\\n\".format(self._SingleEmulatorBRPackage)\n defLine += \"\\tRECALBOX_ROMFS_DEPENDENCIES += {}\\n\".format(self._PackageName)\n defLine += \"endif\"\n print(defLine)\n print(\"\\n * Add a source to package/recalbox-romfs/Config.in :\")\n print(\" source {}/Config.in\".format(self._PackageDir))\n print(\"\\n * Add dependencies to package/recalbox-romfs/recalbox-romfs/Config.in :\")\n print(\"\\n\\t || BR2_PACKAGE_RECALBOX_ROMFS_{}\".format(self._SystemUpper))\n print(\"\\n * Add to the emulators/cores Config.in:\\n\")\n print(\"\\tselect BR2_PACKAGE_RECALBOX_ROMFS_{}\".format(self._SystemUpper))\n\n\nif __name__ == '__main__':\n # Parse command line\n parser = argparse.ArgumentParser(description='Emulator Packager helper')\n parser.add_argument(\"-s\", \"--system\", help=\"Sets the system name ex: snes\", type=str, required=True)\n parser.add_argument(\"-e\", \"--extensions\", help=\"File extensions ES should display. Must be a single string between (double) quotes ex: '.bin .BIN .zip.ZIP'\", type=str, required=True)\n parser.add_argument(\"-f\", \"--fullname\", help=\"Sets the nice full name of the system. Defaults to the system name with a first upper case. ex: 'SEGA Master System'\", type=str, required=False)\n parser.add_argument(\"-p\", \"--platform\", help=\"Sets the system platform. Defaults to the system name. ex: pc\", type=str, required=False)\n parser.add_argument(\"-t\", \"--theme\", help=\"Sets the theme name. Defaults to the system name. ex: nes\", type=str, required=False)\n parser.add_argument(\"-r\", \"--rompath\", help=\"Sets the full rompath instead of /recalbox/share/roms/. 
ex: /recalbox/share/screenshots\", type=str, required=False)\n parser.add_argument(\"--force\", help=\"force overwriting any existing files\", action=\"store_true\", required=False)\n parser.add_argument(\"--port\", help=\"This system is a port, not a regular system\", action=\"store_true\", required=False)\n parser.add_argument(\"--readonly\", help=\"This system is a port, and this port is ready-only (can stay in share_init)\", action=\"store_true\", required=False)\n parser.add_argument(\"packageDetails\", nargs='+', help=\"Either specify a BR2_PACKAGE_XXXXX for a standalone emulator (like reicast, ppsspp etc ...)\\nOr write it like libretro:mame2003:BR2_PACKAGE_LIBRETRO_MAME2003 libretro:mame2000:BR2_PACKAGE_LIBRETRO_MAME2000 advancemame:advancemame:BR2_PACKAGE_ADVANCEMAME for a multiple emulators/cores system. The syntax in that case is emulator:core:BUILDROOT_CORE_PACKAGE\", type=str)\n\n args = parser.parse_args()\n\n ConfigEm = EmPack(args.system, args.extensions, args.packageDetails, fullname = args.fullname, platform = args.platform, theme = args.theme, force = args.force, port = args.port, readonly = args.readonly, rompath = args.rompath)\n print(ConfigEm)\n ConfigEm.writemakefile()\n ConfigEm.writeconfigin()\n ConfigEm.copyoverlaydir()\n ConfigEm.finalword()\n","sub_path":"scripts/linux/empack.py","file_name":"empack.py","file_ext":"py","file_size_in_byte":18889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"281694364","text":"\"\"\"\nThis problem was recently asked by Google.\n\nGiven a list of numbers and a number k, return whether any two numbers from the list add up to k.\n\nFor example, given [10, 15, 3, 7] and k of 17, return true since 10 + 7 is 17.\n\nBonus: Can you do this in one pass?\n\"\"\"\n\ndef resolve(alist, k):\n for i in range(0, len(alist)):\n for j in range(0, len(alist)):\n if alist[i] + alist[j] == k:\n \n return True\n\n return False\n\nalist = [10, 15, 3, 7]\nk = 25\n\nprint(resolve(alist, k))\n","sub_path":"dcp1.py","file_name":"dcp1.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"550268476","text":"from oauth2client.client import AccessTokenRefreshError\n\nfrom ..utils import google_api\nfrom .calendar_parser import parse_event_data\n\n\nclass CalendarViewModel():\n\n __calendars = google_api.get_calendar_urls()\n __calendar_service = google_api.get_calendar_service()\n __event_data = None\n\n def __init__(self):\n self.__event_data = []\n\n @property\n def event_data(self):\n return CalendarViewModel.__event_data\n\n @property\n def calendar_names(self):\n for name in google_api.get_calendar_names():\n yield name\n\n @classmethod\n def refresh_calendars(cls):\n new_event_data = []\n\n for calendar_url in CalendarViewModel.__calendars:\n\n if calendar_url is None:\n continue\n\n calendar_info = google_api.get_calendar_info(calendar_url)\n\n try:\n request = CalendarViewModel.__calendar_service.events() \\\n .list(calendarId=calendar_url)\n\n while request != None:\n response = request.execute()\n parsed_data = parse_event_data(response)\n\n CalendarViewModel._add_calendar_info(\n parsed_data,\n calendar_info['name'],\n calendar_info['title'])\n\n new_event_data.extend(parsed_data)\n request = CalendarViewModel.__calendar_service.events()\\\n .list_next(request, response)\n\n except AccessTokenRefreshError:\n print('Found AccessTokenRefreshError')\n\n CalendarViewModel.__event_data = 
new_event_data\n\n\n @classmethod\n def _add_calendar_info(cls, events, calendar_name, calendar_title):\n for event in events:\n event['track_name'] = calendar_name\n event['track_display_name'] = calendar_title","sub_path":"phreaknic/calendar/viewmodel.py","file_name":"viewmodel.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"575388612","text":"import files_const as pth\n\n\ntry:\n with open(pth.ini_file, 'r', encoding='UTF-8') as settings:\n data = settings.readlines()\n # strip the trailing newline, otherwise the comparison below never matches\n lang = data[1][5:].strip()\nexcept FileNotFoundError:\n lang = 'eng'\nif lang == 'rus':\n import lang.rus as r\nelse:\n import lang.eng as r\n","sub_path":"langSelector.py","file_name":"langSelector.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"371363462","text":"# Mimics the MAS server side script, run with system python\n\nimport socket\nimport os, sys\nimport time\nimport errno\nimport threading\n\ndata = []\nreceiveData = True\n\nserver = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nserver.settimeout(0.01)\n\nhost = \"127.0.0.1\"\nport = 23456\n\nserver.bind((host, port))\n#server.listen()\n'''\nwhile True:\n\ttry:\n\t\tclient, addr = server.accept()\n\texcept socket.error: # Wait for connection\n\t\tcontinue\n\tbreak\n'''\nprint(\"Connected client\")\n#toSend = ('{{' + 'Awoo' + ':' + False + '}}')\nserver.sendto(\"recognizeFace\".encode('utf-8'), (\"127.0.0.1\", 34567))\n\ndef comm():\n\twhile receiveData:\n\t\treceived = None\n\t\ttry:\n\t\t\treceived, addr = server.recvfrom(64)\n\t\t\treceived = received.decode('utf-8')\n\t\t\tprint(\"Received: {}\\tDataSize: {}\".format(received, len(data)))\n\t\texcept socket.error: # No data received\n\t\t\tpass\n\n\t\tif received is not None:\n\t\t\tdata.append(received)\n\n\tserver.close()\n\t\n#thr = threading.Thread(target = comm)\n#thr.setDaemon(True)\n#thr.start()\n\ncomm()","sub_path":"Additions_MASM/game/Additions/MASM/serverSim.py","file_name":"serverSim.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"262439554","text":"import requests\nfrom bs4 import BeautifulSoup\nimport time\nimport smtplib\n\n\ndif = int(input('tracked rate difference: '))\n# the rate deviation that is considered critical\ntime_out = int(input('tracking interval: '))\n# how often to check the rate, in seconds\n\n\nclass Currency:\n\n BTC_USD = 'https://ru.investing.com/crypto/bitcoin/btc-usd'\n # URL of the page that is scraped for the bitcoin rate\n headers = {'User-Agent': 'your_user_agent'}\n # user agent of the device the script runs on\n current_converted_price = 0\n difference = dif\n\n def __init__(self):\n self.current_converted_price = float(self.\n get_currency_price().\n replace(',', '.'))\n\n # return the current bitcoin rate as a string\n def get_currency_price(self):\n full_page = requests.get(self.BTC_USD, headers=self.headers)\n soup = BeautifulSoup(full_page.content, 'html.parser')\n convert = soup.find_all('span', {'id': 'last_last'})\n res = convert[0].text.replace('.', '')\n return res\n\n # check the rate for a critical deviation in an endless loop\n # (tail recursion would eventually hit Python's recursion limit)\n def check_currency(self):\n while True:\n currency = float(self.get_currency_price().replace(',', '.'))\n if currency >= self.current_converted_price + self.difference:\n print('The rate has grown a lot')\n 
self.send_mail()\n elif currency <= self.current_converted_price - self.difference:\n print('the rate has fallen sharply')\n self.send_mail()\n print(f'1 bitcoin = {currency} USD now')\n time.sleep(time_out)\n\n # send an e-mail notification about a critical rate change\n @staticmethod\n def send_mail():\n server = smtplib.SMTP('smtp.gmail.com', 587)\n server.ehlo()\n server.starttls()\n server.ehlo()\n\n # sender's e-mail address and password\n server.login('mail_address_from', 'password')\n\n # message payload\n subject = 'Rate btc'\n body = 'Rate btc was changing'\n message = f'Subject: {subject}\\n\\n{body}'\n\n server.sendmail(\n 'mail_address_from', # from address\n 'mail_address_to', # to address\n message\n )\n server.quit()\n\n\ncur = Currency()\ncur.check_currency()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"499664240","text":"# Copyright 2018 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Controllers for the classroom page.\"\"\"\nfrom __future__ import absolute_import # pylint: disable=import-only-modules\nfrom __future__ import unicode_literals # pylint: disable=import-only-modules\n\nfrom constants import constants\nfrom core.controllers import acl_decorators\nfrom core.controllers import base\nfrom core.domain import config_domain\nfrom core.domain import topic_services\nimport feconf\n\n\nclass ClassroomPage(base.BaseHandler):\n \"\"\"Renders the classroom page.\"\"\"\n\n @acl_decorators.open_access\n def get(self, classroom_name):\n \"\"\"Handles GET requests.\"\"\"\n\n if not constants.ENABLE_NEW_STRUCTURE_PLAYERS:\n raise self.PageNotFoundException\n\n classroom_name_is_valid = False\n for classroom_dict in config_domain.TOPIC_IDS_FOR_CLASSROOM_PAGES.value:\n if classroom_dict['name'] == classroom_name:\n classroom_name_is_valid = True\n break\n\n if not classroom_name_is_valid:\n raise self.PageNotFoundException\n\n self.render_template('classroom-page.mainpage.html')\n\n\nclass ClassroomDataHandler(base.BaseHandler):\n \"\"\"Manages the data that needs to be displayed to a learner on the classroom\n page.\n \"\"\"\n GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON\n\n @acl_decorators.open_access\n def get(self, classroom_name):\n \"\"\"Handles GET requests.\"\"\"\n\n if not constants.ENABLE_NEW_STRUCTURE_PLAYERS:\n raise self.PageNotFoundException\n\n classroom_name_is_valid = False\n for classroom_dict in config_domain.TOPIC_IDS_FOR_CLASSROOM_PAGES.value:\n if classroom_dict['name'] == classroom_name:\n classroom_name_is_valid = True\n topic_ids = classroom_dict['topic_ids']\n break\n\n if not classroom_name_is_valid:\n raise self.PageNotFoundException\n\n topic_summaries = topic_services.get_multi_topic_summaries(topic_ids)\n topic_rights = topic_services.get_multi_topic_rights(topic_ids)\n
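 # Keep only summaries whose topic is actually published; unpublished\n # topics must stay invisible on the classroom page.\n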
 topic_summary_dicts = [\n summary.to_dict() for ind, summary in enumerate(topic_summaries)\n if summary is not None and topic_rights[ind].topic_is_published\n ]\n\n self.values.update({\n 'topic_summary_dicts': topic_summary_dicts\n })\n self.render_json(self.values)\n","sub_path":"core/controllers/classroom.py","file_name":"classroom.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"342813007","text":"import datetime\nimport numpy as np\nimport sys\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nimport sip\nimport pandas as pd\nfrom sklearn.manifold import MDS\nimport matplotlib.pyplot as plt\nimport math\nimport copy\nfrom PIL import Image, ImageDraw, ImageFont\nimport requests\nimport os\nimport MeCab\nfrom scipy.spatial import Delaunay, delaunay_plot_2d, Voronoi, voronoi_plot_2d\nimport random\n\nKR = 10000000\nKA = 500000\n\nHOW_MANY_MOVES = 100\n\nclass Wrec:\n def __init__(self, word, p_c_x, p_c_y, p_tl_x, p_tl_y, p_tr_x, p_tr_y, p_bl_x, p_bl_y, p_br_x, p_br_y, size, color, i):\n self.index_num = i\n self.word = word\n self.p_c = np.array([float(p_c_x), float(p_c_y)])\n self.p_tl = np.array([float(p_tl_x), float(p_tl_y)])\n self.p_tr = np.array([float(p_tr_x), float(p_tr_y)])\n self.p_bl = np.array([float(p_bl_x), float(p_bl_y)])\n self.p_br = np.array([float(p_br_x), float(p_br_y)])\n self.size = int(size)\n self.color = color\n\n self.conneced_wrec_dict = {} # {vertex index: wrec, vertex index: wrec, ..., vertex index: wrec}\n self.belonged_mesh = [] # [[a, b, c], [c, d, e]..., [f, g, h]]\n self.fs = np.zeros(2)\n self.fr = np.zeros(2)\n self.fa = np.zeros(2)\n self.f_all = np.zeros(2)\n\n self.bfr_p_c = np.zeros(2)\n self.aft_p_c = np.zeros(2)\n\n self.went_over = False\n \n def calculate_fs(self):\n self.fs = np.zeros(2)\n for wrec in self.conneced_wrec_dict.values():\n d = np.linalg.norm(self.p_c - wrec.p_c)\n one_fs = self.size * wrec.size * d\\\n * ((wrec.p_c - self.p_c) / np.linalg.norm(wrec.p_c - self.p_c))\n '''\n print(\"size : \" +str(self.size) )\n print(\"d : \" + str(d))\n print((wrec.p_c - self.p_c) / np.linalg.norm(wrec.p_c - self.p_c))\n print(\"*********\")\n '''\n self.fs += one_fs\n\n def calculate_fr(self):\n self.fr = np.zeros(2)\n for wrec in self.conneced_wrec_dict.values():\n # when the two rectangles overlap\n if max(self.p_tl[0], wrec.p_tl[0]) <= min(self.p_br[0], wrec.p_br[0]) and\\\n max(self.p_tl[1], wrec.p_tl[1]) <= min(self.p_br[1], wrec.p_br[1]):\n one_fr = KR * min( abs(self.p_br[0]-wrec.p_tl[0]), abs(self.p_br[1]-wrec.p_tl[1]) ) \\\n * ((self.p_c - wrec.p_c) / np.linalg.norm(self.p_c - wrec.p_c))\n self.fr += one_fr\n #print(self.fr)\n\n def calculate_fa(self):\n # no-op placeholder: fa is actually computed inside Wrecset.move()\n pass\n\n\n def calculate_all_f(self):\n self.f_all = self.fs + self.fr + self.fa\n\n\n\nclass Wrecset:\n def __init__(self, path):\n self.wrec_list = []\n self.df = pd.read_csv(path)\n row_no = len(self.df)\n i = 0\n while i < row_no:\n wrec = Wrec(self.df.iat[i, 0], self.df.iat[i, 1], self.df.iat[i, 2], self.df.iat[i, 3], \\\n self.df.iat[i, 4], self.df.iat[i, 5], self.df.iat[i, 6], self.df.iat[i, 7],\\\n self.df.iat[i, 8], self.df.iat[i, 9], self.df.iat[i, 10], self.df.iat[i, 11], self.df.iat[i, 12],\\\n i) \n self.wrec_list.append(wrec)\n i += 1\n self.x_max = 0\n self.x_min = 0\n self.y_max = 0\n self.y_min = 0\n\n self.word_positions_in_pic = np.zeros(2) # placeholder; filled in properly by force_model()\n\n self.div_value = 0\n\n def move(self):\n # decide div_value from the mean force magnitude\n sum_f_all = 0\n lgt = len(self.wrec_list)\n for wrec in self.wrec_list:
\n sum_f_all += np.linalg.norm( wrec.f_all )\n print(\"*************************\")\n #print(wrec.belonged_mesh)\n print(\"fs : \" + str(wrec.fs))\n print(\"fr : \" + str(wrec.fr))\n print(\"fa : \" + str(wrec.fa))\n #print(\"f_all : \" + str(wrec.f_all))\n sum_f_all = sum_f_all / lgt\n self.div_value = sum_f_all / 1.5\n print(\"div_value : \" + str(self.div_value))\n\n for wrec in self.wrec_list:\n wrec.bfr_p_c = wrec.p_c\n wrec.p_c = wrec.p_c + wrec.f_all / self.div_value # provisional step divisor (a fixed 10,000 at first)\n wrec.aft_p_c = wrec.p_c\n print(\"move : \" + str(wrec.f_all / self.div_value))\n wrec.p_tl = wrec.p_tl + wrec.f_all / self.div_value\n wrec.p_tr = wrec.p_tr + wrec.f_all / self.div_value\n wrec.p_bl = wrec.p_bl + wrec.f_all / self.div_value\n wrec.p_br = wrec.p_br + wrec.f_all / self.div_value\n \n # print(\"wrec.f_all : \" + str(wrec.f_all))\n #print(\"bfr_p_c : \" + str(wrec.bfr_p_c))\n #print(\"aft_p_c : \" + str(wrec.aft_p_c))\n #print(\"**************\")\n # movement done\n # pre-compute fa here for the next step\n for wrec in self.wrec_list:\n # check whether the centre crossed a mesh edge and set fa accordingly\n # (1) line equation of an edge: (x1-x2) * (y-y1) - (y1-y2)*(x-x1) = 0 ... eq. (1)\n wrec.fa = np.zeros(2) # reset first\n bfr_p_c = wrec.bfr_p_c\n aft_p_c = wrec.aft_p_c\n\n for mesh in wrec.belonged_mesh: # for every mesh this rectangle belongs to\n #print(mesh)\n #print(wrec.index_num)\n tmp_lst = []\n for index in mesh:\n if index != wrec.index_num:\n tmp_lst.append(index)\n \n other_wrecs = np.array(tmp_lst)\n #print(\"other_wrecs\")\n #print(other_wrecs)\n #print(\"____\")\n\n #print(bfr_p_c)\n #print(wrec.fs)\n #print(aft_p_c)\n p1 = self.wrec_list[other_wrecs[0]].p_c\n p2 = self.wrec_list[other_wrecs[1]].p_c\n s = (p1[0]- p2[0]) * (bfr_p_c[1]- p1[1]) - (p1[1]- p2[1]) * (bfr_p_c[0] - p1[0]) \n t = (p1[0]- p2[0]) * (aft_p_c[1]- p1[1]) - (p1[1]- p2[1]) * (aft_p_c[0] - p1[0]) \n \n # distance from the point to the line\n # (y1 - y2)*x + (x2 - x1)*y + x1*y2 - y1*x2 = 0 (equivalent to eq. (1))\n a, b, c = (p1[1]- p2[1]), (p2[0]- p1[0]), (p1[0] * p2[1] - p1[1]*p2[0])\n x0 , y0 = aft_p_c[0], aft_p_c[1]\n d = abs(a*x0 + b*y0 + c) / math.sqrt(a*a + b*b)\n # line through bfr_p_c and aft_p_c\n # (bfr_p_c[1] - aft_p_c[1]) * x + (aft_p_c[0] - bfr_p_c[0]) * y = bfr_p_c[1] * aft_p_c[0] - bfr_p_c[0] * aft_p_c[1]\n #\n # line of the edge\n # (p1[1] - p2[1] ) * x + (p2[0] - p1[0] ) * y = p1[1] * p2[0] - p1[0] * p2[1] \n A = np.array( [ [bfr_p_c[1] - aft_p_c[1] , aft_p_c[0] - bfr_p_c[0]], \\\n [p1[1] - p2[1] , p2[0] - p1[0] ] \\\n ])\n #print(A)\n B = np.array( [ bfr_p_c[1] * aft_p_c[0] - bfr_p_c[0] * aft_p_c[1],\\\n p1[1] * p2[0] - p1[0] * p2[1] \\\n ])\n #print(B)\n try:\n crossing_point = np.linalg.solve(A, B)\n fa_direction = crossing_point - aft_p_c\n\n if wrec.went_over == False and s * t < 0: # was on the correct side but flipped over\n wrec.went_over = True\n wrec.fa += KA * d * \\\n ( fa_direction / np.linalg.norm(fa_direction))\n\n elif wrec.went_over == True and s * t < 0: # was flipped and has crossed back to the correct side\n wrec.went_over = False\n\n elif wrec.went_over == True and s * t >= 0: # still flipped\n wrec.fa += KA * d * \\\n ( fa_direction / np.linalg.norm(fa_direction)) \n except:\n pass\n \n \n def force_model(self, DRAW_INDEX):\n # first, jitter rectangles that landed on identical integer coordinates\n for wrec_1 in self.wrec_list:\n for wrec_2 in self.wrec_list:\n if wrec_1 == wrec_2:\n continue\n if int(wrec_1.p_c[0]) == int(wrec_2.p_c[0]) and int(wrec_1.p_c[1]) == int(wrec_2.p_c[1]):\n random_vec = np.array([(0.5 - random.random())*25, (0.5 - random.random())*25]) # ad-hoc jitter magnitude\n wrec_1.p_c = wrec_1.p_c + random_vec\n print(\"come\")\n\n # word_positions_in_pic keeps the same index order as wrec_set.wrec_list.\n
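# A fresh Delaunay triangulation of the centre points decides which\n # rectangles count as neighbours for the attraction/repulsion forces.\n 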
for i, wrec in zip( range(len(self.wrec_list)), self.wrec_list):\n if i == 0:\n word_positions_in_pic = np.array([[wrec.p_c[0], wrec.p_c[1]]])\n if i > 0:\n a_2d_ex = np.array([[wrec.p_c[0], wrec.p_c[1]]])\n word_positions_in_pic = np.append(word_positions_in_pic, a_2d_ex, axis=0)\n\n #print(word_positions_in_pic)\n tri = Delaunay(word_positions_in_pic)\n fig = delaunay_plot_2d(tri)\n # fig.savefig('./Images/scipy_matplotlib_delaunay_before.png')\n print(tri.simplices)\n\n for mesh in tri.simplices:\n if mesh[0] == self.wrec_list[mesh[0]].index_num:# 一応一致しているかを確認\n self.wrec_list[mesh[0]].conneced_wrec_dict[mesh[1]] = self.wrec_list[mesh[1]]\n self.wrec_list[mesh[0]].conneced_wrec_dict[mesh[2]] = self.wrec_list[mesh[2]]\n self.wrec_list[mesh[1]].conneced_wrec_dict[mesh[0]] = self.wrec_list[mesh[0]]\n self.wrec_list[mesh[1]].conneced_wrec_dict[mesh[2]] = self.wrec_list[mesh[2]]\n self.wrec_list[mesh[2]].conneced_wrec_dict[mesh[0]] = self.wrec_list[mesh[0]]\n self.wrec_list[mesh[2]].conneced_wrec_dict[mesh[1]] = self.wrec_list[mesh[1]]\n\n self.wrec_list[mesh[0]].belonged_mesh.append(mesh)\n self.wrec_list[mesh[1]].belonged_mesh.append(mesh)\n self.wrec_list[mesh[2]].belonged_mesh.append(mesh)\n\n # 力を計算する\n i = 0\n while i < HOW_MANY_MOVES:\n for wrec in self.wrec_list:\n wrec.calculate_fs()\n wrec.calculate_fr()\n wrec.calculate_all_f()\n self.move() #ここで、移動後次回のfaは計算ずみ。\n i += 1\n\n # word_positions_in_pic は wrec_set.wrec_list のINDEX順番と同じ。\n for i, wrec in zip( range(len(self.wrec_list)), self.wrec_list):\n if i == 0:\n word_positions_in_pic = np.array([[wrec.p_c[0], wrec.p_c[1]]])\n if i > 0:\n a_2d_ex = np.array([[wrec.p_c[0], wrec.p_c[1]]])\n word_positions_in_pic = np.append(word_positions_in_pic, a_2d_ex, axis=0)\n\n print(word_positions_in_pic)\n self.x_max = np.max(word_positions_in_pic, axis = 0)[0]\n self.y_max = np.max(word_positions_in_pic, axis = 0)[1]\n self.x_min = np.min(word_positions_in_pic, axis = 0)[0]\n self.y_min = np.min(word_positions_in_pic, axis = 0)[1]\n\n self.word_positions_in_pic = word_positions_in_pic\n\n # 描画様にデータをcsvで排出\n i = 0\n for wrec in self.wrec_list:\n if i == 0:\n tmp_df = pd.DataFrame(\\\n [[wrec.word,\\\n wrec.p_c[0],\\\n wrec.p_c[1],\\\n wrec.p_tl[0],\\\n wrec.p_tl[1],\\\n wrec.p_tr[0],\\\n wrec.p_tr[1],\\\n wrec.p_bl[0],\\\n wrec.p_bl[1],\\\n wrec.p_br[0],\\\n wrec.p_br[1],\\\n wrec.size,\\\n wrec.color\\\n ]]\\\n )\n\n tmp_df.columns = ['word', 'p_c_x', 'p_c_y', 'p_tl_x', 'p_tl_y', 'p_tr_x', 'p_tr_y', 'p_bl_x', 'p_bl_y', 'p_br_x', 'p_br_y', 'size', 'color']\n \n if i > 0:\n tmp_df = tmp_df.append({'word' : wrec.word,\\\n 'p_c_x' : wrec.p_c[0],\\\n 'p_c_y' : wrec.p_c[1],\\\n 'p_tl_x' : wrec.p_tl[0],\\\n 'p_tl_y' : wrec.p_tl[1],\\\n 'p_tr_x' : wrec.p_tr[0],\\\n 'p_tr_y' : wrec.p_tr[1],\\\n 'p_bl_x' : wrec.p_bl[0],\\\n 'p_bl_y' : wrec.p_bl[1],\\\n 'p_br_x' : wrec.p_br[0],\\\n 'p_br_y' : wrec.p_br[1],\\\n 'size' : wrec.size,\\\n 'color' : wrec.color\\\n } , ignore_index=True)\n\n i = i+1\n SAVE_PATH = './Bokeh/CSVs/afrer_forced_output_' + str(DRAW_INDEX) + '.csv'\n tmp_df.to_csv(SAVE_PATH, index=False)\n\n def draw_word_crowd(self, DRAW_INDEX):\n #ワードクラウド描画\n X_SIZE = int(self.x_max - self.x_min + 400)\n Y_SIZE = int(self.y_max - self.y_min + 400)\n\n adjust_x = self.x_min - 200\n adjust_y = self.y_min - 200\n\n campus = Image.new('RGB', (X_SIZE, Y_SIZE), (128, 128, 128))\n draw = ImageDraw.Draw(campus)\n\n for wrec in self.wrec_list: \n if wrec.color == \"RED\":\n draw.rectangle((wrec.p_tl[0] - adjust_x, \\\n wrec.p_tl[1] - adjust_y, \\\n 
wrec.p_br[0] - adjust_x, \\\n wrec.p_br[1] - adjust_y), \\\n fill=(240, 0, 0), outline=(255, 255, 255))\n print(wrec.p_tl[0])\n\n if wrec.color == \"BLUE\":\n draw.rectangle((wrec.p_tl[0] - adjust_x, \\\n wrec.p_tl[1] - adjust_y, \\\n wrec.p_br[0] - adjust_x, \\\n wrec.p_br[1] - adjust_y), \\\n fill=(0, 0, 240), outline=(255, 255, 255))\n\n if wrec.color == \"PURPLE\":\n draw.rectangle((wrec.p_tl[0] - adjust_x, \\\n wrec.p_tl[1] - adjust_y, \\\n wrec.p_br[0] - adjust_x, \\\n wrec.p_br[1] - adjust_y), \\\n fill=(150, 0, 150), outline=(255, 255, 255))\n \n if wrec.color == \"NO\":\n draw.rectangle((wrec.p_tl[0] - adjust_x, \\\n wrec.p_tl[1] - adjust_y, \\\n wrec.p_br[0] - adjust_x, \\\n wrec.p_br[1] - adjust_y), \\\n fill=(50, 50, 50), outline=(255, 255, 255))\n\n if wrec.color == \"RED\" or wrec.color == \"BLUE\" or wrec.color == \"PURPLE\" or wrec.color == \"NO\":\n ttfontname = \"./logotypejp_mp_m_1.1.ttf\"\n fontsize = wrec.size\n font = ImageFont.truetype(ttfontname, fontsize)\n text_position_x = wrec.p_tl[0] - adjust_x\n text_position_y = wrec.p_tl[1] - adjust_y\n textRGB = (20, 20, 20)\n text = wrec.word\n draw.text((text_position_x, text_position_y), text, fill=textRGB, font=font)\n\n if wrec.color == \"Thumbnail\":\n url = 'http://i.ytimg.com/vi/' + wrec.word + \"/mqdefault.jpg\"\n response = requests.get(url)\n image = response.content\n file_name = \"Thumbnail/\" + wrec.word + \".jpeg\"\n with open(file_name, \"wb\") as aaa:\n aaa.write(image)\n img = Image.open(\"Thumbnail/\" + wrec.word + \".jpeg\")\n img_resize = img.resize( (int((wrec.p_br[0] - wrec.p_bl[0]) / 2), int((wrec.p_bl[1] - wrec.p_tl[1]) / 2)) )\n campus.paste(img_resize, (int(wrec.p_c[0]), int(wrec.p_c[1])) )\n \n campus.save('./Images/pillow_imagedraw.jpg', quality=95)\n tri_2 = Delaunay(self.word_positions_in_pic)\n fig_2 = delaunay_plot_2d(tri_2)\n fig_2.savefig('./Images/scipy_matplotlib_delaunay_after.png')\n\n'''\nfig = plt.figure()\nax = fig.add_subplot(1,1,1)\nax.scatter(x,y)\nax.set_title('first scatter plot')\nax.set_xlabel('x')\nax.set_ylabel('y')\nfig.savefig('./Images/scatter.png')\n'''","sub_path":"myModuleForceModel.py","file_name":"myModuleForceModel.py","file_ext":"py","file_size_in_byte":17102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"500301232","text":"import random \n#doesnt work on sentences. 
just words\nclass Extras():\n\n\t# The constructor in Python is __init__(), also known as a special or dunder method\n\tdef __init__(self, message):\n\t\tself.message = message\n\t\n\t#Normal member function\n\tdef print_message(self):\n\t\tprint(self.message)\n\nclass Encryption(Extras):\n\n\tdef __init__(self, message):\n\t\tExtras.__init__(self, message)\n\t\tself.encrypted_message = ''\n\t\tself.key = []\n\n\tdef encrypt(self):\n\t\t# build a random permutation of the alphabet to use as the cipher key\n\t\tA = list(range(26))\n\t\tB = random.sample(A, 26)\n\t\tself.key = B\n\n\t\tfor i in range(len(self.message)):\n\t\t\tif self.message[i] == ' ':\n\t\t\t\tself.encrypted_message += '.'\n\t\t\telif self.message[i].isupper():\n\t\t\t\tself.encrypted_message += chr(B[ord(self.message[i]) - 65] + 65)\n\t\t\telse:\n\t\t\t\tself.encrypted_message += chr(B[ord(self.message[i]) - 97] + 97)\n\t\treturn self.encrypted_message\n\nclass SubstitutionCipher(Encryption):\n\tdef __init__(self, message, key=None):\n\t\tEncryption.__init__(self, message)\n\t\tself.decrypted_message = ''\n\t\tself.key = key\n\n\tdef decrypt(self, encrypted_message):\n\t\tfor i in range(len(encrypted_message)):\n\t\t\tif encrypted_message[i] == '.':\n\t\t\t\tself.decrypted_message += ' '\n\t\t\t\tcontinue\n\t\t\tfor x in range(26):\n\t\t\t\tnum = 0\n\t\t\t\tif encrypted_message[i].isupper():\n\t\t\t\t\tnum = ord(encrypted_message[i]) - 65\n\t\t\t\telse:\n\t\t\t\t\tnum = ord(encrypted_message[i]) - 97\n\t\t\t\tif num == self.key[x]:\n\t\t\t\t\tif encrypted_message[i].isupper():\n\t\t\t\t\t\tself.decrypted_message += chr(65 + x)\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.decrypted_message += chr(97 + x)\n\t\treturn self.decrypted_message\n\n\n","sub_path":"crypto-algorithms/substitution.py","file_name":"substitution.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"495552003","text":"from multiprocessing import Process, Manager, Queue\n\n\ndef element(i: int, j: int, A: list, B: list, answer: Queue):\n    answer.put((sum(A[i][k] * B[k][j] for k in range(len(A[0]) or len(B))), [i, j]))\n\n\nif __name__ == '__main__':\n    manager = Manager()\n\n    n1 = int(input('Enter the number of elements per row -> '))\n    n2 = int(input('Enter the number of rows in the matrix -> '))\n\n    matrix1 = [[0 for i in range(n1)] for j in range(n2)]\n\n    n1 = int(input('Enter the number of elements per row -> '))\n    n2 = int(input('Enter the number of rows in the matrix -> '))\n\n    matrix2 = [[0 for i in range(n1)] for j in range(n2)]\n\n    matrix_l1 = []\n    matrix_l2 = []\n    k = 0\n    kk = 0\n\n    with open('matrix1.txt', 'r') as m1:\n        for line in m1.read():\n            if line != '[' and line != ']' and line != ',' and line != '\\n' and line != ' ':\n                matrix_l1 += line\n\n    with open('matrix2.txt', 'r') as m2:\n        for line in m2.read():\n            if line != '[' and line != ']' and line != ',' and line != '\\n' and line != ' ':\n                matrix_l2 += line\n\n    for i in range(len(matrix1)):\n        for j in range(len(matrix1[0])):\n            try:\n                matrix1[i][j] = int(matrix_l1[k])\n                k += 1\n            except:\n                # character is not a single digit; skip it\n                k += 1\n\n    print(matrix1)\n\n    for i in range(len(matrix2)):\n        for j in range(len(matrix2[0])):\n            try:\n                matrix2[i][j] = int(matrix_l2[kk])\n                kk += 1\n            except:\n                # character is not a single digit; skip it\n                kk += 1\n\n    print(matrix2)\n\n    matrix3 = [[0 for _ in range(len(matrix2[0]))] for _ in range(len(matrix2[0]))]\n\n    processes = list()\n    answer = manager.Queue()\n\n    for 
i in range(len(matrix3)):\n for j in range(len(matrix3[i])):\n p = Process(target=element, args=(i, j, matrix1, matrix2, answer, ))\n processes.append(p)\n\n for p in processes:\n p.start()\n\n for p in processes:\n p.join()\n\n for i in range(len(matrix3)):\n for j in range(len(matrix3[i])):\n mt_l, mt_k = answer.get()\n matrix3[mt_k[0]][mt_k[1]] = mt_l\n\n with open('result.txt', 'w') as file:\n file.write(str(matrix3))\n\n print(matrix3)","sub_path":"Course-ll/Semester-ll/Практикум по программированию/3_Parallelism/multiplication.py","file_name":"multiplication.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"276856397","text":"import socket\nfrom flask import Flask\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef hello():\n temp_hostname=str(socket.gethostname())\n return_str = \"hostname is < \" + temp_hostname +\" >\"\n return return_str\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0')\n","sub_path":"gs-scheduler/test_program/app/get-hostname.py","file_name":"get-hostname.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"636238485","text":"import random\nimport unittest\nfrom typing import Iterable\nfrom typing import List\nfrom typing import Optional\nfrom typing import Sequence\n\nfrom putput.joiner import ComboOptions\nfrom putput.joiner import join_combo\n\n\nclass TestJoiner(unittest.TestCase):\n def setUp(self):\n random.seed(0)\n\n def _test_join_combo(self,\n pattern: Sequence[Sequence],\n expected_output: Iterable[Sequence],\n *,\n all_options: Optional[List[ComboOptions]] = None\n ) -> None:\n expected_output = tuple(expected_output)\n if not all_options:\n all_options = [ComboOptions(max_sample_size=len(expected_output),\n with_replacement=False),\n ComboOptions(max_sample_size=len(expected_output),\n with_replacement=True)]\n for combo_options in all_options:\n actual_output = list(join_combo(pattern, combo_options=combo_options))\n if combo_options.with_replacement:\n self.assertEqual(len(actual_output), combo_options.max_sample_size)\n else:\n self.assertEqual(len(actual_output), len(set(actual_output)))\n self.assertEqual(len(actual_output), min(len(expected_output), combo_options.max_sample_size))\n for actual in actual_output:\n self.assertIn(actual, expected_output)\n\n def test_join_same_shape_lists(self) -> None:\n pattern = (('she', 'he'), ('wants', 'needs'))\n expected_output = (('she', 'needs'), ('he', 'wants'), ('she', 'wants'), ('he', 'needs'))\n self._test_join_combo(pattern, expected_output)\n\n def test_first_list_shorter_than_second_list(self) -> None:\n pattern = (('he',), ('wants', 'needs'))\n expected_output = (('he', 'wants'), ('he', 'needs'))\n self._test_join_combo(pattern, expected_output)\n\n def test_first_list_longer_than_second_list(self) -> None:\n pattern = (('he', 'she'), ('wants',))\n expected_output = (('he', 'wants'), ('she', 'wants'))\n self._test_join_combo(pattern, expected_output)\n\n def test_one_list(self) -> None:\n pattern = (('he wants', 'she needs'),)\n expected_output = (('he wants',), ('she needs',))\n self._test_join_combo(pattern, expected_output)\n\n def test_one_list_one_item(self) -> None:\n pattern = (('he wants',),)\n expected_output = (('he wants',),)\n self._test_join_combo(pattern, expected_output)\n\n def test_multiple_lists_same_size(self) -> None:\n pattern = (('he', 'she'), ('would', 'will'), ('want', 
'need'))\n expected_output = (('he', 'would', 'want'), ('he', 'would', 'need'),\n ('he', 'will', 'want'), ('he', 'will', 'need'),\n ('she', 'would', 'want'), ('she', 'would', 'need'),\n ('she', 'will', 'want'), ('she', 'will', 'need'))\n self._test_join_combo(pattern, expected_output)\n\n def test_multiple_lists_first_list_shorter(self) -> None:\n pattern = (('he',), ('would', 'will'), ('want', 'need'))\n expected_output = (('he', 'would', 'want'), ('he', 'would', 'need'),\n ('he', 'will', 'want'), ('he', 'will', 'need'))\n self._test_join_combo(pattern, expected_output)\n\n def test_multiple_lists_middle_list_shorter(self) -> None:\n pattern = (('he', 'she'), ('will',), ('want', 'need'))\n expected_output = (('he', 'will', 'want'), ('he', 'will', 'need'),\n ('she', 'will', 'want'), ('she', 'will', 'need'))\n self._test_join_combo(pattern, expected_output)\n\n def test_multiple_lists_last_list_shorter(self) -> None:\n pattern = (('he', 'she'), ('would', 'will'), ('want',))\n expected_output = (('he', 'would', 'want'), ('he', 'will', 'want'),\n ('she', 'would', 'want'), ('she', 'will', 'want'))\n self._test_join_combo(pattern, expected_output)\n\n def test_multiple_lists_all_different_size(self) -> None:\n pattern = (('he',), ('would', 'will'), ('want', 'have', 'order'))\n expected_output = (('he', 'would', 'want'), ('he', 'would', 'have'),\n ('he', 'would', 'order'), ('he', 'will', 'want'),\n ('he', 'will', 'have'), ('he', 'will', 'order'))\n self._test_join_combo(pattern, expected_output)\n\n def test_zero_dimension(self) -> None:\n with self.assertRaises(ValueError):\n pattern = (tuple(), ('would', 'will'), ('want', 'have', 'order')) # type: Sequence[Sequence]\n expected_output = (('would', 'want'), ('would', 'have'),\n ('would', 'order'), ('will', 'want'),\n ('will', 'have'), ('will', 'order'))\n self._test_join_combo(pattern, expected_output)\n\n def test_num_unique_combinations_greater_than_max_size(self) -> None:\n pattern = tuple([tuple(range(50))] * 50)\n max_sample_size = 2\n all_options = [ComboOptions(max_sample_size=max_sample_size, with_replacement=True),\n ComboOptions(max_sample_size=max_sample_size, with_replacement=False)]\n for combo_options in all_options:\n actual_output = list(join_combo(pattern, combo_options=combo_options))\n self.assertEqual(len(actual_output), max_sample_size)\n\n def test_unique_one_d_to_mult_d(self) -> None:\n pattern = ((0, 1, 2, 3, 4, 5, 6, 7, 8, 9),\n (10, 11, 12, 13, 14, 15, 16, 17, 18, 19),\n (20, 21, 22, 23, 24, 25, 26, 27, 28, 29))\n num_unique_combinations = 1000\n # Subtract 1 from num_unique_combinations because if max_sample_size == num_unique_combinations,\n # the sampling will fall back to joining without sampling. 
This leads to a 0.1% chance that\n # there is an error, given that num_unique_combinations is 1000.\n combo_options = ComboOptions(max_sample_size=num_unique_combinations - 1, with_replacement=False)\n actual_output = list(join_combo(pattern, combo_options=combo_options))\n self.assertEqual(len(set(actual_output)), len(actual_output))\n\n def test_invalid_combo_options(self) -> None:\n with self.assertRaises(ValueError):\n ComboOptions(max_sample_size=0, with_replacement=False)\n\n def test_max_sample_size_less_than_max_combo_options(self) -> None:\n pattern = (('he', 'she'), ('would', 'will'), ('want',))\n expected_output = (('he', 'would', 'want'), ('he', 'will', 'want'),\n ('she', 'would', 'want'), ('she', 'will', 'want'))\n max_sample_size = 2\n all_options = [ComboOptions(max_sample_size=max_sample_size, with_replacement=False),\n ComboOptions(max_sample_size=max_sample_size, with_replacement=True)]\n self._test_join_combo(pattern, expected_output, all_options=all_options)\n\n def test_max_sample_size_greater_than_max_unique_options(self) -> None:\n pattern = (('he', 'she'), ('would', 'will'), ('want',))\n expected_output = (('he', 'would', 'want'), ('he', 'will', 'want'),\n ('she', 'would', 'want'), ('she', 'will', 'want'))\n max_sample_size = 10\n all_options = [ComboOptions(max_sample_size=max_sample_size, with_replacement=False),\n ComboOptions(max_sample_size=max_sample_size, with_replacement=True)]\n self._test_join_combo(pattern, expected_output, all_options=all_options)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/unit/test_joiner.py","file_name":"test_joiner.py","file_ext":"py","file_size_in_byte":7696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"638755634","text":"'''\r\nCreated on 2019. 4. 
11.\r\n\r\n@author: user\r\n'''\r\n# Naver Captcha Open API example - comparing the key input value\r\nimport os\r\nimport sys\r\nimport urllib.request\r\nclient_id = \"y2ojBn2tSStkUq1zVx51\"\r\nclient_secret = \"nKd9wpjDJZ\"\r\ncode = \"1\"\r\nkey = \"V5Qj0QyU7wWdWky6\"\r\nvalue = \"YOUR_CAPTCHA_VALUE\"\r\nurl = \"https://openapi.naver.com/v1/captcha/nkey?code=\" + code + \"&key=\" + key + \"&value=\" + value\r\nrequest = urllib.request.Request(url)\r\nrequest.add_header(\"X-Naver-Client-Id\",client_id)\r\nrequest.add_header(\"X-Naver-Client-Secret\",client_secret)\r\nresponse = urllib.request.urlopen(request)\r\nrescode = response.getcode()\r\nif(rescode==200):\r\n    response_body = response.read()\r\n    print(response_body.decode('utf-8'))\r\nelse:\r\n    print(\"Error Code:\" + str(rescode))","sub_path":"영화사이트테스트/캡챠/키입력값비교.py","file_name":"키입력값비교.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"104410579","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep  5 09:33:58 2019\n\n@author: constatza\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport fempy.dmaps as dm\nfrom fempy import smartplot\n\n\"\"\"\nInput\n\"\"\"\nepsilon = 40\nnumeigs = 3\ntimesteps = 10\nNsim = 2000\ndof = 1050\n\nplt.close('all')\nfilename = r\"variable_E_displacements_20x50.npy\"\ndisplacements = np.load(filename)\n\ndisplacements = np.transpose(displacements)\nU, Umean, Ustd = dm.normalize(displacements[:, :Nsim])\nutop = displacements[:,:]\nutop_norm1 = np.linalg.norm(utop, 1, axis=0)\n\n\"\"\"Diffusion Maps\"\"\"\neigvals_dm, eigvecs_dm = dm.diffusion_maps(U,\n                                           epsilon=epsilon,\n                                           t=timesteps,\n                                           numeigs=numeigs+1)\n\n# NOTE: Fi, k and A_dm are used below but were never defined in this script;\n# the three lines that follow reconstruct them by analogy with the PCA block\n# further down and are an assumption, not the original code.\nFi = eigvecs_dm[:, 1:]  # diffusion-map coordinates, dropping the trivial first eigenvector\nk = numeigs\nA_dm, res_dm = dm.least_squares(U, Fi[:, :k])\n\nprint(A_dm.shape)\n\nUnew_dm = A_dm @ Fi[:, :k].T\nUnew_dm = dm.denormalize(Unew_dm, Umean, Ustd)\nx_dm = Unew_dm[2*dof-2, :]\ny_dm = Unew_dm[2*dof-1, :]\n\n\"\"\"PCA\"\"\"\n\neigvals_pca, eigvecs_pca = dm.pca(U, numeigs=numeigs)\n\nm = len(eigvals_pca[eigvals_pca>[.05]])\nm = numeigs\nLr = eigvecs_pca[:, :m]\nA_pca, res_pca = dm.least_squares(U, Lr)\n\nUnew_pca = A_pca @ Lr.T\nUnew_pca = dm.denormalize(Unew_pca, Umean, Ustd)\nx_pca = Unew_pca[2*dof-2, :]\ny_pca = Unew_pca[2*dof-1, :]\n","sub_path":"examples/variable_young_modulus/variable_young_modulus_results.py","file_name":"variable_young_modulus_results.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"392553266","text":"import pandas as pd\nimport warnings\n\nwarnings.filterwarnings('ignore')\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.linear_model import LogisticRegression\n\ndf = pd.read_csv('data/prepared_data.csv', sep=';')\n\n# split the features into numeric and categorical ones\nnum_cols = ['Course_averageScore',\n            'Course_passingScore',\n            'Test_Qnt',\n            'Test_durationAvg',\n            'Student_MessagesQnt']\n\nobj_cols = ['Course_visibilityStatus',\n            'Course_periodicity']\n\n# Encode the categorical features\nohe_df = pd.DataFrame(index=df['student_id'])\nohe = OneHotEncoder(handle_unknown='ignore')\n\nfor col in obj_cols:\n    ohe.fit(df[[col]])\n\n    ohe_result = pd.DataFrame(ohe.transform(df[[col]]).toarray(),\n                              columns=ohe.get_feature_names(input_features=[col]),\n                              index=df['student_id'])\n    ohe_df = ohe_df.join(ohe_result)\n\n# Standardize 
numeric features\nstd_df = pd.DataFrame(index=df['student_id'])\nscaler = StandardScaler()\n\nfor col in num_cols:\n    scaler.fit(df[[col]])\n    std_result = pd.DataFrame(scaler.transform(df[[col]]),\n                              columns=[col],\n                              index=df['student_id'])\n    std_df = std_df.join(std_result, on='student_id', how='left', lsuffix='_left', rsuffix='_right')\n\nX = ohe_df.join(std_df)\ny = df['Churn']\n\n# Split off the test set\nX_train, X_test, y_train, y_test = train_test_split(X.values, y.values, test_size=0.3, stratify=y, random_state=18)\n\n# Train a logistic regression\nlr = LogisticRegression(C=1.0, random_state=18, n_jobs=-1).fit(X_train, y_train)\n\n# Make a prediction and evaluate the result\ny_pred_test = lr.predict_proba(X_test)[:, 1]\nscore = roc_auc_score(y_test, y_pred_test)\n\n# As an example, predict values for the whole dataset\ny_pred = lr.predict_proba(X)[:, 1]\nresults = list(zip(df['student_id'], y_pred, y))\nresults_df = pd.DataFrame(results, columns=['student_id', 'prob_predicted', 'Real_value'])\nresults_df['Predicted_value'] = results_df['prob_predicted'].apply(lambda x: 1 if x >= 0.81 else 0)\n\n# Save the prediction results\nresults_df.to_csv('data/churn_prediction.csv', index=False, sep=';')\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"122301180","text":"from opentrons import protocol_api\n\n# metadata\nmetadata = {\n    'protocolName': 'My Protocol',\n    'author': 'Name ',\n    'description': 'Simple protocol to get started using OT2',\n    'apiLevel': '2.2'\n}\n\n# protocol run function. the part after the colon lets your editor know\n# where to look for autocomplete suggestions\ndef run(protocol: protocol_api.ProtocolContext):\n\n    # labware\n    tubes = protocol.load_labware('opentrons_24_tuberack_eppendorf_1.5ml_safelock_snapcap', '1')\n    tiprack = protocol.load_labware('opentrons_96_tiprack_1000ul', '2')\n\n    # pipettes\n    left_pipette = protocol.load_instrument(\n        'p1000_single_gen2', 'left', tip_racks=[tiprack])\n\n    # commands\n    left_pipette.pick_up_tip()\n    left_pipette.aspirate(200, tubes['A1'].bottom())\n    left_pipette.dispense(200, tubes['B2'].bottom())\n    left_pipette.drop_tip()\n","sub_path":"example1000.py","file_name":"example1000.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"311805739","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 17 02:19:03 2019\n\n@author: kalpi\n\"\"\"\n\nimport bs4 as bs\nimport urllib.request\nimport io\nimport sys\n\n\nx = (sys.argv[1])\n\n#x = '[{\"name\":\"adasdasd\"},{\"name\":\"iuythngtrfbfvd\"}]'\n\nx = x.replace('[', \"\")\nx = x.replace(']', \"\")\nx = x.replace('\"}', \"\")\nx = x.replace('{\"', \"\")\nx = x.split(',')\n\nx = [i.replace('name\":\"', \"\") for i in x]\n\n\nkeypoints = x\n# with io.open(\"sep.txt\", \"w\", encoding=\"utf-8\") as f:\n    # f.write(keypoints[0])\n    # f.write(keypoints[1])\n# argv order: batch, course, program, examNo (\"bcpe\")\ncourse = str(sys.argv[3])\nbatch = str(sys.argv[2])\nexamNo = str(sys.argv[5])\nprogram = str(sys.argv[4])\n\n\nfor url in keypoints:\n    url = url.replace(' ','+')\n    url = 'https://en.wikipedia.org/w/index.php?search=' + url\n    sauce = urllib.request.urlopen(url).read()\n    \n    soup = bs.BeautifulSoup(sauce,'lxml')\n    \n    a = soup.find_all('p')\n    \n    final = ''\n    for par in a:\n        final = final + par.text\n        final = final + '\\n'\n    \n    with io.open(\"scrapped_{}_{}_{}_{}.txt\".format(course, batch,program, examNo), 
\"a+\", encoding=\"utf-8\") as f:\n        f.write(final)\n\nimport gensim\nimport smart_open\n\n\n#course = \"IT314\"\n#batch = \"2016\"\n#examNo = \"1\"\n\nwiki_scrapped = \"scrapped_{}_{}_{}_{}.txt\".format(course, batch, program, examNo)\n\ndef read_corpus(fname, tokens_only=False):\n    with smart_open.smart_open(fname) as f:\n        for i, line in enumerate(f):\n            if tokens_only:\n                yield gensim.utils.simple_preprocess(line)\n            else:\n                yield gensim.models.doc2vec.TaggedDocument(gensim.utils.simple_preprocess(line), [i])\n\ntrain_corpus = list(read_corpus(wiki_scrapped))\n\nmodel = gensim.models.doc2vec.Doc2Vec(vector_size=70, alpha=0.025, min_alpha=0.00025, min_count=2, epochs=40)\n\nmodel.build_vocab(train_corpus)\n\n\nmax_epochs = 100\n\nf2 = open(\"myNewText.txt\", \"a+\")\nfor epoch in range(max_epochs):\n    print('iteration {0}'.format(epoch))\n    try:\n        f2.write('iteration {}'.format(epoch))\n    except:\n        print(\"failed to write iteration log\")\n    model.train(train_corpus,\n                total_examples=model.corpus_count,\n                epochs=model.epochs)\n    # decrease the learning rate\n    model.alpha -= 0.0001\n    # fix the learning rate, no decay\n    model.min_alpha = model.alpha\n\nf2.close()\nimport pickle\npickle.dump(model, open('ans_checker_{}_{}_{}_{}.sav'.format(course, batch, program, examNo), 'wb'))\nprint(\"training complete, model saved\")\nsys.stdout.flush()","sub_path":"studentSide/corpusAndTrainCombined.py","file_name":"corpusAndTrainCombined.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"158100751","text":"import numpy as np\n\n\ndef softmax_backward(Y, softmax_out):\n    \"\"\"\n    Y: the training labels. shape: (vocab_size, m)\n    softmax_out: output of the softmax. shape: (vocab_size, m)\n    \"\"\"\n    dL_dZ = softmax_out - Y\n    \n    assert(dL_dZ.shape == softmax_out.shape)\n    return dL_dZ\n\ndef dense_backward(dL_dZ, caches):\n    \"\"\"\n    dL_dZ: shape: (vocab_size, m)\n    caches: dict. 
outputs of each step of the forward propagation\n    \"\"\"\n    W = caches['W']\n    word_vec = caches['word_vec']\n    m = word_vec.shape[1]\n    \n    dL_dW = (1 / m) * np.dot(dL_dZ, word_vec.T)\n    dL_dword_vec = np.dot(W.T, dL_dZ)\n\n    assert(W.shape == dL_dW.shape)\n    assert(word_vec.shape == dL_dword_vec.shape)\n    \n    return dL_dW, dL_dword_vec\n\ndef backward_propagation(Y, softmax_out, caches):\n    dL_dZ = softmax_backward(Y, softmax_out)\n    dL_dW, dL_dword_vec = dense_backward(dL_dZ, caches)\n    \n    gradients = dict()\n    gradients['dL_dZ'] = dL_dZ\n    gradients['dL_dW'] = dL_dW\n    gradients['dL_dword_vec'] = dL_dword_vec\n    \n    return gradients\n\ndef update_parameters(parameters, caches, gradients, learning_rate):\n    vocab_size, emb_size = parameters['WRD_EMB'].shape\n    inds = caches['inds']\n    WRD_EMB = parameters['WRD_EMB']\n    dL_dword_vec = gradients['dL_dword_vec']\n    m = inds.shape[-1]\n    \n    # update only the rows of the embedding matrix that appeared in this batch\n    WRD_EMB[inds.flatten(), :] -= dL_dword_vec.T * learning_rate\n\n    parameters['W'] -= learning_rate * gradients['dL_dW']","sub_path":"backward propagation.py","file_name":"backward propagation.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"507932156","text":"import tweepy\nimport json\nimport csv\n\nconsumer_key = 'L8tNdzu2q3n1JnBWn3CsetBuN'\nconsumer_secret = 'xu6Ys6W757EI0mcEHKNpux7qrIClslsolwoHRLlwDCHRslzQMS'\naccess_token = '1043037626695991296-ocEyZgIVQ6hcVpxiEPAqi5uEckzmag'\naccess_token_secret = 'Uh0nYzErw5hU136psP4oP8v2pIVUkxulTZalWB1R9bjQR'\n\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = tweepy.API(auth)\n\nuser = api.me()\nprint(user.name)\n\n#####\n# Opening Spotify Data <-- Top 100 tracks of 2017\n#####\n\nwith open('top_spotify.json') as f:\n    spot_data = json.load(f)\n\n\n#####\n# Open past twitter json file\n#####\n    \n#do not forget the new tweets\n\ntd = [['id', 'author', 'text', 'in_reply']]\n\n#with open('tweet_data.json') as f:\n   # tweet_data = json.load(f)\n\n\n###FINDS THE USERS THAT HAVE MENTIONED THE BOT\n\n\n####\n# Define the search\n#####\nquery = '@jesslightsupli1'\nmax_tweets = 100\n\n####\n# Do the search\n#####\nsearched_tweets = []\nlast_id = -1\nwhile len(searched_tweets) < max_tweets:\n    count = max_tweets - len(searched_tweets)\n    try:\n        new_tweets = api.search(q=query, count=count, max_id=str(last_id - 1))\n        if not new_tweets:\n            break\n        searched_tweets.extend(new_tweets)\n        last_id = new_tweets[-1].id\n    except tweepy.TweepError as e:\n        # depending on TweepError.code, one may want to retry or wait \n        # to keep things simple, we will give up on an error \n        break\n\n\n####\n# Iterate over the search\n#####\nfor status in searched_tweets:\n    # do something with all these tweets \n    print(status.text)\n\n    #if(status.id in tweet_data['id']): ## check the tweets that already exist, if it does. ignore\n     #   continue\n\n    count = 0\n    for i in spot_data:\n\n\n        #INTENT NUMBER ONE <--- FIND THE SINGER FOR A SONG <-- uses entities\n        #what is the singer for I don't want to live forever?\n        #what is the artist that sang Stay with Me?\n\n        if(\"artist\" in status.text or \"singer\" in status.text):\n            #api.update_status('what song? 
@' + status.author.screen_name, status.id_str)\n            if('for' in status.text):\n                song = status.text.split('for ')\n                song_n = song[1]\n                if(song_n in i[\"name\"]):\n                    obj = i['artist']\n                    api.update_status(obj+' @' + status.author.screen_name, status.id_str)\n            if('sang' in status.text):\n                song = status.text.split('sang ')\n                song_n = song[1]\n                if(song_n.lower() in i['name'].lower()):  # case-insensitive match\n                    obj = i['artist']\n                    api.update_status(obj+' @' + status.author.screen_name, status.id_str)\n\n\n\n        #INTENT NUMBER TWO <--- FIND THE SONG BY SINGER <-- uses entities\n        # what in the top of 2017 is a song by Zedd?\n        # what 2017 song featured Alessia Cara?\n        if('song' in status.text and ('featured' in status.text or 'by' in status.text)):\n            if('featured' in status.text):\n                feat = status.text.split('featured ')\n                artist_feat = feat[1]\n                if(artist_feat in i[\"name\"]):\n                    obj = i['name']\n                    api.update_status(obj+' @' + status.author.screen_name, status.id_str)\n            if('by' in status.text):\n                art = status.text.split('by ')\n                artist_n = art[1]\n                if(artist_n in i['artist']):\n                    obj = i['name']\n                    api.update_status(obj+' @' + status.author.screen_name, status.id_str)\n\n\n\n        #INTENT NUMBER THREE <--- WHAT IS THE DANCEABILITY OF \"Unforgettable\"\n        # can i dance to Unforgettable?\n        # what is the danceability of Unforgettable?\n        if(\"dance\" in status.text and ('of' in status.text or 'to' in status.text)):\n            if('to' in status.text):\n                song_n = status.text.split('to ')\n                song_name = song_n[1]\n                if(song_name in i['name']):\n                    obj = 'yes'\n                    if(i['danceability'] < 0.5): \n                        obj = 'no'\n                    api.update_status(obj+' @' + status.author.screen_name, status.id_str)\n            if('of' in status.text):\n                song_n = status.text.split('of ')\n                song_name = song_n[1]\n                if(song_name in i['name']):\n                    obj = i['danceability']\n                    api.update_status(obj+' @' + status.author.screen_name, status.id_str)\n\n\n\n        #INTENT NUMBER FOUR <--- HIGHEST RANKED SONG\n        # what is the best song in 2017?\n        # what is the highest ranked song in 2017?\n        if(\"best\" in status.text or (\"highest\" in status.text and \"rank\" in status.text)):\n            obj = '----'\n            if(count == 0):\n                obj = i['name']\n                api.update_status(obj + ' @' + status.author.screen_name, status.id_str)\n\n\n        #INTENT NUMBER FIVE <--- LOWEST RANKED SONG\n        # what is the worst song in 2017?\n        # what is the lowest ranked song in 2017?\n        if(\"worst\" in status.text or (\"lowest\" in status.text and \"rank\" in status.text)):\n            obj = '----'\n            if(count == len(spot_data) - 1):  # last (lowest-ranked) track in the list\n                obj = i['name']\n                api.update_status(obj + ' @' + status.author.screen_name, status.id_str)\n\n        count += 1\n    \n    ts = [status.id, status.author, status.text, status.in_reply_to_status_id]\n    td.append(ts)\n\n## uncomment to save file of old tweets\n#with open('output_tweet.csv', 'w', newline='') as csvfile:\n#    writer = csv.writer(csvfile)\n#    writer.writerows(td)\n\n#csvfile = open('output_tweet.csv', 'rt', encoding = 'utf-8')\n#jsonfile = open('tweet_data.json', 'w')\n\n#fieldnames = ('id','author','text','in_reply')\n#reader = csv.DictReader(csvfile, fieldnames)\n#for row in reader:\n#    json.dump(row, jsonfile)\n#    jsonfile.write('\\n')\n\n    #with open('tweet_data.json', 'w') as outfile:\n        #json.dump(tweet_data, outfile)\n\n\n","sub_path":"twitter_bot_main.py","file_name":"twitter_bot_main.py","file_ext":"py","file_size_in_byte":5832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"607830326","text":"x = [8, 2, 3, -1, 2, -5, 7]\npersonas = [\"Sara\", \"Pedro\", \"Miguel\"]\n\ncubos = [i ** 3 for i in x]\nprint('Lista de los cubos: 
{}'.format(cubos))\n\ncuad_impares = [i ** 2 for i in x if i % 2 != 0]\nprint('Lista de cuadrados de los impares {}'.format(cuad_impares))\n\ncuad_pares_positivos = [i ** 2 for i in x if i % 2 == 0 and i > 0]\nprint('Lista de cuadrados pares positivos: {}'.format(cuad_pares_positivos))\n\nmas_5_cars = [n for n in personas if len(n) > 5]\nprint('Lista de personas con más de 5 caracteres: {}'.format(mas_5_cars))\n\npersonas_o = [n for n in personas if 'o' in n]\nprint('Lista de personas que contengan la vocal o: {}'.format(personas_o))\n\npersonas_e_6 = [n for n in personas if 'e' in n and len(n) >= 6]\nprint('Lista de personas con la vocal e y longitud de al menos 6 caracteres: {}'.format(personas_e_6))\n\n\nlinea = \"123 43 4 35 25 5 41 42 5\"\nnumeros = linea.split()\nsuma = sum([int(v) for v in numeros])\nprint(suma)\n\ntuplas = [(persona.lower(), len(persona)) for persona in personas]\nprint(tuplas)\n\ndico = {persona: len(persona) for persona in personas}\nprint(dico)\n\nt = (v ** 2 for v in x) # Generador\n\n ","sub_path":"ejercicios/ejercicios_lpc.py","file_name":"ejercicios_lpc.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"96691232","text":"#coding:utf-8\r\nimport os,shutil,time,sys\r\n\r\n\r\n\r\ncs_port=80\r\n\r\ncsStopCmd='/etc/init.d/cs stop'\r\ncsStartCmd='/etc/init.d/cs start'\r\ncsshStartCmd='/etc/init.d/cssh start'\r\ncsshStopCmd='/etc/init.d/cssh stop'\r\ncsConfPath='/usr/local/apache-tomcat-7.0.42/webapps/ROOT/WEB-INF/classes/csconfig.properties'\r\ncsshConfPath='/usr/local/cssh/config/conf.py'\r\n\r\ndef getPid(port):\r\n f=os.popen(\"lsof -Pnl -i :%s |grep LISTEN|awk '{print$2}'\" %str(port))\r\n pid=f.readline().strip()\r\n f.close()\r\n return pid\r\n\r\ndef stopCS():\r\n# os.system(csStopCmd)\r\n pid=getPid(cs_port)\r\n os.system(\"kill -9 %s &>/dev/null\" %pid)\r\n\r\ndef stopCssh():\r\n os.system(csshStopCmd)\r\n \r\n \r\ndef startCS():\r\n os.system(csStartCmd)\r\n\r\ndef startCSSH():\r\n os.system(csshStartCmd)\r\n \r\ndef replaceCS(guid):\r\n data=[]\r\n f=open(csConfPath,'r+') \r\n for line in f.readlines():\r\n \r\n if line.find('guid')!=-1:\r\n s=line.split('=')\r\n data.append(s[0]+\"=\"+guid+'\\n')\r\n else:\r\n data.append(line) \r\n \r\n f.close()\r\n f=open(csConfPath,'w')\r\n f.writelines(data)\r\n f.close() \r\n\r\ndef replaceCSSH(guid):\r\n data=[]\r\n f=open(csshConfPath,'r+') \r\n for line in f.readlines():\r\n \r\n if line.find('guid')!=-1:\r\n s=line.split('=')\r\n data.append(s[0]+\"=\"+\"'\"+guid+\"'\"+'\\n')\r\n else:\r\n data.append(line) \r\n \r\n f.close()\r\n f=open(csshConfPath,'w')\r\n f.writelines(data)\r\n f.close()\r\n \r\nif __name__=='__main__':\r\n guid= sys.argv[1].strip()\r\n print('#### the guid is :%s.'%guid ) \r\n \r\n try:\r\n print('#### start to stop cs & cssh.' ) \r\n stopCS()\r\n stopCssh()\r\n \r\n time.sleep(1)\r\n print('#### start to replace guid.' ) \r\n replaceCS(guid)\r\n replaceCSSH(guid)\r\n \r\n print('#### start to start cs & cssh.' ) \r\n startCS()\r\n startCSSH()\r\n \r\n except:\r\n errorMsg = sys.exc_info()[1]\r\n print('#### replace guid failed ,the error:%s' % errorMsg)\r\n \r\n print('#### end replace, please check cs & cssh start status.' 
) \r\n","sub_path":"qiangao/zhang/replaceGuid.py","file_name":"replaceGuid.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"82140068","text":"import numpy as np\nimport pandas as pd\nfrom flask import Flask, request, jsonify, render_template\nimport pickle\nimport model\n\napp= Flask(__name__, static_url_path='/static')\nmy_model=pickle.load(open('model.pkl', 'rb'))\n\n@app.route('/')\ndef home():\n return render_template('home.html')\n\n@app.route('/index')\ndef predictionpage():\n return render_template('index.html')\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n\n input_values = [float(i) for i in request.form.values()] #fetching the input values\n df_row=[[i] for i in input_values] #This will form the input row\n df_keys = [i for i in request.form.keys()] #fetching the input keys\n rescaling_cols=['temp', 'hum', 'windspeed'] #declaring list of keys which has to be rescaled\n\n #Declaring dictionary to convert into dataframe in the next step.\n html_dict = {df_keys[i]: df_row[i] for i in range(len(df_keys))}\n\n def creating_input_to_model(dict):\n df_dict = {}\n if dict['weather']==[1.0]:\n df_dict['Best']=[1.0]\n df_dict['Neutral']=[0.0]\n elif dict['weather']==[2.0]:\n df_dict['Best']=[0.0]\n df_dict['Neutral']=[1.0]\n else:\n df_dict['Best'] = [0.0]\n df_dict['Neutral'] = [0.0]\n\n if dict['Seasons']==[1.0]:\n df_dict['spring']=[1.0]\n df_dict['temp']=dict['temp']\n else:\n df_dict['spring'] = [0.0]\n df_dict['temp'] = dict['temp']\n\n if dict['Seasons']==[4.0]:\n df_dict['winter']=[1.0]\n df_dict['summer']=[0.0]\n elif dict['Seasons']==[2.0]:\n df_dict['winter'] = [0.0]\n df_dict['summer'] = [1.0]\n else:\n df_dict['winter'] = [0.0]\n df_dict['summer'] = [0.0]\n\n df_dict['hum']=dict['hum']\n\n if dict['Month']==[7.0]:\n df_dict['Jul']=[1.0]\n df_dict['Sep']=[0.0]\n elif dict['Month']==[9.0]:\n df_dict['Jul'] = [0.0]\n df_dict['Sep'] = [1.0]\n else:\n df_dict['Jul'] = [0.0]\n df_dict['Sep'] = [0.0]\n\n df_dict['windspeed'] = dict['windspeed']\n df_dict['yr']= dict['yr']\n df_dict['holiday']=dict['holiday']\n\n return df_dict\n\n func_dict=creating_input_to_model(html_dict)\n df=pd.DataFrame(func_dict)\n df[df.columns[df.columns.isin(rescaling_cols)]] = model.scaler.transform(df[df.columns[df.columns.isin(rescaling_cols)]])\n\n #Prediction of the trained model\n prediction= my_model.predict(df)\n #Output derived from the ML model\n output= round(prediction[0], 2)\n\n #Output sent to the html page\n return render_template('index.html', prediction_text='Prediction: \\n {} cycle rents.'.format(output))\n\nif __name__==\"__main__\":\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"563659977","text":"from django.db import models\nimport os\nimport random\nimport shutil\nimport zipfile\n\n# Create your models here.\n\nfrom django.core.urlresolvers import reverse \nfrom django.db import models\nfrom datetime import datetime\nfrom utils.thumbs import ImageWithThumbsField\nfrom django.template.defaultfilters import slugify\nfrom django.core.files.base import ContentFile\n\n \nclass Album(models.Model):\n titulo = models.CharField(u'Titulo', max_length = 100)\n slug = models.SlugField(max_length = 100, blank = True, unique = True)\n data = models.DateTimeField(auto_now = True)\n \n def __unicode__(self): \n return 
self.titulo\n \n def get_absolute_url(self):\n return reverse('album', kwargs={'slug': self.slug})\n \n class Meta:\n ordering = ('titulo', )\n db_table = 'psssv_album' \n verbose_name_plural = 'Albuns'\n\nclass Foto(models.Model):\n album = models.ForeignKey('Album')\n titulo = models.CharField(u'Titulo', max_length = 100)\n slug = models.SlugField(max_length = 100, blank = True, unique = True)\n descricao = models.TextField(blank = True)\n original = ImageWithThumbsField(\n null = True,\n blank = True, \n upload_to = 'galeria',\n sizes=((125,125),(200,200),(500,500))\n )\n dataPublicacao = models.DateTimeField(auto_now = True)\n \n def __unicode__(self):\n return self.titulo\n \n def get_absolute_url(self):\n #return reverse('album', kwargs={'slug': self.slug})\n return self.original\n \n def get_exibicao(self):\n nome_arq = str(self.original)\n delimiter = nome_arq.find(\".JPG\")\n nome_arq = nome_arq[:delimiter] + \".500x500.jpg\"\n return nome_arq\n \n def get_thumbnail(self):\n nome_arq = str(self.original)\n delimiter = nome_arq.find(\".JPG\")\n nome_arq = nome_arq[:delimiter] + \".125x125.jpg\"\n return nome_arq\n \n class Meta: \n ordering =('album', 'titulo',)\n db_table = 'psssv_fotos'\n\nclass AlbumUpload(models.Model):\n zip = models.FileField(_('images file (.zip)'), upload_to=\"/temp\",\n help_text=_('Select a .zip file of images to upload into a new Gallery.'))\n album = models.ForeignKey(Album, null=True, blank=True, help_text=_('Select a gallery to add these videos to. leave this empty to create a new gallery from the supplied title.'))\n titulo = models.CharField(_('title'), max_length=75, help_text=_('All videos in the gallery will be given a title made up of the gallery title + a sequential number.'))\n descricao = models.TextField(_('description'), blank=True, help_text=_('Descricao dessa galeria.'))\n data = models.DateTimeField(auto_now = True)\n \n class Meta:\n verbose_name = _('album upload')\n verbose_name_plural = _('album uploads')\n\n def save(self, *args, **kwargs):\n super(AlbumUpload, self).save(*args, **kwargs)\n album = self.process_zipfile()\n super(AlbumUpload, self).delete()\n return album\n\n def process_zipfile(self):\n if os.path.isfile(self.zip_file.path):\n # TODO: implement try-except here\n zip = zipfile.ZipFile(self.zip_file.path)\n bad_file = zip.testzip()\n if bad_file:\n raise Exception('\"%s\" in the .zip archive is corrupt.' 
% bad_file)\n count = 1\n if self.album:\n Album = self.Album\n else:\n album = Album.objects.create(titulo=self.titulo,\n slug=slugify(self.titulo),\n data=self.data,\n )\n from cStringIO import StringIO\n for filename in zip.namelist():\n if filename.startswith('__'): # do not process meta files\n continue\n data = zip.read(filename)\n if len(data):\n while 1:\n titulo = ' '.join([self.titulo, str(count)])\n slug = slugify(titulo)\n try:\n p = Foto.objects.get(titulo_slug=slug)\n except Foto.DoesNotExist:\n foto = Foto(titulo=titulo,\n slug=slug,\n caption=self.caption,\n descricao =self.descricao ,\n )\n foto.save(filename, ContentFile(data), save=False)\n foto.save()\n album.add(foto)\n count = count + 1\n break\n count = count + 1\n zip.close()\n return album\n \n\n \n#Signals\nfrom django.db.models import signals\nfrom utils.signals_comuns import slug_pre_save\nsignals.pre_save.connect(slug_pre_save, sender = Album)\nsignals.pre_save.connect(slug_pre_save, sender = Foto) ","sub_path":"galeria/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"220322300","text":"#!/usr/bin/env python3\n\nimport terminal_colors\nimport git_lib\n\ndef do_pull(repo, remotes, branches):\n\n ORIGINAL_COLOR = terminal_colors.get_standard_color() \n report = []\n\n remotes_list = [i for i in remotes if \"fetch\" in remotes[i]]\n\n print(\"\\n* Pulling on %s ...\" % repo)\n hasanyfailed = False\n for rm in remotes_list:\n for bn in branches:\n\n v, r = git_lib.pull(repo, rm, bn)\n if v:\n out = \"OK.\"\n color = terminal_colors.TTY_GREEN\n else:\n print(r)\n hasanyfailed = True\n out = \"Failed.\"\n color = terminal_colors.TTY_RED\n\n report.append(\"%s%s (remote=%s, branch=%s): %s%s\" % (color, repo, rm, bn, out, ORIGINAL_COLOR))\n\n return hasanyfailed, report\n","sub_path":"git/visitor/backends/git_pull.py","file_name":"git_pull.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"438848508","text":"#!/usr/bin/env python\r\n#\r\n# Copyright 2016 Greg Eastman\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n#\r\n\r\n#Natively provided by python libraries\r\nimport json\r\nimport random\r\n\r\n#Natively provided by app engine\r\nimport google.appengine.ext.ndb as ndb\r\nimport google.appengine.api.users as google_authentication\r\n\r\n#Includes specified by the app.yaml\r\nimport webapp2\r\n\r\n#App specific includes\r\nimport datamodel\r\nimport constants\r\n\r\n\r\n_DEFAULT_GIFT_EXCHANGE_NAME = datamodel.DEFAULT_GIFT_EXCHANGE_NAME\r\n_DEFAULT_DISPLAY_NAME = ''\r\n_DEFAULT_MAX_RESULTS = 200\r\n\r\nmember_required = datamodel.member_required\r\n\r\ndef event_required(handler):\r\n \"\"\"\r\n Decorator that checks if there's an event associated with the current session.\r\n Looks for post parameters or JSON object.\r\n Will also fail if there's no session 
present.\r\n \"\"\"\r\n def check_event(self, *args, **kwargs):\r\n event = self.get_event(*args, **kwargs)\r\n if event is None:\r\n self.redirect(self.uri_for('home'), abort=True)\r\n else: \r\n return handler(self, *args, **kwargs) \r\n return check_event\r\n\r\nclass AdminWebAppHandler(datamodel.BaseHandler):\r\n \"\"\"A wrapper around webapp2.RequestHandler with a few convenience methods\"\"\"\r\n def get_event(self, *args, **kwargs):\r\n \"\"\"Gets an event from the get string event\"\"\"\r\n event = None\r\n try:\r\n event_string = kwargs['event']\r\n event_key = ndb.Key(urlsafe=event_string)\r\n event = event_key.get()\r\n except:\r\n pass\r\n return event\r\n\r\nclass HomeHandler(AdminWebAppHandler):\r\n \"\"\"Handles the requests to the admin home page\"\"\"\r\n @member_required\r\n def get(self):\r\n \"\"\"Handles get requests to the admin home page - listing all available events\"\"\"\r\n google_user = google_authentication.get_current_user()\r\n gift_exchange_key = datamodel.get_gift_exchange_key(_DEFAULT_GIFT_EXCHANGE_NAME)\r\n datamodel.GiftExchangeMember.update_and_retrieve_member_by_google_user(gift_exchange_key, google_user)\r\n query = datamodel.GiftExchangeEvent.get_all_events_query(gift_exchange_key)\r\n event_list = query.fetch(_DEFAULT_MAX_RESULTS) #maybe filter out the events that have ended\r\n not_started_events = []\r\n in_progress_events = []\r\n ended_events = []\r\n for event in event_list:\r\n if event.has_ended:\r\n ended_events.append(event)\r\n elif event.has_started:\r\n in_progress_events.append(event)\r\n else:\r\n not_started_events.append(event) \r\n template_values = {\r\n 'not_started_events': not_started_events,\r\n 'in_progress_events': in_progress_events,\r\n 'ended_events': ended_events,\r\n 'page_title': 'Administrative Dashboard',\r\n }\r\n self.add_template_values(template_values)\r\n self.render_template('admin.html')\r\n\r\nclass EventHandler(AdminWebAppHandler):\r\n \"\"\"Handles requests for updating a particular event, including the participants\"\"\"\r\n @member_required\r\n def get(self, *args, **kwargs):\r\n \"\"\"Handles get requests to the page that shows an administrative view of an event\"\"\"\r\n #TODO: add javascript validation\r\n event_string =''\r\n event = self.get_event(*args, **kwargs)\r\n event_display_name = _DEFAULT_DISPLAY_NAME\r\n money_limit = ''\r\n participant_list = []\r\n gift_exchange_key = datamodel.get_gift_exchange_key(_DEFAULT_GIFT_EXCHANGE_NAME)\r\n query = datamodel.GiftExchangeMember.get_all_members_query(gift_exchange_key)\r\n member_list = query.fetch(_DEFAULT_MAX_RESULTS)\r\n has_started = False\r\n has_ended = False\r\n if event is not None:\r\n event_string = event.key.urlsafe()\r\n event_display_name = event.display_name\r\n money_limit = event.money_limit\r\n has_started = event.has_started\r\n has_ended = event.has_ended\r\n query = datamodel.GiftExchangeParticipant.get_participants_in_event_query(gift_exchange_key, event.key)\r\n participant_list = query.fetch(_DEFAULT_MAX_RESULTS)\r\n template_values = {\r\n 'event_string': event_string,\r\n 'event_display_name': event_display_name,\r\n 'has_started': has_started,\r\n 'has_ended': has_ended,\r\n 'money_limit': money_limit,\r\n 'participant_list': participant_list, #TODO: put in better selector, and probably default names\r\n 'member_list': member_list,\r\n 'page_title': 'Edit an event',\r\n }\r\n self.add_template_values(template_values)\r\n self.render_template('event.html')\r\n \r\n @member_required\r\n def post(self, *args, **kwargs):\r\n 
\"\"\"Handles updating a particular event, including the participants. Expects a JSON object.\"\"\"\r\n def _prune_participants(gift_exchange_key, event_key, name_index):\r\n \"\"\"Deletes any participants from a given event that aren't in the name_index\"\"\"\r\n query = datamodel.GiftExchangeParticipant.get_participants_in_event_query(gift_exchange_key, event_key)\r\n participant_list = query.fetch(_DEFAULT_MAX_RESULTS)\r\n for participant in participant_list:\r\n if participant.display_name in name_index:\r\n if ((participant.get_member().email_address != name_index[participant.display_name][0]) \r\n or (participant.family != name_index[participant.display_name][1])):\r\n participant.key.delete()\r\n else:\r\n participant.key.delete()\r\n return\r\n \r\n def _save_participants(gift_exchange_key, event_key, participant_list):\r\n \"\"\"Helper method for saving the participants in a particular event, including pruning participants\"\"\"\r\n message = None\r\n #There's likely a better way to check for duplicates, but this shouldn't happen\r\n name_index = {}\r\n for participant_object in participant_list:\r\n temp_name = participant_object['display_name']\r\n if temp_name in name_index:\r\n return 'Duplicate name found: ' + temp_name\r\n name_index[participant_object['display_name']] = (participant_object['email'], participant_object['family'])\r\n for participant_object in participant_list:\r\n needs_saving = False\r\n display_name = participant_object['display_name']\r\n participant = datamodel.GiftExchangeParticipant.get_participant_by_name(gift_exchange_key, display_name, event_key)\r\n if participant is None:\r\n participant = datamodel.GiftExchangeParticipant.create_participant_by_name(gift_exchange_key, display_name, event_key)\r\n needs_saving = True\r\n member = datamodel.GiftExchangeMember.get_member_by_email(gift_exchange_key, participant_object['email'])\r\n if participant.member_key != member.key:\r\n participant.member_key = member.key\r\n needs_saving = True\r\n family = participant_object['family']\r\n if participant.family != family:\r\n participant.family = family\r\n needs_saving = True\r\n if needs_saving:\r\n participant.put()\r\n _prune_participants(gift_exchange_key, event_key, name_index)\r\n return message\r\n \r\n data = json.loads(self.request.body)\r\n gift_exchange_key = datamodel.get_gift_exchange_key(_DEFAULT_GIFT_EXCHANGE_NAME)\r\n message = 'Event Updated Successfully'\r\n event = self.get_event(*args, **kwargs)\r\n needs_saving = False\r\n event_display_name = data['event_display_name']\r\n money_limit = data['money_limit']\r\n if ((event_display_name is None) or (event_display_name == '') or (event_display_name == _DEFAULT_DISPLAY_NAME)):\r\n message = 'You must select a valid display name'\r\n else:\r\n if event is None:\r\n event = datamodel.GiftExchangeEvent(parent=gift_exchange_key)\r\n needs_saving = True\r\n if event.display_name != event_display_name:\r\n event.display_name = event_display_name\r\n needs_saving = True\r\n if money_limit:\r\n if event.money_limit != money_limit:\r\n event.money_limit = money_limit\r\n needs_saving = True\r\n if needs_saving:\r\n event.put()\r\n if not event.has_started: #maybe should return a message, but UI handles it\r\n error_message = _save_participants(gift_exchange_key, event.key, data['participant_list'])\r\n if error_message:\r\n message = error_message\r\n self.response.out.write(json.dumps(({'message': message, 'event_string': event.key.urlsafe(), 'money_limit': event.money_limit})))\r\n \r\nclass 
DeleteHandler(AdminWebAppHandler):\r\n \"\"\"Handles requests for deleting an event, including all participants associated with the event\"\"\"\r\n @event_required\r\n @member_required\r\n def post(self, *args, **kwargs):\r\n \"\"\"Takes a JSON request and deletes the event and all participants associated with it.\"\"\"\r\n gift_exchange_key = datamodel.get_gift_exchange_key(_DEFAULT_GIFT_EXCHANGE_NAME)\r\n event = self.get_event(*args, **kwargs)\r\n participant_query = datamodel.GiftExchangeParticipant.get_participants_in_event_query(gift_exchange_key, event.key)\r\n participant_list = participant_query.fetch(_DEFAULT_MAX_RESULTS)\r\n for participant in participant_list:\r\n message_query = datamodel.GiftExchangeMessage.get_messages_from_participant_query(gift_exchange_key, participant)\r\n message_list = message_query.fetch(_DEFAULT_MAX_RESULTS*10)\r\n for message in message_list:\r\n message.key.delete()\r\n participant.key.delete()\r\n event.key.delete()\r\n self.response.out.write(json.dumps(({'message': 'Successfully deleted.'})))\r\n \r\nclass ReportHandler(AdminWebAppHandler):\r\n \"\"\"Handles showing a report for all the data about a particular event.\"\"\"\r\n @event_required\r\n @member_required\r\n def get(self, *args, **kwargs):\r\n \"\"\"Displays a report about a particular event.\"\"\"\r\n event = self.get_event(*args, **kwargs)\r\n gift_exchange_key = datamodel.get_gift_exchange_key(_DEFAULT_GIFT_EXCHANGE_NAME)\r\n query = datamodel.GiftExchangeParticipant.get_participants_in_event_query(gift_exchange_key, event.key)\r\n participant_list = query.fetch(_DEFAULT_MAX_RESULTS)\r\n template_values = {\r\n 'event': event,\r\n 'participant_list': participant_list,\r\n 'page_title': 'Event Report',\r\n }\r\n self.add_template_values(template_values)\r\n self.render_template('report.html')\r\n \r\nclass InheritHandler(AdminWebAppHandler):\r\n \"\"\"Handler for a particular event spawning a child event with the same defaults and previous targets filled in\"\"\"\r\n @event_required\r\n @member_required\r\n def get(self, *args, **kwargs):\r\n \"\"\"Handles the get requests for inheriting an event\"\"\"\r\n parent_event = self.get_event(*args, **kwargs)\r\n gift_exchange_key = datamodel.get_gift_exchange_key(_DEFAULT_GIFT_EXCHANGE_NAME)\r\n child_event = datamodel.GiftExchangeEvent(parent=gift_exchange_key)\r\n child_event.display_name = 'Sequel to ' + parent_event.display_name\r\n child_event.money_limit = parent_event.money_limit\r\n child_event.put()\r\n child_event_key = child_event.key\r\n query = datamodel.GiftExchangeParticipant.get_participants_in_event_query(gift_exchange_key, parent_event.key)\r\n participant_list = query.fetch(_DEFAULT_MAX_RESULTS)\r\n for participant in participant_list:\r\n display_name = participant.display_name\r\n new_participant = datamodel.GiftExchangeParticipant.create_participant_by_name(gift_exchange_key, display_name, child_event_key)\r\n new_participant.member_key = participant.member_key\r\n new_participant.family = participant.family\r\n new_participant.previous_target = participant.target\r\n new_participant.put()\r\n self.redirect(self.uri_for('event', event=child_event.key.urlsafe()))\r\n\r\nclass StatusChangeHandler(AdminWebAppHandler):\r\n \"\"\"Handler for changing for starting or stopping an event\"\"\"\r\n @event_required\r\n @member_required\r\n def post(self, *args, **kwargs):\r\n \"\"\"Post handler for starting or stopping an event. 
Expects a JSON object\"\"\"\r\n \r\n def _is_valid_assignment(source_participant, target_participant):\r\n \"\"\"Returns whether source_participant can give to target_participant\"\"\"\r\n #Cannot give to yourself\r\n if source_participant.display_name == target_participant.display_name:\r\n return False\r\n #Cannot give to the person you gave to last year\r\n if source_participant.previous_target == target_participant.display_name:\r\n return False\r\n #If you are in a family, cannot give to someone in your own family\r\n if source_participant.family:\r\n if source_participant.family == target_participant.family:\r\n return False\r\n return True\r\n \r\n def _can_assign(source_participant, target_participant, need_to_give, need_a_giver):\r\n \"\"\"Returns where the source_participant can give to target_participant by checking if there is still a valid assignment scheme for other participants\"\"\" \r\n source_participant.target = target_participant.display_name\r\n source_index = need_to_give.index(source_participant)\r\n need_to_give.remove(source_participant)\r\n target_index = need_a_giver.index(target_participant)\r\n need_a_giver.remove(target_participant)\r\n \r\n if len(need_to_give) == 0:\r\n return True\r\n for giver in need_to_give:\r\n found_possible_participant = False\r\n for givee in need_a_giver:\r\n if _is_valid_assignment(giver, givee):\r\n found_possible_participant = True\r\n break\r\n if not found_possible_participant:\r\n source_participant.target = None\r\n need_to_give.insert(source_index, source_participant)\r\n need_a_giver.insert(target_index, target_participant)\r\n return False\r\n for givee in need_a_giver:\r\n if _is_valid_assignment(giver, givee):\r\n if _can_assign(giver, givee, need_to_give, need_a_giver):\r\n return True\r\n else:\r\n source_participant.target = None\r\n need_to_give.insert(source_index, source_participant)\r\n need_a_giver.insert(target_index, target_participant)\r\n source_participant.target = None\r\n need_to_give.insert(source_index, source_participant)\r\n need_a_giver.insert(target_index, target_participant)\r\n return False\r\n \r\n def _assign_participants(gift_exchange_key, event_key):\r\n \"\"\"Helper method for assigning targets to all participants in a given event.\"\"\"\r\n query = datamodel.GiftExchangeParticipant.get_participants_in_event_query(gift_exchange_key, event_key)\r\n participant_list = query.fetch(_DEFAULT_MAX_RESULTS)\r\n if len(participant_list) == 0:\r\n return\r\n need_to_give = list(participant_list)\r\n need_a_giver = list(participant_list)\r\n #randomize list and then brute force for acceptable assignment\r\n random.shuffle(need_to_give)\r\n random.shuffle(need_a_giver)\r\n source_participant = need_to_give[0]\r\n for target_participant in need_a_giver:\r\n if _is_valid_assignment(source_participant, target_participant):\r\n if _can_assign(source_participant, target_participant, need_to_give, need_a_giver):\r\n break\r\n #Save all participants\r\n for participant in participant_list:\r\n participant.put()\r\n return\r\n \r\n data = json.loads(self.request.body)\r\n status_change_type = data['status_change_type']\r\n event = self.get_event(*args, **kwargs)\r\n if status_change_type == 'start':\r\n _assign_participants(datamodel.get_gift_exchange_key(_DEFAULT_GIFT_EXCHANGE_NAME), event.key)\r\n event.has_started = True\r\n event.put()\r\n if status_change_type == 'stop':\r\n event.has_ended = True\r\n event.put()\r\n self.response.out.write(json.dumps(({'message': 'Event successfully updated', 
'event_string': event.key.urlsafe()})))\r\n\r\nconfig = {\r\n 'webapp2_extras.auth': {\r\n 'user_model': 'datamodel.User',\r\n 'user_attributes': ['name']\r\n },\r\n 'webapp2_extras.sessions': {\r\n 'secret_key': constants.SECRET_KEY\r\n }\r\n}\r\n\r\napp = webapp2.WSGIApplication([\r\n webapp2.Route('/admin/', HomeHandler, name='home'),\r\n webapp2.Route('/admin/event/', handler=EventHandler),\r\n webapp2.Route('/admin/event/<event>', handler=EventHandler, name='event'),\r\n webapp2.Route('/admin/inherit/<event>', handler=InheritHandler),\r\n webapp2.Route('/admin/statuschange/<event>', handler=StatusChangeHandler),\r\n webapp2.Route('/admin/delete/<event>', handler=DeleteHandler),\r\n webapp2.Route('/admin/report/<event>', handler=ReportHandler)\r\n], debug=False, config=config)\r\n","sub_path":"src/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":18686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"538378173","text":"from setuptools import setup\n\nwith open('postcodeinfo.py', 'r') as f:\n version = reduce(\n lambda a, l: l.startswith('__version__') and l[15:-2] or a, f, '')\n\nwith open('README.rst', 'r') as f:\n readme = f.read()\n\nsetup(\n name='postcodeinfo',\n version=version,\n py_modules=['postcodeinfo'],\n description=(\n 'API client for https://github.com/ministryofjustice/postcodeinfo'),\n long_description=readme,\n author='MOJ Digital Services',\n author_email='andy.driver@digital.justice.gov.uk',\n maintainer='Andy Driver',\n maintainer_email='andy.driver@digital.justice.gov.uk',\n keywords=['python', 'ministryofjustice', 'govuk'],\n platforms=['any'],\n url='https://github.com/ministryofjustice/postcodeinfo-client-python',\n license='LICENSE',\n install_requires=['requests>=2.7,<3'],\n test_suite=\"tests\",\n classifiers=[\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Development Status :: 3 - Alpha',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Python Modules'])\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"602957927","text":"from flask import Flask, render_template\nimport data_collector\nimport team\n\n# Set as variables so that they can be easily changed\nDEBUG = True\nPORT = 8000\nHOST = '0.0.0.0'\n\n# Initialise flask app\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef index():\n # Include team member names and teams\n context = team.team\n\n # Fetch the data from S3\n data = data_collector.collect_data_url()\n return render_template(\"home.html\", context=list(team.team.values()), data=data)\n\n\nif __name__ == \"__main__\":\n app.run(debug=DEBUG, port=PORT, host=HOST)\n\n\n# @app.route(\"/meet_team\")\n# def meet_team():\n# return \"Hello, Meet our team!\"\n\n\n# @app.route(\"/data\")\n# def data():\n# return \"IT JOB WATCH!\"\n","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"392360430","text":"import pickle\r\nimport os\r\nimport argparse\r\nimport logging\r\nimport torch\r\nimport time\r\nimport sys\r\nimport random\r\n\r\nimport numpy as np\r\nfrom numpy.linalg import 
*\r\nimport torch.optim as optim\r\nimport torchvision.transforms as transforms\r\n\r\nfrom sklearn.cluster import KMeans\r\nfrom datetime import datetime\r\nfrom torch.autograd import Variable\r\nfrom torch.utils.data import DataLoader\r\nimport torch.nn as nn\r\nimport torch.backends.cudnn as cudnn\r\n\r\nimport utils.DC_dataset as dataset\r\nimport utils.DAGH_no_Ink_loss as dl\r\nimport utils.cnn_model_DAGH as cnn_model\r\nimport utils.calc_hr as calc_hr\r\n\r\nparser = argparse.ArgumentParser(description=\"DAGH demo\")\r\nparser.add_argument('--bits', default='8', type=str,\r\n help='binary code length (default: 8,16,32,64)')\r\nparser.add_argument('--gpu', default='0', type=str,\r\n help='selected gpu (default: 0)')\r\nparser.add_argument('--dataname', default='MirFlickr', type=str,\r\n help='MirFlickr, NUSWIDE, COCO')\r\nparser.add_argument('--arch', default='vgg11', type=str,\r\n help='model name (default: resnet50,vgg11)')\r\nparser.add_argument('--max-iter', default=6, type=int,\r\n help='maximum iteration (default: 6)')\r\nparser.add_argument('--epochs', default=2, type=int,\r\n help='number of epochs (default: 2)')\r\nparser.add_argument('--batch-size', default=48, type=int,\r\n help='batch size (default: 48)')\r\nparser.add_argument('--lambda-1', default='10', type=str,\r\n help='hyper-parameter: oth-lambda-1 (default: 0.001,0.01,0.1,1,10,100)')\r\nparser.add_argument('--lambda-2', default='0.1', type=str,\r\n help='hyper-parameter: bla-lambda-2 (default: 0.1)')\r\nparser.add_argument('--lambda-3', default='1', type=str,\r\n help='hyper-parameter: l1-lambda-3 (default: 1)')\r\nparser.add_argument('--learning-rate', default=0.01, type=float,\r\n help='hyper-parameter: learning rate (default: 0.01)')\r\nparser.add_argument('--num-anchor', default=300, type=int,\r\n help='number of anchor: (default: 300)')\r\n\r\ndef _logging():\r\n os.mkdir(logdir)\r\n global logger\r\n logfile = os.path.join(logdir, 'log.log')\r\n logger = logging.getLogger('')\r\n logger.setLevel(logging.INFO)\r\n fh = logging.FileHandler(logfile)\r\n fh.setLevel(logging.INFO)\r\n ch = logging.StreamHandler()\r\n ch.setLevel(logging.INFO)\r\n\r\n _format = logging.Formatter(\"%(name)-4s: %(levelname)-4s: %(message)s\")\r\n fh.setFormatter(_format)\r\n ch.setFormatter(_format)\r\n\r\n logger.addHandler(fh)\r\n logger.addHandler(ch)\r\n return\r\n\r\ndef _record():\r\n global record\r\n record = {}\r\n record['train loss'] = []\r\n record['iter time'] = []\r\n record['param'] = {}\r\n return\r\n\r\ndef _save_record(record, filename):\r\n with open(filename, 'wb') as fp:\r\n pickle.dump(record, fp)\r\n return\r\n\r\ndef get_dist_graph(all_points, num_anchor=300):\r\n \"\"\"\r\n get the cluster center as anchor by K-means++\r\n and calculate distance graph (n data points vs m anchors),\r\n :param all_points: n data points\r\n :param num_anchor: m anchors, default = 300\r\n :return: distance graph n X m\r\n \"\"\"\r\n # kmeans = KMeans (n_clusters=num_anchor, random_state=0, n_jobs=16, max_iter=50).fit_transform(all_points)\r\n # print ('dist graph done!')\r\n # return np.asarray(kmeans)\r\n ## sample\r\n sample_rate = 3000\r\n num_data = np.size(all_points,0)\r\n ind = random.sample(range(num_data),sample_rate)\r\n sample_points = all_points[ind,:]\r\n kmeans = KMeans (n_clusters=num_anchor, random_state=0, n_jobs=16, max_iter=50).fit(sample_points)\r\n km = kmeans.transform(all_points)\r\n print ('dist graph done!')\r\n return np.asarray(km)\r\n\r\ndef calc_Z(dist_graph,s=2):\r\n \"\"\"\r\n calculate 
anchor graph (n data points vs m anchors),\r\n :param dist_graph: the distance matrix of n data points vs m anchors, n X m\r\n :param s: the number of nearest anchors, default = 2\r\n :return: anchor graph, n X m\r\n \"\"\"\r\n #dist_graph = dist_graph * dist_graph\r\n n,m = dist_graph.shape\r\n Z = np.zeros((n,m))\r\n val = np.zeros((n,s))\r\n pos = np.zeros((n,s), 'int')\r\n for i in range(0,s):\r\n val[:,i] = np.min(dist_graph,1)\r\n pos[:,i] = np.argmin(dist_graph,1)\r\n x = range(0,n)\r\n y = pos[:,i]\r\n dist_graph[x,y] = 1e60\r\n sigma = np.mean(val[:,s-1] ** 0.5)\r\n\r\n '''\r\n normalization\r\n '''\r\n val = np.exp(-val / (1 / 1 * sigma ** 2))\r\n val = np.tile(1./val.sum(1),(s,1)).T * val\r\n\r\n for i in range(0, s):\r\n x = range (0, n)\r\n y = pos[:, i]\r\n Z[x,y] = val[:,i]\r\n\r\n print ('Z graph done!')\r\n return Z\r\n\r\ndef get_batch_gard(B, left, Z_T, batch_ind):\r\n \"\"\"\r\n get the gradient of each corresponding batch : = BL[:,batch_ind]\r\n :param B: Binary codes of the all data points, k X n\r\n :param left: B * Z * inv_A, k X m\r\n :param Z: anchor graph, n X m\r\n Laplacian matrix L: I - Z * inv_A * Z^t\r\n :param batch_ind: batch size X k\r\n :return: the gradient of the batch points, batch size X k\r\n \"\"\"\r\n grad = B[:,batch_ind] - np.dot(left, Z_T[:,batch_ind])\r\n return grad.transpose()\r\n\r\ndef B_step(F, Z, inv_A):\r\n \"\"\"\r\n Update B : F^t * W\r\n :param F: output of network as the real-valued embeddings n X k\r\n :param Z: anchor graph, n X m\r\n Affinity matrix W: Z * inv_A * Z^t\r\n :return: the updated B, k X n\r\n \"\"\"\r\n Z_T = Z.transpose() # m X n\r\n temp = np.dot(F.transpose(), np.dot(Z, inv_A)) # k X m\r\n B = np.dot(temp, Z_T) # k X n\r\n print ('B step done!')\r\n return np.sign(B)\r\n\r\ndef encoding_onehot(target, nclasses=10):\r\n target_onehot = torch.FloatTensor(target.size(0), nclasses)\r\n target_onehot.zero_()\r\n target_onehot.scatter_(1, target.view(-1, 1), 1)\r\n return target_onehot\r\n\r\ndef _dataset(dataname):\r\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\r\n transformations = transforms.Compose([\r\n transforms.Scale(256),\r\n transforms.CenterCrop(224),\r\n transforms.ToTensor(),\r\n normalize\r\n ])\r\n\r\n rootpath = os.path.join('/data/dacheng/Datasets/', dataname)\r\n\r\n if dataname=='NUSWIDE':\r\n dset_database = dataset.NUSWIDE ('train_img.txt', 'train_label.txt', transformations)\r\n dset_test = dataset.NUSWIDE ('test_img.txt', 'test_label.txt', transformations)\r\n elif dataname=='MirFlickr':\r\n dset_database = dataset.MirFlickr ('train_img.txt', 'train_label.txt', transformations)\r\n dset_test = dataset.MirFlickr ('test_img.txt', 'test_label.txt', transformations)\r\n elif dataname =='COCO':\r\n dset_database = dataset.COCO ('train_img.txt', 'train_label.txt', transformations)\r\n dset_test = dataset.COCO ('test_img.txt', 'test_label.txt', transformations)\r\n\r\n num_database, num_test = len (dset_database), len (dset_test)\r\n\r\n def load_label(filename, DATA_DIR):\r\n path = os.path.join(DATA_DIR, filename)\r\n fp = open(path, 'r')\r\n labels = [x.strip() for x in fp]\r\n fp.close()\r\n return torch.LongTensor(list(map(int, labels)))\r\n\r\n def DC_load_label(filename, DATA_DIR):\r\n path = os.path.join(DATA_DIR, filename)\r\n label = np.loadtxt (path, dtype=np.int64)\r\n return torch.LongTensor(label)\r\n\r\n def load_label2(root, train=True):\r\n base_folder = 'cifar-10-batches-py'\r\n train_list = [\r\n ['data_batch_1', 
'c99cafc152244af753f735de768cd75f'],\r\n ['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],\r\n ['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],\r\n ['data_batch_4', '634d18415352ddfa80567beed471001a'],\r\n ['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],\r\n ]\r\n\r\n test_list = [\r\n ['test_batch', '40351d587109b95175f43aff81a1287e'],\r\n ]\r\n\r\n root = os.path.expanduser(root)\r\n train = train # training set or test set\r\n\r\n # now load the picked numpy arrays\r\n if train:\r\n train_data = []\r\n train_labels = []\r\n for fentry in train_list:\r\n f = fentry[0]\r\n file = os.path.join(root, base_folder, f)\r\n fo = open(file, 'rb')\r\n if sys.version_info[0] == 2:\r\n entry = pickle.load(fo)\r\n else:\r\n entry = pickle.load(fo, encoding='latin1')\r\n train_data.append(entry['data'])\r\n if 'labels' in entry:\r\n train_labels += entry['labels']\r\n else:\r\n train_labels += entry['fine_labels']\r\n fo.close()\r\n\r\n else:\r\n f = test_list[0][0]\r\n file = os.path.join(root, base_folder, f)\r\n fo = open(file, 'rb')\r\n if sys.version_info[0] == 2:\r\n entry = pickle.load(fo)\r\n else:\r\n entry = pickle.load(fo, encoding='latin1')\r\n test_data = entry['data']\r\n if 'labels' in entry:\r\n test_labels = entry['labels']\r\n else:\r\n test_labels = entry['fine_labels']\r\n fo.close()\r\n\r\n if train:\r\n target = train_labels\r\n else:\r\n target = test_labels\r\n return torch.LongTensor(list(map(int, target)))\r\n\r\n databaselabels = DC_load_label('train_label.txt', rootpath)\r\n testlabels = DC_load_label('test_label.txt', rootpath)\r\n\r\n # testlabels2 = load_label2('/home/dacheng/PycharmProjects/ADSH_pytorch/data', train=False)\r\n # databaselabels2 = load_label2('/home/dacheng/PycharmProjects/ADSH_pytorch/data', train=True)\r\n\r\n # testlabels = encoding_onehot(testlabels2)\r\n # databaselabels = encoding_onehot(databaselabels2)\r\n\r\n dsets = (dset_database, dset_test)\r\n nums = (num_database, num_test)\r\n labels = (databaselabels, testlabels)\r\n\r\n return nums, dsets, labels\r\n\r\ndef calc_loss(B, F, Z, inv_A, lambda_1, lambda_2, lambda_3, code_length):\r\n \"\"\"\r\n Calculate loss: Tr(BLF^t) = Tr(B * Z * inv_A * Z^t * F^t)\r\n :param F: output of network n X k\r\n :param B: binary codes k X n\r\n :param Z: anchor graph, n X m\r\n :return: loss: trace(BLF)\r\n \"\"\"\r\n Z_T = Z.transpose () # m X n\r\n temp = np.dot (B, np.dot (Z, inv_A)) # k X m\r\n temp2 = np.dot (temp, Z_T) # k X n\r\n BAF = np.dot (temp2, F) # k X k\r\n Tr_BLF = np.trace(np.dot(B, F) - BAF)\r\n\r\n num_train = np.size (B,1)\r\n # nI_K = num_train * np.eye (code_length, code_length)\r\n nI_K = np.eye (code_length, code_length)\r\n\r\n one_vectors = np.ones ((num_train, code_length))\r\n reg_loss = (B - F.transpose()) ** 2\r\n # oth_loss = (np.dot(F.transpose(), F) - nI_K) ** 2\r\n oth_loss = (np.dot (F.transpose (), F)/num_train - nI_K) ** 2\r\n\r\n bla_loss = (F.sum (0)) ** 2\r\n susb_loss = np.abs (F) - one_vectors\r\n L1_loss = np.abs (susb_loss)\r\n loss = (Tr_BLF + 0.5 * (reg_loss.sum () + lambda_2 * bla_loss.sum () + lambda_3 * L1_loss.sum())) / num_train + 0.5*lambda_1 * oth_loss.sum ()/ (code_length)\r\n\r\n print ('Tr_BLF:'+ str(Tr_BLF/ num_train))\r\n print ('reg_loss:' + str (reg_loss.sum () / num_train))\r\n print ('oth_loss:' + str (lambda_1 * oth_loss.sum ()/ (code_length)))\r\n print ('bla_loss:' + str (lambda_2 * bla_loss.sum ()/ num_train))\r\n print ('L1_loss:' + str (lambda_3 * L1_loss.sum()/ num_train))\r\n\r\n print ('loss done!')\r\n return loss\r\n\r\ndef 
encode(model, data_loader, num_data, bit):\r\n B = np.zeros([num_data, bit], dtype=np.float32)\r\n for iter, data in enumerate(data_loader, 0):\r\n data_input, _, data_ind = data\r\n data_input = Variable(data_input.cuda(),volatile=True)\r\n output = model(data_input)\r\n B[data_ind.numpy(), :] = torch.sign(output[1].cpu().data).numpy()\r\n return B\r\n\r\ndef adjusting_learning_rate(optimizer, iter):\r\n #update_list = [10, 20, 30, 40, 50]\r\n if ((iter % 3) == 0) & (iter !=0):\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = param_group['lr'] / 5\r\n print ('learning rate is adjusted!')\r\n\r\ndef DAGH_algo(code_length, dataname):\r\n os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu\r\n torch.manual_seed(0)\r\n torch.cuda.manual_seed(0)\r\n # code_length=8\r\n\r\n '''\r\n parameter setting\r\n '''\r\n max_iter = opt.max_iter\r\n epochs = opt.epochs\r\n batch_size = opt.batch_size\r\n learning_rate = opt.learning_rate\r\n weight_decay = 5 * 10 ** -4\r\n num_anchor = opt.num_anchor\r\n lambda_1 = float(opt.lambda_1)\r\n lambda_2 = float(opt.lambda_2)\r\n lambda_3 = float(opt.lambda_3)\r\n\r\n record['param']['opt'] = opt\r\n record['param']['description'] = '[Comment: learning rate decay]'\r\n logger.info(opt)\r\n logger.info(code_length)\r\n logger.info(record['param']['description'])\r\n\r\n '''\r\n dataset preprocessing\r\n '''\r\n nums, dsets, labels = _dataset(dataname)\r\n num_database, num_test = nums\r\n dset_database, dset_test = dsets\r\n database_labels, test_labels = labels\r\n\r\n '''\r\n model construction\r\n '''\r\n beta = 2\r\n model = cnn_model.CNNNet(opt.arch, code_length)\r\n model.cuda()\r\n cudnn.benchmark = True\r\n DAGH_loss = dl.DAGHLoss (lambda_1, lambda_2, lambda_3, code_length)\r\n L1_criterion = nn.L1Loss ()\r\n L2_criterion = nn.MSELoss ()\r\n optimizer = optim.SGD(model.parameters(), lr=learning_rate, weight_decay=weight_decay)\r\n\r\n B = np.sign(np.random.randn(code_length, num_database))\r\n\r\n model.train()\r\n for iter in range(max_iter):\r\n iter_time = time.time()\r\n\r\n trainloader = DataLoader(dset_database, batch_size=batch_size,\r\n shuffle=True,\r\n num_workers=4)\r\n F = np.zeros ((num_database, code_length), dtype=np.float)\r\n\r\n if iter == 0:\r\n '''\r\n initialize the feature of all images to build dist graph\r\n '''\r\n ini_Features = np.zeros ((num_database, 4096), dtype=np.float)\r\n ini_F = np.zeros ((num_database, code_length), dtype=np.float)\r\n for iteration, (train_input, train_label, batch_ind) in enumerate (trainloader):\r\n train_input = Variable (train_input.cuda ())\r\n output = model (train_input)\r\n ini_Features[batch_ind, :] = output[0].cpu ().data.numpy ()\r\n ini_F[batch_ind, :] = output[1].cpu ().data.numpy ()\r\n print ('initialization dist graph forward done!')\r\n dist_graph = get_dist_graph (ini_Features, num_anchor)\r\n # dist_graph = np.random.rand(num_database,num_anchor)\r\n # bf = np.sign(ini_F)\r\n Z = calc_Z (dist_graph)\r\n elif (iter % 3) == 0:\r\n dist_graph = get_dist_graph (Features, num_anchor)\r\n Z = calc_Z (dist_graph)\r\n print ('calculate dist graph forward done!')\r\n\r\n inv_A = inv (np.diag (Z.sum (0))) # m X m\r\n Z_T = Z.transpose () # m X n\r\n left = np.dot (B, np.dot (Z, inv_A)) # k X m\r\n\r\n if iter == 0:\r\n loss_ini = calc_loss (B, ini_F, Z, inv_A, lambda_1, lambda_2, lambda_3, code_length)\r\n # loss_ini2 = calc_all_loss(B,F,Z,inv_A,Z1,Z2,Y1,Y2,rho1,rho2,lambda_1,lambda_2)\r\n print(loss_ini)\r\n '''\r\n learning deep neural network: feature learning\r\n '''\r\n 
Features = np.zeros ((num_database, 4096), dtype=np.float)\r\n for epoch in range(epochs):\r\n for iteration, (train_input, train_label, batch_ind) in enumerate(trainloader):\r\n train_input = Variable(train_input.cuda())\r\n\r\n output = model(train_input)\r\n Features[batch_ind, :] = output[0].cpu ().data.numpy ()\r\n F[batch_ind, :] = output[1].cpu ().data.numpy ()\r\n\r\n batch_grad = get_batch_gard(B, left, Z_T, batch_ind)/(1*batch_size)\r\n batch_grad = Variable (torch.from_numpy (batch_grad).type (torch.FloatTensor).cuda ())\r\n optimizer.zero_grad()\r\n # output[1].backward(batch_grad, retain_graph=True)\r\n output[1].backward (batch_grad)\r\n\r\n B_cuda = Variable (torch.from_numpy (B[:,batch_ind]).type (torch.FloatTensor).cuda ())\r\n # optimizer.zero_grad ()\r\n other_loss = DAGH_loss(output[1].t(),B_cuda)\r\n one_vectors = Variable (torch.ones (output[1].size()).cuda ())\r\n L1_loss = L1_criterion(torch.abs(output[1]),one_vectors)\r\n # L2_loss = L2_criterion (output[1],B_cuda.t())\r\n All_loss = other_loss + lambda_3 * L1_loss\r\n All_loss.backward()\r\n\r\n optimizer.step()\r\n\r\n if (iteration % 200) == 0 :\r\n print ('iteration:' + str (iteration))\r\n #print (model.features[0].weight.data[1, 1, :, :])\r\n #print (model.features[18].weight.data[1, 1, :, :])\r\n #print (model.classifier[6].weight.data[:, 1])\r\n adjusting_learning_rate(optimizer, iter)\r\n '''\r\n learning binary codes: discrete coding\r\n '''\r\n # bf = np.sign (F)\r\n\r\n # F = np.random.randn (num_database, 12)\r\n loss_before = calc_loss (B, F, Z, inv_A, lambda_1, lambda_2, lambda_3, code_length)\r\n\r\n B = B_step (F, Z, inv_A)\r\n iter_time = time.time() - iter_time\r\n loss_ = calc_loss(B, F, Z, inv_A, lambda_1, lambda_2, lambda_3, code_length)\r\n\r\n logger.info('[Iteration: %3d/%3d][Train Loss: before:%.4f, after:%.4f]', iter, max_iter, loss_before, loss_)\r\n record['train loss'].append(loss_)\r\n record['iter time'].append(iter_time)\r\n\r\n '''\r\n training procedure finishes, evaluation\r\n '''\r\n model.eval()\r\n testloader = DataLoader(dset_test, batch_size=batch_size,\r\n shuffle=False,\r\n num_workers=4)\r\n qB = encode(model, testloader, num_test, code_length)\r\n rB = B.transpose()\r\n\r\n topKs = np.arange (1, 500, 50)\r\n top_ndcg = 100\r\n map = calc_hr.calc_map (qB, rB, test_labels.numpy (), database_labels.numpy ())\r\n top_map = calc_hr.calc_topMap (qB, rB, test_labels.numpy (), database_labels.numpy (), 2000)\r\n Pres = calc_hr.calc_topk_pres (qB, rB, test_labels.numpy (), database_labels.numpy (), topKs)\r\n ndcg = calc_hr.cal_ndcg_k (qB, rB, test_labels.numpy (), database_labels.numpy (), top_ndcg)\r\n\r\n logger.info ('[lambda_1: %.4f]', lambda_1)\r\n logger.info ('[lambda_2: %.4f]', lambda_2)\r\n logger.info ('[lambda_3: %.4f]', lambda_3)\r\n logger.info('[Evaluation: mAP: %.4f]', map)\r\n logger.info ('[Evaluation: topK_mAP: %.4f]', top_map)\r\n logger.info('[Evaluation: Pres: %.4f]', Pres[0])\r\n logger.info ('[Evaluation: topK_ndcg: %.4f]', ndcg)\r\n record['rB'] = rB\r\n record['qB'] = qB\r\n record['map'] = map\r\n record['topK_map'] = top_map\r\n record['topK_ndcg'] = ndcg\r\n record['Pres'] = Pres\r\n record['F'] = F\r\n filename = os.path.join(logdir, str(code_length) + 'bits-record.pkl')\r\n\r\n _save_record(record, filename)\r\n return top_map\r\n\r\n\r\nif __name__==\"__main__\":\r\n global opt, logdir\r\n opt = parser.parse_args()\r\n logdir = '-'.join(['log/noInk',opt.dataname, datetime.now().strftime(\"%y-%m-%d-%H-%M-%S\")])\r\n _logging()\r\n _record()\r\n bits 
= [int(bit) for bit in opt.bits.split(',')]\r\n for bit in bits:\r\n DAGH_algo (bit,opt.dataname)\r\n # lambda_1s = [ float(lambda_1) for lambda_1 in opt.lambda_1.split (',')]\r\n # lambda_2s = [int (lambda_2) for lambda_2 in opt.lambda_2.split (',')]\r\n # lambda_3s = [float (lambda_3) for lambda_3 in opt.lambda_3.split (',')]\r\n # topmap_lambda = np.zeros ((len(lambda_1s),len(lambda_3s)))\r\n # for i, lambda_1 in enumerate(lambda_1s):\r\n # for j, lambda_3 in enumerate(lambda_3s):\r\n #\r\n # topmap = DAGH_algo(lambda_1,lambda_3,opt.dataname)\r\n #\r\n # topmap_lambda[i,j] = topmap\r\n # record['topmap_lambda'] = topmap_lambda\r\n","sub_path":"ADSH_pytorch/DC_no_Ink.py","file_name":"DC_no_Ink.py","file_ext":"py","file_size_in_byte":20196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"136947007","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue May 25 17:29:35 2021\r\n\r\n@author: Nicolas Barragan\r\n\"\"\"\r\n\r\nimport cv2\r\nimport mediapipe as mp\r\nimport pyautogui\r\nimport numpy as np\r\n\r\nmp_face_detection = mp.solutions.face_detection\r\nmp_drawing = mp.solutions.drawing_utils\r\n\r\n# Stores the current state of the program\r\n# 0: Default, 1: Start, 2: Update, 3: Error\r\nestado = 0\r\n\r\n# capture\r\ncaptura = cv2.VideoCapture(0, cv2.CAP_DSHOW) # access the computer's built-in camera\r\n\r\n#ROI\r\nROI_superior = [300,50,400,150]\r\nROI_inferior = [300,250,400,350]\r\nROI_izquierda = [200,150,300,250]\r\nROI_derecha = [400,150,500,250]\r\n\r\nwith mp_face_detection.FaceDetection(min_detection_confidence=0.8) as face_detection:\r\n \r\n while(True):\r\n \r\n disponible, fotograma = captura.read()\r\n \r\n # Screen size\r\n height, width, _ = fotograma.shape\r\n \r\n # State machine\r\n if(disponible == True and estado==0):\r\n estado = 1 # The program starts\r\n elif(disponible == False and estado==0):\r\n estado = 3 # Error (camera not available or camera broken)\r\n \r\n # States\r\n if(estado == 1): #Start\r\n print(\"Screen size: (\" + str(width) + \",\" + str(height) + \")\")\r\n estado = 2\r\n \r\n elif(estado == 2): #Update\r\n #print(\"Update\")\r\n # prepare the frame to run face detection\r\n fotograma = cv2.flip(fotograma,1)\r\n #fotograma_alpha = np.zeros(fotograma.shape[:2], np.uint8)\r\n fotograma_alpha = fotograma.copy()\r\n alpha = 0.4\r\n fotograma_rgb = cv2.cvtColor(fotograma, cv2.COLOR_BGR2RGB)\r\n \r\n # Draw the regions of interest\r\n cv2.rectangle(fotograma,(ROI_superior[0],ROI_superior[1]),(ROI_superior[2],ROI_superior[3]),(0,255,0),2)\r\n cv2.rectangle(fotograma,(ROI_inferior[0],ROI_inferior[1]),(ROI_inferior[2],ROI_inferior[3]),(0,255,0),2)\r\n cv2.rectangle(fotograma,(ROI_izquierda[0],ROI_izquierda[1]),(ROI_izquierda[2],ROI_izquierda[3]),(0,255,0),2)\r\n cv2.rectangle(fotograma,(ROI_derecha[0],ROI_derecha[1]),(ROI_derecha[2],ROI_derecha[3]),(0,255,0),2)\r\n \r\n # Compute the face keypoints of interest\r\n resultados = face_detection.process(fotograma_rgb)\r\n \r\n if resultados.detections is not None:\r\n for detection in resultados.detections:\r\n # Nose (NOSE_TIP)\r\n xN = int(detection.location_data.relative_keypoints[2].x * width)\r\n yN = int(detection.location_data.relative_keypoints[2].y * height)\r\n # Draw a circle on the tip of the nose\r\n cv2.circle(fotograma,(xN,yN),10,(255,255,0), -1)\r\n \r\n # Detect whether the nose entered any region of interest\r\n #ROI_superior\r\n if(xN > ROI_superior[0] and xN < ROI_superior[2] and yN > ROI_superior[1] and yN < ROI_superior[3]):\r\n #print(\"The up arrow key was pressed\")\r\n cv2.putText(fotograma,'UP',(350,40), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255,0,0), 1, cv2.LINE_AA)\r\n #cv2.rectangle(fotograma,(ROI_superior[0],ROI_superior[1]),(ROI_superior[2],ROI_superior[3]),(0,0,255),-1)\r\n cv2.rectangle(fotograma_alpha,(ROI_superior[0],ROI_superior[1]),(ROI_superior[2],ROI_superior[3]),(0,0,255),-1)\r\n #keyboard.press_and_release('up')\r\n pyautogui.press('up')\r\n #ROI_inferior \r\n if(xN > ROI_inferior[0] and xN < ROI_inferior[2] and yN > ROI_inferior[1] and yN < ROI_inferior[3]):\r\n #print(\"The down arrow key was pressed\")\r\n cv2.putText(fotograma,'DOWN',(350,390), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255,0,0), 1, cv2.LINE_AA)\r\n #cv2.rectangle(fotograma,(ROI_inferior[0],ROI_inferior[1]),(ROI_inferior[2],ROI_inferior[3]),(0,0,255),-1)\r\n cv2.rectangle(fotograma_alpha,(ROI_inferior[0],ROI_inferior[1]),(ROI_inferior[2],ROI_inferior[3]),(0,0,255),-1)\r\n #keyboard.press_and_release('down')\r\n pyautogui.press('down')\r\n #ROI_izquierda \r\n if(xN > ROI_izquierda[0] and xN < ROI_izquierda[2] and yN > ROI_izquierda[1] and yN < ROI_izquierda[3]):\r\n #print(\"The left arrow key was pressed\")\r\n cv2.putText(fotograma,'LEFT',(100,200), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255,0,0), 1, cv2.LINE_AA)\r\n #cv2.rectangle(fotograma,(ROI_izquierda[0],ROI_izquierda[1]),(ROI_izquierda[2],ROI_izquierda[3]),(0,0,255),-1)\r\n cv2.rectangle(fotograma_alpha,(ROI_izquierda[0],ROI_izquierda[1]),(ROI_izquierda[2],ROI_izquierda[3]),(0,0,255),-1)\r\n #keyboard.press_and_release('left')\r\n pyautogui.press('left')\r\n #ROI_derecha \r\n if(xN > ROI_derecha[0] and xN < ROI_derecha[2] and yN > ROI_derecha[1] and yN < ROI_derecha[3]):\r\n #print(\"The right arrow key was pressed\")\r\n cv2.putText(fotograma,'RIGHT',(510,200), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255,0,0), 1, cv2.LINE_AA)\r\n #cv2.rectangle(fotograma,(ROI_derecha[0],ROI_derecha[1]),(ROI_derecha[2],ROI_derecha[3]),(0,0,255),-1)\r\n cv2.rectangle(fotograma_alpha,(ROI_derecha[0],ROI_derecha[1]),(ROI_derecha[2],ROI_derecha[3]),(0,0,255),-1)\r\n #keyboard.press_and_release('right')\r\n pyautogui.press('right')\r\n else:\r\n print(\"NONE\")\r\n \r\n # Show the capture\r\n fotograma = cv2.addWeighted(fotograma_alpha, alpha, fotograma, 1 - alpha, 0)\r\n cv2.imshow(\"Camara\", fotograma)\r\n \r\n # Keyboard commands\r\n if(cv2.waitKey(1) & 0xFF == ord('q')):\r\n # Release the camera and destroy windows\r\n print(\"Finishing\")\r\n break\r\n \r\n elif(estado == 3): #Error\r\n print(\"Error: camera not available\")\r\n break\r\n \r\n# release camera and destroy windows\r\ncaptura.release()\r\ncv2.destroyAllWindows()","sub_path":"HermitPurple.py","file_name":"HermitPurple.py","file_ext":"py","file_size_in_byte":6269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"77469237","text":"# ---------------------------------------------------------\n# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# ---------------------------------------------------------\nfrom abc import ABC\nfrom typing import Union\n\nfrom azure.ai.ml._restclient.v2023_04_01_preview.models import (\n BayesianSamplingAlgorithm as RestBayesianSamplingAlgorithm,\n)\nfrom azure.ai.ml._restclient.v2023_04_01_preview.models import GridSamplingAlgorithm as RestGridSamplingAlgorithm\nfrom azure.ai.ml._restclient.v2023_04_01_preview.models import RandomSamplingAlgorithm as RestRandomSamplingAlgorithm\nfrom azure.ai.ml._restclient.v2023_04_01_preview.models import SamplingAlgorithm as RestSamplingAlgorithm\nfrom azure.ai.ml._restclient.v2023_04_01_preview.models import SamplingAlgorithmType\nfrom azure.ai.ml.entities._mixins import RestTranslatableMixin\n\n\nclass SamplingAlgorithm(ABC, RestTranslatableMixin):\n def __init__(self) -> None:\n self.type = None\n\n @classmethod\n def _from_rest_object(cls, obj: RestSamplingAlgorithm) -> \"SamplingAlgorithm\":\n if not obj:\n return None\n\n sampling_algorithm = None\n if obj.sampling_algorithm_type == SamplingAlgorithmType.RANDOM:\n sampling_algorithm = RandomSamplingAlgorithm._from_rest_object(obj) # pylint: disable=protected-access\n\n if obj.sampling_algorithm_type == SamplingAlgorithmType.GRID:\n sampling_algorithm = GridSamplingAlgorithm._from_rest_object(obj) # pylint: disable=protected-access\n\n if obj.sampling_algorithm_type == SamplingAlgorithmType.BAYESIAN:\n sampling_algorithm = BayesianSamplingAlgorithm._from_rest_object(obj) # pylint: disable=protected-access\n\n return sampling_algorithm\n\n\nclass RandomSamplingAlgorithm(SamplingAlgorithm):\n \"\"\"Random Sampling Algorithm.\n\n :ivar type: Specifies the type of sampling algorithm. Set automatically to \"random\" for this class.\n :vartype type: str\n :param logbase: A positive number or e in string format to be used as base for log\n based random sampling.\n :type logbase: Union[float, str]\n :param rule: The specific type of random algorithm. Possible values include: \"random\",\n \"sobol\".\n :type rule: str\n :param seed: An integer to use as the seed for random number generation.\n :type seed: int\n \"\"\"\n\n def __init__(\n self,\n *,\n rule: str = None,\n seed: int = None,\n logbase: Union[float, str] = None,\n ) -> None:\n super().__init__()\n self.type = SamplingAlgorithmType.RANDOM.lower()\n self.rule = rule\n self.seed = seed\n self.logbase = logbase\n\n def _to_rest_object(self) -> RestRandomSamplingAlgorithm:\n return RestRandomSamplingAlgorithm(\n rule=self.rule,\n seed=self.seed,\n logbase=self.logbase,\n )\n\n @classmethod\n def _from_rest_object(cls, obj: RestRandomSamplingAlgorithm) -> \"RandomSamplingAlgorithm\":\n return cls(\n rule=obj.rule,\n seed=obj.seed,\n logbase=obj.logbase,\n )\n\n\nclass GridSamplingAlgorithm(SamplingAlgorithm):\n \"\"\"Grid Sampling Algorithm.\n\n :ivar type: Specifies the type of sampling algorithm. Set automatically to \"grid\" for this class.\n :vartype type: str\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self.type = SamplingAlgorithmType.GRID.lower()\n\n # pylint: disable=no-self-use\n def _to_rest_object(self) -> RestGridSamplingAlgorithm:\n return RestGridSamplingAlgorithm()\n\n @classmethod\n # pylint: disable=unused-argument\n def _from_rest_object(cls, obj: RestGridSamplingAlgorithm) -> \"GridSamplingAlgorithm\":\n return cls()\n\n\nclass BayesianSamplingAlgorithm(SamplingAlgorithm):\n \"\"\"Bayesian Sampling Algorithm.\n\n :ivar type: Specifies the type of sampling algorithm. 
Set automatically to \"bayesian\" for this class.\n :vartype type: str\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.type = SamplingAlgorithmType.BAYESIAN.lower()\n\n # pylint: disable=no-self-use\n def _to_rest_object(self) -> RestBayesianSamplingAlgorithm:\n return RestBayesianSamplingAlgorithm()\n\n @classmethod\n # pylint: disable=unused-argument\n def _from_rest_object(cls, obj: RestBayesianSamplingAlgorithm) -> \"BayesianSamplingAlgorithm\":\n return cls()\n","sub_path":"sdk/ml/azure-ai-ml/azure/ai/ml/entities/_job/sweep/sampling_algorithm.py","file_name":"sampling_algorithm.py","file_ext":"py","file_size_in_byte":4392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"13400724","text":"import os\n\nfrom pathlib import Path\nfrom unittest.mock import patch\n\nimport pytest\n\nfrom django.conf import settings\n\nfrom dockerizer.builder import DockerBuilder\nfrom factories.factory_build_jobs import BuildJobFactory\nfrom tests.utils import BaseTest\n\n\n@pytest.mark.dockerizer_mark\nclass TestDockerize(BaseTest):\n @patch('dockerizer.builder.APIClient')\n def test_get_requirements_and_setup_path_works_as_expected(self, _):\n build_job = BuildJobFactory()\n # Create a repo folder\n repo_path = os.path.join(settings.REPOS_MOUNT_PATH, 'repo')\n os.mkdir(repo_path)\n\n builder = DockerBuilder(build_job=build_job,\n repo_path=repo_path,\n from_image='busybox')\n assert builder.polyaxon_requirements_path is None\n assert builder.polyaxon_setup_path is None\n builder.clean()\n\n # Add a polyaxon_requirements.txt and polyaxon_setup.sh files to repo path\n Path(os.path.join(repo_path, 'polyaxon_requirements.txt')).touch()\n Path(os.path.join(repo_path, 'polyaxon_setup.sh')).touch()\n\n builder = DockerBuilder(build_job=build_job,\n repo_path=repo_path,\n from_image='busybox')\n assert builder.polyaxon_requirements_path == 'repo/polyaxon_requirements.txt'\n assert builder.polyaxon_setup_path == 'repo/polyaxon_setup.sh'\n builder.clean()\n\n # Delete previous files\n os.remove(os.path.join(repo_path, 'polyaxon_requirements.txt'))\n os.remove(os.path.join(repo_path, 'polyaxon_setup.sh'))\n\n # Add a requirements.txt and setup.sh files to repo path\n Path(os.path.join(repo_path, 'requirements.txt')).touch()\n Path(os.path.join(repo_path, 'setup.sh')).touch()\n\n builder = DockerBuilder(build_job=build_job,\n repo_path=repo_path,\n from_image='busybox')\n assert builder.polyaxon_requirements_path == 'repo/requirements.txt'\n assert builder.polyaxon_setup_path == 'repo/setup.sh'\n builder.clean()\n\n @patch('dockerizer.builder.APIClient')\n def test_render_works_as_expected(self, _):\n build_job = BuildJobFactory()\n\n # Create a repo folder\n repo_path = os.path.join(settings.REPOS_MOUNT_PATH, 'repo')\n os.mkdir(repo_path)\n\n # By default it should user FROM image declare WORKDIR and COPY code\n builder = DockerBuilder(build_job=build_job,\n repo_path=repo_path,\n from_image='busybox')\n\n dockerfile = builder.render()\n builder.clean()\n\n assert 'FROM busybox' in dockerfile\n assert 'WORKDIR {}'.format(builder.WORKDIR) in dockerfile\n assert 'COPY {}'.format(builder.folder_name) in dockerfile\n\n # Add env vars\n builder = DockerBuilder(build_job=build_job,\n repo_path=repo_path,\n from_image='busybox',\n env_vars=[('BLA', 'BLA')])\n\n dockerfile = builder.render()\n assert 'ENV BLA BLA' in dockerfile\n builder.clean()\n\n # Add a polyaxon_requirements.txt and polyaxon_setup.sh files to repo path\n Path(os.path.join(repo_path, 
'polyaxon_requirements.txt')).touch()\n Path(os.path.join(repo_path, 'polyaxon_setup.sh')).touch()\n # Add step to act on them\n build_steps = [\n 'pip install -r polyaxon_requirements.txt',\n './polyaxon_setup.sh'\n ]\n\n builder = DockerBuilder(build_job=build_job,\n repo_path=repo_path,\n from_image='busybox',\n build_steps=build_steps)\n\n dockerfile = builder.render()\n assert 'COPY {} {}'.format(\n builder.polyaxon_requirements_path, builder.WORKDIR) in dockerfile\n assert 'COPY {} {}'.format(\n builder.polyaxon_setup_path, builder.WORKDIR) in dockerfile\n\n assert 'RUN {}'.format(build_steps[0]) in dockerfile\n assert 'RUN {}'.format(build_steps[1]) in dockerfile\n builder.clean()\n","sub_path":"tests/test_dockerizer/test_dockerize.py","file_name":"test_dockerize.py","file_ext":"py","file_size_in_byte":4238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"471602546","text":"# game.py\n# -------\n# Licensing Information: Please do not distribute or publish solutions to this\n# project. You are free to use and extend these projects for educational\n# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by\n# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\n# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html\n\n\nclass Grid:\n def __init__(self, width, height, initialValue=False, bitRepresentation=None):\n if initialValue not in [False, True]:\n raise Exception('Grids can only contain booleans')\n self.CELLS_PER_INT = 30\n\n self.width = width\n self.height = height\n self.data = [[initialValue for y in range(height)] for x in range(width)]\n if bitRepresentation:\n self._unpackBits(bitRepresentation)\n\n def __getitem__(self, i):\n return self.data[i]\n\n def __setitem__(self, key, item):\n self.data[key] = item\n\n def __str__(self):\n out = [[str(self.data[x][y])[0] for x in range(self.width)] for y in range(self.height)]\n out.reverse()\n return '\\n'.join([''.join(x) for x in out])\n\n def __eq__(self, other):\n if other == None:\n return False\n return self.data == other.data\n\n def __hash__(self):\n # return hash(str(self))\n base = 1\n h = 0\n for l in self.data:\n for i in l:\n if i:\n h += base\n base *= 2\n return hash(h)\n\n def copy(self):\n g = Grid(self.width, self.height)\n g.data = [x[:] for x in self.data]\n return g\n\n def deepCopy(self):\n return self.copy()\n\n def shallowCopy(self):\n g = Grid(self.width, self.height)\n g.data = self.data\n return g\n\n def count(self, item = True):\n return sum([x.count(item) for x in self.data])\n\n def asList(self, key = True):\n list = []\n for x in range(self.width):\n for y in range(self.height):\n if self[x][y] == key:\n list.append( (x,y) )\n return list\n\n\n def packBits(self):\n bits = [self.width, self.height]\n currentInt = 0\n for i in range(self.height * self.width):\n bit = self.CELLS_PER_INT - (i % self.CELLS_PER_INT) - 1\n x, y = self._cellIndexToPosition(i)\n if self[x][y]:\n currentInt += 2 ** bit\n if (i + 1) % self.CELLS_PER_INT == 0:\n bits.append(currentInt)\n currentInt = 0\n bits.append(currentInt)\n return tuple(bits)\n\n def _cellIndexToPosition(self, index):\n x = index / self.height\n y = index % self.height\n return x, y\n\n def _unpackBits(self, bits):\n cell = 0\n for packed in bits:\n for bit in self._unpackInt(packed, self.CELLS_PER_INT):\n if cell == self.width * self.height:\n break\n x, y = self._cellIndexToPosition(cell)\n self[x][y] = bit\n cell += 1\n\n def 
_unpackInt(self, packed, size):\n bools = []\n if packed < 0:\n raise ValueError(\"must be a positive integer\")\n for i in range(size):\n n = 2 ** (self.CELLS_PER_INT - i - 1)\n if packed >= n:\n bools.append(True)\n packed -= n\n else:\n bools.append(False)\n return bools\n\n\n\nclass GameStateData:\n def __init__(self, prevState = None):\n if prevState != None:\n #self.food = prevState.food.shallowCopy()\n self.capsules = prevState.capsules[:]\n #self.agentStates = self.copyAgentStates(prevState.agentStates)\n self.layout = prevState.layout\n self._eaten = prevState._eaten\n self.score = prevState.score\n self._foodEaten = None\n self._capsuleEaten = None\n self._agentMoved = None\n self._lose = False\n self._win = False\n self.scoreChange = 0\n\n","sub_path":"Day19/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"504099154","text":"# Loop over each file in the folder\n# For each file:\n# Rotate the image 90° clockwise (270)\n# Resize the image from 192x192 to 128x128\n# Save the image to a new folder in .jpeg format\n# You can name the file whatever you want.\n# And make sure to save the updated images in the folder: /opt/icons/\n\n#!/usr/bin/env python3\n\nfrom PIL import Image\nimport sys\nimport os\n\n\ndef AdjustImages():\n\n size = (128, 128)\n graus = 270\n\n for root, dirs, file in os.walk(\"\"):\n\n print(dirs)\n\n for infile in file:\n f, e = os.path.splitext(infile)\n outfile = '/opt/icons/' + f\n try:\n im = Image.open(infile)\n im.rotate(graus).resize(size).convert(\n \"RGB\").save(outfile, \"jpeg\")\n print('Saved successfully!')\n\n except OSError as e:\n print(\"Could not convert {} {}\".format(infile, e))\n pass\n\n return 'Process finished successfully'\n\n\nif __name__ == '__main__':\n AdjustImages()\n","sub_path":"P01-Automation_Adjust_Images/ITAUT-ImagesManipulation.py","file_name":"ITAUT-ImagesManipulation.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"376110133","text":"import sqlite3 as sql\n\n\nconn2 = sql.connect(\"Contact_Details.sqlite\")\n\nwith conn2:\n cur = conn2.cursor()\n\n cur.execute(\"SELECT * FROM ContactDetails INNER JOIN Address on ContactDetails.ID = Address.ID\")\n\n rows = cur.fetchall()\n\nprint(rows[0])\n","sub_path":"Introduction to Programming/Assignment 3/DB Dummy.py","file_name":"DB Dummy.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"416928823","text":"'''\nA simple library for communication with the wemo insight switch.\n(c) Devin Gardella 2015 (dpg3@williams.edu)\n'''\nimport requests\nimport time\nimport sys\n\nclass WemoReader:\n def __init__(self, ip_addr, ignores):\n self.ip = ip_addr\n POSS_PORTS = [\"49152\",\"49153\",\"49154\"]\n #Try each port that I've seen the Wemo decide to use.\n for p in POSS_PORTS:\n try:\n self.port = p\n self.get_info()\n break\n except Exception as err:\n #Seems silly, but it works\n if not \"ConnectionRefusedError\" in str(err):\n raise\n\n if not self.port:\n print(\"Could not find port to connect to on ip: \" + self.ip)\n sys.exit()\n self.ig = ignores\n\n def _action(self,command,service,control,param_dict):\n header = {'Content-Type': 'text/xml; charset=\"utf-8\"',\n 'SOAPACTION': '\"' + service + '#' + command + '\"'}\n body=\"<?xml version='1.0' encoding='utf-8'?>\" + \\\n \"<s:Envelope xmlns:s='http://schemas.xmlsoap.org/soap/envelope/' s:encodingStyle='http://schemas.xmlsoap.org/soap/encoding/'>\" + \\\n \"<s:Body>\" 
+ \\\n \"<u:\" + command + \" xmlns:u='\" + service + \"'>\"\n \n for key,val in param_dict.items():\n body += \"<\" + key + \">\" + val + \"</\" + key + \">\"\n \n body += \"</u:\" + command + \">\" + \\\n \"</s:Body>\" + \\\n \"</s:Envelope>\"\n\n controlUrl= \"http://\" + self.ip + \":\" + self.port + control\n response = requests.post(controlUrl, body, headers=header)\n return response\n\n #Gets the current state of the Wemo device. \n #(Be aware there is a delay in the data)\n def get_info(self):\n state_definition = {\"0\": \"off\", \"1\" : \"on\", \n \"8\" : \"wemo on, connected device off\"}\n # state: 0 if off, 1 if on, 8 if on but load is off\n params = [\"device_state\", \"last_change\",\"on_for\",\n \"on_today\",\"on_total\",\"???\",\n \"???\", \"current_w\",\"today_w\",\n \"total_w\", \"power_threshold\"]\n \n wemo_res = self._action(\"GetInsightParams\",\"urn:Belkin:service:insight:1\",\"/upnp/control/insight1\" ,{}).text\n data_line = wemo_res.split(\"\\n\")[2]\n data = data_line[data_line.index(\">\") + 1: data_line.index(\" bins[7])[0]\n idx1[temp] = 0\n \n \n features[idx1,:] += val1*mag[i][j]/binspace\n features[idx,:] += mag[i][j]*(1-(val1/binspace))\n return features\n \ndef hogFeatures(x):\n numOr = 8\n binSize = 4\n #x = np.sqrt(x)\n gx_filt = np.asarray([-1,0,1]).reshape(1,-1)\n gy_filt = np.asarray([[-1],[0],[1]]).reshape(-1,1)\n Gx = np.zeros(x.shape)\n Gy = np.zeros(x.shape)\n for i in range(x.shape[2]):\n Gx[:,:,i] = convolve(x[:,:,i], gx_filt, mode='same')\n Gy[:,:,i] = convolve(x[:,:,i], gy_filt, mode='same')\n #print('shape',Gx.shape, Gy.shape)\n gradient_magnitude = np.sqrt(Gx**2 + Gy**2)\n #print(gradient_magnitude.shape)\n #print(np.max(Gx), np.max(Gy))\n gradient_orientation = np.arctan(Gy/(Gx+0.00000001))*180/np.pi + 90\n #print(gradient_orientation)\n #print(np.min(gradient_orientation), np.max(gradient_orientation))\n \n bins = np.linspace(0.,180.,numOr, endpoint=False)\n #print(bins)\n features = np.zeros((1,x.shape[2]))\n for i in range(0,x.shape[0],binSize):\n for j in range(0,x.shape[1],binSize):\n magBin = gradient_magnitude[i:i+binSize, j:j+binSize,:]\n oriBin = gradient_orientation[i:i+binSize, j:j+binSize,:]\n #print(magBin.shape, oriBin.shape)\n out = computeHist(magBin, oriBin, bins, binSize, numOr)\n features = np.concatenate((features, out), axis = 0)\n #print(features.shape)\n features = features[1:,:]\n \n #return np.sqrt(features/np.sqrt(np.sum(features, axis = 0, keepdims=True)))\n return features\n\ndef lbpFeatures(x):\n features = np.zeros((256, x.shape[2]))\n bitNum = np.asarray([[1,2,4],[8,0,16],[32,64,128]])\n for ind in range(x.shape[2]):\n singleImage = x[:,:,ind]\n listInt = []\n c = Counter()\n for i in range(1,x.shape[0]-1):\n for j in range(1,x.shape[1]-1):\n patch = singleImage[i-1:i+2,j-1:j+2] - singleImage[i,j]\n patch[patch>0] = 1\n patch[patch<0] = 0\n patch[1,1] = 0\n mappedInt = np.sum(patch*bitNum)\n listInt.append(mappedInt)\n c.update(listInt)\n indices = map(int, c.keys())\n c[0] = 0\n features[indices,ind] = c.values()\n #print(c.values()[0])\n #/np.linalg.norm(c.values())\n #output = np.sqrt(features)\n #features[:,ind] = features[:,ind]/np.sqrt(np.sum(features[:,ind]))\n #features = np.sqrt(features)\n #features = features/np.sqrt(np.sum(features, axis=0, keepdims=True))\n return features\n \ndef extractDigitFeatures(x, featureType):\n \n if featureType == 'pixel':\n features = pixelFeatures(x) # implement this\n elif featureType == 'hog':\n features = hogFeatures(x) # implement this\n elif featureType == 'lbp':\n features = lbpFeatures(x) # implement this \n\n return 
features\n","sub_path":"code/extractDigitFeatures.py","file_name":"extractDigitFeatures.py","file_ext":"py","file_size_in_byte":5357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"234242327","text":"class Employee:\n company = \"Google\"\n def getsalary(self, signature):\n print(f\"Salary for this employee working in {self.company} is {self.salary}\\n {signature}\")\n\n\n @staticmethod # a static method doesn't take self in its parameters \n def greet():\n print(\"Good morning, sir\")\n\n @staticmethod\n def time():\n print(\"The time is 9 am\")\n\n# for harry\nharry = Employee()\nharry.greet() # greeting\nharry.salary = 100000\nharry.getsalary(\"Thanks!\") # the string between \" \" is the signature\n\n\n# for rajni\nrajni = Employee()\nrajni.salary = 200789\nrajni.getsalary(\"Thanks!\") # this call is similar to 'Employee.getsalary(rajni)'\nrajni.time() # call the static method time\n\n","sub_path":"Chap10_oops/06_self_and_staticmethod.py","file_name":"06_self_and_staticmethod.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"653782360","text":"from nltk.tokenize import sent_tokenize\r\nimport string\r\nimport networkx as nx\r\nimport matplotlib.pyplot as plt\r\n\r\ndef minimum(a,b):\r\n if a\", i,j)\r\n weight = weight + word_scores[w1]\r\n \r\n print(weight)\r\n w = weight\r\n G.add_edge(temp1,temp2,weight=w)\r\n\r\nnx.draw(G)\r\nplt.show()\r\n\r\nsent_scores = nx.pagerank(G,0.85)\r\n\r\nprint(sent_scores)\r\n\r\nprint(\"\\n\\n\\n final scores \\n\\n\\n\")\r\n\r\ns = [(k, sent_scores[k]) for k in sorted(sent_scores, key=sent_scores.get, reverse=True)]\r\nfor k, sent_scores[k] in s:\r\n v = sent_scores[k]\r\n print(k,v)\r\n\r\nprint(\"\\n\\n\\n final summary \\n\\n\\n\")\r\n\r\nsize = minimum(10,len(s))\r\n\r\nfor i in range(size):\r\n print(s[i][0],\".\")\r\n","sub_path":"TextRank/old_sent_ex.py","file_name":"old_sent_ex.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"338802003","text":"__author__ = 'cbrown'\n\ndef body_mass_index(weight, height):\n BMI = weight / (height**2)\n if BMI < 18.5:\n return(\"under\")\n elif 18.5 <= BMI and BMI < 25.0:\n return(\"normal\")\n elif 25.0 <= BMI and BMI <30:\n return(\"over\")\n elif 30<= BMI:\n return(\"obese\")\n\nf = open(\"data/body-mass-index-data.txt\",\"r\")\n\ndata = f.read()\ndata = [float(n) for n in data.split()]\n\ni = 0\nwhile i\n# https://github.com/NimaBavari\n#\n# @description: Finds the most highly supercomposite\n# number in any given neighbourhood of\n# any given number, i.e, given a positive\n# integer n and a real 0 < c < 1, finds\n# N in (1 +/- c) * n for which d(N) / N\n# is maximum, where d(N) denotes the number\n# of positive integer divisors of N.\n#\n# @date: Feb 21, 2018 Wednesday\n##########################################################\n\nimport math\n\n\ndef is_prime(num):\n if num <= 1:\n return False\n if num <= 3:\n return True\n if num % 2 == 0 or num % 3 == 0:\n return False\n for i in range(5, int(math.sqrt(num) + 1), 6):\n if num % i == 0 or num % (i + 2) == 0:\n return False\n return True\n\n\ndef get_abundancy(n):\n counter = 0\n for i in range(1, n + 1):\n if n % i == 0:\n counter += 1\n return counter / n\n\n\ndef get_most_abundant(collection):\n abundancies = []\n for number in collection:\n abundancies.append(get_abundancy(number))\n return 
collection[abundancies.index(max(abundancies))]\n\n\ndef generate_primes(n):\n primes = [2]\n for i in range(3, n + 1, 2):\n if is_prime(i):\n primes.append(i)\n return primes\n\n\ndef build_largest_primorial_under(n):\n prod = 1\n for prime in generate_primes(int(math.sqrt(n))):\n prod *= prime\n if prod > n:\n prod /= prime\n break\n return int(prod)\n\n\ndef generate_super_composite(given_number, proximity):\n upper_bound = int((1 + proximity) * given_number)\n lower_bound = int((1 - proximity) * given_number)\n largest_primorial = build_largest_primorial_under(lower_bound)\n upper_multiple = int(math.floor(upper_bound / largest_primorial))\n lower_multiple = int(math.ceil(lower_bound / largest_primorial))\n try:\n most_abundant = get_most_abundant(\n range(lower_multiple, upper_multiple + 1))\n return largest_primorial * most_abundant\n except ValueError:\n return None\n\nprint(generate_super_composite(180000000000000, 0.3))\n","sub_path":"supercomposite.py","file_name":"supercomposite.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"207206315","text":"#!/usr/bin/env python\n# (C) 2017 OpenEye Scientific Software Inc. All rights reserved.\n#\n# TERMS FOR USE OF SAMPLE CODE The software below (\"Sample Code\") is\n# provided to current licensees or subscribers of OpenEye products or\n# SaaS offerings (each a \"Customer\").\n# Customer is hereby permitted to use, copy, and modify the Sample Code,\n# subject to these terms. OpenEye claims no rights to Customer's\n# modifications. Modification of Sample Code is at Customer's sole and\n# exclusive risk. Sample Code may require Customer to have a then\n# current license or subscription to the applicable OpenEye offering.\n# THE SAMPLE CODE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED. OPENEYE DISCLAIMS ALL WARRANTIES, INCLUDING, BUT\n# NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n# PARTICULAR PURPOSE AND NONINFRINGEMENT. 
In no event shall OpenEye be\n# liable for any damages or liability in connection with the Sample Code\n# or its use.\n\nfrom __future__ import print_function\nimport sys\n\nfrom openeye import oechem\nfrom openeye import oezap\n\n\ndef main(argv=[__name__]):\n if len(argv) != 3:\n oechem.OEThrow.Usage(\"%s \" % argv[0])\n\n protein = oechem.OEMol()\n\n ifs = oechem.oemolistream()\n if not ifs.open(argv[1]):\n oechem.OEThrow.Fatal(\"Unable to open %s for reading\" % argv[1])\n oechem.OEReadMolecule(ifs, protein)\n\n oechem.OEAssignBondiVdWRadii(protein)\n oechem.OEMMFFAtomTypes(protein)\n oechem.OEMMFF94PartialCharges(protein)\n print(\"protein: \" + protein.GetTitle())\n\n epsin = 1.0\n bind = oezap.OEBind()\n bind.GetZap().SetInnerDielectric(epsin)\n bind.SetProtein(protein)\n results = oezap.OEBindResults()\n\n if not ifs.open(argv[2]):\n oechem.OEThrow.Fatal(\"Unable to open %s for reading\" % argv[2])\n ifs.SetConfTest(oechem.OEIsomericConfTest())\n\n ligand = oechem.OEMol()\n while oechem.OEReadMolecule(ifs, ligand):\n oechem.OEAssignBondiVdWRadii(ligand)\n oechem.OEMMFFAtomTypes(ligand)\n oechem.OEMMFF94PartialCharges(ligand)\n print(\"ligand: %s has %d conformers\" %\n (ligand.GetTitle(), ligand.NumConfs()))\n\n for conf in ligand.GetConfs():\n bind.Bind(conf, results)\n print(\" conf# %d be = %f\" %\n (conf.GetIdx(), results.GetBindingEnergy()))\n return 0\n\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv))\n","sub_path":"venv/Lib/site-packages/openeye/examples/zap/bind.py","file_name":"bind.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"517447004","text":"import urllib\n\nfrom celery.task import *\nfrom django.core.mail import send_mail\n\nfrom albums.models import Album, Image\nfrom datetime import datetime, timedelta\nfrom twitter import *\nfrom django.conf import settings\n\n\ndef twitter_images(album):\n t = Twitter(auth=OAuth(\n settings.ACCESS_TOKEN,\n settings.ACCESS_SECRET,\n settings.CONSUMER_KEY,\n settings.CONSUMER_SECRET\n ))\n\n result = t.search.tweets(\n q=album.hashtag,\n result_type='recent',\n count=10,\n exclude_replies=True\n )\n\n media_files = []\n for status in result['statuses']:\n user = status['user'].get('screen_name')\n media = status['entities'].get('media', [])\n used_urls = []\n for item in album.image_set.all():\n used_urls.append(item.media_url)\n\n if len(media) > 0:\n if not media[0]['media_url'] in used_urls:\n media_url = media[0]['media_url']\n media_files.append(media_url)\n content = urllib.urlretrieve(media_url, filename=\"uploads/\" + media_url.split('/')[-1] )\n Image(image=media_url.split('/')[-1], album=album, user=user,\n media_url=media_url).save()\n mail(album)\n\n\ndef mail(album):\n if album.image_set.all().count() % 100 == 0 and album.image_set.all().count() < 501:\n hashtag = str(album.hashtag)\n hashtag = hashtag.encode('utf-8')\n quantity = str(album.image_set.all().count())\n send_mail(hashtag + ' has ' + quantity + ' photos', \"I'm awesome!\",\n 'Hashtag@EversnapApp.com',\n ['monokbaev@gmail.com'], fail_silently=False)\n\n@periodic_task(run_every=timedelta(minutes=1))\ndef fill_the_album():\n for album in Album.objects.all():\n twitter_images(album)","sub_path":"albums/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"443507608","text":"\"\"\"\r\n Mouse Event\r\n\r\n Mouse as a paint 
brush\r\n\r\n OpenCV provides a facility to use the mouse as a paint brush or a drawing tool.\r\n Whenever any mouse event occurs on the window screen, it can draw anything.\r\n Mouse events can be left-button up, double-click, etc. It gives us the\r\n coordinates (x, y) for every mouse event. By using these coordinates, we can draw\r\n whatever we want. To get the list of all available events, run the following code in the Python terminal.\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\n\r\n# Creating mouse callback function\r\ndef draw_circle(event, x, y, flags, param):\r\n if event == cv2.EVENT_LBUTTONDBLCLK:\r\n # create a circle\r\n cv2.circle(img, (x, y), 100, (255, 255, 0), -1)\r\n\r\n\r\n# Creating a black image, a window and bind the function to window\r\nimg = np.zeros((512, 512, 3), np.uint8)\r\n\r\n# window name\r\ncv2.namedWindow('image')\r\n\r\n# set mouse event\r\ncv2.setMouseCallback('image', draw_circle)\r\n\r\nwhile (1):\r\n\r\n # display window\r\n cv2.imshow('image', img)\r\n\r\n if cv2.waitKey(20) & 0xFF == 27:\r\n break\r\ncv2.destroyAllWindows()\r\n","sub_path":"Mouse_Event/Draw_Circle.py","file_name":"Draw_Circle.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"160061373","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# BackPropagation: the process of computing how much each factor (W, b, X) influences the output (Y), i.e., their derivatives\n\nimport torch\ntorch.manual_seed(777)\n\n\n# In[2]:\n\n\nX = torch.FloatTensor([[0,0], [0,1], [1,0], [1,1]])\nY = torch.FloatTensor([[0], [1], [1], [0]])\n\n\n# In[3]:\n\n\n# 2 Layers\n\n'''\n# L1\nw1 = torch.Tensor(2,2) # 2 inputs -> 2 outputs\nb1 = torch.Tensor(2) # 2 bias\n# L2\nw2 = torch.Tensor(2,1) # 2 inputs -> output\nb2 = torch.Tensor(1) # 1 bias\n'''\n\nl1 = torch.nn.Linear(2, 2, bias=True)\nl2 = torch.nn.Linear(2, 1, bias=True)\n\n\n# In[4]:\n\n\n'''\ndef sigmoid(x):\n return 1.0 / (1.0 + torch.exp(-x))\n\n# Derivative of sigmoid\ndef sigmoid_prime(x):\n return sigmoid(x) * (1 - sigmoid(x))\n'''\n\nsigmoid = torch.nn.Sigmoid()\nmodel = torch.nn.Sequential(l1, sigmoid, l2, sigmoid)\n\n\n# In[5]:\n\n\n'''\nlearning_rate = 1\nfor step in range(10001):\n # Forward\n l1 = torch.add(torch.matmul(X, w1), b1)\n a1 = sigmoid(l1)\n l2 = torch.add(torch.matmul(a1, w2), b2)\n Y_pred = sigmoid(l2)\n \n cost = -torch.mean(Y * torch.log(Y_pred) + (1 - Y) * torch.log(1 - Y_pred)) # BCELoss\n \n # BackPropagation\n \n # derivative of the cost function (BCE)\n d_Y_pred = (Y_pred - Y) / (Y_pred * (1.0 - Y_pred) + 1e-7)\n \n # L2\n d_l2 = d_Y_pred * sigmoid_prime(l2) # compute sigmoid derivative\n d_b2 = d_l2 # compute bias gradient\n d_w2 = torch.matmul(torch.transpose(a1, 0, 1), d_b2) # compute weight gradient\n \n # L1\n d_a1 = torch.matmul(d_b2, torch.transpose(w2, 0, 1))\n d_l1 = d_a1 * sigmoid_prime(l1)\n d_b1 = d_l1\n d_w1 = torch.matmul(torch.transpose(X, 0, 1), d_b1)\n \n \n # Update Weights and Biases (Gradient Descent)\n w1 = w1 - learning_rate * d_w1\n b1 = b1 - learning_rate * torch.mean(d_b1, 0)\n w2 = w2 - learning_rate * d_w2\n b2 = b2 - learning_rate * torch.mean(d_b2, 0)\n \n \n if step % 1000 == 0:\n print(step, cost.item())\n'''\n\ncriterion = torch.nn.BCELoss()\noptimizer = torch.optim.SGD(model.parameters(), lr=1)\n\n\n# In[6]:\n\n\nfor step in range(10001):\n hypothesis = model(X)\n cost = criterion(hypothesis, Y)\n \n optimizer.zero_grad()\n cost.backward()\n optimizer.step()\n \n if step % 1000 == 0:\n print(step, cost.item())\n\n","sub_path":"Lab/Lab-08/Lab-08-2 Multi Layer Perceptron (XOR 
BackPropagation).py","file_name":"Lab-08-2 Multi Layer Perceptron (XOR BackPropagation).py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"203794004","text":"\nimport csv\nfrom pymongo import MongoClient\nimport pymongo\nfrom datetime import datetime\nimport string\nfrom pymongo_hadoop.input import BSONInput\nimport gc\n\nbad = ['news', 'politics', 'cnn', 'fox', 'cbs', 'nbc', 'abc', 'time', 'gma', 'bbc', 'reuters'] #augment with more stuff maybe\ndem2012 = ['obama', 'barack', 'joe', 'biden', 'liberal', 'democrat']\nrep2012 = ['romney', 'mitt', 'gop', 'paul', 'ryan', 'conservative', 'republican']\ndem2016 = ['hillary', 'clinton', 'democrat', 'liberal']\nrep2016 = ['donald', 'trump', 'gop', 'conservative', 'republican']\nstate_abbr = {}\n\nstates = set([\"AL\", \"AK\", \"AZ\", \"AR\", \"CA\", \"CO\", \"CT\", \"DC\", \"DE\", \"FL\", \"GA\", \n\t\t \"HI\", \"ID\", \"IL\", \"IN\", \"IA\", \"KS\", \"KY\", \"LA\", \"ME\", \"MD\", \n\t\t \"MA\", \"MI\", \"MN\", \"MS\", \"MO\", \"MT\", \"NE\", \"NV\", \"NH\", \"NJ\", \n\t\t \"NM\", \"NY\", \"NC\", \"ND\", \"OH\", \"OK\", \"OR\", \"PA\", \"RI\", \"SC\", \n\t\t \"SD\", \"TN\", \"TX\", \"UT\", \"VT\", \"VA\", \"WA\", \"WV\", \"WI\", \"WY\"])\n\nBUFFER_SIZE = 200000\n\ndef readStates():\n\n\tfilename = 'states.txt'\n\n\tstates = {}\n\n\twith open(filename, 'rb') as fin:\n\t\ttsvin = csv.reader(fin, delimiter='\\t')\n\t\tfor row in tsvin:\n\t\t\tstates[row[0]] = row[1].lower()\n\t\t\tstate_abbr[row[1].lower()] = True\n\n\treturn states\n\ndef makeUser(string):\n\n\tfor b in bad:\n\t\tif string.find(b) != -1:\n\t\t\treturn None\n\n\tuser = {}\n\n\tuser['age'] = None\n\tuser['race'] = None\n\tuser['gender'] = None\n\tuser['weight'] = 1.0\n\n\tstring = string.split(' ')\n\tstring[1] = string[1][1:]\n\tstring[-1] = string[-1][:-1]\n\tuser['handle'] = string[0]\n\tuser['first'] = string[1]\n\tuser['last'] = string[-1]\n\n\treturn user\n\ndef getSubject(string, dem, rep):\n\tstring = string.lower()\n\td = False\n\tr = False\n\tfor x in dem:\n\t\tif string.find(x) != -1:\n\t\t\td = True\n\t\t\tbreak\n\tfor x in rep:\n\t\tif string.find(x) != -1:\n\t\t\tr = True\n\t\t\tbreak\n\tif d == True and r == False:\n\t\treturn 'd'\n\tif d == False and r == True:\n\t\treturn 'r'\n\treturn None\n\ndef readTweets(filename, database):\n\n\tclient = MongoClient()\n\tdb = client[database]\n\ttweets = []\n\tusers = []\n\n\treadStates()\n\n\twith open(filename, 'rb') as fin: \n\t\ttsvin = csv.reader(fin, delimiter='\\t')\n\t\tnext(tsvin)\n\t\tfor row in tsvin:\n\t\t\tif row[2] != 'us' or row[3] not in state_abbr or row[13] != 'en':\n\t\t\t\tcontinue\n\t\t\ttweet = {}\n\t\t\tuser = makeUser(row[8])\n\t\t\tsubject = getSubject(row[14], dem2012, rep2012)\n\t\t\tif user == None or subject == None:\n\t\t\t\tcontinue\n\t\t\ttweet['id'] = row[0]\n\t\t\ttweet['state'] = row[3]\n\t\t\ttweet['lat'] = float(row[6])\n\t\t\ttweet['lon'] = float(row[5])\n\t\t\tdate = row[7]\n\t\t\ttweet['date'] = datetime(int(date[0:4]), int(date[5:7]), int(date[8:10]), int(date[11:13]), int(date[14:16]))\n\t\t\ttweet['text'] = row[14]\n\t\t\ttweet['user'] = user['handle']\n\t\t\ttweet['subject'] = subject\n\t\t\ttweets.append(tweet)\n\t\t\tusers.append(user)\n\n\tdb.tweets.create_index([('id', pymongo.ASCENDING)], unique=True)\n\tdb.users.create_index([('handle', pymongo.ASCENDING)], unique=True)\n\tdb.tweets.insert_many(tweets, ordered=True)\n\ttry:\n\t\tdb.users.insert_many(users, ordered=False)\n\texcept 
pymongo.errors.BulkWriteError:\n\t\tpass\n\n\tprint(str(db.tweets.count()) + ' tweets')\n\tprint(str(db.users.count()) + ' users')\n\ndef isValidDoc(doc):\n\tif doc['published'].year < 2016:\n\t\treturn False\n\tif doc['retwitt']:\n\t\treturn False\n\tif doc['geoloc'] == None:\n\t\treturn False\n\tif doc['geoloc']['address']['countrycode'] != 'US' or doc['geoloc']['address']['statecode'] not in states:\n\t\treturn False\n\tif doc['lang'] != 'en':\n\t\treturn False\n\tif 'author' in doc:\n\t\tname = doc['author']['name'].lower()\n\telse:\n\t\tname = doc['authors'][0]['name'].lower()\n\tfor b in bad:\n\t\tif name.find(b) != -1:\n\t\t\treturn False\n\treturn True\n\ndef filter(database):\n\n\tclient = MongoClient()\n\tdb = client[database]\n\tbad_names = set([])\n\tfor t in db.tweets.find({'created_at': {'$gte': datetime(2016, 6, 1)}}):\n\t\tx = t\n\t\tif 'author' not in t:\n\t\t\tdb.tweets.update_one(\n\t\t\t\t{'_id': t['_id']}, \n\t\t\t\t{\n\t\t\t\t\t'$set': {'author': t['authors'][0]}, \n\t\t\t\t\t'$unset': {'authors': ''}\n\t\t\t\t}\n\t\t\t)\n\t\t\tx = db.tweets.find_one({'_id': t['_id']})\n\t\tif not isValidDoc(x):\n\t\t\tname = x['author']['name']\n\t\t\tbad_names.add(name)\n\tprint('test1')\n\tbad_names = list(bad_names)\n\tdb.tweets.delete_many({'author.name': {'$in': bad_names}})\n\tprint('test2')\n\tdb.users.delete_many({'name': {'$in': bad_names}})\n\n\ndef readBSON(filename, database):\n\n\tclient = MongoClient()\n\tdb = client[database]\n\tdb.tweets.drop()\n\tdb.users.drop()\n\tprint('dbs cleared')\n\tdb.tweets.create_index([('created_at', pymongo.ASCENDING)], unique=False)\n\tdb.users.create_index([('name', pymongo.ASCENDING)], unique=True)\n\ttweets = []\n\tusers = []\n\tcount = 0\n\n\twith open(filename, 'rb') as fin:\n\t\tbs = BSONInput(fin)\n\t\tfor doc in bs:\n\t\t\tif not isValidDoc(doc):\n\t\t\t\tcontinue\n\t\t\tdoc['subject'] = getSubject(doc['title'], dem2016, rep2016)\n\t\t\tif doc['subject'] == None:\n\t\t\t\tcontinue\n\t\t\tuser = doc['authors'][0]\n\t\t\ttweets.append(doc)\n\t\t\tusers.append(user)\n\t\t\tif len(tweets) == BUFFER_SIZE:\n\t\t\t\tprint(doc['published'])\n\t\t\t\ttry:\n\t\t\t\t\tdb.tweets.insert_many(tweets, ordered=False)\n\t\t\t\t\tdb.users.insert_many(users, ordered=False)\n\t\t\t\texcept pymongo.errors.BulkWriteError:\n\t\t\t\t\tpass\n\t\t\t\tdel tweets[:]\n\t\t\t\tdel users[:]\n\t\t\t\tgc.collect()\n\t\t\t\tcount += 0.2\n\t\t\t\tprint('%.1f million tweets' %count)\n\t\tprint(doc['published'])\n\t\ttry:\n\t\t\tdb.tweets.insert_many(tweets, ordered=False)\n\t\t\tdb.users.insert_many(users, ordered=False)\n\t\texcept pymongo.errors.BulkWriteError:\n\t\t\tpass\n\t\tdel tweets[:]\n\t\tdel users[:]\n\t\tgc.collect()\n\t\tcount += 0.2\n\t\tprint('%.1f million tweets' %count)\n\n\tprint('Number of Tweets: %d' %db.tweets.find().count())\n\tprint('Number of Users: %d' %db.users.find().count())\n\ndef raceParse(x):\n\tif x == '(S)':\n\t\treturn float(0)\n\treturn float(x)\n\ndef readSurnameRace(database, day):\n\n\tfilename = 'app_c.csv'\n\n\tnames = {}\n\tclient = MongoClient()\n\tdb = client[database]\n\n\twith open(filename, 'rb') as fin:\n\t\tcsvin = csv.reader(fin)\n\t\tnext(csvin)\n\t\tfor row in csvin:\n\t\t\tnames[row[0].lower()] = (raceParse(row[5]), raceParse(row[6]), raceParse(row[7]), raceParse(row[10])) #(white, black, asian, hispanic)\n\n\tfor tweet in db.tweets.find({'created_at': {'$gte': day}}):\n\t\tn = tweet['author']['full_name'].split(' ')[-1].lower()\n\t\tif n in names:\n\t\t\tdb.users.update_one(\n\t\t\t\t{'_id': tweet['_id']}, 
\n\t\t\t\t{'$set': {'race': names[n]}}\n\t\t\t)\n\t\telse:\n\t\t\tdb.users.update_one(\n\t\t\t\t{'_id': tweet['_id']}, \n\t\t\t\t{'$set': {'race': None}}\n\t\t\t)\n\ndef readFirstnameGender(database, day):\n\n\tfilename1 = 'male_names.txt'\n\tfilename2 = 'female_names.txt'\n\n\tnames = {}\n\tprob = {}\n\tclient = MongoClient()\n\tdb = client[database]\n\n\twith open(filename1, 'rb') as fin:\n\t\tfor line in fin:\n\t\t\tx = line.find(' ')\n\t\t\tnames[line[:x].lower()] = 'm'\n\t\t\tprob[line[:x].lower()] = float(line[15:20])\n\n\twith open(filename2, 'rb') as fin:\n\t\tfor line in fin:\n\t\t\tx = line.find(' ')\n\t\t\tname = line[:x].lower()\n\t\t\tp = float(line[15:20])\n\t\t\tif name in names:\n\t\t\t\tpr = p / (p + prob[name])\n\t\t\t\tif pr >= 0.80:\n\t\t\t\t\tnames[name] = 'f'\n\t\t\t\telif pr <= 0.20:\n\t\t\t\t\tnames[name] = 'm'\n\t\t\t\telse:\n\t\t\t\t\tdel names[name]\n\t\t\telse:\n\t\t\t\tnames[name] = 'f'\n\n\tfor tweet in db.tweets.find({'created_at': {'$gte': day}}):\n\t\tn = tweet['author']['full_name'].split(' ')[0].lower()\n\t\tif n in names:\n\t\t\tdb.tweets.update_one(\n\t\t\t\t{'_id': tweet['_id']},\n\t\t\t\t{'$set': {'author.gender': names[n]}}\n\t\t\t)\n\t\telse:\n\t\t\tdb.tweets.update_one(\n\t\t\t\t{'_id': tweet['_id']},\n\t\t\t\t{'$set': {'author.gender': None}}\n\t\t\t)\n\n\t'''\n\tfor user in db.users.find():\n\t\tif user['first'].lower() in names:\n\t\t\tdb.users.update_one(\n\t\t\t\t{'handle': user['handle']}, \n\t\t\t\t{'$set': {'gender': names[user['first'].lower()]}}\n\t\t\t)\n\t\telse:\n\t\t\tdb.users.update_one(\n\t\t\t\t{'handle': user['handle']},\n\t\t\t\t{'$set': {'gender': None}}\n\t\t\t)\n\t'''\n","sub_path":"reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":7555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"318339422","text":"from pickle import Pickler, Unpickler, loads\nfrom NNet import NNetWrapper as nn\nfrom dotted_dict import DottedDict as dotdict\nfrom random import shuffle\nimport time, os, sys\nimport torch\nfrom sklearn.model_selection import train_test_split\n#from tensorboard_logger import configure, log_value\nfrom tensorboardX import SummaryWriter\n\nargs = dotdict({\n 'modelspath': './models/',\n 'examplespath': './examples/',\n 'epochs': 100, # best 25,\n 'validation': True,\n 'early_stopping': False,\n 'save_model': True\n})\n\nclass TrainingClient:\n def __init__(self):\n # self.trainExamplesHistory = [] # history of examples from args.numItersForTrainExamplesHistory latest iterations\n self.nnet = nn()\n\n def loadTrainExamples(self):\n # modelFile = os.path.join(args.examplespath, 'best.pth.tar')\n # examplesFile = modelFile+\".examples\"\n examplesFile = os.path.join(args.examplespath, '0.pth.tar_6.examples')\n if not os.path.isfile(examplesFile):\n print(examplesFile)\n r = input(\"File with trainExamples not found. Continue? [y|n]\")\n if r != \"y\":\n sys.exit()\n else:\n print(\"File with trainExamples found. 
Read it.\")\n with open(examplesFile, \"rb\") as f:\n return Unpickler(f).load()\n f.closed\n\nif __name__==\"__main__\":\n # print('starting...')\n client = TrainingClient()\n\n # load neural network\n # print(\"Load current neural network\")\n # client.nnet.load_checkpoint(folder=args.modelspath, filename='0-28b.pth.tar')\n\n # load trainexamples\n print(\"Load trainExamples from file\")\n trainExamplesHistory = client.loadTrainExamples()\n\n # shuffle examples before training => possibly not needed because network takes random samples for training\n trainExamples = []\n for e in trainExamplesHistory:\n trainExamples.extend(e)\n shuffle(trainExamples)\n \n # configure logger\n #configure(\"logs/run-1\", flush_secs=5)\n if args.validation:\n training_writer = SummaryWriter('runs/training-39')\n test_writer = SummaryWriter('runs/test-39')\n dif_writer = SummaryWriter('runs/dif-39')\n max_loss_pi_dif = float('-inf')\n # Split training and test data\n examples_train, examples_test = train_test_split(trainExamples, test_size=0.20)\n\n # train neural network\n for epoch in range(args.epochs):\n print('EPOCH ::: ' + str(epoch+1))\n\n if args.validation:\n loss_pi_train, loss_v_train = client.nnet.train(examples_train)\n # data grouping by `slash`\n training_writer.add_scalar('loss_pi', loss_pi_train, epoch+1)\n training_writer.add_scalar('loss_v', loss_v_train, epoch+1)\n\n loss_pi_test, loss_v_test = client.nnet.test(examples_test)\n test_writer.add_scalar('loss_pi', loss_pi_test, epoch+1)\n test_writer.add_scalar('loss_v', loss_v_test, epoch+1)\n\n loss_pi_dif = loss_pi_test - loss_pi_train\n loss_v_dif = loss_v_test - loss_v_train\n dif_writer.add_scalar('dif_pi', loss_pi_dif, epoch+1)\n dif_writer.add_scalar('dif_v', loss_v_dif, epoch+1)\n\n # Early stopping\n if args.early_stopping and max_loss_pi_dif < loss_pi_dif:\n max_loss_pi_dif = loss_pi_dif\n if max_loss_pi_dif > 0:\n print('Early stopping because difference in loss_pi between training and test is positive and growing.')\n break\n else:\n # Train normally on full data\n client.nnet.train(trainExamples)\n # TODO: We could also save only training progress losses for plotting\n\n # Save trained neural network after each epoch\n if args.save_model:\n print(\"Saving trained neural network\")\n # client.nnet.save_checkpoint(folder=args.modelspath, filename='0-38.pth.tar')\n client.nnet.save_checkpoint(folder=args.modelspath, filename=f'0-{epoch+1}.pth.tar')\n\n # TODO: Final model saving needed in case of early stopping?","sub_path":"remote/training_client.py","file_name":"training_client.py","file_ext":"py","file_size_in_byte":4120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"36132192","text":"import manifolds\nfrom manifolds.etc.pitch._helpers.field_number_to_field import \\\n field_number_to_field\n\n\ndef field_id_to_segmented_field(field_id):\n letter = field_id[0]\n if letter == 'L':\n direction = 'left'\n elif letter == 'R':\n direction = 'right'\n else:\n raise ValueError\n field_number = int(field_id[1])\n field = field_number_to_field(field_number)\n segmented_field = manifolds.partition_to_avoid_octave_adjacencies(\n field, direction)\n return segmented_field\n","sub_path":"manifolds/etc/pitch/_helpers/field_id_to_segmented_field.py","file_name":"field_id_to_segmented_field.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"496807092","text":"import os\nimport sys\nfrom 
rich.console import Console\n\n\nif sys.platform.startswith('win'):\n    is_win = True\n    dir_char = '\\\\'\nelse:\n    is_win = False\n    dir_char = '/'\nQproDefaultConsole = Console()\nQproErrorString = '[bold red][ERRO]'\nQproInfoString = '[bold cyan][INFO]'\nQproWarnString = '[bold yellow][WARN]'\nname = 'QuickProject'\n\n\nclass SshProtocol:\n    @staticmethod\n    def post_folder(user, domain, target, port, path):\n        status = os.system('scp -P %s -r %s %s' % (port, path, user + '@\\\\[' + domain + '\\\\]:' + target + path))\n        return status\n\n    @staticmethod\n    def post_file(user, domain, target, port, path):\n        status = os.system('scp -P %s %s %s' % (port, path, user + '@\\\\[' + domain + '\\\\]:' + target + path))\n        return status\n\n    @staticmethod\n    def post_all_in_folder(user, domain, target, port):\n        status = os.system('scp -P %s -r * %s' % (port, user + '@\\\\[' + domain + '\\\\]:' + target))\n        return status\n\n    @staticmethod\n    def get_file_or_folder(user, domain, target, port, path):\n        return os.system('scp -P %s -r %s %s' % (port, user + '@\\\\[' + domain + '\\\\]:' + target + path, path))\n\n    @staticmethod\n    def ls(user, domain, target, port, path):\n        # unlike scp, ssh takes the port with lowercase -p\n        return os.system(f\"ssh -p {port} {user + '@' + domain} 'ls {target + path}'\")\n\n    @staticmethod\n    def ssh(user, domain, target, port):\n        return os.system(f\"ssh -p {port} -t {user + '@' + domain} 'cd {target} ; exec $SHELL -l'\")\n\n\ndef menu_output(menu):\n    from rich.table import Table, Column\n    from rich.box import SIMPLE\n\n    tb = Table(*[Column('Parameter', justify='full', style='bold yellow'),\n                 Column('Description', justify='right', style='bold cyan')],\n               show_edge=False, show_header=False, row_styles=['none', 'dim'], box=SIMPLE, pad_edge=False,\n               title=f'[bold underline] {menu[\"title\"]}[dim]Author: RhythmLian\\n')\n    for line in menu['lines']:\n        tb.add_row((menu['prefix'] + ' ' if line[0].startswith('-') else '') + line[0], line[1])\n    QproDefaultConsole.print(tb, justify='center')\n    QproDefaultConsole.print('\\nDOC: https://rhythmlian.cn/2020/02/14/QuickProject/', justify='center')\n\n\ndef get_config(exit_if_failed: bool = True):\n    config = {}\n    try:\n        with open('project_configure.csv', 'r') as f:\n            for row in f.read().strip().split('\\n'):\n                row = row.replace('\\,', '--QPRO-IS-SPLIT--')\n                row = [i.replace('--QPRO-IS-SPLIT--', ',') for i in row.split(',')]\n                config[row[0]] = [i.strip() for i in row[1:]]\n        for i in config:\n            if i in ['server_target']:\n                continue\n            config[i] = config[i][0]\n    except IOError:\n        if exit_if_failed:\n            QproDefaultConsole.print(\n                QproErrorString, \"No file named: project_configure.csv\\n Maybe you need to run \\\"Qpro -init\\\" first!\")\n            exit(0)\n        else:\n            return False\n    return config\n\n\ndef get_server_target(st=None):\n    if not st:\n        config = get_config()['server_target']\n        ls, port = config[0].split(':'), config[1]\n    else:\n        ls, port = st[0].split(':'), st[1]\n    if len(ls) > 2:\n        server = ':'.join(ls[:8])\n        target = ':'.join(ls[8:])\n    else:\n        server, target = ls\n    return server, target, port\n","sub_path":"QuickProject/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"514688762","text":"from patch_dense_net import UNet\nimport numpy as np\nimport os\nimport time\nimport scipy.io as scio\nimport random\nimport itertools\nfrom scipy import misc\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torch.utils.data as 
data\nimport math\nfrom scipy.ndimage.filters import gaussian_filter, median_filter\nfrom skimage.morphology import label\nfrom scipy import ndimage\nfrom scipy.ndimage import zoom\nfrom custom import confirm_loc, extract_patches, reconstruct_from_patches_weightedall\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\ntorch.set_num_threads(8)\n\n# imageneet mean and std\nnorm_mean=[0.485, 0.456, 0.406]\nnorm_std=[0.229, 0.224, 0.225]\n\nn_pred_labels = 1\ninput_modalities = 3\nbatch_sz = 16\nvalsegcenterpred_filefold = '../tafe/test/tafe_4fold_epoch600/'\n\ndata_filefold = '/home/cong/workplace/kumar'\nimgs = np.load(os.path.join(data_filefold, 'valdata_after_stain_norm_mm_ref1.npy')) # .mat of train set\nimgs = imgs.transpose([0, 3, 1, 2]).astype(np.float32)\nndata, mod, h0, w0 = imgs.shape\nfor imod in range(mod):\n imgs[:, imod] = (imgs[:, imod]/255.0 - norm_mean[imod])/norm_std[imod]\n\nmaxpad = 176//2\npred_sz_type_list = ['48', '176']\ndilation_s = 0\ndilation_c = 2\nloss_type = 'focalloss'\n#aug_list = ['ori', 'rot90', 'rot180', 'rot270', 'flip_h', 'flip_w']\naug_list = ['ori']\nn_data = len(imgs)\ndataset_type = 'test'\n\nnfold = 4\n\nres_root = '/data0/cong/workplace/brp_net/patch_net/val'\nres_list = os.listdir(res_root)\n\nfor dilation_rate in [2]:\n for ths in [5]:\n for pred_sz_type in pred_sz_type_list:\n if pred_sz_type == '48':\n low_sz_ths = 1\n high_sz_ths = 48//2\n cursz = 48\n batchsz = 256\n elif pred_sz_type == '176':\n low_sz_ths = 48//2\n high_sz_ths = 1000\n cursz = 176\n batchsz = 24\n else:\n print('error pred_sz_type')\n\n valimgs = np.pad(imgs, ((0,0), (0,0), (maxpad, maxpad), (maxpad, maxpad)), 'reflect')\n \n print(cursz)\n for irnd, iseed in enumerate(range(0)):\n\n dir_savefile = './'+dataset_type+'/noaug_nodrop_'+loss_type+'_iouthsp'+str(ths)+'_dilation_'+str(dilation_rate)+'_sz'+str(cursz)+'_ds'+str(dilation_s)+'_dc'+str(dilation_c)+'_wtta/rnd_'+str(iseed)+'/'\n confirm_loc(dir_savefile)\n confirm_loc(dir_savefile + 'pred_res/')\n\n net = UNet(input_modalities, n_pred_labels*2, n_pred_labels).cuda()\n\n sigmoid = nn.Sigmoid()\n\n snapshot_lists = []\n for ifold in range(nfold):\n snapshot_list = []\n val_res_filefold = './val/noaug_nodrop_'+loss_type+'_iouthsp'+str(ths)+'_dilation_'+str(dilation_rate)+'_sz'+str(cursz)+'_ds'+str(dilation_s)+'_dc'+str(dilation_c)+'_ifold'+str(ifold)+'_wtta/rnd_'+str(iseed)+'/'\n with open(os.path.join(val_res_filefold, 'val_res.txt'), 'r') as f:\n lines = f.readlines()\n final_res = lines[-1]\n selected_snapshot = int(final_res.split('in isnapshot:')[-1])\n snapshot_list = [selected_snapshot]\n snapshot_lists.append(snapshot_list)\n # snapshot_lists = [[22], [22], [22], [22]]\n print(snapshot_lists)\n\n with torch.no_grad():\n\n for i_data in range(n_data):\n dmat = scio.loadmat(valsegcenterpred_filefold+'pred_res_'+str(i_data)+'_withpostproc.mat')\n tmp_instance = dmat['instance_nodilation']\n tmp_instance = np.pad(tmp_instance, ((maxpad, maxpad), (maxpad, maxpad)), 'constant', constant_values=0)\n tmps = np.pad(dmat['s'], ((maxpad, maxpad), (maxpad, maxpad)), 'constant', constant_values=0)\n tmpc = np.pad(dmat['c'], ((maxpad, maxpad), (maxpad, maxpad)), 'constant', constant_values=0)\n cset = np.unique(tmp_instance[tmp_instance>0])\n d = valimgs[i_data].copy()\n mod, h, w = d.shape\n \n # pixels with probability larger than 0.5 are considered as foreground pixels\n instances = []\n instances.append(np.zeros([1, h, w])+0.5)\n\n n_suitable_instance = 0\n patches_cursz_img = []\n patches_cursz_sin = []\n patches_cursz_cin = 
[]\n                        sxs = []\n                        sys = []\n                        exs = []\n                        eys = []\n                        halfszs = []\n                        for ic in cset:\n                            icmap = tmp_instance==ic\n                            if dilation_rate > 0:\n                                icmap = ndimage.morphology.binary_dilation(icmap, iterations=dilation_rate)\n\n                            if dilation_s > 0:\n                                dicmap_s = ndimage.morphology.binary_dilation(icmap, iterations=dilation_s)\n                            elif dilation_s == 0:\n                                dicmap_s = icmap.copy()\n                            else:\n                                dicmap_s = np.ones(icmap.shape)\n\n                            if dilation_c > 0:\n                                dicmap_c = ndimage.morphology.binary_dilation(icmap, iterations=dilation_c)\n                            elif dilation_c == 0:\n                                dicmap_c = icmap.copy()\n                            else:\n                                dicmap_c = np.ones(icmap.shape)\n\n                            icx, icy = np.nonzero(icmap)\n                            maxx = icx.max()\n                            maxy = icy.max()\n                            minx = icx.min()\n                            miny = icy.min()\n                            mx = np.round((maxx+minx)/2)\n                            my = np.round((maxy+miny)/2)\n                            halfsz = (np.max([(maxx-minx)/2, (maxy-miny)/2, 8])+12).astype(np.int16)\n                            sx = np.round(mx - halfsz).astype(np.int16)\n                            sy = np.round(my - halfsz).astype(np.int16)\n                            ex = np.round(mx + halfsz + 1).astype(np.int16)\n                            ey = np.round(my + halfsz + 1).astype(np.int16)\n\n                            if halfsz>=low_sz_ths and halfsz<high_sz_ths:\n                                # NOTE: the patch-extraction body below is a best-effort reconstruction\n                                # (the original lines were garbled in this copy of the file); it records\n                                # each instance window and resizes its image/seg/center crops to\n                                # cursz x cursz with bilinear zoom, matching the inverse zoom used later.\n                                n_suitable_instance += 1\n                                sxs.append(sx)\n                                sys.append(sy)\n                                exs.append(ex)\n                                eys.append(ey)\n                                halfszs.append(halfsz)\n                                scale = float(cursz)/(halfsz*2+1)\n                                patch_img = zoom(d[:, sx:ex, sy:ey], [1.0, scale, scale], order=1)\n                                patch_sin = zoom(dicmap_s[sx:ex, sy:ey].astype(np.float32), [scale, scale], order=1)\n                                patch_cin = zoom(dicmap_c[sx:ex, sy:ey].astype(np.float32), [scale, scale], order=1)\n                                patches_cursz_img.append(torch.from_numpy(patch_img[np.newaxis]))\n                                patches_cursz_sin.append(torch.from_numpy(patch_sin[np.newaxis, np.newaxis]))\n                                patches_cursz_cin.append(torch.from_numpy(patch_cin[np.newaxis, np.newaxis]))\n\n                        npatches = len(patches_cursz_img)\n                        if npatches>0:\n                            patches_img = torch.cat(tuple(patches_cursz_img), dim=0)\n                            patches_sin = torch.cat(tuple(patches_cursz_sin), dim=0)\n                            patches_cin = torch.cat(tuple(patches_cursz_cin), dim=0)\n\n                            for sipatch in list(range(0, npatches, batchsz)):\n                                eipatch = min(sipatch+batchsz, npatches)\n                                batch_img = patches_img[sipatch:eipatch]\n                                batch_sin = patches_sin[sipatch:eipatch]\n                                batch_cin = patches_cin[sipatch:eipatch]\n\n                                n_inbatch = batch_img.shape[0]\n\n                                batch_scin = torch.cat((batch_sin, batch_cin), dim=1)\n\n                                tta_img = []\n                                tta_scin = []\n                                for type_aug in aug_list:\n                                    # TTA\n                                    if type_aug == 'ori':\n                                        tta_img.append(batch_img.clone())\n                                        tta_scin.append(batch_scin.clone())\n                                    elif type_aug == 'rot90':\n                                        tta_img.append(batch_img.rot90(1, dims=(2, 3)))\n                                        tta_scin.append(batch_scin.rot90(1, dims=(2, 3)))\n                                    elif type_aug == 'rot180':\n                                        tta_img.append(batch_img.rot90(2, dims=(2, 3)))\n                                        tta_scin.append(batch_scin.rot90(2, dims=(2, 3)))\n                                    elif type_aug == 'rot270':\n                                        tta_img.append(batch_img.rot90(3, dims=(2, 3)))\n                                        tta_scin.append(batch_scin.rot90(3, dims=(2, 3)))\n                                    elif type_aug == 'flip_h':\n                                        tta_img.append(batch_img.flip(2))\n                                        tta_scin.append(batch_scin.flip(2))\n                                    elif type_aug == 'flip_w':\n                                        tta_img.append(batch_img.flip(3))\n                                        tta_scin.append(batch_scin.flip(3))\n\n                                tta_img = torch.cat(tuple(tta_img), dim=0)\n                                tta_scin = torch.cat(tuple(tta_scin), dim=0)\n                                tta_img = tta_img.cuda()\n                                tta_scin = tta_scin.cuda()\n\n                                spred = None\n                                n_valid = 0\n                                for ifold in range(nfold):\n                                    dir_checkpoint = './weight/noaug_nodrop_'+loss_type+'_iouthsp'+str(ths)+'_dilation_'+str(dilation_rate)+'_sz'+str(cursz)+'_ds'+str(dilation_s)+'_dc'+str(dilation_c)+'_ifold'+str(ifold)+'/rnd_'+str(iseed)+'/'\n                                    for isnapshot in snapshot_lists[ifold]:\n                                        weight_loc = dir_checkpoint+'model_of_'+str(isnapshot)+'.pth'\n                                        if not(os.path.exists(weight_loc)):\n                                            continue\n\n                                        weight = torch.load(weight_loc, map_location=lambda storage, loc: storage.cuda())\n                                        target_dict = net.state_dict()\n                                        source_dict = weight\n                                        source_dict2 = {}\n                                        for k,v in source_dict.items():\n                                            if k in target_dict:\n                                                source_dict2.update({k:v})\n                                        target_dict.update(source_dict2)\n                                        net.load_state_dict(target_dict)\n                                        net.eval()\n\n                                        isnapshot_spred = net(tta_img, tta_scin)\n                                        # (n*naug) * H * W\n                                        if isinstance(isnapshot_spred, tuple):\n                                            isnapshot_spred = F.sigmoid(isnapshot_spred[0]).data.cpu().squeeze()\n                                        else:\n                                            isnapshot_spred = F.sigmoid(isnapshot_spred).data.cpu().squeeze()\n                                        if spred is None:\n                                            spred = isnapshot_spred.clone()\n                                        
else:\n spred += isnapshot_spred\n n_valid += 1\n spred /= n_valid\n\n aug_spred = np.zeros([n_inbatch, cursz, cursz], dtype=np.float32)\n for iaug, type_aug in enumerate(aug_list):\n idx_iaug_s = n_inbatch*iaug\n idx_iaug_e = n_inbatch*(iaug+1)\n # Inverse TTA\n if type_aug == 'ori':\n aug_spred += spred[idx_iaug_s:idx_iaug_e].numpy()\n elif type_aug == 'rot90':\n aug_spred += spred[idx_iaug_s:idx_iaug_e].rot90(3, dims=(1, 2)).numpy()\n elif type_aug == 'rot180':\n aug_spred += spred[idx_iaug_s:idx_iaug_e].rot90(2, dims=(1, 2)).numpy()\n elif type_aug == 'rot270':\n aug_spred += spred[idx_iaug_s:idx_iaug_e].rot90(1, dims=(1, 2)).numpy()\n elif type_aug == 'flip_h':\n aug_spred += spred[idx_iaug_s:idx_iaug_e].flip(1).numpy()\n elif type_aug == 'flip_w':\n aug_spred += spred[idx_iaug_s:idx_iaug_e].flip(2).numpy()\n aug_spred /= len(aug_list)\n\n for ibatch in list(range(n_inbatch)):\n sx = sxs[ibatch+sipatch]\n sy = sys[ibatch+sipatch]\n ex = exs[ibatch+sipatch]\n ey = eys[ibatch+sipatch]\n halfsz = halfszs[ibatch+sipatch]\n\n probmap_instances = np.zeros([1, h, w])\n probmap_instances[:, sx:ex, sy:ey] = zoom(aug_spred[ibatch], [(halfsz*2+1).astype(np.float64)/cursz, (halfsz*2+1).astype(np.float64)/cursz], order=1)\n instances.append(probmap_instances.astype(np.float32))\n\n print('Finished Processing ' + str(i_data) + 'th image in ' + str(isnapshot) + ', including ' + str(len(instances)) + ' objs(sz: '+str(cursz)+')')\n\n instances = np.concatenate(instances, axis=0)\n maxprob_instances = np.max(instances, axis=0)\n idx_instances = np.argmax(instances, axis=0)\n\n scio.savemat(dir_savefile + 'pred_res/'+str(i_data)+'.mat', {'instance':idx_instances[maxpad:(h0+maxpad), maxpad:(w0+maxpad)], 'instance_ori':tmp_instance[maxpad:(h0+maxpad), maxpad:(w0+maxpad)]})","sub_path":"patch_net/eval_testset.py","file_name":"eval_testset.py","file_ext":"py","file_size_in_byte":16243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"526042168","text":"import os\nimport socket\n\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ntarget = '127.0.0.1'\nport = 9999\n\ns.connect((target, port))\n\ns.send('Sending data.'.encode())\n\nresp = s.recv(4096)\n\nprint(resp.decode())\n","sub_path":"scripts/python/networking/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"23770187","text":"import pygame, sys, time\nimport matplotlib.pyplot as plt\nfrom pickit.Datatypes import *\nfrom pickit import DebraArm, ArmManager\nfrom math import cos, sin, pi\nfrom pygame_utils import *\nfrom graph_utils import *\n\n# Robot settings\nL1 = 1.5\nL2 = 1.0\nL3 = 0.2\nGRIPPER_HEADING = 0\nRANGE_MIN = abs(L1 - L2)\nRANGE_MAX = abs(L1 + L2)\n\n# Trajectory generation settings\nDELTA_T = 0.05\n\n# Display settings\nPX_PER_METER = 100\nWIDTH = int(3 * (L1 + L2 + L3) * PX_PER_METER)\nHEIGHT = int(3 * (L1 + L2 + L3) * PX_PER_METER)\n\npygame.init()\nSCREEN = pygame.display.set_mode((WIDTH, HEIGHT))\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\nYELLOW = (255, 255, 0)\nCYAN = (0, 255, 255)\nPURPLE = (255, 0, 255)\n\ndef main():\n \"draw loop\"\n ALTERNATE = 0\n\n # Initial robot state\n right_origin = Vector3D(0.5, 0.0, 0.0)\n left_origin = Vector3D(-0.5, 0.0, 0.0)\n\n right_arm = DebraArm.DebraArm(l1=L1, l2=L2, origin=right_origin, flip_x=1)\n 
right_arm.inverse_kinematics(RobotSpacePoint(0.99*(L1+L2) + right_origin.x,\n 0 + right_origin.y,\n 0 + right_origin.z,\n 0))\n tool = right_arm.get_tool()\n joints = right_arm.get_joints()\n\n left_arm = DebraArm.DebraArm(l1=L1, l2=L2, origin=left_origin, flip_x=-1)\n left_arm.inverse_kinematics(RobotSpacePoint(-0.99*(L1+L2) + left_origin.x,\n 0 + left_origin.y,\n 0 + left_origin.z,\n 0))\n tool = left_arm.get_tool()\n joints = left_arm.get_joints()\n\n ws_front = Workspace(-1.5, 1.5,\n abs(L1 - L2), abs(L1 + L2),\n 0.0, 0.2,\n 1)\n ws_back = Workspace(-1.5, 1.5,\n -abs(L1 + L2), -abs(L1 - L2),\n 0.0, 0.2,\n -1)\n ws_right = Workspace(abs(L1 - L2) + right_origin.x, abs(L1 + L2) + right_origin.x,\n -1.5 + right_origin.y, 1.5 + right_origin.y,\n 0.0 + right_origin.z, 0.2 + right_origin.z,\n 1)\n ws_left = Workspace(-abs(L1 + L2) + left_origin.x, -abs(L1 - L2) + left_origin.x,\n -1.5 + left_origin.y, 1.5 + left_origin.y,\n 0.0 + left_origin.z, 0.2 + left_origin.z,\n 1)\n\n right_arm_manager = ArmManager.ArmManager(right_arm, ws_front, ws_right, ws_back, DELTA_T)\n left_arm_manager = ArmManager.ArmManager(left_arm, ws_front, ws_left, ws_back, DELTA_T)\n\n # Draw right arm\n origin, p1, p2, p3, z = right_arm.get_detailed_pos(L3)\n draw_arm(origin, p1, p2, p3, RANGE_MIN, RANGE_MAX)\n draw_workspaces(ws_front, ws_right, ws_back)\n # Draw left arm\n origin, p1, p2, p3, z = left_arm.get_detailed_pos(L3)\n draw_arm(origin, p1, p2, p3, RANGE_MIN, RANGE_MAX)\n draw_workspaces(ws_front, ws_left, ws_back)\n\n pygame.display.update()\n\n paused = False\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_p:\n paused = not paused\n if event.type == pygame.MOUSEBUTTONUP:\n x, y = get_cursor_pos()\n if ALTERNATE:\n ALTERNATE = 0\n tool_prev = right_arm_manager.arm.get_tool()\n tool = RobotSpacePoint(x, y, z, GRIPPER_HEADING)\n\n start_time = time.time()\n pth1, pth2, pz, pth3 = right_arm_manager.goto(tool_prev, RobotSpacePoint(0,0,0,0),\n tool, RobotSpacePoint(0,0,0,0),\n 'line')\n elapsed_time = time.time() - start_time\n print('elapsed time: ', elapsed_time)\n\n graph_trajectory_joint(pth1, pth2, pth3)\n draw_trajectory(right_arm_manager.arm, pth1, pth2, pz, pth3, DELTA_T)\n\n else:\n ALTERNATE = 1\n tool_prev = left_arm_manager.arm.get_tool()\n tool = RobotSpacePoint(x, y, z, GRIPPER_HEADING)\n\n start_time = time.time()\n pth1, pth2, pz, pth3 = left_arm_manager.goto(tool_prev, RobotSpacePoint(0,0,0,0),\n tool, RobotSpacePoint(0,0,0,0),\n 'line')\n elapsed_time = time.time() - start_time\n print('elapsed time: ', elapsed_time)\n\n graph_trajectory_joint(pth1, pth2, pth3)\n draw_trajectory(left_arm_manager.arm, pth1, pth2, pz, pth3, DELTA_T)\n\n if not paused:\n SCREEN.fill(BLACK)\n\n # Draw right arm\n origin, p1, p2, p3, z = right_arm_manager.arm.get_detailed_pos(L3)\n draw_arm(origin, p1, p2, p3, RANGE_MIN, RANGE_MAX)\n draw_workspaces(ws_front, ws_right, ws_back)\n # Draw left arm\n origin, p1, p2, p3, z = left_arm_manager.arm.get_detailed_pos(L3)\n draw_arm(origin, p1, p2, p3, RANGE_MIN, RANGE_MAX)\n draw_workspaces(ws_front, ws_left, ws_back)\n\n pygame.display.update()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"dual_arm_generic_visualiser.py","file_name":"dual_arm_generic_visualiser.py","file_ext":"py","file_size_in_byte":5660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"611279144","text":"import sys\n\nwith 
open(sys.argv[1], 'r') as test_cases:\n\tfor line in test_cases:\n\t\tline = line.rstrip('\\n')\n\t\tif len(line) > 0:\n\t\t\tarr, n = line.split(\"|\")\n\t\t\tn = int(n.strip())\n\t\t\tarr = [ int(s) for s in arr.split(\" \") if s != \"\" ]\n\t\t\tL = len(arr)\n\t\t\tk = 0\n\t\t\tfor i in range(len(arr)-1,1,-1):\n\t\t\t\tis_sorted = True\n\t\t\t\tfor j in range(i):\n\t\t\t\t\tif arr[j] > arr[j+1]:\n\t\t\t\t\t\ttmp = arr[j]\n\t\t\t\t\t\tarr[j] = arr[j+1]\n\t\t\t\t\t\tarr[j+1] = tmp\n\t\t\t\t\t\tis_sorted = False\n\t\t\t\tif is_sorted:\n\t\t\t\t\tbreak\n\t\t\t\tk += 1\n\t\t\t\tif k == n:\n\t\t\t\t\tbreak\n\t\t\tprint(\" \".join([str(i) for i in arr]))","sub_path":"2_Medium/158_interrupted_buuble_sort.py","file_name":"158_interrupted_buuble_sort.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"17611621","text":"from scipy.optimize import nnls\nimport numpy as np\nfrom ..base.incremental import IncrementalCoreset\nfrom ..util.errors import NumericalPrecisionError\nfrom .hilbert import HilbertCoreset\n\n\nclass OrthoPursuitCoreset(HilbertCoreset,IncrementalCoreset):\n def __init__(self, tangent_space):\n super().__init__(N=tangent_space.num_vectors()) \n self.T = tangent_space\n if np.any(self.T.norms() == 0):\n raise ValueError(self.alg_name+'.__init__(): tangent space must not have any 0 vectors')\n\n def _select(self):\n dots = (self.T[:]/self.T.norms()[:,np.newaxis]).dot(self.T.residual(self.wts, self.idcs))\n\n #if no active indices, just output argmax\n if self.idcs.shape[0] == 0:\n return dots.argmax()\n \n #search positive direction on whole dataset, negative direction on active set\n fpos = dots.argmax()\n pos = dots[fpos]\n fneg = (-dots[self.idcs]).argmax()\n neg = (-dots[self.idcs])[fneg]\n\n if pos >= neg:\n return fpos\n else:\n return self.idcs[fneg]\n\n def _reweight(self, f):\n\n #store prev weights/idcs before adding f\n old_wts = self.wts.copy()\n old_idcs = self.idcs.copy()\n\n #check to make sure value to add is not in the current set (error should be ortho to current subspace)\n #otherwise add a 0 entry to enable nnls below to use f\n f_already = np.where(self.idcs == f)[0].shape[0] > 0\n if f_already:\n raise NumericalPrecisionError('search selected a nonzero weight to update.')\n else:\n self._update(f, 0.)\n \n #run nnls, catch a numerical precision error, reset to old wts/idcs if needed, reraise to tell outer algorithms we failed\n try:\n self._optimize()\n except NumericalPrecisionError as e:\n self._overwrite(old_idcs, old_wts)\n raise\n\n return\n\n\n\n\n","sub_path":"bayesiancoresets/hilbert/orthopursuit.py","file_name":"orthopursuit.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"295858044","text":"# -*- coding: utf-8 -*-\n\n\nimport sys\nimport json\n\nfrom nlp_tasks.utils import file_utils\n\ninput_filepath = sys.argv[1]\n\nprecisions = []\nrecalls = []\nf1s = []\nresult_filepath_of_test = []\nlines = file_utils.read_all_lines(input_filepath)\nfor line in lines:\n if 'sequence_labeling_train_templates.py-456' in line and 'data_type: test result' in line:\n start_index = line.index('{')\n performances_str = line[start_index:].replace('\\'', '\"')\n performances = json.loads(performances_str)\n precisions.append(str('%.3f' % (performances['precision'] * 100)))\n recalls.append(str('%.3f' % (performances['recall'] * 100)))\n f1s.append(str('%.3f' % 
(performances['f1'] * 100)))\n continue\n if 'result_of_predicting_tes' in line:\n start_index = line.index(':') + 1\n filepath = line[start_index:]\n result_filepath_of_test.append(filepath)\nprint('precisions:')\nprint(','.join(precisions))\nprint('recalls:')\nprint(','.join(recalls))\nprint('f1s:')\nprint(','.join(f1s))\nprint('p r f')\nprint('\\t'.join([','.join(precisions), ','.join(recalls), ','.join(f1s)]))\nprint('filepaths: %s' % str(result_filepath_of_test))","sub_path":"nlp_tasks/absa/mining_opinions/sequence_labeling/towe_performance_from_result.py","file_name":"towe_performance_from_result.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"132395801","text":"from django.db import models\nfrom django.utils import timezone\n\n# Create your models here.\nclass ProInfo(models.Model):\n pro_name = models.CharField('项目名称', max_length = 24, unique = True)\n pro_ctime = models.DateField('创建时间', default = timezone.now)\n pro_type = models.CharField('项目类型', max_length = 16)\n pro_link = models.URLField('link', blank = True)\n\n note = models.CharField('备注', max_length = 100, blank = True)\n\n def __str__(self):\n return self.pro_name\n\n class Meta:\n verbose_name = '1-项目信息'\n verbose_name_plural = '1-项目信息'\n\n\nclass DeviceInfo(models.Model):\n pro = models.ForeignKey(ProInfo, verbose_name = '所属项目', on_delete = models.DO_NOTHING)\n hostname = models.CharField('主机简称', max_length = 24)\n ip = models.GenericIPAddressField('ip')\n os_name = models.CharField('OS', max_length = 16)\n director\t= models.CharField('负责人', max_length = 16)\n nature = models.CharField('配置', max_length = 16, help_text = 'such as 2C4G200G')\n isvhost = models.BooleanField('虚机')\n note = models.CharField('备注', max_length = 100, blank = True)\n \n def __str__(self):\n return self.hostname\n\n class Meta:\n verbose_name = '2-设备信息'\n verbose_name_plural = '2-设备信息'\n\n\nclass UnitInfo(models.Model):\n pro = models.ForeignKey(ProInfo, verbose_name = '所属项目', on_delete = models.DO_NOTHING)\n unit_name = models.CharField('关系名', max_length = 16)\n src_unit = models.CharField('源组件', max_length = 16)\n src_app = models.CharField('源应用', max_length = 16)\n src_port = models.CharField('源端口', max_length = 16)\n\n dest_unit = models.CharField('目的组件', max_length = 16)\n dest_vip = models.CharField('组件vip', max_length = 48)\n dest_app = models.CharField('目的应用', max_length = 16)\n dest_port = models.CharField('目的端口', max_length = 16)\n\n test_way = models.CharField('测试方法', max_length = 200, default = '')\n note = models.CharField('备注', max_length = 48, blank = True)\n\n def __str__(self):\n return self.unit_name\n\n class Meta:\n verbose_name = '3-组件关系'\n verbose_name_plural = '3-组件关系'\n\n\nclass AppInfo(models.Model):\n pro = models.ForeignKey(ProInfo, verbose_name = '所属项目', on_delete = models.DO_NOTHING)\n unit_name = models.CharField('组件', max_length = 16)\n app_name = models.CharField('应用', max_length = 24)\n app_user = models.CharField('用户', max_length = 24)\n app_passwd = models.CharField('密码', max_length = 24)\n start_way = models.CharField('启动方式', max_length = 100, default = '')\n note = models.CharField('备注', max_length = 100, blank = True)\n \n def __str__(self):\n return self.app_name\n\n class Meta:\n verbose_name = '4-应用信息'\n verbose_name_plural = '4-应用信息'\n\n\nclass OhostInfo(models.Model):\n area = models.CharField(verbose_name = '所在地', max_length = 24)\n hostname = models.CharField('主机简称', max_length = 24)\n root = 
models.CharField('root账号', max_length = 16, default = '')\n rootpasswd = models.CharField('root密码', max_length = 24, default = '')\n ip = models.GenericIPAddressField('ip')\n ohost = models.GenericIPAddressField('宿主机', default = '0.0.0.0', help_text = '没有就写0.0.0.0')\n os_name = models.CharField('OS', max_length = 16)\n director\t= models.CharField('运维负责人', max_length = 16, default = '')\n user = models.CharField('使用人', max_length = 16, default = '')\n cpus = models.SmallIntegerField('cpu核数')\n mems = models.SmallIntegerField('内存/G')\n disk = models.SmallIntegerField('磁盘/G')\n isvhost = models.BooleanField('虚机', default = True)\n status = models.BooleanField('已分配', default = 'False')\n note = models.CharField('用处/备注', max_length = 50, blank = True)\n needs = models.CharField('其他要求', max_length = 50, blank = True)\n \n def __str__(self):\n return self.hostname\n\n class Meta:\n verbose_name = 'x-测试环境'\n verbose_name_plural = 'x-测试环境'\n permissions = (\n ('report_virtualinfo', '导出表格'),\n ('import_virtualinfo', '导入表格'),\n )\n","sub_path":"local/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"618659066","text":"# Copyright 2017 by Akira Yoshiyama .\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nStatistics Reporter\n\"\"\"\n\nimport argparse\nfrom datetime import datetime, timedelta\nfrom email.message import Message\nfrom email.header import Header\nfrom jinja2 import Environment\nimport logging\nimport os\nimport pbr.version\nimport smtplib\nimport sys\nimport yaml\n\n\nfrom amane import const\nfrom amane import db\nfrom amane import log\n\n\nCONFIG_FILE = os.environ.get(\"AMANE_CONFIG_FILE\", \"/etc/amane/amane.conf\")\nERROR_RETURN = 'amane-error'\n\n\ndef convert(data):\n updated = data['updated']\n created = data['created']\n data['updated'] = updated - timedelta(microseconds=updated.microsecond)\n data['created'] = created - timedelta(microseconds=created.microsecond)\n return data\n\n\ndef report_tenant_status(relay_host=None, relay_port=None,\n db_url=None, db_name=None,\n report_subject=None, report_msg=None,\n days_to_close=None, charset='utf8',\n admins=None, domain=None, debug=False,\n **kwargs):\n\n db.init_db(db_url, db_name)\n\n new = db.find_mls({'status': const.STATUS_NEW}, sortkey='updated')\n new = [convert(_) for _ in new]\n _open = db.find_mls({'status': const.STATUS_OPEN}, sortkey='updated')\n _open = [convert(_) for _ in _open]\n orphaned = db.find_mls({'status': const.STATUS_ORPHANED},\n sortkey='updated')\n orphaned = [convert(_) for _ in orphaned]\n closed_after = datetime.now() - timedelta(days=days_to_close)\n closed = db.find_mls({'status': const.STATUS_CLOSED,\n 'updated': {'$gt': closed_after}},\n sortkey='updated', reverse=False)\n closed = [convert(_) for _ in closed]\n\n params = dict(new=new, open=_open, orphaned=orphaned, closed=closed)\n temp = Environment(newline_sequence='\\r\\n')\n content 
= temp.from_string(report_msg).render(params)\n\n # Format a report message\n _from = ERROR_RETURN + \"@\" + domain\n _to = \", \".join(admins)\n logging.debug(\"From: %s\", _from)\n logging.debug(\"To: %s\", _to)\n logging.debug(\"Subject: %s\", report_subject)\n logging.debug(\"\\n%s\", content)\n message = Message()\n message['To'] = message['Reply-To'] = _to\n message['From'] = message['Return-Path'] = _from\n message['Subject'] = Header(report_subject, charset)\n message.set_payload(content.encode(charset))\n message.set_charset(charset)\n\n # Send the report to the relay host\n relay = smtplib.SMTP(relay_host, relay_port)\n if debug:\n relay.set_debuglevel(1)\n relay.sendmail(_from, admins, message.as_string())\n relay.quit()\n logging.debug(\"Sent a report mail\")\n\n\ndef report_status(relay_host=None, relay_port=None, db_url=None,\n db_name=None, domain=None, debug=False, **kwargs):\n\n db.init_db(db_url, db_name)\n\n tenants = db.find_tenants({'status': const.TENANT_STATUS_ENABLED})\n for tenant in tenants:\n report_tenant_status(\n relay_host=relay_host, relay_port=relay_port, db_url=db_url,\n db_name=db_name, domain=domain, debug=debug, **tenant)\n\n\ndef main():\n \"\"\"\n The main routine\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--version',\n help='Print version and exit',\n action='store_true')\n parser.add_argument('--debug',\n help='Debug output',\n action='store_true')\n parser.add_argument('--config-file',\n help='cofiguration file',\n type=argparse.FileType('r'),\n default=CONFIG_FILE)\n\n opts = parser.parse_args()\n\n if opts.version:\n print(pbr.version.VersionInfo('amane'))\n return 0\n\n log.setup(debug=opts.debug)\n logging.debug(\"args: %s\", opts.__dict__)\n\n config = yaml.load(opts.config_file)\n for key, value in config.items():\n setattr(opts, key, value)\n\n return(report_status(**config))\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"amane/cmd/reporter.py","file_name":"reporter.py","file_ext":"py","file_size_in_byte":4702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"629632981","text":"import os\nimport sys\nimport wget\nimport shlex\nimport shutil\nimport tarfile\nimport platform\nfrom subprocess import check_output, call\n\nDEVNULL = open(os.devnull, 'wb')\nCLEAR = False\n\n_defs = {}\n\ndef _get_home_dir():\n name = platform.system()\n if name == 'Linux':\n readlink = 'readlink'\n else:\n raise Exception('%s is not supported.' 
% name)\n f = check_output([readlink, '-f', sys.argv[0]])\n d = os.path.dirname(f)\n return os.path.dirname(d)\n\nsys.path.append(os.path.join(_get_home_dir(), 'scripts'))\nfrom configure import *\n\ndef _get_conf():\n home = _get_home_dir()\n return os.path.join(home, 'conf', 'build.cfg')\n\ndef _get_inc():\n home = _get_home_dir()\n return os.path.join(home, 'include')\n\ndef _get_patch():\n home = _get_home_dir()\n return os.path.join(home, 'src', 'linux-%s.patch' % _defs['kernel_version'])\n\ndef _get_kernel_config():\n home = _get_home_dir()\n return os.path.join(home, 'conf', 'config-%s' % _defs['kernel_version'])\n\ndef _get_url():\n ver = _defs['kernel_version']\n major, _, _ = ver.split('.')\n return 'https://www.kernel.org/pub/linux/kernel/v%s.x/linux-%s.tar.gz' % (major, ver)\n\ndef _get_path():\n return os.path.join(_defs['path_source'], 'linux-%s' % _defs['kernel_version'])\n\ndef _read_args():\n path = _get_conf()\n with open(path) as f:\n lines = f.readlines()\n\n for i in lines:\n i = i.strip()\n if i and not i.startswith('#'):\n res = i.split('=')\n if len(res) != 2:\n raise Exception('Error: failed to parse %s' % i)\n key = res[0].lower()\n val = res[1].split('#')[0].strip()\n if key not in ARGS:\n raise Exception('Error: cannot find the definition of %s' % key)\n ARGS[key]['value'] = val\n\ndef _chkargs():\n _read_args()\n for i in ARGS:\n res = ARGS[i].get('value')\n if None == res:\n res = ARGS[i].get('default')\n if None == res:\n raise Exception('Error: %s is not set' % i)\n _defs[i] = str(res)\n\ndef _config():\n src = _get_kernel_config()\n if not os.path.exists(src):\n raise Exception('Error: failed to find config-%s' % _defs['kernel_version'])\n dest = os.path.join(_get_path(), '.config')\n shutil.copyfile(src, dest)\n\ndef _include():\n dir_src = _get_inc()\n dir_dest = os.path.join(_get_path(), 'include', 'linux')\n for i in os.listdir(dir_src):\n if i.endswith('.h'):\n src = os.path.join(dir_src, i)\n dest = os.path.join(dir_dest, i)\n shutil.copyfile(src, dest)\n\ndef _download():\n url = _get_url()\n name = os.path.join(_defs['path_source'], 'linux-%s.tar.gz' % _defs['kernel_version'])\n if not os.path.exists(name):\n wget.download(url, name)\n path = _get_path()\n if os.path.exists(path):\n shutil.rmtree(path)\n tar = tarfile.open(name)\n tar.extractall(_defs['path_source'])\n tar.close()\n if CLEAR:\n os.remove(name)\n\ndef _patch():\n _download()\n cmd = 'patch -d %s -p1 < %s' % (_get_path(), _get_patch())\n os.system(cmd)\n\ndef _configure():\n _chkargs()\n _patch()\n _config()\n _include()\n\ndef _call(cmd, path=None, quiet=False, ignore=False):\n if path:\n os.chdir(path)\n if not quiet:\n check_output(cmd, shell=True)\n else:\n if call(shlex.split(cmd), stdout=DEVNULL, stderr=DEVNULL):\n if not ignore:\n raise Exception('Error: failed to run %s' % cmd)\n\ndef _build():\n path = _get_path()\n _call('make', path)\n _call('make modules', path)\n _call('make modules_install', path)\n _call('make install', path)\n\nif __name__ == '__main__':\n _configure()\n _build()\n","sub_path":"scripts/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":3741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"1528769","text":"import re\nfrom datetime import datetime\nfrom typing import Optional, List\n\nimport requests\nfrom bs4 import BeautifulSoup, Tag\nfrom connexion import NoContent\n\nfrom insis_api import util\nfrom insis_api.models.exam import Exam # noqa: E501\n\nSELECTOR_ENROLLED_COURSES = 
'#table_1 tbody tr'\nSELECTOR_AVAILABLE_COURSES = '#table_2 tbody tr'\n\nEXAM_ENROLL_LINK = 'https://insis.vse.cz/auth/student/terminy_prihlaseni.pl'\nEXAM_LIST_LINK = 'https://insis.vse.cz/auth/student/terminy_seznam.pl'\n\n\ndef disenroll_exam_by_id(exam_id, body): # noqa: E501\n \"\"\"Disenroll exam with id\n\n # noqa: E501\n\n :param exam_id: ID of exam to disenroll\n :type exam_id: int\n\n :param body: Insis user credentials\n :type body: dict | bytes\n\n :rtype: None\n \"\"\"\n user = util.get_credentials()\n with requests.Session() as ses:\n util.log_in(ses, user)\n info = util.get_study_info(ses)\n disenroll = {\n 'termin': exam_id,\n 'predmet': '',\n 'studium': info['studium'],\n 'obdobi': info['obdobi'],\n 'odhlasit': 'Odhlásit z termínu'\n }\n ses.post(EXAM_ENROLL_LINK, data=disenroll)\n\n return NoContent, 200\n\n\ndef enroll_exam_by_id(exam_id: int, body): # noqa: E501\n \"\"\"Enroll exam with id\n\n # noqa: E501\n\n :param exam_id: ID of exam to enroll\n :type exam_id: int\n\n :param body: Insis user credentials\n :type body: dict | bytes\n\n :rtype: None\n \"\"\"\n user = util.get_credentials()\n with requests.Session() as ses:\n util.log_in(ses, user)\n info = util.get_study_info(ses)\n enroll = {\n 'termin': exam_id,\n 'predmet': '',\n 'studium': info['studium'],\n 'obdobi': info['obdobi'],\n 'prihlasit': 'Přihlásit na termín'\n }\n ses.post(EXAM_ENROLL_LINK, data=enroll)\n\n return NoContent, 200\n\n\ndef get_exam(body): # noqa: E501\n \"\"\"Get all exams\n\n # noqa: E501\n\n :param body: Insis user credentials\n :type body: dict | bytes\n\n :rtype: Exam\n \"\"\"\n exams = __list_exams(SELECTOR_AVAILABLE_COURSES)\n return (exams, 200) if exams else (NoContent, 404)\n\n\ndef get_enrolled_exam(body): # noqa: E501\n \"\"\"Get enrolled exams\n\n # noqa: E501\n\n :param body: Insis user credentials\n :type body: dict | bytes\n\n :rtype: Exam\n \"\"\"\n exams = __list_exams(SELECTOR_ENROLLED_COURSES)\n return (exams, 200) if exams else (NoContent, 404)\n\n\ndef get_exam_by_id(exam_id: int, body): # noqa: E501\n \"\"\"Get exam by id\n\n # noqa: E501\n\n :param exam_id: ID of exam that needs to be fetched\n :type exam_id: int\n\n :param body: Insis user credentials\n :type body: dict | bytes\n\n :rtype: Exam\n \"\"\"\n for exam in __list_exams(SELECTOR_AVAILABLE_COURSES) + __list_exams(SELECTOR_ENROLLED_COURSES):\n if exam.id == exam_id:\n return exam, 200\n return NoContent, 404\n\n\ndef __list_exams(selector: str) -> List['Exam']:\n user = util.get_credentials()\n with requests.Session() as ses:\n util.log_in(ses, user)\n ter = ses.get(EXAM_LIST_LINK)\n\n # scrape exams html\n bs = BeautifulSoup(ter.text, \"html.parser\")\n exams = bs.select(selector)\n\n return [__get_exam_from_html(exam_html.contents) for exam_html in exams] if exams else None\n\n\ndef __get_exam_from_html(exam_html: str) -> 'Exam':\n offset = 1 if len(exam_html) == 13 else 0\n\n id = int(re.search('termin=([0-9]+);', exam_html[13 - offset].contents[0].contents[0].attrs['href']).group(1))\n accessibility = exam_html[1].contents[0].contents[0].attrs['title'] if len(exam_html) == 14 else None\n ident = exam_html[2 - offset].text\n subject = exam_html[3 - offset].text\n department = exam_html[4 - offset].text\n date = __get_date_from_text(' '.join(exam_html[5 - offset].text.split()[:-1]))\n room = exam_html[6 - offset].text\n type = exam_html[7 - offset].text\n teacher = exam_html[8 - offset].text\n availability = exam_html[9 - offset].text\n sign_in_from, sign_in_to, sign_out_to = 
[__get_date_from_text(date_str) for date_str\n in exam_html[11 - offset].contents[0].contents if\n not isinstance(date_str, Tag)]\n\n return Exam(id, accessibility, ident, subject, department, date, room, type, teacher, availability, sign_in_from,\n sign_in_to,\n sign_out_to)\n\n\ndef __get_date_from_text(text: str) -> Optional[datetime]:\n try:\n return datetime.strptime(text, '%d.%m.%Y %H:%M')\n except ValueError:\n return None\n","sub_path":"insis_api/controllers/exam_controller.py","file_name":"exam_controller.py","file_ext":"py","file_size_in_byte":4621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"561049025","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nfrom timeit import repeat\n\n\nclass HeapTimeCase(object):\n\n def time_init(self):\n return [\n 'init',\n (\n 'heapq',\n 'from heapq import heapify',\n 'heapify(list(reversed(range({size}))))',\n 1,\n ),\n (\n 'Heap',\n 'from xheap import Heap',\n 'Heap(reversed(range({size})))',\n 1,\n ),\n (\n 'RemovalHeap',\n 'from xheap import RemovalHeap',\n 'RemovalHeap(reversed(range({size})))',\n 1,\n ),\n ]\n\n def time_pop(self):\n return [\n 'pop',\n (\n 'heapq',\n 'from heapq import heapify, heappop; heap = list(reversed(range({size}))); heapify(heap)',\n 'heappop(heap)',\n None,\n ),\n (\n 'Heap',\n 'from xheap import Heap; heap = Heap(reversed(range({size})))',\n 'heap.pop()',\n None,\n ),\n (\n 'RemovalHeap',\n 'from xheap import RemovalHeap; heap = RemovalHeap(reversed(range({size})))',\n 'heap.pop()',\n None,\n ),\n ]\n\n def time_push(self):\n return [\n 'push',\n (\n 'heapq',\n 'from heapq import heapify, heappush; heap = list(reversed(range(0, {size}*4, 4))); heapify(heap); i = 1',\n 'heappush(heap, i); i += 4',\n None,\n ),\n (\n 'Heap',\n 'from xheap import Heap; heap = Heap(reversed(range(0, {size}*4, 4))); i = 1',\n 'heap.push(i); i += 4',\n None,\n ),\n (\n 'RemovalHeap',\n 'from xheap import RemovalHeap; heap = RemovalHeap(reversed(range(0, {size}*4, 4))); i = 1',\n 'heap.push(i); i += 4',\n None,\n ),\n ]\n\n\nclass OrderHeapTimeCase(object):\n\n def time_init(self):\n return [\n 'init',\n (\n 'heapq',\n 'from heapq import heapify',\n 'heapify(list(map(lambda x: (-x, x), range({size}))))',\n 1,\n ),\n (\n 'OrderHeap',\n 'from xheap import OrderHeap',\n 'OrderHeap(range({size}), key=lambda x: -x)',\n 1,\n ),\n (\n 'XHeap',\n 'from xheap import XHeap',\n 'XHeap(range({size}), key=lambda x: -x)',\n 1,\n ),\n ]\n\n def time_pop(self):\n return [\n 'pop',\n (\n 'heapq',\n 'from heapq import heapify, heappop; heap = list(map(lambda x: (-x, x), range({size}))); heapify(heap)',\n 'heappop(heap)[1]',\n None,\n ),\n (\n 'OrderHeap',\n 'from xheap import OrderHeap; heap = OrderHeap(range({size}), key=lambda x: -x)',\n 'heap.pop()',\n None,\n ),\n (\n 'XHeap',\n 'from xheap import XHeap; heap = XHeap(range({size}), key=lambda x: -x)',\n 'heap.pop()',\n None,\n ),\n ]\n\n def time_push(self):\n return [\n 'push',\n (\n 'heapq',\n 'from heapq import heapify, heappush; heap = list(map(lambda x: (-x, x), range(0, {size}*4, 4))); heapify(heap); i = 1',\n 'heappush(heap, (-i, i)); i += 4',\n None,\n ),\n (\n 'OrderHeap',\n 'from xheap import OrderHeap; heap = OrderHeap(range(0, {size}*4, 4), key=lambda x: -x); i = 1',\n 'heap.push(i); i += 4',\n None,\n ),\n (\n 'XHeap',\n 'from xheap import XHeap; heap = XHeap(range(0, {size}*4, 4), key=lambda x: -x); i = 1',\n 'heap.push(i); i += 4',\n None,\n ),\n ]\n\n\nclass RemovalHeapTimeCase(object):\n\n def 
time_remove(self):\n return [\n 'remove',\n (\n 'RemovalHeap',\n 'from xheap import RemovalHeap; heap = RemovalHeap(map(lambda x: (-x, x), reversed(range({size})))); i = {size}//2',\n 'heap.remove((-i, i)); i += 1',\n None,\n ),\n (\n 'XHeap',\n 'from xheap import XHeap; heap = XHeap(reversed(range({size})), key=lambda x: -x); i = {size}//2',\n 'heap.remove(i); i += 1',\n None,\n ),\n ]\n\n\ninitial_sizes = [10**3, 10**4, 10**5, 10**6]\nrepetitions = 10000\ndef perform_time_configs(configs):\n for _, setup, stmt, number in configs:\n yield [min(repeat(stmt.format(size=size), setup.format(size=size), number=(number or size//32), repeat=repetitions)) for size in initial_sizes]\n\n\nfor htc in (HeapTimeCase(), OrderHeapTimeCase(), RemovalHeapTimeCase()):\n config_methods = [getattr(htc, method) for method in dir(htc) if method.startswith('time_') and callable(getattr(htc, method))]\n configs_list = [config_method() for config_method in config_methods]\n align_label = max(len(cs[0]) for cs in configs_list)\n align_module = max(len(c[0]) for cs in configs_list for c in cs)\n\n for configs in configs_list:\n label, configs = configs[0], configs[1:]\n results = list(perform_time_configs(configs))\n\n baseline_config = configs[0]\n baseline_results = results[0]\n\n for i, (config, results) in enumerate(zip(configs, results)):\n printed_label = (label if i == 0 else '').ljust(align_label)\n\n print(printed_label, config[0].ljust(align_module), ' '.join('{:5.2f} ({:5.2f}x)'.format(result*1000, result/baseline_result) for result, baseline_result in zip(results, baseline_results)))\n\n print('--------------------------------------------------------------------')\n print('--------------------------------------------------------------------')\n","sub_path":"test_xheap_time.py","file_name":"test_xheap_time.py","file_ext":"py","file_size_in_byte":6353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"184090976","text":"from os import path, makedirs\nfrom argparse import ArgumentParser\nfrom urllib.request import urlopen\nfrom re import sub\n\nfrom . import registry\nfrom . import match\nfrom . 
import build\n\nREPOSITORY_URL = 'https://cvs.khronos.org/svn/repos/ogl/trunk/doc/registry/public/api/'\n\ndef pretty_print(features, extensions):\n\tprint()\n\n\tprint('===== included features =====')\n\tfor feature in features:\n\t\tprint(' > {}'.format(feature['name']))\n\n\tprint()\n\n\tprint('===== included extensions =====')\n\tfor extension in extensions:\n\t\tprint(' > {}'.format(extension['name']))\n\n\tif len(features) == 0:\n\t\tprint()\n\t\tprint('warning: no features selected -- you may have chosen an invalid API or feature level'.format(args.feature_level))\n\n\tneeds_newline = True\n\tfor extension_name in args.extensions:\n\t\tif not any(extension['name'] == extension_name for extension in extensions):\n\t\t\tif needs_newline:\n\t\t\t\tprint()\n\t\t\t\tneeds_newline = False\n\n\t\t\tprint('warning: extension {} does not exist in the registry or is not supported for the {} API -- skipping'.format(extension_name, args.api))\n\nap = ArgumentParser(description='Generates C bindings from the OpenGL Registry.', fromfile_prefix_chars='@')\nap.add_argument('-r', '--registry', choices=['gl', 'wgl', 'glx', 'egl'], default='gl', help='select the registry to retrieve features and extensions from (default: gl)', metavar='NAME')\nap.add_argument('-a', '--api', help='select a particular api to use when generating bindings (default: use value from -r)', metavar='NAME')\nap.add_argument('-f', '--feature-level', help='select the feature level to generate; all features up to and including the specified level will be generated (default: generate all features supported by the selected api)', metavar='VERSION')\nap.add_argument('-e', '--extension', action='append', default=[], dest='extensions', help='select an extension to add to the bindings; can be specified multiple times (e.g. 
-e GL_3DFX_multisample -e GL_3DFX_tbuffer)', metavar='NAME')\nap.add_argument('-p', '--profile', choices=['core', 'compatibility'], default='core', help='select a particular profile to use when generating bindings (defualt: core)', metavar='NAME')\nap.add_argument('-d', '--output-directory', default='giggle-output', help='set the directory in which to place generated code files (default: giggle-output)')\nap.add_argument('-o', '--output-pattern', default='giggle', help='set the filename pattern for output files (default: giggle)')\nap.add_argument('-G', '--guard-macro', help='set the name of the macro used as an include guard (default: based on value from -o)')\nap.add_argument('-C', '--cc-macro', help='set the name of the macro used as a calling convention proxy (default: based on value from -o)')\n\nargs = ap.parse_args()\n\n# these defaults are tough to express to argparse so we just set them up manually\nif args.api is None:\n\targs.api = args.registry\n\nif args.guard_macro is None:\n\targs.guard_macro = args.output_pattern.upper()\n\targs.guard_macro = sub(r'^[^A-Za-z_]', '_', args.guard_macro)\n\targs.guard_macro = sub(r'[^A-Za-z_0-9]', '_', args.guard_macro)\n\targs.guard_macro += '_H'\n\nif args.cc_macro is None:\n\targs.cc_macro = args.output_pattern.upper()\n\targs.cc_macro = sub(r'^[^A-Za-z_]', '_', args.cc_macro)\n\targs.cc_macro = sub(r'[^A-Za-z_0-9]', '_', args.cc_macro)\n\targs.cc_macro += '_CC'\n\nif not path.isdir(args.output_directory):\n\tmakedirs(args.output_directory)\n\nwith urlopen(REPOSITORY_URL + args.registry + '.xml') as stream:\n\tregistry_data = registry.parse(stream)\n\nfeatures = match.features(registry_data, args.api, args.feature_level)\nextensions = match.extensions(registry_data, args.api, args.extensions)\n\npretty_print(features, extensions)\n\nchunks = build.prepare(registry_data, features, extensions, args.api, args.profile)\n\nheader_code = build.header(chunks, args.guard_macro, args.cc_macro, args.registry)\n#loader_code = build.loader(chunks)\n\nwith open(path.join(args.output_directory, args.output_pattern + '.h'), 'w') as f:\n\tf.write(header_code)","sub_path":"giggle/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"299731014","text":"#!/usr/bin/env python3\nimport datetime\n\n# Data\ndonors_amts = {'Gates': [('Mr.', ), 150000, 3], 'Brin': [('Mr.', ), 150000, 3], \n'Cerf': [('Mr.', ), 50000, 2], 'Musk': [('Mr.', ), 100000, 1], \n'Berners-Lee': [('Mr.', ), 50000, 2], \n'Wojcicki': [('Ms.', ), 125000, 1], 'Avey': [('Ms.', ), 200000, 2]}\n\n\n# Processing\ndef send_ty():\n global donors_amts\n new_response = 0\n title = ''\n donor_dict = {'title': '', 'last_name': '', 'donation': 0}\n print()\n response = input('Enter full last name of Donor,'\n + '\\n\"list\" for List of Donors'\n + ',\\nor \"e\" to Exit back to Main Menu: ')\n print()\n if response.isalpha():\n if response == 'list':\n print('Here is the list of Donors: ')\n for donor in donors_amts:\n print(donor)\n print()\n else:\n response = response.capitalize()\n if response in donors_amts:\n print('Donor found:', response)\n new_response = input('Enter a Donation amount' +\n ' (in USD): ')\n if not new_response.isnumeric():\n send_ty()\n else:\n new_response = int(new_response)\n donors_amts[response][1] += new_response\n donors_amts[response][2] += 1\n print('Added to', response, '\\'s Donations:',\n new_response, '\\n')\n elif response not in 
donors_amts:\n title = input('Title: \"Ms.\" or \"Mr.\"?: ')\n new_response = input('Enter a Donation amount' +\n ' (in USD): ')\n if not new_response.isnumeric():\n send_ty()\n else:\n new_response = int(new_response)\n print('Added to list of Donors:', title,\n response, new_response)\n donors_amts[response] = ([(title, ), new_response, 1])\n title = str(donors_amts.get(response)[0])\n title = title.strip('(').strip(')').strip(',')\n title = title.strip('\\'')\n donor_dict = {'title': title,\n 'last_name': response, 'donation': new_response}\n print('Dear {title} {last_name}, Thank you for your generous donation in the amount of {donation} USD.'.format(**donor_dict))\n print()\n program_run()\n\n\ndef get_report():\n print()\n psv = ['Donor Name', '| Total Given', '| Num Gifts',\n '| Average Gift']\n print('{:<15}{:>12}{:>12}{:>12}'.format(psv[0], psv[1],\n psv[2], psv[3]))\n for i in range(55):\n print('-', end='')\n print()\n for donor in donors_amts:\n d1 = donors_amts[donor][1]\n d2 = donors_amts[donor][2]\n print('{:<15}{}{:>10}{:>12}{}{:>11}'.format(donor, ' $',\n d1, d2, ' $', d1 // d2))\n print()\n program_run()\n\n\ndef send_letters():\n global donors_amts\n d_a = donors_amts\n now = datetime.datetime.now()\n for donor in donors_amts:\n title = str(d_a.get(donor)[0])\n title = title.strip('(').strip(')').strip(',').strip('\\'')\n with open(donor + str(now.year) + str(now.month) +\n str(now.day) + '.txt', 'w') as of:\n donor_dict = {'title': title,\n 'last_name': donor, 'donations': d_a.get(donor)[1]}\n of.write(\n 'Dear {title} {last_name},\\nThank you for your generous previous giving in the amount of {donations} USD.'.format(**donor_dict))\n of.write('\\nAttached is our most recent independent,'\n + ' third party audit.\\n')\n of.write('\\nWe hope you agree that we have been good'\n + ' stewards of our donors\\' funds,\\nand that'\n + ' you will consider donating'\n + ' again to our project.\\n')\n of.write('\\nBest wishes for continued success,')\n of.write('\\n[Signature]')\n of.close()\n\n\ndef quit_program():\n print('Program execution completed.')\n return\n\n\ndef program_run():\n print('Main Menu:')\n response = input('Choose from the following:\\n\"1\" - Send a \"Thank You\",'\n + '\\n\"2\" - Create a Report,'\n + '\\n\"3\" - Send Letters to All Donors, or\\n\"q\" to Quit: ')\n menu_dict = {'1': send_ty, '2': get_report,\n '3': send_letters, 'q': quit_program}\n\n if response in menu_dict:\n menu_dict.get(response)()\n else:\n response = input('Choose \"1\", \"2\", \"3\", or \"q\": ')\n if response in menu_dict:\n menu_dict.get(response)()\n else:\n print('That is not an option. 
Closing program.')\n            return\n\n\n# I/O\nif __name__ == '__main__':\n    program_run()\n\nelse:\n    print('This module is not intended to be imported.')\n","sub_path":"students/johnpharmd/lesson04/mailroom2.py","file_name":"mailroom2.py","file_ext":"py","file_size_in_byte":4788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"7778309","text":"from .FCN import VGGNet, FCN\nfrom .Unet import U_Net, AttU_Net, NestedUNet\nfrom .SegNet import SegNet\nimport torch\n\n\ndef build_model(model_cfg):\n    '''\n    Build a model according to the input configuration\n    :param model_cfg:\n    :return:\n    '''\n\n    if model_cfg.type == 'FCN':\n        vgg_model = VGGNet(requires_grad=True, show_params=model_cfg.show_params)\n        fcn_model = FCN(pretrained_net=vgg_model, n_class=model_cfg.num_classes)\n        return fcn_model\n    elif model_cfg.type == 'Unet':\n        u_net = U_Net(in_ch=model_cfg.input_channel, out_ch=model_cfg.num_classes)\n        return u_net\n    elif model_cfg.type == 'AttUnet':\n        attUnet = AttU_Net(img_ch=model_cfg.input_channel, output_ch=model_cfg.num_classes)\n        return attUnet\n    elif model_cfg.type == 'NestedUnet':\n        nestedUnet = NestedUNet(in_ch=model_cfg.input_channel, out_ch=model_cfg.num_classes)\n        return nestedUnet\n    elif model_cfg.type == 'SegNet':\n        segNet = SegNet(input_nbr=model_cfg.input_channel, label_nbr=model_cfg.num_classes)\n        return segNet\n    elif model_cfg.type == 'CheckPoint': # load a saved model checkpoint\n        model = torch.load(model_cfg.check_point_file)\n        return model\n\n    else:\n        raise Exception('No such model type!')\n","sub_path":"code/model/build_model.py","file_name":"build_model.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"518284903","text":"import os\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom collections import namedtuple, deque\n\nimport torch\n\nis_cuda = torch.cuda.is_available()\n\nif is_cuda: device = torch.device('cuda')\nelse: device = torch.device('cpu')\n\n\nclass ReplayBuffer(object):\n    \"\"\"Fixed-size buffer to store experience tuples.\"\"\"\n\n    def __init__(self, buffer_size, batch_size, seed):\n\n        self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n        self.seed = random.seed(seed)\n\n        self.memory = deque(maxlen=buffer_size)\n        self.batch_size = batch_size\n\n    def add(self, state, action, reward, next_state, done):\n        \"\"\"Add a new experience to buffer.\"\"\"\n\n        self.memory.append(self.experience(state, action, reward, next_state, done))\n\n    def sample(self):\n        \"\"\"Randomly sample a batch of experiences from memory.\"\"\"\n        experiences = random.sample(self.memory, k=self.batch_size)\n\n        states = torch.from_numpy(np.vstack([exp.state for exp in experiences if exp is not None])).float()\n        states = states.to(device)\n\n        actions = torch.from_numpy(np.vstack([exp.action for exp in experiences if exp is not None])).float()\n        actions = actions.to(device)\n\n        rewards = torch.from_numpy(np.vstack([exp.reward for exp in experiences if exp is not None])).float()\n        rewards = rewards.to(device)\n\n        next_states = torch.from_numpy(np.vstack([exp.next_state for exp in experiences if exp is not None])).float()\n        next_states = next_states.to(device)\n\n        dones = torch.from_numpy(np.vstack([exp.done for exp in experiences if exp is not None]).astype(np.uint8)).float()\n        dones = dones.to(device)\n\n        return (states, actions, rewards, next_states, dones)\n\n    def __len__(self):\n        \"\"\"Return the current size of internal 
memory.\"\"\"\n return len(self.memory)","sub_path":"02-deep-learning/03-reinforcement-learning/applications/unity-ml-agents/Tennis/buffer.py","file_name":"buffer.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"474700679","text":"#!/usr/bin/python\n'''\nStream data acquired from supported I2C sensors.\n\nCurrently Supports:\\n\n\nrefer to SENSORS.supported\n\n'''\nfrom __future__ import print_function\n\nfrom PSL_Apps.utilitiesClass import utilitiesClass\nfrom .templates import sensorGrid\n\n\nimport pyqtgraph as pg\nimport time,random,functools,sys\nimport numpy as np\n\n\nfrom PyQt4 import QtCore, QtGui\n\nparams = {\n'image' : 'sensors.png',\n'name':'Sensor\\nQuickView',\n'hint':'''\n\tDisplay values returned by sensors connected to the I2C input.
\n\tSupported sensors include MPU6050(3-axis Accel/gyro), TSL2561(luminosity),
\n\tHMC5883L(3-axis magnetometer), SHT21(humidity), BMP180(Pressure,Altitude),\n\tMLX90614(PAssive IR based thermometer) etc,\n\t'''\n}\n\nclass AppWindow(QtGui.QMainWindow, sensorGrid.Ui_MainWindow,utilitiesClass):\n\tdef __init__(self, parent=None,**kwargs):\n\t\tsuper(AppWindow, self).__init__(parent)\n\t\tself.setupUi(self)\n\t\tself.I=kwargs.get('I',None)\n\t\tif self.I:\n\t\t\tself.I.I2C.init()\n\t\t\tself.I.I2C.config(400e3)\n\t\tself.setWindowTitle(self.I.H.version_string+' : '+params.get('name','').replace('\\n',' ') )\n\n\t\tfrom PSL.SENSORS.supported import supported\n\t\tself.supported = supported\n\t\t#from PSL.sensorlist import sensors as sensorHints\n\t\t#self.hints = sensorHints\n\n\t\tself.foundSensors=[]\n\t\t\n\t\tself.looptimer = QtCore.QTimer()\n\t\tself.looptimer.timeout.connect(self.updateData)\n\t\tself.looptimer.start(20)\n\t\tself.deviceMenus=[]\n\t\tself.sensorWidgets=[]\n\t\tself.Running =True\n\n\tdef updateData(self):\n\t\tfor a in self.sensorWidgets:\n\t\t\tif a.autoRefresh.isChecked():\n\t\t\t\ta.read()\n\n\tdef autoScan(self):\n\t\tself.scan()\n\n\tdef scan(self):\n\t\tlst = self.I.I2C.scan()\n\t\tfor a in self.sensorWidgets:\n\t\t\ta.setParent(None)\n\t\tself.sensorWidgets=[]\n\t\tprint (lst)\n\t\t\n\t\trow=0;col=0;colLimit=3\n\t\tself.ExperimentLayout.setAlignment(QtCore.Qt.AlignTop)\n\t\tfor a in lst:\n\t\t\tcls=False\n\t\t\tcls_module = self.supported.get(a,None)\n\t\t\tif cls_module:\n\t\t\t\tcls = cls_module.connect(self.I.I2C)\n\t\t\t\tif cls:\n\t\t\t\t\tif col==colLimit:\n\t\t\t\t\t\tcol=0;row+=1\n\t\t\t\t\tnewSensor=self.sensorIcon(cls)\n\t\t\t\t\tself.ExperimentLayout.addWidget(newSensor,row,col)\n\t\t\t\t\tself.sensorWidgets.append(newSensor)\n\t\t\t\t\tcol+=1\n\n\t\t\t\n\tdef __del__(self):\n\t\tself.looptimer.stop()\n\t\tprint ('bye')\n\n\tdef closeEvent(self, event):\n\t\tself.looptimer.stop()\n\t\tself.finished=True\n\t\t\nif __name__ == \"__main__\":\n\tfrom PSL import sciencelab\n\tapp = QtGui.QApplication(sys.argv)\n\tmyapp = AppWindow(I=sciencelab.connect())\n\tmyapp.show()\n\tsys.exit(app.exec_())\n","sub_path":"seel_res/GUI/A_TandM/I_staticsensors.py","file_name":"I_staticsensors.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"562097188","text":"import os\n\nfrom numpy.testing import assert_almost_equal, assert_array_equal\n\nfrom rioxarray.raster_array import UNWANTED_RIO_ATTRS\n\nTEST_DATA_DIR = os.path.join(os.path.dirname(__file__), \"test_data\")\nTEST_INPUT_DATA_DIR = os.path.join(TEST_DATA_DIR, \"input\")\nTEST_COMPARE_DATA_DIR = os.path.join(TEST_DATA_DIR, \"compare\")\n\n\n# xarray.testing.assert_equal(input_xarray, compare_xarray)\ndef _assert_attrs_equal(input_xr, compare_xr, decimal_precision):\n \"\"\"check attrubutes that matter\"\"\"\n for attr in compare_xr.attrs:\n if attr == \"transform\":\n assert_almost_equal(\n tuple(input_xr.rio._cached_transform())[:6],\n compare_xr.attrs[attr][:6],\n decimal=decimal_precision,\n )\n elif (\n attr != \"_FillValue\"\n and attr not in UNWANTED_RIO_ATTRS\n and attr != \"creation_date\"\n ):\n try:\n assert_almost_equal(\n input_xr.attrs[attr],\n compare_xr.attrs[attr],\n decimal=decimal_precision,\n )\n except (TypeError, ValueError):\n assert input_xr.attrs[attr] == compare_xr.attrs[attr]\n\n\ndef _assert_xarrays_equal(\n input_xarray, compare_xarray, precision=7, skip_xy_check=False\n):\n _assert_attrs_equal(input_xarray, compare_xarray, precision)\n if hasattr(input_xarray, 
\"variables\"):\n # check coordinates\n for coord in input_xarray.coords:\n if coord in \"xy\":\n if not skip_xy_check:\n assert_almost_equal(\n input_xarray[coord].values,\n compare_xarray[coord].values,\n decimal=precision,\n )\n else:\n assert (\n input_xarray[coord].values == compare_xarray[coord].values\n ).all()\n\n for var in input_xarray.rio.vars:\n try:\n _assert_xarrays_equal(\n input_xarray[var], compare_xarray[var], precision=precision\n )\n except AssertionError:\n print(\"Error with variable {}\".format(var))\n raise\n else:\n try:\n assert_almost_equal(\n input_xarray.values, compare_xarray.values, decimal=precision\n )\n except AssertionError:\n where_diff = input_xarray.values != compare_xarray.values\n print(input_xarray.values[where_diff])\n print(compare_xarray.values[where_diff])\n raise\n _assert_attrs_equal(input_xarray, compare_xarray, precision)\n\n compare_fill_value = compare_xarray.attrs.get(\n \"_FillValue\", compare_xarray.encoding.get(\"_FillValue\")\n )\n input_fill_value = input_xarray.attrs.get(\n \"_FillValue\", input_xarray.encoding.get(\"_FillValue\")\n )\n assert_array_equal([input_fill_value], [compare_fill_value])\n assert \"grid_mapping\" in compare_xarray.attrs\n assert (\n input_xarray[input_xarray.attrs[\"grid_mapping\"]]\n == compare_xarray[compare_xarray.attrs[\"grid_mapping\"]]\n )\n for unwanted_attr in UNWANTED_RIO_ATTRS:\n assert unwanted_attr not in input_xarray.attrs\n","sub_path":"test/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":3322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"319275010","text":"from Models import TreeNode\n\nclass Solution:\n\t# @param root, a tree node\n\t# @return a list of lists of integers\n\tdef zigzagLevelOrder(self, root):\n\t\tcurrent, next = [], []\n\t\tresult = []\n\t\tif root:\n\t\t\tcurrent.append(root)\n\t\t\tresult.append([])\n\t\tis_forward = True\n\t\twhile current:\n\t\t\tfor node in current:\n\t\t\t\tresult[-1].append(node.val)\n\t\t\t\tif is_forward:\n\t\t\t\t\tif node.left: next.append(node.left)\n\t\t\t\t\tif node.right: next.append(node.right)\n\t\t\t\telse:\n\t\t\t\t\tif node.right: next.append(node.right)\n\t\t\t\t\tif node.left: next.append(node.left)\n\t\t\tif next:\n\t\t\t\tresult.append([])\n\t\t\tnext, current = [], next\n\t\t\tis_forward = not is_forward\n\t\treturn result","sub_path":"leetcode_python/103_Binary_Tree_Zigzag_Level_Order_Traversal.py","file_name":"103_Binary_Tree_Zigzag_Level_Order_Traversal.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"23487294","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom shoppingcart import ShoppingCart\nfrom forms import CartAddProductForm\nfrom shop.models import Product\n\n# Create your views here.\ndef shoppingcart_list(request):\n\t\"\"\"\n\tVista para el fichero list.html que muestra los contenidos el carrito de la compra\n\tAuthor: Carlos Li\n\t\"\"\"\n\tform = CartAddProductForm()\n\t_shoppingcart = ShoppingCart(request)\n\treturn render(request,'shoppingcart/list.html',\n\t\t{'shoppingcart': _shoppingcart,\n\t\t'form' : form})\n\ndef shoppingcart_add(request,product_id):\n\t\"\"\"\n\tvista para procesar un formulario para meter un producto en el carrito de la compra\n\tAuthor: Javier Gomez\n\t\"\"\"\n\ttry:\n\t\tproduct = 
Product.objects.get(id=product_id)\n\texcept ObjectDoesNotExist:\n\t\treturn redirect('product_list')\n\n\tshoppingcart=ShoppingCart(request)\n\tform = CartAddProductForm()\n\tif request.method == 'POST':\n\t\tform = CartAddProductForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tunits = form.cleaned_data['units']\n\t\t\tupdate = form.cleaned_data['update']\n\t\telse: \n\t\t\treturn redirect('product_list')\n\telse:\n\t\treturn redirect('product_list')\n\n\ttry:\n\t\tshoppingcart.addProduct(product=product,\n\t\t\tunits=units,\n\t\t\tupdate_units=update)\n\texcept:\n\t\treturn render(request, \"shop/error.html\", {'error' : \"not enough stock left for:\", 'products' : [product], 'category': None})\n\t\t\n\treturn redirect('shoppingcart_list')\n\ndef shoppingcart_remove(request,product_id):\n\t\"\"\"\n\tVista para procesar un formulario para eliminar un producto del carrito de la compra\n\tAuthor: Carlos Li\n\t\"\"\"\n\ttry:\n\t\tproduct = Product.objects.get(id=int(product_id))\n\texcept ObjectDoesNotExist:\n\t\treturn redirect('product_list')\n\tshoppingcart=ShoppingCart(request)\n\tshoppingcart.removeProduct(product)\n\n\treturn redirect('shoppingcart_list')\t","sub_path":"psi_4/onlineshop/shoppingcart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"574776749","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 18 18:52:10 2019\n\n@author: Jayanth\n\"\"\"\n\n# This script defines a GRU-RNN to map the cepstral components of the signal\n\n# This import makes Python use 'print' as in Python 3.x\n\nfrom __future__ import print_function\n# importing the required module \nimport matplotlib.pyplot as plt \nimport os\nimport h5py\nimport numpy as np\nfrom keras.layers import Dense, Dropout, GRU, CuDNNGRU,CuDNNLSTM\nfrom keras.layers.wrappers import TimeDistributed\nfrom keras.models import Sequential\nfrom keras.optimizers import RMSprop,SGD,Adam\nfrom tfglib import construct_table as ct, utils\n\n#######################\n# Sizes and constants #\n#######################\n# Batch shape\nbatch_size = 8\ntsteps = 50\ndata_dim = 40\n\n# Other constants\nepochs = 100\n# epochs = 25\n\n#############\n# Load data #\n#############\n# Switch to decide if datatable must be build or can be loaded from a file\nbuild_datatable = False\n\nprint('Starting...')\n\na=np.loadtxt('SF1_mfcc.txt')\ne=int(len(a)/40)\ns1_train_data=np.reshape(a,(e,40))\nb=np.loadtxt('TM1_mfcc.txt')\ne=int(len(b)/40)\nt1_train_data=np.reshape(b,(e,40))\nc=np.loadtxt('SF1_val_mfcc.txt')\ne=int(len(c)/40)\ns1_valid_data=np.reshape(c,(e,40))\nd=np.loadtxt('TM1_val_mfcc.txt')\ne=int(len(d)/40)\nt1_valid_data=np.reshape(d,(e,40))\ng=np.loadtxt('SF1_test_mfcc.txt')\ne=int(len(g)/40)\ns1_test_data=np.reshape(g,(e,40))\nh=np.loadtxt('TM1_test_mfcc.txt')\ne=int(len(h)/40)\nt1_test_data=np.reshape(h,(e,40))\n\ns1_train_data=s1_train_data[0:204000,:]\nt1_train_data=t1_train_data[0:204000,:]\ns1_valid_data=s1_valid_data[0:20400,:]\nt1_valid_data=t1_valid_data[0:20400,:]\n\n\n################\n# Prepare data #\n################\n# Take MCP parameter columns\nsrc_train_data = s1_train_data # Source data\ntrg_train_data = t1_train_data # Target data\n\nsrc_valid_data = s1_valid_data # Source data\ntrg_valid_data = t1_valid_data # Target data\n\nsrc_test_data = s1_test_data # Source data\ntrg_test_data = t1_test_data # Target data\n\n\n\n# Remove means and normalize\nsrc_train_mean = np.mean(src_train_data, 
axis=0) #axis0 =column\nsrc_train_std = np.std(src_train_data, axis=0)\n\nsrc_train_data = (src_train_data - src_train_mean) / src_train_std\nsrc_valid_data = (src_valid_data - src_train_mean) / src_train_std\nsrc_test_data = (src_test_data - src_train_mean) / src_train_std\n\ntrg_train_mean = np.mean(trg_train_data, axis=0)\ntrg_train_std = np.std(trg_train_data, axis=0)\n\ntrg_train_data = (trg_train_data - trg_train_mean) / trg_train_std\ntrg_valid_data = (trg_valid_data - trg_train_mean) / trg_train_std\n\n# Zero-pad and reshape data\nsrc_train_data = utils.reshape_lstm1(src_train_data, tsteps, data_dim)\nsrc_valid_data = utils.reshape_lstm1(src_valid_data, tsteps, data_dim)\nsrc_test_data = utils.reshape_lstm1(src_test_data, tsteps, data_dim)\n\ntrg_train_data = utils.reshape_lstm1(trg_train_data, tsteps, data_dim)\ntrg_valid_data = utils.reshape_lstm1(trg_valid_data, tsteps, data_dim)\n\n# Save training statistics\nwith h5py.File('Intermediate_results/mcp_train_stats.h5', 'w') as f:\n h5_src_train_mean = f.create_dataset(\"src_train_mean\", data=src_train_mean)\n h5_src_train_std = f.create_dataset(\"src_train_std\", data=src_train_std)\n h5_trg_train_mean = f.create_dataset(\"trg_train_mean\", data=trg_train_mean)\n h5_trg_train_std = f.create_dataset(\"trg_train_std\", data=trg_train_std)\n\n f.close()\n \n \n\n################\n# Define Model #\n################\n# Define an GRU-based RNN\nprint('Creating Model')\n'''model = Sequential()\n\nmodel.add(CuDNNGRU(units=70,\n batch_input_shape=(batch_size, tsteps, data_dim),\n return_sequences=True,\n stateful=True))\nmodel.add(Dropout(0.5))\nmodel.add(TimeDistributed(Dense(data_dim)))\n\nrmsprop = RMSprop(lr=0.0001)\nmodel.compile(loss='mse', optimizer=rmsprop, metrics=['accuracy'])'''\nmodel = Sequential()\nmodel.add(CuDNNGRU(units=70,\n batch_input_shape=(batch_size, tsteps, data_dim),\n return_sequences=True,\n stateful=True))\nmodel.add(Dropout(0.1))\n\nmodel.add(TimeDistributed(Dense(data_dim)))\n\nadam = Adam(lr=0.001)\nmodel.compile(loss='mse', optimizer=adam, metrics=['accuracy'])\n\n\n###############\n# Train model #\n###############\nprint('Training')\nepoch = list(range(epochs))\nloss = []\nval_loss = []\n\nfor i in range(epochs):\n print('Epoch', i, '/', epochs)\n history = model.fit(src_train_data,\n trg_train_data,\n batch_size=batch_size,\n verbose=1,\n epochs=1,\n shuffle=False,\n validation_data=(src_valid_data, trg_valid_data))\n\n loss.append(history.history['loss'])\n val_loss.append(history.history['val_loss'])\n\n model.reset_states()\n\nprint('Saving model')\nmodel.save_weights('Intermediate_results/mcp_weights.h5')\n\nwith open('Intermediate_results/mcp_model.json', 'w') as model_json:\n model_json.write(model.to_json())\n\nprint('Saving training results')\nwith h5py.File(os.path.join('training_results', 'baseline', 'mcp_history.h5'),\n 'w') as hist_file:\n hist_file.create_dataset('loss', data=loss,\n compression='gzip', compression_opts=9)\n hist_file.create_dataset('val_loss', data=val_loss,\n compression='gzip', compression_opts=9)\n hist_file.create_dataset('epoch', data=epoch, compression='gzip',\n compression_opts=9)\n\n hist_file.close()\n\nprint('========================' + '\\n' +\n '======= FINISHED =======' + '\\n' +\n '========================')\n\n\n \n# x axis values \nx = epoch\n# corresponding y axis values \ny = loss \n \n# plotting the points \nplt.plot(x, y) \n \n# naming the x axis \nplt.xlabel('x - axis') \n# naming the y axis \nplt.ylabel('y - axis') \n \n# giving a title to my graph 
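\n# plt.title(), like plt.xlabel() and plt.ylabel() above, acts on the current axes of the active figure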
\nplt.title('My first graph!') \n \n# function to show the plot \nplt.show() \n\n#exit()\n\n","sub_path":"Voice Conversion 2/SF1_TM1_MCP_training_1.py","file_name":"SF1_TM1_MCP_training_1.py","file_ext":"py","file_size_in_byte":5872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"120675838","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls.defaults import patterns, url\n\nfrom views import *\n\n\nurlpatterns = patterns('',\n url(r'^$', IndexView.as_view(),\n name=\"index\"),\n url(r'^(?P\\d+)/$', AssociationDetailView.as_view(), name=\"item\"),\n url(r'^(?P\\d+)/publications/$', PublicationsView.as_view(), name='publications'),\n)\n","sub_path":"apps/associations/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"296186283","text":"# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport numpy as np\nimport plotly.graph_objs as go\nimport statsmodels.api as sm\nfrom statsmodels.tsa.stattools import acf, pacf\nimport pandas as pd\n# from plotly.offline import plot, iplot, init_notebook_mode\n# init_notebook_mode(connected=True)\n\n\n@pd.api.extensions.register_dataframe_accessor(\"timeseriesplots\")\nclass timeseriesplots:\n \n def __init__(self, pandas_obj):\n self._obj = pandas_obj\n \n###############################################################################\n##FUNCTION: Gives a popup dialog box to select csv file\n##INPUT: None\n##OUTPUT: Dataframe selected by user\n###############################################################################\n def plotAcfPacf(self,dep_var,n_lags=1):\n \"\"\"\n This function calculate autocorrelation and partial autocorrelation \n for the given time series and plot them against number of lags. These \n graphs can be used to get values for autoregression(p) by partial \n autocorrelation plot and moving average(q) by autocorrelation plot. \n By counting the number of lags upto which correlation value is greater \n than standard error range. 
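For example (an illustrative reading, not computed from any dataset here): a\n        PACF that cuts off after lag 2 while the ACF tails off gradually suggests\n        p=2; the mirrored pattern (ACF cutting off at lag q, PACF tailing off)\n        suggests an MA(q) term.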
The order of differencing (d) is judged from stationarity rather than\n        heteroscedasticity: difference the series (diff) until its ACF/PACF\n        die out quickly.\n\n        Parameters\n        ----------\n        dep_var : 'str', required\n            column name of the series.\n        n_lags : 'int', optional (Default=1)\n            maximum number of desired lags.\n\n        Usage\n        ------\n        >>> acfPacf = df.timeseriesplots.plotAcfPacf('value', n_lags = 40)\n        >>> df['valueOneLevel'] = df['value'].diff()\n        >>> acfPacfdiff = df.timeseriesplots.plotAcfPacf('valueOneLevel', n_lags = 40)\n        >>>\n\n        \"\"\"\n        df = self._obj\n        # statsmodels' acf/pacf expect the lag count under the keyword 'nlags'\n        acf_values = acf(df[dep_var], nlags=n_lags)\n        trace_acf = go.Bar(x=np.array(range(n_lags+1)), y=acf_values)\n        data = [trace_acf]\n        layout = {\n            'title': \"ACF Plot\"\n        }\n        acf_fig = {'data': data, 'layout': layout}\n\n        pacf_values = pacf(df[dep_var], nlags=n_lags)\n        trace_pacf = go.Bar(x=np.array(range(n_lags+1)), y=pacf_values)\n        data = [trace_pacf]\n        layout = {\n            'title': \"PACF Plot\"\n        }\n        pacf_fig = {'data': data, 'layout': layout}\n\n        return acf_fig, pacf_fig\n\n        \"\"\"\n        Return\n        ------\n        Returns the ACF and PACF plots of the dependent variable\n        \"\"\"\n\n    def stl(self, dep_var, model='additive', filt=None, freq=None, two_sided=True, extrapolate_trend=0):\n        \"\"\"This function decomposes the series into trend, seasonality and\n        irregularity.\n\n        Parameters:\n        -----------\n        dep_var : 'str', required\n            The column name of the dependent variable\n        model : 'str' {\"additive\", \"multiplicative\"}, optional (Default=additive)\n            Type of seasonal component. Abbreviations are accepted.\n        filt : 'array', optional (Default=None)\n            The filter coefficients for filtering out the seasonal component.\n            The concrete moving average method used in filtering is determined by two_sided.\n        freq : 'int', optional (Default=None)\n            Frequency of the series. Must be used if x is not a pandas object.\n            Overrides default periodicity of x if x is a pandas\n            object with a timeseries index.\n        two_sided : 'bool', optional (Default=True)\n            The moving average method used in filtering.\n            If True (default), a centered moving average is computed using the filt.\n            If False, the filter coefficients are for past values only.\n        extrapolate_trend : 'int' or 'freq', optional (Default=0)\n            If set to > 0, the trend resulting from the convolution is\n            linear least-squares extrapolated on both ends (or the single one\n            if two_sided is False) considering this many (+1) closest points.\n            If set to 'freq', use `freq` closest points. 
Setting this parameter\n results in no NaN values in trend or resid components.\n\n Usage\n ------\n >>> trend,seasonality,irregularity = df.timeseriesplots.stl(['value'],model='additive',filt=None,freq=None,two_sided=True, extrapolate_trend=0)\n >>> \n \"\"\"\n df = self._obj\n # decomposing data into trend, sesonal and irregular component\n stlDecompose = sm.tsa.seasonal_decompose(df[dep_var], model=model, filt=filt, freq=freq, two_sided=two_sided, extrapolate_trend=extrapolate_trend)\n return stlDecompose.trend, stlDecompose.seasonal, stlDecompose.resid\n\n \"\"\"\n Return\n ------\n Returns the decomposition series for the trend, seasonality and residue component\n \"\"\"\n\n \n \n \n def stlFigure(self, trend, seasonal, resid):\n \"\"\"\n This function plot trend, seasonality and irregularity\n\n Parameters:\n -----------\n trend : pandas series, required\n trend series.\n seasonal : pandas series, required\n seasonal series.\n resid : pandas series, required\n Residual/ noise/ Irregularity series.\n \n Usage\n ------\n >>> trendFigure, seasonalityFigure, irregularityFigure = df.timeseriesplots.stlFigure(trend,seasonality,irregularity)\n >>> \n \n \"\"\"\n # setting diffrent component of data into figure\n data = [go.Scatter(x=trend.index,y=trend)]\n layout = go.Layout(\n title=\"Trend\",\n yaxis=dict(\n title=\"Trend\"\n ),\n xaxis=dict(\n title=\"Timeline\"\n )\n )\n trendPlot = go.Figure(data=data,layout=layout)\n data = [go.Scatter(x=seasonal.index,y=seasonal)]\n layout = go.Layout(\n title=\"Seasonality\",\n yaxis=dict(\n title=\"Seasonality\"\n ),\n xaxis=dict(\n title=\"Timeline\"\n )\n )\n seasonalPlot = go.Figure(data=data,layout=layout)\n data = [go.Scatter(x=resid.index,y=resid)]\n layout = go.Layout(\n title=\"Irregularity\",\n yaxis=dict(\n title=\"Irregularity\"\n ),\n xaxis=dict(\n title=\"Timeline\"\n )\n )\n irregularPlot = go.Figure(data=data,layout=layout)\n return trendPlot,seasonalPlot,irregularPlot\n\n \"\"\"\n Return\n ------\n Returns the decomposed visualization figures for the trend, seasonal and residual component\n \"\"\"\n\n # def plot(self, figure, filename = False, isNotebook = False):\n # \"\"\"\n # This function will plot, plotly figures\n # Paramenter:\n # -----------\n # self : A pandas DataFrame\n # figure : Plotly figure object (figure_objs)\n # filename : str object need to be provided to save graphs in current directory\n # isNotebook: boolean, optional. 
plot graph inside notebook if true\n # \"\"\"\n # config = {'showLink': False, 'displaylogo':False, 'modeBarButtonsToRemove':['sendDataToCloud']}\n # if isNotebook:\n # if filename!=False:\n # iplot(figure, filename=filename, config=config)\n # else:\n # iplot(figure, config=config)\n # else:\n # if filename!=False:\n # plot(figure, filename=filename, config=config)\n # else:\n # plot(figure, config=config)\n","sub_path":"Commodity Price Forecasting Master Codes/Aluminium/timeseriesplots.py","file_name":"timeseriesplots.py","file_ext":"py","file_size_in_byte":7777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"6137464","text":"import sys\nimport os\nfrom os import path\n\nfilename = input(\"Enter the name of the file: \")\nif not os.path.exists(filename):\n sys.exit(\"Cannot find file given\")\nfile = open(filename, \"r\")\n\n#read file line by line and print\nwhile 1:\n line = file.readline()\n print(line, end = \"\")\n if line == \"\":\n break\n","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"381049514","text":"import math\n\n\nclass Solution:\n\n @staticmethod\n def distance(item):\n return math.sqrt(pow(abs(item[0]),2) + pow(abs(item[1]), 2))\n\n def kClosest(self, points, K):\n re = []\n items = list(enumerate(points))\n # print(sorted(items,key=(lambda x:x[1][1])))\n for item in items:\n dis = self.distance(item[1])\n re.append([item, dis])\n result = sorted(re,key=(lambda x:x[1]))\n fn = []\n for i in range(K):\n fn.append(result[i][0][1])\n return fn\n\n\nif __name__ == \"__main__\":\n s = Solution()\n pointss = [[3, 3], [5, -1], [-2, 4]]\n KK = 2\n print(s.kClosest(pointss,KK))\n\n\n# 或者\n# def kClosest(self, points, K):\n# return sorted(points,key=(lambda x: (math.sqrt(pow(abs(x[0]), 2) + pow(abs(x[1]), 2)))))[:K]","sub_path":"973. 
最接近原点的 K 个点/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"274464250","text":" #coding: utf-8\r\n# +-------------------------------------------------------------------\r\n# | 宝塔Linux面板\r\n# +-------------------------------------------------------------------\r\n# | Copyright (c) 2015-2016 宝塔软件(http://bt.cn) All rights reserved.\r\n# +-------------------------------------------------------------------\r\n# | Author: 曹觉心 <314866873@qq.com>\r\n# +-------------------------------------------------------------------\r\nfrom BTPanel import session,cache\r\nimport public,os,json,time,apache,iis\r\nclass ajax:\r\n    def get_iis_status(self,get):\r\n        return iis.iis().get_iis_status(get)\r\n\r\n    def get_iis_request_list(self,get):\r\n        return iis.iis().get_iis_request_list(get)\r\n\r\n    #安装反向代理\r\n    def setup_iis_proxy(self,get):\r\n        a = iis.iis()\r\n        return a.setup_iis_proxy(get)\r\n\r\n    #获取反向代理安装状态\r\n    def get_iis_proxy_config(self,get):\r\n        a = iis.iis()\r\n        return a.get_iis_proxy_config(get)\r\n\r\n    #设置反向代理默认配置\r\n    def set_iis_proxy_config(self,get):\r\n        a = iis.iis()\r\n        return a.set_iis_proxy_config(get)\r\n\r\n    #mime类型\r\n    def get_mimes(self,get):\r\n        return iis.iis().get_mimes(get)\r\n\r\n    #mime类型\r\n    def add_mimes(self,get):\r\n        return iis.iis().add_mimes(get)\r\n\r\n    #mime类型\r\n    def del_mimes(self,get):\r\n        return iis.iis().del_mimes(get)\r\n\r\n    def get_iis_request(self,get):\r\n        try:\r\n            import win32com.client,pythoncom\r\n            try:\r\n                import wmi\r\n            except :\r\n                os.system(public.get_run_pip('[PIP] install wmi'))\r\n                import wmi\r\n\r\n            pythoncom.CoInitialize()\r\n            vim_obj = win32com.client.GetObject(\"winmgmts:/root/cimv2\")\r\n            iiscon = vim_obj.ExecQuery('Select Name,TotalBytesReceived,TotalBytesSent,TotalBytesTransferred,TotalMethodRequests,TotalGetRequests,TotalPostRequests,CurrentConnections,MaximumConnections From Win32_PerfFormattedData_W3SVC_WebService where name=\"_Total\"')\r\n            data = {}\r\n            for item in iiscon:\r\n                data['TotalBytesReceived'] = item.TotalBytesReceived\r\n                data['TotalBytesSent'] = item.TotalBytesSent\r\n                data['TotalBytesTransferred'] = item.TotalBytesTransferred\r\n                data['TotalMethodRequests'] = item.TotalMethodRequests\r\n                data['TotalGetRequests'] = item.TotalGetRequests\r\n                data['TotalPostRequests'] = item.TotalPostRequests\r\n                data['CurrentConnections'] = item.CurrentConnections\r\n                data['MaximumConnections'] = item.MaximumConnections\r\n\r\n            return data\r\n        except :\r\n            return public.returnMsg(False,\"获取IIS负载失败,请稍后重试。\")\r\n\r\n    def GetApacheStatus(self,get):\r\n        a = apache.apache()\r\n        return a.GetApacheStatus()\r\n\r\n    #cgi管理器\r\n    def get_nginx_cig_admin(self,get):\r\n        ini_path = os.getenv(\"BT_SETUP\") + '/nginx/config.ini'\r\n        if not os.path.exists(ini_path): return public.returnMsg(False,\"配置文件不存在.\")\r\n\r\n        import re\r\n        data = {}\r\n        conf = public.readFile(ini_path);\r\n        data['php_versions'] = re.search('php_versions\\\\s?=(.+)',conf).groups()[0].strip();\r\n        data['php_cgi_thread'] = re.search('php_cgi_thread\\\\s?=(.+)',conf).groups()[0].strip();\r\n\r\n        return data;\r\n\r\n    def set_nginx_cig_admin(self,get):\r\n        ini_path = os.getenv(\"BT_SETUP\") + '/nginx/config.ini'\r\n        if not os.path.exists(ini_path): \r\n            return public.returnMsg(False,\"配置文件不存在.\") \r\n\r\n        import re\r\n        conf = public.readFile(ini_path) \r\n        conf = 
re.sub('php_versions\\s?=.+','php_versions = ' + get.php_versions,conf);\r\n conf = re.sub('php_cgi_thread\\s?=.+','php_cgi_thread = ' + get.php_cgi_thread,conf);\r\n public.writeFile(ini_path,conf)\r\n public.serviceReload();\r\n return public.returnMsg(True,\"修改成功.\") \r\n\r\n \r\n def GetNginxStatus(self,get):\r\n #取Nginx负载状态\r\n worker = 0;\r\n workermen = 0\r\n if public.get_server_status('nginx') < 0: return public.returnMsg(False,\"获取失败,服务未启动.\") \r\n\r\n result = public.HttpGet('http://127.0.0.1/nginx_status')\r\n tmp = result.split()\r\n if len(tmp) < 8: return public.returnMsg(False,\"获取失败,可能服务未启动,通过以下方式排除错误:
 1、检查80端口是否被占用
2、检查配置文件是否存在错误\") \r\n \r\n data = {}\r\n if \"request_time\" in tmp:\r\n data['accepts'] = tmp[8]\r\n data['handled'] = tmp[9]\r\n data['requests'] = tmp[10]\r\n data['Reading'] = tmp[13]\r\n data['Writing'] = tmp[15]\r\n data['Waiting'] = tmp[17]\r\n else:\r\n data['accepts'] = tmp[9]\r\n data['handled'] = tmp[7]\r\n data['requests'] = tmp[8]\r\n data['Reading'] = tmp[11]\r\n data['Writing'] = tmp[13]\r\n data['Waiting'] = tmp[15]\r\n data['active'] = tmp[2]\r\n data['worker'] = worker\r\n\r\n return data\r\n \r\n def GetPHPStatus(self,get):\r\n #取指定PHP版本的负载状态\r\n return public.returnMsg(False,\"暂不支持!\") \r\n \r\n def CheckStatusConf(self):\r\n return public.returnMsg(False,\"暂不支持!\") \r\n \r\n \r\n def GetTaskCount(self,get):\r\n #取任务数量\r\n return public.M('tasks').where(\"status!=?\",('1',)).count()\r\n \r\n def GetSoftList(self,get):\r\n #取软件列表\r\n import json,os\r\n tmp = public.readFile('data/softList.conf').replace(\"{SETUP_PATH}\",public.format_path(os.getenv(\"BT_SETUP\")));\r\n data = json.loads(tmp)\r\n for x in data['webs']:\r\n if x['name'] == 'IIS':\r\n x['versions'] = [{'status':False,'version':self.get_iis_verison()}]\r\n return data\r\n \r\n def get_iis_verison(self):\r\n import re\r\n sys_vs = public.get_sys_version() \r\n if int(sys_vs[0]) == 6:\r\n version = '7.5'\r\n if int(sys_vs[1]) >= 2: \r\n version = '8.5' \r\n status = public.get_server_status('w3svc') \r\n if status > -1:\r\n version = public.ReadReg('SOFTWARE\\\\Microsoft\\\\InetStp','SetupString')\r\n if version: \r\n version = re.search('(\\d+\\.\\d+)',version).groups()[0]\r\n\r\n elif int(sys_vs[0]) == 10:\r\n version = '10.0'\r\n return version\r\n \r\n def GetLibList(self,get):\r\n #取插件列表\r\n import json,os\r\n tmp = public.readFile('data/libList.conf');\r\n data = json.loads(tmp)\r\n for i in range(len(data)):\r\n data[i]['status'] = self.CheckLibInstall(data[i]['check']);\r\n data[i]['optstr'] = self.GetLibOpt(data[i]['status'], data[i]['opt']);\r\n return data\r\n \r\n def CheckLibInstall(self,checks):\r\n for cFile in checks:\r\n if os.path.exists(cFile): return '已安装';\r\n return '未安装';\r\n \r\n #取插件操作选项\r\n def GetLibOpt(self,status,libName):\r\n optStr = '';\r\n if status == '未安装':\r\n optStr = '安装';\r\n else:\r\n libConfig = '配置';\r\n if(libName == 'beta'): libConfig = '内测资料';\r\n \r\n optStr = ''+libConfig+' | 卸载';\r\n return optStr;\r\n \r\n #取插件AS\r\n def GetQiniuAS(self,get):\r\n filename = public.GetConfigValue('setup_path') + '/panel/data/'+get.name+'As.conf';\r\n if not os.path.exists(filename): public.writeFile(filename,'');\r\n data = {}\r\n data['AS'] = public.readFile(filename).split('|');\r\n data['info'] = self.GetLibInfo(get.name);\r\n if len(data['AS']) < 3:\r\n data['AS'] = ['','','',''];\r\n return data;\r\n\r\n\r\n #设置插件AS\r\n def SetQiniuAS(self,get):\r\n info = self.GetLibInfo(get.name);\r\n filename = public.GetConfigValue('setup_path') + '/panel/data/'+get.name+'As.conf';\r\n conf = get.access_key.strip() + '|' + get.secret_key.strip() + '|' + get.bucket_name.strip() + '|' + get.bucket_domain.strip();\r\n public.writeFile(filename,conf);\r\n public.ExecShell(\"chmod 600 \" + filename)\r\n\r\n result = public.ExecShell(public.get_run_python(\"[PYTHON] \" + public.GetConfigValue('setup_path') + \"/panel/script/backup_\"+get.name+\".py list\"))\r\n \r\n if result[0].find(\"ERROR:\") == -1: \r\n public.WriteLog(\"插件管理\", \"设置插件[\"+info['name']+\"]AS!\");\r\n return public.returnMsg(True, '设置成功!');\r\n return public.returnMsg(False, 'ERROR: 
无法连接到'+info['name']+'服务器,请检查[AK/SK/存储空间]设置是否正确!');\r\n \r\n #设置内测\r\n def SetBeta(self,get):\r\n data = {}\r\n data['username'] = get.bbs_name\r\n data['qq'] = get.qq\r\n data['email'] = get.email\r\n result = public.httpPost(public.GetConfigValue('home') + '/Api/WindowsBeta',data);\r\n import json;\r\n data = json.loads(result);\r\n if data['status']:\r\n public.writeFile('data/beta.pl',get.bbs_name + '|' + get.qq + '|' + get.email);\r\n return data;\r\n #取内测资格状态\r\n def GetBetaStatus(self,get):\r\n try:\r\n return public.readFile('data/beta.pl').strip();\r\n except:\r\n return 'False';\r\n \r\n\r\n #获取指定插件信息\r\n def GetLibInfo(self,name):\r\n import json\r\n tmp = public.readFile('data/libList.conf');\r\n data = json.loads(tmp)\r\n for lib in data:\r\n if name == lib['opt']: return lib;\r\n return False;\r\n\r\n #获取文件列表\r\n def GetQiniuFileList(self,get):\r\n try:\r\n import json \r\n \r\n result = public.ExecShell(public.get_run_python(\"[PYTHON] \" + public.GetConfigValue('setup_path') + \"/panel/script/backup_\"+get.name+\".py list\"))\r\n return json.loads(result[0]);\r\n except:\r\n return public.returnMsg(False, '获取列表失败,请检查[AK/SK/存储空间]设是否正确!');\r\n\r\n \r\n \r\n #取网络连接列表\r\n def GetNetWorkList(self,get):\r\n import psutil\r\n netstats = psutil.net_connections()\r\n networkList = []\r\n for netstat in netstats:\r\n tmp = {}\r\n if netstat.type == 1:\r\n tmp['type'] = 'tcp'\r\n else:\r\n tmp['type'] = 'udp'\r\n tmp['family'] = netstat.family\r\n tmp['laddr'] = netstat.laddr\r\n tmp['raddr'] = netstat.raddr\r\n tmp['status'] = netstat.status\r\n p = psutil.Process(netstat.pid)\r\n tmp['process'] = p.name()\r\n tmp['pid'] = netstat.pid\r\n networkList.append(tmp)\r\n del(p)\r\n del(tmp)\r\n networkList = sorted(networkList, key=lambda x : x['status'], reverse=True);\r\n return networkList;\r\n \r\n #取进程列表\r\n def GetProcessList(self,get):\r\n import psutil,pwd\r\n Pids = psutil.pids();\r\n \r\n processList = []\r\n for pid in Pids:\r\n try:\r\n tmp = {}\r\n p = psutil.Process(pid);\r\n if p.exe() == \"\": continue;\r\n \r\n tmp['name'] = p.name(); #进程名称\r\n if self.GoToProcess(tmp['name']): continue;\r\n \r\n \r\n tmp['pid'] = pid; #进程标识\r\n tmp['status'] = p.status(); #进程状态\r\n tmp['user'] = p.username(); #执行用户\r\n cputimes = p.cpu_times()\r\n tmp['cpu_percent'] = p.cpu_percent(0.1);\r\n tmp['cpu_times'] = cputimes.user #进程占用的CPU时间\r\n tmp['memory_percent'] = round(p.memory_percent(),3) #进程占用的内存比例\r\n pio = p.io_counters()\r\n tmp['io_write_bytes'] = pio.write_bytes #进程总共写入字节数\r\n tmp['io_read_bytes'] = pio.read_bytes #进程总共读取字节数\r\n tmp['threads'] = p.num_threads() #进程总线程数\r\n \r\n processList.append(tmp);\r\n del(p)\r\n del(tmp)\r\n except:\r\n continue;\r\n import operator\r\n processList = sorted(processList, key=lambda x : x['memory_percent'], reverse=True);\r\n processList = sorted(processList, key=lambda x : x['cpu_times'], reverse=True);\r\n return processList\r\n \r\n #结束指定进程\r\n def KillProcess(self,get):\r\n #return public.returnMsg(False,'演示服务器,禁止此操作!');\r\n import psutil\r\n p = psutil.Process(int(get.pid));\r\n name = p.name();\r\n if name == 'python': return public.returnMsg(False,'KILL_PROCESS_ERR');\r\n \r\n p.kill();\r\n public.WriteLog('TYPE_PROCESS','KILL_PROCESS',(get.pid,name));\r\n return public.returnMsg(True,'KILL_PROCESS',(get.pid,name));\r\n \r\n def GoToProcess(self,name):\r\n ps = 
['sftp-server','login','nm-dispatcher','irqbalance','qmgr','wpa_supplicant','lvmetad','auditd','master','dbus-daemon','tapdisk','sshd','init','ksoftirqd','kworker','kmpathd','kmpath_handlerd','python','kdmflush','bioset','crond','kthreadd','migration','rcu_sched','kjournald','iptables','systemd','network','dhclient','systemd-journald','NetworkManager','systemd-logind','systemd-udevd','polkitd','tuned','rsyslogd']\r\n \r\n for key in ps:\r\n if key == name: return True\r\n \r\n return False\r\n \r\n \r\n def GetNetWorkIo(self,get):\r\n #取指定时间段的网络Io\r\n data = public.M('network').dbfile('system').where(\"addtime>=? AND addtime<=?\",(get.start,get.end)).field('id,up,down,total_up,total_down,down_packets,up_packets,addtime').order('id asc').select()\r\n return self.ToAddtime(data);\r\n \r\n def GetDiskIo(self,get):\r\n #取指定时间段的磁盘Io\r\n data = public.M('diskio').dbfile('system').where(\"addtime>=? AND addtime<=?\",(get.start,get.end)).field('id,read_count,write_count,read_bytes,write_bytes,read_time,write_time,addtime').order('id asc').select()\r\n return self.ToAddtime(data);\r\n def GetCpuIo(self,get):\r\n #取指定时间段的CpuIo\r\n data = public.M('cpuio').dbfile('system').where(\"addtime>=? AND addtime<=?\",(get.start,get.end)).field('id,pro,mem,addtime').order('id asc').select()\r\n\r\n return self.ToAddtime(data,True);\r\n \r\n def get_load_average(self,get):\r\n data = public.M('load_average').dbfile('system').where(\"addtime>=? AND addtime<=?\",(get.start,get.end)).field('id,pro,one,five,fifteen,addtime').order('id asc').select()\r\n return self.ToAddtime(data);\r\n \r\n \r\n def ToAddtime(self,data,tomem = False):\r\n import time\r\n #格式化addtime列\r\n \r\n if tomem:\r\n import psutil\r\n mPre = (psutil.virtual_memory().total / 1024 / 1024) / 100\r\n length = len(data);\r\n he = 1;\r\n if length > 100: he = 1;\r\n if length > 1000: he = 3;\r\n if length > 10000: he = 15;\r\n if he == 1:\r\n for i in range(length):\r\n try:\r\n data[i]['addtime'] = time.strftime('%m/%d %H:%M',time.localtime(float(data[i]['addtime'])))\r\n if tomem and data[i]['mem'] > 100: data[i]['mem'] = data[i]['mem'] / mPre\r\n except : pass\r\n\r\n return data\r\n else:\r\n count = 0;\r\n tmp = []\r\n for value in data:\r\n if count < he: \r\n count += 1;\r\n continue;\r\n value['addtime'] = time.strftime('%m/%d %H:%M',time.localtime(float(value['addtime'])))\r\n if tomem and value['mem'] > 100: value['mem'] = value['mem'] / mPre\r\n tmp.append(value);\r\n count = 0;\r\n return tmp;\r\n \r\n def GetInstalleds(self,softlist):\r\n softs = '';\r\n for soft in softlist['data']:\r\n try:\r\n for v in soft['versions']:\r\n if v['status']: softs += soft['name'] + '-' + v['version'] + '|';\r\n except:\r\n pass\r\n return softs;\r\n \r\n #获取SSH爆破次数\r\n def get_ssh_intrusion(self):\r\n \r\n return 0;\r\n \r\n def UpdatePanel(self,get):\r\n #try: \r\n #取回远程版本信息\r\n if 'updateInfo' in session and hasattr(get,'check') == False:\r\n updateInfo = session['updateInfo'];\r\n else:\r\n login_temp = 'data/login.temp';\r\n if os.path.exists(login_temp):\r\n logs = public.readFile(login_temp)\r\n os.remove(login_temp);\r\n else:\r\n logs = '';\r\n import psutil,panelPlugin,system;\r\n mem = psutil.virtual_memory();\r\n mplugin = panelPlugin.panelPlugin();\r\n mplugin.ROWS = 10000;\r\n panelsys = system.system();\r\n data = {}\r\n data['sites'] = str(public.M('sites').count());\r\n data['ftps'] = str(public.M('ftps').count());\r\n data['databases'] = str(public.M('databases').count());\r\n\r\n data['system'] = panelsys.GetSystemVersion() + '|' + 
str(mem.total / 1024 / 1024) + 'MB|' + public.getCpuType() + '*' + str(psutil.cpu_count()) + '|' + public.get_webserver() + '|' +session['version'];\r\n data['system'] += '||'+self.GetInstalleds(mplugin.getPluginList(None));\r\n data['logs'] = logs\r\n data['oem'] = ''\r\n data['intrusion'] = self.get_ssh_intrusion();\r\n msg = public.getMsg('PANEL_UPDATE_MSG');\r\n sUrl = public.GetConfigValue('home') + '/api/wpanel/updateWindows';\r\n \r\n updateInfo = json.loads(public.httpPost(sUrl,data));\r\n \r\n if not updateInfo: return public.returnMsg(False,\"CONNECT_ERR\");\r\n updateInfo['msg'] = msg;\r\n session['updateInfo'] = updateInfo;\r\n\r\n #检查是否需要升级\r\n if updateInfo['is_beta'] == 1:\r\n if updateInfo['beta']['version'] == session['version']: return public.returnMsg(False,updateInfo);\r\n else:\r\n if updateInfo['version'] == session['version']: return public.returnMsg(False,updateInfo);\r\n\r\n #是否执行升��程序 \r\n if(updateInfo['force'] == True or hasattr(get,'toUpdate') == True or os.path.exists('data/autoUpdate.pl') == True):\r\n if updateInfo['is_beta'] == 1: updateInfo['version'] = updateInfo['beta']['version']\r\n setupPath = public.GetConfigValue('setup_path');\r\n uptype = 'panel';\r\n httpUrl = public.get_url();\r\n \r\n if httpUrl: updateInfo['downUrl'] = httpUrl + '/win/panel/' + uptype + '_' + updateInfo['version'] + '.zip';\r\n \r\n public.downloadFile(updateInfo['downUrl'],setupPath + '/panel.zip');\r\n if os.path.getsize(setupPath + '/panel.zip') < 1048576: return public.returnMsg(False,\"PANEL_UPDATE_ERR_DOWN\"); \r\n\r\n #处理临时文件目录 \r\n tmpPath = (setupPath + \"/temp/panel\")\r\n tcPath = (tmpPath + '\\class').replace('/','\\\\')\r\n \r\n if not os.path.exists(tmpPath): os.makedirs(tmpPath) \r\n import shutil\r\n if os.path.exists(tcPath): shutil.rmtree(tcPath)\r\n\r\n #解压到临时目录\r\n import zipfile\r\n zip_file = zipfile.ZipFile(setupPath + '/panel.zip') \r\n for names in zip_file.namelist(): \r\n try:\r\n zip_file.extract(names,tmpPath) \r\n except : pass \r\n zip_file.close()\r\n\r\n time.sleep(0.2);\r\n for name in os.listdir(tcPath): \r\n try:\r\n if name.find('cp36-win_amd64.pyd') >=0:\r\n oldName = os.path.join(tcPath,name);\r\n newName = os.path.join(tcPath,name.replace('.cp36-win_amd64.pyd','.pyd'))\r\n\r\n if not os.path.exists(newName):os.rename(oldName,newName)\r\n except :pass\r\n \r\n #过滤文件\r\n file_list = ['config/config.json','config/index.json','data/libList.conf','data/plugin.json']\r\n for ff_path in file_list:\r\n if os.path.exists(tmpPath + '/' + ff_path): os.remove(tmpPath + '/' + ff_path) \r\n\r\n os.system(\"taskkill /im BtTools.exe /f\")\r\n\r\n #兼容不同版本工具箱\r\n toolPath = tmpPath + '/script/BtTools.exe'\r\n if os.path.exists(toolPath):\r\n os.remove(toolPath)\r\n netV = ''\r\n if os.path.exists('data/net'): netV = public.readFile('data/net') \r\n public.downloadFile(httpUrl + '/win/panel/BtTools' + netV + '.exe',toolPath);\r\n\r\n #处理面板程序目录文件\r\n pPath = setupPath + '/panel' \r\n cPath = (pPath + '/class').replace('/','\\\\')\r\n os.system(\"del /s %s\\*.pyc\" % cPath)\r\n os.system(\"del /s %s\\*.pyt\" % cPath)\r\n for name in os.listdir(cPath):\r\n try:\r\n if name.find('.pyd') >=0: os.rename(os.path.join(cPath,name),os.path.join(cPath,name.replace('.pyd','.pyt'))) \r\n except : pass\r\n \r\n tmpPath = tmpPath.replace(\"/\",\"\\\\\")\r\n panelPath = (setupPath+\"/panel\").replace(\"/\",\"\\\\\")\r\n os.system(\"xcopy /s /c /e /y /r %s %s\" % (tmpPath,panelPath))\r\n \r\n session['version'] = updateInfo['version']\r\n if 'getCloudPlugin' in session: 
del(session['getCloudPlugin']);\r\n if updateInfo['is_beta'] == 1: self.to_beta()\r\n \r\n if os.path.exists(setupPath + '/panel.zip'):os.remove(setupPath + \"/panel.zip\")\r\n return public.returnMsg(True,'PANEL_UPDATE',(updateInfo['version'],));\r\n \r\n #输出新版本信息\r\n data = {\r\n 'status' : True,\r\n 'version': updateInfo['version'],\r\n 'updateMsg' : updateInfo['updateMsg']\r\n };\r\n \r\n return public.returnMsg(True,updateInfo);\r\n #except Exception as ex:\r\n # return public.returnMsg(False,\"更新失败 --> \" + str(ex));\r\n \r\n def to_beta(self):\r\n try:\r\n userInfo = json.loads(public.ReadFile('data/userInfo.json'))\r\n p_data = {}\r\n p_data['uid'] = userInfo['uid'];\r\n p_data['access_key'] = userInfo['access_key']\r\n p_data['username'] = userInfo['username']\r\n public.HttpPost(public.GetConfigValue('home') + '/api/wpanel/to_beta',p_data,5)\r\n except: pass\r\n\r\n #检查是否安装任何\r\n def CheckInstalled(self,get):\r\n checks = ['nginx','apache','W3SVC','FileZilla Server','mysql','MSSQLSERVER','phpmyadmin','php'];\r\n import os\r\n for name in checks:\r\n if name == 'phpmyadmin':\r\n filename = os.getenv(\"BT_SETUP\") + '/' + name\r\n if os.path.exists(filename): return True;\r\n elif name == 'php':\r\n filename = os.getenv(\"BT_SETUP\") + '/' + name\r\n if os.path.exists(filename): \r\n dirs = os.listdir(filename)\r\n if len(dirs) > 0: return True;\r\n else:\r\n status = public.get_server_status(name)\r\n if status >= 0:\r\n if name == 'W3SVC':\r\n if os.path.exists('data/iis.setup'):\r\n return True;\r\n else:\r\n return True;\r\n return False;\r\n \r\n #取已安装软件列表\r\n def GetInstalled(self,get):\r\n import system\r\n data = system.system().GetConcifInfo()\r\n return data;\r\n \r\n #取PHP配置\r\n def GetPHPConfig(self,get):\r\n import re,json\r\n \r\n filename = public.GetConfigValue('setup_path') + '/php/' + get.version + '/php.ini'\r\n if not os.path.exists(filename): return public.returnMsg(False,'PHP_NOT_EXISTS');\r\n phpini = public.readFile(filename);\r\n \r\n data = {}\r\n rep = \"disable_functions\\s*=\\s{0,1}(.*)\"\r\n tmp = re.search(rep,phpini).groups();\r\n data['disable_functions'] = tmp[0];\r\n \r\n rep = \"upload_max_filesize\\s*=\\s*([0-9]+)(M|m|K|k)\"\r\n tmp = re.search(rep,phpini).groups()\r\n data['max'] = tmp[0]\r\n \r\n rep = u\"\\n;*\\s*cgi\\.fix_pathinfo\\s*=\\s*([0-9]+)\\s*\\n\"\r\n tmp = re.search(rep,phpini).groups()\r\n \r\n if tmp[0] == '0':\r\n data['pathinfo'] = False\r\n else:\r\n data['pathinfo'] = True\r\n self.getCloudPHPExt(get)\r\n phplib = json.loads(public.readFile('data/phplib.win'));\r\n libs = [];\r\n tasks = public.M('tasks').where(\"status!=?\",('1',)).field('status,name').select()\r\n for lib in phplib:\r\n lib['task'] = '1';\r\n for task in tasks:\r\n tmp = public.getStrBetween('[',']',task['name'])\r\n if not tmp:continue;\r\n tmp1 = tmp.split('-');\r\n if tmp1[0].lower() == lib['name'].lower():\r\n lib['task'] = task['status'];\r\n lib['phpversions'] = []\r\n lib['phpversions'].append(tmp1[1])\r\n check_lib = re.search('\\n+;?\\w+.*' + lib['check'].replace('[version]',get.version) + '\\s*',phpini)\r\n lib['status'] = False\r\n if check_lib:\r\n if check_lib.group().find(\";\") < 0:\r\n lib['status'] = True \r\n \r\n libs.append(lib)\r\n \r\n data['libs'] = libs;\r\n return data\r\n \r\n #获取当前下载节点\r\n def get_download_url(self):\r\n url = cache.get('download_url')\r\n if not url:\r\n cache.set('download_url',url,1800)\r\n return url\r\n\r\n #获取PHP扩展\r\n def getCloudPHPExt(self,get):\r\n import json\r\n try:\r\n if 'php_ext' in session: return 
True\r\n \r\n download_url = self.get_download_url() + '/install/lib/phplib.json'\r\n tstr = public.httpGet(download_url)\r\n data = json.loads(tstr);\r\n if not data: return False;\r\n public.writeFile('data/phplib.conf',json.dumps(data));\r\n session['php_ext'] = True\r\n return True;\r\n except:\r\n return False;\r\n \r\n #Get PHPINFO information\r\n def GetPHPInfo(self,get):\r\n version = get.version\r\n ext_path = os.getenv(\"BT_SETUP\") + '/php/' + version + '/php.exe'\r\n if os.path.exists(ext_path):\r\n rRet = public.ExecShell('%s -r \"echo phpinfo();\"' % ext_path,None,None,None,True)\r\n return public.returnMsg(True,rRet[0]); \r\n return public.returnMsg(False,\"Please install PHP \"+version); \r\n \r\n #Check the PHPINFO configuration\r\n def CheckPHPINFO(self):\r\n php_versions = ['52','53','54','55','56','70','71','72','73','74','75'];\r\n return public.returnMsg(False,\"Not supported yet!\") \r\n \r\n \r\n #Clear logs\r\n def delClose(self,get):\r\n public.M('logs').where('id>?',(0,)).delete();\r\n public.WriteLog('TYPE_CONFIG','LOG_CLOSE');\r\n return public.returnMsg(True,'LOG_CLOSE');\r\n \r\n #Configure phpMyAdmin\r\n def setPHPMyAdmin(self,get):\r\n if hasattr(get,'phpversion'):\r\n version = get.phpversion\r\n phpmyadmin_version = public.readFile(os.getenv(\"BT_SETUP\") + '/phpmyadmin/version.pl')\r\n if phpmyadmin_version == '4.0' and (version != '53' and version != '54'):\r\n return public.returnMsg(False,'phpmyadmin 4.0 only supports php5.3/5.4'); \r\n \r\n import panelSite\r\n get.siteName = 'phpmyadmin'\r\n get.version = version\r\n panelSite.panelSite().SetPHPVersion(get)\r\n\r\n return public.returnMsg(True,'Successfully changed the php version of phpmyadmin to [php-' + version + ']');\r\n elif hasattr(get,'port'): \r\n import panelSite\r\n panel_site = panelSite.panelSite()\r\n\r\n get.siteName = 'phpmyadmin'\n try:\r\n phpVersion = panel_site.GetSitePHPVersion(get)['phpversion']\r\n except :\r\n phpVersion = '00'\n \r\n port = get.port\r\n \r\n mainPort = public.readFile('data/port.pl').strip();\r\n rulePort = ['80','443','21','20','8080','8081','8089','11211','6379',mainPort]\r\n if port in rulePort:\r\n return public.returnMsg(False,'AJAX_PHPMYADMIN_PORT_ERR');\r\n\r\n #Delete first, then add\r\n webserver = public.GetWebServer() \r\n if webserver == 'iis':\r\n _appcmd = os.getenv(\"SYSTEMDRIVE\") + '\\\\Windows\\\\System32\\\\inetsrv\\\\appcmd.exe'\r\n public.ExecShell(_appcmd + ' delete site \"phpmyadmin\"')\r\n public.ExecShell(_appcmd + ' delete apppool \"phpmyadmin\"')\r\n elif webserver == 'apache': \r\n confPath = panel_site.get_conf_path(get.siteName)\r\n if os.path.exists(confPath): os.remove(confPath)\r\n \r\n path = os.getenv(\"BT_SETUP\") + '\\\\phpmyadmin\\\\' + public.readFile('data/phpmyadminDirName.pl')\r\n \r\n siteObj = { 'siteName' : 'phpmyadmin' ,'siteDomain': '' ,'sitePort': port,'sitePath':path ,'phpVersion':phpVersion,'type':'PHP' }\r\n if webserver == 'iis': \r\n result = panel_site.iisAdd(siteObj)\r\n elif webserver == 'apache':\r\n result = panel_site.apacheAdd(siteObj)\r\n print(result)\r\n #Allow the port through the firewall\r\n __version = public.get_sys_version() \r\n ps = \"phpmyadmin port\"\r\n if public.M('firewall').where(\"port=?\",(port,)).count() <= 0:\r\n shell = 'netsh firewall set portopening tcp '+ port.replace(':','-') +' '+ ps\r\n if int(__version[0]) == 6: \r\n shell = 'netsh advfirewall firewall add rule name='+ ps +' dir=in action=allow protocol=tcp localport=' + port.replace(':','-')\r\n result = public.ExecShell(shell);\r\n public.WriteLog(\"TYPE_FIREWALL\", 'FIREWALL_ACCEPT_PORT',(port,))\r\n addtime = time.strftime('%Y-%m-%d %X',time.localtime())\r\n 
public.M('firewall').add('port,ps,addtime',(port,ps,addtime))\r\n\r\n return public.returnMsg(True,'Changed the phpmyadmin port to [' + port + ']');\r\n elif hasattr(get,'status'):\r\n pass\r\n return public.returnMsg(False,\"Not supported yet!\") \r\n\r\n def ToPunycode(self,get):\r\n import re;\r\n get.domain = get.domain.encode('utf8');\r\n tmp = get.domain.split('.');\r\n newdomain = '';\r\n for dkey in tmp:\r\n #Match non-ASCII characters\r\n match = re.search(u\"[\\x80-\\xff]+\",dkey);\r\n if not match:\r\n newdomain += dkey + '.';\r\n else:\r\n newdomain += 'xn--' + dkey.decode('utf-8').encode('punycode') + '.'\r\n\r\n return newdomain[0:-1];\r\n \r\n #Save the PHP sort order\r\n def phpSort(self,get):\r\n if public.writeFile(os.getenv(\"BT_SETUP\") + '/php/sort.pl',get.ssort): return public.returnMsg(True,'SUCCESS');\r\n return public.returnMsg(False,'ERROR');\r\n \r\n #Get the ad code\r\n def GetAd(self,get):\r\n try:\r\n return public.HttpGet(public.GetConfigValue('home') + '/Api/GetAD?name='+get.name + '&soc=' + get.soc);\r\n except:\r\n return '';\r\n \r\n #Get progress\r\n def GetSpeed(self,get):\r\n return public.getSpeed();\r\n \r\n #Check login status\r\n def CheckLogin(self,get):\r\n return True;\r\n \r\n #Get warning flags\r\n def GetWarning(self,get):\r\n warningFile = 'data/warning.json'\r\n if not os.path.exists(warningFile): return public.returnMsg(False,'The warning list does not exist!');\r\n import json,time;\r\n wlist = json.loads(public.readFile(warningFile));\r\n wlist['time'] = int(time.time());\r\n return wlist;\r\n \r\n #Set warning flags\r\n def SetWarning(self,get):\r\n wlist = self.GetWarning(get);\r\n id = int(get.id);\r\n import time,json;\r\n for i in xrange(len(wlist['data'])):\r\n if wlist['data'][i]['id'] == id:\r\n wlist['data'][i]['ignore_count'] += 1;\r\n wlist['data'][i]['ignore_time'] = int(time.time());\r\n \r\n warningFile = 'data/warning.json'\r\n public.writeFile(warningFile,json.dumps(wlist));\r\n return public.returnMsg(True,'SET_SUCCESS');\r\n\r\n #Get memcached status\r\n def GetMemcachedStatus(self,get):\r\n import telnetlib,re;\r\n config_path = public.GetConfigValue('setup_path') + '/memcached/config.json';\r\n conf = public.readFile(config_path);\r\n conf = json.loads(conf)\r\n\r\n bind = '127.0.0.1'\r\n array1 = conf['bind'].split(' ')\r\n if len(array1) >1: bind = array1[1]\r\n\r\n if public.get_server_status('memcached') <= 0: return public.returnMsg(False,\"Fetch failed, the service is not running.\") \r\n\r\n port = 11211\r\n array2 = conf['port'].split(' ')\r\n if len(array2) >1: port = int(array2[1])\r\n\r\n tn = telnetlib.Telnet(bind,port);\r\n tn.write(b\"stats\\n\");\r\n tn.write(b\"quit\\n\");\r\n data = tn.read_all();\r\n if type(data) == bytes: data = data.decode('utf-8')\r\n\r\n data = data.replace('STAT','').replace('END','').split(\"\\n\");\r\n result = {}\r\n res = ['cmd_get','get_hits','get_misses','limit_maxbytes','curr_items','bytes','evictions','limit_maxbytes','bytes_written','bytes_read','curr_connections'];\r\n for d in data:\r\n if len(d)<3: continue;\r\n t = d.split();\r\n if not t[0] in res: continue;\r\n result[t[0]] = int(t[1]);\r\n result['hit'] = 1;\r\n if result['get_hits'] > 0 and result['cmd_get'] > 0:\r\n result['hit'] = float(result['get_hits']) / float(result['cmd_get']) * 100;\r\n \r\n conf = public.readFile(public.GetConfigValue('setup_path') + '/memcached/config.json');\r\n conf = json.loads(conf)\r\n for x in conf:\r\n result[x] = conf[x].split(' ')[1]\r\n return result;\r\n \r\n #Set the memcached cache size\r\n def SetMemcachedCache(self,get):\r\n try:\r\n config_path = public.GetConfigValue('setup_path') + '/memcached/config.json';\r\n conf = 
public.readFile(config_path);\r\n conf = json.loads(conf)\r\n getdict = get.__dict__\n for i in getdict.keys():\r\n if i != \"__module__\" and i != \"__doc__\" and i != \"data\" and i != \"args\" and i != \"action\" and i != \"s\" and i != \"name\":\r\n old_val = conf[i].split(' ')\r\n if len(old_val) >1:\r\n conf[i] = old_val[0] + ' ' + getdict[i]\r\n public.writeFile(config_path,json.dumps(conf))\r\n \r\n public.set_server_status('memcached','stop') \r\n args = ''\r\n for x in conf:\r\n args += ' ' + conf[x]\r\n print( '%s -d runservice%s' % ('\"' + public.GetConfigValue('setup_path')+'/memcached/memcached.exe\"',args))\r\n public.WriteReg(r'SYSTEM\\CurrentControlSet\\services\\memcached','ImagePath', '%s -d runservice%s' % ('\"' + public.GetConfigValue('setup_path')+'/memcached/memcached.exe\"',args))\r\n public.set_server_status('memcached','start')\r\n\r\n return public.returnMsg(True,\"Modified successfully!\") \r\n except :\r\n return public.returnMsg(False,\"Modification failed!\") \r\n \r\n #Apply for the beta version\r\n def apple_beta(self,get):\r\n try:\r\n userInfo = json.loads(public.ReadFile('data/userInfo.json'))\r\n p_data = {}\r\n p_data['uid'] = userInfo['uid'];\r\n p_data['access_key'] = userInfo['access_key']\r\n p_data['username'] = userInfo['username']\r\n result = public.HttpPost(public.GetConfigValue('home') + '/api/wpanel/apple_beta',p_data,5)\r\n try:\r\n return json.loads(result)\r\n except: return public.returnMsg(False,'AJAX_CONN_ERR')\r\n except: return public.returnMsg(False,'AJAX_USER_BINDING_ERR')\r\n\r\n #Switch back to the stable release\r\n def to_not_beta(self,get):\r\n try:\r\n userInfo = json.loads(public.ReadFile('data/userInfo.json'))\r\n p_data = {}\r\n p_data['uid'] = userInfo['uid'];\r\n p_data['access_key'] = userInfo['access_key']\r\n p_data['username'] = userInfo['username']\r\n result = public.HttpPost(public.GetConfigValue('home') + '/api/wpanel/to_not_beta',p_data,5)\r\n try:\r\n return json.loads(result)\r\n except: return public.returnMsg(False,'AJAX_CONN_ERR')\r\n except: return public.returnMsg(False,'AJAX_USER_BINDING_ERR')\r\n\r\n #Get the latest 5 beta update logs\r\n def get_beta_logs(self,get):\r\n try:\r\n data = json.loads(public.HttpGet(public.GetConfigValue('home') + '/api/wpanel/get_beta_logs'))\r\n return data\r\n except:\r\n return public.returnMsg(False,'AJAX_CONN_ERR')\r\n\r\n #Get PHP-FPM logs\r\n def GetFpmLogs(self,get):\r\n return public.returnMsg(False,\"Not supported yet!\") \r\n \r\n #Get the PHP slow log\r\n def GetFpmSlowLogs(self,get):\r\n return public.returnMsg(False,\"Not supported yet!\") \r\n \r\n #Get the specified log\r\n def GetOpeLogs(self,get):\r\n if not os.path.exists(get.path): return public.returnMsg(False,'The log file does not exist!');\r\n return public.returnMsg(True,public.GetNumLines(get.path,1000));\r\n \r\n \r\n \r\n ","sub_path":"mtVIvqMhycn4w0r5/9667fd14520865229ae1dae2cce3020080e48f0b.py","file_name":"9667fd14520865229ae1dae2cce3020080e48f0b.py","file_ext":"py","file_size_in_byte":39392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"249515940","text":"import matplotlib.pyplot as plt\nimport os.path\nfrom potentiostat import Potentiostat\n\nport = '/dev/ttyACM0' \ndatafile = 'linear_sweep_data.txt'\n\ntest_name = 'linearSweep' \ncurr_range = '100uA' 
\nsample_rate = 200.0 \n\ntest_param = { \n 'quietTime' : 2000, \n 'quietValue' : 0.0, \n 'startValue' : -0.8, \n 'finalValue' : 1.2, \n 'duration' : 8000, \n }\n\ndev = Potentiostat(port) \ndev.set_curr_range(curr_range) \ndev.set_sample_rate(sample_rate)\ndev.set_param(test_name,test_param)\n\nt,volt,curr = dev.run_test(test_name,display='pbar',filename=datafile)\n\nplt.figure(1)\nplt.subplot(211)\nplt.plot(t,volt)\nplt.ylabel('potential (V)')\nplt.grid('on')\n\nplt.subplot(212)\nplt.plot(t,curr)\nplt.ylabel('current (uA)')\nplt.xlabel('time (sec)')\nplt.grid('on')\n\nplt.figure(2)\nplt.plot(volt,curr)\nplt.xlabel('potential (V)')\nplt.ylabel('current (uA)')\nplt.grid('on')\n\nplt.show()\n\n","sub_path":"software/python/potentiostat/docs/_figure_scripts/linear_sweep_fig/get_linear_sweep_data.py","file_name":"get_linear_sweep_data.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"46792767","text":"\"\"\"\nCopyright 2013 Rackspace\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport random\nimport string\nimport time\nfrom math import pow\n\nfrom cloudcafe.common.tools import randomstring\n\nSOURCE_RANDOM = '/dev/urandom'\nSOURCE_ZEROS = '/dev/zero'\nTEMP_LOCATION = '/tmp'\n\n#Binary prefixes\n#IEE_MAGNITUDE = int(pow(2,10))\nEXACT_BYTE = 8\nEXACT_KIBIBYTE = int(pow(2, 10))\nEXACT_MEBIBYTE = int(pow(2, 20))\nEXACT_GIBIBYTE = int(pow(2, 30))\nEXACT_TEBIBYTE = int(pow(2, 40))\n\n#Decimal prefixes\n#SI_MAGNITUDE = int(pow(10,3))\n\nEXACT_KILOBYTE = int(pow(10, 3))\nEXACT_MEGABYTE = int(pow(10, 6))\nEXACT_GIGABYTE = int(pow(10, 9))\nEXACT_TERABYTE = int(pow(10, 12))\n\n\ndef timestamp_string(prefix=None, suffix=None, decimal_precision=6):\n    '''\n    Return a unix timestamp surrounded by any defined prefixes and suffixes.\n    Decimal precision is full (6) by default.\n    '''\n    t = str('%f' % time.time())\n    int_seconds, dec_seconds = t.split('.')\n    for x in range(6 - decimal_precision):\n        dec_seconds = dec_seconds[:-1]\n\n    int_seconds = str(int_seconds)\n    dec_seconds = str(dec_seconds)\n    prefix = prefix or ''\n    suffix = suffix or ''\n    final = None\n    if len(dec_seconds) > 0:\n        final = '%s%s.%s%s' % (prefix, int_seconds, dec_seconds, suffix)\n    else:\n        final = '%s%s%s' % (prefix, int_seconds, suffix)\n\n    return final\n\n\ndef random_string(prefix=None, suffix=None, size=8):\n    \"\"\"\n    Return a random string of alphanumeric characters of 'size' length.\n    \"\"\"\n    if size <= 0:\n        return \"{0}{1}\".format(prefix or '', suffix or '')\n\n    charpool = tuple(string.ascii_letters + string.digits)\n    final_string = \"\"\n    while size > 0:\n        segment_size = min(int(len(charpool)/2), size)\n        size = size - segment_size\n        final_string += \"\".join(\n            random.sample((charpool), segment_size))\n    return \"{0}{1}{2}\".format(prefix or '', final_string, suffix or '')\n\n\ndef random_ip(pattern=None):\n    \"\"\"\n    Takes a pattern as a string in the format of #.#.#.# where a # is an\n    integer, and a # can be substituted with an * to produce a 
random octet.\n    pattern = 127.0.0.* would return a random string between 127.0.0.1 and\n    127.0.0.255\n    \"\"\"\n    if pattern is None:\n        pattern = '*.*.*.*'\n    num_asterisks = 0\n    for c in pattern:\n        if c == '*':\n            num_asterisks += 1\n    rand_list = [random.randint(1, 255) for i in range(0, num_asterisks)]\n    for item in rand_list:\n        pattern = pattern.replace('*', str(item), 1)\n    return pattern\n\n\ndef random_cidr(ip_pattern=None, mask=None, min_mask=0, max_mask=30):\n    \"\"\"\n    Gets a random cidr using the random_ip function in this module. If mask\n    is None then a random mask between 0 and 30 inclusive will be assigned.\n    \"\"\"\n    if mask is None:\n        mask = random.randint(min_mask, max_mask)\n    ip = random_ip(ip_pattern)\n    return ''.join([ip, '/', str(mask)])\n\n\ndef random_int(min_int, max_int):\n    return random.randint(min_int, max_int)\n\n\ndef rand_name(name='test'):\n    return \"{name}{suffix}\".format(\n        name=name, suffix=randomstring.get_random_string())\n\n\ndef random_item_in_list(selection_list):\n    return random.choice(selection_list)\n\n\ndef bytes_to_gb(val):\n    return float(val) / 1073741824\n\n\ndef gb_to_bytes(val):\n    return int(val * 1073741824)\n\n\ndef bytes_to_mb(val):\n    return float(val) / 1048576\n","sub_path":"cloudcafe/common/tools/datagen.py","file_name":"datagen.py","file_ext":"py","file_size_in_byte":3846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"328745015","text":"import numpy as np\nimport sys\nimport math\nimport random\nfrom random import randint\n\nrow_count = 10\ncol_count = 10\n\n# Board holding the game state\nboard = []\nfor x in range(10):\n board.append([\"0\"] * 10)\n\n\ndef print_board(board):\n for row in board:\n print(\" \".join(row))\n\n\n# Board for visuals\nvisualBoard = []\nfor x in range(10):\n visualBoard.append([\"0\"] * 10)\n\n\ndef print_visualBoard(visualBoard):\n for row in visualBoard:\n print(\" \".join(row))\n\n\nshipCoordinates = None\n\n\ndef createRandomShips(board, ship, ship2, ship3):\n # getting random point and direction\n rand_row = randint(1, row_count - 2)\n rand_col = randint(1, col_count - 2)\n board[rand_row][rand_col] = ship\n if rand_row == 1:\n direction = 2\n elif rand_row == 8:\n direction = 1\n elif rand_col == 1:\n direction = 4\n elif rand_col == 8:\n direction = 3\n else:\n direction = random.randrange(1, 4)\n\n # for at the ends of rows w/ directions\n if rand_row >= 8 and direction == 2:\n extensionNumber = 2\n elif rand_row == 7 and direction == 2:\n extensionNumber = 3\n elif rand_row == 6 and direction == 2:\n extensionNumber = random.randrange(3, 4)\n elif rand_row == 3 and direction == 1:\n extensionNumber = random.randrange(2, 4)\n elif rand_row == 2 and direction == 1:\n extensionNumber = random.randrange(2, 3)\n elif rand_row == 1 and direction == 1:\n extensionNumber = 2\n # for at the ends of columns w/ directions\n elif rand_col >= 8 and direction == 4:\n extensionNumber = 2\n elif rand_col == 7 and direction == 4:\n extensionNumber = 3\n elif rand_col == 6 and direction == 4:\n extensionNumber = random.randrange(3, 4)\n elif rand_col == 3 and direction == 3:\n extensionNumber = random.randrange(2, 4)\n elif rand_col == 2 and direction == 3:\n extensionNumber = random.randrange(2, 3)\n elif rand_col == 1 and direction == 3:\n extensionNumber = 2\n else:\n extensionNumber = random.randrange(3, 5)\n\n # extending randomly\n if direction == 1: # up\n for i in range(1, extensionNumber):\n board[rand_row - i][rand_col] = ship\n\n elif direction == 2: # down\n 
for i in range(1, extensionNumber):\n board[rand_row + i][rand_col] = ship\n\n elif direction == 3: # left\n for i in range(1, extensionNumber):\n board[rand_row][rand_col - i] = ship\n\n else: # right\n for i in range(1, extensionNumber):\n board[rand_row][rand_col + i] = ship\n\n # ship #2\n # All variables changed to 2\n rand_row2 = randint(1, row_count - 2)\n rand_col2 = randint(1, col_count - 2)\n # as long as the new ship's starting point doesnt fall on a previous ship, continue. Otherwise assign new value(s) until it doesnt.\n while board[rand_row2][rand_col2] != \"0\":\n # rand_row2 == rand_row and rand_col2 == rand_col:\n rand_row2 = randint(1, row_count - 2)\n rand_col2 = randint(1, col_count - 2)\n\n board[rand_row2][rand_col2] = ship2\n\n if rand_row2 == 1:\n direction2 = 2\n elif rand_row2 == 8:\n direction2 = 1\n elif rand_col2 == 1:\n direction2 = 4\n elif rand_col2 == 8:\n direction2 = 3\n else:\n direction2 = random.randrange(1, 4)\n\n # for at the ends of rows w/ directions\n if rand_row2 >= 8 and direction2 == 2:\n extensionNumber2 = 2\n elif rand_row2 == 7 and direction2 == 2:\n extensionNumber2 = 3\n elif rand_row2 == 6 and direction2 == 2:\n extensionNumber2 = random.randrange(3, 4)\n elif rand_row2 == 3 and direction2 == 1:\n extensionNumber2 = random.randrange(2, 4)\n elif rand_row2 == 2 and direction2 == 1:\n extensionNumber2 = random.randrange(2, 3)\n elif rand_row2 == 1 and direction2 == 1:\n extensionNumber2 = 2\n # for at the ends of columns w/ directions\n elif rand_col2 >= 8 and direction2 == 4:\n extensionNumber2 = 2\n elif rand_col2 == 7 and direction2 == 4:\n extensionNumber2 = 3\n elif rand_col2 == 6 and direction2 == 4:\n extensionNumber2 = random.randrange(3, 4)\n elif rand_col2 == 3 and direction2 == 3:\n extensionNumber2 = random.randrange(2, 4)\n elif rand_col2 == 2 and direction2 == 3:\n extensionNumber2 = random.randrange(2, 3)\n elif rand_col2 == 1 and direction2 == 3:\n extensionNumber2 = 2\n else:\n extensionNumber2 = random.randrange(3, 5)\n\n # check if the spot already is assigned to a value (break) or if not assign new value\n\n # check if the spot already is assigned to a value (break) or if not assign new value\n # truncate ship sizes if needed\n if direction2 == 1: # up\n for i in range(1, extensionNumber2):\n if board[rand_row2 - i][rand_col2] == ship:\n break\n else:\n board[rand_row2 - i][rand_col2] = ship2\n\n elif direction2 == 2: # down\n for i in range(1, extensionNumber2):\n if board[rand_row2 + i][rand_col2] == ship:\n break\n else:\n board[rand_row2 + i][rand_col2] = ship2\n\n elif direction2 == 3: # left\n for i in range(1, extensionNumber2):\n if board[rand_row2][rand_col2 - i] == ship:\n break\n else:\n board[rand_row2][rand_col2 - i] = ship2\n\n else: # right\n for i in range(1, extensionNumber2):\n if board[rand_row2][rand_col2 + i] == ship:\n break\n else:\n board[rand_row2][rand_col2 + i] = ship2\n\n # 3rd ship\n rand_row3 = randint(1, row_count - 2)\n rand_col3 = randint(1, col_count - 2)\n # as long as the new ship's starting point doesnt fall on a previous ship, continue. 
Otherwise assign new value(s) until it doesnt.\n while board[rand_row3][rand_col3] != \"0\":\n rand_row3 = randint(1, row_count - 2)\n rand_col3 = randint(1, col_count - 2)\n board[rand_row3][rand_col3] = ship3\n if rand_row3 == 1:\n direction3 = 2\n elif rand_row3 == 8:\n direction3 = 1\n elif rand_col3 == 1:\n direction3 = 4\n elif rand_col3 == 8:\n direction3 = 3\n else:\n direction3 = random.randrange(1, 4)\n # print(\"checkpoint\")\n if rand_row3 >= 8 and direction3 == 2:\n extensionNumber3 = 2\n elif rand_row3 == 7 and direction3 == 2:\n extensionNumber3 = 3\n elif rand_row3 == 6 and direction3 == 2:\n extensionNumber3 = random.randrange(3, 4)\n elif rand_row3 == 3 and direction3 == 1:\n extensionNumber3 = random.randrange(2, 4)\n elif rand_row3 == 2 and direction3 == 1:\n extensionNumber3 = random.randrange(2, 3)\n elif rand_row3 == 1 and direction3 == 1:\n extensionNumber3 = 2\n # for at the ends of columns w/ directions\n elif rand_col3 >= 8 and direction3 == 4:\n extensionNumber3 = 2\n elif rand_col3 == 7 and direction3 == 4:\n extensionNumber3 = 3\n elif rand_col3 == 6 and direction3 == 4:\n extensionNumber3 = random.randrange(3, 4)\n elif rand_col3 == 3 and direction3 == 3:\n extensionNumber3 = random.randrange(2, 4)\n elif rand_col3 == 2 and direction3 == 3:\n extensionNumber3 = random.randrange(2, 3)\n elif rand_col3 == 1 and direction3 == 3:\n extensionNumber3 = 2\n else:\n extensionNumber3 = random.randrange(3, 5)\n\n # check if the spot already is assigned to a value (break) or if not assign new value\n # truncate ship sizes if needed\n\n if direction3 == 1: # up\n for i in range(1, extensionNumber3):\n if board[rand_row3 - i][rand_col3] == ship or board[rand_row3 - i][rand_col3] == ship2:\n break\n else:\n board[rand_row3 - i][rand_col3] = ship3\n\n elif direction3 == 2: # down\n for i in range(1, extensionNumber3):\n if board[rand_row3 + i][rand_col3] == ship or board[rand_row3 + i][rand_col3] == ship2:\n break\n else:\n board[rand_row3 + i][rand_col3] = ship3\n\n elif direction3 == 3: # left\n for i in range(1, extensionNumber3):\n if board[rand_row3][rand_col3 - i] == ship or board[rand_row3][rand_col3 - i] == ship2:\n break\n else:\n board[rand_row3][rand_col3 - i] = ship3\n\n else: # right\n for i in range(1, extensionNumber3):\n if board[rand_row3][rand_col3 + i] == ship or board[rand_row3][rand_col3 + i] == ship2:\n break\n else:\n board[rand_row3][rand_col3 + i] = ship3\n\n\ncreateRandomShips(board, \"1\", \"2\", \"3\")\nwinner = False\n# function for guessing row & col values.\nshipsSunk = []\n\n\ndef guessShips(guessRow, guessCol):\n # if guessed before, try again\n global guessCount\n global winner\n\n if board[int(guessRow)][int(guessCol)] == \"A\" or board[int(guessRow)][int(guessCol)] == \"X\":\n print_visualBoard(visualBoard)\n print(\"Already guessed, try again.\")\n # if new guess and miss, count as miss and change the value on the board to an A\n elif board[int(guessRow)][int(guessCol)] == \"0\":\n board[int(guessRow)][int(guessCol)] = \"A\"\n # print out visual board with A\n visualBoard[int(guessRow)][int(guessCol)] = \"A\"\n print_visualBoard(visualBoard)\n guessCount -= 1\n print(\"Row:\", guessRow, \"Col:\", guessCol, \"-\", \"Missed, try again.\")\n # if new guess and hit, count as hit and change the value on the board to an X\n # check the values for each piece and if all the ships have been guessed, then the player wins.\n\n elif board[int(guessRow)][int(guessCol)] == \"1\" or board[int(guessRow)][int(guessCol)] == \"2\" or \\\n 
board[int(guessRow)][int(guessCol)] == \"3\":\n board[int(guessRow)][int(guessCol)] = \"X\"\n # print out visual board with X\n visualBoard[int(guessRow)][int(guessCol)] = \"X\"\n print_visualBoard(visualBoard)\n guessCount -= 1\n print(\"Row:\", guessRow, \"Col:\", guessCol, \"-\", \"Hit\")\n # list with all pieces on the board\n allpieces = []\n for i in range(0, row_count):\n for j in range(0, col_count):\n allpieces.append(board[i][j])\n # if 1s or 2s or 3s not on board then sunk ships\n if \"1\" not in allpieces:\n shipsSunk.append(\"Ship 1\")\n winner = False\n if \"2\" not in allpieces:\n shipsSunk.append(\"Ship 2\")\n winner = False\n if \"3\" not in allpieces:\n shipsSunk.append(\"Ship 3\")\n winner = False\n # if the board has 1 2 3 (ships) then no winner. otherwise winner = true\n if \"1\" in allpieces or \"2\" in allpieces or \"3\" in allpieces:\n winner = False\n else:\n winner = True\n print(\"You Win!\")\n\n print(\"Ships sunk:\")\n if shipsSunk:\n print(\", \".join(set(shipsSunk)))\n else:\n print(\"None\")\n\n\nprint(\"Welcome to Battleship. The goal of the game is to sink all the ships before your guesses run out.\")\nprint(\"Difficulties: \\n 1: Easy \\n 2: Medium \\n 3: Hard\")\n# print(\"1: Easy\")\n# print(\"2: Medium\")\n# print(\"3: Hard\")\ndifficulty = input(\"Choose from the difficulties listed above by entering in 1, 2, or 3: \")\ndifficultyRange = range(1, 4)\nif difficulty.isdigit():\n if int(difficulty) in difficultyRange:\n difficulty = int(difficulty)\n else:\n while int(difficulty) not in difficultyRange:\n difficulty = int(input(\"Invalid difficulty. Choose a difficulty 1, 2, or 3: \"))\nelse:\n while not difficulty.isdigit():\n difficulty = input(\"Invalid difficulty. Choose a difficulty 1, 2, or 3: \")\n\nif int(difficulty) == 1:\n guessCount = 40\nelif int(difficulty) == 2:\n guessCount = 35\nelif int(difficulty) == 3:\n guessCount = 30\nprint_visualBoard(visualBoard)\n# while there are still guesses left and there is no winner, continue playing the game\nwhile winner == False:\n # ask for input from the user for a row and col value\n # print the board\n if guessCount > 0:\n guessRow = input(\"Enter in a row value: \")\n guessCol = input(\"Enter in a column value: \")\n guessRange = range(0, 10)\n if guessRow.isdigit():\n while int(guessRow) not in guessRange:\n guessRow = int(input(\"Invalid row value. Enter in a row value between 0-9: \"))\n else:\n while not guessRow.isdigit():\n guessRow = input(\"Invalid row value. Enter in a row value between 0-9: \")\n\n if guessCol.isdigit():\n while int(guessCol) not in guessRange:\n guessCol = int(input(\"Invalid column value. Enter in a column value between 0-9: \"))\n\n else:\n while not guessCol.isdigit():\n guessCol = input(\"Invalid column value. 
Enter in a column value between 0-9: \")\n\n guessShips(guessRow, guessCol)\n else:\n print(\"You Lose - Ran Out of Tries\")\n print(\"Correct board:\")\n print_board(board)\n break\n print(\"Guesses remaining:\", guessCount)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"175177129","text":"table=[]\n\ndef bubbleSort(arr):\n n = len(arr)\n\n # Traverse through all array elements\n for i in range(n):\n\n # Last i elements are already in place\n for j in range(0, n - i - 1):\n\n # traverse the array from 0 to n-i-1\n # Swap if the element found is greater\n # than the next element\n if arr[j] > arr[j + 1]:\n arr[j], arr[j + 1] = arr[j + 1], arr[j]\n return arr\ndef merge_sort(unsorted_list):\n if len(unsorted_list) <= 1:\n return unsorted_list\n# Find the middle point and divide it\n middle = len(unsorted_list) // 2\n left_list = unsorted_list[:middle]\n right_list = unsorted_list[middle:]\n\n left_list = merge_sort(left_list)\n right_list = merge_sort(right_list)\n return list(merge(left_list, right_list))\n\n# Merge the sorted halves\n\ndef merge(left_half,right_half):\n\n res = []\n while len(left_half) != 0 and len(right_half) != 0:\n if left_half[0] < right_half[0]:\n res.append(left_half[0])\n left_half.remove(left_half[0])\n else:\n res.append(right_half[0])\n right_half.remove(right_half[0])\n if len(left_half) == 0:\n res = res + right_half\n else:\n res = res + left_half\n return res\n\n\ndef insertion_sort(InputList):\n for i in range(1, len(InputList)):\n j = i - 1\n nxt_element = InputList[i]\n # Compare the current element with next one\n\n while (InputList[j] > nxt_element) and (j >= 0):\n InputList[j + 1] = InputList[j]\n j = j - 1\n InputList[j + 1] = nxt_element\n return InputList\ndef selection_sort(input_list):\n\n for idx in range(len(input_list)):\n\n min_idx = idx\n for j in range( idx +1, len(input_list)):\n if input_list[min_idx] > input_list[j]:\n min_idx = j\n# Swap the minimum value with the compared value\n\n input_list[idx], input_list[min_idx] = input_list[min_idx], input_list[idx]\n return input_list\ndef partition(array, start, end):\n pivot = array[start]\n low = start + 1\n high = end\n\n while True:\n # If the current value we're looking at is larger than the pivot\n # it's in the right place (right side of pivot) and we can move left,\n # to the next element.\n # We also need to make sure we haven't surpassed the low pointer, since that\n # indicates we have already moved all the elements to their correct side of the pivot\n while low <= high and array[high] >= pivot:\n high = high - 1\n\n # Opposite process of the one above\n while low <= high and array[low] <= pivot:\n low = low + 1\n\n # We either found a value for both high and low that is out of order\n # or low is higher than high, in which case we exit the loop\n if low <= high:\n array[low], array[high] = array[high], array[low]\n # The loop continues\n else:\n # We exit out of the loop\n break\n\n array[start], array[high] = array[high], array[start]\n\n return high\ndef quick_sort(array, start, end):\n if start >= end:\n return\n\n p = partition(array, start, end)\n quick_sort(array, start, p-1)\n quick_sort(array, p+1, end)\n return array\n\n\n\n\n\n","sub_path":"SortingAlgorithms.py","file_name":"SortingAlgorithms.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} 
+{"seq_id":"390552587","text":"import csv\n\n# Đọc và xử lý file csv\ndata = []\nf = open('files/thong_tin_khach_hang.csv', encoding='utf-8')\nfor dong in csv.reader(f):\n dong[0] = dong[0].upper()\n data.append(dong)\nprint(data)\nf.close()\n\n# Xuất nội dung sang file csv khác\nf = open('files/xl_ttkh.csv', 'w', encoding='utf-8', newline='')\nfor item in data:\n csv.writer(f).writerow(item)\nf.close()\n\n\n","sub_path":"Fundamentals of Python/Day 9/doc_ghi_csv.py","file_name":"doc_ghi_csv.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"616168248","text":"\n\nfrom xai.brain.wordbase.nouns._butte import _BUTTE\n\n#calss header\nclass _BUTTES(_BUTTE, ):\n\tdef __init__(self,): \n\t\t_BUTTE.__init__(self)\n\t\tself.name = \"BUTTES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"butte\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_buttes.py","file_name":"_buttes.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"50587237","text":"from coffin import template\nfrom itertools import groupby\nfrom jinja2 import environmentfilter\nfrom jinja2.filters import make_attrgetter, _GroupTuple\n\nregister = template.Library()\n\n@environmentfilter\ndef ordered_groupby(environment, value, attribute, sort_key=None, reverse=False):\n expr = make_attrgetter(environment, attribute)\n grouped = map(_GroupTuple, groupby(value, expr))\n if sort_key:\n key = make_attrgetter(environment, sort_key)\n return sorted(grouped, key=lambda x: key(x[0]), reverse=reverse)\n else:\n return grouped\n\nregister.filter('ordered_groupby', ordered_groupby)\n","sub_path":"treeshits/templatetags/grouping.py","file_name":"grouping.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"80766323","text":"from os import listdir\nimport subprocess\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport takahe\n\nn = 1000\n\nfiles = listdir('data/newdata')\nfiles = [f'data/newdata/{file}' for file in files]\n\ni = 0\nnfiles = len(files)\n\n# nrows = int(np.floor(np.sqrt(nfiles)))\n# ncols = int(np.ceil(np.sqrt(nfiles)))\n\n# fig, axes = plt.subplots(nrows, ncols)\n# axes = axes.flatten()\n\n# if len(axes) > nfiles:\n# fig.delaxes(axes[-1])\n\ndef format_z(z):\n if z[:2] == \"em\":\n exp = z[-1]\n fmt = rf\"$1\\times 10^{{-{exp}}}$\"\n else:\n fmt = f\"0.{z}\"\n return fmt\n\nfor file in files:\n # file = f'data/newdata/Remnant-Birth-bin-imf135_300-z{z}_StandardJJ.dat'\n\n parts = file.split('/')[-1].split(\"-\")\n z = None\n for part in parts:\n if part[0] == 'z':\n z = part.split(\"_\")[0][1:]\n if z == None:\n print(\"Unable to compute z for this file, aborting!\")\n print(file)\n break\n\n # ax = axes[i]\n # plt.sca(ax)\n\n universe = takahe.universe.create('eds')\n universe.populate(file, name_hints=['m1', 'm2', 'a0', 'e0', 'weight', 'evolution_age', 'rejuvenation_age'], n_stars=n)\n\n universe.populace.compute_delay_time_distribution(label=f\"z=0.{z}\")\n\n # plt.text(0.98, 0.98, rf'z={format_z(z)}', horizontalalignment='right', verticalalignment='top', transform=ax.transAxes)\n\n # plt.yscale('log')\n # plt.xlabel(\" \")\n\n i += 1\n\n print(f'Completed z={z}. 
({i}/{nfiles})')\n\nplt.suptitle(f'delay time distribution for different $z$ values')\n# fig.add_subplot(111, frameon=False)\n# plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)\nax = plt.gca()\nbox = ax.get_position()\nax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n\n# Put a legend to the right of the current axis\nax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\nplt.yscale(\"log\")\nplt.ylabel(r\"Mergers [# of events / $M_\\odot$ / Gyr]\")\nplt.xlabel(\"log(age/yrs)\")\n\nplt.show()\n","sub_path":"examples/generating_dtd.py","file_name":"generating_dtd.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"154157528","text":"###MODULES###\n\nimport numpy as np\nimport pandas as pd\nimport os, sys\nimport time as t\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Circle\nfrom matplotlib.ticker import MaxNLocator\nimport pathlib\nfrom matplotlib.colors import Normalize\nfrom scipy import interpolate\nnorm = Normalize()\nfrom resource import getrusage, RUSAGE_SELF\nimport random\n\nmpl.rcParams['axes.linewidth'] = 1.5 #set the value globally\n\n#CONSTANTS\ncwd_PYTHON = os.getcwd() + '/'\nRHO = 1000.0\nDX = 0.025/256.0\nNX = 512\nPERIOD = 0.1\nRADIUSLARGE = 0.002\nRADIUSSMALL = 0.5*RADIUSLARGE\nmaxR = 0.025/RADIUSLARGE\n\ncsfont = {'fontname':'Times New Roman'}\n\n#System Arguments\nTheta = sys.argv[1]\nHx = sys.argv[2]\nHy = sys.argv[3]\nperNumber = int(sys.argv[4])#5\n\n#PARTICLE STREAMLINE CONSTANTS\nnFrames = 240\nminVal, maxVal = -6.0,6.0\nrows, cols = 100, 100\nnPart, nTrail = rows*cols, 80\ntimestep = 0.2/60.0\ndX = 2.0*maxR/(1.0*NX)\nseed = random.seed(11235)\ncwd_FIGS = cwd_PYTHON+'../Figures/ParticleStreamline/L/per{0}/'.format(perNumber)#cwd_PYTHON+\"../../Figures/ParticleStreamline/TestField2/\"\npathlib.Path(cwd_FIGS).mkdir(parents=True, exist_ok=True)\ncwd_Re = cwd_PYTHON+'Theta{0}/Hx{1}/Hy{2}/VTK/AVG/'.format(Theta,Hx,Hy)#cwd_PYTHON+'../../FieldData/TestField/'\ncwd_POS = cwd_PYTHON+'Theta{0}/Hx{1}/Hy{2}/'.format(Theta,Hx,Hy)#cwd_POS = cwd_PYTHON+'../../FieldData/TestField/'\n\n# constructs a filepath for the pos data of Re = $Re\ndef pname(cwd):\n #return cwd+\"/pd.txt\"\n #cwd = cwd_PYTHON\n return cwd+\"pd.txt\"\n\ndef GetPosData(cwd,time,parTheta,parHx,parHy):\n global RADIUSLARGE\n \n #Load position data\n pdData = pd.read_csv(pname(cwd),delimiter=' ')\n #Split up individual sphere data by given index\n UAdata = pdData[pdData['idx'] == 6].copy()\n LAdata = pdData[pdData['idx'] == 19].copy()\n UBdata = pdData[pdData['idx'] == 32].copy()\n LBdata = pdData[pdData['idx'] == 45].copy()\n #Sort data by time and reset indices\n UAdata = UAdata.sort_values(by=['time'])\n LAdata = LAdata.sort_values(by=['time'])\n UBdata = UBdata.sort_values(by=['time'])\n LBdata = LBdata.sort_values(by=['time'])\n UAdata = UAdata.reset_index(drop=True)\n LAdata = LAdata.reset_index(drop=True)\n UBdata = UBdata.reset_index(drop=True)\n LBdata = LBdata.reset_index(drop=True)\n #Rename columns to previous data frames\n UAdata = UAdata.rename(columns={\"x\":\"aXU\", \"y\":\"aYU\"})\n LAdata = LAdata.rename(columns={\"x\":\"aXL\", \"y\":\"aYL\"})\n UBdata = UBdata.rename(columns={\"x\":\"bXU\", \"y\":\"bYU\"})\n LBdata = LBdata.rename(columns={\"x\":\"bXL\", \"y\":\"bYL\"})\n #Combine separate dataframes to create previous dataframe used\n splitDict = 
{'aXU':UAdata['aXU'],'aYU':UAdata['aYU'],'aXL':LAdata['aXL'],'aYL':LAdata['aYL'],\n 'bXU':UBdata['bXU'],'bYU':UBdata['bYU'],'bXL':LBdata['bXL'],'bYL':LBdata['bYL'],'time':UAdata['time']}\n posData = pd.DataFrame(data=splitDict)\n pos = posData[posData['time'] == time]\n pos = pos.reset_index(drop=True)\n\n #Renormalize\n pos['aXU'] /= RADIUSLARGE\n pos['aXL'] /= RADIUSLARGE\n pos['aYU'] /= RADIUSLARGE\n pos['aYL'] /= RADIUSLARGE\n pos['bXU'] /= RADIUSLARGE\n pos['bXL'] /= RADIUSLARGE\n pos['bYU'] /= RADIUSLARGE\n pos['bYL'] /= RADIUSLARGE\n return pos\n\ndef GetPosDataLength(cwd):\n data = pd.read_csv(pname(cwd),delimiter=' ')\n return len(data['time'])\n\ndef GetAvgFieldData(cwd,idx):\n global RADIUSLARGE\n #Load position data\n #Columns\n #mx.flat my.flat avgW.flat avgP.flat avgUx.flat avgUy.flat\n fieldData = pd.read_csv(cwd+'AVG_%04d.csv'%idx,delimiter=' ')\n print(fieldData.head())\n #All field values to a list\n mxList = fieldData['mx'].values.tolist()\n myList = fieldData['my'].values.tolist()\n WList = fieldData['avgW'].values.tolist()\n PList = fieldData['avgP'].values.tolist()\n UxList = fieldData['avgUx'].values.tolist()\n UyList = fieldData['avgUy'].values.tolist()\n #Convert lists to numpy arrays\n #Reshape them to be Nx x Ny\n Nx, Ny = 1024, 1024\n mxArr = np.array(mxList).reshape((Nx,Ny))/RADIUSLARGE\n myArr = np.array(myList).reshape((Nx,Ny))/RADIUSLARGE\n WArr = np.array(WList).reshape((Nx,Ny))\n PArr = np.array(PList).reshape((Nx,Ny))\n UxArr = np.array(UxList).reshape((Nx,Ny))/RADIUSLARGE\n UyArr = np.array(UyList).reshape((Nx,Ny))/RADIUSLARGE\n return (mxArr, myArr, WArr, PArr, UxArr, UyArr)\n\ndef AddDiscsToPlot(ax,pos):\n #Add Discs\n circle1 = Circle((pos.loc[0,'aXU_rot'], pos.loc[0,'aYU_rot']), 1.0, facecolor=(0.0,)*3,\n linewidth=1,alpha=1.0,zorder=6)\n ax.add_patch(circle1)\n circle2 = Circle((pos.loc[0,'aXL_rot'], pos.loc[0,'aYL_rot']), 0.5, facecolor=(0.0,)*3,\n linewidth=1,alpha=1.0,zorder=6)\n ax.add_patch(circle2)\n circle3 = Circle((pos.loc[0,'bXU_rot'], pos.loc[0,'bYU_rot']), 1.0, facecolor=(0.5,)*3,\n linewidth=1,alpha=1.0,zorder=6)\n ax.add_patch(circle3)\n circle4 = Circle((pos.loc[0,'bXL_rot'], pos.loc[0,'bYL_rot']), 0.5, facecolor=(0.5,)*3,\n linewidth=1,alpha=1.0,zorder=6)\n ax.add_patch(circle4)\n #Add Swimmer \"springs\"\n ax.plot([pos.loc[0,'aXU_rot'],pos.loc[0,'aXL_rot']],\n [pos.loc[0,'aYU_rot'],pos.loc[0,'aYL_rot']],\n color='black',linewidth=3,zorder=6)\n ax.plot([pos.loc[0,'bXU_rot'],pos.loc[0,'bXL_rot']],\n [pos.loc[0,'bYU_rot'],pos.loc[0,'bYL_rot']],\n color=(0.5,)*3,linewidth=3,zorder=6)\n return\n\ndef set_size(w,h, ax=None):\n \"\"\" w, h: width, height in inches \"\"\"\n if not ax: ax=plt.gca()\n l = ax.figure.subplotpars.left\n r = ax.figure.subplotpars.right\n t = ax.figure.subplotpars.top\n b = ax.figure.subplotpars.bottom\n figw = float(w)/(r-l)\n figh = float(h)/(t-b)\n ax.figure.set_size_inches(figw, figh)\n return ax\n\ndef Rotate(xy, theta):\n # https://en.wikipedia.org/wiki/Rotation_matrix#In_two_dimensions\n #First Rotate based on Theta\n #Allocate Arrays\n rotationMatrix = np.zeros((2,2))\n #Calculate rotation matrix\n rotationMatrix[0,0] = np.cos(theta)\n rotationMatrix[0,1] = -1.0*np.sin(theta)\n rotationMatrix[1,0] = np.sin(theta)\n rotationMatrix[1,1] = np.cos(theta)\n return rotationMatrix.dot(xy)\n\ndef CalcLabAngle(pos):\n #Find swimming axis (normal y-axis)\n xU, xL = pos.loc[0,'aXU'], pos.loc[0,'aXL']\n yU, yL = pos.loc[0,'aYU'], pos.loc[0,'aYL']\n labX = xU - xL\n labY = yU - yL\n length = np.hypot(labX,labY)\n normX 
= labX/length\n normY = labY/length\n #2) Calculate Theta\n if(normX <= 0.0):\n theta = np.arccos(normY)\n else:\n theta = -1.0*np.arccos(normY)+2.0*np.pi\n print('theta = ',theta*180.0/np.pi)\n return 2.0*np.pi - theta\n\ndef InterpolateToNewCoordinateSystem(x,y,mx,my,arrayUx,arrayUy):\n #Create a uniform mesh for the interpolated velocity vectors!\n mx_new, my_new = np.meshgrid(x,y)\n \n #Interpolate Ux and Uy from original cartesian coordainates to new ones\n #Griddata\n print('About to inteprolate field data')\n print('peak memory = ',getrusage(RUSAGE_SELF).ru_maxrss)\n sys.stdout.flush()\n arrayUx_new=interpolate.griddata((mx.flatten(),my.flatten()),arrayUx.flatten() , (mx_new,my_new),method='linear')\n print('X transformation complete')\n print('peak memory = ',getrusage(RUSAGE_SELF).ru_maxrss)\n sys.stdout.flush()\n arrayUy_new=interpolate.griddata((mx.flatten(),my.flatten()),arrayUy.flatten() , (mx_new,my_new),method='linear')\n print('Coordinate Transformation Complete!')\n print('peak memory = ',getrusage(RUSAGE_SELF).ru_maxrss)\n sys.stdout.flush()\n return (arrayUx_new,arrayUy_new)\n\ndef RotateSimulation(cwd,time,mx,my,Ux,Uy,pos):\n global RADIUSLARGE\n #Shift x and y by the CM location\n xCM = 0.25*(pos.loc[0,'aXU'] + pos.loc[0,'bXU'] + pos.loc[0,'aXL'] + pos.loc[0,'bXL'])\n yCM = 0.25*(pos.loc[0,'aYU'] + pos.loc[0,'bYU'] + pos.loc[0,'aYL'] + pos.loc[0,'bYL'])\n #Do the same for mx and my\n mx -= xCM\n my -= yCM\n #Shift pos data by xCM and yCM\n pos['aXU'] -= xCM\n pos['aXL'] -= xCM\n pos['bXU'] -= xCM\n pos['bXL'] -= xCM\n pos['aYU'] -= yCM\n pos['aYL'] -= yCM\n pos['bYU'] -= yCM\n pos['bYL'] -= yCM\n #Rotate Reference frame by swimmer 1's axis\n #Calculate Theta (Rotate by -Theta)\n theta_rotate = CalcLabAngle(pos)\n print('theta_rotate = ',theta_rotate*180.0/np.pi)\n mxy = np.array([mx.flatten(),my.flatten()])\n mxy_rot = np.zeros((2,1024*1024))\n #Do the same for the U field\n Uxy = np.array([Ux.flatten(),Uy.flatten()])\n Uxy_rot = np.zeros((2,1024*1024))\n for jdx in range(1024*1024):\n mxy_rot[:,jdx] = Rotate(mxy[:,jdx],theta_rotate)\n Uxy_rot[:,jdx] = Rotate(Uxy[:,jdx],theta_rotate)\n mx_rot = mxy_rot[0,:].reshape((1024,1024))\n my_rot = mxy_rot[1,:].reshape((1024,1024))\n Ux_rot = Uxy_rot[0,:].reshape((1024,1024))\n Uy_rot = Uxy_rot[1,:].reshape((1024,1024))\n\n aU_pos = np.array([pos.loc[0,'aXU'],pos.loc[0,'aYU']])\n aL_pos = np.array([pos.loc[0,'aXL'],pos.loc[0,'aYL']])\n bU_pos = np.array([pos.loc[0,'bXU'],pos.loc[0,'bYU']])\n bL_pos = np.array([pos.loc[0,'bXL'],pos.loc[0,'bYL']])\n aU_rot = Rotate(aU_pos,theta_rotate)\n print('aU = ',aU_pos)\n print('aU_rot = ',aU_rot)\n aL_rot = Rotate(aL_pos,theta_rotate)\n bU_rot = Rotate(bU_pos,theta_rotate)\n bL_rot = Rotate(bL_pos,theta_rotate)\n pos['aXU_rot'], pos['aYU_rot'] = aU_rot[0], aU_rot[1]\n pos['aXL_rot'], pos['aYL_rot'] = aL_rot[0], aL_rot[1]\n pos['bXU_rot'], pos['bYU_rot'] = bU_rot[0], bU_rot[1]\n pos['bXL_rot'], pos['bYL_rot'] = bL_rot[0], bL_rot[1]\n #Interpolate onto a new coordinate system\n x = np.linspace(-0.025/RADIUSLARGE,0.025/RADIUSLARGE,512)\n y = np.linspace(-0.025/RADIUSLARGE,0.025/RADIUSLARGE,512)\n mx_stream, my_stream = np.meshgrid(x,y)\n interpUx, interpUy = InterpolateToNewCoordinateSystem(x,y,mx_rot,my_rot,Ux_rot,Uy_rot)\n \n return (mx_stream, my_stream, interpUx, interpUy, pos)\n\n#Plot New mesh and interpolated velocity field Ux and Uy\ndef PlotAvgU(cwd,mx,my,Ux,Uy,pos,space):\n global FIGNUM, PERIOD,minVal,maxVal,Theta,Hx,Hy\n #Here, we will visualize the velocity field on the new 
coordinate system\n nRows, nCols = 1, 1\n fig, ax = plt.subplots(nrows=nRows, ncols=nCols, num=0,figsize=(6,6),dpi=200)\n ax.set_title(r'Average Velocity Field',fontsize=12)\n normUx,normUy = Ux/np.hypot(Ux,Uy),Uy/np.hypot(Ux,Uy)\n magU = np.hypot(Ux,Uy)\n #Plot Vector field with quiver\n ax.quiver(mx[::space,::space],my[::space,::space],\n Ux[::space,::space],Uy[::space,::space],\n color='white',pivot='mid',angles='xy',scale_units='xy', scale=10,zorder=5)\n #Plot magnitude with contourf\n ax.contourf(mx,my,magU,cmap='viridis')\n\n AddDiscsToPlot(ax,pos)\n #print('RSMALL = ',RSMALL)\n ax.axis([minVal,maxVal,minVal,maxVal])\n fig.tight_layout()\n #plt.show()\n fig.savefig(cwd+'avgU_T{0}_Hx{1}_Hy{2}_.png'.format(Theta,np.round(float(Hx)/2.0,2),np.round(float(Hy)/2.0,1)))\n fig.clf()\n plt.close()\n return\n\n#Plot New mesh and interpolated velocity field Ux and Uy\ndef PlotParticles(mx,my,U,pos,particles,frame,cwd):\n global FIGNUM, PERIOD,minVal,maxVal,Theta,Hx,Hy\n #Here, we will visualize the velocity field on the new coordinate system\n nRows, nCols = 1, 1\n fig, ax = plt.subplots(nrows=nRows, ncols=nCols, num=1,figsize=(6,6),dpi=200)\n \n alpha = np.linspace(0.2,1.0,particles.nTime)\n for idTime in range(particles.nTime):\n pointColor = (1.0-(1.0*idTime/(1.0*particles.nTime)),)*3\n alphaValue = alpha[idTime]\n markerSize = particles.size[idTime,0]\n ax.plot(particles.x[idTime].flatten(),particles.y[idTime].flatten(),\n marker='o',ms=markerSize,color=pointColor,zorder=5,alpha=1,linewidth=0)\n\n #Plot Swimmer\n AddDiscsToPlot(ax,pos)\n #print('RSMALL = ',RSMALL)\n ax.axis([minVal,maxVal,minVal,maxVal])\n fig.tight_layout()\n fig.savefig(cwd+'PartStream_T{0}_Hx{1}_Hy{2}_{3}_.png'.format(Theta,np.round(float(Hx)/2.0,2),np.round(float(Hy)/2.0,1),frame))\n #plt.show()\n fig.clf()\n plt.close()\n return\n\nclass Particles:\n def __init__(self,rows,cols,minVal,maxVal,nPart,nTime):\n self.nPart = nPart\n print('nTime = ',nTime)\n self.nTime = nTime\n print('self.nTime = ',self.nTime)\n xvals = np.linspace(minVal,maxVal,rows)\n yvals = np.linspace(minVal,maxVal,cols)\n mx, my = np.meshgrid(xvals,yvals)\n self.x = np.array([mx.flatten()]*nTime).reshape((nTime,nPart))\n self.y = np.array([my.flatten()]*nTime).reshape((nTime,nPart))\n self.xinit = self.x.copy()\n self.yinit = self.y.copy()\n self.vx = np.zeros((nTime,nPart))\n self.vy = np.zeros((nTime,nPart))\n self.vxinit = self.vx.copy()\n self.vyinit = self.vy.copy()\n self.idx = np.array([[int((self.x[a,b] + maxR)/dX) for b in range(self.nPart)] for a in range(self.nTime)])\n self.idy = np.array([[int((self.y[a,b] + maxR)/dX) for b in range(self.nPart)] for a in range(self.nTime)])\n self.age = np.array([random.randrange(10,240) for b in range(nPart)]*nTime).reshape((nTime,nPart))\n self.life = self.age.copy()\n self.size = np.array([np.linspace(0.001,.1,self.nTime) for a in range(self.nPart)]).T.reshape((nTime,nPart))\n self.curr_age = np.zeros((nTime,nPart))\n def CalcMeshIndex(self,idTime):\n self.idx[idTime] = [int((self.x[idTime,b] + maxR)/dX) for b in range(self.nPart)]\n self.idy[idTime] = [int((self.y[idTime,b] + maxR)/dX) for b in range(self.nPart)]\n def AssignVelocity(self,idTime,nX,avgUx,avgUy):\n indices = nX*self.idy[idTime]+self.idx[idTime]\n self.vx[idTime] = avgUx[indices]\n self.vy[idTime] = avgUy[indices]\n\nif __name__ == '__main__':\n #Get AvgVel Field and Rotate Frame\n #Save Vel Field as AvgUx and AvgUy\n #READ ALL AVG FILES IN A SIMULATION DIRECTORY\n #EXTRACT AVERAGE FIELD DATA INTO NUMPY ARRAYS\n #PLOT AVERAGED FIELD 
DATA\n #Simulation Parameters\n #Extract Position Data\n #Calculate # Periods\n DUMP_INT = 20.0\n nTime = GetPosDataLength(cwd_POS)\n nPer = int(np.trunc(1.0*nTime/DUMP_INT))\n #nPer = 2\n #Paths to data and plots\n cwd_DATA = cwd_Re\n countPer = 0\n for countPer in range(nPer):\n if(countPer == perNumber):\n AVGPlot = pathlib.Path(cwd_DATA+'AVG_%04d.csv'%countPer)\n if AVGPlot.exists ():\n start = t.clock()\n #Get Avg Field Data\n mx,my,avgW,avgP,avgUx,avgUy = GetAvgFieldData(cwd_DATA,countPer)\n #Extract Position and Time Data\n time = np.round(0.05 + countPer*PERIOD,2)\n posData = GetPosData(cwd_POS,time,float(Theta),float(Hx),float(Hy))\n #Plot Averaged Field Data\n #Vorticity And Streamlines\n mx,my,avgUx,avgUy,posData = RotateSimulation(cwd_PYTHON,time,mx,my,avgUx,avgUy,posData)\n stend = t.clock()\n diff = stend - start\n print('Time to run for 1 period = %.5fs'%diff)\n sys.stdout.flush()\n\n #Visual Check of vel field's data (any idx)\n PlotAvgU(cwd_FIGS,mx,my,avgUx,avgUy,posData,6)\n\n #Now that we have the abg velocity field, we can calculate particle trajectories\n #Let's start with 10\n # Initialize 10 random coordinates\n # Each coordinate will have a lifetime from 60 -> 240 frames\n # For each, calculate the new position based on the velocity field (do this for twenty timesteps)\n # The velocity field does not evolve\n # Create array of points (use interpolate.griddata to find velocities)\n # pos += dt*velocity_interp\n # Plot scatter which decreases in opacity and point size for timesteps going backward\n # Advance time for each new frame\n\n #Initialize Uniform Distribution of points. The structure should be a 2D ndarray\n #Choose points in range (-7.5,7.5) for both x and y\n\n #Flatten avg Velocity field\n #print(avgU[1,281,248])\n avgU = np.array([[avgUx],[avgUy]])\n avgUx = avgUx.flatten()\n avgUy = avgUy.flatten()\n magU = np.hypot(avgUx,avgUy)\n while np.amax(magU)*timestep > 0.75*dX:\n timestep *= 0.95\n print('timestep = ',timestep)\n #print('dX = ',dX)\n #print('max dX = ',timestep*np.amax(magU))\n assert 0.75*dX >= np.amax(magU)*timestep\n particles = Particles(rows,cols,minVal,maxVal,nPart,nTrail)\n\n #Initialize Particles\n #Find velocity by index value (no interpolation)\n for idTime in range(nTrail):\n #Calculate Mesh Index for idTime\n particles.CalcMeshIndex(idTime)\n #Assign velocity by index (no interp)\n particles.AssignVelocity(idTime,NX,avgUx,avgUy)\n #Update position idTime+1\n if idTime < nTrail - 1:\n changeX = timestep*particles.vx[idTime]\n changeY = timestep*particles.vy[idTime]\n particles.x[idTime+1:nTime] += changeX\n particles.y[idTime+1:nTime] += changeY\n #Increase age of particles\n particles.curr_age[:idTime+1] -= 1\n #Save Initial particle stream pos and vel\n particles.xinit = particles.x[0,:].copy()#particles.x.copy()\n particles.yinit = particles.y[0,:].copy()#particles.y.copy()\n particles.vxinit = particles.vx[0,:].copy()#particles.vx.copy()\n particles.vyinit = particles.vy[0,:].copy()#particles.vy.copy()\n\n '''\n print('B4')\n print('life[100] = ',particles.age[:,100])\n print('curr_age[100] = ',particles.curr_age[:,100])\n print('x[100] = ',particles.x[:,100])\n print('y[100] = ',particles.y[:,100])\n print('vx[100] = ',particles.vx[:,100])\n print('vy[100] = ',particles.vy[:,100])\n '''\n #Loop over # of frames\n for idxFrame in range(2*nFrames):\n \n #Check Age\n particles.x = np.where(particles.curr_age >= particles.age, particles.xinit, particles.x)\n particles.y = np.where(particles.curr_age >= particles.age, particles.yinit, 
particles.y)\n particles.vx = np.where(particles.curr_age >= particles.age, particles.vxinit, particles.vx)\n particles.vy = np.where(particles.curr_age >= particles.age, particles.vyinit, particles.vy)\n particles.age = np.where(particles.curr_age >= particles.age, nFrames, particles.age)\n particles.curr_age = np.where(particles.curr_age >= particles.life, 0, particles.curr_age)\n particles.life = particles.age.copy()\n \n #Plot Particle Stream\n if idxFrame >= nFrames:\n PlotParticles(mx,my,avgU,posData,particles,idxFrame-nFrames,cwd_FIGS)\n #Increase particle age\n particles.curr_age += 1\n #Roll positions and velocities back by 1. The last position of each particle is recalculated below\n particles.x = np.roll(particles.x,-1,axis=0)\n particles.y = np.roll(particles.y,-1,axis=0)\n particles.vx = np.roll(particles.vx,-1,axis=0)\n particles.vy = np.roll(particles.vy,-1,axis=0)\n #Change Position of last value by using velocity of 2nd to last\n changeX = timestep*particles.vx[-2]\n #print('vy[-2,1] = ',particles.vy[-2,1])\n changeY = timestep*particles.vy[-2]\n #print('changeY = ',changeY[1])\n particles.x[-1] = particles.x[-2] + changeX\n particles.y[-1] = particles.y[-2] + changeY\n #Update Mesh Index\n particles.CalcMeshIndex(-1)\n #Update Velocity of first particle in trail\n particles.AssignVelocity(-1,NX,avgUx,avgUy)\n \n if idxFrame %60 == 0:\n print('Frame {0} is complete'.format(idxFrame))\n sys.stdout.flush()\n\n os.chdir(cwd_FIGS)\n strMovie = \"ffmpeg -r 60 -i PartStream_T{0}_Hx{1}_Hy{2}_%d_.png -vcodec libx264 -pix_fmt yuv420p -y PartMov_T{0}_Hx{1}_Hy{2}_.mp4\".format(Theta,np.round(float(Hx)/2.0,2),np.round(float(Hy)/2.0,1))\n os.system(strMovie)\n os.system(\"rm -rf PartStream_T{0}_Hx{1}_Hy{2}_*\".format(Theta,np.round(float(Hx)/2.0,2),np.round(float(Hy)/2.0,1)))\n os.chdir(cwd_PYTHON)\n print('Movie T{0}: Hx{1}: Hy{2} is complete'.format(Theta,Hx,Hy))\n","sub_path":"PRF3-Pairwise/Rd2/PostProcessing/PythonScripts/PairDynamics/ParticleStreamline_1osc.py","file_name":"ParticleStreamline_1osc.py","file_ext":"py","file_size_in_byte":20149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"624099307","text":"# -*- coding: utf-8 -*-\n\n'''\nCreated on May 12, 2016\n\n@author: andres & jose\n\n'''\n\n\nimport sys\nfrom BilleteraElectronica import *\nimport unittest\nfrom datetime import timedelta, datetime\n\n\nclass TestBilleteraElectronica(unittest.TestCase):\n\n @classmethod\n def setUp(self):\n self.Dueno = Dueno(\"Marco\",\"Marcano\",21)\n self.DuenoESP = Dueno(\"ñó\",\"ñí\",21)\n self.Billetera = BilleteraElectronica(1,self.Dueno,\"0000\")\n self.BilleteraESP = BilleteraElectronica(2, self.DuenoESP, \"0001\")\n\n '''Test Cases'''\n\n def testBilletera(self):\n self.assertEqual(self.Billetera.id , 1, \"The id must be 1\")\n self.assertEqual(self.Billetera.dueno.nombres , \"Marco\", \"The first name must be Marco\")\n self.assertEqual(self.Billetera.dueno.apellidos , \"Marcano\", \"The last name must be Marcano\")\n self.assertEqual(self.Billetera.dueno.ci , 21, \"The CI must be 21\")\n self.assertEqual(self.Billetera.pin , \"0000\", \"The PIN must be 0000\")\n self.assertEqual(self.Billetera.obtenerSaldo(), 0, \"The initial balance must be 0\")\n \n def testSaldo(self):\n self.assertEqual(self.Billetera.obtenerSaldo(), 0, \"The balance must be 0\")\n\n\n def testBilleteraSpanishChars(self):\n self.assertEqual(self.BilleteraESP.dueno.nombres,\"ñó\")\n self.assertEqual(self.BilleteraESP.dueno.apellidos, \"ñí\")\n\n def 
testRecarga(self):\n recarga = Recarga(100, datetime(2016,5,12,15,0), 1)\n self.assertEqual(recarga.monto, 100)\n self.assertEqual(recarga.fecha, datetime(2016,5,12,15,0))\n self.assertEqual(recarga.idEst, 1)\n\n def testConsumo(self):\n consumo = Consumo(100, datetime(2016,5,12,15,0), 1)\n self.assertEqual(consumo.monto, 100)\n self.assertEqual(consumo.fecha, datetime(2016,5,12,15,0))\n self.assertEqual(consumo.idEst, 1) \n\n def testConsumir(self):\n consumo = Consumo(100, datetime(2016,5,12,15,0), 1)\n recarga = Recarga(100, datetime(2016,5,12,15,0), 1)\n self.Billetera.recargar(recarga, \"0000\")\n self.Billetera.consumir(consumo, \"0000\")\n saldo = self.Billetera.obtenerSaldo()\n self.assertEqual(saldo, 0, \"The balance must be 0\")\n\n def testRecargar(self):\n recarga = Recarga(100, datetime(2016,5,12,15,0), 1)\n self.Billetera.recargar(recarga, \"0000\")\n saldo = self.Billetera.obtenerSaldo()\n self.assertEqual(saldo, 100, \"The balance must be 100\")\n\n def testRecargaNegativa(self):\n with self.assertRaises(AssertionError):\n self.Billetera.recargar(-10, datetime(2016,5,11,15,0), 1)\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()\n","sub_path":"Tarea3/TestBilleteraElectronica.py","file_name":"TestBilleteraElectronica.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"289072335","text":"from googleapiclient.discovery import build\nfrom google.oauth2 import service_account\nfrom reagan.subclass import Subclass\nfrom io import BytesIO\nimport pandas as pd\nfrom time import sleep\n\n\nclass SA360(Subclass):\n    def __init__(self, version=\"v2\", verbose=0):\n        super().__init__(verbose=verbose)\n        self.version = version\n        self.service_account_filepath = self.get_parameter_value(\"/sa360/service_account_path\")\n        self._create_service()\n        self.dcm_api_calls = 0\n\n    def _create_service(self):\n\n        api_name = \"doubleclicksearch\"\n        self.credentials = service_account.Credentials.from_service_account_file(\n            self.service_account_filepath\n        )\n        self.service = build(api_name, self.version, credentials=self.credentials)\n\n    def get_report_fragments(self, report_id):\n        \"\"\"\n        Returns a list containing the fragments (ints) of a report\n            - report_id (int): Id of the Report used to make the calls\n        \"\"\"\n\n        request = self.service.reports().get(reportId=report_id)\n        report_status = request.execute()\n        while True:\n            if report_status[\"isReportReady\"]:\n                return [\n                    report_file[\"url\"].split(\"/\")[-1]\n                    for report_file in report_status[\"files\"]\n                ]\n            request = self.service.reports().get(reportId=report_id)\n            report_status = request.execute()\n            sleep(60)\n\n    def file_to_df(self, report_id, report_fragment):\n        \"\"\"\n        Returns a pandas dataframe given a Report Id and Fragment\n            - report_id (int): Id of the Report used to make the calls\n            - report_fragment (int): Fragment file of the report\n        \"\"\"\n\n        request = self.service.reports().getFile(\n            reportId=report_id, reportFragment=report_fragment\n        )\n        report_file = request.execute()\n        return pd.read_csv(BytesIO(report_file))\n\n    def reports_to_df(self, agency_id, report_type, columns, timerange=None):\n        \"\"\"\n        Returns a generator that yields a pandas dataframe with 1000000 rows with the report specifications\n            - agency_id (int): Id of the Agency used to make the calls\n            - report_type (string): Specified report type\n            - columns (list): Columns to include in the report\n        \"\"\"\n\n        body = {\n            
\"reportScope\": {\"agencyId\": agency_id},\n \"reportType\": report_type,\n \"columns\": [{\"columnName\": col} for col in columns],\n \"downloadFormat\": \"csv\",\n \"maxRowsPerFile\": 1000000,\n \"statisticsCurrency\": \"agency\",\n }\n\n if timerange:\n body['timeRange'] = timerange\n\n # 1. Request Report\n report_request = self.service.reports().request(body=body)\n report = report_request.execute()\n report_id = report[\"id\"]\n\n # 2. Wait for it to finish\n report_fragments = self.get_report_fragments(report_id)\n\n # 3. Download files\n for report_fragment in report_fragments:\n yield self.file_to_df(report_id=report_id, report_fragment=report_fragment)\n\n def decode_error(self, error):\n # Returns a more concise error description\n return eval(error.content.decode())[\"error\"][\"message\"]\n\n\nif __name__ == \"__main__\":\n # pass\n # account report\n # report_type = 'campaign'\n # agency_id = 20700000001049589\n # columns = ['campaignId','campaign','campaignStartDate','campaignEndDate']\n sa = SA360(verbose=1)\n for df in sa.reports_to_df():\n pass","sub_path":"reagan/sa360.py","file_name":"sa360.py","file_ext":"py","file_size_in_byte":3594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"162023296","text":"import os \r\nimport re\r\n \r\ndef main(): \r\n\tfor count, filename in enumerate(os.listdir()): #run through all folder/files\r\n\t\tif not os.path.isdir(filename):\r\n\t\t\tcontinue # Not a directory\r\n\t\tmatch = re.findall(r\"\\d+\",filename) #find all whole numbers in the folder's name. Put in in a list called match\r\n\t\tmatch = [i for i in match if 1920 <= int(i) <= 2021] # delete all identified numbers which are not between 1920 and 2021\r\n\t\tif match:\r\n\t\t\t# Then it found a match!\r\n\t\t\tyear=str(match[-1]) #take the last value in the list. This will skip all the other years in the title of the album. For example in < Artist - The best of 1998 [1988] FLAC >\r\n\t\t\tif filename.find(\"-\")+1==filename.find(year): \r\n\t\t\t\tcontinue # skip if the directory is already named something like -- . Should be done manually\r\n\t\t\tif filename.find(\"-\")+2==filename.find(year):\r\n\t\t\t\tcontinue # skip if the directory is already named something like - - . Already what I want. \r\n\t\t\tnew_filename=rreplace(filename,year,\"\",1) #delete the last occurence of year in the foldername\r\n\t\t\tnew_filename=new_filename.replace(\" - \",\" - \"+year+\" - \",1)\r\n\t\t\tos.rename(filename, new_filename) #comment this if only testing\r\n\t\t\tprint(filename+\" -----> \"+new_filename+\" DONE\")\r\n\r\ndef rreplace(s, old, new, count):\r\n\treturn (s[::-1].replace(old[::-1], new[::-1], count))[::-1] # reverses all the strings in question, performs an ordinary replacement using str.replace on the reversed strings, then reverses the result back the right way round\r\n\r\nif __name__ == '__main__': \r\n # Calling main() function \r\n main() \r\n","sub_path":"fix_years.py","file_name":"fix_years.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"3839356","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n##\n# Copyright 2018 FIWARE Foundation, e.V.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
\r\nif __name__ == '__main__': \r\n    # Calling main() function \r\n    main() \r\n","sub_path":"fix_years.py","file_name":"fix_years.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"3839356","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n##\n# Copyright 2018 FIWARE Foundation, e.V.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n##\n\n\n__author__ = \"Jose Ignacio Carretero Guarde\"\n\n\nimport json\nimport sys\nfrom functions import *\nfrom OpenstackQueries import OpenstackQueries\nfrom collections import Counter\n\n\nif __name__ == \"__main__\":\n    q = OpenstackQueries('fiware-users.ini')\n    token = q.token\n    sys.stderr.write(q.token + \"\\n\")\n\n    d={}\n\n    endpoint_groups = q.get_all_endpoint_groups(token=token)\n    d['endpoint_groups'] = endpoint_groups\n\n    project_list = q.get_all_projects(token=token)\n    d['projects']=project_list\n\n    user_list = q.get_all_users(token=token)\n    d['users']=user_list\n\n    role_list = q.get_role_list(token=token)\n    d['roles']=role_list\n\n    role_assignment_list = q.get_role_assignment_list(token=token)\n    d['role_assignments']=role_assignment_list\n\n    q.get_all_endpoint_groups_projects(token, endpoint_groups)\n\n    servers = q.get_all_servers(token)\n    d['servers'] = servers\n\n    routers = q.get_all_routers(token)\n    d['routers'] = routers\n\n    networks = q.get_all_networks(token)\n    d['networks'] = networks\n\n    images = q.get_all_images(token)\n    d['images'] = images\n\n    ports = q.get_all_ports(token)\n    d['ports'] = ports\n\n    cinder_volumes = q.get_all_volumes(token=token)\n    d['volumes'] = cinder_volumes\n\n    ## Print how many users there are for every role\n    role_assignments_count = Counter([k['role_id'] for k in d['role_assignments'] if k.get('role_id')])\n\n    d['sum_up'] = {'users': len(d['users']), 'projects': len(d['projects']), \n                   'role_assignments_count': role_assignments_count}\n\n    sys.stdout.write(json.dumps(d))\n    sys.stdout.flush()\n","sub_path":"SkuldForAll/interesting_info.py","file_name":"interesting_info.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"293222647","text":"__author__ = 'http://blog.csdn.net/ghostfromheaven/article/details/8653421'\n# -*- coding:utf-8 -*-\n\nfrom socketserver import ThreadingTCPServer, StreamRequestHandler\nimport datetime\nimport os\nimport sys\nsys.path.append('C:\\\\Python34\\\\lib\\\\site-packages\\\\vlcd')\nos.chdir('C:\\\\Python34\\\\lib\\\\site-packages\\\\vlcd')\nimport vlc\n\n\nclass Mystreamre(StreamRequestHandler):\n    \"\"\"ThreadingTCPServer + StreamRequestHandler = a multithreaded socket server.\n    A sample of writing a socket service with ThreadingTCPServer and StreamRequestHandler.\n    ThreadingTCPServer inherits from ThreadingMixIn and TCPServer, which provides the threading.\n    \"\"\"\n    def handle(self):\n        #fn = self.save_into_file()\n        #self.recive_into_time_named_file(fn)\n        fnsend = self.file_for_use()\n        self.sendblock(fnsend)\n
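\n    # Added illustration (not part of the original sample): a minimal client sketch\n    # that would consume the byte stream sendblock() writes; the host and port\n    # mirror the __main__ block at the bottom of this file.\n    #   import socket\n    #   sock = socket.create_connection(('10.0.1.1', 12350))\n    #   with open('received.wma', 'wb') as out:\n    #       while True:\n    #           chunk = sock.recv(4096)\n    #           if not chunk:\n    #               break\n    #           out.write(chunk)\n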
\n    def save_into_file(self):\n        starttime = datetime.datetime.now()\n        filenametemp = starttime.strftime(\"%Y-%M-%D %H-%M-%S\")\n        #filenametemp = str(starttime).replace(':', '')\n        filename = 'e:\\\\' + filenametemp + '.mkv'\n        return filename\n\n    def file_for_use(self):\n        filename = 'e:\\\\yese.wma'\n        return filename\n\n\n    def recive_into_time_named_file(self, filename):\n        starttime = datetime.datetime.now()\n        print('0,newfilename', filename)\n        while 1:\n            middletime = datetime.datetime.now()\n            print('1,while_1 time', middletime)\n            data = self.rfile.readlines() # receive the whole file in one go\n            print('5,how large is data', sys.getsizeof(data) )\n            if data: # save file by time named\n                ifdatatime = datetime.datetime.now()\n                print('##if_data time:', ifdatatime, type(data), self.client_address)\n                with open(filename, 'wb') as fp: # here is what I did\n                    fp.writelines(data)\n                #p = vlc.MediaPlayer(filename)\n                #p.play()\n\n\n                recvtime = datetime.datetime.now()\n                print('2,recv&write-time', recvtime)\n\n\n            else:\n                closetime = datetime.datetime.now()\n                print('3,AAAAAAAAAAAAAAA', (closetime - starttime))\n                t = self.getfilesize(filename)\n                print('4,filesize is:', t)\n\n                #traceback.print_exc()\n                break\n\n    def getfilesize(self, filename):\n        \"\"\"\n        http://www.oschina.net/code/piece_full?code=4465\n        Multi-process chunked reading of a file.\n        \"\"\"\n        with open(filename, 'rb') as fs:\n            fs.seek(0, os.SEEK_END)\n            filesize = fs.tell()\n        return filesize\n\n    def sendblock(self, file):\n        \"\"\" http://blog.csdn.net/baby313/article/details/7363528\n        f.tell() returns an integer giving the current position of the file pointer\n        (that is, the offset in bytes from the start of the file).\nseek(offset, whence=SEEK_SET)\nChange the stream position to the given byte offset.\noffset is interpreted relative to the position indicated by whence.\nValues for whence are:\nSEEK_SET or 0 – start of the stream (the default); offset should be zero or positive\nSEEK_CUR or 1 – current stream position; offset may be negative\nSEEK_END or 2 – end of the stream; offset is usually negative\nReturn the new absolute position.\n\nNew in version 3.1: The SEEK_* constants.\n\nNew in version 3.3: Some operating systems could support additional values,\nlike os.SEEK_HOLE or os.SEEK_DATA.\nThe valid values for a file could depend on it being open in text or binary mode.\n\n        send data block by block\n        BY ME!!\n        \"\"\"\n        size_of_file = self.getfilesize(file)\n        print('size_of_file', size_of_file, self.server.address_family)\n        buffer = 30\n        count = 0\n        with open(file, 'rb') as fp:\n            while size_of_file > 0:\n                fp.seek(count, os.SEEK_SET)\n                fd = fp.read(buffer)\n                self.wfile.write(fd)\n                count += buffer\n                size_of_file -= buffer\n\n        \"\"\"with open(file, 'rb') as fp:\n            while 1:\n                if count > size_of_file:\n                    break\n                else:\n                    block = fp.read(buffer)\n                    # do something\n                    self.wfile.write(block)\n                    print(self.server.socket)\n                    count += buffer\n        \"\"\"\n\n\n\n\n\nclass Myserver(ThreadingTCPServer):\n    \"\"\"\n    http://stackoverflow.com/questions/5370778/how-to-count-connected-clients-in-tcpserver\n    Subclass ThreadingTCPServer so that connected client threads can be counted.\n    \"\"\"\n    def __init__(self, *args, **kwargs):\n        self._num_client = 0\n        ThreadingTCPServer.__init__(self, *args, **kwargs)\n\n    def process_request(self, request, client_address):\n        self._num_client += 1\n        print('start thread', self._num_client, self.fileno())\n        ThreadingTCPServer.process_request(self, request, client_address)\n\n    def process_request_thread(self, request, client_address):\n        ThreadingTCPServer.process_request_thread(self, request, client_address)\n        self._num_client -= 1\n        print('kill thread', self._num_client, self.fileno())\n\n    def get_client_number(self):\n        print('self._num_client', self._num_client)\n        return self._num_client\n\nif __name__ == \"__main__\":\n\n    '''ThreadingTCPServer inherits from ThreadingMixIn and TCPServer,\n    which has the same effect as inheriting from the two classes directly:\n    class ThreadingTCPServer(ThreadingMixIn, TCPServer): pass\n    '''\n    host = '10.0.1.1'\n    port = 12350\n    server = Myserver((host, port), Mystreamre)\n    print('thread name&number is :', server.fileno())\n    server.serve_forever()\n    \"\"\"Activate the server; this will keep running until you\n    interrupt the program with Ctrl-C\n
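    (an assumed quick manual check, not from the original: from another machine,\n    \"nc 10.0.1.1 12350 > out.wma\" would pull one copy of the configured file)\n    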
\"\"\"\n","sub_path":"TESTONLY-socketserver/My_File_reciver.py","file_name":"My_File_reciver.py","file_ext":"py","file_size_in_byte":5654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"602031299","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport pandas as pd\nimport re\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.svm import SVC\nfrom pandas.tools.plotting import scatter_matrix\nfrom sklearn import linear_model\n\nfrom utils import *\n\ndef str_to_float(s):\n s = re.sub('[^0-9]+', '', s)\n if s:\n return float(s) \n else:\n return None\n\n\ndef get_nearest_dist_to_metro(s):\n\n s = str (s)\n if s == \"nan\":\n return -1\n\n #trim -> lower -> repl\n s = s.strip().lower().replace(\",\", \".\")\n lst_metro = s.split(\";\")\n lst_dist = []\n\n print (lst_metro)\n pattern_m = re.compile(r'\\d+\\.*\\d*\\s+м')\n pattern_km = re.compile(r'\\d+\\.*\\d*\\s+км')\n pattern = re.compile(r'\\d+\\.*\\d*')\n\n for item in lst_metro:\n result_m = pattern_m.findall(item)\n result_km = pattern_km.findall(item)\n\n if (len(result_m) + len(result_km) > 1):\n raise ValueError('Parse error')\n\n if len (result_m):\n lst_dist.append(float (pattern.findall(result_m[0])[0]))\n\n if len (result_km):\n lst_dist.append((float (pattern.findall(result_km[0])[0]))*1000)\n\n print (max(lst_dist))\n return max(lst_dist)\n\ndef process_type(s):\n tr = {'None': 'none',\n 'Другое':'other',\n 'Магазин':'shop',\n 'Отель':'hotel',\n 'Офис':'office',\n 'Производство':'manufacture',\n 'Склад':'store'}\n return tr[s]\n\ndef get_txt_represent_dist_to_metro(d):\n if (d < 0):\n return 'none'\n elif (d > 0 and d < 1000):\n return 'foot_access'\n else:\n return 'car_access'\n\ndef main():\n\n # load train data\n #_ID_,_TYPE_,_DATE_,_ADDRESS_,_PRICE_,_AREA_, _METRO,_LAT_,_LON_,_DESC_\n df_train = pd.read_csv(r'..\\dataset\\\\champ1_train.csv')\n df_train_correct_lat_lon = pd.read_csv(r'correct_lat_lon_train.csv')\n\n #_ID_,_TOWN_,_ADM_AREA_NAME_,_KIND_,_ADR_LINE_,_LAT_,_LON_\n df_train_addr_town = pd.read_csv(r'..\\ver3\\addr_town_train.csv')\n\n # remove columns\n remove = ['_DESC_', '_ADDRESS_']\n df_train.drop(remove, axis=1, inplace=True)\n\n # convert to float \n df_train['_PRICE_'] = df_train.apply(lambda f : str_to_float(f['_PRICE_']), axis = 1) \n df_train['_AREA_'] = df_train.apply(lambda f : str_to_float(f['_AREA_']), axis = 1) \n df_train['_LAT_'] = df_train_correct_lat_lon['_LAT_']\n df_train['_LON_'] = df_train_correct_lat_lon['_LON_']\n df_train['_METRO_DIST_'] = df_train.apply(lambda f : get_nearest_dist_to_metro(f[' _METRO']), axis = 1) \n df_train['_TYPE_2_'] = df_train.apply(lambda f : process_type(f['_TYPE_']), axis = 1) \n\n df_train['_TXT_REPR_METRO_DIST_'] = df_train.apply(lambda f : get_txt_represent_dist_to_metro(f['_METRO_DIST_']), axis = 1) \n\n df_train['_TOWN_'] = df_train_addr_town['_TOWN_']\n df_train['_ADM_AREA_NAME_'] = df_train_addr_town['_ADM_AREA_NAME_']\n df_train['_KIND_'] = df_train_addr_town['_KIND_']\n\n # fill na\n df_train = df_train.fillna(df_train.median(axis=0), axis=0)\n\n # concat all\n data_numerical = df_train[['_AREA_', '_LAT_', '_LON_', '_METRO_DIST_', '_PRICE_' ]]\n data_categorial_type = pd.get_dummies(df_train['_TYPE_2_'], prefix = '_TYPE')\n data_categorial_metro = pd.get_dummies(df_train['_TXT_REPR_METRO_DIST_'], prefix = '_METRO')\n data_addr_txt = df_train[['_TOWN_', '_ADM_AREA_NAME_', '_KIND_']]\n\n\n data = pd.concat((data_categorial_type, 
data_categorial_metro ,data_addr_txt, data_numerical), axis=1)\n #data = pd.DataFrame(data, dtype=float)\n data = pd.DataFrame(data)\n\n print (data.shape)\n print (data.columns)\n\n data.to_csv(\"..\\ver3\\train.csv\", encoding='utf-8', index = False)\n\nif __name__ == '__main__':\n main()","sub_path":"src/train_data_prepare.py","file_name":"train_data_prepare.py","file_ext":"py","file_size_in_byte":3841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"297638996","text":"from pram.rule import DiscreteInvMarkovChain, TimeAlways, GoToRule, Rule\r\nfrom pram.entity import Site, GroupQry, GroupSplitSpec\r\nimport random\r\nimport math\r\n\r\nclass SimpleFluProgress(DiscreteInvMarkovChain):\r\n\tdef __init__(self, var, tm, home, name='markov-chain', t=TimeAlways(), memo=None):\r\n\t\tsuper().__init__(var, tm)\r\n\t\tself.home = home\r\n\t\tself.t = t\r\n\r\n\tdef is_applicable(self, group, iter, t):\r\n\t\tif group.gr(Site.AT) == self.home and group.ga(self.var) == \"s\":\r\n\t\t\treturn False\r\n\t\treturn super().is_applicable(group, iter, t)\r\n\r\nclass SimpleGoTo(GoToRule):\r\n\tdef is_applicable(self, group, iter, t):\r\n\t\tif group.ga(\"playable\") == \"yes\":\r\n\t\t\treturn False\r\n\t\treturn super().is_applicable(group, iter, t)\r\n\r\nclass MallMovement(Rule):\r\n\tdef __init__(self, p, sites):\r\n\t\tsuper().__init__(name=\"SimpleMallMovement\", t=TimeAlways())\r\n\t\tself.sites = sites\r\n\t\tself.p = p\r\n\r\n\tdef apply(self, pop, group, iter, t):\r\n\t\tmove_chunk = round(self.p*group.m)\r\n\t\tif move_chunk == 0:\r\n\t\t\treturn [GroupSplitSpec(p=1)]\r\n\r\n\t\tmove_p = move_chunk/group.m\r\n\t\treturn [\r\n\t\t\tGroupSplitSpec(p=move_p, rel_set={ Site.AT: self.sites[random.randint(0, len(self.sites)-1)] }),\r\n\t\t\tGroupSplitSpec(p=1 - move_p)\r\n\t\t]\r\n\r\n\tdef is_applicable(self, group, iter, t):\r\n\t\tif group.ga(\"playable\") == \"yes\":\r\n\t\t\treturn False\r\n\t\tif group.ga(\"flu-status\") == \"s\" and not group.get_mass_at(GroupQry(attr={'flu-status': 'i'})) == 0:\r\n\t\t\treturn False\r\n\t\treturn super().is_applicable(group, iter, t)\r\n\r\nclass MallFlu(Rule):\r\n\tdef __init__(self, p, move_p, sites):\r\n\t\tsuper().__init__(name=\"SimpleMallFlu\", t=TimeAlways())\r\n\t\tself.p = p\r\n\t\tself.move_p = move_p\r\n\t\tself.sites = sites\r\n\r\n\tdef apply(self, pop, group, iter, t):\r\n\t\tif group.m == 0:\r\n\t\t\treturn [GroupSplitSpec(p=1)]\r\n\r\n\t\tflu_mass = round(self.p*group.get_mass_at(GroupQry(attr={'flu-status': 'i'}))*group.m)\r\n\t\tmove_mass = round(self.move_p*group.m)\r\n\r\n\t\tif group.ga(\"flu-status\") ==\"i\":\r\n\t\t\tflu_mass = 0\r\n\r\n\t\tboth_mass = round(((flu_mass/group.m)*(move_mass/group.m))*group.m)\r\n\t\tflu_mass = flu_mass - both_mass\r\n\t\tmove_mass = move_mass - both_mass\r\n\r\n\t\tflu_p = flu_mass/group.m\r\n\t\tmove_p = move_mass/group.m\r\n\t\tboth_p = both_mass/group.m\r\n\r\n\t\tif flu_p + move_p + both_p > 1:\r\n\t\t\tflu_p = 0\r\n\t\t\tmove_p = 0\r\n\t\t\tboth_p = 1\r\n\r\n\t\tmove_ind = random.randint(0, len(self.sites)-1)\r\n\r\n\t\treturn [\r\n\t\t\tGroupSplitSpec(p=move_p, rel_set={ Site.AT: self.sites[move_ind] }),\r\n\t\t\tGroupSplitSpec(p=flu_p, attr_set={ \"flu-status\": \"i\"}),\r\n\t\t\tGroupSplitSpec(p=both_p, attr_set={ \"flu-status\": \"i\"}, rel_set={Site.AT: self.sites[move_ind]}),\r\n\t\t\tGroupSplitSpec(p=1 - move_p - flu_p - both_p)\r\n\t\t]\r\n\r\n\tdef is_applicable(self, group, iter, t):\r\n\t\tif group.ga(\"playable\") == 
\"yes\":\r\n\t\t\treturn False\r\n\t\treturn super().is_applicable(group, iter, t)\r\n\r\nclass PlayableMallFlu(Rule):\r\n\tdef __init__(self, p):\r\n\t\tsuper().__init__(name=\"PlayableMallFlu\", t=TimeAlways())\r\n\t\tself.p = p\r\n\r\n\tdef apply(self, pop, group, iter, t):\r\n\t\tif group.m == 0:\r\n\t\t\treturn [GroupSplitSpec(p=1)]\r\n\r\n\t\tflu_p = 0\r\n\r\n\t\ttry:\r\n\t\t\t#print(\"Infected: \" + str(group.get_mass_at(GroupQry(attr={'flu-status': 'i'}))))\r\n\t\t\t#print(\"Total: \" + str(group.get_mass_at(GroupQry(attr={'playable': 'no'}))))\r\n\r\n\t\t\tflu_p = self.p*(group.get_mass_at(GroupQry(attr={'flu-status': 'i'}))/group.get_mass_at(GroupQry()))\r\n\t\texcept AttributeError:\r\n\t\t\tflu_p = 0\r\n\r\n\t\tif flu_p > 1:\r\n\t\t\tflu_p = 1\r\n\r\n\t\tflu_mass = 0\r\n\t\tfor i in range(int(group.m)):\r\n\t\t\tcheck = random.uniform(0, 1)\r\n\t\t\tif check < flu_p:\r\n\t\t\t\tflu_mass = flu_mass + 1\r\n\r\n\t\tif group.ga(\"flu-status\") ==\"i\":\r\n\t\t\tflu_mass = 0\r\n\r\n\t\tflu_p = flu_mass/group.m\r\n\r\n\t\tif flu_p > 1:\r\n\t\t\tflu_p = 1\r\n\r\n\t\treturn [\r\n\t\t\tGroupSplitSpec(p=flu_p, attr_set={ \"flu-status\": \"i\"}),\r\n\t\t\tGroupSplitSpec(p=1 - flu_p)\r\n\t\t]\r\n\r\n\tdef is_applicable(self, group, iter, t):\r\n\t\tif not group.ga(\"playable\") == \"yes\":\r\n\t\t\treturn False\r\n\t\treturn super().is_applicable(group, iter, t)","sub_path":"PramFlask/pramity_rules.py","file_name":"pramity_rules.py","file_ext":"py","file_size_in_byte":3858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"130006332","text":"n, m = input().split()\nalice_set = set()\nbob_set = set()\nfor alice_cub in range(n):\n alice_set.add(int(input()))\nfor bob_cub in range(m):\n bob_set.add(int(input()))\nboth_set = alice_set.intersection(bob_set)\nalice_set, bob_set = \\\n alice_set.difference(bob_set), bob_set.difference(alice_set)\nprint(len(both_set))\nif len(both_set) != 0:\n [print(str(cub)) for cub in sorted(both_set)]\nelse:\n print('')\nprint(len(alice_set))\nif len(alice_set) != 0:\n [print(str(cub)) for cub in sorted(alice_set)]\nelse:\n print('')\nprint(len(bob_set))\nif len(bob_set) != 0:\n [print(str(cub)) for cub in sorted(bob_set)]\nelse:\n print('')\n","sub_path":"students/olszewski_bartosz/lesson_06_dictionaries/cubes.py","file_name":"cubes.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"604988272","text":"\n# coding: utf-8\n\n# In[2]:\n\n\n# %load_ext autoreload\n# # from ggplot import *\n# %autoreload 3\nfrom util import *\n\n\n# In[378]:\n\n\n# item = glob.glob('data/pickle/ivt_item/ivt_item_1.pkl')\n\ndef process_batch(file):\n \n batch = pd.read_pickle(file)\n \n result_lst = []\n for idx, group_by_item_id in batch.groupby('ITEM_ID'):\n tmp = list(group_by_item_id.groupby('STOCK_ID'))[0][1] \n result_lst.append(get_feature_engineered_bundle(tmp))\n\n\n results = [result for result in result_lst if result != None]\n result_df = pd.DataFrame(results)\n \n # save feature engineered df\n result_df.to_pickle('data/pickle/ivt_item_feature_engineered/%s' % str(file.split('/')[-1]))\n \n # filter dataframe\n filtered_df = get_filtered_fg_df(result_df)\n\n \n cleaned_item_ids = filtered_df.item_id.values\n cleaned_df = batch[batch['ITEM_ID'].isin(cleaned_item_ids)]\n df_lst =[]\n \n # save images\n save_img(cleaned_df)\n\n\n for idx, group in list(cleaned_df.groupby('ITEM_ID')):\n try:\n 
df_lst.append(get_sell_amount_by_item_id(group))\n        except:\n            continue\n    \n    if len(df_lst) > 0:\n        \n        result = pd.concat(df_lst)\n        result.to_sql(con=engine, name='MWS_COLT_ITEM_SELL_AMT_DEV', if_exists='append', flavor='mysql')\n        logging.warning('done with %s' % str(file))\n\n\n# In[379]:\n\n\ndef map_clean_up_target_df(stock_id, group_df):\n\n    tmp_df = clean_up_target_df(group_df)[['sell_impute', 'STOCK_AMOUNT', 'STOCK_AMOUNT_imputed']]\n    tmp_df['STOCK_ID'] = stock_id\n    tmp_df.columns = ['SELL_AMOUNT', 'STOCK_AMOUNT', 'STOCK_AMOUNT_imputed', 'STOCK_ID']\n\n    return tmp_df\n\n\n# In[380]:\n\n\ndef get_sell_amount_by_item_id(df, add_sell_amount=False):\n    collect_day = df.COLLECT_DAY.values[0]\n    reg_id = df.REG_ID.values[0]\n    \n    tmp_lst = []\n    for stock_id, group_df in list(df.groupby('STOCK_ID')):\n        tmp_lst.append(map_clean_up_target_df(stock_id, group_df)) \n    result = pd.concat(tmp_lst)\n    \n    \n#     df_pivot = df.pivot_table(index='REG_DT', columns='STOCK_ID', values='STOCK_AMOUNT')\n#     sell_amount_by_stock = df_pivot.apply(map_clean_up_target_df)\n\n#     if add_sell_amount:\n#         sell_amount_total = sell_amount_by_stock.sum(axis=1)\n#         result = pd.DataFrame(sell_amount_total)\n#         result.columns = ['SELL_AMOUNT']\n#         result['REG_ID'] = reg_id\n#     else:\n#         sell_amount_by_stock['REG_DT'] = sell_amount_by_stock.index\n#         result = pd.melt(sell_amount_by_stock, id_vars=[\"REG_DT\"], var_name=\"STOCK_ID\", value_name=\"SELL_AMOUNT\")\n\n    item_id = df.ITEM_ID.values[0]\n    result['ITEM_ID'] = item_id\n    result['REG_ID'] = reg_id\n    result['UPT_DT'] = pd.to_datetime('now')\n    result['COLLECT_DAY'] = collect_day\n    result['UPT_ID'] = 'FILTER ALGO'\n\n    return result\n\n\n# In[383]:\n\n\nif __name__ == '__main__':\n    files = glob.glob('data/pickle/ivt_item/*.pkl')[:2]\n    engine = get_engine()\n    add_engine_pidguard(engine) \n    Parallel(n_jobs=-1)(map(delayed(process_batch), files))\n\n","sub_path":".ipynb_checkpoints/computing_pipeline-checkpoint.py","file_name":"computing_pipeline-checkpoint.py","file_ext":"py","file_size_in_byte":3066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"442616667","text":"from rest_framework.response import Response\nfrom rest_framework.decorators import api_view\n\nfrom .models import InspirationalMessage\nfrom .serializers import InspirationalMessageSerializer\n\nfrom random import randint\n\n\n@api_view([\"POST\"])\ndef get_inspirational_message(request, format=None):\n    \"\"\"Get inspirational message.\n    \"\"\"\n    data = {}\n    status_code = 200\n    if request.method == \"POST\":\n        query = InspirationalMessage.objects.all().order_by(\"-created_date\")\n        if query.exists():\n            if request.data.get(\"random\", False):\n                total = query.count()\n                index = randint(0, total - 1)\n                insp = query[index]\n                serializer = InspirationalMessageSerializer(insp)\n                data[\"message\"] = serializer.data\n            else:\n                insp = query.first()\n                serializer = InspirationalMessageSerializer(insp)\n                data[\"message\"] = serializer.data\n        else:\n            data[\"message\"] = {}\n    return Response(data, status=status_code)\n","sub_path":"fun/viewsapi.py","file_name":"viewsapi.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"378537051","text":"import os\nimport sys\nimport urllib.request\nimport json, pprint\n\nclient_id = \"6NkOhsRQkJxjefegmXSF\" # Client ID value issued by the developer center\nclient_secret = \"DF6mqwUxOJ\" # Client Secret value issued by the developer center\n
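\n# Added note (illustration only, not from the original): the success branch below\n# reads data['message']['result']['translatedText'], i.e. it assumes a response\n# shaped roughly like {'message': {'result': {'translatedText': '...'}}}.\n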
\"https://openapi.naver.com/v1/papago/n2mt\"\n\n\n#번역할 언어와 내용\nencText = urllib.parse.quote(\"반갑습니다\")\nsrcLang = 'ko'\ntarLang = 'en'\ndata = \"source={}&target={}&text=\".format(srcLang,tarLang) + encText\n\n#웹 요청\nrequest = urllib.request.Request(url)\nrequest.add_header(\"X-Naver-Client-Id\",client_id)\nrequest.add_header(\"X-Naver-Client-Secret\",client_secret)\n\n#결과를 받아오는 부분\nresponse = urllib.request.urlopen(request, data=data.encode(\"utf-8\"))\n\n#응답이 성공적일 때\nrescode = response.getcode()\nif(rescode==200): #응답 성공\n response_body = response.read()\n print(response_body.decode('utf-8'))\n data = json.loads(data)\n pprint.pprint(data)\n trans_text = data['message']['result']['translatedText']\n\nelse: #응답 실패\n print(\"Error Code:\" + rescode)\n\nprint(\"번역된 결과는 :\",trans_text)\n\n#json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)\n#해결법 찾는중.\n","sub_path":"Mashup stud.py","file_name":"Mashup stud.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"136311209","text":"from keras.preprocessing.image import load_img,img_to_array,array_to_img\nfrom keras import backend as K\nimport numpy as np\nimport scipy\n\ndef deprocess_image(x):\n # Util function to convert a tensor into a valid image.\n if K.image_data_format() == 'channels_first':\n print('channels first')\n x = x.reshape((3, x.shape[2], x.shape[3]))\n x = x.transpose((1, 2, 0))\n else:\n print('channels last')\n x = x[:, :, :, ::-1]\n x = x.reshape((x.shape[1], x.shape[2], 3))\n x[:, :, 0] += 123.68\n x[:, :, 1] += 116.779\n x[:, :, 2] += 103.939\n # x=array_to_img(x)\n # x = np.clip(x, 0, 255).astype('uint8')\n\n return x\n\ndef save_img(img, fname):\n pil_img = deprocess_image(img)\n scipy.misc.imsave(fname, pil_img)\n\nimg_path='images/3.jpeg'\norigin=load_img(img_path,target_size=(224,224))\na=img_to_array(origin)\nb=array_to_img(a)\nscipy.misc.imsave('b.jpeg',b)\na[:, :, 0] -= 123.68\na[:, :, 1] -= 116.779\na[:, :, 2] -= 103.939\na=a[:,:,::-1]\nc=np.expand_dims(a, axis=0)\nd=deprocess_image(c)\n# e=b-d\n# print('e')\n# print(e)\nscipy.misc.imsave('d.jpeg',d)\nsave_img(c, fname='c.jpeg')\n\n\n","sub_path":"code/test/test_RGB.py","file_name":"test_RGB.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"160954620","text":"from urllib.parse import urlparse\nfrom django.shortcuts import render, redirect\nfrom django.http import Http404, HttpResponse\nfrom django.db.models import F\nfrom django.contrib import messages\nfrom .models import Link, LinkForm\n\n\ndef get_client_ip(request):\n x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[0]\n else:\n ip = request.META.get('REMOTE_ADDR')\n return ip\n\n\ndef catchall(request, id):\n try:\n link = Link.objects.get(id=id)\n parsed = urlparse(link.url)\n Link.objects.filter(id=id).update(clicks=F('clicks')+1)\n if parsed.scheme:\n return redirect(link.url)\n return redirect(\"http://\" + link.url)\n except Exception as e:\n return HttpResponse(e)\n parsed = urlparse(id)\n if parsed.netloc:\n link = Link(url=id, ip=get_client_ip(request))\n link.save()\n request.session['short_url'] = \"http://\" + str(request.get_host()) + \"/\" + str(link.id)\n return redirect('/')\n raise Http404(\"Link does not exist\")\n\n\ndef home(request):\n context = {'form': LinkForm}\n if 'short_url' in request.session and 
request.session['short_url']:\n context['short_url'] = request.session['short_url']\n request.session['short_url'] = None\n if 'url' in request.POST:\n link = Link(url=request.POST['url'], ip=get_client_ip(request))\n link.save()\n request.session['short_url'] = \"http://\" + request.get_host() + \"/\" + link.id\n return redirect('/')\n return render(request, 'index.html', context)\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"477188873","text":"from math import *\r\n\r\nversaoTRT = __file__[__file__.find('trtv')+4:-3]\r\n\r\nclass TRT():\r\n \"\"\"\r\n Classe para calcular a envoltória de uma TRT para disjuntor\r\n Inicia com tensão nominal Ur, corrente de Interrupção simétrica Isc e frequência do sistema.\r\n A variável master é para atrelar essa classe a uma GUI\r\n A variável pontos é a porta de saída dos resultados para cada tipo de envoltória\r\n \"\"\"\r\n def __init__(self, master, Ur, Isc, f):\r\n self.Ur = Ur \r\n self.Isc = Isc\r\n self.w = 2*pi*f\r\n self.master = master\r\n\r\n def Terminal(self, params, kpp, sistema, ensaio):\r\n \"\"\"\r\n Função para calcular os parâmetros da envoltória de uma abertura de falta terminal\r\n Argumentos de entrada enviadas pela GUI são: params (quantos parâmetros terá a envoltória), kpp (fator de primeiro polo)\r\n sistema (linha ou cabo) e ensaio (T100, T60, etc)\r\n Com os dados de entrada, busca nas tabelas da Norma os valores de kaf e RRRV normalizados.\r\n \"\"\"\r\n erros = 0\r\n if sistema == 'Cabo':\r\n tabela1 = 'classe S1'\r\n tabela2 = 'kaf - S1'\r\n else:\r\n tabela1 = 'classe S2'\r\n tabela2 = 'kaf - S2'\r\n\r\n tabelaRRRV = {'Ur':[], 'T100':[], 'T60':[], 'T30':[], 'T10':[]}\r\n tabelaKaf = {'Ur':[], 'T100':[], 'T60':[], 'T30':[], 'T10':[]}\r\n\r\n # Chama uma função da GUI para ler os dados necessários da norma\r\n\r\n for linha in self.master.LeTabelaNorma(tabela=tabela2):\r\n tabelaKaf['Ur'].append(linha[0])\r\n tabelaKaf['T100'].append(linha[1])\r\n tabelaKaf['T60'].append(linha[2])\r\n tabelaKaf['T30'].append(linha[3])\r\n tabelaKaf['T10'].append(linha[4]) \r\n\r\n for linha in self.master.LeTabelaNorma(tabela=tabela1):\r\n tabelaRRRV['Ur'].append(linha[0])\r\n tabelaRRRV['T100'].append(linha[1])\r\n tabelaRRRV['T60'].append(linha[2])\r\n tabelaRRRV['T30'].append(linha[3])\r\n tabelaRRRV['T10'].append(linha[4])\r\n\r\n # Depois de obter os dados crus, filtra para pegar apenas o do estudo em questão\r\n\r\n\r\n if self.Ur >= 100: \r\n kaf = float(tabelaKaf[ensaio][tabelaKaf['Ur'].index('>100')])\r\n rrrv = float(tabelaRRRV[ensaio][tabelaRRRV['Ur'].index('>100')])\r\n else:\r\n kaf = float(tabelaKaf[ensaio][tabelaKaf['Ur'].index('<100')])\r\n try: ind = tabelaRRRV['Ur'].index(str(self.Ur))\r\n except:\r\n mb.showerror('Erro','Tensão Nominal não é normativa. Cálculo não efetuado.')\r\n erros = 1\r\n rrrv = float(tabelaRRRV[ensaio][ind])\r\n\r\n\r\n # Cálculo dos pontos da envoltória. 
É feita a distinção entre dois ou quatro parâmetros.\r\n # Os resultados são externados através da variável \"pontos\"\r\n\r\n uc = kpp*kaf*self.Ur*sqrt(2/3)\r\n\r\n if not erros:\r\n if params == 2:\r\n t2 = uc/rrrv\r\n self.master.pontos['terminal']['uc'] = uc\r\n self.master.pontos['terminal']['t2'] = t2\r\n\r\n if params == 4:\r\n u1 = 0.75*kpp*self.Ur*sqrt(2/3)\r\n t1 = u1/rrrv\r\n if ensaio == 'T60': t2 = t1*6\r\n else: t2 = t1*4\r\n self.master.pontos['terminal']['u1'] = u1\r\n self.master.pontos['terminal']['t1'] = t1\r\n self.master.pontos['terminal']['uc'] = uc\r\n self.master.pontos['terminal']['t2'] = t2\r\n\r\n\r\n def Capacitivo(self, falta, sistema, aterramento):\r\n \"\"\"\r\n Função para calcular os parâmetros da envoltória de uma abertura de corrente capacitiva.\r\n \"\"\"\r\n erros = 0\r\n if 'Banco' in sistema:\r\n if aterramento: kc = 1\r\n else: kc = 1.4\r\n classe = ''\r\n else:\r\n if falta:\r\n if aterramento:\r\n kc = 1.4\r\n else: kc = 1.7\r\n elif not aterramento: kc = 1.4\r\n elif ' Blindado' in sistema: kc = 1\r\n elif 'não' in sistema: kc = 1.2\r\n else:\r\n if self.Ur >= 52: kc = 1.2\r\n else: kc = 1.4\r\n if 'Cabo' in sistema: classe = 's1'\r\n else: classe = 's2'\r\n\r\n\r\n tabela1 = 'classe ' + classe\r\n tabela2 = 'kaf - ' + classe\r\n\r\n\r\n tabelaRRRV = {'Ur':[], 'T100':[]}\r\n tabelaKaf = {'Ur':[], 'T100':[]}\r\n\r\n # Chama uma função da GUI para ler os dados necessários da norma\r\n\r\n for linha in self.master.LeTabelaNorma(tabela=tabela2, cap=1):\r\n tabelaKaf['Ur'].append(linha[0])\r\n tabelaKaf['T100'].append(linha[1])\r\n\r\n for linha in self.master.LeTabelaNorma(tabela=tabela1, cap = 1):\r\n tabelaRRRV['Ur'].append(linha[0])\r\n tabelaRRRV['T100'].append(linha[1])\r\n\r\n\r\n # Depois de obter os dados crus, filtra para pegar apenas o do estudo em questão\r\n\r\n if classe != '':\r\n if self.Ur >= 100: \r\n kaf = float(tabelaKaf['T100'][tabelaKaf['Ur'].index('>100')])\r\n rrrv = float(tabelaRRRV['T100'][tabelaRRRV['Ur'].index('>100')])\r\n else:\r\n kaf = float(tabelaKaf['T100'][tabelaKaf['Ur'].index('<100')])\r\n try: ind = tabelaRRRV['Ur'].index(str(self.Ur))\r\n except:\r\n mb.showerror('Erro','Tensão Nominal não é normativa. Cálculo não efetuado.')\r\n erros = 1\r\n rrrv = float(tabelaRRRV['T100'][ind])\r\n\r\n\r\n # Cálculo dos pontos da envoltória. 
É feita a distinção entre dois ou quatro parâmetros.\r\n # Os resultados são externados através da variável \"pontos\"\r\n\r\n kpp = 1.95\r\n\r\n uc = kpp*kc*self.Ur*sqrt(2/3)\r\n\r\n if self.w > 370:\r\n t2 = 7300\r\n else: t2 = 8700\r\n\r\n if not erros:\r\n if classe == '':\r\n self.master.pontos['capacitivo']['uc'] = uc\r\n self.master.pontos['capacitivo']['t2'] = t2\r\n else:\r\n if self.w > 370:\r\n u1 = 0.05*kaf*self.Ur*sqrt(2/3)\r\n else: u1 = 0.02*kaf*self.Ur*sqrt(2/3)\r\n t1 = u1/rrrv\r\n self.master.pontos['capacitivo']['u1'] = u1\r\n self.master.pontos['capacitivo']['t1'] = t1\r\n self.master.pontos['capacitivo']['uc'] = uc\r\n self.master.pontos['capacitivo']['t2'] = t2\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n def Oposicao(self, params, sistema):\r\n \"\"\"\r\n Função para calcular os parâmetros da envoltória de uma abertura em oposição de fases\r\n Argumentos de entrada enviadas pela GUI são: params (quantos parâmetros terá a envoltória), kpp (fator de primeiro polo)\r\n sistema (linha efetivamente aterrado ou não ou cabo)\r\n Com os dados de entrada, busca nas tabelas da Norma os valores de kpp e kaf e RRRV normalizados.\r\n \"\"\"\r\n erros = 0\r\n\r\n if sistema == 'Cabo': tabela = 'classe S1'\r\n elif 'Sistema Efe' in sistema: tabela = 'classe s2'\r\n else: tabela = 'Não'\r\n\r\n\r\n tabelaCtes = {'Ur':[], 'kpp':[], 'kaf':[], 'rrrv':[]}\r\n\r\n # Chama uma função da GUI para ler os dados necessários da norma\r\n\r\n for linha in self.master.LeTabelaNorma(tabela=tabela):\r\n tabelaCtes['Ur'].append(linha[0])\r\n tabelaCtes['kpp'].append(linha[1])\r\n tabelaCtes['kaf'].append(linha[2])\r\n tabelaCtes['rrrv'].append(linha[3])\r\n\r\n # Depois de obter os dados crus, filtra para pegar apenas o do estudo em questão\r\n\r\n if self.Ur >= 100:\r\n kpp = float(tabelaCtes['kpp'][tabelaCtes['Ur'].index('>100')]) \r\n kaf = float(tabelaCtes['kaf'][tabelaCtes['Ur'].index('>100')])\r\n rrrv = float(tabelaCtes['rrrv'][tabelaCtes['Ur'].index('>100')])\r\n else:\r\n try: ind = tabelaCtes['Ur'].index(str(self.Ur))\r\n except:\r\n mb.showerror('Erro','Tensão Nominal não é normativa. Cálculo não efetuado.')\r\n erros = 1\r\n kpp = float(tabelaCtes['kpp'][ind]) \r\n kaf = float(tabelaCtes['kaf'][ind])\r\n rrrv = float(tabelaCtes['rrrv'][ind])\r\n\r\n\r\n # Cálculo dos pontos da envoltória. É feita a distinção entre dois ou quatro parâmetros.\r\n # Os resultados são externados através da variável \"pontos\"\r\n\r\n uc = kpp*kaf*self.Ur*sqrt(2/3)\r\n\r\n if not erros:\r\n if params == 2:\r\n t2 = uc/rrrv\r\n self.master.pontos['oposicao']['uc'] = uc\r\n self.master.pontos['oposicao']['t2'] = t2\r\n\r\n if params == 4:\r\n u1 = 0.75*kpp*self.Ur*sqrt(2/3)\r\n t1 = u1/rrrv\r\n t2 = t1*4\r\n self.master.pontos['oposicao']['u1'] = u1\r\n self.master.pontos['oposicao']['t1'] = t1\r\n self.master.pontos['oposicao']['uc'] = uc\r\n self.master.pontos['oposicao']['t2'] = t2\r\n\r\n\r\n\r\n def Quilo(self, Lx = 'L60'):\r\n \"\"\"\r\n Função para calcular os pontos da envoltória de uma abertura de falta quilométrica (Short-line fault)\r\n É calculado o primeiro ponto de acordo com o anexo A da Norma. 
O segundo e máximo ponto da envoltória é\r\n obtido das tabelas da Norma\r\n O parâmetro de entrada Lx é o tipo de ensaio que será realizado\r\n \"\"\"\r\n c = 0.3 #velocidade da luz em km/uS\r\n\r\n erros = 0\r\n\r\n # Obtenção dos valores normalizados da Norma\r\n\r\n tabelaSL = {'Ur':[], 'rrrv':[], 'td':[], 'kaf':[]}\r\n for linha in self.master.LeTabelaNorma():\r\n tabelaSL['Ur'].append(linha[0])\r\n tabelaSL['rrrv'].append(linha[1])\r\n tabelaSL['td'].append(linha[2])\r\n tabelaSL['kaf'].append(linha[3])\r\n\r\n if self.Ur >= 100: Unom = '>100'\r\n else: Unom = str(self.Ur)\r\n\r\n try: ind = tabelaSL['Ur'].index(Unom)\r\n except: \r\n erros = 1\r\n mb.showerror('Erro', 'Tensão Nominal não é normativa. Cálculo não efetuado.')\r\n\r\n dUdT = float(tabelaSL['rrrv'][ind])\r\n tD = float(tabelaSL['td'][ind])\r\n kaf = float(tabelaSL['kaf'][ind])\r\n\r\n\r\n # Cálculo do primeiro ponto da envoltória\r\n\r\n Lx = 0.01*float(Lx[1:])\r\n Il = self.Isc * Lx\r\n tdL = 0.1 \r\n Z = 450 # Impedância característica da LT em ohms - Table 8\r\n k = 1.6 # Fator de pico k- Table 8 \r\n Ul = self.Ur*(1-Lx)/sqrt(3) # Queda de tensão na LT até o curto, em kV rms.\r\n u0 = Ul*sqrt(2) # Queda de tensão na LT até o curto, em kV pico.\r\n UlAst = k*u0 # Primeiro pico de tensão TRT no terminal da linha, em kV.\r\n Ll = (Ul/Il)/self.w # Indutância do trecho da LT até o curto-circuito, em H.\r\n tL = k*Ll/Z*1e6 # Tempo até o primeiro pico de tensão de TRT na linha, em micro-seg.\r\n Le = c*tL/2*1e6 # Distância, em relação ao disjuntor, onde o curto é aplicado.\r\n tT = tdL+tL\r\n uS = dUdT*(Lx)*(tT-tD)\r\n u1 = uS + UlAst\r\n\r\n # Cálculo do segundo e último ponto da envoltória\r\n\r\n uc = kaf*self.Ur*sqrt(2/3)\r\n t2 = uc/dUdT/Lx\r\n\r\n # Grava na variável \"pontos\" os pontos finais.\r\n if not erros:\r\n self.master.pontos['quilométrico']['u1'] = u1\r\n self.master.pontos['quilométrico']['t1'] = tT\r\n self.master.pontos['quilométrico']['Le'] = Le\r\n self.master.pontos['quilométrico']['uc'] = uc\r\n self.master.pontos['quilométrico']['t2'] = t2\r\n\r\n\r\n def __str__(self):\r\n \"\"\"Imprime o status de tudo\"\"\"\r\n","sub_path":"trtv1_02.py","file_name":"trtv1_02.py","file_ext":"py","file_size_in_byte":12063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"384723785","text":"#!/usr/bin/env python\nfrom setuptools import setup, find_packages\nVERSION = '0.1.0'\n\ntry:\n import pypandoc\n read_md = pypandoc.convert('README.md', 'rst')\nexcept(IOError, ImportError):\n read_md = open('README.md').read()\n\nsetup(\n name='myyql',\n version=VERSION,\n description=\"YQL(Yahoo Query Language) client written in python3\",\n # see http://pypi.python.org/pypi?:action=list_classifiers\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Utilities\",\n \"License :: OSI Approved :: MIT License\",\n ],\n keywords='python3,yql',\n author='chikyukotei',\n author_email='chikyukotei1122@gmail.com',\n url='https://github.com/chikyukotei/myyql',\n packages=find_packages('myyql', exclude=['examples', 'test']),\n include_package_data=True,\n zip_safe=True,\n long_description=read_md,\n install_requires=[],\n 
)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"602510221","text":"from typing import List\nimport sys\n\n\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n max_diff = 0\n min_price = sys.maxsize\n for i in range(len(prices)):\n if min_price > prices[i]:\n min_price = prices[i]\n max_diff = max(max_diff, prices[i] - min_price)\n return max_diff\n","sub_path":"code/ch13/13.1.2.best_time_to_buy_and_sell_stock.py","file_name":"13.1.2.best_time_to_buy_and_sell_stock.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"150216048","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author: Donny You (youansheng@gmail.com)\n\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport math\nimport random\n\nimport cv2\nimport numpy as np\nfrom utils import image\n\n\ndef filter_bounds(transformed_poly, size):\n # group the boundary\n filtered_polygon = []\n pre_pt = None\n pre_pt_flag = -1\n bound_flags = [False, False, False, False, False]\n for point in transformed_poly:\n if point[0] == 0:\n pt_flag = 0\n bound_flags[0] = True\n elif point[1] == 0:\n pt_flag = 1\n bound_flags[1] = True\n elif point[0] == size[0] - 1:\n pt_flag = 2\n bound_flags[2] = True\n elif point[1] == size[1] - 1:\n pt_flag = 3\n bound_flags[3] = True\n else:\n pt_flag = -1\n bound_flags[4] = True\n\n if pre_pt_flag != pt_flag or pt_flag == -1:\n if pre_pt is not None:\n filtered_polygon.append(pre_pt)\n filtered_polygon.append(point)\n pre_pt = None\n else:\n pre_pt = point\n\n pre_pt_flag = pt_flag\n keep = bound_flags[4] or (bound_flags[0] and bound_flags[1] and bound_flags[2] and bound_flags[3])\n return keep, np.vstack(filtered_polygon)\n\n\ndef transform_label(label, transform_matrix, target_size):\n cls_ids, polygons = label\n new_cls_ids = []\n new_polygons = []\n for index, poly in enumerate(polygons):\n transformed_poly = image.apply_affine_transform(poly, transform_matrix, target_size)\n keep, filtered_poly = filter_bounds(transformed_poly, target_size)\n if keep:\n new_cls_ids.append(cls_ids[index])\n new_polygons.append(filtered_poly)\n return new_cls_ids, new_polygons\n\n\ndef crop_label(label, lt_pt, size):\n cls_ids, polygons = label\n new_cls_ids = []\n new_polygons = []\n for index, poly in enumerate(polygons):\n cropped_poly = poly.copy() - np.array(lt_pt)\n cropped_poly[:, 0] = cropped_poly[:, 0].clip(min=0, max=size[0] - 1)\n cropped_poly[:, 1] = cropped_poly[:, 1].clip(min=0, max=size[1] - 1)\n\n keep, filtered_poly = filter_bounds(cropped_poly, size)\n if keep:\n new_cls_ids.append(cls_ids[index])\n new_polygons.append(filtered_poly)\n return new_cls_ids, new_polygons\n\n\nclass Padding(object):\n \"\"\" Padding the Image to proper size.\n Args:\n stride: the stride of the network.\n pad_value: the value that pad to the image border.\n img: Image object as input.\n Returns::\n img: Image object.\n \"\"\"\n\n def __init__(self, pad=None, pad_ratio=0.5, mean=(104, 117, 123), allow_outside_center=True):\n self.pad = pad\n self.ratio = pad_ratio\n self.mean = mean\n self.allow_outside_center = allow_outside_center\n\n def __call__(self, img, label=None):\n assert isinstance(img, np.ndarray)\n\n if random.random() > self.ratio:\n return img, label\n\n height, 
width, channels = img.shape\n left_pad, up_pad, right_pad, down_pad = self.pad\n\n target_size = [width + left_pad + right_pad, height + up_pad + down_pad]\n offset_left = -left_pad\n offset_up = -up_pad\n\n expand_image = np.zeros((max(height, target_size[1]) + abs(offset_up),\n max(width, target_size[0]) + abs(offset_left), channels), dtype=img.dtype)\n expand_image[:, :, :] = self.mean\n expand_image[abs(min(offset_up, 0)):abs(min(offset_up, 0)) + height,\n abs(min(offset_left, 0)):abs(min(offset_left, 0)) + width] = img\n img = expand_image[max(offset_up, 0):max(offset_up, 0) + target_size[1],\n max(offset_left, 0):max(offset_left, 0) + target_size[0]]\n\n if label is not None:\n cls_ids, polygons = label\n for poly in polygons:\n poly[:, 0] += abs(min(offset_up, 0))\n poly[:, 1] += abs(min(offset_left, 0))\n\n return img, label\n\n\nclass RandomHFlip(object):\n def __init__(self, swap_pair=None, flip_ratio=0.5):\n self.swap_pair = swap_pair\n self.ratio = flip_ratio\n\n def __call__(self, img, label=None):\n assert isinstance(img, np.ndarray)\n\n if random.random() > self.ratio:\n return img, label\n\n height, width, _ = img.shape\n img = cv2.flip(img, 1)\n if label is not None:\n cls_ids, polygons = label\n for poly in polygons:\n poly[:, 0] = width - poly[:, 0] - 1\n\n return img, label\n\n\nclass RandomSaturation(object):\n def __init__(self, lower=0.5, upper=1.5, saturation_ratio=0.5):\n self.lower = lower\n self.upper = upper\n self.ratio = saturation_ratio\n assert self.upper >= self.lower, \"saturation upper must be >= lower.\"\n assert self.lower >= 0, \"saturation lower must be non-negative.\"\n\n def __call__(self, img, label=None):\n assert isinstance(img, np.ndarray)\n\n if random.random() > self.ratio:\n return img, label\n\n img = img.astype(np.float32)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n img[:, :, 1] *= random.uniform(self.lower, self.upper)\n img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)\n img = np.clip(img, 0, 255).astype(np.uint8)\n return img, label\n\n\nclass RandomHue(object):\n def __init__(self, delta=18, hue_ratio=0.5):\n assert 0 <= delta <= 360\n self.delta = delta\n self.ratio = hue_ratio\n\n def __call__(self, img, label=None):\n assert isinstance(img, np.ndarray)\n\n if random.random() > self.ratio:\n return img, label\n\n img = img.astype(np.float32)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n img[:, :, 0] += random.uniform(-self.delta, self.delta)\n img[:, :, 0][img[:, :, 0] > 360] -= 360\n img[:, :, 0][img[:, :, 0] < 0] += 360\n img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)\n img = np.clip(img, 0, 255).astype(np.uint8)\n return img, label\n\n\nclass RandomPerm(object):\n def __init__(self, perm_ratio=0.5):\n self.ratio = perm_ratio\n self.perms = ((0, 1, 2), (0, 2, 1),\n (1, 0, 2), (1, 2, 0),\n (2, 0, 1), (2, 1, 0))\n\n def __call__(self, img, label=None):\n assert isinstance(img, np.ndarray)\n\n if random.random() > self.ratio:\n return img, label\n\n swap = self.perms[random.randint(0, len(self.perms) - 1)]\n img = img[:, :, swap].astype(np.uint8)\n return img, label\n\n\nclass RandomContrast(object):\n def __init__(self, lower=0.5, upper=1.5, contrast_ratio=0.5):\n self.lower = lower\n self.upper = upper\n self.ratio = contrast_ratio\n assert self.upper >= self.lower, \"contrast upper must be >= lower.\"\n assert self.lower >= 0, \"contrast lower must be non-negative.\"\n\n def __call__(self, img, label=None):\n assert isinstance(img, np.ndarray)\n\n if random.random() > self.ratio:\n return img, label\n\n img = img.astype(np.float32)\n img 
*= random.uniform(self.lower, self.upper)\n img = np.clip(img, 0, 255).astype(np.uint8)\n\n return img, label\n\n\nclass RandomBrightness(object):\n def __init__(self, shift_value=30, brightness_ratio=0.5):\n self.shift_value = shift_value\n self.ratio = brightness_ratio\n\n def __call__(self, img, label=None):\n assert isinstance(img, np.ndarray)\n\n if random.random() > self.ratio:\n return img, label\n\n img = img.astype(np.float32)\n shift = random.randint(-self.shift_value, self.shift_value)\n img[:, :, :] += shift\n img = np.around(img)\n img = np.clip(img, 0, 255).astype(np.uint8)\n\n return img, label\n\n\nclass RandomResize(object):\n \"\"\"Resize the given numpy.ndarray to random size and aspect ratio.\n\n Args:\n scale_min: the min scale to resize.\n scale_max: the max scale to resize.\n \"\"\"\n\n def __init__(self, scale_range=(0.75, 1.25), aspect_range=(0.9, 1.1), target_size=None,\n resize_bound=None, method='random', max_side_bound=None, scale_list=None, resize_ratio=0.5):\n self.scale_range = scale_range\n self.aspect_range = aspect_range\n self.resize_bound = resize_bound\n self.max_side_bound = max_side_bound\n self.scale_list = scale_list\n self.method = method\n self.ratio = resize_ratio\n\n if target_size is not None:\n if isinstance(target_size, int):\n self.input_size = (target_size, target_size)\n elif isinstance(target_size, (list, tuple)) and len(target_size) == 2:\n self.input_size = target_size\n else:\n raise TypeError('Got inappropriate size arg: {}'.format(target_size))\n else:\n self.input_size = None\n\n def get_scale(self, img_size):\n if self.method == 'random':\n scale_ratio = random.uniform(self.scale_range[0], self.scale_range[1])\n return scale_ratio\n\n elif self.method == 'bound':\n scale1 = self.resize_bound[0] / min(img_size)\n scale2 = self.resize_bound[1] / max(img_size)\n scale = min(scale1, scale2)\n return scale\n\n else:\n print('Resize method {} is invalid.'.format(self.method))\n exit(1)\n\n def __call__(self, img, label=None):\n \"\"\"\n Args:\n img (Image): Image to be resized.\n label (tuple): label to be resized.\n\n Returns:\n Image: Randomly resize image.\n tuple: Randomly resize label.\n list: Randomly resize center points.\n \"\"\"\n assert isinstance(img, np.ndarray)\n\n height, width, _ = img.shape\n if random.random() < self.ratio:\n if self.scale_list is None:\n scale_ratio = self.get_scale([width, height])\n else:\n scale_ratio = self.scale_list[random.randint(0, len(self.scale_list)-1)]\n\n aspect_ratio = random.uniform(*self.aspect_range)\n w_scale_ratio = math.sqrt(aspect_ratio) * scale_ratio\n h_scale_ratio = math.sqrt(1.0 / aspect_ratio) * scale_ratio\n if self.max_side_bound is not None and max(height*h_scale_ratio, width*w_scale_ratio) > self.max_side_bound:\n d_ratio = self.max_side_bound / max(height * h_scale_ratio, width * w_scale_ratio)\n w_scale_ratio *= d_ratio\n h_scale_ratio *= d_ratio\n\n else:\n w_scale_ratio, h_scale_ratio = 1.0, 1.0\n\n converted_size = (int(width * w_scale_ratio), int(height * h_scale_ratio))\n transform_matrix = image.get_affine_transform(img.shape[:2][::-1], converted_size)\n img = cv2.warpAffine(img, transform_matrix, converted_size)\n if label is not None:\n label = transform_label(label, transform_matrix, converted_size)\n\n return img, label\n\n\nclass RandomRotate(object):\n \"\"\"Rotate the input numpy.ndarray and points to the given degree.\n\n Args:\n degree (number): Desired rotate degree.\n \"\"\"\n\n def __init__(self, max_degree, rotate_ratio=0.5, mean=(104, 117, 123)):\n 
assert isinstance(max_degree, int)\n self.max_degree = max_degree\n self.ratio = rotate_ratio\n self.mean = mean\n\n def __call__(self, img, label=None):\n \"\"\"\n Args:\n img (Image): Image to be rotated.\n maskmap (Image): Mask to be rotated.\n kpt (list): Keypoints to be rotated.\n center (list): Center points to be rotated.\n\n Returns:\n Image: Rotated image.\n list: Rotated key points.\n \"\"\"\n assert isinstance(img, np.ndarray)\n\n if random.random() < self.ratio:\n rotate_degree = random.uniform(-self.max_degree, self.max_degree)\n else:\n return img, label\n\n height, width, _ = img.shape\n\n img_center = (width / 2.0, height / 2.0)\n\n rotate_mat = cv2.getRotationMatrix2D(img_center, rotate_degree, 1.0)\n cos_val = np.abs(rotate_mat[0, 0])\n sin_val = np.abs(rotate_mat[0, 1])\n new_width = int(height * sin_val + width * cos_val)\n new_height = int(height * cos_val + width * sin_val)\n rotate_mat[0, 2] += (new_width / 2.) - img_center[0]\n rotate_mat[1, 2] += (new_height / 2.) - img_center[1]\n img = cv2.warpAffine(img, rotate_mat, (new_width, new_height), borderValue=self.mean).astype(np.uint8)\n if label is not None:\n label = transform_label(label, rotate_mat, (new_height, new_width))\n\n return img, label\n\n\nclass RandomCrop(object):\n \"\"\"Crop the given numpy.ndarray and at a random location.\n\n Args:\n size (int or tuple): Desired output size of the crop.(w, h)\n \"\"\"\n\n def __init__(self, crop_size, crop_ratio=0.5, method='random', grid=None, allow_outside_center=True):\n self.ratio = crop_ratio\n self.method = method\n self.grid = grid\n self.allow_outside_center = allow_outside_center\n\n if isinstance(crop_size, float):\n self.size = (crop_size, crop_size)\n elif isinstance(crop_size, collections.Iterable) and len(crop_size) == 2:\n self.size = crop_size\n else:\n raise TypeError('Got inappropriate size arg: {}'.format(crop_size))\n\n def get_lefttop(self, crop_size, img_size):\n if self.method == 'center':\n return [(img_size[0] - crop_size[0]) // 2, (img_size[1] - crop_size[1]) // 2]\n\n elif self.method == 'random':\n x = random.randint(0, img_size[0] - crop_size[0])\n y = random.randint(0, img_size[1] - crop_size[1])\n return [x, y]\n\n elif self.method == 'grid':\n grid_x = random.randint(0, self.grid[0] - 1)\n grid_y = random.randint(0, self.grid[1] - 1)\n x = grid_x * ((img_size[0] - crop_size[0]) // (self.grid[0] - 1))\n y = grid_y * ((img_size[1] - crop_size[1]) // (self.grid[1] - 1))\n return [x, y]\n\n else:\n print('Crop method {} is invalid.'.format(self.method))\n exit(1)\n\n def __call__(self, img, label=None):\n \"\"\"\n Args:\n img (Image): Image to be cropped.\n maskmap (Image): Mask to be cropped.\n\n Returns:\n Image: Cropped image.\n Image: Cropped maskmap.\n list: Cropped keypoints.\n list: Cropped center points.\n \"\"\"\n assert isinstance(img, np.ndarray)\n\n if random.random() > self.ratio:\n return img, label\n\n height, width, _ = img.shape\n target_size = [min(self.size[0], width), min(self.size[1], height)]\n\n offset_left, offset_up = self.get_lefttop(target_size, [width, height])\n\n img = img[offset_up:offset_up + target_size[1], offset_left:offset_left + target_size[0]]\n if label is not None:\n label = crop_label(label, (offset_left, offset_up), target_size)\n\n return img, label\n\n\nclass Resize(object):\n \"\"\"Resize the given numpy.ndarray to random size and aspect ratio.\n Args:\n scale_min: the min scale to resize.\n scale_max: the max scale to resize.\n \"\"\"\n\n def __init__(self, target_size):\n self.target_size = 
target_size\n\n def __call__(self, img, label=None):\n assert isinstance(img, np.ndarray)\n\n height, width, _ = img.shape\n scale = 1/self.target_size\n resized_height = int(height*scale)\n resized_width = int(width * scale)\n\n image = cv2.resize(img, (resized_width, resized_height), interpolation=cv2.INTER_LINEAR)\n\n if label is not None:\n cls_ids, polygons = label\n label = (cls_ids, [polygon*scale for polygon in polygons])\n\n return image, label\n\n\nclass CV2AugCompose(object):\n \"\"\"Composes several transforms together.\n\n Args:\n transforms (list of ``Transform`` objects): list of transforms to compose.\n\n Example:\n >>> CV2AugCompose([\n >>> RandomCrop(),\n >>> ])\n \"\"\"\n\n def __init__(self, configer, split='train'):\n self.configer = configer\n self.split = split\n\n self.transforms = dict()\n if self.split == 'train':\n shuffle_train_trans = []\n if self.configer.exists('train_trans', 'shuffle_trans_seq'):\n if isinstance(self.configer.get('train_trans', 'shuffle_trans_seq')[0], list):\n train_trans_seq_list = self.configer.get('train_trans', 'shuffle_trans_seq')\n for train_trans_seq in train_trans_seq_list:\n shuffle_train_trans += train_trans_seq\n\n else:\n shuffle_train_trans = self.configer.get('train_trans', 'shuffle_trans_seq')\n\n if 'random_saturation' in self.configer.get('train_trans', 'trans_seq') + shuffle_train_trans:\n self.transforms['random_saturation'] = RandomSaturation(\n lower=self.configer.get('train_trans', 'random_saturation')['lower'],\n upper=self.configer.get('train_trans', 'random_saturation')['upper'],\n saturation_ratio=self.configer.get('train_trans', 'random_saturation')['ratio']\n )\n\n if 'random_hue' in self.configer.get('train_trans', 'trans_seq') + shuffle_train_trans:\n self.transforms['random_hue'] = RandomHue(\n delta=self.configer.get('train_trans', 'random_hue')['delta'],\n hue_ratio=self.configer.get('train_trans', 'random_hue')['ratio']\n )\n\n if 'random_perm' in self.configer.get('train_trans', 'trans_seq') + shuffle_train_trans:\n self.transforms['random_perm'] = RandomPerm(\n perm_ratio=self.configer.get('train_trans', 'random_perm')['ratio']\n )\n\n if 'random_contrast' in self.configer.get('train_trans', 'trans_seq') + shuffle_train_trans:\n self.transforms['random_contrast'] = RandomContrast(\n lower=self.configer.get('train_trans', 'random_contrast')['lower'],\n upper=self.configer.get('train_trans', 'random_contrast')['upper'],\n contrast_ratio=self.configer.get('train_trans', 'random_contrast')['ratio']\n )\n\n if 'padding' in self.configer.get('train_trans', 'trans_seq'):\n self.transforms['padding'] = Padding(\n pad=self.configer.get('train_trans', 'padding')['pad'],\n pad_ratio=self.configer.get('train_trans', 'padding')['ratio'],\n mean=self.configer.get('normalize', 'mean_value'),\n allow_outside_center=self.configer.get('train_trans', 'padding')['allow_outside_center']\n )\n\n if 'random_brightness' in self.configer.get('train_trans', 'trans_seq') + shuffle_train_trans:\n self.transforms['random_brightness'] = RandomBrightness(\n shift_value=self.configer.get('train_trans', 'random_brightness')['shift_value'],\n brightness_ratio=self.configer.get('train_trans', 'random_brightness')['ratio']\n )\n\n if 'random_hflip' in self.configer.get('train_trans', 'trans_seq') + shuffle_train_trans:\n self.transforms['random_hflip'] = RandomHFlip(\n swap_pair=self.configer.get('train_trans', 'random_hflip')['swap_pair'],\n flip_ratio=self.configer.get('train_trans', 'random_hflip')['ratio']\n )\n\n if 'random_resize' in 
self.configer.get('train_trans', 'trans_seq') + shuffle_train_trans:\n if self.configer.get('train_trans', 'random_resize')['method'] == 'random':\n if 'scale_list' not in self.configer.get('train_trans', 'random_resize'):\n if 'max_side_bound' in self.configer.get('train_trans', 'random_resize'):\n self.transforms['random_resize'] = RandomResize(\n method=self.configer.get('train_trans', 'random_resize')['method'],\n scale_range=self.configer.get('train_trans', 'random_resize')['scale_range'],\n aspect_range=self.configer.get('train_trans', 'random_resize')['aspect_range'],\n max_side_bound=self.configer.get('train_trans', 'random_resize')['max_side_bound'],\n resize_ratio=self.configer.get('train_trans', 'random_resize')['ratio']\n )\n else:\n self.transforms['random_resize'] = RandomResize(\n method=self.configer.get('train_trans', 'random_resize')['method'],\n scale_range=self.configer.get('train_trans', 'random_resize')['scale_range'],\n aspect_range=self.configer.get('train_trans', 'random_resize')['aspect_range'],\n resize_ratio=self.configer.get('train_trans', 'random_resize')['ratio']\n )\n else:\n if 'max_side_bound' in self.configer.get('train_trans', 'random_resize'):\n self.transforms['random_resize'] = RandomResize(\n method=self.configer.get('train_trans', 'random_resize')['method'],\n scale_list=self.configer.get('train_trans', 'random_resize')['scale_list'],\n aspect_range=self.configer.get('train_trans', 'random_resize')['aspect_range'],\n max_side_bound=self.configer.get('train_trans', 'random_resize')['max_side_bound'],\n resize_ratio=self.configer.get('train_trans', 'random_resize')['ratio']\n )\n else:\n self.transforms['random_resize'] = RandomResize(\n method=self.configer.get('train_trans', 'random_resize')['method'],\n scale_list=self.configer.get('train_trans', 'random_resize')['scale_list'],\n aspect_range=self.configer.get('train_trans', 'random_resize')['aspect_range'],\n resize_ratio=self.configer.get('train_trans', 'random_resize')['ratio']\n )\n\n elif self.configer.get('train_trans', 'random_resize')['method'] == 'focus':\n self.transforms['random_resize'] = RandomResize(\n method=self.configer.get('train_trans', 'random_resize')['method'],\n scale_range=self.configer.get('train_trans', 'random_resize')['scale_range'],\n aspect_range=self.configer.get('train_trans', 'random_resize')['aspect_range'],\n target_size=self.configer.get('train_trans', 'random_resize')['target_size'],\n resize_ratio=self.configer.get('train_trans', 'random_resize')['ratio']\n )\n\n elif self.configer.get('train_trans', 'random_resize')['method'] == 'bound':\n self.transforms['random_resize'] = RandomResize(\n method=self.configer.get('train_trans', 'random_resize')['method'],\n aspect_range=self.configer.get('train_trans', 'random_resize')['aspect_range'],\n resize_bound=self.configer.get('train_trans', 'random_resize')['resize_bound'],\n resize_ratio=self.configer.get('train_trans', 'random_resize')['ratio']\n )\n\n else:\n print('Not Support Resize Method!')\n exit(1)\n\n if 'random_crop' in self.configer.get('train_trans', 'trans_seq') + shuffle_train_trans:\n if self.configer.get('train_trans', 'random_crop')['method'] == 'random':\n self.transforms['random_crop'] = RandomCrop(\n crop_size=self.configer.get('train_trans', 'random_crop')['crop_size'],\n method=self.configer.get('train_trans', 'random_crop')['method'],\n crop_ratio=self.configer.get('train_trans', 'random_crop')['ratio'],\n allow_outside_center=self.configer.get('train_trans', 
'random_crop')['allow_outside_center']\n )\n\n elif self.configer.get('train_trans', 'random_crop')['method'] == 'center':\n self.transforms['random_crop'] = RandomCrop(\n crop_size=self.configer.get('train_trans', 'random_crop')['crop_size'],\n method=self.configer.get('train_trans', 'random_crop')['method'],\n crop_ratio=self.configer.get('train_trans', 'random_crop')['ratio'],\n allow_outside_center=self.configer.get('train_trans', 'random_crop')['allow_outside_center']\n )\n\n elif self.configer.get('train_trans', 'random_crop')['method'] == 'grid':\n self.transforms['random_crop'] = RandomCrop(\n crop_size=self.configer.get('train_trans', 'random_crop')['crop_size'],\n method=self.configer.get('train_trans', 'random_crop')['method'],\n grid=self.configer.get('train_trans', 'random_crop')['grid'],\n crop_ratio=self.configer.get('train_trans', 'random_crop')['ratio'],\n allow_outside_center=self.configer.get('train_trans', 'random_crop')['allow_outside_center']\n )\n\n else:\n print('Not Support Crop Method!')\n exit(1)\n\n if 'random_rotate' in self.configer.get('train_trans', 'trans_seq') + shuffle_train_trans:\n self.transforms['random_rotate'] = RandomRotate(\n max_degree=self.configer.get('train_trans', 'random_rotate')['rotate_degree'],\n rotate_ratio=self.configer.get('train_trans', 'random_rotate')['ratio'],\n mean=self.configer.get('normalize', 'mean_value')\n )\n\n if 'resize' in self.configer.get('train_trans', 'trans_seq') + shuffle_train_trans:\n if 'target_size' in self.configer.get('train_trans', 'resize'):\n self.transforms['resize'] = Resize(\n target_size=self.configer.get('train_trans', 'resize')['target_size']\n )\n\n else:\n if 'random_saturation' in self.configer.get('val_trans', 'trans_seq'):\n self.transforms['random_saturation'] = RandomSaturation(\n lower=self.configer.get('val_trans', 'random_saturation')['lower'],\n upper=self.configer.get('val_trans', 'random_saturation')['upper'],\n saturation_ratio=self.configer.get('val_trans', 'random_saturation')['ratio']\n )\n\n if 'random_hue' in self.configer.get('val_trans', 'trans_seq'):\n self.transforms['random_hue'] = RandomHue(\n delta=self.configer.get('val_trans', 'random_hue')['delta'],\n hue_ratio=self.configer.get('val_trans', 'random_hue')['ratio']\n )\n\n if 'random_perm' in self.configer.get('val_trans', 'trans_seq'):\n self.transforms['random_perm'] = RandomPerm(\n perm_ratio=self.configer.get('val_trans', 'random_perm')['ratio']\n )\n\n if 'random_contrast' in self.configer.get('val_trans', 'trans_seq'):\n self.transforms['random_contrast'] = RandomContrast(\n lower=self.configer.get('val_trans', 'random_contrast')['lower'],\n upper=self.configer.get('val_trans', 'random_contrast')['upper'],\n contrast_ratio=self.configer.get('val_trans', 'random_contrast')['ratio']\n )\n\n if 'padding' in self.configer.get('val_trans', 'trans_seq'):\n self.transforms['padding'] = Padding(\n pad=self.configer.get('val_trans', 'padding')['pad'],\n pad_ratio=self.configer.get('val_trans', 'padding')['ratio'],\n mean=self.configer.get('normalize', 'mean_value'),\n allow_outside_center=self.configer.get('val_trans', 'padding')['allow_outside_center']\n )\n\n if 'random_brightness' in self.configer.get('val_trans', 'trans_seq'):\n self.transforms['random_brightness'] = RandomBrightness(\n shift_value=self.configer.get('val_trans', 'random_brightness')['shift_value'],\n brightness_ratio=self.configer.get('val_trans', 'random_brightness')['ratio']\n )\n\n if 'random_hflip' in self.configer.get('val_trans', 'trans_seq'):\n 
self.transforms['random_hflip'] = RandomHFlip(\n                swap_pair=self.configer.get('val_trans', 'random_hflip')['swap_pair'],\n                flip_ratio=self.configer.get('val_trans', 'random_hflip')['ratio']\n            )\n\n        if 'random_resize' in self.configer.get('val_trans', 'trans_seq'):\n            if self.configer.get('val_trans', 'random_resize')['method'] == 'random':\n                if 'scale_list' not in self.configer.get('val_trans', 'random_resize'):\n                    if 'max_side_bound' in self.configer.get('val_trans', 'random_resize'):\n                        self.transforms['random_resize'] = RandomResize(\n                            method=self.configer.get('val_trans', 'random_resize')['method'],\n                            scale_range=self.configer.get('val_trans', 'random_resize')['scale_range'],\n                            aspect_range=self.configer.get('val_trans', 'random_resize')['aspect_range'],\n                            max_side_bound=self.configer.get('val_trans', 'random_resize')['max_side_bound'],\n                            resize_ratio=self.configer.get('val_trans', 'random_resize')['ratio']\n                        )\n                    else:\n                        self.transforms['random_resize'] = RandomResize(\n                            method=self.configer.get('val_trans', 'random_resize')['method'],\n                            scale_range=self.configer.get('val_trans', 'random_resize')['scale_range'],\n                            aspect_range=self.configer.get('val_trans', 'random_resize')['aspect_range'],\n                            resize_ratio=self.configer.get('val_trans', 'random_resize')['ratio']\n                        )\n                else:\n                    if 'max_side_bound' in self.configer.get('val_trans', 'random_resize'):\n                        self.transforms['random_resize'] = RandomResize(\n                            method=self.configer.get('val_trans', 'random_resize')['method'],\n                            scale_list=self.configer.get('val_trans', 'random_resize')['scale_list'],\n                            aspect_range=self.configer.get('val_trans', 'random_resize')['aspect_range'],\n                            max_side_bound=self.configer.get('val_trans', 'random_resize')['max_side_bound'],\n                            resize_ratio=self.configer.get('val_trans', 'random_resize')['ratio']\n                        )\n                    else:\n                        self.transforms['random_resize'] = RandomResize(\n                            method=self.configer.get('val_trans', 'random_resize')['method'],\n                            scale_list=self.configer.get('val_trans', 'random_resize')['scale_list'],\n                            aspect_range=self.configer.get('val_trans', 'random_resize')['aspect_range'],\n                            resize_ratio=self.configer.get('val_trans', 'random_resize')['ratio']\n                        )\n\n            elif self.configer.get('val_trans', 'random_resize')['method'] == 'focus':\n                self.transforms['random_resize'] = RandomResize(\n                    method=self.configer.get('val_trans', 'random_resize')['method'],\n                    scale_range=self.configer.get('val_trans', 'random_resize')['scale_range'],\n                    aspect_range=self.configer.get('val_trans', 'random_resize')['aspect_range'],\n                    target_size=self.configer.get('val_trans', 'random_resize')['target_size'],\n                    resize_ratio=self.configer.get('val_trans', 'random_resize')['ratio']\n                )\n\n            elif self.configer.get('val_trans', 'random_resize')['method'] == 'bound':\n                self.transforms['random_resize'] = RandomResize(\n                    method=self.configer.get('val_trans', 'random_resize')['method'],\n                    aspect_range=self.configer.get('val_trans', 'random_resize')['aspect_range'],\n                    resize_bound=self.configer.get('val_trans', 'random_resize')['resize_bound'],\n                    resize_ratio=self.configer.get('val_trans', 'random_resize')['ratio']\n                )\n\n            else:\n                print('Not Support Resize Method!')\n                exit(1)\n\n        if 'random_crop' in self.configer.get('val_trans', 'trans_seq'):\n            if self.configer.get('val_trans', 'random_crop')['method'] == 'random':\n                self.transforms['random_crop'] = RandomCrop(\n                    crop_size=self.configer.get('val_trans', 'random_crop')['crop_size'],\n                    method=self.configer.get('val_trans', 'random_crop')['method'],\n                    crop_ratio=self.configer.get('val_trans', 
'random_crop')['ratio'],\n                    allow_outside_center=self.configer.get('val_trans', 'random_crop')['allow_outside_center']\n                )\n\n            elif self.configer.get('val_trans', 'random_crop')['method'] == 'center':\n                self.transforms['random_crop'] = RandomCrop(\n                    crop_size=self.configer.get('val_trans', 'random_crop')['crop_size'],\n                    method=self.configer.get('val_trans', 'random_crop')['method'],\n                    crop_ratio=self.configer.get('val_trans', 'random_crop')['ratio'],\n                    allow_outside_center=self.configer.get('val_trans', 'random_crop')['allow_outside_center']\n                )\n\n            elif self.configer.get('val_trans', 'random_crop')['method'] == 'grid':\n                self.transforms['random_crop'] = RandomCrop(\n                    crop_size=self.configer.get('val_trans', 'random_crop')['crop_size'],\n                    method=self.configer.get('val_trans', 'random_crop')['method'],\n                    grid=self.configer.get('val_trans', 'random_crop')['grid'],\n                    crop_ratio=self.configer.get('val_trans', 'random_crop')['ratio'],\n                    allow_outside_center=self.configer.get('val_trans', 'random_crop')['allow_outside_center']\n                )\n\n            else:\n                print('Not Support Crop Method!')\n                exit(1)\n\n        if 'random_rotate' in self.configer.get('val_trans', 'trans_seq'):\n            self.transforms['random_rotate'] = RandomRotate(\n                max_degree=self.configer.get('val_trans', 'random_rotate')['rotate_degree'],\n                rotate_ratio=self.configer.get('val_trans', 'random_rotate')['ratio'],\n                mean=self.configer.get('normalize', 'mean_value')\n            )\n\n        if 'resize' in self.configer.get('val_trans', 'trans_seq'):\n            if 'target_size' in self.configer.get('val_trans', 'resize'):\n                self.transforms['resize'] = Resize(\n                    target_size=self.configer.get('val_trans', 'resize')['target_size']\n                )\n\n    def __call__(self, img, label=None):\n\n        if self.split == 'train':\n            shuffle_trans_seq = []\n            if self.configer.exists('train_trans', 'shuffle_trans_seq'):\n                if isinstance(self.configer.get('train_trans', 'shuffle_trans_seq')[0], list):\n                    shuffle_trans_seq_list = self.configer.get('train_trans', 'shuffle_trans_seq')\n                    shuffle_trans_seq = shuffle_trans_seq_list[random.randint(0, len(shuffle_trans_seq_list) - 1)]\n                else:\n                    shuffle_trans_seq = self.configer.get('train_trans', 'shuffle_trans_seq')\n                    random.shuffle(shuffle_trans_seq)\n\n            for trans_key in (shuffle_trans_seq + self.configer.get('train_trans', 'trans_seq')):\n                img, label = self.transforms[trans_key](img, label)\n\n        else:\n            for trans_key in self.configer.get('val_trans', 'trans_seq'):\n                img, label = self.transforms[trans_key](img, label)\n\n        return img, label\n","sub_path":"utils/cv2_aug_transforms.py","file_name":"cv2_aug_transforms.py","file_ext":"py","file_size_in_byte":37657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} {"seq_id":"150005200","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/9/6 16:37\n# @Author : huiqin.wang\n# @Email : huiqinwang@envisioncn.com\n# @File : fibcache.py\n# @Software: PyCharm\n# @Description: Application 1: cache intermediate results\nfrom functools import wraps\n\nresult_cache = {}\n\n\ndef memo(fn):\n    print(result_cache)\n    miss = object()\n\n    @wraps(fn)\n    def wrapper(*args):\n        result = result_cache.get(args, miss)\n        if result is miss:\n            result = fn(*args)\n            result_cache[args] = result\n        return result\n    return wrapper\n\n\n@memo\ndef fib(n):\n    if n < 2:\n        return n\n    return fib(n-1) + fib(n-2)\n\n\nfib(10)","sub_path":"python-learning/decorators/fibcache.py","file_name":"fibcache.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} {"seq_id":"500735355","text":"import 
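The CV2AugCompose record above instantiates every transform twice, once per split, with near-identical if-blocks. Below is a minimal, self-contained sketch of a table-driven alternative; the config layout, the factory table, and the two stand-in transform classes are illustrative assumptions, not the repository's actual API.

class RandomHue(object):
    def __init__(self, delta, hue_ratio):
        self.delta, self.hue_ratio = delta, hue_ratio

class RandomPerm(object):
    def __init__(self, perm_ratio):
        self.perm_ratio = perm_ratio

# One factory entry per transform key replaces a hand-written branch per split.
TRANSFORM_FACTORIES = {
    'random_hue': lambda cfg: RandomHue(delta=cfg['delta'], hue_ratio=cfg['ratio']),
    'random_perm': lambda cfg: RandomPerm(perm_ratio=cfg['ratio']),
}

def build_transforms(config, split):
    # Instantiate only the transforms named in '<split>_trans' -> 'trans_seq'.
    section = config['{}_trans'.format(split)]
    return {key: TRANSFORM_FACTORIES[key](section[key])
            for key in section['trans_seq'] if key in TRANSFORM_FACTORIES}

demo = {'val_trans': {'trans_seq': ['random_hue'],
                      'random_hue': {'delta': 18, 'ratio': 0.5}}}
print(build_transforms(demo, 'val'))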
struct\nimport binascii\n\nfrom Crypto.Cipher import AES\nfrom Crypto.Hash import HMAC, SHA256\nfrom Crypto.Protocol import KDF\nfrom Crypto.Random import random\nfrom Crypto.Random.Fortuna.FortunaGenerator import AESGenerator\n\nfrom dh import create_dh_key, calculate_dh_secret\n\n\nclass PRNG_AES(object):\n seed_size = 32\n\n def __init__(self, secret):\n self.seed = secret\n self.counter = None\n self.bytes_counter = 0\n self.PRNG = AESGenerator()\n self.reseed()\n\n def next(self, bytes_count) -> bytes:\n self.bytes_counter += bytes_count\n if self.bytes_counter > self.PRNG.max_bytes_per_request:\n self.reseed()\n self.bytes_counter = bytes_count\n\n return self.PRNG.pseudo_random_data(bytes_count)\n\n def reseed(self):\n # Derive next seed by SHA256(prev_seed)\n self.seed = SHA256.new(self.seed).digest()\n self.PRNG.reseed(self.seed)\n\n\n# Split the derived key derived by PBKDF2\nclass KeyBlock(object):\n key_hmac_size = 32 # bytes\n key_cipher_size = 32 # for AES256\n iv_size = 16 # for AES CBC mode which iv size = block size = 16 bytes\n key_block_size = key_hmac_size * 2 + PRNG_AES.seed_size * 2 + key_cipher_size * 2 + iv_size * 2\n\n def __init__(self, key_block_bytes):\n if len(key_block_bytes) < KeyBlock.key_block_size:\n raise Exception('Not enough bytes for key block!')\n\n def split(_len, a):\n return a[:_len], a[_len:]\n\n b = key_block_bytes\n\n self.client_write_MAC_secret, b = split(KeyBlock.key_hmac_size, b)\n self.server_write_MAC_secret, b = split(KeyBlock.key_hmac_size, b)\n self.client_write_PRNG_seed, b = split(PRNG_AES.seed_size, b)\n self.server_write_PRNG_seed, b = split(PRNG_AES.seed_size, b)\n self.client_write_key, b = split(KeyBlock.key_cipher_size, b)\n self.server_write_key, b = split(KeyBlock.key_cipher_size, b)\n self.client_write_IV, b = split(KeyBlock.iv_size, b)\n self.server_write_IV, b = split(KeyBlock.iv_size, b)\n\n def __str__(self) -> str:\n return 'client_write_MAC_secret:%s\\n' \\\n 'server_write_MAC_secret:%s\\n' \\\n 'client_write_PRNG_seed:%s\\n' \\\n 'server_write_PRNG_seed:%s\\n' \\\n 'client_write_key:%s\\n' \\\n 'server_write_key:%s\\n' \\\n 'client_write_IV:%s\\n' \\\n 'server_write_IV:%s' % (\n binascii.hexlify(self.client_write_MAC_secret),\n binascii.hexlify(self.server_write_MAC_secret),\n binascii.hexlify(self.client_write_PRNG_seed),\n binascii.hexlify(self.server_write_PRNG_seed),\n binascii.hexlify(self.client_write_key),\n binascii.hexlify(self.server_write_key),\n binascii.hexlify(self.client_write_IV),\n binascii.hexlify(self.server_write_IV),)\n\n\n# The tag is generated using;\n#\n# mac_data = num_to_4_le_bytes(frame_counter)\n# mac_data += cipher_text\n# tag = HMAC_SHA256(mac_data, secret)\n# frame_counter += 1\n#\n# With frame_counter, we could detect frame reorder, drop or replay attack within the same connection.\n# (eg. the same cipher_text will have different tag within the same connection)\nclass HMACWithFrameCounter(object):\n tag_size = 32\n counter_size = 16\n\n def __init__(self, secret: bytes, prng_seed: bytes):\n self.hmac = HMAC.new(secret, digestmod=SHA256)\n self.counter = PRNG_AES(prng_seed)\n\n def calculate_tag(self, frame: bytes) -> bytes:\n _hmac = self.hmac.copy()\n _hmac.update(self.counter.next(HMACWithFrameCounter.counter_size))\n _hmac.update(frame)\n\n return _hmac.digest()[:self.tag_size]\n\n\nclass StealthConn(object):\n random_size = 32 # bytes\n block_size = 16\n\n def __init__(self, conn, client=False, server=False, verbose=False):\n if client == server:\n raise Exception(\"Exo me? 
You can't be either nor neither of client / server.\")\n\n self.conn = conn\n self.hmac_send = None\n self.hmac_recv = None\n self.cipher_send = None\n self.cipher_recv = None\n self.client = client\n self.server = server\n self.verbose = verbose\n self.initiate_session()\n\n def initiate_session(self):\n # Perform the initial connection handshake for agreeing on a shared secret\n\n # get server and client random\n self_random = random.getrandbits(StealthConn.random_size * 8).to_bytes(StealthConn.random_size,\n byteorder='little')\n self.send(self_random)\n other_random = self.recv()\n if len(other_random) != self.random_size:\n raise Exception('Random size error!')\n\n if self.client:\n server_random = other_random\n client_random = self_random\n else:\n server_random = self_random\n client_random = other_random\n\n # Exchange master_secret via Diffie-Hellman key exchange\n my_public_key, my_private_key = create_dh_key()\n # Send them our public key\n self.send(bytes(str(my_public_key), \"ascii\"))\n # Receive their public key\n their_public_key = int(self.recv())\n # Obtain our shared secret\n master_secret = calculate_dh_secret(their_public_key, my_private_key)\n print(\"Shared master secret: {}\".format(binascii.hexlify(master_secret)))\n\n # Derive hmac key, encrypt key and iv from server_random, client_random and master_secret using PBKDF2\n # Refer to RFC2898\n key_block_bytes = KDF.PBKDF2(master_secret + server_random + client_random, b\"team.football\",\n KeyBlock.key_block_size, prf=lambda p, s: HMAC.new(p, s, SHA256).digest())\n\n print(\"key_block_bytes: {}\".format(binascii.hexlify(key_block_bytes)))\n\n key_block = KeyBlock(key_block_bytes)\n\n print(\"key_block:\\n{}\".format(key_block))\n\n if self.client:\n self.hmac_recv = HMACWithFrameCounter(key_block.server_write_MAC_secret, key_block.server_write_PRNG_seed)\n self.hmac_send = HMACWithFrameCounter(key_block.client_write_MAC_secret, key_block.client_write_PRNG_seed)\n self.cipher_recv = AES.new(key_block.server_write_key, AES.MODE_CBC, key_block.server_write_IV)\n self.cipher_send = AES.new(key_block.client_write_key, AES.MODE_CBC, key_block.client_write_IV)\n else:\n self.hmac_recv = HMACWithFrameCounter(key_block.client_write_MAC_secret, key_block.client_write_PRNG_seed)\n self.hmac_send = HMACWithFrameCounter(key_block.server_write_MAC_secret, key_block.server_write_PRNG_seed)\n self.cipher_recv = AES.new(key_block.client_write_key, AES.MODE_CBC, key_block.client_write_IV)\n self.cipher_send = AES.new(key_block.server_write_key, AES.MODE_CBC, key_block.server_write_IV)\n\n def send(self, data):\n if self.cipher_send:\n if self.verbose:\n print(\"Original data: {}\".format(data))\n data = self.pad(data)\n pre_auth_text = self.cipher_send.encrypt(data)\n if self.verbose:\n print(\"Encrypted data: {}\".format(repr(pre_auth_text)))\n else:\n pre_auth_text = data\n\n if self.hmac_send:\n # generate tag for the cipher text\n tag = self.hmac_send.calculate_tag(pre_auth_text)\n\n if self.verbose:\n print(\"Data tag: {}\".format(repr(tag)))\n\n # append tag at the tail of the cipher text\n authed_text = pre_auth_text + tag\n else:\n authed_text = pre_auth_text\n\n if self.verbose:\n print(\"Sending packet of length {}\".format(len(authed_text)))\n\n # Encode the data's length into an unsigned two byte int ('H')\n pkt_len = struct.pack('H', len(authed_text))\n self.conn.sendall(pkt_len)\n self.conn.sendall(authed_text)\n\n def recv(self):\n # Decode the data's length from an unsigned two byte int ('H')\n pkt_len_packed = 
self.conn.recv(struct.calcsize('H'))\n unpacked_contents = struct.unpack('H', pkt_len_packed)\n pkt_len = unpacked_contents[0]\n\n received_size = 0\n authed_data = b''\n while received_size < pkt_len:\n r = self.conn.recv(pkt_len - received_size)\n received_size += len(r)\n authed_data += r\n\n if self.verbose:\n print(\"Receiving packet of length {}\".format(pkt_len))\n\n if self.hmac_recv:\n # check tag\n if pkt_len < HMACWithFrameCounter.tag_size:\n self.auth_error()\n\n tag = authed_data[-HMACWithFrameCounter.tag_size:]\n if self.verbose:\n print(\"Received data tag {}\".format(repr(tag)))\n\n cipher_text = authed_data[:pkt_len - HMACWithFrameCounter.tag_size]\n tag_calc = self.hmac_recv.calculate_tag(cipher_text)\n if self.verbose:\n print(\"Calculated data tag {}\".format(repr(tag_calc)))\n\n if tag != tag_calc:\n self.auth_error()\n else:\n cipher_text = authed_data\n\n if self.cipher_recv:\n # decrypt data\n pad_data = self.cipher_recv.decrypt(cipher_text)\n data = self.unpad(pad_data)\n if self.verbose:\n print(\"Encrypted data: {}\".format(repr(cipher_text)))\n print(\"Original data: {}\".format(data))\n else:\n data = cipher_text\n\n return data\n\n # Padding function, implementing PKCS#7\n def pad(self, s):\n s_bytearray = bytearray(s)\n for i in range(1, self.block_size - len(s) % self.block_size + 1):\n s_bytearray.append(self.block_size - len(s) % self.block_size)\n\n s = bytes(s_bytearray)\n return s\n\n # Unpadding function\n def unpad(self, s):\n return s[:-int(s[len(s) - 1])]\n\n def auth_error(self):\n raise Exception(\"Auth check failed!\")\n\n def close(self):\n self.conn.close()\n","sub_path":"lib/comms.py","file_name":"comms.py","file_ext":"py","file_size_in_byte":10179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"217352739","text":"from logicobjects import Sentence\nfrom logicobjects import Atom\nfrom symbol_convert import LogicSymbols\n\n\nclass SentenceParser(object):\n\n def _recurInput(self, iter):\n sens = []\n for c in iter:\n if c == '(':\n result, legal = self._recurInput(iter)\n if not legal:\n raise ValueError('Bad expression: unbalanced parenthesis')\n sens.append(result)\n elif c == ')':\n return sens, True\n else:\n sens.append(c)\n return sens, False\n\n def _filterNegations(self, input_list):\n count = 0\n for i in range(len(input_list)):\n sen = copy.deepcopy(input_list[i - count])\n sen.replace('~', '')\n if len(sen) == 0:\n neg = input_list[i - count]\n del input_list[i - count]\n input_list[i - count] = neg + input_list[i - count]\n count += 1\n\n def _createSentences(self, input_list):\n if type(input_list) is not list:\n return Atom(input_list)\n\n negations = 0\n i = 0\n\n while input_list[i] == '~':\n negations += 1\n i += 1\n\n left_sen = self._createSentences(input_list[i])\n if left_sen is None:\n return None\n\n i += 1\n\n while negations > 0:\n left_sen.negate()\n negations -= 1\n\n # If given a negated atom\n if i == len(input_list):\n return left_sen\n\n oper = input_list[i]\n if oper not in LogicSymbols.OP_CONVERT:\n self.forget_all()\n self.add_text('Unrecognized operator: ' + oper, 0)\n return None\n i += 1\n\n while input_list[i] == '~':\n negations += 1\n i += 1\n\n right_sen = self._createSentences(input_list[i])\n if right_sen is None:\n return None\n\n while negations > 0:\n right_sen.negate()\n negations -= 1\n\n return Sentence(left_sen, right_sen, oper)\n\n def parseInput(self, user_in):\n results_list = self._recurInput(iter(user_in))[0]\n\n return 
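One detail worth noting in the comms.py record above: recv() compares the received tag to the recomputed one with !=, which is not a constant-time comparison. A standalone sketch of the safer idiom using only the standard library follows; it is independent of the PyCrypto HMAC objects the record builds.

import hashlib
import hmac

def verify_tag(mac_secret, frame, received_tag):
    # Recompute the expected tag, then compare in constant time so an
    # attacker cannot learn how many leading bytes of a forged tag matched.
    expected = hmac.new(mac_secret, frame, hashlib.sha256).digest()
    return hmac.compare_digest(expected, received_tag)

good = hmac.new(b'secret', b'frame', hashlib.sha256).digest()
assert verify_tag(b'secret', b'frame', good)
assert not verify_tag(b'secret', b'frame', b'\x00' * 32)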
self._createSentences(results_list)\n","sub_path":"sentence_parser.py","file_name":"sentence_parser.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"516369876","text":"\"\"\"\nTo profile the file, install line_profiler, decotrate functions with @profile and run:\n\n```\nkernprof -v -b -l runner.py\n```\n\"\"\"\n\nimport pdb\nimport sys\nimport os\nimport time\nimport random\nimport traceback\nfrom time import sleep\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nimport pandas as pd\nfrom pathos.threading import ThreadPool as ThPool\nfrom pathos.multiprocessing import ProcessPool as ProPool\n\nfrom github import Github\nfrom utils import get_repo_names\nfrom utils import get_existing_results\nfrom utils import get_commits_from_clone_repo\nfrom utils import check_in_problem_repo\nfrom utils import write_problem_repo\nfrom utils import get_commits_stats_from_clone_repo\nimport numpy as np\nfrom datetime import timezone\nimport re\n\nOUTPUT_PATH = \"./monthly_results/\"\nrandom_time = [60, 179, 110, 80, 200, 250, 300, 400]\nQUOTA_LIMIT = 100\n\n\nclass Miner:\n def __init__(self, user_token, debug=False, num_workers=1, batch_size=200, use_clone=True, commits_stats_from_clone=True, token_idx = 0):\n self.g = Github(user_token, per_page=100)\n self.debug_counts = 200 if debug else 0\n self.results = None\n self.num_workers = num_workers\n self.batch_size = batch_size\n self.use_clone=use_clone\n self.commits_stats_from_clone = commits_stats_from_clone\n self.token_idx = token_idx\n\n def get_rate_limit(self, func_name, quota_need, isPrint = False):\n remaining = self.g.rate_limiting\n if (isPrint):\n print(f\"Token idx {self.token_idx}, {self.repo_name}, Running {func_name}, Rate limit: {remaining}\")\n \n start = time.time()\n while self.g.rate_limiting[0] < quota_need:\n delay = random.choice(random_time)\n print(f\"Token idx {self.token_idx}, {self.repo_name}, Delay {delay} sec\")\n sleep(delay) \n\n elapse = time.time() - start\n if elapse > 100:\n print(f\"Wait {elapse/60} minutes!\")\n\n def get_data(self, repo_name, debug=True):\n print('Requests remaining = ' + str(self.g.rate_limiting[0]) + ' for token idx: ' + str(self.token_idx))\n self.repo_name = repo_name\n self.output_folder = self._create_output_folder()\n self.repo = self.g.get_repo(repo_name)\n \n actions = [self._fetch_commit_data, self._add_commits_data_to_results, \n self._get_pull_requests, self._get_issues, self._get_stargazers, \n self._get_forks, self.save_results]\n# actions = [self._fetch_commit_data, self._get_releases, self._add_commits_data_to_results, \n# self._get_pull_requests, self._get_issues, self._get_stargazers, \n# self._get_forks, self.save_results]\n# actions = [self._fetch_commit_data, self._get_releases, self._add_commits_data_to_results, \n# self._get_pull_requests, self._get_issues, self._get_stargazers, \n# self._get_forks, self._get_watchers, self.save_results]\n# actions = [self._get_releases, self._get_commits, \n# self._get_pull_requests, self.save_results]\n\n for act in actions:\n act()\n\n def save_results(self):\n self.results.to_csv(\n OUTPUT_PATH + f\"{self.repo_name.replace('/','-')}_monthly.csv\", index=False\n# OUTPUT_PATH + f\"{self.repo_name.split('/')[-1]}_monthly.csv\", index=False\n )\n \n def _get_results_by_threading(self, func, params):\n \"\"\"\n Query github API by multithreading.\n return a list containing all results.\n \"\"\"\n num_workers = 
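In the sentence_parser.py record above, _filterNegations calls sen.replace('~', '') without using the return value, and copy is never imported, so the negation pass is a no-op as written. Below is a minimal flat-list sketch of the intended folding step; the nested-list case the parser also handles is deliberately omitted.

def fold_negations(tokens):
    # str.replace returns a new string, so its result must be kept;
    # here runs of '~' are folded onto the next token explicitly.
    out, pending = [], ''
    for tok in tokens:
        if tok == '~':
            pending += '~'
        else:
            out.append(pending + tok)
            pending = ''
    return out

assert fold_negations(['~', '~', 'A', '=>', 'B']) == ['~~A', '=>', 'B']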
self.num_workers\n# if func.__name__ not in [\"multi_pulls\",\"multi_commits\", \"multi_watchers\", \"multi_releases\"]:\n# num_workers = 1;\n if self.debug_counts:\n p = ThPool(num_workers)\n pool_args = params[: self.debug_counts]\n return p.map(func, pool_args)\n else:\n stats = []\n start = time.time()\n totalCount = params.totalCount\n for i in range(int(params.totalCount/self.batch_size)+1):\n# if self.num_workers != 1 and i != 0 and (i+1)*self.batch_size % 800==0:\n# print(\"Sleep 30 sec\")\n# sleep(30)\n p = ThPool(num_workers)\n self.get_rate_limit(str(func.__name__), 50, True)\n print(f'[start: {i*self.batch_size}] [end: {(i + 1) * self.batch_size}] [totalCount: {params.totalCount}]')\n endIndex = min((i+1) * self.batch_size, totalCount)\n temp = p.map(func, params[i*self.batch_size:endIndex])\n stats += temp\n print(f\"{self.repo_name}, {func.__name__} takes: {round(time.time()-start,3)} secs\" )\n print('Requests remaining = ' + str(self.g.rate_limiting[0]) + ' for token idx: ' + str(self.token_idx))\n# print('Requests remaining = ' + str(self.g.rate_limiting[0]))\n return stats\n\n def _create_output_folder(self):\n result_path = OUTPUT_PATH + self.repo_name.replace(\"/\", '-')\n# result_path = OUTPUT_PATH + self.repo_name.split(\"/\")[-1]\n os.makedirs(result_path, exist_ok=True)\n return result_path\n\n def _read_existing_data(self, file_name):\n path = os.path.join(self.output_folder, file_name)\n if (os.path.isfile(path)):\n return pd.read_csv(path)\n else:\n return None\n\n def _fetch_commit_data(self):\n \"\"\"\n Get commits activity grouped by month.\n \"\"\"\n #def retreieve_commits(commits_dates):\n def retrieve_commits():\n stats = []\n commits = self.repo.get_commits()\n print('Got commits paginated list')\n self.commit_hash_map = dict()\n i = 0\n for commit in commits:\n self.get_rate_limit('_fetch_commit_data', 10, (i % 100 == 0))\n one = {\"commit_id\": commit.sha}\n #one[\"committer_id\"] = commit.author.login if commit.author else \"None\"\n one[\"committer_id\"] = commit.commit.author.email if (commit.commit.author and commit.commit.author.email) else 'None'\n\n #one[\"committed_at\"] = commits_dates[commit.sha][0]\n one[\"committed_at\"] = commit.commit.author.date.astimezone(tz = timezone.utc).replace(tzinfo = None)\n one['committer_domain'] = extract_domain_from_email(commit.commit.author.email) \\\n if (commit.commit.author and commit.commit.author.email) else \"None\"\n self.commit_hash_map[one['commit_id']] = one['committed_at']\n stats.append(one)\n i = i + 1\n return stats\n\n def extract_domain_from_email(email_id):\n try:\n domain = re.match(r'.*@(.*)', email_id).group(1)\n# print('Email id = ' + str(email_id))\n# print('domain = ' + str(domain))\n except:\n domain = \"None\"\n return domain\n \n print(f'Entering fetch commits for {self.repo_name}')\n csv_file_name = f\"{self.repo_name.split('/')[-1]}_commits_and_comments.csv\"\n stats_pd = self._read_existing_data(csv_file_name)\n if stats_pd is not None:\n stats_pd.committed_at = stats_pd.committed_at.astype(\"datetime64[ns]\")\n self.commit_stats = stats_pd\n else:\n if self.commits_stats_from_clone:\n stats = get_commits_stats_from_clone_repo(self.repo_name)\n else:\n #commits_dates = get_commits_from_clone_repo(self.repo_name)\n #stats = retreieve_commits(commits_dates) # get commits dates by clone repo\n stats = retrieve_commits()\n #pdb.set_trace()\n stats_pd = pd.DataFrame.from_records(stats, columns=[\"commit_id\", \"committer_id\", \"committed_at\", \"committer_domain\"])\n 
stats_pd.committed_at = stats_pd.committed_at.astype(\"datetime64[ns]\")\n\n path = os.path.join(self.output_folder, csv_file_name)\n stats_pd.to_csv(\n path,\n index=False,\n columns=[\"commit_id\", \"committer_id\", \"committed_at\", \"committer_domain\"],\n )\n self.commit_stats = stats_pd\n print('Requests remaining = ' + str(self.g.rate_limiting[0]) + ' for token idx: ' + str(self.token_idx))\n\n\n # @profile\n def _add_commits_data_to_results(self): \n #def _get_commits(self): \n \"\"\"\n Get commits activity grouped by month.\n \"\"\"\n #def retreieve_commits(commits_dates):\n# def retrieve_commits():\n# stats = []\n# commits = self.repo.get_commits()\n# print('Got commits paginated list')\n# for commit in commits:\n# one = {\"commit_id\": commit.sha}\n# #one[\"committer_id\"] = commit.author.login if commit.author else \"None\"\n# one[\"committer_id\"] = commit.commit.author.email if (commit.commit.author and commit.commit.author.email) else 'None'\n#\n# #one[\"committed_at\"] = commits_dates[commit.sha][0]\n# one[\"committed_at\"] = commit.commit.author.date.astimezone(tz = timezone.utc).replace(tzinfo = None)\n# one['committer_domain'] = extract_domain_from_email(commit.commit.author.email) \\\n# if (commit.commit.author and commit.commit.author.email) else \"None\"\n# stats.append(one)\n# return stats\n#\n# def extract_domain_from_email(email_id):\n# try:\n# domain = re.match(r'.*@(.*)', email_id).group(1)\n## print('Email id = ' + str(email_id))\n## print('domain = ' + str(domain))\n# except:\n# domain = \"None\"\n# return domain\n# \n# print('Entering get commits')\n# if self.commits_stats_from_clone:\n# stats = get_commits_stats_from_clone_repo(self.repo_name)\n# else:\n# #commits_dates = get_commits_from_clone_repo(self.repo_name)\n# #stats = retreieve_commits(commits_dates) # get commits dates by clone repo\n# stats = retrieve_commits()\n# #pdb.set_trace()\n# stats_pd = pd.DataFrame.from_records(stats)\n# stats_pd.committed_at = stats_pd.committed_at.astype(\"datetime64[ns]\")\n #stats_pd['committer_domain'] = stats_pd.apply(lambda row: get_committer_domain(row['committer_id']), axis = 1)\n #stats_pd['committer_domain'] = stats_pd.apply(lambda row: print('row = ' + str(row)), axis = 1)\n\n stats_pd = self.commit_stats\n\n start_date, end_date = (\n str(stats_pd.committed_at.min())[:7],\n str(stats_pd.committed_at.max())[:7],\n ) # i.e, 2019-09\n \n self.results = pd.DataFrame(\n {\"dates\": pd.date_range(start=start_date, end=end_date, freq=\"MS\")}\n )\n\n self.results['number_of_contributors'] = 0\n self.results['number_of_commits'] = 0\n self.results['number_of_new_contributors'] = 0\n self.results['number_of_contributor-domains'] = 0\n self.results['number_of_new_contributor-domains'] = 0\n\n current_contributors = set()\n current_contributor_domains = set()\n stats_pd = self.commit_stats\n for i in range(len(self.results)):\n if i != len(self.results) - 1:\n mask = (stats_pd.committed_at >= self.results.dates[i]) & (\n stats_pd.committed_at < self.results.dates[i + 1]\n )\n else:\n mask = stats_pd.committed_at >= self.results.dates[i]\n# if i == 0:\n# mask = stats_pd.committed_at <= self.results.date[i]\n# else:\n# mask = (stats_pd.committed_at <= self.results.date[i]) & (\n# stats_pd.committed_at > self.results.date[i-1]\n# )\n commit_pd = stats_pd[mask]\n self.results.at[i, 'number_of_contributors'] = commit_pd['committer_id'].nunique()\n self.results.at[i, 'number_of_contributor-domains'] = commit_pd['committer_domain'].nunique()\n self.results.at[i, 
'number_of_commits'] = commit_pd.shape[0]\n new_contributors = commit_pd[~commit_pd['committer_id'].isin(current_contributors)]['committer_id'].drop_duplicates()\n new_contributor_domains = commit_pd[~commit_pd['committer_domain'].isin(current_contributor_domains)]['committer_domain'].drop_duplicates()\n #print(new_contributors)\n #print('new contributors count = ' + str(new_contributors.count()))\n self.results.at[i, 'number_of_new_contributors'] = new_contributors.count()\n self.results.at[i, 'number_of_new_contributor-domains'] = new_contributor_domains.count()\n current_contributors.update(new_contributors.tolist())\n current_contributor_domains.update(new_contributor_domains.tolist())\n\n# self.results = new_pd.copy()\n# csv_file_name = f\"{self.repo_name.split('/')[-1]}_commits_and_comments.csv\"\n# path = os.path.join(self.output_folder, csv_file_name)\n# stats_pd.to_csv(\n# path,\n# index=False,\n# columns=[\"commit_id\", \"committer_id\", \"committed_at\", \"committer_domain\"],\n# )\n print('Requests remaining = ' + str(self.g.rate_limiting[0]) + ' for token idx: ' + str(self.token_idx))\n# print('Requests remaining = ' + str(self.g.rate_limiting[0]))\n\n def _create_tag_map(self):\n tag_map = dict()\n for tag in self.tags:\n tag_map[tag.name] = tag.commit\n return tag_map\n\n def _get_releases(self):\n \"\"\"\n Get releases for this repo\n \"\"\"\n def multi_releases(release):\n one = {\"release\": str(release.id)}\n #one['title'] = release.title\n one['tag_name'] = release.tag_name\n commit = self.tag_map[release.tag_name]\n sha = commit.sha\n one['commit_id'] = sha\n #one['committed_at'] = commit.commit.author.date.astimezone(tz = timezone.utc).replace(tzinfo = None)\n #one['date'] = commit.commit.author.date.astimezone(tz = timezone.utc).replace(tzinfo = None) #This might be making an API call\n if (sha in self.commit_hash_map):\n one['date'] = self.commit_hash_map[sha]\n else:\n one['date'] = commit.commit.author.date.astimezone(tz = timezone.utc).replace(tzinfo = None)\n return one\n\n # Get tags data\n print(f'Entering get_release for {self.repo_name}')\n self.tags = self.repo.get_tags()\n self.tag_map = self._create_tag_map()\n\n all_releases = self.repo.get_releases()\n stats = self._get_results_by_threading(multi_releases, all_releases)\n stats_pd = pd.DataFrame.from_records(stats, columns=['release', 'tag_name', 'commit_id', 'date']).sort_values(by='date', ignore_index = True)\n stats_pd['days_since_last_release'] = 0\n\n for i in range(len(stats_pd)):\n if (i == 0):\n stats_pd.at[i, 'days_since_last_release'] = 0\n else:\n stats_pd.at[i, 'days_since_last_release'] = (stats_pd.at[i, 'date'] - stats_pd.at[i - 1, 'date']).days\n\n #stats_pd.date = stats_pd.date.astype(\"datetime64[ns]\")\n print(stats_pd)\n self.results = stats_pd.copy()\n csv_file_name = f\"{self.repo_name.replace('/','-')}_releases.csv\"\n path = os.path.join(self.output_folder, csv_file_name)\n stats_pd.to_csv(\n path,\n index=False\n )\n print('Requests remaining = ' + str(self.g.rate_limiting[0]) + ' for token idx: ' + str(self.token_idx))\n# print('Requests remaining = ' + str(self.g.rate_limiting[0]))\n\n # @profile\n def _get_issues(self, state=\"all\"): # Total time: 1.4058 s for debug\n \"\"\"\n Get all the issues from this repo.\n In the csv file, we have the following cols:\n\n issue_id, state(open/closed), comments(int), created_at, closed_at\n\n \"\"\"\n def multi_issues(issue):\n one = {\"id\": str(issue.number)}\n one[\"state\"] = issue.state\n one[\"comments\"] = issue.comments\n 
#one[\"created_at\"] = str(issue._created_at.value)\n one[\"created_at\"] = issue.created_at.astimezone(tz = timezone.utc).replace(tzinfo = None)\n one[\"closed_at\"] = (\n #str(issue._closed_at.value)\n issue.closed_at.astimezone(tz = timezone.utc).replace(tzinfo = None)\n if issue.closed_at\n else pd.to_datetime(1)\n #else str(pd.to_datetime(1))\n ) # set not closed issue date to 1970-01-01 for calcualte monthly closed issues.\n one[\"title\"] = str(issue.title)\n return one\n\n all_issues = self.repo.get_issues(state=state)\n print('All issues count for ' + str(self.repo_name) + ' = ' + str(all_issues.totalCount))\n stats = self._get_results_by_threading(multi_issues, all_issues)\n\n stats_pd = pd.DataFrame.from_records(stats, columns=['id', 'state', 'comments', 'created_at', 'closed_at', 'title'])\n# stats_pd.created_at = stats_pd.created_at.astype(\"datetime64[ns]\")\n# stats_pd.closed_at = stats_pd.closed_at.astype(\n# \"datetime64[ns]\", errors=\"ignore\"\n# )\n\n self.results[\"number_of_open_issues\"] = 0\n self.results[\"number_of_closed_issues\"] = 0\n self.results[\"number_of_issue_comments\"] = 0 # comments from open + closed issues\n\n for i in range(len(self.results)):\n# if i == 0:\n# open_mask = (stats_pd.created_at <= self.results.date[i])\n# closed_mask = (stats_pd.closed_at <= self.results.date[i]) & (stats_pd.state == 'closed')\n# else:\n# open_mask = (\n# (stats_pd.created_at <= self.results.date[i]) \n# & (stats_pd.created_at > self.results.date[i-1])\n# )\n# closed_mask = (\n# (stats_pd.closed_at <= self.results.date[i]) \n# & (stats_pd.closed_at > self.results.date[i-1]) \n# & (stats_pd.state == 'closed')\n# )\n\n if i != len(self.results) - 1:\n open_mask = (\n (stats_pd.created_at >= self.results.dates[i])\n & (stats_pd.created_at < self.results.dates[i + 1])\n )\n closed_mask = (\n (stats_pd.closed_at >= self.results.dates[i])\n & (stats_pd.closed_at < self.results.dates[i + 1])\n & (stats_pd.state == \"closed\")\n )\n else:\n open_mask = (stats_pd.created_at >= self.results.dates[i])\n closed_mask = (stats_pd.closed_at >= self.results.dates[i]) & (\n stats_pd.state == \"closed\"\n )\n\n self.results.at[i, \"number_of_open_issues\"] = len(stats_pd[open_mask])\n self.results.at[i, \"number_of_closed_issues\"] = len(stats_pd[closed_mask])\n self.results.at[i, \"number_of_issue_comments\"] = sum(\n stats_pd[open_mask].comments\n ) + sum(\n stats_pd[closed_mask].comments\n ) # comments on both open + closed issues.\n \n csv_file_name = f\"{self.repo_name.split('/')[-1]}_issues.csv\"\n path = os.path.join(self.output_folder, csv_file_name)\n stats_pd.to_csv(path, index=False,\n columns=[\"id\", \"created_at\", \"closed_at\", \"state\", \"comments\", \"title\"])\n\n # @profile\n def _get_stargazers(self): # Total time: 0.811028 s for debug\n \"\"\"\n Get monthly stargazers and update it in self.results, will finally save to .csv file\n \"\"\"\n print(f'Entering collection of stars for {self.repo_name}')\n stargazer = self.repo.get_stargazers_with_dates()\n stats = []\n counts = self.debug_counts\n temp_counter = 0\n for star in stargazer:\n self.get_rate_limit('_get_stargazers', 10, (temp_counter % 100 == 0))\n if self.debug_counts:\n counts -= 1\n if counts == 0:\n break\n one = {\"user_id\": star.user.login}\n one[\"starred_at\"] = star.starred_at.astimezone(tz = timezone.utc).replace(tzinfo = None) if star.starred_at else None\n stats.append(one)\n temp_counter = temp_counter + 1\n \n stats_pd = pd.DataFrame.from_records(stats, columns=[\"starred_at\", 
\"user_id\"])\n stats_pd.sort_values(by=[\"starred_at\"])\n \n self.results[\"number_of_stargazers\"] = 0\n for i in range(len(self.results)):\n# if i == 0:\n# mask = stats_pd.starred_at <= self.results.date[i]\n# else:\n# mask = (\n# (stats_pd.starred_at <= self.results.date[i])\n# & (stats_pd.starred_at > self.results.date[i-1])\n# )\n if i != len(self.results) - 1:\n mask = (stats_pd.starred_at >= self.results.dates[i]) & (\n stats_pd.starred_at < self.results.dates[i + 1]\n )\n else:\n mask = stats_pd.starred_at >= self.results.dates[i]\n\n self.results.at[i, \"number_of_stargazers\"] = len(stats_pd[mask])\n \n csv_file_name = f\"{self.repo_name.split('/')[-1]}_stargazer.csv\"\n path = os.path.join(self.output_folder, csv_file_name)\n stats_pd.to_csv(path, index=False, columns=[\"starred_at\", \"user_id\"])\n print('Requests remaining = ' + str(self.g.rate_limiting[0]) + ' for token idx: ' + str(self.token_idx))\n# print('Requests remaining = ' + str(self.g.rate_limiting[0]))\n\n # @profile\n def _get_forks(self): # Total time: 2.84025 s for debug\n \"\"\"\n Get monthly forks and update it in self.results, will finally save to .csv file\n \"\"\"\n forks = self.repo.get_forks()\n stats = []\n counts = self.debug_counts\n temp_counter = 0\n print(f'Number of forks for {self.repo_name} = {forks.totalCount}')\n for fork in forks: # this line takes 90.1% time of this function\n self.get_rate_limit('_get_forks', 10, (temp_counter % 100 == 0))\n if self.debug_counts:\n counts -= 1\n if counts == 0:\n break\n one = {\"user_id\": fork.owner.login if fork.owner else fork.full_name.split('/')[0]}\n one[\"created_at\"] = fork.created_at.astimezone(tz = timezone.utc).replace(tzinfo = None) if fork.created_at else None\n stats.append(one)\n temp_counter = temp_counter + 1\n \n stats_pd = pd.DataFrame.from_records(stats, columns=[\"created_at\", \"user_id\"])\n stats_pd.sort_values(by=[\"created_at\"])\n\n self.results[\"number_of_forks\"] = 0\n for i in range(len(self.results)):\n# if i == 0:\n# mask = stats_pd.created_at <= self.results.date[i]\n# else:\n# mask = (\n# (stats_pd.created_at <= self.results.date[i])\n# & (stats_pd.created_at > self.results.date[i-1])\n# )\n\n if i != len(self.results) - 1:\n mask = (stats_pd.created_at >= self.results.dates[i]) & (\n stats_pd.created_at < self.results.dates[i + 1]\n )\n else:\n mask = stats_pd.created_at >= self.results.dates[i]\n\n self.results.at[i, \"number_of_forks\"] = len(stats_pd[mask])\n \n csv_file_name = f\"{self.repo_name.split('/')[-1]}_forks.csv\"\n path = os.path.join(self.output_folder, csv_file_name)\n stats_pd.to_csv(path, index=False, columns=[\"created_at\", \"user_id\"])\n print('Requests remaining = ' + str(self.g.rate_limiting[0]) + ' for token idx: ' + str(self.token_idx))\n\n # @profile\n def _get_watchers(self): # Total time: 4.25912 s for debug before multithread=\n \"\"\"\n Get number of watchers. 
Each watcher requires a API call.\n # for debug \n Before multithreading, Total time: 4.25912 s \n After multithreading, Total time: 1.125 s\n \"\"\"\n def multi_watchers(watcher):\n one = {\"user_id\": watcher.login}\n # created_at line takes 79.0% time of this function\n one[\"created_at\"] = watcher.created_at.astimezone(tz = timezone.utc).replace(tzinfo = None) if watcher.created_at else None\n return one \n\n watchers = self.repo.get_subscribers() # <---- this was wrong, not get_watchers!!\n stats = self._get_results_by_threading(multi_watchers, watchers)\n \n stats_pd = pd.DataFrame.from_records(stats, columns=[\"created_at\", \"user_id\"])\n stats_pd.sort_values(by=[\"created_at\"])\n\n self.results[\"number_of_watchers\"] = 0\n for i in range(len(self.results)):\n# if i == 0:\n# mask = stats_pd.created_at <= self.results.date[i]\n# else:\n# mask = (\n# (stats_pd.created_at <= self.results.date[i])\n# & (stats_pd.created_at > self.results.date[i-1])\n# )\n\n if i != len(self.results) - 1:\n mask = (stats_pd.created_at >= self.results.dates[i]) & (\n stats_pd.created_at < self.results.dates[i + 1]\n )\n else:\n mask = stats_pd.created_at >= self.results.dates[i]\n\n self.results.at[i, \"number_of_watchers\"]= len(stats_pd[mask])\n \n csv_file_name = f\"{self.repo_name.split('/')[-1]}_watchers.csv\"\n path = os.path.join(self.output_folder, csv_file_name)\n stats_pd.to_csv(path, index=False, columns=[\"created_at\", \"user_id\"])\n\n # @profile\n def _get_pull_requests(self, state=\"all\"): # Total time: 192.765 s for debug\n \"\"\"\n Get all the PR from this repo. Note that issues and PR share the same ID system.\n In the csv file, we have the following cols:\n \n PR_id, state(open/closed), comments, created_at, closed_at, merged, merged_at,\n\n \"\"\"\n print(f'Entering get pull requests for {self.repo_name}')\n pulls = self.repo.get_pulls(state=state, sort=\"created\")#, base=\"master\")\n totalCount = pulls.totalCount\n print(f'Number of pull requests for {self.repo_name} = {totalCount}')\n# print('Pulls are here')\n\n def sequential_pull(pull_list):\n ret = []\n temp_counter = 0\n for pr in pull_list:\n self.get_rate_limit('_get_pull_requests', 10, (temp_counter % 100 == 0))\n one = {\"id\": str(pr.number)}\n one[\"state\"] = pr.state\n ## FIXME pr.comments line takes 91.4% time of this function, this will call API once!\n #one[\"comments\"] = (pr.comments)\n one[\"created_at\"] = str(pr.created_at.astimezone(tz = timezone.utc).replace(tzinfo = None))\n # set not closed pr date to 1970-01-01 for calcualte monthly stats\n one[\"closed_at\"] = (\n str(pr.closed_at.astimezone(tz = timezone.utc).replace(tzinfo = None)) if pr.closed_at else str(pd.to_datetime(1))\n )\n # set not merged pr date to 1970-01-01 for calcualte monthly stats.\n one[\"merged_at\"] = (\n str(pr.merged_at.astimezone(tz = timezone.utc).replace(tzinfo = None)) if pr.merged_at else str(pd.to_datetime(1))\n )\n #one[\"merged_by\"] = str(pr.merged_by.login) if pr.merged_by else None\n #one[\"merged\"] = bool(pr._merged.value)\n one[\"merged\"] = True if pr.merged_at else False\n ret.append(one)\n temp_counter = temp_counter + 1\n return ret\n\n def multi_pulls(pr):\n one = {\"id\": str(pr.number)}\n one[\"state\"] = pr.state\n ## FIXME pr.comments line takes 91.4% time of this function, this will call API once!\n #one[\"comments\"] = (pr.comments)\n one[\"created_at\"] = str(pr.created_at.astimezone(tz = timezone.utc).replace(tzinfo = None))\n # set not closed pr date to 1970-01-01 for calcualte monthly stats\n 
one[\"closed_at\"] = (\n str(pr.closed_at.astimezone(tz = timezone.utc).replace(tzinfo = None)) if pr.closed_at else str(pd.to_datetime(1))\n )\n\n # set not merged pr date to 1970-01-01 for calcualte monthly stats.\n one[\"merged_at\"] = (\n str(pr.merged_at.astimezone(tz = timezone.utc).replace(tzinfo = None)) if pr.merged_at else str(pd.to_datetime(1))\n )\n #one[\"merged_by\"] = str(pr.merged_by.login) if pr.merged_by else None\n\n #one[\"merged\"] = bool(pr._merged.value)\n one[\"merged\"] = True if pr.merged_at else False\n return one\n\n stats = self._get_results_by_threading(multi_pulls, pulls)\n# stats = sequential_pull(pulls)\n stats_pd = pd.DataFrame.from_records(stats,\n columns=[\n \"id\",\n \"created_at\",\n \"closed_at\",\n \"merged_at\",\n \"state\",\n # \"comments\",\n \"merged\"\n ]\n )\n stats_pd.created_at = stats_pd.created_at.astype(\"datetime64[ns]\")\n stats_pd.closed_at = stats_pd.closed_at.astype(\"datetime64[ns]\", errors=\"ignore\")\n stats_pd.merged_at = stats_pd.merged_at.astype(\"datetime64[ns]\", errors=\"ignore\")\n\n self.results[\"number_of_open_PRs\"] = 0\n self.results[\"number_of_closed_PRs\"] = 0\n self.results[\"number_of_merged_PRs\"] = 0\n #self.results[\"PR_mergers\"] = 0\n #self.results[\"number_of_PR_comments\"] = 0 # comments from open + closed issues\n\n for i in range(len(self.results)):\n if i != len(self.results) - 1:\n open_mask = (stats_pd.created_at >= self.results.dates[i]) & (\n stats_pd.created_at < self.results.dates[i + 1]\n )\n closed_mask = (\n (stats_pd.closed_at >= self.results.dates[i])\n & (stats_pd.closed_at < self.results.dates[i + 1])\n & (stats_pd.state == \"closed\")\n & (stats_pd.merged == False)\n ) # all merged PR's state = close, so have to get rid of merged.\n merged_mask = (\n (stats_pd.closed_at >= self.results.dates[i])\n & (stats_pd.closed_at < self.results.dates[i + 1])\n & (stats_pd.merged)\n )\n else:\n open_mask = stats_pd.created_at >= self.results.dates[i]\n closed_mask = (\n (stats_pd.closed_at >= self.results.dates[i])\n & (stats_pd.state == \"closed\")\n & (stats_pd.merged == False)\n )\n merged_mask = (stats_pd.closed_at >= self.results.dates[i]) & (\n stats_pd.merged\n )\n self.results.at[i, \"number_of_open_PRs\"] = len(stats_pd[open_mask])\n self.results.at[i, \"number_of_closed_PRs\"] = len(stats_pd[closed_mask])\n self.results.at[i, \"number_of_merged_PRs\"] = len(stats_pd[merged_mask])\n# self.results.at[i, \"PR_mergers\"] = len(\n# stats_pd[merged_mask].merged_by.unique()\n# )\n# self.results.at[i, \"number_of_PR_comments\"] = (\n# sum(stats_pd[open_mask].comments)\n# + sum(stats_pd[closed_mask].comments)\n# + sum(stats_pd[merged_mask].comments)\n# ) # num of comments on open + closed + merged PRs.\n\n# for i in range(len(self.results)):\n# if (i == 0):\n# open_mask = stats_pd.created_at <= self.results.date[i]\n# closed_mask = (\n# (stats_pd.closed_at <= self.results.date[i])\n# & (stats_pd.state == \"closed\")\n# & (stats_pd.merged == False)\n# )\n# merged_mask = (stats_pd.closed_at <= self.results.date[i]) & (\n# stats_pd.merged\n# )\n# else:\n# open_mask = (stats_pd.created_at <= self.results.date[i]) & (\n# stats_pd.created_at > self.results.date[i-1]\n# )\n# closed_mask = (\n# (stats_pd.closed_at <= self.results.date[i])\n# & (stats_pd.closed_at > self.results.date[i-1])\n# & (stats_pd.state == \"closed\")\n# & (stats_pd.merged == False)\n# )\n# merged_mask = (\n# (stats_pd.closed_at <= self.results.date[i])\n# & (stats_pd.closed_at > self.results.date[i-1])\n# & (stats_pd.merged)\n# 
)\n#\n# self.results.at[i, \"number_of_open_PRs\"] = len(stats_pd[open_mask])\n# self.results.at[i, \"number_of_closed_PRs\"] = len(stats_pd[closed_mask])\n# self.results.at[i, \"number_of_merged_PRs\"] = len(stats_pd[merged_mask])\n# self.results.at[i, \"PR_mergers\"] = len(\n# stats_pd[merged_mask].merged_by.unique()\n# )\n# self.results.at[i, \"number_of_PR_comments\"] = (\n# sum(stats_pd[open_mask].comments)\n# + sum(stats_pd[closed_mask].comments)\n# + sum(stats_pd[merged_mask].comments)\n# ) # num of comments on open + closed + merged PRs.\n \n csv_file_name = f\"{self.repo_name.split('/')[-1]}_pr.csv\"\n path = os.path.join(self.output_folder, csv_file_name)\n stats_pd.to_csv(path, index=False,\n columns=[\n \"id\",\n \"created_at\",\n \"closed_at\",\n \"merged_at\",\n \"state\",\n# \"comments\",\n \"merged\"\n ]\n )\n\ndef run(token_idx):\n #repo_names = get_repo_names(\"./data/repo_linux.csv\", token_idx, len(_token))\n path = 'tokens.txt'\n tokens = []\n with open(path, 'r') as f:\n for token in f:\n #print('token = ' + str(token))\n tokens.append(str(token).strip())\n #print(tokens)\n# repo_names = get_repo_names(\"./data/repo_list.csv\", token_idx, len(_token))\n repo_names = get_repo_names(\"./data/repo_list.csv\", token_idx, len(tokens))\n existing_results = get_existing_results(OUTPUT_PATH)\n# existing_results = get_existing_results(\"./results/\")\n \n val = len(repo_names)\n print(f\"total repos: {val}\")\n print(f\"token_idx: {token_idx}\")\n# token = list(_token.values())[token_idx]\n token = tokens[token_idx]\n #pdb.set_trace()\n for repo_name in sorted(repo_names):\n print(f'Repo {repo_name} fetched using token {token_idx}')\n sub_name = repo_name.split(\"/\")[-1]\n if sub_name in existing_results or repo_name.replace('/','-') in existing_results:\n # print(f\"{repo_name} exists, skipping...\")\n continue \n if check_in_problem_repo(repo_name):\n # print(f\"{repo_name} has a problem, found in problem_repo.txt, skipping...\")\n continue\n miner = Miner(token, debug=False, commits_stats_from_clone=False, num_workers = 3, token_idx = token_idx)\n if miner.g.rate_limiting[0] < QUOTA_LIMIT:\n # sleep(random.choice(random_time))\n print(f\"{repo_name}: token is not ready...\")\n break\n # miner = Miner(token, debug=False)\n\n print(f\"{repo_name}: start...\")\n try:\n error_message = miner.get_data(repo_name)\n except Exception:\n write_problem_repo(repo_name)\n #print(f\"{miner.repo_name} has errors...\")\n print(f\"{repo_name} has errors...\")\n traceback.print_exc()\n continue\n\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n assert(\"Pass token index!\")\n token_idx = int(sys.argv[1])\n run(token_idx)\n\n","sub_path":"monthly_runner_v3.py","file_name":"monthly_runner_v3.py","file_ext":"py","file_size_in_byte":36652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"627798387","text":"import numpy as np\nimport tensorflow as tf\n\nker = np.loadtxt('data/ker.csv')\nim = np.loadtxt('data/im.csv')\n\nimg = tf.reshape(im,shape=[1,6,6,1])\nkernel = tf.reshape(ker,shape=[3,3,1,1])\n\nk1 = tf.Variable(kernel,name='k1')\n\nconv1 = tf.nn.conv2d(img, k1, strides=[1, 1, 1, 1], padding='SAME', name='conv1')\nrelu1 = tf.nn.relu(conv1)\npool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')\n\ngrad_pr = tf.gradients(pool1,conv1)\ngrad_rc = tf.gradients(relu1,conv1)\ngrad_ck = tf.gradients(conv1,k1)\n\nwith tf.Session() as sess:\n init = tf.global_variables_initializer()\n 
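The miner above computes each monthly column by building boolean date masks in a Python loop. Assuming a reasonably recent pandas (named aggregation needs 0.25+), the same monthly bucketing can be expressed in one groupby; the toy data here is invented for illustration.

import pandas as pd

commits = pd.DataFrame({
    'committer_id': ['a', 'b', 'a'],
    'committed_at': pd.to_datetime(['2020-01-03', '2020-01-20', '2020-02-01']),
})

# 'MS' groups rows into calendar-month buckets, like the hand-built masks.
monthly = commits.groupby(pd.Grouper(key='committed_at', freq='MS')).agg(
    number_of_commits=('committer_id', 'size'),
    number_of_contributors=('committer_id', 'nunique'),
)
print(monthly)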
sess.run(init)\n    print(sess.run(pool1))\n    g = sess.run(grad_ck)[0]\n    print(g.shape,'\\n',np.reshape(g,(3,3)))","sub_path":"expl_1.py","file_name":"expl_1.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} {"seq_id":"271034143","text":"###############################################################################\n# Define everything needed for the bottling of binaries on OSX\n###############################################################################\n\n# Add a manual scheduler for building bottles, and ONLY a manual scheduler\nc['schedulers'].append(ForceScheduler(\n    name=\"bottle build\",\n    builderNames=[\"bottle_\" + z for z in osx_names],\n    reason=FixedParameter(name=\"reason\", default=\"\"),\n    branch=FixedParameter(name=\"branch\", default=\"\"),\n    revision=FixedParameter(name=\"revision\", default=\"\"),\n    repository=FixedParameter(name=\"repository\", default=\"\"),\n    project=FixedParameter(name=\"project\", default=\"Bottling\"),\n    properties=[\n        StringParameter(name=\"formula\", label=\"Formula\", size=30, default=\"staticfloat/juliadeps/\")\n    ]\n))\n\n# Steps to build a Homebrew Bottle\nosx_bottle_factory = BuildFactory()\nosx_bottle_factory.useProgress = True\nosx_bottle_factory.addSteps([\n    # Clean everything out that's in the directory!\n    ShellCommand(\n    \tname=\"precleanup\",\n    \tcommand=[\"/bin/bash\", \"-c\", \"rm -f *.{sh,gz}\"]\n    ),\n    \n    # Copy our build_bottle.sh script over to the slave:\n    FileDownload(\n    \tmastersrc=\"../commands/build_bottle.sh\",\n    \tslavedest=\"build_bottle.sh\"\n    ),\n    \n    # Next, invoke build_bottle.sh!\n    ShellCommand(\n    \tcommand=[\"/bin/bash\", \"build_bottle.sh\", Property('formula')],\n    \thaltOnFailure=True\n    ),\n\n    # Grab the output and transfer it back!\n    SetPropertyFromCommand(\n    \tname=\"Get bottle filename\",\n    \tcommand=[\"/bin/bash\", \"-c\", \"ls *.tar.gz\"],\n    \tproperty=\"filename\",\n    \thaltOnFailure=True\n    ),\n    MasterShellCommand(\n    \tname=\"mkdir bottle_cache\",\n    \tcommand=[\"mkdir\", \"-p\", \"/tmp/bottle_cache\"]\n    ),\n    FileUpload(\n    \tslavesrc=Interpolate(\"%(prop:filename)s\"),\n    \tmasterdest=Interpolate(\"/tmp/bottle_cache/%(prop:filename)s\")\n    ),\n    MasterShellCommand(\n    \tname=\"Upload to AWS\",\n    \tcommand=[\"/bin/bash\", \"-c\", Interpolate(\"~/bin/aws put --fail --public juliabottles/%(prop:filename)s /tmp/bottle_cache/%(prop:filename)s\")],\n    \thaltOnFailure=True\n    ),\n\n    # Cleanup downloaded bottle file\n    MasterShellCommand(\n    \tname=\"Cleanup\",\n    \tcommand=[\"rm\", \"-f\", Interpolate(\"/tmp/bottle_cache/%(prop:filename)s\")]\n    ),\n])\n\n# Add bottler builders\nfor name in osx_names:\n    c['builders'].append(BuilderConfig(\n        name=\"bottle_%s\"%(name),\n        slavenames=[name],\n        category=\"Bottling\",\n        factory=osx_bottle_factory\n    ))\n\n","sub_path":"master/bottling.py","file_name":"bottling.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} {"seq_id":"500587288","text":"# Activities share one meeting room and their times may overlap; schedule as many activities as possible\r\n# | Acts | act1 | act2 | act3 | act4 | act5 | act6 |\r\n# | (s) | 1 | 3 | 0 | 5 | 3 | 7 |\r\n# | (f) | 3 | 4 | 4 | 7 | 6 | 8 |\r\n# Analysis yields the greedy selection criterion: earliest finish time\r\n\r\n# Bubble sort: order the activity set by finish time\r\ndef bubble_sort(s, f):\r\n    for i in range(len(f)):\r\n        for j in range(0, len(f) - 1 - i):\r\n            if f[j] > f[j+1]:\r\n                f[j], f[j+1] = f[j+1], f[j]\r\n                s[j], s[j+1] = s[j+1], s[j]\r\n    return s, f\r\n\r\n\r\n# Build the activity data from the input values\r\ndef init_acts(arr):\r\n    if len(arr) == 0:\r\n        
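For the expl_1.py record above, the printed shapes follow directly from TensorFlow's 'SAME' padding rule; here is a small arithmetic check in plain Python, no TensorFlow required.

import math

def same_pad_output_size(in_size, stride):
    # With 'SAME' padding the output spatial size is ceil(input / stride),
    # independent of the kernel size.
    return math.ceil(in_size / stride)

# The 6x6 input: the stride-1 conv keeps 6x6, the 2x2 stride-2 pool gives 3x3.
assert same_pad_output_size(6, 1) == 6
assert same_pad_output_size(6, 2) == 3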
global s, f\r\n s = [0, 3, 1, 5, 3, 7]\r\n f = [4, 4, 3, 7, 6, 8]\r\n return s, f\r\n else:\r\n for ar in arr:\r\n ar = ar[1:-1]\r\n start = int(ar.split(',')[0])\r\n end = int(ar.split(',')[1])\r\n s.append(start)\r\n f.append(end)\r\n return s, f\r\n\r\n\r\n# 贪心算法实现\r\ndef greedy(s, f, n):\r\n a = [True for x in range(n)]\r\n # 初始选择第一个活动\r\n j = 0\r\n for i in range(1, n):\r\n # 如果下一个活动开始时间大于等于上一个活动结束时间\r\n if s[i] >= f[j]:\r\n a[i] = True\r\n j = i\r\n else:\r\n a[i] = False\r\n return a\r\n\r\n# 6(回车) \r\n# (1,3) (3,4) (0,4) (5,7) (3,6) (7,8)\r\nn = int(input(\"请输入活动数量及s-f时间(活动数量与s-f用回车分隔, s-f间用空格分隔, 如不输入s-f, 则按6个活动默认构造数据)\"))\r\narr = input().split()\r\n\r\ns = []\r\nf = []\r\ns, f = init_acts(arr)\r\ns, f = bubble_sort(s, f)\r\nA = greedy(s, f, n)\r\nres = []\r\nfor k in range(len(A)):\r\n if A[k]:\r\n res.append('({},{})'.format(s[k], f[k]))\r\nprint(' '.join(res))\r\n","sub_path":"Greedy/Activity.py","file_name":"Activity.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"528831120","text":"from django.shortcuts import render\nfrom queryUI.forms import QueryForm\nimport datetime\nfrom django.db import connection\nfrom django.conf import settings\nfrom queryUI.models import Query\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nimport psycopg2\n\n\n\ndef index(request):\n time = datetime.datetime.now()\n session = time.strftime('%s')\n return render(request, 'queryUI/index.html', {'session': session})\n\ndef list(request):\n time = datetime.datetime.now()\n session = time.strftime('%s')\n create_time = time.strftime('%d-%b-%Y, %H:%M')\n if request.method == 'POST':\n form = QueryForm(request.POST)\n if form.is_valid():\n query = form.cleaned_data['query']\n job_name = form.cleaned_data['job_name']\n len = 0\n try:\n conn = psycopg2.connect(\"dbname='tapdb' user='dlquery' host='dldb1.sdm.noao.edu' password=''\")\n with conn.cursor() as cursor:\n cursor.execute(query)\n result = cursor.fetchall()\n filePath = settings.MEDIA_ROOT + '/' + time.strftime('%s')\n with open(filePath, 'w') as f:\n for obj in result:\n f.write(\"%s\\n\" % str(obj))\n len += 1\n size = f.tell()\n\n finally:\n connection.close()\n newquery = Query(owner='test', create_time=create_time, job_name=job_name, file_link='http://dldb1.sdm.noao.edu/huang/queryUI/media/' + session,\n status='completed', number_rows=len, size=size)\n newquery.save()\n\n # Redirect to the document list after POST\n return HttpResponseRedirect(reverse('list'))\n else:\n form = QueryForm() # A empty, unbound form\n\n # Load documents for the list page\n documents = Query.objects.all()\n\n # Render list page with the documents and the form\n return render(\n request,\n 'list.html',\n {'documents': documents, 'form': form}\n )","sub_path":"queryUI/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"139951192","text":"'''\nCreated on Jan 22, 2013\n\n@author: oliver\n'''\n\nimport wx\nfrom datetime import datetime\n\n# Make a shorter alias\n_ = wx.GetTranslation\n\nclass WxHelpers(object):\n '''\n classdocs\n '''\n\n def pydate2wxdate(self, date):\n '''\n Converts python datetime object to wx.date object\n @param date: date as Python datetime object\n @type date: datetime\n @return: date as wx.datetime object \n '''\n assert isinstance(date, (datetime, date))\n tt = 
date.timetuple()\n dmy = (tt[2], tt[1]-1, tt[0])\n wxdate = wx.DateTimeFromDMY(*dmy)\n return wxdate\n \n def wxdate2pydate(self, date):\n '''\n Converts wx.date datetime object to python datetime object\n @param date: date as wx.datetime object\n @type date: wx.datetime\n @return: date as python datetime object \n '''\n assert isinstance(date, wx.DateTime)\n if date.IsValid():\n ymd = map(int, date.FormatISODate().split('-'))\n return datetime(*ymd)\n else:\n return None\n \n def fill_comboboxes(self, dct):\n for cb_name, cb_dct in dct.items():\n attr = getattr(self, cb_name)\n attr.Append('', {'label':'','id':''})\n for k, v in cb_dct.items():\n v = _(v)\n sub_dct = {'label':v,'id':k}\n attr.Append(sub_dct['label'], sub_dct)\n \n\n\n def set_cb_value(self, cb, item_id):\n '''\n Searches for the given item_id in the list of itemObjects\n of the given combobox and set the combobox's selection\n to that item if found.\n \n @param cb: wx.combobox control\n @type cb: object\n @param item_id: item_id to be searched for\n @type item_id: int, string \n '''\n count = cb.GetCount()\n i = 0\n cb.SetValue = ''\n while i < count:\n itemObj = cb.GetClientData(i)\n if itemObj['id'] == item_id:\n cb.SetSelection(i)\n break\n i += 1\n \n def get_cb_value(self, cb):\n '''\n Returns the itemObject of the given combobox's selection.\n ItemObject is a dictionary with keys label and id.\n The label is the string being displayed in the control, the\n id the one that gets returned to be stored in database.\n \n @param cb: wx.combobox control \n @type cb: object\n '''\n dct = {'id':None,'label':''}\n itemObject = cb.GetClientData(cb.GetSelection())\n if itemObject:\n dct['id'] = itemObject['id']\n dct['label'] = itemObject['label']\n return dct\n \n ","sub_path":"program/montehelper/WxHelpers.py","file_name":"WxHelpers.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"280583280","text":"import io\nimport logging\nimport os\nimport pandas as pd\nfrom collections import OrderedDict\n\nlogger = logging.getLogger(__name__)\n\n\ndef create_dir(new_dir):\n # Check if dir exists.. if it doesn't... 
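A small bug worth noting in the pydate2wxdate method a little above: its assert tests isinstance(date, (datetime, date)), but the second "date" is the function's own parameter rather than datetime.date, so the check is meaningless. A corrected, wx-free sketch of the same conversion (to_dmy is a hypothetical name; wx.DateTimeFromDMY expects a zero-based month, as the record's tt[1]-1 shows):

import datetime

def to_dmy(value):
    assert isinstance(value, (datetime.datetime, datetime.date))
    tt = value.timetuple()
    return (tt[2], tt[1] - 1, tt[0])  # (day, zero-based month, year)

print(to_dmy(datetime.date(2013, 1, 22)))  # (22, 0, 2013)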
create it.\n if not os.path.isdir(new_dir):\n try:\n os.makedirs(new_dir)\n except OSError:\n if os.path.exists(new_dir):\n pass\n else:\n raise\n\n\ndef dateparse(yr, mo, da, hr, mn, ss):\n dt = '{} {} {} {} {} {}'.format(yr, mo, da, hr, mn, ss)\n return pd.datetime.strptime(dt, '%Y %m %d %H %M %S')\n\n\ndef parse_header_line(line):\n \"\"\"\n Parse a line into a key, value\n :param line: a line from a text file\n :type line: string\n :return: a tuple containing the key, value for the line\n :rtype: tuple\n \"\"\"\n\n line = line.replace('%', '') # Strip the % sign from the line\n line = line.replace('\\n', '') # Strip the new line character from the end of the line\n key = line[0:line.find(':')] # Grab the key, which is everything to the left of the colon\n value = line[line.find(':') + 2:] # Grab the value, which is everything to the right of the colon\n return key, value\n\n\ndef parse_metadata(codar_file):\n metadata_list = []\n\n for i, line in enumerate(codar_file):\n if line.startswith('%'):\n metadata_list.append([i, line.strip('\\n')])\n return metadata_list\n\n\ndef parse_lluv(lluv):\n # Load file into Generic LLUV container\n lluv_container = dict(header=OrderedDict(), tables=OrderedDict(), footer=OrderedDict())\n\n with open(lluv, 'r') as open_file:\n open_lluv = open_file.readlines()\n\n # Parse header and footer metadata\n table_count = 0\n table = False # Set table to False. Once a table is found, switch to True.\n processing_info = []\n for i, line in enumerate(open_lluv):\n if not table: # If we are not looking at a table\n if line.startswith('%%'):\n continue\n elif line.startswith('%'): # Parse the single commented header lines\n key, value = parse_header_line(line)\n if 'TableType' in line: # Save this data as global header information\n table = True # we found a table\n table_count = table_count + 1 # this is the nth table\n data_header = [] # initialize an empty list for the data header information\n table_data = u''\n lluv_container['tables'][str(table_count)] = OrderedDict()\n lluv_container['tables'][str(table_count)][key] = value\n elif table_count > 0:\n if key == 'ProcessingTool':\n processing_info.append(value)\n elif key == 'End':\n lluv_container['footer']['ProcessingTool'] = processing_info\n lluv_container['footer'][key] = value\n else:\n lluv_container['footer'][key] = value\n else:\n lluv_container['header'][key] = value\n elif table:\n if line.startswith('%%'): # table header information\n line = line.replace('%%', '')\n temp = line.split()\n if 'comp' in temp:\n temp = [x for x in temp if x not in ('comp', 'Distance')]\n data_header.append(tuple(temp))\n elif line.startswith('%'):\n if len(line.split(':')) == 1:\n line = line.replace('%', '')\n table_data += '{}'.format(line)\n # table_data.append(line.split())\n else:\n key, value = parse_header_line(line)\n if 'TableEnd' in line:\n # use pandas read_csv rather than loading directly into dataframe because it automatically\n # interprets the datatype for each column of the csv\n tdf = pd.read_csv(io.StringIO(unicode(table_data)), sep=' ', header=None,\n names=lluv_container['tables'][str(table_count)]['TableColumnTypes'].split(),\n skipinitialspace=True)\n\n # tdf = pd.DataFrame(table_data, columns=lluv_container['tables'][str(table_count)]['TableColumnTypes'].split())\n if table_count > 1:\n tdf.insert(0, '%%', '%')\n else:\n tdf.insert(0, '%%', '')\n lluv_container['tables'][str(table_count)]['data'] = tdf\n # lluv_container['tables'][str(table_count)]['TableHeader'] = data_header\n table = False\n 
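The parse_lluv function in the record around this point accumulates a whitespace-delimited table as one string and parses it with pandas; the unicode() wrapper it applies is Python 2 residue and is unneeded on Python 3. A minimal Python 3 sketch of the same in-memory parse (column names here are illustrative, not the record's TableColumnTypes):

import io
import pandas as pd

table_data = "1.0 2.0 3.0\n4.0 5.0 6.0\n"
df = pd.read_csv(
    io.StringIO(table_data),         # parse straight from the in-memory string
    sep=r"\s+",                      # any run of whitespace as the delimiter
    header=None,
    names=["LOND", "LATD", "VELU"],  # illustrative column names
)
print(df.dtypes)                     # pandas infers a numeric dtype per column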
else:\n lluv_container['tables'][str(table_count)][key] = value\n else: # Uncommented lines are the main data table.\n table_data += '{}'.format(line)\n # table_data.append(line.split())\n return lluv_container\n\n\ndef parse_header(codar_file):\n \"\"\"\n Parse the generalized seasonde lluv format header data into a dictionary\n :param data: .readlines() data from an open text file\n :return header_dict: dictionary containing all the important header information for each file\n :rtype: dictionary\n \"\"\"\n header_dict = {} # initialize an empty dictionary\n dist_dict = {}\n # headers = None\n # col_headers = None\n units = None\n n = 0\n for i, line in enumerate(codar_file):\n if '%' in line:\n if 'Distance:' in line:\n key, value = parse_header_line(line)\n key = '{}_{}'.format(key.lower(), str(n))\n dist_dict[key] = float(value.split(' ')[0])\n n = n + 1\n elif 'TableStart' in line:\n headers = i + 1\n units = headers + 1\n # elif i == headers:\n # col_headers = [x.strip() for x in line.split(' ')[1:] if x]\n # # headers += ['', ''] # Site Contributors\n elif i == units:\n # units = [x.strip().replace('(', '').replace(')', '') for x in line.split(' ')[1:] if x]\n break\n else:\n key, value = parse_header_line(line)\n header_dict[key] = value\n return header_dict, dist_dict #, col_headers\n\n\ndef path_within_module(file_path):\n complete_path = os.path.join(os.path.dirname(__file__), '..', file_path)\n return complete_path","sub_path":"functions/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":6571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"268932008","text":"# -*- coding:utf-8 -*-\n# @author:Eric Luo\n# @file:task.py\n# @time:2017/6/8\n#\n# 知识库局部同义词的处理\nimport time\n\nimport requests\nfrom flask import jsonify\n\nfrom .segment import *\n\n\n@app.route('/task', methods=['POST'])\n@app.route('/task/', methods=['POST'])\n@app.route('/task', )\n@app.route('/task/', )\ndef task():\n localtime = time.localtime(time.time())\n print(localtime)\n text = {}\n message = str(request.form['messageText'])\n text['input'] = str(message)\n parameters = \"input=\" + str(text)\n print(\"Parameters\", parameters)\n while True:\n if message == \"quit\":\n print(\"Quit!\")\n exit()\n elif message == \"save\":\n print(\"save\")\n else:\n response = requests.get(\"http://39.108.135.114:8001/ZRobot/getConversation?\", params=parameters)\n data = response.json()\n print(\"Recive:\", data)\n return jsonify({'status': 'OK', 'answer': data['responce']['show']})\n return render_template('task.html')\n","sub_path":"app/backup/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"182420884","text":"\n\"\"\"This module contains code for the bias monitor Bokeh plots.\n\nAuthor\n------\n\n - Ben Sunnquist\n - Maria A. 
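The chat-forwarding Flask route earlier in this line (its Chinese header comment says it handles local synonyms for a knowledge base) builds its query string by hand as "input=" + str(text). requests can do that encoding itself when params is a dict; a minimal sketch against a placeholder host, not the endpoint from the record:

import requests

def forward_message(message: str) -> dict:
    response = requests.get(
        "http://example.invalid/ZRobot/getConversation",  # placeholder URL
        params={"input": message},  # requests URL-encodes the dict for us
        timeout=10,
    )
    response.raise_for_status()
    return response.json()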
Pena-Guerrero\n\nUse\n---\n\n This module can be used from the command line as such:\n\n ::\n\n from jwql.website.apps.jwql import monitor_pages\n monitor_template = monitor_pages.BiasMonitor()\n monitor_template.input_parameters = ('NIRCam', 'NRCA1_FULL')\n\"\"\"\n\nfrom datetime import datetime, timedelta\nimport os\n\nfrom astropy.stats import sigma_clip\nimport numpy as np\n\nfrom jwql.bokeh_templating import BokehTemplate\nfrom jwql.database.database_interface import session, NIRCamBiasStats, NIRISSBiasStats, NIRSpecBiasStats\nfrom jwql.utils.constants import JWST_INSTRUMENT_NAMES_MIXEDCASE\n\nSCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n\nclass BiasMonitor(BokehTemplate):\n\n # Combine the input parameters into a single property because we\n # do not want to invoke the setter unless all are updated\n @property\n def input_parameters(self):\n return (self._instrument, self._aperture)\n\n @input_parameters.setter\n def input_parameters(self, info):\n self._instrument, self._aperture = info\n self.pre_init()\n self.post_init()\n\n def identify_tables(self):\n \"\"\"Determine which database tables to use for the given instrument\"\"\"\n\n mixed_case_name = JWST_INSTRUMENT_NAMES_MIXEDCASE[self._instrument.lower()]\n self.stats_table = eval('{}BiasStats'.format(mixed_case_name))\n\n def load_data(self):\n \"\"\"Query the database tables to get all of the relevant bias data\"\"\"\n\n # Determine which database tables are needed based on instrument\n self.identify_tables()\n\n # Query database for all data in bias stats with a matching aperture,\n # and sort the data by exposure start time.\n self.query_results = session.query(self.stats_table) \\\n .filter(self.stats_table.aperture == self._aperture) \\\n .order_by(self.stats_table.expstart) \\\n .all()\n\n def pre_init(self):\n\n # Start with default values for instrument and aperture because\n # BokehTemplate's __init__ method does not allow input arguments\n try:\n dummy_instrument = self._instrument\n dummy_aperture = self._aperture\n except AttributeError:\n self._instrument = 'NIRCam'\n self._aperture = ''\n\n self._embed = True\n self.format_string = None\n self.interface_file = os.path.join(SCRIPT_DIR, 'yaml', 'monitor_bias_interface.yaml')\n\n def post_init(self):\n\n # Load the bias data\n self.load_data()\n\n # Update the mean bias over time figures\n self.update_mean_bias_figures()\n\n # Update the calibrated 0th group image\n self.update_calibrated_image()\n\n # Update the histogram of the calibrated 0th group image\n if self._instrument == 'NIRISS':\n self.update_calibrated_histogram()\n\n # Update the calibrated collapsed values figures\n if self._instrument != 'NIRISS':\n self.update_collapsed_vals_figures()\n\n def update_calibrated_histogram(self):\n \"\"\"Updates the calibrated 0th group histogram\"\"\"\n\n if len(self.query_results) != 0:\n # Get the most recent data; the entries were sorted by time when\n # loading the database, so the last entry will always be the most recent.\n counts = np.array(self.query_results[-1].counts)\n bin_centers = np.array(self.query_results[-1].bin_centers)\n\n # Update the calibrated image histogram\n self.refs['cal_hist_source'].data = {'counts': counts,\n 'bin_centers': bin_centers}\n self.refs['cal_hist_xr'].start = bin_centers.min()\n self.refs['cal_hist_xr'].end = bin_centers.max()\n self.refs['cal_hist_yr'].start = counts.min()\n self.refs['cal_hist_yr'].end = counts.max() + counts.max() * 0.05\n\n def update_calibrated_image(self):\n \"\"\"Updates the calibrated 0th 
group image\"\"\"\n\n if len(self.query_results) != 0:\n # Get the most recent data; the entries were sorted by time when\n # loading the database, so the last entry will always be the most recent.\n cal_image_png = self.query_results[-1].cal_image\n cal_image_png = os.path.join('/static', '/'.join(cal_image_png.split('/')[-6:]))\n\n # Update the image source for the figure\n self.refs['cal_image'].image_url(url=[cal_image_png], x=0, y=0, w=2048, h=2048, anchor=\"bottom_left\")\n\n # Update the calibrated image style\n self.refs['cal_image'].xaxis.visible = False\n self.refs['cal_image'].yaxis.visible = False\n self.refs['cal_image'].xgrid.grid_line_color = None\n self.refs['cal_image'].ygrid.grid_line_color = None\n self.refs['cal_image'].title.text_font_size = '22px'\n self.refs['cal_image'].title.align = 'center'\n\n def update_collapsed_vals_figures(self):\n \"\"\"Updates the calibrated median-collapsed row and column figures\"\"\"\n\n if len(self.query_results) != 0:\n for direction in ['rows', 'columns']:\n # Get most recent data; the entries were sorted by time when\n # loading the database, so the last entry will always be the most recent.\n vals = np.array(self.query_results[-1].__dict__['collapsed_{}'.format(direction)])\n pixels = np.arange(len(vals))\n self.refs['collapsed_{}_source'.format(direction)].data = {'pixel': pixels,\n 'signal': vals}\n\n # Update the pixel and signal limits\n self.refs['collapsed_{}_pixel_range'.format(direction)].start = pixels.min() - 10\n self.refs['collapsed_{}_pixel_range'.format(direction)].end = pixels.max() + 10\n self.refs['collapsed_{}_signal_range'.format(direction)].start = vals[4:2044].min() - 10 # excluding refpix\n self.refs['collapsed_{}_signal_range'.format(direction)].end = vals[4:2044].max() + 10\n\n def update_mean_bias_figures(self):\n \"\"\"Updates the mean bias over time bokeh plots\"\"\"\n\n # Get the dark exposures and their starts times\n filenames = [os.path.basename(result.uncal_filename).replace('_uncal.fits', '') for result in self.query_results]\n expstarts_iso = np.array([result.expstart for result in self.query_results])\n expstarts = np.array([datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%f') for date in expstarts_iso])\n\n # Update the mean bias figures for all amps and odd/even columns\n for amp in ['1', '2', '3', '4']:\n for kind in ['odd', 'even']:\n bias_vals = np.array([getattr(result, 'amp{}_{}_med'.format(amp, kind)) for result in self.query_results])\n self.refs['mean_bias_source_amp{}_{}'.format(amp, kind)].data = {'time': expstarts,\n 'time_iso': expstarts_iso,\n 'mean_bias': bias_vals,\n 'filename': filenames}\n self.refs['mean_bias_figure_amp{}_{}'.format(amp, kind)].title.text = 'Amp {} {}'.format(amp, kind.capitalize())\n self.refs['mean_bias_figure_amp{}_{}'.format(amp, kind)].hover.tooltips = [('file', '@filename'),\n ('time', '@time_iso'),\n ('bias level', '@mean_bias')]\n\n # Update plot limits if data exists\n if len(bias_vals) != 0:\n self.refs['mean_bias_xr_amp{}_{}'.format(amp, kind)].start = expstarts.min() - timedelta(days=3)\n self.refs['mean_bias_xr_amp{}_{}'.format(amp, kind)].end = expstarts.max() + timedelta(days=3)\n self.refs['mean_bias_yr_amp{}_{}'.format(amp, kind)].start = bias_vals.min() - 20\n self.refs['mean_bias_yr_amp{}_{}'.format(amp, kind)].end = bias_vals.max() + 
20\n","sub_path":"jwql/website/apps/jwql/monitor_pages/monitor_bias_bokeh.py","file_name":"monitor_bias_bokeh.py","file_ext":"py","file_size_in_byte":8364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"624096398","text":"\"\"\"\nHelper methods to work with PostgREST.\n\"\"\"\nfrom copy import deepcopy\nimport requests\nimport pdb\n\nclass Postgrest(object):\n \"\"\"\n Class to interact with PostgREST.\n \"\"\"\n def __init__(self, url, auth=None):\n\n self.auth = auth\n self.url = url\n\n self.headers = {\n \"Content-Type\": \"application/json\",\n \"Prefer\": \"return=representation\", # return entire record json in response\n }\n\n if self.auth:\n self.headers[\"Authorization\"] = f\"Bearer {self.auth}\"\n \n\n def insert(self, data=None):\n self.res = requests.post(self.url, headers=self.headers, json=data)\n self.res.raise_for_status()\n return self.res.json()\n\n\n def update(self, query_string, data=None):\n \"\"\"\n This method is dangerous! It is possible to delete and modify records\n en masse. Read the PostgREST docs.\n \"\"\"\n url = f\"{self.url}?{query_string}\"\n self.res = requests.patch(url, headers=self.headers, json=data)\n self.res.raise_for_status()\n return self.res.json()\n\n\n def upsert(self, data=None):\n \"\"\"\n This method is dangerous! It is possible to delete and modify records\n en masse. Read the PostgREST docs.\n \"\"\"\n headers = deepcopy(self.headers)\n headers[\"Prefer\"] += \", resolution=merge-duplicates\"\n self.res = requests.post(self.url, headers=headers, json=data)\n self.res.raise_for_status()\n return self.res.json()\n\n\n def delete(self, query_string):\n \"\"\"\n This method is dangerous! It is possible to delete and modify records\n en masse. Read the PostgREST docs.\n \"\"\"\n url = f\"{self.url}?{query_string}\"\n self.res = requests.delete(url, headers=self.headers)\n self.res.raise_for_status()\n return self.res.json()\n\n\n def select(self, query_string, increment=1000, limit=10000):\n \"\"\"Select records from PostgREST DB. See documentation for horizontal\n and vertical filtering at http://postgrest.org/.\n \n Args:\n query_string (string): a PostgREST-compliant query string.\n\n increment (int, optional): The maximum number of records to\n return request per request. This is applied as a \"limit\" to\n each API request, until the user-specified limit is reached.\n \n Note that the PosgrREST DB itself will likely have limiting\n configured that cannot be exceeded. For example, our\n instances have a limit of 5000 records per request.\n\n limit (int, optional): The maximum number of records to return\n from the query. 
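The select() method whose docstring continues below pages through PostgREST results by growing an offset until a short page or the caller's cap is reached. A condensed sketch of that loop (requests-based; the class's URL handling and auth headers are omitted):

import requests

def fetch_all(url, query_string, increment=1000, limit=10000):
    records = []
    while True:
        page_url = f"{url}?{query_string}&limit={increment}&offset={len(records)}"
        resp = requests.get(page_url, timeout=30)
        resp.raise_for_status()
        rows = resp.json()
        records += rows
        # stop on a short page (no rows left) or once the caller's cap is hit
        if len(rows) < increment or len(records) >= limit:
            return records[:limit]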
This method will continue to query records\n until the limit is reached or no more records are returned.\n \n Returns:\n TYPE: List \n \"\"\"\n if not query_string:\n raise Exception(\"Query string cannot be empty.\")\n\n url = f\"{self.url}?{query_string}&limit={increment}\"\n\n records = []\n\n while True:\n query_url = f\"{url}&offset={len(records)}\"\n\n self.res = requests.get(query_url, headers=self.headers)\n\n self.res.raise_for_status()\n\n records += self.res.json()\n\n if len(self.res.json()) < increment or len(records) >= limit:\n return records[0:limit]\n\n\n\n\n\n\n\n\n\n","sub_path":"tdutils/pgrestutil.py","file_name":"pgrestutil.py","file_ext":"py","file_size_in_byte":3433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"317555793","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 2 15:24:48 2018\n\n@author: siqihao\n\"\"\"\n\nimport sys\nimport pdb\n#import ipdb\nimport math\nimport h5py\nimport pickle as pkl\n\nimport librosa\nimport numpy as np\n\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom guitar import Guitar\nimport sequencer\nsys.path.insert(0, '../models')\nimport cqt_transform\nimport utils\n\ndevice = 'cuda'\n\nnum_frets = 20\n\n\ndef cqt_specgram(audio, n_bins, bins_per_octave, hop_length, sr, fmin, filter_scale):\n '''\n :param audio:\n :param sr:\n :return: shape = (n_bins, t)\n '''\n c = librosa.cqt(audio, sr = sr, n_bins = n_bins, bins_per_octave = bins_per_octave, hop_length = hop_length,\n fmin = fmin, filter_scale = filter_scale)\n mag, phase = librosa.core.magphase(c)\n c_p = librosa.amplitude_to_db(mag, amin=1e-13, top_db=120., ref=np.max) / 120.0 + 1.0\n return c_p\n\ndef compute_cqt_spec(audio, n_bins = 84 * 4, bins_per_octave=12 * 4, hop_length = 256, sr = 16000, fmin = librosa.note_to_hz('C1'),\n filter_scale = 0.8):\n return cqt_specgram(audio, n_bins, bins_per_octave, hop_length, sr, fmin, filter_scale)\n\n\nclass Options:\n \n def __init__(self, character_variation = 0.5, string_damping=0.5, string_damping_variation=0.25, pluck_damping=0.5,\n pluck_damping_variation=0.25, string_tension=0.1, stereo_spread=0.2, string_damping_calculation='magic', \n body='simple', mode='karplus-strong'):\n\n self.character_variation = character_variation\n self.string_damping=string_damping\n self.string_damping_variation=string_damping_variation\n self.pluck_damping=pluck_damping\n self.pluck_damping_variation=pluck_damping_variation\n self.string_tension=string_tension\n self.stereo_spread=stereo_spread\n self.string_damping_calculation=string_damping_calculation\n self.body=body\n self.mode=mode\n \n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 6, 5)\n self.pool = nn.MaxPool2d(2, 2)\n self.conv2 = nn.Conv2d(6, 16, 5)\n self.fc1 = nn.Linear(9 * 108 * 108, 360)\n self.fc2 = nn.Linear(360, 84)\n self.fc3 = nn.Linear(84, 2)\n\n def forward(self, x):\n x = self.pool(F.relu(self.conv1(x)))\n x = self.pool(F.relu(self.conv2(x)))\n x = x.view(-1, 9 * 108 * 108) # -1 is the batch_size\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.dropout(x, training=self.training) \n x = self.fc3(x)\n return x\n \n\nclass Net_pitch_sf(nn.Module):\n def __init__(self):\n super(Net_pitch_sf, self).__init__()\n self.conv1 = nn.Conv2d(1, 6, 5)\n self.pool = nn.MaxPool2d(2, 2)\n self.conv2 = nn.Conv2d(6, 16, 
5)\n self.fc1 = nn.Linear(9 * 108 * 108, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 8)\n\n def forward(self, x):\n x = F.dropout(x, p=0.1, training=self.training)\n x = self.pool(F.relu(self.conv1(x)))\n x = F.dropout(x, p=0.5, training=self.training)\n x = self.pool(F.relu(self.conv2(x)))\n x = F.dropout(x, p=0.3, training=self.training)\n x = x.view(-1, 9 * 108 * 108) # -1 is the batch_size\n x = F.relu(self.fc1(x))\n x = F.dropout(x, p=0.5, training=self.training)\n x = F.relu(self.fc2(x))\n x = F.dropout(x, p=0.2, training=self.training)\n x = self.fc3(x)\n x1 = F.sigmoid(0.5 * x[:, np.arange(6)])\n x2 = torch.cat([x1, x[:, 6].unsqueeze(1)], dim=1)\n x = torch.cat([x2, F.sigmoid(0.5 * x[:, 7]).unsqueeze(1)], dim=1)\n return x\n \n \ndef pad_zeros(image, shape):\n result = np.zeros(shape)\n result[:image.shape[0],:image.shape[1]] = image\n return result\n\n\ndef sample_params_string_tab(size):\n stringNumber = np.array([math.floor(np.random.choice(np.arange(0, 6))) for _ in range(size)], dtype=np.int32)\n tab = np.array([math.floor(np.random.choice(np.arange(0, 12))) for _ in range(size)], dtype=np.int32)\n# print(stringNumber)\n# print(tab)\n # ipdb.set_trace()\n guitars = []\n audio_buffers = []\n cqt_specs = []\n for i in range(size):\n guitars.append(Guitar(options=Options()))\n audio_buffers.append(sequencer.play_note(guitars[i], stringNumber[i], tab[i]))\n cqt_spec = cqt_transform.compute_cqt_spec(audio_buffers[i], n_bins = 336, bins_per_octave=48, hop_length=256).T\n padded_cqt = pad_zeros(cqt_spec, (cqt_spec.shape[1], cqt_spec.shape[1]))\n cqt_specs.append(padded_cqt)\n cqt_specs = np.array(cqt_specs, dtype=np.float32)\n print(cqt_specs.shape)\n return stringNumber, tab, cqt_specs\n \n\ndef generate_data_string_tab(file, size):\n stringNumber, tab, cqt_specs = sample_params_string_tab(size)\n with open(file, 'wb') as fh:\n data_dict = {'parameters' : np.array([stringNumber, tab]).T, 'cqt_spec' : cqt_specs}\n pkl.dump(data_dict, fh)\n fh.close()\n print(file)\n \n \ndef sample_params_pitch_sf(size):\n freqs = np.array(utils.compute_freqs(num_frets))\n character_variation = np.array([np.random.uniform(0, 1) for _ in range(size)], dtype=np.float32)\n string_damping = np.array([np.random.uniform(0, 0.7) for _ in range(size)], dtype=np.float32)\n string_damping_variation = np.array([np.random.uniform(0, 0.5) for _ in range(size)], dtype=np.float32)\n pluck_damping = np.array([np.random.uniform(0, 0.9) for _ in range(size)], dtype=np.float32)\n pluck_damping_variation = np.array([np.random.uniform(0, 0.5) for _ in range(size)], dtype=np.float32)\n string_tension = np.array([np.random.uniform(0, 1) for _ in range(size)], dtype=np.float32)\n stereo_spread = np.array([np.random.uniform(0, 1) for _ in range(size)], dtype=np.float32)\n smoothing_factor = np.array([np.random.uniform(0.5, 1) for _ in range(size)], dtype=np.float32)\n pitch = np.array([np.random.choice(freqs) for _ in range(size)], dtype=np.float32)\n # ipdb.set_trace()\n options = []\n guitars = []\n audio_buffers = []\n cqt_specs = []\n for i in range(size):\n options.append(Options(character_variation[i], string_damping[i], string_damping_variation[i], pluck_damping[i], pluck_damping_variation[i], \n string_tension[i], stereo_spread[i]))\n guitars.append(Guitar(options=options[i]))\n audio_buffers.append(sequencer.play_note(guitars[i], 0, 0, pitch[i], smoothing_factor[i]))\n# print(audio_buffers[i])\n# try:\n# cqt_spec = compute_cqt_spec(audio_buffers[i]).T\n# except ParameterError:\n# 
print(audio_buffers[i])\n cqt_spec = compute_cqt_spec(audio_buffers[i]).T\n padded_cqt = pad_zeros(cqt_spec, (cqt_spec.shape[1], cqt_spec.shape[1]))\n cqt_specs.append(padded_cqt)\n cqt_specs = np.array(cqt_specs, dtype=np.float32)\n print(cqt_specs.shape)\n return character_variation, string_damping, string_damping_variation, pluck_damping, pluck_damping_variation, string_tension, stereo_spread, pitch, smoothing_factor, cqt_specs\n\n\ndef save_data_hdf5(fname, size):\n\n print('Generating {}...'.format(fname))\n\n freqs = np.array(utils.compute_freqs(num_frets))\n character_variation = np.array([np.random.uniform(0, 1) for _ in range(size)], dtype=np.float32)\n string_damping = np.array([np.random.uniform(0, 0.7) for _ in range(size)], dtype=np.float32)\n string_damping_variation = np.array([np.random.uniform(0, 0.5) for _ in range(size)], dtype=np.float32)\n pluck_damping = np.array([np.random.uniform(0, 0.9) for _ in range(size)], dtype=np.float32)\n pluck_damping_variation = np.array([np.random.uniform(0, 0.5) for _ in range(size)], dtype=np.float32)\n string_tension = np.array([np.random.uniform(0, 1) for _ in range(size)], dtype=np.float32)\n stereo_spread = np.array([np.random.uniform(0, 1) for _ in range(size)], dtype=np.float32)\n smoothing_factor = np.array([np.random.uniform(0.5, 1) for _ in range(size)], dtype=np.float32)\n pitch = np.array([np.random.choice(freqs) for _ in range(size)], dtype=np.float32)\n\n with h5py.File(fname, 'w') as f:\n dset_parameters = f.create_dataset('parameters', (size, 9), maxshape=(None, None), dtype='float32', chunks=(size, 9))\n dset_cqt_specs = f.create_dataset('cqt_spec', (size, 336, 336), maxshape=(None, None, None), dtype='float32', chunks=(1000, 336, 336))\n\n dset_parameters[:] = np.array([character_variation, string_damping, string_damping_variation, pluck_damping, pluck_damping_variation, string_tension, stereo_spread, pitch, smoothing_factor]).T\n\n for i in range(size):\n options = Options(character_variation[i], string_damping[i], string_damping_variation[i], pluck_damping[i], pluck_damping_variation[i], string_tension[i], stereo_spread[i])\n guitar = Guitar(options=options)\n audio_buffer = sequencer.play_note(guitar, 0, 0, pitch[i], smoothing_factor[i])\n cqt_spec = compute_cqt_spec(audio_buffer).T\n padded_cqt = pad_zeros(cqt_spec, (cqt_spec.shape[1], cqt_spec.shape[1]))\n\n dset_cqt_specs[i, :, :] = padded_cqt\n\n print('Finished generating {}!'.format(fname))\n\n\ndef create_datasets_hdf5(suffix):\n save_data_hdf5('val_{}.h5'.format(suffix), 50)\n save_data_hdf5('eval_{}.h5'.format(suffix), 50)\n save_data_hdf5('test_{}.h5'.format(suffix), 50)\n save_data_hdf5('train_{}.h5'.format(suffix), 100)\n\n\ndef generate_data_pitch_sf(file, size):\n character_variation, string_damping, string_damping_variation, pluck_damping, pluck_damping_variation, string_tension, stereo_spread, pitch, smoothing_factor, cqt_specs = sample_params_pitch_sf(size)\n with open(file, 'wb') as fh:\n data_dict = {'parameters' : np.array([character_variation, string_damping, string_damping_variation, pluck_damping, pluck_damping_variation, string_tension, \n stereo_spread, pitch, smoothing_factor]).T, 'cqt_spec' : cqt_specs}\n pkl.dump(data_dict, fh)\n fh.close()\n print(file)\n\n\ndef read_data(file):\n with open(file, 'rb') as fh:\n data = pkl.loads(fh.read())\n fh.close()\n return data\n\n\ndef create_datasets(suffix):\n generate_data_pitch_sf(\"val\" + suffix + \".pkl\", 500)\n# generate_data('test.pkl', 5000)\n# generate_data('eval.pkl', 5000)\n# 
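The generate_/read_data helpers near this point persist dicts of numpy arrays with pickle; read_data's pkl.loads(fh.read()) is equivalent to the more direct pickle.load(fh). A minimal round-trip sketch (file name and shapes are illustrative):

import pickle
import numpy as np

data = {"parameters": np.zeros((4, 9), dtype=np.float32),
        "cqt_spec": np.zeros((4, 336, 336), dtype=np.float32)}
with open("example.pkl", "wb") as fh:
    pickle.dump(data, fh)
with open("example.pkl", "rb") as fh:
    loaded = pickle.load(fh)
print(loaded["cqt_spec"].shape)  # (4, 336, 336)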
generate_data('train.pkl', 50000)\n generate_data_pitch_sf(\"test\" + suffix + \".pkl\", 500)\n generate_data_pitch_sf(\"eval\" + suffix + \".pkl\", 100)\n generate_data_pitch_sf(\"train\" + suffix + \".pkl\", 5000)\n\n \ndef read_dataset(suffix):\n #return read_data(\"train\" + suffix + \".pkl\"), read_data(\"test\" + suffix + \".pkl\"), read_data(\"val\" + suffix + \".pkl\"), read_data(\"eval\" + suffix + \".pkl\")\n return read_data(\"val_pitch_sf.pkl\"), read_data(\"test_pitch_sf_sm.pkl\"), read_data(\"val_pitch_sf_sm.pkl\"), read_data(\"eval_pitch_sf.pkl\")\n\n\ndef read_data_hdf5(file):\n f = h5py.File(file, 'r')\n dset_parameters = f['parameters']\n dset_cqt_specs = f['cqt_spec']\n return { 'parameters': dset_parameters, 'cqt_spec': dset_cqt_specs }\n\n\ndef read_dataset_hdf5(suffix):\n return (read_data_hdf5('val_{}.h5'.format(suffix)),\n read_data_hdf5('test_{}.h5'.format(suffix)),\n read_data_hdf5('val_{}.h5'.format(suffix)),\n read_data_hdf5('eval_{}.h5'.format(suffix)))\n\n\nclass MyDataset(torch.utils.data.Dataset):\n def __init__(self, parameters, cqt_spectrograms):\n super(MyDataset, self).__init__()\n \n self.parameters = parameters\n self.cqt_spec = cqt_spectrograms\n \n def __getitem__(self, i):\n return self.cqt_spec[i].T, self.parameters[i]\n \n def __len__(self):\n return len(self.parameters)\n \n \ndef load_data(suffix):\n #create_datasets(suffix)\n #data.generate_data('val.pkl', 5000)\n print(\"loading data...\")\n train_data, test_data, val_data, eval_data = read_dataset(suffix)\n print(\"data loaded\")\n return train_data, test_data, val_data, eval_data\n\n\ndef load_data_hdf5(suffix):\n #create_datasets(suffix)\n #data.generate_data('val.pkl', 5000)\n print(\"loading data...\")\n train_data, test_data, val_data, eval_data = read_dataset_hdf5(suffix)\n print(\"data loaded\")\n return train_data, test_data, val_data, eval_data\n\n\ndef evaluate(net, validation_loader, size, factor, fixed=False):\n criterion = nn.MSELoss()\n val_loss = 0.0\n for i, datapoints in enumerate(validation_loader, 0):\n\n inputs, labels = datapoints\n inputs.unsqueeze_(1)\n labels = np.delete(labels, 6, axis=1) \n inputs = inputs.float().to(device)\n labels = labels.float().to(device)\n \n outputs = net(inputs)\n outputs[:, np.arange(5)] = outputs[:, np.arange(5)] * factor\n labels[:, np.arange(5)] =labels[:, np.arange(5)] * factor\n if fixed:\n outputs[:, 5] = torch.Tensor(np.full((outputs[:, 5].shape[0], 1), 0.6, dtype = np.float32))\n labels[:, 5] = torch.Tensor(np.full((labels[:, 5].shape[0], 1), 0.6, dtype = np.float32))\n outputs[:, 6] = torch.Tensor(np.full((outputs[:, 6].shape[0], 1), 261.63, dtype = np.float32))\n labels[:, 6] = torch.Tensor(np.full((labels[:, 6].shape[0], 1), 261.63, dtype = np.float32))\n else: \n outputs[:, 5] = outputs[:, 5] * 500 \n labels[:, 5] = labels[:, 5] * 500 \n labels[:, 6] = torch.log(labels[:, 6]) * 200\n #outputs[:, 6] = torch.pow(torch.Tensor([2]), outputs[:, 6])\n #outputs[:, 6] = torch.Tensor(np.exp2(outputs[:, 6].detach().cpu().numpy())).to(device)\n outputs[:, 6] = outputs[:, 6] * 200 \n outputs[:, 7] = outputs[:, 7] * factor \n labels[:, 7] = labels[:, 7] * factor\n loss = criterion(outputs, labels)\n\n # print statistics\n val_loss += loss.item()\n #return val_loss/float(len(validation_loader.dataset))\n return val_loss/size\n\n\ndef train_model(net, train_data, val_data, eval_data, batch_size, epochs, suffix, trainsize, valsize, factor, fixed=False):\n print(\"===============Training Data===============\")\n criterion = nn.MSELoss()\n 
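The Net and Net_pitch_sf modules defined earlier in this record flatten their conv output with view(-1, 9 * 108 * 108), which looks unrelated to the 16 feature maps actually produced. Tracing the shapes for a 336x336 input shows why it still works: conv1 (kernel 5) gives 332, pooling halves it to 166, conv2 gives 162, pooling gives 81, and 16 * 81 * 81 = 104976 = 9 * 108 * 108, so the two element counts coincide numerically. A quick check:

import torch
import torch.nn as nn

x = torch.zeros(1, 1, 336, 336)   # batch of one 336x336 spectrogram
pool = nn.MaxPool2d(2, 2)
x = pool(nn.Conv2d(1, 6, 5)(x))   # -> (1, 6, 166, 166)
x = pool(nn.Conv2d(6, 16, 5)(x))  # -> (1, 16, 81, 81)
print(x.shape, 16 * 81 * 81 == 9 * 108 * 108)  # torch.Size([1, 16, 81, 81]) True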
optimizer = optim.Adam(net.parameters(), lr=0.001, weight_decay=1e-6)\n net.train()\n \n transform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n trainset = MyDataset(parameters=train_data['parameters'], cqt_spectrograms=train_data['cqt_spec'])\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,\n shuffle=True, num_workers=2)\n \n valset = MyDataset(parameters=val_data['parameters'], cqt_spectrograms=val_data['cqt_spec'])\n valloader = torch.utils.data.DataLoader(valset, batch_size=batch_size,\n shuffle=False, num_workers=2)\n \n evalset = MyDataset(parameters=eval_data['parameters'], cqt_spectrograms=eval_data['cqt_spec'])\n evalloader = torch.utils.data.DataLoader(evalset, batch_size=4,\n shuffle=False, num_workers=2)\n if fixed:\n suffix += \"_fixed\"\n \n for epoch in range(epochs): # loop over the dataset multiple times\n #net.train()\n running_loss = 0.0\n \n for i, data in enumerate(trainloader, 0):\n # get the inputs\n inputs, labels = data\n #print(inputs.shape)\n #print(labels.shape)\n inputs.unsqueeze_(1)\n labels = np.delete(labels, 6, axis=1)\n inputs = inputs.to(device)\n labels = labels.to(device)\t\t\n #print(inputs.shape)\n # zero the parameter gradients\n optimizer.zero_grad()\n \n # forward + backward + optimize\n outputs = net(inputs)\n outputs[:, np.arange(5)] = outputs[:, np.arange(5)] * factor\n labels[:, np.arange(5)] =labels[:, np.arange(5)] * factor\n if fixed:\n outputs[:, 5] = torch.Tensor(np.full((outputs[:, 5].shape[0], 1), 0.6, dtype = np.float32))\n labels[:, 5] = torch.Tensor(np.full((labels[:, 5].shape[0], 1), 0.6, dtype = np.float32))\n outputs[:, 6] = torch.Tensor(np.full((outputs[:, 6].shape[0], 1), 261.63, dtype = np.float32))\n labels[:, 6] = torch.Tensor(np.full((labels[:, 6].shape[0], 1), 261.63, dtype = np.float32))\n else: \n outputs[:, 5] = outputs[:, 5] * 500\n labels[:, 5] = labels[:, 5] * 500 \n labels[:, 6] = torch.log(labels[:, 6]) * 200\n outputs[:, 6] = outputs[:, 6] * 200\n #outputs[:, 6] = torch.pow(torch.Tensor([2]), outputs[:, 6]) \n #outputs[:, 6] = torch.Tensor(np.exp2(outputs[:, 6].detach().cpu().numpy())).to(device)\n #print(outputs[:, 6])\n #print(labels[:, 6])\n outputs[:, 7] = outputs[:, 7] * factor \n labels[:, 7] = labels[:, 7] * factor\n #print(outputs[:, 0])\n #print(outputs)\n #m = nn.Sigmoid()\n #outputs = m(5 * (outputs - 0.5))\n #out1 = F.sigmoid(5 * (outputs[:, np.arange(7)] - 0.5))\n #outputs = np.c_[x1.detach().cpu().numpy(), x[:, 7].detach().cpu().numpy()]).to(device)\n loss = criterion(outputs, labels.float())\n #print(loss)\n loss.backward()\n #print(\"gradients:\\n\")\n #for param in net.parameters():\n # print(param.grad)\n optimizer.step()\n \n # print statistics\n running_loss += loss.item()\n# if i % 500 == 0: # print every 20 mini-batches\n# print('[%d, %5d] loss: %.3f' %\n# (epoch + 1, i + 1, running_loss/500))\n# running_loss = 0.0\n \n print('epoch %d train_loss: %.6f' % (epoch + 1, running_loss/float(len(trainloader.dataset))))\n with open(\"train_losses\" + suffix + \".txt\", \"a\") as text_file:\n #text_file.write(str(running_loss/float(len(trainloader.dataset))))\n text_file.write(str(running_loss/float(trainsize)))\n text_file.write(\"\\n\")\n \n val_loss = evaluate(net, valloader, valsize, factor)\n print('epoch %d val_loss: %.6f' % (epoch + 1, val_loss))\n with open(\"val_losses\" + suffix + \".txt\", \"a\") as text_file:\n text_file.write(str(val_loss))\n text_file.write(\"\\n\")\n torch.save(net.state_dict(), 
\"checkpoint\" + suffix + \".pt\")\n \n print('Finished Training')\n \n \ndef merge_images(sources, targets, k=10):\n _, h, w = sources.shape\n print(sources.shape[0], h, w)\n row = int(np.sqrt(sources.shape[0])) # Square root of batch size\n merged = np.zeros([row*h, row*w*2])\n for idx, (s, t) in enumerate(zip(sources, targets)):\n i = idx // row\n j = idx % row\n merged[i*h:(i+1)*h, (j*2)*h:(j*2+1)*h] = s\n merged[i*h:(i+1)*h, (j*2+1)*h:(j*2+2)*h] = t\n return merged\n\n\ndef test_string_tab(net, test_data):\n net.load_state_dict(torch.load('2fac_checkpoint.pt'))\n net.eval()\n criterion = nn.MSELoss()\n\n testset = MyDataset(parameters=test_data['parameters'], cqt_spectrograms=test_data['cqt_spec'])\n testloader = torch.utils.data.DataLoader(testset, batch_size=4,\n shuffle=False, num_workers=2)\n \n inputs, targets = iter(testloader).next()\n inputs = inputs.to(device)\n targets = targets.to(device)\n \n gt_samples = []\n gt_stringNumbers = []\n gt_tabs = []\n \n for i in range(len(targets)):\n gt_stringNumber, gt_tab = targets.cpu().numpy()[i]\n guitar = Guitar(options=Options())\n gt_stringNumbers.append(gt_stringNumber)\n gt_tabs.append(gt_tab)\n #print(\"gt_stringNumber: %.3f, gt_tab: %.3f\" % (gt_stringNumber, gt_tab))\n audio_buffer = sequencer.play_note(guitar, int(round(gt_stringNumber)), int(round(gt_tab)))\n #cqt_spec = cqt_transform.compute_cqt_spec(audio_buffer).T\n #padded_cqt = pad_zeros(cqt_spec, (cqt_spec.shape[1], cqt_spec.shape[1])) \n gt_samples.append(audio_buffer)\n\n with open(\"2fac_gt_data.pkl\", 'wb') as fh:\n data_dict = {'gt_samples' : np.array(gt_samples), 'gt_stringNumbers' : np.array(gt_stringNumbers), 'gt_tabs' : np.array(gt_tabs), 'gt_cqts' : inputs.cpu().numpy()}\n pkl.dump(data_dict, fh)\n fh.close()\n\n preds = net(inputs.unsqueeze_(1))\n preds = preds.detach().cpu().numpy()\n \n pred_samples = []\n pred_cqts = []\n pred_stringNumbers = []\n pred_tabs = []\n \n for i in range(preds.shape[0]):\n pred_stringNumber, pred_tab = preds[i]\n guitar = Guitar(options=Options())\n #print(\"pred_stringNumber: %d, pred_tab: %d\" % (int(round(pred_stringNumber)), int(round(pred_tab))))\n pred_stringNumbers.append(pred_stringNumber)\n pred_tabs.append(pred_tab)\n audio_buffer = sequencer.play_note(guitar, int(round(pred_stringNumber)), int(round(pred_tab)))\n cqt_spec = cqt_transform.compute_cqt_spec(audio_buffer, n_bins=336, bins_per_octave=48, hop_length=256).T\n padded_cqt = pad_zeros(cqt_spec, (cqt_spec.shape[1], cqt_spec.shape[1])) \n pred_cqts.append(padded_cqt.T)\n pred_samples.append(audio_buffer)\n \n with open(\"2fac_pred_data.pkl\", 'wb') as fh:\n data_dict = {'pred_samples' : np.array(pred_samples), 'pred_stringNumbers' : np.array(pred_stringNumbers), 'pred_tabs' : np.array(pred_tabs), 'pred_cqts' : pred_cqts}\n pkl.dump(data_dict, fh)\n fh.close()\n \n print('test_loss: %.3f' % evaluate(net, testloader))\n \n \ndef test_pitch_sf(net, test_data, batch_size, suffix ,testsize, factor):\n net.load_state_dict(torch.load(\"checkpoint\" + suffix + \".pt\"))\n net.eval()\n criterion = nn.MSELoss()\n\n testset = MyDataset(parameters=test_data['parameters'], cqt_spectrograms=test_data['cqt_spec'])\n testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,\n shuffle=False, num_workers=2)\n \n inputs, targets = iter(testloader).next()\n targets = np.delete(targets, 6, axis=1)\n inputs = inputs.to(device)\n targets = targets.to(device)\n \n gt_samples = []\n gt_character_variations = []\n gt_string_dampings = []\n gt_string_damping_variations = 
[]\n gt_pluck_dampings = []\n gt_pluck_damping_variations = []\n gt_string_tensions = []\n gt_pitches = []\n gt_smoothing_factors = []\n gt_dumping_variations = []\n \n for i in range(len(targets)):\n gt_character_variation, gt_string_damping, gt_string_damping_variation, gt_pluck_damping, gt_pluck_damping_variation, gt_string_tension, gt_pitch, gt_smoothing_factor= targets.cpu().numpy()[i]\n gt_dumping_variations.append(gt_pluck_damping_variation)\n options = Options(gt_character_variation.astype(np.float64), gt_string_damping.astype(np.float64), gt_string_damping_variation.astype(np.float64), gt_pluck_damping.astype(np.float64), gt_pluck_damping_variation.astype(np.float64), gt_string_tension.astype(np.float64))\n guitar = Guitar(options=options)\n gt_character_variations.append(gt_character_variation)\n gt_string_dampings.append(gt_string_damping)\n gt_string_damping_variations.append(gt_string_damping_variation)\n gt_pluck_dampings.append(gt_pluck_damping)\n gt_pluck_damping_variations.append(gt_pluck_damping_variation)\n gt_string_tensions.append(gt_string_tension)\n gt_pitches.append(gt_pitch)\n gt_smoothing_factors.append(gt_smoothing_factor)\n #print(\"gt_stringNumber: %.3f, gt_tab: %.3f\" % (gt_stringNumber, gt_tab))\n audio_buffer = sequencer.play_note(guitar, 0, 0, gt_pitch.astype(np.float64), gt_smoothing_factor.astype(np.float64))\n cqt_spec = compute_cqt_spec(audio_buffer).T\n padded_cqt = pad_zeros(cqt_spec, (cqt_spec.shape[1], cqt_spec.shape[1])) \n gt_samples.append(audio_buffer)\n \n print(\"gt:\\n\")\n print(gt_character_variations) \n print(gt_string_dampings)\n print(gt_string_damping_variations)\n print(gt_pluck_dampings)\n print(gt_pluck_damping_variations)\n print(gt_string_tensions)\n print(gt_pitches)\n print(gt_smoothing_factors)\n \n\n with open(\"gt_data\" + suffix + \".pkl\", 'wb') as fh:\n data_dict = {'gt_samples' : np.array(gt_samples), 'gt_character_variations': np.array(gt_character_variations) , 'gt_string_dampings' : np.array(gt_string_dampings), 'gt_string_damping_variations' : np.array(gt_string_damping_variations), 'gt_pluck_dampings' : np.array(gt_pluck_dampings), 'gt_pluck_damping_variations' : np.array(gt_pluck_damping_variations), 'gt_string_tensions' : np.array(gt_string_tensions), \n 'gt_pitches' : np.array(gt_pitches), 'gt_smoothing_factors' : np.array(gt_smoothing_factors), 'gt_cqts' : inputs.cpu().numpy()}\n pkl.dump(data_dict, fh)\n fh.close()\n\n preds = net(inputs.unsqueeze_(1))\n #m = nn.Sigmoid()\n #preds = m(5 * (preds - 0.5))\n preds = preds.detach().cpu().numpy()\n \n pred_samples = []\n pred_cqts = []\n pred_character_variations = []\n pred_string_dampings = []\n pred_string_damping_variations = []\n pred_pluck_dampings = []\n pred_pluck_damping_variations = []\n pred_string_tensions = []\n pred_pitches = []\n pred_smoothing_factors = []\n pred_dumping_variations = []\n pred_log_pitches = [] \n for i in range(preds.shape[0]):\n pred_character_variation, pred_string_damping, pred_string_damping_variation, pred_pluck_damping, pred_pluck_damping_variation, pred_string_tension, pred_pitch, pred_smoothing_factor = preds[i]\n #options = Options(pred_character_variation.astype(np.float64), pred_string_damping.astype(np.float64), pred_string_damping_variation.astype(np.float64), pred_pluck_damping.astype(np.float64), pred_pluck_damping_variation.astype(np.float64), pred_string_tension.astype(np.float64), pred_stereo_spread.astype(np.float64))\n pred_dumping_variations.append(pred_pluck_damping_variation)\n options = 
Options(pred_character_variation.astype(np.float64), pred_string_damping.astype(np.float64), pred_string_damping_variation.astype(np.float64), pred_pluck_damping.astype(np.float64), pred_pluck_damping_variation.astype(np.float64), pred_string_tension.astype(np.float64))\n guitar = Guitar(options=options)\n pred_character_variations.append(pred_character_variation)\n pred_string_dampings.append(pred_string_damping)\n pred_string_damping_variations.append(pred_string_damping_variation)\n pred_pluck_dampings.append(pred_pluck_damping)\n pred_pluck_damping_variations.append(pred_pluck_damping_variation)\n pred_string_tensions.append(pred_string_tension)\n pred_pitches.append(np.exp(pred_pitch))\n pred_log_pitches.append(pred_pitch)\n pred_smoothing_factors.append(pred_smoothing_factor)\n #print(\"gt_stringNumber: %.3f, gt_tab: %.3f\" % (gt_stringNumber, gt_tab))\n audio_buffer = sequencer.play_note(guitar, 0, 0, np.exp(pred_pitch.astype(np.float64)), pred_smoothing_factor.astype(np.float64))\n #audio_buffer = sequencer.play_note(guitar, 0, 0, pred_pitch, pred_smoothing_factor)\n cqt_spec = compute_cqt_spec(audio_buffer).T\n padded_cqt = pad_zeros(cqt_spec, (cqt_spec.shape[1], cqt_spec.shape[1])) \n pred_cqts.append(padded_cqt.T)\n pred_samples.append(audio_buffer)\n\n print(\"predicted:\\n\")\n print(pred_character_variations) \n print(pred_string_dampings)\n print(pred_string_damping_variations)\n print(pred_pluck_dampings)\n print(pred_pluck_damping_variations)\n print(pred_string_tensions)\n print(pred_pitches)\n print(pred_log_pitches)\t\n print(pred_smoothing_factors)\n #print(pred_dumping_variations)\n\n \n with open(\"pred_data\" + suffix + \".pkl\", 'wb') as fh:\n data_dict = {'pred_samples' : np.array(pred_samples), 'pred_character_variations': np.array(pred_character_variations) , 'pred_string_dampings' : np.array(pred_string_dampings), 'pred_string_damping_variations' : np.array(pred_string_damping_variations), 'pred_pluck_dampings' : np.array(pred_pluck_dampings), 'pred_pluck_damping_variations' : np.array(pred_pluck_damping_variations), 'pred_string_tensions' : np.array(pred_string_tensions), \n 'pred_pitches' : np.array(pred_pitches), 'pred_smoothing_factors' : np.array(pred_smoothing_factors), 'pred_cqts' : pred_cqts}\n pkl.dump(data_dict, fh)\n fh.close()\n \n print('test_loss: %.3f' % evaluate(net, testloader, testsize, factor))\n\n\nif __name__ == '__main__':\n net = Net_pitch_sf().to(device)\n # create_datasets('_pitch_sf_sm')\n\n #create_datasets_hdf5('pitch_sf_sm')\n\n train_data, test_data, val_data, eval_data = load_data(\"_pitch_sf_sm\")\n #train_data, test_data, val_data, eval_data = load_data_hdf5(\"pitch_sf_sm\")\n\n train_model(net, train_data, val_data, eval_data, 32, 100, \"_pitch_sf_fac_log400\", 5000, 500, 400)\n test_pitch_sf(net, test_data, 32, \"_pitch_sf_fac_log400\", 500, 400)\n","sub_path":"guitar-synth/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":29428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"339695690","text":"from typing import List, Optional\n\nfrom sc2.position import Point2\nfrom sc2.units import Units\nfrom sharpy.managers.roles import UnitTask\nfrom sharpy.plans.acts import ActBase\nfrom sc2.ids.unit_typeid import UnitTypeId\nfrom sc2.unit import Unit\n\n\nclass OverlordScoutMain(ActBase):\n def __init__(self):\n self.scout_tag: int = 0\n super().__init__()\n\n async def start(self, knowledge: \"Knowledge\"):\n return await 
super().start(knowledge)\n\n async def execute(self) -> bool:\n overlords = self.cache.own(UnitTypeId.OVERLORD)\n scouts = self.roles.all_from_task(UnitTask.OverlordScout)\n scout_overlord = overlords.tags_in(scouts.tags)\n non_scout_overlords = overlords.tags_not_in(scouts.tags)\n\n if self.scout_tag:\n return True\n\n if not self.scout_tag and non_scout_overlords.amount > 1:\n scout_overlord = non_scout_overlords[0]\n self.scout_tag = scout_overlord.tag\n\n if scout_overlord:\n self.knowledge.roles.set_task(UnitTask.OverlordScout, scout_overlord)\n target = self.knowledge.expansion_zones[-1].behind_mineral_position_center\n\n self.do(scout_overlord.move(target))\n\n return True\n","sub_path":"sharpy-sc2-develop/sharpy/plans/tactics/zerg/overlord_scout_main.py","file_name":"overlord_scout_main.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"98834746","text":"import pandas as pd\nimport csv\n\nbp = []\ndm = []\nhtn = []\nclas = []\nmale = []\ncmale = []\nmaleB = []\ncmaleB = []\nfemale = []\ncfemale = []\nfemaleB = []\ncfemaleB = []\nignore = ['?']\nskip = ['age', 'sc', 'al', 'bp', 'dm', 'htn']\nhtnP = 0\nhtnF = 0\ndmP = 0\ndmF = 0\nbpP = [0, 0, 0, 0]\nbpF = [0, 0, 0, 0]\ngfrHBP = [0, 0, 0, 0, 0]\ngfrHBF = [0, 0, 0, 0, 0]\ngfrHNP = [0, 0, 0, 0, 0]\ngfrHNF = [0, 0, 0, 0, 0]\ngfrMBP = [0, 0, 0, 0, 0]\ngfrMBF = [0, 0, 0, 0, 0]\ngfrMNP = [0, 0, 0, 0, 0]\ngfrMNF = [0, 0, 0, 0, 0]\n\n\ndef in_ex(cP, gfrV, bpV, dmV, htnV):\n cant = 0\n if gfrV is not None:\n cant += 1\n if bpV is not None:\n cant += 1\n if dmV is not None:\n cant += 1\n if htnV is not None:\n cant += 1\n casos = 0\n casosDeseados = 0\n for i in range(len(bp)):\n match = 0\n if gfrV is not None:\n if cP[i] == gfrV:\n match += 1\n if bpV is not None:\n if bp[i] == bpV:\n match += 1\n if dmV is not None:\n if dm[i] == dmV:\n match += 1\n if htnV is not None:\n if htn[i] == htnV:\n match += 1\n if match == cant:\n casos += 1\n if clas[i] == 1:\n casosDeseados += 1\n return casosDeseados/casos\n\n\ndef probabilidad(cP, gfrP, gfrV, bpV, dmV, htnV):\n if bpP[bpV] == 1.0 or dmV == 1.0 or htnV == 1.0 or gfrP[gfrV] == 1.0:\n return 1.0\n prob = 0.0\n prob += gfrP[gfrV]\n prob += bpP[bpV]\n prob += dmP\n prob += htnP\n prob -= in_ex(cP, gfrV, bpV, None, None)\n prob -= in_ex(cP, gfrV, None, dmV, None)\n prob -= in_ex(cP, gfrV, None, None, htnV)\n prob -= in_ex(cP, None, bpV, dmV, None)\n prob -= in_ex(cP, None, bpV, None, htnV)\n prob -= in_ex(cP, None, None, dmV, htnV)\n prob += in_ex(cP, gfrV, bpV, dmV, None)\n prob += in_ex(cP, gfrV, bpV, None, htnV)\n prob += in_ex(cP, gfrV, None, dmV, htnV)\n prob += in_ex(cP, None, bpV, dmV, htnV)\n prob -= in_ex(cP, gfrV, bpV, dmV, htnV)\n return prob\n\n\ndef sum(lista):\n suma = 0\n for i in range(len(lista)):\n suma += lista[i]\n return suma\n\n\ndef porcentaje(frec, prob, val, res):\n for i in range(len(val)):\n frec[val[i]] += 1\n if res[i] == 1:\n prob[val[i]] += 1\n for i in range(len(prob)):\n prob[i] /= frec[i]\n\n\ndef revisar(gfr, lista, al):\n if gfr >= 60:\n if al < 30:\n lista.append(0)\n elif al < 300:\n lista.append(1)\n else:\n lista.append(2)\n elif gfr >= 45:\n if al < 30:\n lista.append(1)\n elif al < 300:\n lista.append(2)\n else:\n lista.append(3)\n elif gfr >= 30:\n if al < 30:\n lista.append(2)\n else:\n lista.append(3)\n elif gfr >= 15:\n if al < 300:\n lista.append(3)\n else:\n lista.append(4)\n else:\n lista.append(4)\n\n\ndef eGFR(sc, k, a, age, sex, race):\n return 141 * 
(min(sc/k, 1) ** a)\\\n * (max(sc/k, 1) ** -1.209)\\\n * (0.993**age) * sex * race\n\n\nfile = pd.read_csv('datasets/renal.csv')\nfor i in range(1, file['age'].size):\n _continue = False\n for sname in skip:\n if file[sname][i] in ignore:\n _continue = True\n if _continue:\n continue\n male.append(eGFR(float(file['sc'][i]), 0.9, -0.411, float(file['age'][i]), 1, 1))\n maleB.append(eGFR(float(file['sc'][i]), 0.9, -0.411, float(file['age'][i]), 1, 1.159))\n female.append(eGFR(float(file['sc'][i]), 0.7, -0.329, float(file['age'][i]), 1.018, 1))\n femaleB.append(eGFR(float(file['sc'][i]), 0.7, -0.329, float(file['age'][i]), 1.018, 1.159))\n revisar(male[-1], cmale, float(file['al'][i]))\n revisar(maleB[-1], cmaleB, float(file['al'][i]))\n revisar(female[-1], cfemale, float(file['al'][i]))\n revisar(femaleB[-1], cfemaleB, float(file['al'][i]))\n if float(file['bp'][i]) > 90:\n bp.append(3)\n elif float(file['bp'][i]) > 80:\n bp.append(2)\n elif float(file['bp'][i]) > 60:\n bp.append(1)\n else:\n bp.append(0)\n if file['dm'][i] == 'yes':\n dm.append(1)\n else:\n dm.append(0)\n if file['htn'][i] == 'yes':\n htn.append(1)\n else:\n htn.append(0)\n if file['class'][i] == 'ckd':\n clas.append(1)\n else:\n clas.append(0)\nfor i in range(len(htn)):\n if htn[i] == 0:\n htnF += 1\n if clas[i] == 1:\n htnP += 1\n if dm[i] == 0:\n dmF += 1\n if clas[i] == 1:\n dmP += 1\n bpF[bp[i]] += 1\n if clas[i] == 1:\n bpP[bp[i]] += 1\nporcentaje(gfrHBF, gfrHBP, cmale, clas)\nporcentaje(gfrHNF, gfrHNP, cmaleB, clas)\nporcentaje(gfrMBF, gfrMBP, cfemale, clas)\nporcentaje(gfrMNF, gfrMNP, cfemaleB, clas)\nhtnP /= htnF\ndmP /= dmF\nfor i in range(len(bpP)):\n bpP[i] /= bpF[i]\nwith open('results\\ckd.csv', mode='w') as result_file:\n result_writer = csv.writer(result_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n result_writer.writerow(['Hombres Probabilidad',\n 'Hombres Afrodescendientes',\n 'Mujeres',\n 'Mujeres Afrodescendientes',\n 'Real'])\n for i in range(len(clas)):\n result_writer.writerow([probabilidad(cmale, gfrHBP, clas[i], bp[i], dm[i], htn[i]),\n probabilidad(cmaleB, gfrHNP, clas[i], bp[i], dm[i], htn[i]),\n probabilidad(cfemale, gfrMBP, clas[i], bp[i], dm[i], htn[i]),\n probabilidad(cfemaleB, gfrHNP, clas[i], bp[i], dm[i], htn[i]),\n clas[i]])\n\nprint(\"hombres no afrodescendientes\")\ncasosDeseados = 0\nfor i in range(len(clas)):\n x = probabilidad(cmale, gfrHBP, clas[i], bp[i], dm[i], htn[i])\n if x > 0.8:\n if clas[i] == 1:\n casosDeseados += 1\n else:\n if clas[i] == 0:\n casosDeseados += 1\nprint(casosDeseados/len(clas))\nprint(\"hombres afrodescendientes\")\ncasosDeseados = 0\nfor i in range(len(clas)):\n x = probabilidad(cmaleB, gfrHNP, clas[i], bp[i], dm[i], htn[i])\n if x > 0.8:\n if clas[i] == 1:\n casosDeseados += 1\n else:\n if clas[i] == 0:\n casosDeseados += 1\nprint(casosDeseados/len(clas))\nprint(\"mujeres no afrodescendientes\")\ncasosDeseados = 0\nfor i in range(len(clas)):\n x = probabilidad(cfemale, gfrMBP, clas[i], bp[i], dm[i], htn[i])\n if x > 0.8:\n if clas[i] == 1:\n casosDeseados += 1\n else:\n if clas[i] == 0:\n casosDeseados += 1\nprint(casosDeseados/len(clas))\nprint(\"mujeres afrodescendientes\")\ncasosDeseados = 0\nfor i in range(len(clas)):\n x = probabilidad(cfemaleB, gfrMNP, clas[i], bp[i], dm[i], htn[i])\n if x > 0.8:\n if clas[i] == 1:\n casosDeseados += 1\n else:\n if clas[i] == 0:\n casosDeseados += 1\nprint(casosDeseados/len(clas))\n\n\"\"\"\n Fuente https://www.kidney.org/content/ckd-epi-creatinine-equation-2009\n κ = 0.7 (females) or 0.9 (males)\n 
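A worked example of the CKD-EPI 2009 equation that eGFR() above implements, using the constants quoted from kidney.org (the patient values here are illustrative, not rows from the dataset):

def egfr(sc, k, a, age, sex, race):
    return (141 * (min(sc / k, 1) ** a)
                * (max(sc / k, 1) ** -1.209)
                * (0.993 ** age) * sex * race)

# 60-year-old non-black male with serum creatinine 1.2 mg/dL:
# k = 0.9, a = -0.411, sex = 1, race = 1
print(round(egfr(1.2, 0.9, -0.411, 60, 1, 1), 1))  # -> 65.3 mL/min/1.73m^2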
α = -0.329 (females) or -0.411 (males)\n sex = 1.018 (females) or 1 (males)\n sex = 1.159 (black) or 1 (no black)\n\"\"\"\n\n","sub_path":"CKD.py","file_name":"CKD.py","file_ext":"py","file_size_in_byte":7233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"117030437","text":"import aiohttp\nimport asyncio\nimport async_timeout\nimport time\n\n\nasync def fetch_page(session, url):\n page_start = time.time()\n async with async_timeout.timeout(10):\n async with session.get(url) as response:\n print(f'Page took {time.time() - page_start} seconds')\n # print(response.status)\n # print(response.text())\n return await response.text()\n\n\nasync def get_multiple_pages(loop, *urls):\n tasks = []\n async with aiohttp.ClientSession(loop=loop) as session:\n for url in urls:\n tasks.append(fetch_page(session, url))\n grouped_tasks = asyncio.gather(*tasks)\n return await grouped_tasks\n\nloop = asyncio.get_event_loop()\nurls = ['https://google.com' for i in range(50)]\nstart = time.time()\npages = loop.run_until_complete(get_multiple_pages(loop, *urls))\nprint(pages[0])\nprint(f'All pages took {time.time() - start} seconds')\n","sub_path":"async_scraping/async_request.py","file_name":"async_request.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"340717717","text":"from flask import Flask, jsonify, request, abort, make_response\nfrom flask_cors import CORS\nfrom typing import Optional, Dict, List, Tuple, Union, Any\n\nimport json\nimport time\nimport base64\n\nfrom tinydb import TinyDB, Query\n\nfrom model.webcase import WebCase\nfrom model.webvalidcase import WebValidCase\nfrom model.webcasepost import WebCasePost\nfrom model.webspec import WebSpec\nfrom model.webpostspec import WebPostSpec\n\nfrom model.mailcase import MailCase\nfrom model.mailvalidcase import MailValidCase\nfrom model.mailcasepost import MailCasePost\nfrom model.mailspec import MailSpec\nfrom model.mailpostspec import MailPostSpec\n\nfrom model.othercase import OtherCase\nfrom model.othervalidcase import OtherValidCase\nfrom model.othercasepost import OtherCasePost\nfrom model.otherspec import OtherSpec\nfrom model.otherpostspec import OtherPostSpec\n\nfrom model.vulnerability import Vulnerability\n\nimport handler\nimport engine\nimport util\n\n\napp: Flask = Flask(__name__)\nCORS(app)\n\n\ndb: Dict[str, TinyDB] = {\n \"web\": TinyDB(\"/project/db/web.json\"),\n \"webvalid\": TinyDB(\"/project/db/webvalid.json\"),\n \"mail\": TinyDB(\"/project/db/mail.json\"),\n \"mailvalid\": TinyDB(\"/project/db/mailvalid.json\"),\n \"other\": TinyDB(\"/project/db/other.json\"),\n \"othervalid\": TinyDB(\"/project/db/othervalid.json\"),\n \"vulnerability\": TinyDB(\"/project/db/vulnerability.json\"),\n}\n\n\n# general\n@app.route(\"/\", methods=[\"GET\"])\ndef index_get():\n length: int = request.args.get(\"length\", default=-1, type=int)\n cases: Dict[str, List[Union[WebCase, MailCase, OtherCase]]] = handler.general.index_get.handle(db=db, length=length)\n\n return jsonify(cases)\n\n\n@app.route(\"/case\", methods=[\"GET\"])\ndef case_get():\n ok: bool\n message: str\n data: Union[Union[WebCase, MailCase, OtherCase], str]\n\n uuid: Optional[str] = request.args.get(\"uuid\", None)\n kind: Optional[str] = request.args.get(\"kind\", None)\n is_valid: Optional[str] = request.args.get(\"is_valid\", None)\n\n ok, message = handler.general.case_get.validation(uuid=uuid, kind=kind, 
is_valid=is_valid)\n if not ok:\n abort(400, {\"message\": message})\n\n _is_valid: bool = is_valid == \"true\"\n\n ok, data = handler.general.case_get.handle(db=db, uuid=uuid, kind=kind, is_valid=_is_valid)\n if not ok:\n abort(400, {\"message\": data})\n\n return jsonify(data)\n\n\n@app.route(\"/case\", methods=[\"DELETE\"])\ndef case_delete():\n ok: bool\n message: str\n data: str\n\n uuid: Optional[str] = request.args.get(\"uuid\", None)\n kind: Optional[str] = request.args.get(\"kind\", None)\n is_valid: Optional[str] = request.args.get(\"is_valid\", None)\n\n ok, message = handler.general.case_delete.validation(uuid=uuid, kind=kind, is_valid=is_valid)\n if not ok:\n abort(400, {\"message\": message})\n\n _is_valid: bool = is_valid == \"true\"\n\n ok, data = handler.general.case_delete.handle(db=db, uuid=uuid, kind=kind, is_valid=_is_valid)\n if not ok:\n abort(400, {\"message\": data})\n\n return jsonify({\"uuid\": uuid})\n\n\n# web\n@app.route(\"/web/case\", methods=[\"GET\"])\ndef web_case_get():\n length: int = request.args.get(\"length\", default=-1, type=int)\n web_cases: List[WebCase] = handler.web.web_case_get.handle(db=db, length=length)\n\n return jsonify(web_cases)\n\n\n@app.route(\"/web/case\", methods=[\"POST\"])\ndef web_case_post():\n ok: bool\n data: Dict[str, str]\n\n # 未実装\n ok = handler.web.web_case_post.validation(post_data=request.json)\n if not ok:\n abort(400, {\"message\": \"Posted value is invalid.\"})\n\n web_case_post: WebCasePost = request.json\n\n web_case_post[\"spec\"][\"url\"] = \":\".join(web_case_post[\"spec\"][\"url\"].split(\":\")[:2])\n\n signature: str = request.headers.get(\"SECHUV-Token\")\n data: str = base64.b64encode(json.dumps(web_case_post[\"spec\"]).encode()).decode()\n\n # if signature is None or (not util.digisign.verify(signature, data)):\n # abort(500, {\"message\": \"Sign is invalid.\"})\n \n ok, data = handler.web.web_case_post.handle(db=db, web_case_post=web_case_post)\n if not ok:\n abort(500, {\"message\": \"Server error.\"})\n\n return jsonify(data)\n\n\n@app.route(\"/web/valid\", methods=[\"GET\"])\ndef web_valid_get():\n length: int = request.args.get(\"length\", default=-1, type=int)\n web_valid_cases: List[WebValidCase] = handler.web.web_valid_get.handle(db=db, length=length)\n\n return jsonify(web_valid_cases)\n\n\n@app.route(\"/web/valid\", methods=[\"POST\"])\ndef web_valid_post():\n ok: bool\n data: Dict[str, str]\n\n # 未実装\n ok = handler.web.web_valid_post.validation(post_data=request.json)\n if not ok:\n abort(400, {\"message\": \"Posted value is invalid.\"})\n\n web_spec: WebSpec = request.json\n ok, data = handler.web.web_valid_post.handle(db=db, web_spec=web_spec)\n if not ok:\n abort(500, {\"message\": \"Server error.\"})\n\n return jsonify(data)\n\n\n@app.route(\"/web/check\", methods=[\"POST\"])\ndef web_check_post():\n web_post_spec: WebPostSpec = request.json\n\n web_post_spec[\"url\"] = \":\".join(web_post_spec[\"url\"].split(\":\")[:2])\n\n result: List[Dict[str, str]] = engine.web_engine.run(web_post_spec=web_post_spec)\n\n response = make_response(jsonify(result))\n\n if len(result) != 0:\n response.headers[\"SECHUV-Token\"] = util.digisign.sign(base64.b64encode(json.dumps(web_post_spec).encode()).decode())\n response.headers[\"Access-Control-Expose-Headers\"] = \"SECHUV-Token\"\n \n return response\n \n\n# mail\n@app.route(\"/mail/case\", methods=[\"GET\"])\ndef mail_case_get():\n length: int = request.args.get(\"length\", default=-1, type=int)\n mail_cases: List[MailCase] = 
handler.mail.mail_case_get.handle(db=db, length=length)\n\n return jsonify(mail_cases)\n\n\n@app.route(\"/mail/case\", methods=[\"POST\"])\ndef mail_case_post():\n ok: bool\n data: Dict[str, str]\n\n # 未実装\n ok = handler.mail.mail_case_post.validation(post_data=request.json)\n if not ok:\n abort(400, {\"message\": \"Posted value is invalid.\"})\n\n mail_case_post: MailCasePost = request.json\n\n signature: str = request.headers.get(\"SECHUV-Token\")\n data: str = base64.b64encode(json.dumps(mail_case_post[\"spec\"]).encode()).decode()\n\n # if signature is None or (not util.digisign.verify(signature, data)):\n # abort(500, {\"message\": \"Sign is invalid.\"})\n\n ok, data = handler.mail.mail_case_post.handle(db=db, mail_case_post=mail_case_post)\n if not ok:\n abort(500, {\"message\": \"Server error.\"})\n\n return jsonify(data)\n\n\n@app.route(\"/mail/valid\", methods=[\"GET\"])\ndef mail_valid_get():\n length: int = request.args.get(\"length\", default=-1, type=int)\n mail_valid_cases: List[MailValidCase] = handler.mail.mail_valid_get.handle(db=db, length=length)\n\n return jsonify(mail_valid_cases)\n\n\n@app.route(\"/mail/valid\", methods=[\"POST\"])\ndef mail_valid_post():\n ok: bool\n data: Dict[str, str]\n\n # 未実装\n ok = handler.mail.mail_valid_post.validation(post_data=request.json)\n if not ok:\n abort(400, {\"message\": \"Posted value is invalid.\"})\n\n mail_spec: MailSpec = request.json\n ok, data = handler.mail.mail_valid_post.handle(db=db, mail_spec=mail_spec)\n if not ok:\n abort(500, {\"message\": \"Server error.\"})\n\n return jsonify(data)\n\n\n@app.route(\"/mail/check\", methods=[\"POST\"])\ndef mail_check_post():\n mail_post_spec: MailPostSpec = request.json\n\n result: List[Dict[str, str]] = engine.mail_engine.run(mail_post_spec=mail_post_spec)\n\n response = make_response(jsonify(result))\n\n if len(result) != 0:\n response.headers[\"SECHUV-Token\"] = util.digisign.sign(base64.b64encode(json.dumps(mail_post_spec).encode()).decode())\n response.headers[\"Access-Control-Expose-Headers\"] = \"SECHUV-Token\"\n\n return response\n\n\n\n# other\n@app.route(\"/other/case\", methods=[\"GET\"])\ndef other_case_get():\n length: int = request.args.get(\"length\", default=-1, type=int)\n other_cases: List[OtherCase] = handler.other.other_case_get.handle(db=db, length=length)\n\n return jsonify(other_cases)\n\n\n@app.route(\"/other/case\", methods=[\"POST\"])\ndef other_case_post():\n ok: bool\n data: Dict[str, str]\n\n # 未実装\n ok = handler.other.other_case_post.validation(post_data=request.json)\n if not ok:\n abort(400, {\"message\": \"Posted value is invalid.\"})\n\n other_case_post: OtherCasePost = request.json\n\n signature: str = request.headers.get(\"SECHUV-Token\")\n data: str = base64.b64encode(json.dumps(other_case_post[\"spec\"]).encode()).decode()\n\n # if signature is None or (not util.digisign.verify(signature, data)):\n # abort(500, {\"message\": \"Sign is invalid.\"})\n\n ok, data = handler.other.other_case_post.handle(db=db, other_case_post=other_case_post)\n if not ok:\n abort(500, {\"message\": \"Server error.\"})\n\n return jsonify(data)\n\n\n@app.route(\"/other/valid\", methods=[\"GET\"])\ndef other_valid_get():\n length: int = request.args.get(\"length\", default=-1, type=int)\n other_valid_cases: List[OtherValidCase] = handler.other.other_valid_get.handle(db=db, length=length)\n\n return jsonify(other_valid_cases)\n\n\n@app.route(\"/other/valid\", methods=[\"POST\"])\ndef other_valid_post():\n ok: bool\n data: Dict[str, str]\n\n # 未実装\n ok = 
handler.other.other_valid_post.validation(post_data=request.json)\n if not ok:\n abort(400, {\"message\": \"Posted value is invalid.\"})\n\n other_spec: OtherSpec = request.json\n ok, data = handler.other.other_valid_post.handle(db=db, other_spec=other_spec)\n if not ok:\n abort(500, {\"message\": \"Server error.\"})\n\n return jsonify(data)\n\n\n@app.route(\"/other/check\", methods=[\"POST\"])\ndef other_check_post():\n other_post_spec: OtherPostSpec = request.json\n\n result: List[Dict[str, str]] = engine.other_engine.run(other_post_spec=other_post_spec)\n\n response = make_response(jsonify(result))\n\n if len(result) != 0:\n response.headers[\"SECHUV-Token\"] = util.digisign.sign(base64.b64encode(json.dumps(other_post_spec).encode()).decode())\n response.headers[\"Access-Control-Expose-Headers\"] = \"SECHUV-Token\"\n \n return response\n\n\n@app.route(\"/vuln\", methods=[\"GET\"])\ndef vuln_get():\n vulnerabilities: List[Vulnerability] = handler.vuln.vuln_get.handle(db=db)\n\n return jsonify(vulnerabilities)\n\n\n@app.route(\"/vuln\", methods=[\"POST\"])\ndef vuln_post():\n ok: bool\n data: Dict[str, str]\n\n # 未実装\n ok = handler.vuln.vuln_post.validation(post_data=request.json)\n if not ok:\n abort(400, {\"message\": \"Posted value is invalid.\"})\n\n vulnerability: Vulnerability = request.json\n ok, data = handler.vuln.vuln_post.handle(db=db, vulnerability=vulnerability)\n if not ok:\n abort(500, {\"message\": \"Server error.\"})\n\n return jsonify(data)\n\n\n@app.route(\"/vuln\", methods=[\"DELETE\"])\ndef vuln_delete():\n vulntype: Optional[str] = request.args.get(\"vulntype\", None)\n\n ok, message = handler.vuln.vuln_delete.validation(vulntype=vulntype)\n if not ok:\n abort(400, {\"message\": message})\n\n ok, data = handler.vuln.vuln_delete.handle(db=db, vulntype=vulntype)\n if not ok:\n abort(400, {\"message\": data})\n\n return jsonify({\"vulntype\": vulntype})\n\n\n@app.route(\"/vuln/\", methods=[\"GET\"])\ndef vuln_vulntype_get(vulntype: str):\n length: int = request.args.get(\"length\", default=-1, type=int)\n\n ok: bool\n message: str\n\n ok, message = handler.vuln.vuln_vulntype_get.validation(db=db, vulntype=vulntype)\n if not ok:\n abort(400, {\"message\": message})\n\n data: Dict[str, Any] = handler.vuln.vuln_vulntype_get.handle(db=db, length=length, vulntype=vulntype)\n\n return jsonify(data)\n\n@app.route(\"/vuln//web\", methods=[\"GET\"])\ndef vuln_vulntype_web_get(vulntype: str):\n length: int = request.args.get(\"length\", default=-1, type=int)\n\n ok: bool\n message: str\n\n ok, message = handler.vuln.vuln_vulntype_web_get.validation(db=db, vulntype=vulntype)\n if not ok:\n abort(400, {\"message\": message})\n\n data: Dict[str, Union[Vulnerability, List[WebCase]]] = handler.vuln.vuln_vulntype_web_get.handle(db=db, length=length, vulntype=vulntype)\n\n return jsonify(data)\n\n\n@app.route(\"/vuln//mail\", methods=[\"GET\"])\ndef vuln_vulntype_mail_get(vulntype: str):\n length: int = request.args.get(\"length\", default=-1, type=int)\n\n ok: bool\n message: str\n\n ok, message = handler.vuln.vuln_vulntype_mail_get.validation(db=db, vulntype=vulntype)\n if not ok:\n abort(400, {\"message\": message})\n\n data: Dict[str, Union[Vulnerability, List[MailCase]]] = handler.vuln.vuln_vulntype_mail_get.handle(db=db, length=length, vulntype=vulntype)\n\n return jsonify(data)\n\n\n@app.route(\"/vuln//other\", methods=[\"GET\"])\ndef vuln_vulntype_other_get(vulntype: str):\n length: int = request.args.get(\"length\", default=-1, type=int)\n\n ok: bool\n message: str\n\n ok, 
message = handler.vuln.vuln_vulntype_other_get.validation(db=db, vulntype=vulntype)\n if not ok:\n abort(400, {\"message\": message})\n \n data: Dict[str, Union[Vulnerability, List[OtherCase]]] = handler.vuln.vuln_vulntype_other_get.handle(db=db, length=length, vulntype=vulntype)\n\n return jsonify(data)\n\n\n@app.errorhandler(400)\n@app.errorhandler(500)\ndef error_handler(error):\n response = jsonify({ 'message': error.description['message']})\n return response, error.code\n\n\nif __name__ == '__main__':\n time.sleep(1)\n app.run(host=\"0.0.0.0\", port=8080)","sub_path":"sechuv_chve/src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":13394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"218366085","text":"#!/usr/bin/env python\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport argparse\nimport sys\nimport vmprof\n\n\nclass FlameGraphPrinter:\n \"\"\"\n The Flame Graph [1] printer for vmprof profile files.\n\n [1] http://www.brendangregg.com/FlameGraphs/cpuflamegraphs.html\n \"\"\"\n\n def show(self, profile):\n # (str) -> None\n \"\"\"Read and display a vmprof profile file.\n\n :param profile: The filename of the vmprof profile file to convert.\n \"\"\"\n try:\n stats = vmprof.read_profile(profile)\n except Exception as e:\n print(\"Fatal: could not read vmprof profile file '{}': {}\".format(profile, e), file=sys.stderr)\n return\n tree = stats.get_tree()\n self.print_tree(tree)\n\n def _walk_tree(self, parent, node, level, lines):\n if ':' in node.name:\n split = node.name.split(':')\n funcname = split[1]\n rest = split[2:]\n if len(rest) >= 2:\n lineno = rest[0]\n filename = rest[1].split('/')[-1]\n funcname += \":{}:{}\".format(filename, lineno)\n if parent:\n current = parent + ';' + funcname\n else:\n current = funcname\n else:\n current = node.name\n\n count = node.count\n\n level += 1\n for c in node.children.values():\n count -= c.count\n self._walk_tree(current, c, level, lines)\n\n lines.append((current, count))\n\n def print_tree(self, tree):\n lines = []\n self._walk_tree(None, tree, 0, lines)\n lines.sort()\n for p, c in lines:\n print(p, c)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"profile\")\n args = parser.parse_args()\n\n pp = FlameGraphPrinter()\n pp.show(args.profile)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"vmprof-flamegraph.py","file_name":"vmprof-flamegraph.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"235243162","text":"from .ebay_api import EbayApi\nfrom .models import EbayListing\nfrom my_customs.decorators import report_error\nfrom django.core.mail import send_mail\nfrom decouple import config\nimport math\n\nebay = EbayApi()\n\n\n@ report_error\ndef list_item(sku, title, expansion, image_url, quantity, price, condition='Near Mint / Lightly Played', ebay_condition=\"NEW\",\n fulfillment_id='33310623022',\n payment_id=\"24992594022\",\n return_policy_id=\"153557924022\"):\n\n shipping_cost = {\n '33310623022': 0,\n }\n price = math.ceil(price) - 0.01\n title = f\"1x {title} - {expansion} - Magic the Gathering - Fast Shipping\"\n description = f\"Shipping for this item is *Fast and Free*
\" \\\n                  f\"This auction is for {title} from the {expansion} expansion and will be in \"\\\n                  f\"{condition} condition. These card(s) will be inserted into a sleeve, top-loader, team bag, and padded bubble-mailer \"\\\n                  f\"envelope to provide the maximum level of protection for your purchase. We have many great auctions at affordable prices \"\\\n                  f\"and provide combined shipping. Be sure to check out our full inventory for the hottest deals around! \"\\\n                  f\"If you have any questions or concerns, please let us know. We'll do everything we can to help.
\"\n\n condition_description = 'The items in this auction are in Near Mint or Lightly Played condition with \"No\" or \"minor\" marks / edge-wear'\n\n ebay.create_item(sku=sku, title=title, image_url=image_url, quantity=quantity, description=description, ebay_condition=ebay_condition,\n condition_description=condition_description)\n\n offer_id = ebay.create_offer(sku, price=price, quantity=quantity, category_id='38292', fulfillment_id=fulfillment_id, payment_id=payment_id,\n return_policy_id=return_policy_id, description=description)\n print(offer_id)\n offer_id = offer_id['offerId']\n\n upload = ebay.publish_offer(offer_id)\n try:\n listing_id = upload['listingId']\n\n except Exception as e:\n subject = f'Error listing ebay item'\n message = f'Error listing ebay item\\nError: {e}\\n\\n Items Info:\\n sku: {sku}, title: {title}, offer ID:{offer_id}'\n send_to = [config('my_email'), ]\n email_from = 'TCGFirst'\n send_mail(subject=subject, message=message, recipient_list=send_to, from_email=email_from)\n listing_id = {}\n\n if listing_id:\n new_listing = EbayListing(\n title=title,\n sku=sku,\n listing_id=listing_id,\n payment_policy_id=payment_id,\n offer_id=offer_id,\n category_id='38292',\n fulfillment_policy_id=fulfillment_id,\n return_policy_id=return_policy_id,\n quantity=quantity,\n price=price,\n description=description,\n format='FIXED_PRICE',\n shipping_cost=shipping_cost[fulfillment_id],\n\n )\n\n new_listing.save()\n\n return True\n\n else:\n return False\n\n\n\n","sub_path":"ebay/list_ebay_item.py","file_name":"list_ebay_item.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"631067319","text":"import datetime\nimport pprint\nimport sys\n\nimport requests as re\nfrom lxml import html\n\n\ndef parse_next():\n\n pass\n\n\ndef parse_list(body):\n items = []\n doc = html.fromstring(body)\n news_list = doc.xpath(\"//div[@class='m_txt_news']/ul/li\")\n # print(news_list)\n # print(len(news_list))\n for news in news_list:\n item = {}\n title = news.xpath(\"./a[@class='a_title']\")\n if not title:\n title = news.xpath(\"./a[@class='a_title txt_blod']\")\n title = title[0].text_content()\n # print(title)\n item['title'] = title\n pub_date = news.xpath(\"./a[@class='a_time txt_blod']\")\n if not pub_date:\n pub_date = news.xpath(\"./a[@class='a_time']\")\n\n link = pub_date[0].xpath(\"./@href\")[0]\n # print(link)\n item['link'] = link\n\n pub_date = pub_date[0].text_content()\n # print(pub_date)\n item['pub_date'] = pub_date\n items.append(item)\n return items\n\n\n# url = 'http://finance.takungpao.com/hkstock/cjss/'\n# url = 'http://finance.takungpao.com/hkstock/cjss/index_4.html'\n# body = re.get(url).text\n# items = parse_list(body)\n# print(pprint.pformat(items))\n# print(len(items))\n\n# zhongguojingji = 'http://www.takungpao.com/finance/236132/index.html'\nzhongguojingji = 'http://www.takungpao.com/finance/236132/2.html'\n# Economic_observer 经济观察家\nob = \"http://www.takungpao.com/finance/236134/index.html\"\nbody = re.get(ob).text\n# print(body)\ndoc = html.fromstring(body)\nnews_list = doc.xpath('//div[@class=\"sublist_mobile\"]/dl[@class=\"item\"]')\nprint(len(news_list))\n\n# sys.exit(0)\n\nfor news in news_list:\n link = news.xpath('./dd[@class=\"intro\"]/a/@href')[0]\n print(link)\n\n title = news.xpath(\"./dd/a/@title\")\n print(title[0])\n\n pub_date = news.xpath(\"./dd[@class='date']/text()\")[0]\n # # 发布时间的几种处理\n print(\">>> \", pub_date)\n current_dt = 
datetime.datetime.now()\n yesterday_dt_str = (datetime.datetime.now() - datetime.timedelta(days=1)).strftime(\"%Y-%m-%d\")\n after_yesterday_dt_str = (datetime.datetime.now() - datetime.timedelta(days=2)).strftime(\"%Y-%m-%d\")\n if \"小时前\" in pub_date: # eg. 20小时前\n hours = int(pub_date.replace('小时前', ''))\n pub_date = (current_dt - datetime.timedelta(hours=hours)).strftime(\"%Y-%m-%d %H:%M:%S\")\n elif \"昨天\" in pub_date: # eg. 昨天04:24\n pub_date = pub_date.replace('昨天', '')\n pub_date = \" \".join([yesterday_dt_str, pub_date])\n elif '前天' in pub_date: # eg. 前天11:33\n pub_date = pub_date.replace(\"前天\", '')\n pub_date = \" \".join([after_yesterday_dt_str, pub_date])\n else: # eg. 02-29 04:24\n pub_date = str(current_dt.year) + '-' + pub_date\n print(pub_date)\n print()\n","sub_path":"PublicOpinion/takungpao/demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"478007335","text":"import argparse\nimport time\nimport torch\nimport numpy as np\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import Dataset\nimport os\nfrom sklearn import metrics\nimport copy\nfrom sklearn.metrics import average_precision_score\nfrom sklearn.metrics import roc_curve, auc\n\n\nparser = argparse.ArgumentParser(description='PyTorch semi-supervised MNIST')\nparser.add_argument('--batch-size', type=int, default=32, metavar='N',\n help='input batch size for training (default: 32)')\nparser.add_argument('--epochs', type=int, default=200, metavar='N',\n help='number of epochs to train (default: 100)')\n\nfolder = './PREDICT'\nX_dim = 906 * 2\n\nresultFolder = os.path.join(folder, 'result')\nif not os.path.exists(resultFolder):\n print('mkdir {}'.format(resultFolder))\n os.mkdir(resultFolder)\n\n\ndrug_f = dict()\nwith open(os.path.join(folder, 'drug_feature.txt'), 'r') as f:\n for line in f:\n drug_id, fp = line.strip().split()\n fp = np.array(fp.split(','), dtype='float32')\n drug_f[drug_id] = fp\n\ndisease_f = dict()\nwith open(os.path.join(folder, 'disease_feature.txt'), 'r') as f:\n for line in f:\n disease_id, md = line.strip().split()\n md = np.array(md.split(','), dtype='float32')\n disease_f[disease_id] = md\n\n\nclass InteractionDataset(Dataset):\n def __init__(self, filename, root_dir=folder, transform=None):\n self.interaction = []\n self.label = []\n with open(os.path.join(root_dir, filename), 'r') as f:\n for line in f:\n drug_id, disease_id, label = line.strip().split()\n self.interaction.append((drug_id, disease_id))\n self.label.append(int(label))\n\n self.label = torch.LongTensor(self.label)\n\n def __len__(self):\n return len(self.interaction)\n\n def __getitem__(self, idx):\n idx = idx % len(self)\n drug_id, disease_id = self.interaction[idx]\n\n drug_feature = drug_f[drug_id]\n disease_feature = disease_f[disease_id]\n\n X = np.concatenate([drug_feature, disease_feature])\n X = torch.from_numpy(X)\n label = self.label[idx]\n return X, label\n\n\nargs = parser.parse_args()\ncuda = torch.cuda.is_available()\nif cuda:\n print('Using GPU')\n\nseed = 10\nkwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}\nn_classes = 2\ntrain_batch_size = args.batch_size\nvalid_batch_size = args.batch_size\nepochs = args.epochs\nweight_decay = 0.001\ndropout = 0.2\n\nparams = {\n 'n_classes': n_classes,\n 'X_dim': X_dim,\n 'train_batch_size': train_batch_size,\n 'valid_batch_size': 
valid_batch_size,\n 'epochs': epochs,\n 'cuda': cuda\n}\nprint('params: {}'.format(params))\n\n\ndef load_data(data_path='../data/'):\n print('loading data')\n train_dataset = InteractionDataset('train.txt')\n valid_dataset = InteractionDataset('valid.txt')\n test_dataset = InteractionDataset('test.txt')\n\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=train_batch_size, shuffle=True)\n valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=valid_batch_size, shuffle=True)\n test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=valid_batch_size, shuffle=True)\n\n return train_loader, valid_loader, test_loader\n\n\nclass FC_DNN(nn.Module):\n def __init__(self):\n super(FC_DNN, self).__init__()\n self.lin1 = nn.Linear(X_dim, 1024)\n self.lin2 = nn.Linear(1024, 512)\n self.lin3 = nn.Linear(512, 256)\n self.lin4 = nn.Linear(256, 128)\n self.cat = nn.Linear(128, n_classes)\n\n def forward(self, x):\n x = self.lin1(x)\n x = F.relu(x)\n x = F.dropout(x, p=dropout, training=self.training)\n\n x = self.lin2(x)\n x = F.relu(x)\n x = F.dropout(x, p=dropout, training=self.training)\n\n x = self.lin3(x)\n x = F.relu(x)\n x = F.dropout(x, p=dropout, training=self.training)\n\n x = self.lin4(x)\n x = F.relu(x)\n x = F.dropout(x, p=dropout, training=self.training)\n\n xcat = self.cat(x)\n return xcat\n\n\ndef report_loss(epoch, loss):\n print()\n print('Epoch-{}; loss: {:.4}'.format(epoch, loss))\n\n\ndef save_model(model, filename):\n torch.save(model, filename)\n\n\ndef load_model(filename):\n return torch.load(filename)\n\n\ndef classification_accuracy(Q, data_loader):\n Q.eval()\n labels = []\n scores = []\n\n loss = 0\n correct = 0\n for batch_idx, (X, target) in enumerate(data_loader):\n X = X.view(-1, X_dim)\n X, target = Variable(X), Variable(target)\n\n if cuda:\n X, target = X.cuda(), target.cuda()\n\n output = Q(X)\n output_probability = F.softmax(output, dim=1)\n\n labels.extend(target.data.tolist())\n if cuda:\n scores.extend(output_probability.cpu().data.numpy()[:, 1].tolist())\n else:\n scores.extend(output_probability.data.numpy()[:, 1].tolist())\n\n loss += F.cross_entropy(output, target, size_average=False).data[0]\n\n pred = output_probability.data.max(1)[1]\n correct += pred.eq(target.data).cpu().sum()\n\n loss /= len(data_loader)\n acc = correct / len(data_loader.dataset)\n\n fpr, tpr, thresholds = metrics.roc_curve(labels, scores, pos_label=1)\n auc = metrics.auc(fpr, tpr)\n return loss, acc, auc\n\n\ndef train(fc_dnn, fc_solver, train_labeled_loader):\n TINY = 1e-15\n\n fc_dnn.train()\n for X, target in train_labeled_loader:\n X, target = Variable(X), Variable(target)\n if cuda:\n X = X.cuda()\n target = target.cuda()\n\n X = X.view(-1, X_dim)\n out = fc_dnn(X)\n\n fc_solver.zero_grad()\n loss = F.cross_entropy(out + TINY, target)\n loss.backward()\n fc_solver.step()\n\n return loss.data[0]\n\n\ndef generate_model(train_labeled_loader, valid_loader):\n print('generating new model')\n torch.manual_seed(10)\n model = FC_DNN()\n if cuda:\n model = model.cuda()\n\n lr = 0.001\n fc_solver = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)\n\n start = time.time()\n max_valid_acc, max_valid_auc = 0, 0\n result_model = None\n\n train_loss, train_acc, train_auc = classification_accuracy(model, train_labeled_loader)\n print('no train loss {}, acc {}, auc {}'.format(train_loss, train_acc, train_auc))\n\n for epoch in range(epochs):\n loss = train(model, fc_solver, train_labeled_loader)\n report_loss(epoch, loss)\n\n train_loss, 
train_acc, train_auc = classification_accuracy(model, train_labeled_loader)\n print('Train loss {:.4}, acc {:.4}, auc {:.4}'.format(train_loss, train_acc, train_auc))\n\n valid_loss, valid_acc, valid_auc = classification_accuracy(model, valid_loader)\n if valid_acc > max_valid_acc:\n max_valid_acc, max_valid_auc = valid_acc, valid_auc\n result_model = copy.deepcopy(model)\n\n if valid_auc > 0.91:\n fc_solver = optim.SGD(model.parameters(), lr=lr, weight_decay=weight_decay)\n\n end = time.time()\n print('Training time: {:.4} seconds'.format(end - start))\n return result_model\n\n\ndef get_result_from_model(Q, data_loader):\n Q.eval()\n labels = []\n scores = []\n\n for batch_idx, (X, target) in enumerate(data_loader):\n X = X.view(-1, X_dim)\n X, target = Variable(X), Variable(target) # target in [0, 9]\n\n if cuda:\n X, target = X.cuda(), target.cuda()\n\n output = Q(X)\n output_probability = F.softmax(output, dim=1)\n\n labels.extend(target.data.tolist())\n if cuda:\n scores.extend(output_probability.cpu().data.numpy()[:, 1].tolist())\n else:\n scores.extend(output_probability.data.numpy()[:, 1].tolist())\n\n return scores, labels\n\n\ndef write_result(probas1, y):\n with open(os.path.join(resultFolder, 'probas1-HNet-DNN.txt'), 'w') as f:\n for val in probas1:\n f.write('{}\\n'.format(val))\n\n with open(os.path.join(resultFolder, 'y-HNet-DNN.txt'), 'w') as f:\n for val in y:\n f.write('{}\\n'.format(val))\n\n\nif __name__ == '__main__':\n train_loader, valid_loader, test_loader = load_data()\n\n best_model = 'best_DNN_model.pkl'\n if not os.path.exists(os.path.join(folder, best_model)):\n fc_dnn = generate_model(train_loader, valid_loader)\n save_model(fc_dnn, os.path.join(folder, best_model))\n else:\n fc_dnn = load_model(os.path.join(folder, best_model))\n\n test_loss, test_acc, test_auc = classification_accuracy(fc_dnn, test_loader)\n print()\n print('Test loss {:.4}, acc {:.4}, auc {:.4}'.format(test_loss, test_acc, test_auc))\n\n probas1, y = get_result_from_model(fc_dnn, test_loader)\n average_precision = average_precision_score(y, probas1)\n print('Test aupr {:.4}'.format(average_precision))\n fpr, tpr, thresholds = roc_curve(y, probas1, pos_label=1)\n roc_auc = auc(fpr, tpr)\n print('Test auc {:.4}'.format(roc_auc))\n write_result(probas1, y)\n print('Done')\n","sub_path":"PREDICT.py","file_name":"PREDICT.py","file_ext":"py","file_size_in_byte":9116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"251371269","text":"'''15/3/2019 Plot single level MSE budget for reanalysis\n'''\n\nimport numpy as np\nimport xarray as xr\nimport matplotlib.pyplot as plt\nfrom pylab import rcParams\nimport sh\nfrom data_handling_updates import gradients as gr\nimport matplotlib.patches as patches\n\ndef pentad_mean_climatology(data, years): # Function to get pentad of year\n pentad_years = np.array([])\n for year in years:\n data_year = data.sel(time=str(year))\n if len(data_year.time)==366:\n pentad = np.repeat(np.arange(1., 74.), 5)\n pentad = np.insert(pentad, 10, 2) \n else:\n pentad = np.repeat(np.arange(1., 74.), 5) \n pentad_years = np.concatenate((pentad_years, pentad))\n \n data = data.assign_coords(pentad = ('time', pentad_years))\n \n data_pentads = data.groupby('pentad').mean(('time'))\n \n return data_pentads\n\ndef mse_budg_jra():\n \n cp_air = 1004.6\n L = 2.507e6\n grav = 9.80665\n \n plot_dir = '/scratch/rg419/plots/zonal_asym_runs/'\n mkdir = sh.mkdir.bake('-p')\n mkdir(plot_dir)\n \n data = 
xr.open_dataset('/disca/share/rg419/JRA_55/heating_terms.nc')\n# data_w = xr.open_dataset('/disca/share/rg419/jra_omega_pentad_clim_alllevs.nc')\n \n mse = (cp_air * data.temp + L * data.sphum + grav * data.height)\n dse = (cp_air * data.temp + grav * data.height)\n qse = (L * data.sphum)\n\n q_diab = L * (data.cnvmr + data.lrgmr + data.vdfmr)/86400.\n t_diab = cp_air * (data.cnvhr + data.lrghr + data.vdfhr + data.lwhr + data.swhr)/86400.\n q_total = t_diab + q_diab\n \n u_mse = (cp_air * data.ucomp_temp + L * data.sphum_u + grav * data.ucomp_height)\n v_mse = (cp_air * data.vcomp_temp + L * data.sphum_v + grav * data.vcomp_height)\n w_mse = (cp_air * data.omega_temp + L * data.sphum_w + grav * data.omega_height)\n u_dse = (cp_air * data.ucomp_temp + grav * data.ucomp_height)\n v_dse = (cp_air * data.vcomp_temp + grav * data.vcomp_height)\n w_dse = (cp_air * data.omega_temp + grav * data.omega_height)\n u_qse = (L * data.sphum_u)\n v_qse = (L * data.sphum_v)\n w_qse = (L * data.sphum_w)\n \n def get_budget_terms(energy, uenergy, venergy, wenergy, diab):\n u_e_eddy = uenergy - energy * data.ucomp\n v_e_eddy = venergy - energy * data.vcomp\n w_e_eddy = wenergy - energy * data.omega\n eddy_conv = -1.*(gr.ddx(u_e_eddy) + gr.ddy(v_e_eddy) + gr.ddp(w_e_eddy))\n horiz_adv = -1.*(data.ucomp * gr.ddx(energy) + data.vcomp * gr.ddy(energy, vector=False))\n denergydt = gr.ddt(energy)*20.\n total = (diab + horiz_adv + vert_adv + eddy_conv)*10.\n return diab, horiz_adv, vert_adv, eddy_conv, denergydt, total\n \n mse_terms = get_budget_terms(mse, u_mse, v_mse, w_mse, q_total)\n dse_terms = get_budget_terms(dse, u_dse, v_dse, w_dse, t_diab)\n qse_terms = get_budget_terms(qse, u_qse, v_qse, w_qse, q_diab)\n \n \n \n for pentad in [32,38,44,50,56]:\n # Start figure with 4 subplots\n rcParams['figure.figsize'] = 17, 7\n rcParams['font.size'] = 14\n fig, ((ax1, ax2, ax3, ax4, ax5, ax6), (ax7, ax8, ax9, ax10, ax11, ax12), (ax13, ax14, ax15, ax16, ax17, ax18)) = plt.subplots(3, 6, sharex='col', sharey='row')\n axes = [ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8, ax9, ax10, ax11, ax12, ax13, ax14, ax15, ax16, ax17, ax18]\n \n i=0\n for energy in [mse_terms, dse_terms, qse_terms]:\n f1 = energy[0].sel(xofyear=pentad).plot.contourf(ax=axes[i*6],x='lon',y='lat', add_labels=False, extend='both', levels=np.arange(-0.1, 0.11, 0.01), add_colorbar=False)\n energy[1].sel(xofyear=pentad).plot.contourf(ax=axes[i*6+1],x='lon',y='lat', add_labels=False, extend='both', levels=np.arange(-0.1, 0.11, 0.01), add_colorbar=False)\n energy[2].sel(xofyear=pentad).plot.contourf(ax=axes[i*6+2],x='lon',y='lat', add_labels=False, extend='both', levels=np.arange(-0.1, 0.11, 0.01), add_colorbar=False)\n energy[3].sel(xofyear=pentad).plot.contourf(ax=axes[i*6+3],x='lon',y='lat', add_labels=False, extend='both', levels=np.arange(-0.1, 0.11, 0.01), add_colorbar=False)\n energy[4].sel(xofyear=pentad).plot.contourf(ax=axes[i*6+4],x='lon',y='lat', add_labels=False, extend='both', levels=np.arange(-0.1, 0.11, 0.01), add_colorbar=False)\n energy[5].sel(xofyear=pentad).plot.contourf(ax=axes[i*6+5],x='lon',y='lat', add_labels=False, extend='both', levels=np.arange(-0.1, 0.11, 0.01), add_colorbar=False)\n i=i+1\n \n for ax in axes:\n ax.set_ylim(-15.,45.)\n ax.set_xlim(90.,225.)\n ax.set_yticks(np.arange(-15.,45.,15.))\n ax.set_xticks(np.arange(90.,226.,45.))\n ax.grid(True,linestyle=':')\n \n land_mask = '/scratch/rg419/python_scripts/land_era/ERA-I_Invariant_0125.nc'\n land = xr.open_dataset(land_mask)\n land.lsm[0,:,:].plot.contour(ax=ax, 
x='longitude', y='latitude', levels=np.arange(-1.,2.,1.), add_labels=False, colors='k')\n (land.z[0,:,:]/9.81).plot.contour(ax=ax, x='longitude', y='latitude', levels=np.arange(2000.,3001.,1000.), add_labels=False, colors='k')\n \n plt.subplots_adjust(left=0.1, right=0.97, top=0.93, bottom=0.05, hspace=0.25, wspace=0.2)\n cb1=fig.colorbar(f1, ax=axes, use_gridspec=True, orientation = 'horizontal',fraction=0.05, pad=0.05, aspect=60, shrink=1.)\n \n plt.savefig(plot_dir + 'mse_budg_850_JRA55_' + str(pentad) + '.pdf', format='pdf')\n plt.close()\n \n\nif __name__ == \"__main__\":\n \n mse_budg_jra()\n \n ","sub_path":"zonal_asym_runs/mse_budg_jra.py","file_name":"mse_budg_jra.py","file_ext":"py","file_size_in_byte":5633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"622930069","text":"# 对待识别模式x, 分别计算它与N(N=N1+N2+…+Nc)个已知类别样本xj(i)i,i=1,2,…,c,的距离,取k个最近邻样本,\n# 这k个样本中属于哪一类的样本最多, 就判x属于哪一类。\n# 这里,设c = 5,Ni为第i类的样本个数,设 N = 10\n# 假设样本为二维坐标中的点\nimport numpy as np\n\ndef file2matrix(filename):\n f = open(filename, \"r\") # 设置文件对象\n line = f.readline()\n line = line[:-1]\n resultPoint = []\n resultArray = []\n i = 0\n while line: # 直到读取完文件\n line = f.readline() # 读取一行文件,包括换行符\n line = line[:-1] # 去掉换行符,也可以不去\n if line.__len__()>1:\n temp = line.split(\" \")\n array = [temp[0],temp[1],temp[2]]\n resultArray.append(array)\n i+=1\n f.close() # 关闭文件\n return resultArray\n\ndef CalcDistances(x,y):\n sampleArray = file2matrix(\"sampleData.txt\")\n # unknowPosition是要进行分类的点 sampleArray是矩阵中所有的点 distances是某一个点到所有点的距离矩阵\n unknowPosition = [x,y]\n distances = []\n for array in sampleArray:\n # 样本数据\n simpleData = [int(array[0]), int(array[1])]\n vec1 = np.array(unknowPosition)\n vec2 = np.array(simpleData)\n # 方法一\n dist = [np.sqrt(np.sum(np.square(vec1 - vec2))),int(array[2])]\n distances.append(dist)\n return distances\n\ndef KNN(distances,k):\n sortDistances = sorted(distances, key=lambda x: (x[0]))\n count = [0,0,0]\n kNow = 0\n for sortDistance in sortDistances:\n #print(\"按距离从近到远排序:到第\" + str(kNow) + \"个点的距离为:(输出格式距离,点所属的类别)\")\n #print(sortDistance)\n if kNow < k:\n count[sortDistance[1]]+=1\n kNow += 1\n else:\n break\n return count.index(max(count)) # 返回list最大值位置\n\nif __name__ == '__main__':\n k = input(\"input k:\")\n f = open(\"unknowPointData.txt\", \"r\") # 设置文件对象\n line = f.readline()\n line = line[:-1]\n while line: # 直到读取完文件\n if line.__len__() > 1:\n temp = line.split(\" \")\n type = KNN(CalcDistances(int(temp[0]), int(temp[1])), int(k))\n print(\"坐标:(\" + temp[0] + \",\" + str(int(temp[1])) + \") 所属类别编号为:\" + str(type))\n line = f.readline() # 读取一行文件,包括换行符\n f.close() # 关闭文件\n\n","sub_path":"venv/Include/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"211246520","text":"import time\r\nimport random\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom fake_useragent import UserAgent\r\n\r\n\r\nvacId = \"40687051\"\r\n\r\n# Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 Safari/537.36\r\n\r\n# https://chromedriver.chromium.org/downloads\r\n# put in Scripts\r\n\r\n# fighting porve that you are not a robot :\r\nchrome_options = Options()\r\n#ua = UserAgent()\r\n#userAgent = ua.random\r\n#print(userAgent)\r\n#chrome_options.add_argument(f'user-agent={userAgent}')\r\n\r\n# Removes navigator.webdriver flag\r\n\r\n# For older 
ChromeDriver under version 79.0.3945.16\r\nchrome_options.add_experimental_option(\"excludeSwitches\", [\"enable-automation\"])\r\nchrome_options.add_experimental_option('useAutomationExtension', False)\r\n\r\n# For ChromeDriver version 79.0.3945.16 or over\r\nchrome_options.add_argument('--disable-blink-features=AutomationControlled')\r\n\r\nchrome_options.add_argument(\"--window-size=1920,1080\")\r\n\r\n\r\nchrome_options.add_argument(\"--ignore-certificate-error\")\r\nchrome_options.add_argument(\"--ignore-ssl-errors\")\r\n\r\ndriver = webdriver.Chrome(options=chrome_options)\r\n\r\ndriver.maximize_window()\r\ntime.sleep(random.randint(1, 10))\r\n\r\n# URL of website\r\nurl = \"https://taganrog.hh.ru/account/login?backurl=%2F\"\r\n\r\n# Opening the website\r\ndriver.get(url)\r\n\r\n\r\ntime.sleep(random.randint(1, 10))\r\n\r\n\r\n# input \"bloko-input\" // login\r\ninputElement = driver.find_element_by_class_name(\"bloko-input\")\r\ninputElement.send_keys('popov@digitalcontest.org')\r\n\r\n\r\ntime.sleep(random.randint(1, 10))\r\n\r\n\r\n# input password \"bloko-input bloko-input_password\"\r\ninputElement = driver.find_element_by_class_name(\"bloko-input_password\")\r\ninputElement.send_keys(str('sLr2sD9J'))\r\n\r\n\r\ntime.sleep(random.randint(1, 10))\r\n\r\n\r\n# getting the button by class name\r\nbutton = driver.find_element_by_class_name(\"bloko-button_stretched\")\r\n\r\n# clicking on the button\r\nbutton.click()\r\n\r\n\r\ntime.sleep(random.randint(1, 10))\r\n\r\n\r\ndriver.get(f'https://taganrog.hh.ru/employer/vacancyresponses?vacancyId={vacId}')\r\n\r\ntime.sleep(random.randint(1, 10))\r\n\r\n\r\n# bloko-button HH-Pager-Controls-Next HH-Pager-Control\r\n##page = driver.find_elements_by_class_name('HH-Pager-Controls-Next')\r\n\r\n\r\n\r\n#if page:\r\ntime.sleep(random.randint(1, 10))\r\n\r\n\r\n# \"resume-search-item__fullname\"\r\n#####names = driver.find_elements_by_class_name('resume-search-item__fullname')\r\n# HH-Resume-ContactsAjax-Button\r\nshowPhoneBtns = driver.find_elements_by_class_name('HH-Resume-ContactsAjax-Button')\r\n\r\n\r\nfor btn in showPhoneBtns:\r\n btn.click()\r\n driver.save_screenshot(f\"{vacId}/{str(names[i].text)}.png\")\r\n time.sleep(random.randint(100, 300)/100)\r\n\r\n #print(str(names[i].text))\r\n #print(str(showPhoneBtns[i].text))\r\n #print(\"\")\r\n\r\n # driver.save_screenshot(f\"{vacId}{str(names[i].text)}.png\")\r\ntime.sleep(random.randint(1, 10))\r\n\r\n# https://taganrog.hh.ru/employer/vacancyresponses?vacancyId=40687051&page=1\r\n#driver.get(f'https://taganrog.hh.ru/employer/vacancyresponses?vacancyId={vacId}&page={P}')\r\n#page = driver.find_elements_by_class_name('HH-Pager-Controls-Next')\r\n#if page:\r\n# resumeCards = driver.find_elements_by_class_name(\r\n# 'resume-search-item__header')\r\n#P += 1\r\n\r\n\r\n# driver.save_screenshot(\"test.png\")\r\n\r\n# driver.page_source.encode('utf-8')\r\n\r\nprint(driver.page_source.encode(\"utf-8\"))\r\nwith open(\"test.html\", \"w\", encoding=\"utf8\") as f:\r\n f.write(driver.page_source)\r\n\r\n\r\ndriver.close()\r\n","sub_path":"BOT/RPA_HH.py","file_name":"RPA_HH.py","file_ext":"py","file_size_in_byte":3462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"597339206","text":"from flask import (Blueprint, flash, g, redirect, render_template, request,\n url_for)\nfrom werkzeug.exceptions import abort\nfrom simpleflask.db import get_db\n\n\nbp = Blueprint('views', __name__)\n\n\n@bp.route('/')\ndef index():\n page_title = 'Index'\n \n db 
= get_db()\n entries = db.execute(\n 'SELECT e.id, author, created, title, body'\n ' FROM entry e'\n ' ORDER BY created DESC'\n ).fetchall()\n\n return render_template(\n 'index.html',\n entries=entries,\n page_title=page_title)\n\n@bp.route('/add', methods=('GET', 'POST'))\ndef add():\n page_title = 'Add Entry'\n\n if request.method == 'POST':\n title = request.form['title']\n body = request.form['body']\n author = request.form['author']\n error = None\n \n if not title:\n error = 'Title is required.'\n \n if not author:\n error = 'Author is required.'\n\n if error is not None:\n flash(error)\n else:\n db = get_db()\n db.execute(\n 'INSERT INTO entry (title, body, author)'\n ' VALUES (?, ?, ?)',\n (title, body, author)\n )\n db.commit()\n return redirect(url_for('index'))\n\n return render_template('add.html', page_title=page_title)\n\n\ndef get_entry(id):\n entry = get_db().execute(\n 'SELECT e.id, title, body, created, author'\n ' FROM entry e'\n ' WHERE e.id = ?', (id,)\n ).fetchone()\n\n if entry is None:\n abort(404, \"Entry id {0} doesn't exist.\".format(id))\n\n return entry\n\n\n@bp.route('//edit', methods=('GET', 'POST'))\ndef edit(id):\n page_title = 'Edit Entry'\n\n entry = get_entry(id)\n\n if request.method == 'POST':\n title = request.form['title']\n body = request.form['body']\n author = request.form['author']\n error = None\n\n if not title:\n error = 'Title is required.'\n\n if not author:\n error = 'Author is required.'\n\n if error is not None:\n flash(error)\n else:\n db = get_db()\n db.execute(\n 'UPDATE entry SET title = ?, body = ?, author = ?'\n ' WHERE id = ?',\n (title, body, author, id)\n )\n db.commit()\n return redirect(url_for('index'))\n\n return render_template('edit.html', entry=entry)\n\n\n@bp.route('//delete', methods=('POST',))\ndef delete(id):\n get_entry(id)\n db = get_db()\n db.execute('DELETE FROM entry WHERE id = ?', (id,))\n db.commit()\n return redirect(url_for('index'))\n","sub_path":"simpleflask/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"303797241","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 6 16:30:58 2016\n\n@author: jessime\n\"\"\"\nimport random\nimport time\nimport graphics as grf\nimport move\n\nclass Resource(object):\n \n def __init__(self, kind):\n self.kind = kind\n self.pic = self.make_pic()\n \n def make_pic(self):\n if self.kind == 'glucose':\n loc = grf.Point()\n elif self.kind == 'protein':\n loc = grf.Point()\n elif self.kind == 'lipid':\n loc = grf.Point()\n pic = grf.Image(loc, 'images/{}.gif'.format(self.kind))\n return pic\n\nclass Virus(object):\n \n def __init__(self, win, scale):\n self.win = win\n self.loc = grf.Point(random.randint(25, self.win.width - 225), \n random.randint(25, 50))\n self.pic = grf.Image(self.loc, 'images/virus.gif')\n self.power = random.randint(1, 5) * (int(scale**.5)+1)\n self.health = random.randint(5, 10) * (int(scale**.5)+1)\n self.move_prob = 2\n self.speed = random.randint(1, 4) * (int(scale**.5)+1)\n \n def move(self):\n \"\"\"Move towards cells according to speed attribute\"\"\"\n y = self.pic.anchor.y\n hard_stop = self.win.height - (60+12)\n soft_stop = hard_stop - self.speed\n if random.randint(0,self.move_prob) == 0:\n if y < soft_stop:\n self.pic.move(0, self.speed)\n elif soft_stop <= y < hard_stop:\n self.pic.move(0, hard_stop - y)\n \n def attack(self, cells):\n \"\"\"Do damage to Cell if possible\n \n Parameters\n ----------\n cells 
: list of Cells\n The cells the player is attempting to defend from viruses \n \"\"\"\n x = self.pic.anchor.x\n y = self.pic.anchor.y\n if y == self.win.height - (60+12):\n for cell in cells:\n if move.point_in_rect(x, y+5, cell.pic):\n cell.health -= self.power\n break\n\nclass Cell(object):\n \"\"\"Cells are what the player is stopping the Viruses from infecting.\n \n Parameters\n ----------\n win : graphics.GraphWin\n The window in which the Cell is drawn\n \n num : int\n Number designated to the Cell. Determines location on screen\n \n \n Attributes\n ----------\n win : graphics.GraphWin\n The window in which the Cell is drawn\n \n num : int\n Number designated to the Cell. Determines location on screen\n \n loc : graphics.Point\n The coordinate location for the cell\n \n pic : graphics.Image\n The picture representation of the Cell shown to player\n \n health : int\n The number of points of damage a Cell can withstand without being destroyed\n \n bar : graphics.Line\n A visual indicator of how much health a Cell has left\n \"\"\"\n \n def __init__(self, win, num):\n self.win = win\n self.num = num\n self.loc = grf.Point(50+(self.num*100), self.win.height-30)\n self.pic = grf.Image(self.loc, 'images/cells.gif')\n self.health = 100\n self.bar = self.make_bar()\n \n def make_bar(self):\n \"\"\"Initalizes the health bar for the cell\n \n Returns\n -------\n bar : graphics.Line\n The health bar for the cell \n \"\"\"\n p1 = grf.Point(self.num*100, self.win.height-30)\n p2 = grf.Point((self.num*100)+self.health, self.win.height-30)\n bar = grf.Line(p1, p2)\n bar.setOutline('red')\n bar.setWidth(4)\n return bar\n \n def update_bar(self):\n \"\"\"Redraw the health bar to reflect current health level\"\"\"\n if self.health != 100:\n self.bar.p2.x = (self.num*100)+self.health\n self.bar.undraw()\n self.bar.draw(self.win) \n \nclass Macrophage(object):\n \n def __init__(self, win):\n self.win = win\n self.pic = grf.Image(grf.Point(575, 200), 'images/macrophage2.gif') \n self.speed = 10\n self.jump = 350\n self.jump_refresh = 3\n self.last_jump = 0\n\n def left(self):\n if self.pic.anchor.x >= 20:\n self.pic.move(-self.speed, 0)\n \n def down(self):\n if self.pic.anchor.y <= self.win.height - 20:\n self.pic.move(0, self.speed)\n\n def right(self):\n if self.pic.anchor.x <= self.win.width - 220:\n self.pic.move(self.speed, 0)\n \n def up(self):\n if self.pic.anchor.y >= 20:\n self.pic.move(0, -self.speed)\n \n def left_jump(self):\n if (self.pic.anchor.x >= 20+self.jump and \n self.last_jump + self.jump_refresh < time.time()):\n self.pic.move(-self.jump, 0)\n self.last_jump = time.time()\n \n def down_jump(self):\n if (self.pic.anchor.y <= self.win.height-20-self.jump and\n self.last_jump + self.jump_refresh < time.time()):\n self.pic.move(0, self.jump)\n self.last_jump = time.time()\n \n def right_jump(self):\n if (self.pic.anchor.x <= self.win.width - 220 - self.jump and\n self.last_jump + self.jump_refresh < time.time()):\n self.pic.move(self.jump, 0)\n self.last_jump = time.time()\n \n def up_jump(self):\n if (self.pic.anchor.y >= 20 + self.jump and\n self.last_jump + self.jump_refresh < time.time()):\n self.pic.move(0, -self.jump)\n self.last_jump = time.time()\n","sub_path":"pieces.py","file_name":"pieces.py","file_ext":"py","file_size_in_byte":5573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"511535735","text":"# -*- coding: utf-8 -*-\n__author__ = 'Duome'\n\n\"\"\" 成都房产信息搜索界面\n label\n 工具栏grid搜索最低价位,最高价位,确定\n\"\"\"\n\nfrom Tkinter 
import Tk, Frame, Entry, Label, Button\nfrom tkMessageBox import showerror, showinfo\n\nfrom xlsx_filter.filter import Filter\nfrom xlsx_filter.xlsx import Xlsx\n\n\nclass Interface(object):\n \"\"\" 搜索界面 \"\"\"\n\n def __init__(self):\n self.root = Tk()\n self.xlsx = Xlsx(r'anjuke.xlsx') # 将xlsx功能与界面绑定\n self.filter = Filter(self.xlsx.get_data()) # 获取excel中的原始数据后,将filter功能与界面绑定\n self.set_layout()\n\n\n def set_layout(self):\n self.root.config(bg='#F8F8FF')\n self.root.title(u'成都房产信息搜索')\n self.root.geometry('500x100')\n\n frame = Frame(self.root, width=10000, height=10000, bg='#F8F8FF')\n frame.pack()\n\n entry_min_price = Entry(frame, width=10, cursor='xterm')\n entry_min_price.insert(0, u'每平最低价')\n entry_min_price.config(bg='#F5F5F5', font=('Arial', '15'))\n entry_min_price.grid(row=0, column=1, padx=20)\n\n label_line = Label(frame, text=u'————', width=8)\n label_line.config(fg='#00008B', bg='#F8F8FF')\n label_line.grid(row=0, column=2, padx=5)\n\n entry_max_price = Entry(frame, width=10, cursor='xterm')\n entry_max_price.insert(0, u'每平最高价')\n entry_max_price.config(bg='#F5F5F5', font=('Arial', '15'))\n entry_max_price.grid(row=0, column=3, padx=20)\n\n def start_search():\n flag = True\n self.filter.rules = []\n\n ############################# rule 1 #####################################\n try:\n lprice = int(entry_min_price.get())\n hprice = int(entry_max_price.get())\n except (UnicodeEncodeError, ValueError):\n flag = False\n showerror('错误输入', u'输入无法识别,请重新输入')\n else:\n if lprice < 0 or hprice < 0 and lprice > hprice:\n showerror('错误输入', u'输入不正确,请重新输入')\n flag = False\n else:\n cnt = self.xlsx.head.index(u'参考单价')\n rule = lambda x: True if lprice <= x[cnt] < hprice or lprice == x[cnt] == hprice else False\n self.filter.rules.append(rule)\n ############################# rule 1 end #####################################\n\n if flag:\n result_data = self.filter.get_result()\n self.xlsx.save_data(result_data, r'result.xlsx')\n showinfo('过滤成功', u'过滤结果已存入本地result.xlsx中')\n\n button = Button(frame, text=u'搜索', width=10)\n button.config(font=('Arial', '10', 'bold'), fg='#191970', bg='#00BFFF', command=start_search)\n button.grid(row=0, column=4, padx=10)\n\n\n def start(self):\n self.root.mainloop()\n","sub_path":"py/xlsx-filter/xlsx_filter/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":3047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"265827340","text":"# -*- coding: utf-8 -*-\nimport hashlib\nimport random\nimport json\nimport datetime\n\nimport pytz\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.models import *\nfrom django.core.mail import EmailMessage\nfrom django.http import HttpResponseRedirect, JsonResponse\nfrom django.template.loader import get_template\nfrom django.urls import reverse_lazy\nfrom django.views.generic import *\nfrom django.shortcuts import render\nfrom rest_framework.decorators import api_view\n\nfrom google_drive import upload_file\nfrom google_calendar import create_event\nfrom project.forms import *\nfrom project.models import *\nfrom task.models import *\nfrom django.urls import reverse\nfrom users.views import send_email\nfrom project_TW import add_project, UpdateProjectTW, DeleteProjectTW\n\n'''\nClase que muestra todos los proyectos.\n'''\nclass Home(TemplateView):\n template_name = 'index.html'\n\n def get_context_data(self, **kwargs):\n context = super(\n Home, self).get_context_data(**kwargs)\n 
project = Project.objects.all()\n context['project'] = project\n return context\n\n'''\nClase que permite la creación de un nueco proyecto.\n'''\nclass New_Project(FormView):\n template_name = 'page-new-project.html'\n form_class = NewProjectForm\n\n def get_context_data(self, **kwargs):\n context = super(\n New_Project, self).get_context_data(**kwargs)\n context['title'] = 'Agregar'\n return context\n\n def post(self, request, *args, **kwargs):\n post_values = request.POST.copy()\n form = NewProjectForm(post_values)\n if form.is_valid():\n project = form.save(commit=False)\n project.name = post_values['name']\n '''\n Se genera un código aleatorio a partir del nombre del proyecto.\n '''\n code = codeProject(project.name)\n code_exist = Project.objects.filter(code=code).exists()\n '''\n Si el código del proyecto existe, se agrega el caracter siguiente del nombre del proyecto.\n '''\n if code_exist:\n length = len(code)\n code = code + project.name[length]\n project.code=code\n else:\n project.code = code\n '''\n Se transforma el formato de las fechas a como las guarda PostgreSQL. \n '''\n a = post_values['startDate'].split('-')\n startDate = a[2]+'-'+a[1]+'-'+a[0]\n project.startDate = startDate\n b = post_values['endDate'].split('-')\n endDate = b[2] + '-' + b[1] + '-' + b[0]\n project.endDate = endDate\n project.status = post_values['status']\n project.description = post_values['description']\n auth_cliente = post_values['client']\n auth_emp = post_values['company']\n # id de responsable de la empresa\n profile_emp = ProfileUser.objects.get(fk_profileUser_user_id = auth_emp)\n # id del cliente\n profile_client = ProfileUser.objects.get(fk_profileUser_user = auth_cliente)\n\n # Se crea el evento para realizar la conexión con Google Calendar\n\n tz = pytz.timezone('America/Caracas')\n start_datetime = tz.localize(datetime.datetime(int(a[2]), int(a[1]), int(a[0])))\n stop_datetime = tz.localize(datetime.datetime(int(b[2]), int(b[1]), int(b[0])))\n event = {\n 'summary': 'Proyecto '+ str(project.name),\n 'description': project.description,\n 'start': {\n 'dateTime': start_datetime.isoformat(),\n 'timeZone': 'America/Caracas',\n },\n 'end': {\n 'dateTime': stop_datetime.isoformat(),\n 'timeZone': 'America/Caracas',\n },\n 'attendees': [\n {'email': profile_emp.fk_profileUser_user.email},\n {'email': profile_client.fk_profileUser_user.email},\n ],\n }\n\n create_event(event)\n\n project.save()\n '''\n Se busca el proyecto guardado con la finalidad de guardarlo en TeamWork\n '''\n new_project= Project.objects.get(code = project.code)\n '''\n Se cambia el formato de las fechs a como es aceptado en TeamWork\n '''\n startDate = str(new_project.startDate).split('-')\n endDate = str(new_project.endDate).split('-')\n\n #####################Conexion con Team Work###############################\n #id_team_work= add_project(project.name, project.description, ''.join(startDate), ''.join(endDate))\n #project.idTeamWorkProject=id_team_work\n project.save()\n project.save()\n ##################################################\n\n '''\n Se guarda la relación entre el cliente y el proyecto.\n '''\n project_user_client = ProjectUser(user= profile_client, project=new_project)\n '''\n Se guarda la relación entre el responsable por parte de IDBC y el proyecto.\n '''\n project_user_emp = ProjectUser(user= profile_emp, project=new_project, isResponsable= True)\n\n project_user_client.save()\n project_user_emp.save()\n messages.success(request, \"El projecto ha sido guardado exitosamente\")\n return 
HttpResponseRedirect(reverse_lazy('new_project'))\n else:\n messages.success(request, \"Error al registrar el proyecto\")\n\n return render(request, 'page-new-project.html', {'form': form})\n\n'''\nClase que permite modificar un proyecto.\n'''\nclass Update_Project(TemplateView):\n template_name = 'page-new-project.html'\n form_class = UpdateProjectForm\n\n def get_context_data(self, **kwargs):\n context = super(\n Update_Project, self).get_context_data(**kwargs)\n context['title'] ='Modificar'\n project = Project.objects.get(code=self.kwargs['pk'])\n '''\n Se verifica si los proyectos tienen fechas asociadas de modo que sean mostradas en la respectiva vista. \n '''\n if project.startDate == None:\n startDate = ''\n else:\n startDate = project.startDate.strftime(\"%d-%m-%Y\")\n if project.endDate == None:\n endDate = ''\n else:\n endDate = project.endDate.strftime(\"%d-%m-%Y\")\n\n projectUser = ProjectUser.objects.filter(project_id=project.code)\n client=''\n responsable = ''\n for i in projectUser:\n profileUser = ProfileUser.objects.get(id = i.user_id)\n user = User.objects.get(id=profileUser.fk_profileUser_user_id)\n\n if i.isResponsable:\n responsable=user\n else:\n if str(user.groups.all()[0])=='Cliente':\n client=user\n data = {'name':project,\n 'client':client,\n 'company': responsable,\n 'startDate': startDate,\n 'endDate': endDate,\n 'status': project.status,\n 'description':project.description\n }\n\n form = NewProjectForm(initial=data)\n context['form'] = form\n return context\n\n def post(self, request, *args, **kwargs):\n post_values = request.POST.copy()\n form = UpdateProjectForm(post_values)\n if form.is_valid():\n project_pk = kwargs['pk']\n project = Project.objects.get(pk = project_pk)\n '''\n En caso de que sea el mismo proyecto pero diferente responsable, se busca el responsable actual\n '''\n # caso 1: mismo codigo diferente responsable\n old_responsable = ProjectUser.objects.filter(project = project, isResponsable=True).exists()\n project.name=post_values['name']\n project.status = post_values['status']\n auth_cliente = post_values['client']\n auth_emp = post_values['company']\n # id de nuevo responsable de la empresa\n profile_emp = ProfileUser.objects.get(fk_profileUser_user_id = auth_emp)\n userProject = ProjectUser.objects.filter(user_id=profile_emp.pk, project=project)\n if userProject:\n # caso de que exista la relacion entre usuario y proyecto pero no es responsable\n for i in userProject:\n if i.isResponsable == False:\n relation = ProjectUser.objects.get(id=i.id)\n if old_responsable:\n old_responsable = ProjectUser.objects.get(project = project, isResponsable=True)\n old_responsable.isResponsable = False\n old_responsable.save()\n relation.isResponsable = True\n relation.save()\n else:\n pass\n\n else:\n if old_responsable:\n old_responsable = ProjectUser.objects.get(project = project, isResponsable=True)\n old_responsable.isResponsable = False\n old_responsable.save()\n project_user_emp = ProjectUser(user= profile_emp, project=project, isResponsable= True)\n project_user_emp.save()\n\n profile_client = ProfileUser.objects.get(fk_profileUser_user = auth_cliente)\n old_client = ProjectUser.objects.filter(user=profile_client.pk, project=project)\n print(\"viejo cliente \" + str(old_client))\n if not old_client:\n print(\"no existe\")\n newRelationClient = ProjectUser(project_id = project.code, user_id=profile_client.id, isResponsable=False)\n projectUser = ProjectUser.objects.filter(project_id=project.code)\n for i in projectUser:\n print(i.user_id)\n 
profileUser = ProfileUser.objects.get(id = i.user_id)\n user = User.objects.get(id=profileUser.fk_profileUser_user_id)\n print(user)\n if str(user.groups.all()[0]) == \"Cliente\":\n deleteClient = ProjectUser.objects.get(user_id=profileUser)\n print(\"me van a eliminar \" +str(deleteClient.user_id))\n deleteClient.delete()\n newRelationClient.save()\n a = post_values['startDate'].split('-')\n startDate = a[2]+'-'+a[1]+'-'+a[0]\n print(startDate)\n project.startDate =startDate\n b = post_values['endDate'].split('-')\n endDate = b[2]+'-'+b[1]+'-'+b[0]\n project.endDate = endDate\n project.description = post_values['description']\n\n project.save()\n project = Project.objects.get(code = project.code)\n print(\"aquiiiiii antes de la fecha\")\n print(str(project.startDate).split('-'))\n startDate = str(project.startDate).split('-')\n endDate = str(project.endDate).split('-')\n print(''.join(startDate))\n\n # ****************** Team Work ***********************\n\n #UpdateProjectTW(project.idTeamWorkProject, project.name, ''.join(startDate), ''.join(endDate),\n # project.description)\n\n # *******************************************************\n\n messages.success(request, \"El proyecto ha sido modificado exitosamente\")\n return HttpResponseRedirect(reverse_lazy('project'))\n else:\n return render(request, 'page-new-project.html', {'form':form, 'pk':self.kwargs['pk']})\n\n'''\nClase que permite detallar un proyecto.\n'''\nclass Detail_Project(TemplateView):\n template_name = 'page-detail-project.html'\n form_class= statusForm\n\n def get_context_data(self, **kwargs):\n context = super(\n Detail_Project, self).get_context_data(**kwargs)\n project = Project.objects.get(code=self.kwargs['pk'])\n '''\n Documentos asociados a un proyecto.\n '''\n documents = Documents.objects.filter(fk_documents_project=project)\n users = User.objects.all()\n\n '''\n Tareas relacionadas a un proyecto.\n '''\n\n user_pk= self.request.user.id\n user = User.objects.get(pk=user_pk)\n profileUser = ProfileUser.objects.get(fk_profileUser_user_id = user_pk)\n if (user.has_perms(['project.add_project'])):\n task = Task.objects.filter(project=project)\n else:\n task= Task.objects.filter(users=profileUser.pk,project=project)\n dependencys = Dependency.objects.all()\n\n projectUser = ProjectUser.objects.filter(project_id=project.code)\n now = datetime.datetime.now()\n '''\n En caso de que no se tenga una frecha establecida, se muestra '---'\n '''\n if (project.endDate == None):\n project.endDate = \"----\"\n context['resta']= project.endDate\n else:\n '''\n Se resta el día de culminación del proyecto con el día actual, para saber cuante tiempo queda para entregar el proyecto.\n '''\n resta = project.endDate - now.date()\n context['resta'] = resta.days\n\n '''\n Esto se qeuría hacer con la finalidad de enviar un correo de recordatorio, al restar un día, sin embargo \n debe hacerse en otro lado\n '''\n # if resta.days == 1:\n # emailUser =[]\n # for i in projectUser:\n # emailUser.append(i.user.fk_profileUser_user.email)\n # print(\"correoooooosss\")\n # print(emailUser)\n # email_subject = 'IDBC Group - Entrega de ' + str(project.name)\n # message_template = 'emailEndProject.html'\n # c = {'project': project.name,\n # 'endDate':project.endDate\n # }\n #\n #\n # send_email(email_subject, message_template, c, emailUser)\n '''\n En caso de que el proyecto no cuente con una descripción, se mostrará 'Descripción no disponible'\n '''\n if (project.description == ''):\n project.description = 'Descripción no disponible'\n '''\n En 
caso de que el proyecto no cuente con las fechas establecidas, se mostrará 'No disponible'\n '''\n if project.startDate == None or project.endDate== None:\n project.startDate = 'No Disponible'\n project.endDate = 'No Disponible'\n\n '''\n Inicialmente, se colocará al cliente 'No disponible'. Si el el proyecto cuenta con un cliente, esta variable \n cambia con el nombre del mismo\n '''\n client ='No Disponible'\n for i in projectUser:\n profileUser = ProfileUser.objects.get(id = i.user_id)\n user = User.objects.get(id=profileUser.fk_profileUser_user_id)\n '''\n Si el usuario ocupa el rol de cliente, la variable client tendrá este nombre\n '''\n if str(user.groups.all()[0]) == \"Cliente\":\n client = user.get_full_name()\n\n '''\n Se guarda en un arreglo los estados del proyecto. Estos estados pueden verse por el responsable del proyecto.\n '''\n status_project= ['In Progress','Technical Review','Functional Review', 'Customer Acceptance','Done']\n '''\n Estos status se pueden ver por el resto de los usuarios.\n '''\n status = ['In Progress','Technical Review']\n\n\n context['tasks'] = task\n context['dep'] = dependencys\n context['project'] = project\n context['client'] = client\n context['projectUser'] = projectUser\n context['status_project'] = status_project\n context['status']=status\n context['documents']=documents\n context['users']=users\n\n return context\n\n'''\nFunción que permite crear el código del proyecto.\n@:param name: Nombre del proyecto.\n@:return las tres primeras letras del proyecto.\n'''\ndef codeProject(name):\n name = ''.join(name)\n return name[:3]\n\n'''\nFunción que permite validar el nombre de un proyecto.\n'''\ndef ValidateName(request):\n name = request.POST.get('name', None)\n data = {\n 'name_exists': Project.objects.filter(name=name).exists()\n }\n\n return JsonResponse(data)\n\n'''\nFunción que envía los datos por un formato Json a un JS para construir el diagrama de barras. 
\n'''\ndef BarProgress(request):\n user = request.user.id\n user_pk = User.objects.get(pk=user)\n\n '''\n Si el usuario cuenta con los permisos de agregar un proyecto, se le muestra en el diagrama todos los proyectos\n registrados en el sistema\n '''\n if (user_pk.has_perms(['project.add_project'])):\n proj = Project.objects.all()\n x = [p.name for p in proj]\n '''\n En caso contrario, solo se le muestran los proyectos a los cuales él esté vinculado.\n '''\n else:\n user = ProfileUser.objects.get(fk_profileUser_user=user_pk)\n proj = ProjectUser.objects.filter(user=user)\n x=[]\n for i in proj:\n x.append(Project.objects.get(code=i).name)\n '''\n Estimada se refiere a la cantidad de días que ha de durar un proyecto, esto a través de la duración de sus tareas.\n Real se refiere a la verdadera cantidad de días que se demoró el proyecto, esto en vista a la duración de las tareas.\n '''\n\n array = ([\n ['Proyecto', 'Estimada', 'Real']\n ])\n\n duration = []\n durationDone =[]\n for i in x:\n project = Project.objects.get(name=i)\n tasksCount = Task.objects.filter(project_id=project.code).count()\n '''\n En caso de que un proyecto no cuente con tareas asociadas, en el diagrama de barra se mostrará unicamente el \n nombre del proyecto\n '''\n if tasksCount == 0:\n days = 0\n real=0\n array.append([i,days,real])\n else:\n tasks = Task.objects.filter(project_id=project.code)\n for task in tasks:\n '''\n La duración de los dias estimados se calcula en base a la resta de la fecha final establecida en un \n principio menos la fecha inicial\n '''\n days = task.endDate - task.startDate\n '''\n Como se necesita tener la cantidad de días que dura cada tarea, se procede a guardar en un arreglo la \n duración de cad tarea, con la finalidad de sumarlos todos y poder tener un total de días establecidos.\n '''\n duration.append(days.days)\n days=sum(duration)\n duration = []\n\n '''\n Para calcular la cantidad de tareas que ya han sido terminadas, es decir, que su status se encuentre en\n estado 'Done', se procede a filtrar estas tareas con este status.\n '''\n taskDone = Task.objects.filter(project_id=project.code, status='Done')\n if taskDone:\n for t in taskDone:\n '''\n Como al cambiar el estado de una tarea en Done, se almacena la fecha de culminación del mismo, ésta\n es la mostrada en el diagrama de barras como 'Real'.\n '''\n daysDone = t.endDateReal - t.startDate\n durationDone.append(daysDone.days)\n daysDone = sum(durationDone)\n durationDone = []\n\n array.append([project.name,days,daysDone])\n else:\n array.append([project.name, days, 0])\n\n return JsonResponse(array, safe=False)\n\n'''\nFunción que permite mostrar los detalles de un proyecto, es decir, permite dirigir hacia la vista de 'Detail Project'\nPara esto, se envía toda la información necesaria mediante un Json a un JS.\n'''\ndef ShowDetails(request):\n nameProject = request.GET.get('nameProject', None)\n\n data = {'project': Project.objects.filter(name=nameProject).exists()}\n\n if data['project']:\n project = Project.objects.get(name=nameProject)\n projectUser = ProjectUser.objects.filter(project_id = project.code)\n data['client'] = 'No disponible'\n data['responsable'] = 'No disponible'\n for i in projectUser:\n profileUser = ProfileUser.objects.get(id=i.user_id)\n user = User.objects.get(id=profileUser.fk_profileUser_user_id)\n if i.isResponsable:\n data['responsable']=user.get_full_name()\n else:\n if str(user.groups.all()[0]) == \"Cliente\":\n data['client'] = user.get_full_name()\n\n data['name']= project.name\n data['start'] = 
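BarProgress above sums per-task day deltas into a header-plus-rows array for the bar chart. A minimal sketch of that aggregation without the ORM, assuming tasks are (startDate, endDate, endDateReal-or-None) tuples; names and data are illustrative only.

import datetime

def duration_rows(projects):
    rows = [['Proyecto', 'Estimada', 'Real']]
    for name, tasks in projects.items():
        # estimated: planned end minus start; real: actual completion minus start
        est = sum((end - start).days for start, end, _ in tasks)
        real = sum((done - start).days for start, _, done in tasks if done is not None)
        rows.append([name, est, real])
    return rows

d = datetime.date
projects = {'demo': [(d(2020, 1, 1), d(2020, 1, 11), d(2020, 1, 15)),
                     (d(2020, 2, 1), d(2020, 2, 6), None)]}
print(duration_rows(projects))
# [['Proyecto', 'Estimada', 'Real'], ['demo', 15, 14]]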
project.startDate\n if data['start']== None:\n data['start'] = 'No disponible'\n else:\n data['start'] = project.startDate.strftime(\"%d-%m-%Y\")\n data['end'] = project.endDate\n if data['end'] == None:\n data['end'] = 'No disponible'\n else :\n data['end'] = project.endDate.strftime(\"%d-%m-%Y\")\n data['status'] = project.status\n if data['status'] == '':\n data['status'] = \"Sin status\"\n return JsonResponse(data)\n\n'''\nFunción que permite obtener el código de un proyecto, con la finalidad de redirigir a la página de 'Detail Project' de\ncada uno de los proyectos.\n'''\ndef getCode(request):\n nameProject= request.GET.get('nameProject',None)\n data ={'code' : Project.objects.get(name=nameProject).code}\n return JsonResponse(data)\n\n'''\nFunción que permite eliminar un proyecto.\n@:param code: Código del proyecto.\n'''\ndef DeleteProject(request,code):\n project = Project.objects.get(code=code)\n task = Task.objects.filter(project=project.code).count()\n '''\n Si el proyecto tiene al menos una tarea asociada el proyecto no se puede eliminar.\n '''\n if task > 0:\n messages.success(request, \"El proyecto \" + str(project.name) + \" tiene tareas asociadas. No se puede eliminar\")\n return HttpResponseRedirect(reverse_lazy('project'))\n else:\n #DeleteProjectTW(project.idTeamWorkProject)\n project.delete()\n messages.success(request, \"El proyecto \" + str(project.name) + \" se ha eliminado exitosamente\")\n return HttpResponseRedirect(reverse_lazy('project'))\n\n'''\nClase que permite la visualización de los documentos pertenecientes a un proyecto.\n'''\nclass DocumentsView(FormView):\n template_name = 'page-detail-project.html'\n form_class = DocumentsForm\n\n def post(self, request, *args, **kwargs):\n form = DocumentsForm(request.POST, request.FILES)\n if form.is_valid():\n project_pk = self.kwargs['pk']\n project = Project.objects.get(pk=project_pk)\n if (request.FILES == {}):\n pass\n else:\n '''\n Como al agregar un documento, se pueden agregar varios simultaneamente, se obtienen todos los documentos\n con sus descripciones ingresados por el usuario. Luego se almacenan en su respecctiva tabla.\n '''\n desc = request.POST.getlist('description')\n files =request.FILES.getlist('file')\n for i in zip(files, desc):\n doc = Documents(file=i[0],\n fk_documents_project= project,\n description=i[1])\n doc.save()\n '''\n Luego de guardarlo en la base de datos, se procede a almacenar el documento en el google Drive del\n correo asociado. 'path' se refiere a la ruta donde serán almacenados los docuemntos \n '''\n path= 'ProjectManagement/static/media/'+str(doc.file)\n upload_file(path)\n\n messages.success(request, \"El Documento ha sido guardado exitosamente\")\n return HttpResponseRedirect(reverse_lazy('detail_project',kwargs={\"pk\":self.kwargs['pk']}))\n else:\n messages.success(request, \"No se puede guardar el documento\")\n return HttpResponseRedirect(reverse_lazy('detail_project', kwargs={\"pk\": self.kwargs['pk']}))\n\n'''\nClase que permite agregar más usuarios a un proyecto. 
Estos usuarios deberán estar registrados previamente en el\nsistema\n'''\nclass MoreUsersView(FormView):\n template_name = 'page-detail-project.html'\n form_class = MoreUsersForm\n\n def post(self, request, *args, **kwargs):\n post_values = request.POST.copy()\n form = MoreUsersForm(post_values)\n if form.is_valid():\n project_pk = self.kwargs['pk']\n project = Project.objects.get(code=project_pk)\n\n '''\n Se obtiene la lista de todos los usuarios ingresados por el usuario.\n '''\n users = post_values.getlist('user')\n for user in users:\n user = User.objects.get(pk=user)\n userProfile = ProfileUser.objects.get(fk_profileUser_user=user)\n existUser = ProjectUser.objects.filter(user=userProfile, project=project).exists()\n\n if not existUser:\n projectUser = ProjectUser(isResponsable=False, project=project, user=userProfile)\n projectUser.save()\n messages.success(request, \"La/as persona/as se ha agregado al proyecto \"+str(project.name))\n return HttpResponseRedirect(reverse_lazy('detail_project', kwargs={\"pk\": self.kwargs['pk']}))\n else:\n project = Project.objects.get(code=self.kwargs['pk'])\n messages.success(request, \"Erro al registrar usuarios en el proyecto \" + str(project.name))\n return HttpResponseRedirect(reverse_lazy('detail_project', kwargs={\"pk\": self.kwargs['pk']}))\n\n'''\nFunción que permite mostrar la tabla de tareas asociadas a un proyecto. La información es enviada mediante Json\na un JS.\n'''\ndef ShowTable(request):\n nameProject = request.GET.get('nameProject', None)\n project=Project.objects.get(name=nameProject)\n data = {'project': Project.objects.filter(name=nameProject).exists()}\n # Usuario que inicia sesion\n user = request.user.id\n user_pk = User.objects.get(pk=user)\n profileUser = ProfileUser.objects.get(fk_profileUser_user=user_pk)\n\n '''\n Si el usuario tiene permisos de agregar un proyecto se muestran todos las tareas asociadas al proyecto\n '''\n\n if (user_pk.has_perms(['project.add_project'])):\n task = Task.objects.filter(project=project)\n else:\n '''\n De lo contrario, solo se muestran las tareas correspondientes al usuario \n '''\n task = Task.objects.filter(users=profileUser.pk, project=project)\n\n x = []\n j = 0\n for i in task:\n # usuario dueño de la tarea\n user=User.objects.get(pk=i.users.fk_profileUser_user_id)\n dependence = Dependency.objects.filter(task_id=i.code)\n d = [] # Arreglo de las tareas que dependen 'requieren de'.\n for dep in dependence:\n d.append(dep.dependence)\n d = (' ').join(d)\n required= Dependency.objects.filter(dependence=i.code)\n r = [] # Arreglos de las tareas que requieren de una tarea 'requerido por'\n for req in required:\n r.append(req.task_id)\n r = (' ').join(r)\n a = i.startDate\n start = str(a.day)+'-'+str(a.month)+'-'+str(a.year)\n b = i.endDate\n end = str(b.day)+'-'+str(b.month)+'-'+str(b.year)\n '''\n La información es enviada mediante arrelgos de arrelgos.\n '''\n y=[]\n y.append(i.code)\n y.append(i.name)\n y.append(user.first_name)\n y.append(user.last_name)\n y.append(start)\n y.append(end)\n y.append(r)\n y.append(d)\n y.append(i.status)\n x.append(y)\n j =j+1\n\n data['task']=x\n\n return JsonResponse(data)\n\n'''\nClase que permite el cambio de status de una tarea\n'''\nclass ChangeStatus(TemplateView):\n template_name = 'page-detail-project.html'\n form_class= statusForm\n\n def post(self, request, *args, **kwargs):\n post_values = request.POST.copy()\n form = statusForm(post_values)\n if form.is_valid():\n project_pk = self.kwargs['pk']\n code_task = self.kwargs['code']\n user_pk = 
self.request.user.id\n users = User.objects.get(pk=user_pk)\n project = Project.objects.get(pk=project_pk)\n task = Task.objects.get(code=code_task, project=project_pk)\n #Se calcula la fecha actual para saber cuando se cambia el status\n endDateReal = datetime.date.today()\n task.endDateReal = endDateReal\n # Si la fecha de inicio de la tarea es mayor que la actual no se debe cambiar el status\n if (task.startDate > task.endDateReal ):\n messages.success(request, \"La tarea \"+str(task.name)+ \" no ha sido iniciada. No puede cambiar su status\")\n return HttpResponseRedirect(reverse_lazy('detail_project', kwargs={\"pk\": self.kwargs['pk']}))\n old_status = task.status\n task.status = post_values['status']\n projectUser = ProjectUser.objects.filter(project_id = project)\n\n '''\n Se envía un correo electrónico informando a los usuarios interesados que se ha cambiado el status de una \n determinada tarea. Estos usuarios interesados son el responsable de la tarea y el responsable de IDBC del proyecto\n '''\n email_subject = 'IDBC Group - Cambio de Estado de tarea del proyecto ' + str(project.name)\n message_template = 'emailStatusTask.html'\n for i in projectUser:\n if i.isResponsable == True:\n name_responsable = i.user.fk_profileUser_user.first_name\n email_responsable = [i.user.fk_profileUser_user.email]\n c = {'usuario': name_responsable,\n 'name_task': task.name,\n 'project' : project.name,\n 'user':users.first_name +' '+users.last_name,\n 'old_status':old_status,\n 'new_status': task.status,\n 'host': request.META['HTTP_HOST']\n }\n send_email(email_subject, message_template, c, email_responsable)\n\n email_task = [task.users.fk_profileUser_user.email]\n name_responsable = task.users.fk_profileUser_user.first_name\n c = {'usuario': name_responsable,\n 'name_task': task.name,\n 'project': project.name,\n 'user': users.first_name + ' ' + users.last_name,\n 'old_status': old_status,\n 'new_status': task.status,\n 'host': request.META['HTTP_HOST']\n }\n send_email(email_subject, message_template, c, email_task)\n task.save()\n messages.success(request, \"El status de la tarea ha sido modificado exitosamente\")\n return HttpResponseRedirect(reverse_lazy('detail_project', kwargs={\"pk\": self.kwargs['pk']}))\n else:\n messages.success(request, \"Error al cambiar status de tarea\")\n return HttpResponseRedirect(reverse_lazy('detail_project', kwargs={\"pk\": self.kwargs['pk']}))\n\n'''\nFunción que permite activar el botón de cerrar proyecto, siempre y cuando todas sus tareas asociadas estén en estado\n'Done' \n'''\ndef ChangeButton(request):\n code = request.GET.get('code', None)\n all_task = Task.objects.filter(project=code).count()\n done_task = Task.objects.filter(project=code, status=\"Done\").count()\n\n data = {\n 'name_exists': Project.objects.filter(code=code).exists()\n }\n project = Project.objects.get(code=code)\n data['code'] = code\n data['all_task']= all_task\n data['done_task']=done_task\n data['status']=project.status\n\n return JsonResponse(data)\n\n'''\nFunción que permite cerrar un proyecto siempre y cuando el status del mismo sea 'Done'\n@:param pk: identificador del proyecto.\n'''\ndef CloseProject(request, pk):\n project = Project.objects.get(code=pk)\n project.status ='Done'\n project.save()\n messages.success(request, \"El Proyecto \"+str(project.name)+ \" se ha cerrado.\")\n return HttpResponseRedirect(reverse_lazy('detail_project', kwargs={\"pk\": 
project.code}))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"project/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":32851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"294478798","text":"from django import template\nfrom django.utils.safestring import mark_safe\nfrom django.conf import settings\nimport re\n\nregister = template.Library()\n\n@register.simple_tag\ndef add_units(value, stat):\n value = str(value)\n if stat == \"price_per_serving\":\n value = \"$\" + value\n elif stat == \"serving_size\":\n value = value + \"g\"\n elif stat == \"calories\":\n value = str(int(round(float(value), 0)))\n elif stat == \"fat\":\n value = value + \"g\"\n elif stat == \"cholesterol\":\n value = value + \"mg\"\n elif stat == \"sodium\":\n value = value + \"mg\"\n elif stat == \"carbohydrates\":\n value = value + \"g\"\n elif stat == \"sugar\":\n value = value + \"g\"\n elif stat == \"protein\":\n value = value + \"g\"\n elif stat == \"vitA\":\n value = value + \"%\"\n elif stat == \"vitC\":\n value = value + \"%\"\n elif stat == \"calcium\":\n value = value + \"%\"\n elif stat == \"iron\":\n value = value + \"%\"\n elif stat == \"fiber\":\n value = value + \"g\"\n elif stat == \"serving_size\":\n value = value + \"g\"\n elif stat == \"left_winning_percentage\":\n value = value + \"%\"\n elif stat == \"right_winning_percentage\":\n value = value + \"%\"\n return mark_safe(value)\n\n@register.filter(name='process_attribute')\ndef process_attribute(string):\n if \"vit\" in string:\n string = string.replace(\"vit\", \"vitamin \")\n if \"_\" in string:\n string = string.replace(\"_\", \"-\")\n return mark_safe(string)\n","sub_path":"tournament/game/templatetags/tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"287294282","text":"import cv2\nimport numpy as np\nimport pytesseract\nfrom PIL import Image, ImageEnhance, ImageFilter\n#s=pytesseract.pytesseract.tesseract_cmd = 'C:\\\\Program Files (x86)\\\\Tesseract-OCR\\\\tesseract.exe'\n'''image=Image.open(\"img/im.png\") # the second one\nimg_gray = cv2.cvtColor(np.asarray(image), cv2.COLOR_BGR2GRAY) #there is a problem here\n\nth, bw = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\nedges = cv2.Canny(img_gray, th/2, th)\n\n\ncv2.imwrite(\"img/cropped.jpg\", edges)'''\n'''\nim = im.filter(ImageFilter.MedianFilter())\nenhancer = ImageEnhance.Contrast(im)\nim = enhancer.enhance(2)\nim = im.convert('1')\nim.save('resimler/temp2.jpg')\n'''\n\n#text = pytesseract.image_to_string(gray_image)\n#print(\"asd\",text)\n\n#Linux window/threading setup code.\ncv2.startWindowThread()\ncv2.namedWindow(\"Original\")\ncv2.namedWindow(\"Sharpen\")\n\n#Load source / input image as grayscale, also works on color images...\nimgIn = cv2.imread(\"img/im.png\", cv2.IMREAD_GRAYSCALE)\ncv2.imshow(\"Original\", imgIn)\n\n\n#Create the identity filter, but with the 1 shifted to the right!\nkernel = np.zeros( (9,9), np.float32)\nkernel[4,4] = 2.0 #Identity, times two!\n\n#Create a box filter:\nboxFilter = np.ones( (9,9), np.float32) / 81.0\n\n#Subtract the two:\nkernel = kernel - boxFilter\n\n\n#Note that we are subject to overflow and underflow here...but I believe that\n# filter2D clips top and bottom ranges on the output, plus you'd need a\n# very bright or very dark pixel surrounded by the opposite type.\n\ncustom = cv2.filter2D(imgIn, -1, 
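The kernel constructed above is an unsharp mask: twice the identity minus a 9x9 box blur, so the filtered output is 2*original - blurred. A dependency-light check, using only numpy, that the coefficients sum to 1 and therefore preserve mean brightness:

import numpy as np

kernel = np.zeros((9, 9), np.float32)
kernel[4, 4] = 2.0                              # identity, times two
kernel -= np.ones((9, 9), np.float32) / 81.0    # subtract the box filter
print(kernel[4, 4])   # ~1.9876 -- large center weight
print(kernel.sum())   # ~1.0    -- overall intensity is preserved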
kernel)\ncv2.imshow(\"Sharpen\", custom)\ncv2.imwrite(\"img/cropped.png\", custom)\n\ncv2.waitKey(0)","sub_path":"plaka_okuma/denemeeee.py","file_name":"denemeeee.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"451134882","text":"# coding: utf-8\nimport random\n\nimport requests\nfrom lxml import etree\nfrom fake_useragent import UserAgent\n\nfrom spider.encrypt import encrypt\nfrom config import PROXIES\n\nTIMEOUT = 6\n\n\ndef choice_proxy():\n if PROXIES:\n return random.choice(PROXIES + [''])\n return ''\n\ndef get_user_agent():\n ua = UserAgent()\n return ua.random\n\ndef fetch(url, retry=0):\n s = requests.Session()\n proxies = {\n 'http': choice_proxy()\n }\n s.headers.update({'user-agent': get_user_agent(),\n 'referer': 'http://music.163.com/'})\n try:\n return s.get(url, timeout=TIMEOUT, proxies=proxies)\n except Exception:\n if retry < 3:\n return fetch(url, retry=retry + 1)\n raise\n\n \ndef post(music_id, offset=0, limit=100):\n url = 'http://music.163.com/weapi/v1/resource/comments/R_SO_4_{}?csrf_token='.format(music_id)\n query = {\n 'rid': 'R_SO_4_{}'.format(music_id),\n 'offset': offset,\n 'total': 'true', # 第一页时为true,其他页为false\n 'limit': limit,\n 'csrf_token': ''\n }\n \n headers = {\n 'Connection': 'keep-alive',\n 'Host': 'music.163.com',\n 'Origin': 'http://music.163.com',\n 'Referer': 'http://music.163.com/',\n 'User-Agent': get_user_agent()\n }\n\n data = encrypt(query)\n \n return requests.post(url, headers=headers, data=data)\n\n\ndef get_tree(url):\n r = fetch(url)\n return etree.HTML(r.text)\n\n\n","sub_path":"wangyiyun/spider/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"551134616","text":"import FWCore.ParameterSet.Config as cms\n\nfrom PhysicsTools.MiBiCommonPAT.makeMiBiCommonPAT_cff import *\n\n\n\nprocess = cms.Process(\"MiBiCommonPAT\")\n\n\n\nmakeMiBiCommonPAT(process, GlobalTag=\"GR_R_39X_V6::All\", MC=False, Filter=True, SavePAT=True)\n\nprocess.source.fileNames = cms.untracked.vstring(\n 'file:/tmp/dimatteo/D8700674-35CB-DF11-8C60-0024E85A3ED8.root'\n #'file:/gwpool/users/amassiro/VBF/Releases/CMSSW_3_8_3/src/PhysicsTools/MiBiCommonPAT/669A4128-43D0-DF11-AE93-001A92810ACE.root' \n\n)\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(100) )\nprocess.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(False))\n\nprocess.out.fileName = cms.untracked.string(\"file:/tmp/dimatteo/MiBiCommonPAT.root\")\n\n\n","sub_path":"MiBiCommonPAT/test/makeMiBiCommonPAT_DATA_cfg.py","file_name":"makeMiBiCommonPAT_DATA_cfg.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"467017412","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nfrom collections import deque\n\nclass Solution:\n def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:\n if root is None:\n return []\n answer = []\n q = deque()\n \n q.append((root, 0))\n while q:\n node, level= q.popleft()\n if len(answer) <= level:\n answer.append([node.val])\n else:\n answer[level].append(node.val)\n if node.left is not None:\n q.append((node.left, level + 1))\n if node.right is not None:\n q.append((node.right, level + 1))\n for i 
in range(1, len(answer), 2):\n answer[i].reverse()\n \n return answer\n","sub_path":"leetcode/python/problem_103.py","file_name":"problem_103.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"376122707","text":"'''Модуль обработки файла с Н-ками.\n'''\n\n'''\nimport random\n# Щепотка цифр\nstr1 = '123456789'\n# Щепотка строчных букв\nstr2 = 'qwertyuiopasdfghjklzxcvbnm'\n# Щепотка прописных букв. Готовится преобразованием str2\nв верхний регистр.\nstr3 = str2.upper()\nprint(str3)\n# Выведет: 'QWERTYUIOPASDFGHJKLZXCVBNM'\n\n# Соединяем все строки в одну\nstr4 = str1+str2+str3\nprint(str4)\n# Выведет: '123456789qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM'\n\n# Преобразуем получившуюся строку в список\nls = list(str4)\n# Тщательно перемешиваем список\nrandom.shuffle(ls)\n# Извлекаем из списка 12 произвольных значений\npsw = ''.join([random.choice(ls) for x in range(12)])\n# Пароль готов\nprint(psw)\n# Выведет: '1t9G4YPsQ5L7'\n'''\n\nimport random\nfrom datetime import datetime\nfrom vars import Year\n\n\nMonths = {9: Year, 10: Year, 11: Year, 12: Year, 1: Year + 1, 2: Year + 1, 3: Year + 1, 4: Year + 1, 5: Year + 1}\ndef get_presence(kid):\n '''Возвращает список дат, в которые ребенок отсутствовал'''\n dates = []\n for i in Months:\n for j in range (1, 29):\n if random.randint(1,100) > 97:\n dates.append(datetime(Months[i],i,j))\n return dates\n\nif __name__ == '__main__':\n print (get_presence(1))\n","sub_path":"presence.py","file_name":"presence.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"222795476","text":"# Index - Returns the index of a specified item in the list\n\nnums = [2, 6, 5, 2, 0, 4]\n\nnums.index(2)\n\n# Output\n0\n# 0 index is the position of \"2\" in the list nums\n# This will only return index of the first instance of a value\n\n\nnums.index(5, 1)\n# This wants the index position of the first 5 after the index of 1\n# Output\n2\n\nnums.index(0, 3, 4)\n# This wants the index of 0 between the index of 3 and 4 (inclusive) \n# Output\n4\n\n\n# Count - How many times is an item present in a list\n\nnums.count(2)\n# Output\n2\n\n\n","sub_path":"Lists/list_index_count.py","file_name":"list_index_count.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"85194434","text":"from django.core.paginator import Paginator\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.shortcuts import render, get_object_or_404, redirect\n\nfrom movieBoard.forms import movieForm, reviewForm\nfrom movieBoard.models import movie, review\n\n\n# Create your views here.\n\ndef index(request):\n\n\n return render(request, 'movieBoard/index.html')\n\n\n\ndef list(request):\n movies = movie.objects.all()\n paginator = Paginator(movies, 5)\n\n page = request.GET.get('page')\n items = paginator.get_page(page)\n\n context = {\n 'movies': items\n }\n return render(request, 'movieBoard/list.html', context)\n\n\ndef create(request):\n if request.method == 'POST':\n form = movieForm(request.POST)\n if form.is_valid():\n new_item = form.save()\n return HttpResponseRedirect('/movieBoard/list/')\n form = movieForm()\n return render(request, 'movieBoard/create.html', {'form': form})\n\ndef update(request):\n if request.method == 'POST' and 'id' in request.POST:\n item = get_object_or_404(movie, 
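The trailing loop of the zigzag solution above, which flips every odd-indexed level, can also be written as a single extended-slice assignment over the finished answer list:

answer = [[3], [9, 20], [15, 7]]
# reverse the odd-indexed levels in place
answer[1::2] = [row[::-1] for row in answer[1::2]]
print(answer)   # [[3], [20, 9], [15, 7]]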
pk=request.POST.get('id'))\n form = movieForm(request.POST, instance=item)\n if form.is_valid():\n item = form.save()\n elif 'id' in request.GET:\n item = get_object_or_404(movie, pk=request.GET.get('id'))\n form = movieForm(instance=item)\n return render(request, 'movieBoard/update.html', {'form': form})\n\n return HttpResponseRedirect('/movieBoard/list/')\n\n\ndef detail(request, id):\n if id is not None:\n item = get_object_or_404(movie, pk=id)\n reviews = review.objects.filter(movie=item).all()\n return render(request, 'movieBoard/detail.html', {'item': item, 'reviews': reviews})\n\n return HttpResponseRedirect('/movieBoard/list/')\n\n\ndef delete(request):\n if 'id' in request.GET:\n item = get_object_or_404(movie, pk=request.GET.get('id'))\n item.delete()\n\n return HttpResponseRedirect('/movieBoard/list/')\n\n\ndef review_create(request, movie_id):\n if request.method == 'POST':\n form = reviewForm(request.POST) #\n if form.is_valid():\n new_item = form.save()\n return redirect('movie-detail', id=movie_id)\n\n item = get_object_or_404(movie, pk=movie_id)\n form = reviewForm(initial={'movie': item})\n return render(request, 'movieBoard/review_create.html', {'form': form, 'item': item})\n\n\ndef review_delete(request, movie_id, review_id):\n item = get_object_or_404(review, pk=review_id)\n item.delete()\n\n return redirect('movie-detail', id=movie_id)\n\n\n","sub_path":"movieBoard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"370012323","text":"import boto3\nimport os\nimport json\n\nfrom resources import *\nfrom utils import *\n\nclass TemplateLoader(TemplateGenerator):\n __create_key = object()\n\n @classmethod\n def loads(cls, json_string):\n return TemplateLoader(cls.__create_key,\n json_string,\n CustomMembers=[S3, Git])\n\n @classmethod\n def init(cls, parameters):\n template = TemplateLoader.loads({\n 'Description': 'This template is the result of the merge.',\n 'Resources': {}\n })\n template.parameters = parameters\n template.set_version()\n return template\n\n def __init__(self, create_key, cf_template=None, **kwargs):\n assert(create_key == TemplateLoader.__create_key), \\\n \"TemplateLoader objects must be created using TemplateLoader.loads or TemplateLoader.init\"\n super(TemplateLoader, self).__init__(cf_template, **kwargs)\n\n def __iter__(self):\n fields = list(vars(self).keys())\n for props in fields:\n yield (props, getattr(self, props))\n\n def __getitem__(self, key):\n return getattr(self, key)\n\n def __setitem__(self, key, value):\n setattr(self, key, value)\n\n def __iadd__(self, other):\n for key, value in self:\n if key.startswith('_') or key in ['version', 'transform']:\n continue\n\n if key == 'description':\n self[key] = \"{} {}\".format(\n self[key] if self[key] else \"\", \n other[key] if other[key] else \"\"\n )\n else:\n self[key] = {**self[key], **other[key]}\n\n return self\n\n def is_template_contains_custom_resources(self):\n return any([\n res.is_macro()\n for res in self.resources.values()\n ])\n\n def _evaluate_custom_get(self, args):\n if type(args) == list:\n if Macro.macro_prefix in args[0]:\n args[0] = args[0].replace(Macro.macro_prefix, \"\")\n return [''.join(args[:-1]).replace(\":\", \"\"), args[-1]]\n\n if type(args) == str:\n return args.replace(Macro.macro_prefix, \"\").replace(\":\", \"\")\n\n return args\n\n def _evaluate_custom_ref(self, args):\n if type(args) == list:\n if Macro.macro_prefix in 
args[0]:\n args[0] = args[0].replace(Macro.macro_prefix, \"\")\n return ''.join(args).replace(\":\", \"\")\n if type(args) == str:\n if args.startswith(Macro.macro_prefix):\n return args.replace(Macro.macro_prefix, \"\").replace(\":\", \"\")\n\n return args\n\n def _evaluate_custom_expression(self, data):\n if type(data) == dict and \"Fn::GetAtt\" in data:\n evaluated_params = self._evaluate_custom_expression(data[\"Fn::GetAtt\"])\n data[\"Fn::GetAtt\"] = self._evaluate_custom_get(evaluated_params)\n return data\n\n if type(data) == dict and \"Ref\" in data:\n evaluated_params = self._evaluate_custom_expression(data[\"Ref\"])\n data[\"Ref\"] = self._evaluate_custom_ref(evaluated_params)\n return data\n\n if type(data) == list:\n return [self._evaluate_custom_expression(d) for d in data]\n\n if type(data) == dict:\n return {\n key: self._evaluate_custom_expression(data[key])\n for key in data\n }\n\n return data\n\n def evaluate_custom_expression(self):\n return self.loads(\n self._evaluate_custom_expression(\n self.to_dict()\n )\n )\n\n def _get_template_logical_ids(self):\n logical_ids = []\n for prop, value in self:\n if type(value) == dict:\n logical_ids += list(value.keys())\n return logical_ids\n\n def _translate_custom_reference(self, data):\n if data.startswith(Macro.macro_prefix):\n values = data.split(Macro.macro_separator)\n if values[1] != self.prefix and values[1] in self.logical_ids:\n values.insert(1, self.prefix)\n data = Macro.macro_separator.join(values)\n\n if data in self.logical_ids:\n data = self.prefix + data\n\n return data\n\n def _translate_template(self, data):\n if data in Macro.resources:\n return data\n\n if type(data) == dict:\n return dict(map(self._translate_template, data.items()))\n\n if type(data) == list:\n return list(map(self._translate_template, data))\n\n if type(data) == tuple:\n key, value = data\n if key == 'Ref':\n if value in self.logical_ids:\n value = self.prefix + value\n\n if key == \"Fn::GetAtt\":\n if value[0] in self.logical_ids:\n value[0] = self.prefix + value[0]\n\n if key == \"Export\" and 'Name' in value:\n value[\"Name\"] = {\n \"Fn::Join\": [\"-\", [self.prefix, value[\"Name\"]]]\n }\n\n return (key, self._translate_template(value))\n\n if type(data) == str:\n data = self._translate_custom_reference(data)\n\n return data\n\n def _translate_logical_ids(self, prefix):\n for prop, value in self:\n if type(value) == dict:\n self[prop] = {(prefix + lid):value[lid]for lid in value.keys()}\n\n def translate(self, prefix):\n self.prefix = prefix\n self.logical_ids = self._get_template_logical_ids()\n self._translate_logical_ids(prefix)\n\n json_string = self._translate_template(self.to_dict())\n return self.loads(json_string)\n\n def _set_deletion_policy(self, macro_resource):\n if hasattr(macro_resource, 'DeletionPolicy'):\n value = getattr(macro_resource, 'DeletionPolicy')\n for title in self.resources:\n setattr(self.resources[title], 'DeletionPolicy', value)\n\n def _set_update_replace_policy(self, macro_resource):\n if hasattr(macro_resource, 'UpdateReplacePolicy'):\n value = getattr(macro_resource, 'UpdateReplacePolicy')\n for title in self.resources:\n setattr(self.resources[title], 'UpdateReplacePolicy', value)\n\n def _set_depends_on(self, macro_resource):\n if hasattr(macro_resource, 'DependsOn'):\n for title in self.resources:\n value = list(getattr(macro_resource, 'DependsOn'))\n if hasattr(self.resources[title], 'DependsOn'):\n value += getattr(self.resources[title], 'DependsOn')\n setattr(self.resources[title], 'DependsOn', 
value)\n\n def _translate_depends_on(self, template_collection):\n for title in self.resources:\n current_resource = self.resources[title]\n\n if hasattr(current_resource, 'DependsOn'):\n depends_on = getattr(current_resource, 'DependsOn')\n\n depends_on_macro = [\n d for d in depends_on \n if Macro.macro_prefix in d\n ]\n\n depends_on_aws = [\n d for d in depends_on \n if Macro.macro_prefix not in d\n ]\n\n for depends_on_value in depends_on_macro:\n logical_id = Macro.to_logical_id(depends_on_value)\n\n if template_collection.contains(logical_id):\n macro_resource, external_template = template_collection.get(logical_id)\n for external_title in external_template.resources:\n external_resource = external_template.resources[external_title]\n if external_resource.is_macro():\n depends_on_aws += [Macro.macro_prefix + external_title]\n else:\n depends_on_aws += [external_title]\n else:\n local_resource = self.resources.get(logical_id)\n if not local_resource:\n print(logical_id)\n if local_resource.is_macro():\n depends_on_aws += [depends_on_value]\n else:\n depends_on_aws.append(logical_id)\n\n\n depends_on_aws = list(set(depends_on_aws))\n setattr(self.resources[title], 'DependsOn', depends_on_aws)\n\n def add_templates(self, template_collection):\n\n for logical_id, macro_resource, external_template in template_collection:\n\n external_template._set_deletion_policy(macro_resource)\n\n external_template._set_update_replace_policy(macro_resource)\n\n external_template._set_depends_on(macro_resource)\n\n self += external_template\n\n self._translate_depends_on(template_collection)\n\n\n\n","sub_path":"Template/source/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":8947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"279469901","text":"import numpy as np\n\ndir_list = [ './Data_20_4hyp/', './Data_50_4hyp/', './Data_100_4hyp/' ]\n# Iterate through dir_list using dir_name\n\nfor dir_name in dir_list:\n\n\tfire2 = np.load(str(dir_name + 'Outputs/res_fire2.npy'), mmap_mode=None, allow_pickle=True, fix_imports=True, encoding='ASCII').reshape(300,99)[0:100,:]\n\tfire5 = np.load(str(dir_name + 'Outputs/res_fire5.npy'), mmap_mode=None, allow_pickle=True, fix_imports=True, encoding='ASCII').reshape(300,99)[0:100,:]\n\tfire10 = np.load(str(dir_name + 'Outputs/res_fire10.npy'), mmap_mode=None, allow_pickle=True, fix_imports=True, encoding='ASCII').reshape(300,99)[0:100,:]\n\tfire20 = np.load(str(dir_name + 'Outputs/res_fire20.npy'), mmap_mode=None, allow_pickle=True, fix_imports=True, encoding='ASCII').reshape(300,99)[0:100,:]\n\tfire50 = np.load(str(dir_name + 'Outputs/res_fire50.npy'), mmap_mode=None, allow_pickle=True, fix_imports=True, encoding='ASCII').reshape(300,99)[0:100,:]\n\n\t# intervals - quartiles with open lower bound and closed upper bound\n\t# [0 - 0.25] -- 0.25\n\t# (0.25 - 0.5] -- 0.5\n\t# (0.5 - 0.75] -- 0.75\n\t# (0.75 - 1.0] -- 1.0\n\n\tprob_list_fire2 = []\n\tprob_list_fire5 = []\n\tprob_list_fire10 = []\n\tprob_list_fire20 = []\n\tprob_list_fire50 = []\n\n\tfor i in xrange(100):\n\n\t\t# classes: 1,2,3,4 COUNTS\n\t\tc1_fire2 = 1\n\t\tc2_fire2 = 1\n\t\tc3_fire2 = 1\n\t\tc4_fire2 = 1\n\t\ttotal_fire2 = 4\n\t\tprob_fire2 = []\n\n\t\t# classes: 1,2,3,4 COUNTS\n\t\tc1_fire5 = 1\n\t\tc2_fire5 = 1\n\t\tc3_fire5 = 1\n\t\tc4_fire5 = 1\n\t\ttotal_fire5 = 4\n\t\tprob_fire5 = []\n\n\t\t# classes: 1,2,3,4 COUNTS\n\t\tc1_fire10 = 1\n\t\tc2_fire10 = 1\n\t\tc3_fire10 = 1\n\t\tc4_fire10 = 1\n\t\ttotal_fire10 = 
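_translate_template above rewrites Ref targets while walking nested dicts and lists. A standalone sketch of that prefixing walk; the sample template and function name are illustrative and not taken from the class above.

def prefix_refs(node, logical_ids, prefix):
    if isinstance(node, dict):
        out = {}
        for key, value in node.items():
            if key == 'Ref' and value in logical_ids:
                out[key] = prefix + value      # rename only local logical IDs
            else:
                out[key] = prefix_refs(value, logical_ids, prefix)
        return out
    if isinstance(node, list):
        return [prefix_refs(item, logical_ids, prefix) for item in node]
    return node

tpl = {'Outputs': {'Arn': {'Ref': 'MyBucket'}}}
print(prefix_refs(tpl, {'MyBucket'}, 'Stack1'))
# {'Outputs': {'Arn': {'Ref': 'Stack1MyBucket'}}}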
4\n\t\tprob_fire10 = []\n\n\t\t# classes: 1,2,3,4 COUNTS\n\t\tc1_fire20 = 1\n\t\tc2_fire20 = 1\n\t\tc3_fire20 = 1\n\t\tc4_fire20 = 1\n\t\ttotal_fire20 = 4\n\t\tprob_fire20 = []\n\n\t\t# classes: 1,2,3,4 COUNTS\n\t\tc1_fire50 = 1\n\t\tc2_fire50 = 1\n\t\tc3_fire50 = 1\n\t\tc4_fire50 = 1\n\t\ttotal_fire50 = 4\n\t\tprob_fire50 = []\n\n\n\t\tfor j in xrange(99):\n\n\t\t\tif fire2[i][j] <= 0.25:\n\t\t\t\tfire2[i][j] = 0.25\n\t\t\t\tc1_fire2 += 1\n\t\t\telif fire2[i][j] <= 0.5:\n\t\t\t\tfire2[i][j] = 0.5\n\t\t\t\tc2_fire2 += 1\n\t\t\telif fire2[i][j] <= 0.75:\n\t\t\t\tfire2[i][j] = 0.75\n\t\t\t\tc3_fire2 += 1\n\t\t\telif fire2[i][j] <= 1.0:\n\t\t\t\tfire2[i][j] = 1.0\n\t\t\t\tc4_fire2 += 1\n\t\t\ttotal_fire2 += 1\n\t\t\tprob_fire2.append(float(c1_fire2)/total_fire2)\n\t\t\tprob_fire2.append(float(c2_fire2)/total_fire2)\n\t\t\tprob_fire2.append(float(c3_fire2)/total_fire2)\n\t\t\tprob_fire2.append(float(c4_fire2)/total_fire2)\n\n\t\t\tif fire5[i][j] <= 0.25:\n\t\t\t\tfire5[i][j] = 0.25\n\t\t\t\tc1_fire5 += 1\n\t\t\telif fire5[i][j] <= 0.5:\n\t\t\t\tfire5[i][j] = 0.5\n\t\t\t\tc2_fire5 += 1\n\t\t\telif fire5[i][j] <= 0.75:\n\t\t\t\tfire5[i][j] = 0.75\n\t\t\t\tc3_fire5 += 1\n\t\t\telif fire5[i][j] <= 1.0:\n\t\t\t\tfire5[i][j] = 1.0\n\t\t\t\tc4_fire5 += 1\n\t\t\ttotal_fire5 += 1\n\t\t\tprob_fire5.append(float(c1_fire5)/total_fire5)\n\t\t\tprob_fire5.append(float(c2_fire5)/total_fire5)\n\t\t\tprob_fire5.append(float(c3_fire5)/total_fire5)\n\t\t\tprob_fire5.append(float(c4_fire5)/total_fire5)\n\n\t\t\tif fire10[i][j] <= 0.25:\n\t\t\t\tfire10[i][j] = 0.25\n\t\t\t\tc1_fire10 += 1\n\t\t\telif fire10[i][j] <= 0.5:\n\t\t\t\tfire10[i][j] = 0.5\n\t\t\t\tc2_fire10 += 1\n\t\t\telif fire10[i][j] <= 0.75:\n\t\t\t\tfire10[i][j] = 0.75\n\t\t\t\tc3_fire10 += 1\n\t\t\telif fire10[i][j] <= 1.0:\n\t\t\t\tfire10[i][j] = 1.0\n\t\t\t\tc4_fire10 += 1\n\t\t\ttotal_fire10 += 1\n\t\t\tprob_fire10.append(float(c1_fire10)/total_fire10)\n\t\t\tprob_fire10.append(float(c2_fire10)/total_fire10)\n\t\t\tprob_fire10.append(float(c3_fire10)/total_fire10)\n\t\t\tprob_fire10.append(float(c4_fire10)/total_fire10)\n\n\t\t\tif fire20[i][j] <= 0.25:\n\t\t\t\tfire20[i][j] = 0.25\n\t\t\t\tc1_fire20 += 1\n\t\t\telif fire20[i][j] <= 0.5:\n\t\t\t\tfire20[i][j] = 0.5\n\t\t\t\tc2_fire20 += 1\n\t\t\telif fire20[i][j] <= 0.75:\n\t\t\t\tfire20[i][j] = 0.75\n\t\t\t\tc3_fire20 += 1\n\t\t\telif fire20[i][j] <= 1.0:\n\t\t\t\tfire20[i][j] = 1.0\n\t\t\t\tc4_fire20 += 1\n\t\t\ttotal_fire20 += 1\n\t\t\tprob_fire20.append(float(c1_fire20)/total_fire20)\n\t\t\tprob_fire20.append(float(c2_fire20)/total_fire20)\n\t\t\tprob_fire20.append(float(c3_fire20)/total_fire20)\n\t\t\tprob_fire20.append(float(c4_fire20)/total_fire20)\n\n\t\t\tif fire50[i][j] <= 0.25:\n\t\t\t\tfire50[i][j] = 0.25\n\t\t\t\tc1_fire50 += 1\n\t\t\telif fire50[i][j] <= 0.5:\n\t\t\t\tfire50[i][j] = 0.5\n\t\t\t\tc2_fire50 += 1\n\t\t\telif fire50[i][j] <= 0.75:\n\t\t\t\tfire50[i][j] = 0.75\n\t\t\t\tc3_fire50 += 1\n\t\t\telif fire50[i][j] <= 1.0:\n\t\t\t\tfire50[i][j] = 1.0\n\t\t\t\tc4_fire50 += 1\n\t\t\ttotal_fire50 += 1\n\t\t\tprob_fire50.append(float(c1_fire50)/total_fire50)\n\t\t\tprob_fire50.append(float(c2_fire50)/total_fire50)\n\t\t\tprob_fire50.append(float(c3_fire50)/total_fire50)\n\t\t\tprob_fire50.append(float(c4_fire50)/total_fire50)\n\t\t\t\n\t\tprob_list_fire2.append(prob_fire2)\n\t\tprob_list_fire5.append(prob_fire5)\n\t\tprob_list_fire10.append(prob_fire10)\n\t\tprob_list_fire20.append(prob_fire20)\n\t\tprob_list_fire50.append(prob_fire50)\n\n\tprob_list_fire2 = 
np.array(prob_list_fire2)\n\tprob_list_fire5 = np.array(prob_list_fire5)\n\tprob_list_fire10 = np.array(prob_list_fire10)\n\tprob_list_fire20 = np.array(prob_list_fire20)\n\tprob_list_fire50 = np.array(prob_list_fire50)\n\n\tnp.savetxt(str(dir_name + \"Class_Output_Probs/res_fire2.csv\"), prob_list_fire2, delimiter=\",\")\n\tnp.savetxt(str(dir_name + \"Class_Output_Probs/res_fire5.csv\"), prob_list_fire5, delimiter=\",\")\n\tnp.savetxt(str(dir_name + \"Class_Output_Probs/res_fire10.csv\"), prob_list_fire10, delimiter=\",\")\n\tnp.savetxt(str(dir_name + \"Class_Output_Probs/res_fire20.csv\"), prob_list_fire20, delimiter=\",\")\n\tnp.savetxt(str(dir_name + \"Class_Output_Probs/res_fire50.csv\"), prob_list_fire50, delimiter=\",\")","sub_path":"LSTM_Random/output_probabilities_4hyp.py","file_name":"output_probabilities_4hyp.py","file_ext":"py","file_size_in_byte":5370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"592686765","text":"from django.shortcuts import render\nimport devapp.repository.category as repository\nimport devapp.models as models\n\n\nclass PageData():\n\n def __init__(self):\n self.categories = {\n 'device_category_list': models.Category.objects.all(), }\n\n self.selected_category_detail = {'selected_category': None}\n\n def get(self):\n data_: dict = {}\n data_.update(self.categories)\n data_.update(self.selected_category_detail)\n\n return data_\n\n\npage_data = PageData()\n\n\ndef home(request):\n return render(\n request,\n 'category.html',\n page_data.get()\n )\n\n\ndef detail(request, id):\n\n page_data.selected_category_detail['selected_category'] = \\\n repository.get_category_detail(id)\n\n return render(\n request,\n 'category.html',\n page_data.get()\n )\n","sub_path":"devapp/views/category.py","file_name":"category.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"613448312","text":"\"\"\"\n685. Redundant Connection II\n\nIn this problem, a rooted tree is a directed graph such that, there is exactly one node (the root) for which all other nodes are descendants of this node, plus every node has exactly one parent, except for the root node which has no parents.\n\nThe given input is a directed graph that started as a rooted tree with N nodes (with distinct values 1, 2, ..., N), with one additional directed edge added. The added edge has two different vertices chosen from 1 to N, and was not an edge that already existed.\n\nThe resulting graph is given as a 2D-array of edges. Each element of edges is a pair [u, v] that represents a directed edge connecting nodes u and v, where u is a parent of child v.\n\nReturn an edge that can be removed so that the resulting graph is a rooted tree of N nodes. If there are multiple answers, return the answer that occurs last in the given 2D-array.\n\"\"\"\n\n\n# check if a tree is valid\n# tutorial: https://www.cnblogs.com/grandyang/p/8445733.html\n# Runtime: 64 ms, faster than 59.57% of Python3 online submissions for Redundant Connection II.\n# Memory Usage: 13.3 MB, less than 100.00% of Python3 online submissions for Redundant Connection II.\nclass Solution:\n def findRedundantDirectedConnection(self, edges: List[List[int]]) -> List[int]:\n # if met then gg\n # 30 min didn't get a solution\n # key word: every node has exactly one parent ---> this is a tree\n # so this problem is just to find a valid tree\n # a tree won't be valid under three conditions\n # 1. 
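The per-sequence loops above implement open-lower/closed-upper quartile bucketing with add-one (Laplace) smoothed running class frequencies. The bucketing step can be vectorized with numpy.digitize; a sketch on one short sequence, with illustrative values:

import numpy as np

vals = np.array([0.1, 0.6, 0.3, 0.9])
# right=True gives bins (-inf, 0.25], (0.25, 0.5], (0.5, 0.75], (0.75, inf)
classes = np.digitize(vals, [0.25, 0.5, 0.75], right=True)
counts = np.ones(4)          # Laplace smoothing: every class starts at count 1
total = counts.sum()
for c in classes:
    counts[c] += 1
    total += 1
    print((counts / total).round(3))   # running per-class probabilities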
a node has a link to another node in another subtree\n # one node will have in_degree of two\n # 2. a node has a link to another node in it's parent\n # one node will have in_degree of two and there is a loop\n # 3. a node has a link to root node\n # there will be a loop but all the nodes will have in_degree of one\n \n # actually we just want to return the edge that makes a loop\n # or the second edge that makes a \"two-in-degree\" node\n # so we mark the second edge, and jump the second edge when doing union find \n # to form a loop\n parent = collections.defaultdict(int)\n first, second = None, None\n for i in range(len(edges)):\n node1, node2 = edges[i]\n if parent[node2] != 0:\n first = (parent[node2], node2)\n second = (node1, node2)\n edges[i][1] = 0\n continue\n parent[node2] = node1\n \n graph = [i for i in range(len(edges)+1)]\n for i in range(len(edges)):\n if edges[i][1] == 0:\n continue\n node1, node2 = edges[i]\n father1 = self.find(node1, graph)\n father2 = self.find(node2, graph)\n if father1 == father2:\n if first != None:\n return first\n return (node1, node2)\n graph[father2] = father1\n return second\n \n def find(self, node, graph):\n while node != graph[node]:\n node = graph[node]\n return node\n \n \n \n \n \n \n ","sub_path":"Widen/LC685_Redundant_Connection_II.py","file_name":"LC685_Redundant_Connection_II.py","file_ext":"py","file_size_in_byte":3216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"572639210","text":"#!/usr/bin/python\n\nfrom sklearn import preprocessing\nimport pickle\n\nfrom DBHelper import DBHelper\nfrom helper_functions import *\n\n\ndef parse_mails(min_mails = 100):\n db = DBHelper()\n print(\"\\n### RETRIEVING DATA FROM DATABASE ###\")\n db.execute(\"SET group_concat_max_len = 18446744073709547520\")\n results = db.execute(\"\"\"\n select x.from, x.to, m.txt\n from\n (select z.mailId id, GROUP_CONCAT(y.paragraph separator ' ') txt\n from mail_paragraphs z\n join sha_paragraphs y\n on z.pid = y.id\n where z.deleted = 0\n group by z.mailId) m\n join\n (select a.mailId as id, a.from, a.to\n from from_to_mail a\n join (select aut.from f, aut.to t\n from from_to_mail aut\n where aut.from rlike \"^[A-Za-z0-9.-]+@enron.com$\"\n and aut.to rlike \"^[A-Za-z0-9.-]+@enron.com$\"\n and mailId in\n (select mailId from mail_paragraphs where deleted = 0)\n group by f, t\n having count(*) > {0}) b\n on a.from = b.f\n and a.to = b.t) x\n on m.id = x.id;\n \"\"\".format(min_mails))\n\n authors, recipients, emails, cnt, progress = [], [], [], 0, 0\n\n print(\"\\n### PARSING EMAILS ###\")\n filecount = db.rowcount\n for row in results:\n cnt += 1\n emails.append(stem(row))\n authors.append(row[0])\n recipients.append(row[1])\n\n tmp_progress = int(cnt*100 / filecount)\n if tmp_progress % 10 == 0 and progress != tmp_progress:\n progress = tmp_progress\n print(\"-- {} / {} emails parsed ({} %)\".format(cnt, filecount, progress))\n\n le = preprocessing.LabelEncoder()\n le.fit(authors+recipients)\n enc_authors = le.transform(authors)\n enc_recipients = le.transform(recipients)\n\n print(\"-- {} emails from/to {} addresses parsed\".format(cnt, len(le.classes_)))\n pickle.dump(emails, open(\"./data/word_data.pkl\", \"wb\"))\n pickle.dump(np.array(enc_authors), open(\"./data/authors.pkl\", \"wb\"))\n pickle.dump(np.array(enc_recipients), open(\"./data/recipients.pkl\", \"wb\"))\n pickle.dump(np.array(le.classes_), open(\"./data/classes.pkl\", \"wb\"))\n return emails, enc_authors, enc_recipients, le.classes_\n\nif 
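The iterative find above walks parent pointers without path compression. A sketch of the compressed variant, which flattens the chain on the way back so later lookups stay near constant time; the toy parent array is illustrative.

def find(node, graph):
    root = node
    while root != graph[root]:
        root = graph[root]
    while node != root:                # second pass: point every node at the root
        graph[node], node = root, graph[node]
    return root

graph = [0, 1, 1, 2, 3]                # chain 4 -> 3 -> 2 -> 1
print(find(4, graph), graph)           # 1 [0, 1, 1, 1, 1]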
__name__ == \"__main__\":\n parse_mails()\n","sub_path":"src/parse_emails.py","file_name":"parse_emails.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"17505751","text":" # -*- coding: utf-8 -*-\n\n\nfrom flask import Flask\napp = Flask(__name__)\napp.debug = True\n\nfrom flask import render_template\n\n@app.route('/4/')\n@app.route('/4/wkreport')\n\ndef four(name=None):\n\treturn render_template('4.html', name=name)\n\nif __name__=='__main__':\n\tapp.run()\n","sub_path":"DU_card/python/9_flask_html.py","file_name":"9_flask_html.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"455663618","text":"#! /usr/bin/env python\n\nimport numpy as np\nimport pandas as pd\nfrom pandas import DataFrame, Series\nimport time\nimport sys\nimport os\nimport math\nfrom LogProc import *\nsys.path.append(r'/Users/shenzhouyang/mycode/数据测试/code/LR/libraries')\nimport statsmodels.api as sm\nfrom WoEPreselection import var_check\nfrom WoEPreselection import top\nfrom WoE import OutputFileProc\nfrom sklearn import metrics\nfrom sklearn.linear_model import LogisticRegression\n\n\n\ndef modeling(dev_df, oot_df, preselected_df , forced_var_list, exclude_var_list, modeling_weight, target_var, model_var_lmt, max_coef, max_iter, corr_threshold, dr, seg, verbose = False):\n\t\n\tlog_file, lst_file = AppendLogs()\n\tfor x in log_file, lst_file:\n\t\tprint('\\n', file = x)\n\t\n\t\n\tpreselected_df['force_ind'] = preselected_df.apply(var_check, axis = 1, check_lst = forced_var_list)\n\tpreselected_df['exclude_ind'] = preselected_df.apply(var_check, axis = 1, check_lst = exclude_var_list)\n\t\n\tnorm_var_df = preselected_df[(preselected_df['force_ind'] == 0) & (preselected_df['exclude_ind'] == 0)]\n\tforced_var_df = preselected_df[preselected_df['force_ind'] == 1]\n\t\n\tmdl_iter_log = OutputFileProc(dr + '/model_iteration_log.txt')\n\t\n\tforced_var_lst = list(forced_var_df['woe'])\n\tcurrent_norm_var_lst = list(norm_var_df['woe'])\n\texclude_var_lst = []\n\tstop_signal = False\n\t\n\titer_step = 1\n\twhile not stop_signal and iter_step <= max_iter:\n\t\t\n\t\tprint('\\nIteration Step {0}...'.format(iter_step))\n\t\tstart_time = time.time()\n\t\t\n\t\tprint('\\nIteration Step: {0}'.format(iter_step), file = mdl_iter_log)\n\t\t\n\t\tcurrent_norm_var_lst = list(set(current_norm_var_lst) - set(exclude_var_lst))\n\t\t\n\t\t#Stepwise selection on DEV\n\t\tprint('\\nStepwise on DEV, {0} normal variables, {1} forced:'.format(len(current_norm_var_lst), len(forced_var_lst)), file = mdl_iter_log)\n\t\tstepwise_summary_df = stepwise(df_in = dev_df, target_var = target_var, norm_var_lst = current_norm_var_lst, forced_var_lst = forced_var_lst, unit_weight = modeling_weight, sle = 0.0001, sls = 0.0001, log_file = lst_file)\t\t\n\t\tprint('Stepwise of iteration {0} finished. 
{1} variables are selected.\\n'.format(iter_step, len(stepwise_summary_df) - 1), file = lst_file)\n\t\t#stepwise_summary_df.to_csv(dr + '/stepwise_summary_df_dev_step{0}.csv'.format(iter_step), index = False)\n\n\t\t#For debugging\n\t\t#stepwise_summary_df = pd.read_csv(dr + '/stepwise_summary_df_dev_step{0}.csv'.format(iter_step))\n\t\t\n\t\tstepwise_var_lst = list(stepwise_summary_df[(stepwise_summary_df['coef'].notnull()) & (stepwise_summary_df['Entered'] != 'Intercept')]['Entered'])\n\t\tprint('{0} variables after stepwise on DEV.'.format(len(stepwise_var_lst)), file = mdl_iter_log)\n\t\t\n\n\t\tprint('\\nFit on OOT({0} variables):'.format(len(stepwise_var_lst)), file = mdl_iter_log)\n\t\t\n\n\t\tif len(stepwise_var_lst) > 0:\n\t\t\tmdl, oot_pv_df = lr(oot_df, target_var, stepwise_var_lst, modeling_weight)\n\t\t\toot_pv_df.rename(columns = {'coef': 'coef_oot'}, inplace = True)\n\t\t\tstepwise_summary_df = stepwise_summary_df.merge(oot_pv_df, left_on = 'Entered', right_index = True, how = 'left')\n\t\t\t\n\t\t\n\n\t\t#Coef on Dev > max_coef or < 0, Coef on OOT < 0, Exclude\n\t\tfor idx in stepwise_summary_df.index:\n\t\t\tif stepwise_summary_df.loc[idx, 'Entered'] == 'Intercept':\n\t\t\t\tstepwise_summary_df.loc[idx, 'coef_exclude'] = -1\n\t\t\telif stepwise_summary_df.loc[idx, 'Step'] == 0:\n\t\t\t\tstepwise_summary_df.loc[idx, 'coef_exclude'] = 0\n\t\t\telif stepwise_summary_df.loc[idx, 'coef'] > max_coef or stepwise_summary_df.loc[idx, 'coef'] < 0:\n\t\t\t\tstepwise_summary_df.loc[idx, 'coef_exclude'] = 1\n\t\t\telse:\n\t\t\t\tstepwise_summary_df.loc[idx, 'coef_exclude'] = 0\n\t\t\t\n\t\t\t\n\t\t\tif stepwise_summary_df.loc[idx, 'Entered'] == 'Intercept':\n\t\t\t\tstepwise_summary_df.loc[idx, 'oot_coef_exclude'] = -1\n\t\t\telif stepwise_summary_df.loc[idx, 'Step'] == 0:\n\t\t\t\tstepwise_summary_df.loc[idx, 'oot_coef_exclude'] = 0\n\t\t\telif stepwise_summary_df.loc[idx, 'coef_oot'] < 0:\n\t\t\t\tstepwise_summary_df.loc[idx, 'oot_coef_exclude'] = 1\n\t\t\telse:\n\t\t\t\tstepwise_summary_df.loc[idx, 'oot_coef_exclude'] = 0\n\t\t\t\t\n\t\tdev_coef_exclude_df = stepwise_summary_df[stepwise_summary_df['coef_exclude'] == 1]\n\t\toot_coef_exclude_df = stepwise_summary_df[stepwise_summary_df['oot_coef_exclude'] == 1]\n\t\t\n\t\t\n\t\tfor i, v in enumerate([dev_coef_exclude_df, oot_coef_exclude_df]):\n\t\t\t\n\t\t\tif len(v) > 0:\n\t\t\t\tif i == 0:\n\t\t\t\t\tprint('{0} variables removed due to coef on DEV:'.format(len(dev_coef_exclude_df)), file = mdl_iter_log)\n\t\t\t\telse:\n\t\t\t\t\tprint('{0} variables removed due to coef on OOT:'.format(len(oot_coef_exclude_df)), file = mdl_iter_log)\n\t\t\t\t\n\t\t\t\tfor idx in v.index:\n\t\t\t\t\tprint('{0:5}\\t{1:36} {2:10.6} {3:10.6}'.format(v.loc[idx, 'Step'], v.loc[idx, 'Entered'], v.loc[idx, 'coef'], v.loc[idx, 'coef_oot']), file = mdl_iter_log)\n\t\t\n\t\tprint('{0} unique variables removed after coef constraints.'.format(len(stepwise_summary_df[(stepwise_summary_df['coef_exclude'] == 1) | (stepwise_summary_df['oot_coef_exclude'] == 1)])), file = mdl_iter_log)\n\t\t\n\t\t#Stepwise Removal, remove the latter one\n\t\tstepwise_exclude_df = stepwise_summary_df[stepwise_summary_df['Removed'].notnull()]\n\t\tprint('\\n{0} variables removed due to stepwise removal:'.format(len(stepwise_exclude_df)), file = mdl_iter_log)\n\t\tif len(stepwise_exclude_df) > 0:\n\t\t\tfor idx in stepwise_exclude_df.index:\n\t\t\t\tprint('{0:36}\\t{1:5}'.format(stepwise_exclude_df.loc[idx, 'Entered'], stepwise_exclude_df.loc[idx, 'Step']), file = 
mdl_iter_log)\n\t\t\n\t\tcoef_keep_df = stepwise_summary_df[(stepwise_summary_df['coef_exclude'] == 0) & (stepwise_summary_df['oot_coef_exclude'] == 0) & (stepwise_summary_df['coef'].notnull()) & (stepwise_summary_df['Removed'].isnull())]\n\t\t\n\t\t#Collinearity check\n\t\tprint('\\n{0} variables in collinearity check:'.format(len(coef_keep_df)), file = mdl_iter_log)\n\t\tcorr_var_lst = list(coef_keep_df['Entered']) \n\t\tcorr_df = dev_df[corr_var_lst].corr(method = 'pearson')\n\t\t\n\t\thigh_corr_dict = {}\n\t\thigh_corr_drop_lst = []\n\t\tfor idx in corr_df.index:\n\t\t\thigh_corr_dict[idx] = []\n\t\t\tfor col in corr_df.columns:\n\t\t\t\tif abs(corr_df.loc[idx, col]) > corr_threshold:\n\t\t\t\t\thigh_corr_dict[idx].append(col)\n\t\t\tif len(high_corr_dict[idx]) == 1:\n\t\t\t\tdel high_corr_dict[idx]\n\t\t\telse:\n\t\t\t\thigh_corr_dict[idx].sort(key = lambda t: int(coef_keep_df[coef_keep_df['Entered'] == t]['Step']))\n\t\t\t\tprint('{0}:'.format(idx), end = ' ', file = mdl_iter_log)\n\t\t\t\tfor i, item in enumerate(high_corr_dict[idx]):\n\t\t\t\t\tprint('{0}/{1}'.format(item, int(coef_keep_df[coef_keep_df['Entered'] == item]['Step'])), end = ', ', file = mdl_iter_log)\n\t\t\t\t\tif i > 0:\n\t\t\t\t\t\thigh_corr_drop_lst.append(item)\n\t\t\t\tprint('', file = mdl_iter_log)\n\t\thigh_corr_drop_lst = list(set(high_corr_drop_lst))\n\t\tstepwise_summary_df['high_corr_exclude'] = stepwise_summary_df['Entered'].apply(lambda t: 1 if t in high_corr_drop_lst else 0)\n\t\thigh_corr_exclude_df = stepwise_summary_df[stepwise_summary_df['high_corr_exclude'] == 1]\n\t\tprint('\\n{0} variables removed due to collinearity:'.format(len(high_corr_exclude_df)), file = mdl_iter_log)\n\t\tif len(high_corr_exclude_df) > 0:\n\t\t\tfor idx in high_corr_exclude_df.index:\n\t\t\t\tprint('{0:36}\\t{1:5}'.format(high_corr_exclude_df.loc[idx, 'Entered'], high_corr_exclude_df.loc[idx, 'Step']), file = mdl_iter_log)\n\t\t\n\t\t\n\t\t\n\t\t#Final result of the iteration:\t\n\t\tif verbose:\n\t\t\tstepwise_summary_df.to_csv(dr + '/Feature_selection_iter_{0}.csv'.format(iter_step), index = False)\n\t\texclude_var_lst = list(stepwise_summary_df[(stepwise_summary_df['coef_exclude'] == 1) | (stepwise_summary_df['oot_coef_exclude'] == 1) | (stepwise_summary_df['high_corr_exclude'] == 1) | (stepwise_summary_df['Removed'].notnull())]['Entered'])\n\t\tprint('\\nTotal {0} unique variables removed in iteration {1}:'.format(len(exclude_var_lst), iter_step), file = mdl_iter_log)\n\t\tif len(exclude_var_lst) > 0:\n\t\t\tfor var in exclude_var_lst:\n\t\t\t\tprint(var, file = mdl_iter_log)\n\t\telse:\n\t\t\tprint('\\nNo variables removed in iteration {0}. Iteration terminated.'.format(iter_step), file = mdl_iter_log)\n\t\t\tstop_signal = True\n\t\t\n\t\titer_step += 1\n\t\tif iter_step > max_iter:\n\t\t\tprint('\\nMax iteration reached. 
Iteration terminated.', file = mdl_iter_log)\n\t\t\n\t\tprint('Time Cost: %.2fs'%(time.time() - start_time))\n\n\tmdl_iter_log.close()\n\t\n\t#Output the final scorecard\n\t#For Debugging\n\t#stepwise_summary_df = pd.read_csv(dr + '/Feature_selection_iter_14.csv')\n\t\n\tvar_lst = list(stepwise_summary_df[stepwise_summary_df['Entered'] != 'Intercept']['Entered'][:model_var_lmt])\n\tstepwise_summary_df[stepwise_summary_df['Entered'] != 'Intercept'].to_csv(dr + '/final_model_candidate_vars.csv', index = False)\n\tfinal_scorecard_df = final_model(dev_df, oot_df, var_lst, target_var, modeling_weight, dr, lst_file)\n\tfinal_scorecard_df = final_scorecard_df.merge(stepwise_summary_df[['Step', 'Entered']], left_index = True, right_on = 'Entered', how = 'left')\n\t\n\tupdate_psi_df = update_psi(dev_df, oot_df, modeling_weight, target_var, final_scorecard_df)\n\t\n\tpreselected_df = preselected_df.merge(update_psi_df, left_on = 'woe', right_index = True, how = 'left')\n\tfinal_scorecard_df = final_scorecard_df.merge(preselected_df, left_on = 'Entered', right_on = 'woe', how = 'left')\n\tfinal_scorecard_df.rename(columns = {'Entered': 'variable'}, inplace = True)\n\n\tfinal_scorecard_df = final_scorecard_df[['Step', 'variable', 'varname', 'woe', 'label', 'type', 'KS_dev', 'IV_dev', 'KS_oot', 'IV_oot', 'CORR_dev', 'CORR_oot','PSI_pTot', 'PSI_BR','force_ind', 'exclude_ind', 'coef']]\n\tfinal_scorecard_df.to_csv(dr + '/model_var_info_s{0}.csv'.format(seg), index = False)\n\t\n\tcorr_df = dev_df[var_lst].corr(method = 'pearson')\n\tcorr_df.to_csv(dr + '/final_corr_matrix_s{0}.csv'.format(seg))\n\ndef update_psi(dev_df, oot_df, modeling_weight, target_var, final_scorecard_df):\n\t\n\teps = 1.0e-38\n\tmdl_var_lst = list(final_scorecard_df[final_scorecard_df['Step'] >= 0]['Entered'])\n\tpsi_dict = {}\n\tfor var in mdl_var_lst:\n\t\tstats_dev = cal_stats(dev_df, var, modeling_weight, target_var)\n\t\tstats_oot = cal_stats(oot_df, var, modeling_weight, target_var)\t\t\n\t\tstats_dev = stats_dev.merge(stats_oot[[var, 'ptot', 'br']], on = var, how = 'left')\n\t\t\n\t\tstats_dev['PSI_pTot'] = (stats_dev['ptot_x'] - stats_dev['ptot_y']) * ((stats_dev['ptot_x'] + eps)/(stats_dev['ptot_y'] + eps)).apply(math.log)\n\t\tstats_dev['PSI_BR'] = (stats_dev['br_x'] - stats_dev['br_y']) * ((stats_dev['br_x'] + eps)/(stats_dev['br_y'] + eps)).apply(math.log)\n\t\t\n\t\tpsi_dict[var] = [stats_dev['PSI_pTot'].sum(), stats_dev['PSI_BR'].sum()]\n\n\tpsi_df = pd.DataFrame(psi_dict).T\n\tpsi_df.columns = ['PSI_pTot', 'PSI_BR']\n\treturn psi_df\n\ndef cal_stats(df_in, var, modeling_weight, target_var):\n\tdf_s = df_in.groupby(by = var, as_index = False)[[modeling_weight, target_var]].sum()\n\tdf_s['br'] = 1.0 * df_s[target_var]/df_s[modeling_weight]\n\tdf_s['ptot'] = df_s[modeling_weight]/df_s[modeling_weight].sum()\n\treturn df_s\n\ndef lr(df_in, target_var, variable_lst, weight_var):\n\tmdl = LogisticRegression(random_state=0, solver='lbfgs', multi_class='multinomial').fit(df_in[variable_lst], df_in[target_var], df_in[weight_var])\n\tcoef_df = pd.DataFrame(mdl.coef_.tolist()[0],columns = ['coef'])\n\tcoef_df.index = range(1,len(coef_df) + 1)\n\tcoef_df.loc[0] = mdl.intercept_.tolist()[0] \n\tcoef_df.sort_index(inplace = True)\n\tcoef_df.index = ['Intercept'] + variable_lst\n\treturn mdl, coef_df\n\t\n\ndef final_model(dev_df, oot_df, var_lst, target_var, modeling_weight, tgt, log_file):\n\t\n\tmdl, coef_df = lr(dev_df, target_var, var_lst, modeling_weight)\n\n\tprint('\\nSummary of the final model:', file = 
log_file)\n\tprint(coef_df, file = log_file)\n\t\n\t#KS, AUC\n\tfor i, df in enumerate([dev_df, oot_df]):\n\t\tif i == 0:\n\t\t\tprint('Performance on DEV:', file = log_file)\n\t\telse:\n\t\t\tprint('Performance on OOT:', file = log_file)\n\t\t\n\t\tdf['prob_score'] = list(pd.DataFrame(mdl.predict_proba(df[var_lst]))[1])\n\t\tks, auc = KS_AUC(df['prob_score'], df[target_var], df[modeling_weight])\n\t\tprint('KS: {:.6}'.format(ks), file = log_file)\n\t\tprint('AUC: {:.6}'.format(auc), file = log_file)\n\t\n\tpd.to_pickle(mdl, tgt + '/lr_model_obj.pickle')\n\t\n\t#Score alignment\n\talign(dev_df[target_var], dev_df[modeling_weight], dev_df['prob_score'], tgt + '/prob_score_alignment.py', base_point = 600, scale = 60, base_odds = 35)\n\t\n\treturn coef_df\n\ndef KS_AUC(y_score, y_true, x_weight):\n\tfpr,tpr,thresholds = metrics.roc_curve(y_true = y_true, y_score = y_score, sample_weight = x_weight)\n\tks_s = tpr - fpr\n\tks = ks_s.max()\n\tauc = metrics.roc_auc_score(y_true = y_true, y_score = y_score, sample_weight = x_weight)\n\treturn ks, auc\n\t\n\n\t\ndef align(target_vector, weight_vector, score_vector, scorecard_py, base_point = 600, scale = 60, base_odds = 35):\n\t\n\tscore_vector = (-1) * (1/score_vector - 1).apply(np.log)\n\tX = np.array(score_vector)\n\tclf = LogisticRegression(random_state=0, solver='lbfgs', multi_class='multinomial').fit(X.reshape(-1, 1), target_vector, weight_vector)\n\t\n\tm = np.log(2)/(1.0*scale)\n\tb = np.log(base_odds) - m*base_point\n\tm1 = float(clf.coef_)/m\n\tm0 = (float(clf.intercept_) - b)/m\n\toutf = OutputFileProc(scorecard_py)\n\tprint(\"import numpy as np\\n\", file = outf)\n\tprint(\"def score_align(prob_scr):\\n\", file = outf)\n\tprint(\"\treturn ({0:.4}) + ({1:.4}) * np.log(1.0/prob_scr - 1)\\n\".format(m0, m1), file = outf)\n\toutf.close()\n\t\t\ndef stepwise(df_in, target_var, norm_var_lst, forced_var_lst, unit_weight, sle, sls, log_file):\n\t\n\tincluded = []\n\tstepwise_summary_df = pd.DataFrame([], columns = ['Step', 'Entered', 'SLE', 'Removed', 'SLS'])\n\tstep = 0\n\t\n\tfor i, var in enumerate(forced_var_lst):\n\t\tstepwise_summary_df.loc[i] = [step, var, np.nan, np.nan, np.nan]\n\t\n\twhile True:\n\t\tchanged = False\n\t\t#Forward\n\t\tcurrent_candidate_lst = list(set(norm_var_lst) - set(included))\n\t\tnew_pval_series = pd.Series(index = current_candidate_lst)\n\t\tfor new_var in current_candidate_lst:\n\t\t\tX = df_in[included + [new_var] + forced_var_lst]\n\t\t\tX.insert(0, 'Intercept', 1)\n\t\t\tmodel = sm.OLS(df_in[target_var], X).fit()\n\t\t\tnew_pval_series[new_var] = model.pvalues[new_var]\n\t\tbest_pval = new_pval_series.min()\n\t\tif best_pval <= sle:\n\t\t\tbest_feature = new_pval_series.index[new_pval_series.argmin()]\n\t\t\tincluded.append(best_feature)\n\t\t\tchanged = True\n\t\t\tprint('Add {:30} with p-value {:.6}'.format(best_feature, best_pval), file = log_file)\n\t\t\tstep += 1\n\t\t\tstepwise_summary_df.loc[step + len(forced_var_lst) - 1] = [step, best_feature, best_pval, np.nan, np.nan]\n\n\t\t\n\t\t#Backward\n\t\tX = df_in[included + forced_var_lst]\n\t\tX.insert(0, 'Intercept', 1)\n\t\tmodel = sm.OLS(df_in[target_var], X).fit()\n\t\t# use all coefs except intercept\n\t\tpvalues_series = model.pvalues.iloc[1:]\n\t\t#Don't remove forced variables due to p-values\n\t\tpvalues_series = pvalues_series[~pvalues_series.index.isin(forced_var_lst)]\n\t\tworst_pval = pvalues_series.max()\n\t\tif worst_pval > sls:\n\t\t\tworst_feature = 
pvalues_series.index[pvalues_series.argmax()]\n\t\t\tincluded.remove(worst_feature)\n\t\t\tchanged = True\n\t\t\tprint('Drop {:30} with p-value {:.6}'.format(worst_feature, worst_pval), file = log_file)\n\t\t\tstepwise_summary_df.loc[step + len(forced_var_lst) - 1, 'Removed'] = worst_feature\n\t\t\tstepwise_summary_df.loc[step + len(forced_var_lst) - 1, 'SLS'] = worst_pval\n\t\t\tif best_feature == worst_feature:\n\t\t\t\tprint('The latest added feature is removed. Stepwise is terminated.', file = log_file)\n\t\t\t\tbreak\n\t\tif not changed:\n\t\t\tbreak\n\t\n\t#Finally fit the model\n\tincluded = set(stepwise_summary_df[stepwise_summary_df['Entered'] != 'Intercept']['Entered'])\n\tremoved = set(stepwise_summary_df[stepwise_summary_df['Removed'].notnull()]['Removed'])\n\t\n\tfinal_var_lst = list(included - removed)\n\t\n\tmdl, coef_df = lr(df_in, target_var, final_var_lst, unit_weight)\n\t\n\t\n\tstepwise_summary_df = coef_df.merge(stepwise_summary_df, left_index = True, right_on = 'Entered', how = 'outer')\n\tstepwise_summary_df['Step'] = stepwise_summary_df['Step'].fillna(-1)\n\tstepwise_summary_df.sort_values(by = 'Step', ascending = True, inplace = True)\n\tstepwise_summary_df.index = range(len(stepwise_summary_df))\n\tstepwise_summary_df = stepwise_summary_df[['Step', 'Entered', 'SLE', 'Removed', 'SLS', 'coef']] \n\t\n\t\n\treturn stepwise_summary_df\n\n\t\n\t\t\n","sub_path":"LR/libraries/modeling.py","file_name":"modeling.py","file_ext":"py","file_size_in_byte":15987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"499244541","text":"def scrape():\n file_name = str(raw_input(\"file> \"))\n keyword = str(raw_input(\"keyword> \"))\n f = open(file_name, \"r\")\n lines = f.readlines()\n new_lines = []\n f.close()\n for x in lines:\n print(x)\n if keyword in x:\n new_lines.append(x)\n print(\"\\tmatch\")\n f = open(file_name, \"w\")\n for x in new_lines:\n f.write(x + \"\\n\")\n f.close()\n print(\"File now contains ONLY lines containing the specified keyword.\")\n","sub_path":"WebHead/filescraper.py","file_name":"filescraper.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"138796206","text":"####################\n# ES-DOC CIM Questionnaire\n# Copyright (c) 2016 ES-DOC. 
All rights reserved.\n#\n# University of Colorado, Boulder\n# http://cires.colorado.edu/\n#\n# This project is distributed according to the terms of the MIT license [http://www.opensource.org/licenses/MIT].\n####################\n\n__author__ = 'allyn.treshansky'\n\nfrom django.db import models\nfrom django.db.models.fields import FieldDoesNotExist\nfrom django.core.exceptions import ValidationError\nfrom django.contrib.auth.models import User\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom uuid import uuid4, UUID as generate_uuid\nfrom collections import OrderedDict\n\nfrom Q.questionnaire import APP_LABEL, q_logger\nfrom Q.questionnaire.q_fields import QPropertyTypes, QAtomicPropertyTypes, allow_unsaved_fk, QUnsavedRelatedManager\nfrom Q.questionnaire.q_utils import EnumeratedType, EnumeratedTypeList, BAD_CHARS_LIST, validate_no_bad_chars, validate_no_bad_suggestion_chars, validate_no_spaces, validate_no_reserved_words, validate_no_profanities, pretty_string, find_in_sequence, serialize_model_to_dict\nfrom Q.questionnaire.q_constants import *\n\n#############\n# constants #\n#############\n\n# these fields are all handled behind-the-scenes\n# there is no point passing them around to serializers or forms\nQCUSTOMIZATION_NON_EDITABLE_FIELDS = [\"guid\", \"created\", \"modified\", ]\n\n###############\n# global vars #\n###############\n\nclass CustomizationType(EnumeratedType):\n\n def __unicode__(self):\n return u\"%s\" % (self.get_name())\n\nCustomizationTypes = EnumeratedTypeList([\n CustomizationType(\"MODEL\", \"Model Customization\"),\n CustomizationType(\"CATEGORY\", \"Category Customization\"),\n CustomizationType(\"PROPERTY\", \"Property Customization\"),\n])\n\n##############\n# global fns #\n##############\n\ndef get_new_customizations(project=None, ontology=None, model_proxy=None, **kwargs):\n\n key = kwargs.pop(\"key\")\n customizations = kwargs.pop(\"customizations\", {})\n\n # TODO: CHANGE THIS TO USE GUIDS INSTEAD OF NAMES FOR KEYS\n # TODO: TRY TO REWRITE THIS TO USE \"prefix\" AGAIN (INSTEAD OF EXPLICIT \"key\")\n\n model_proxy_key = key\n if model_proxy_key not in customizations:\n model_customization = QModelCustomization(\n project=project,\n ontology=ontology,\n proxy=model_proxy,\n )\n model_customization.reset()\n customizations[model_proxy_key] = model_customization\n else:\n model_customization = customizations[model_proxy_key]\n\n category_customizations = []\n for catgegory_proxy in ontology.categorization.category_proxies.all():\n category_proxy_key = \"{0}.{1}\".format(model_proxy_key, catgegory_proxy.name)\n with allow_unsaved_fk(QCategoryCustomization, [\"model_customization\"]):\n if category_proxy_key not in customizations:\n category_customization = QCategoryCustomization(\n proxy=catgegory_proxy,\n model_customization=model_customization,\n )\n category_customization.reset()\n customizations[category_proxy_key] = category_customization\n else:\n category_customization = customizations[category_proxy_key]\n category_customizations.append(category_customization)\n # assert category_customizations[-1].proxy == ontology.categorization.get_uncategorized_category_proxy()\n model_customization.category_customizations(manager=\"allow_unsaved_category_customizations_manager\").add_potentially_unsaved(*category_customizations)\n\n property_customizations = []\n for property_proxy in model_proxy.property_proxies.all():\n property_proxy_key = \"{0}.{1}\".format(model_proxy_key, property_proxy.name)\n with allow_unsaved_fk(QPropertyCustomization, 
[\"model_customization\", \"category\"]):\n # close this context manager before using the custom related manager\n # (too much hackery at once)\n if property_proxy_key not in customizations:\n category_customization = find_in_sequence(\n lambda c: c.proxy.has_property(property_proxy),\n category_customizations\n )\n property_customization = QPropertyCustomization(\n proxy=property_proxy,\n model_customization=model_customization,\n category=category_customization,\n )\n property_customization.reset()\n category_customization.property_customizations(manager=\"allow_unsaved_categories_manager\").add_potentially_unsaved(property_customization)\n customizations[property_proxy_key] = property_customization\n else:\n property_customization = customizations[property_proxy_key]\n property_customizations.append(property_customization)\n\n ############################\n # here begins the icky bit #\n ############################\n\n if property_customization.use_subforms():\n subform_key = \"{0}.{1}\".format(model_proxy.name, property_proxy.name) # this property in this model (only 1 level deep)\n target_model_customizations = []\n for target_model_proxy in property_proxy.relationship_target_models.all():\n target_model_proxy_key = \"{0}.{1}\".format(subform_key, target_model_proxy.name)\n if target_model_proxy_key not in customizations:\n target_model_customization = get_new_customizations(\n project=project,\n ontology=ontology,\n model_proxy=target_model_proxy,\n key=target_model_proxy_key,\n customizations=customizations,\n )\n else:\n target_model_customization = customizations[target_model_proxy_key]\n\n target_model_customizations.append(target_model_customization)\n property_customization.relationship_target_model_customizations(manager=\"allow_unsaved_relationship_target_model_customizations_manager\").add_potentially_unsaved(*target_model_customizations)\n\n ##########################\n # here ends the icky bit #\n ##########################\n\n model_customization.property_customizations(manager=\"allow_unsaved_property_customizations_manager\").add_potentially_unsaved(*property_customizations)\n\n return customizations[model_proxy_key]\n\n\ndef get_existing_customizations(project=None, ontology=None, model_proxy=None, customization_name=None, customization_id=None):\n \"\"\"\n can get an existing customization either via id or name\n :param project:\n :param ontology:\n :param model_proxy:\n :param customization_name:\n :param customization_id:\n :return:\n \"\"\"\n\n # this fn will throw a \"QModelCustomization.DoesNotExist\" error if the name is wrong;\n # it is up to the calling method to catch that and do something sensible\n\n if not customization_id:\n model_customization = QModelCustomization.objects.get(\n ontology=ontology,\n proxy=model_proxy,\n project=project,\n name__iexact=customization_name,\n )\n else:\n model_customization = QModelCustomization.objects.get(pk=customization_id)\n assert model_customization.ontology == ontology\n assert model_customization.proxy == model_proxy\n assert model_customization.project == project\n if customization_name:\n assert model_customization.name.lower() == customization_name.lower()\n\n return model_customization\n\ndef serialize_new_customizations(current_model_customization, **kwargs):\n \"\"\"\n need a special fn to cope w/ this\n b/c getting DRF to work w/ potentially infinite recursion is impossible\n it is likely that these customizations will need to be serialized before they have been saved\n\n therefore the m2m fields will not 
yet exist in the db\n the workflow goes:\n * get_new_customizations where calls to create are wrapped in \"allow_unsaved_fk\" & custom \"QUnsavedRelatedManager\" are used\n * those customizations get cached in the current session\n * AJAX calls to the RESTful API access those cached customizations\n * which needs to be serialized via this fn and then passed as data to QModelCustomizationSerializer\n :param customizations\n :return: OrderedDict\n \"\"\"\n previously_serialized_customizations = kwargs.pop(\"previously_serialized_customizations\", {})\n prefix = kwargs.pop(\"prefix\", None)\n\n # get model customization stuff...\n model_customization_key = current_model_customization.get_fully_qualified_key(prefix=prefix)\n if model_customization_key not in previously_serialized_customizations:\n model_customization_serialization = serialize_model_to_dict(\n current_model_customization,\n include={\n \"key\": current_model_customization.get_key(),\n \"proxy_name\": str(current_model_customization.proxy),\n \"display_detail\": False,\n },\n exclude=QCUSTOMIZATION_NON_EDITABLE_FIELDS + [\"synchronization\", ]\n )\n previously_serialized_customizations[model_customization_key] = model_customization_serialization\n else:\n model_customization_serialization = previously_serialized_customizations[model_customization_key]\n\n # and the categories stuff...\n category_customization_serializations = []\n for category_customization in current_model_customization.category_customizations(manager=\"allow_unsaved_category_customizations_manager\").all():\n category_customization_key = category_customization.get_fully_qualified_key(prefix=prefix)\n if category_customization_key not in previously_serialized_customizations:\n category_customization_serialization = serialize_model_to_dict(\n category_customization,\n include={\n \"key\": category_customization.get_key(),\n \"num_properties\": category_customization.property_customizations(manager=\"allow_unsaved_categories_manager\").count(),\n \"proxy_name\": str(category_customization.proxy),\n \"display_properties\": True,\n \"display_detail\": False,\n },\n exclude=QCUSTOMIZATION_NON_EDITABLE_FIELDS\n )\n previously_serialized_customizations[category_customization_key] = category_customization_serialization\n else:\n category_customization_serialization = previously_serialized_customizations[category_customization_key]\n category_customization_serializations.append(category_customization_serialization)\n\n # and the properties stuff...\n property_customization_serializations = []\n for property_customization in current_model_customization.property_customizations(manager=\"allow_unsaved_property_customizations_manager\").all():\n property_customization_key = property_customization.get_fully_qualified_key(prefix=prefix)\n if property_customization_key not in previously_serialized_customizations:\n use_subforms = property_customization.use_subforms()\n category_customization = property_customization.category\n property_customization_serialization = serialize_model_to_dict(\n property_customization,\n include={\n \"key\": property_customization.get_key(),\n \"category_key\": category_customization.get_key(),\n \"proxy_name\": str(property_customization.proxy),\n \"display_detail\": False,\n # \"enumeration_choices\": standard_property_customization.get_enumeration_choices_value(),\n \"use_subforms\": use_subforms,\n },\n exclude=QCUSTOMIZATION_NON_EDITABLE_FIELDS\n )\n\n ############################\n # here begins the icky bit #\n 
############################\n\n subform_customizations_serializations = []\n if use_subforms:\n subform_prefix = property_customization.get_fully_qualified_key() # note I do _not_ pass the prefix kwarg\n for subform_model_customization in property_customization.relationship_target_model_customizations(manager=\"allow_unsaved_relationship_target_model_customizations_manager\").all():\n subform_model_customization_key = subform_model_customization.get_fully_qualified_key(prefix=subform_prefix)\n if subform_model_customization_key not in previously_serialized_customizations:\n subform_customizations_serialization = serialize_new_customizations(\n subform_model_customization,\n previously_serialized_customizations=previously_serialized_customizations,\n prefix=subform_prefix,\n )\n previously_serialized_customizations[subform_model_customization_key] = subform_customizations_serialization\n else:\n subform_customizations_serialization = previously_serialized_customizations[subform_model_customization_key]\n subform_customizations_serializations.append(subform_customizations_serialization)\n property_customization_serialization[\"relationship_target_model_customizations\"] = subform_customizations_serializations\n\n ##########################\n # here ends the icky bit #\n ##########################\n\n else:\n property_customization_serialization = previously_serialized_customizations[property_customization_key]\n property_customization_serializations.append(property_customization_serialization)\n\n # and put it all together...\n serialization = OrderedDict(model_customization_serialization)\n serialization[\"categories\"] = category_customization_serializations\n serialization[\"properties\"] = property_customization_serializations\n\n return serialization\n\ndef get_model_customization_by_fn(fn, current_model_customization):\n \"\"\"\n returns the 1st matching QModelCustomization in the customization hierarchy...\n if the top-level model_customization matches, this is simple\n if the cusotmizations are existing (ie: already saved), this is straightforward\n if the customizations are new (ie: not saved), this is icky\n :param fn: fn to use to find customization\n :param customizations: customizations to check\n :return: QModelCustomization\n \"\"\"\n # RECALL THAT AS OF v0.16.0.0 INSTEAD OF PASSING A COMPLEX NESTED DICTIONARY\n # I AM JUST PASSING A SINGLE model_customization INSTANCE W/ ALL ITS M2M FIELDS ALREADY COMPLETE\n if fn(current_model_customization):\n return current_model_customization\n\n if current_model_customization.is_new():\n return get_customization_by_fn_recusively(\n fn,\n current_model_customization.property_customizations(manager=\"allow_unsaved_property_customizations_manager\").all(),\n CustomizationTypes.MODEL,\n )\n else: # current_model_customization.is_existing()\n return find_in_sequence(\n fn,\n QModelCustomization.objects.filter(\n project=current_model_customization.project,\n name=current_model_customization.name,\n )\n )\n\n\ndef get_category_customization_by_fn(fn, current_model_customization):\n \"\"\"\n returns the 1st matching QCategoryCustomization in the customization hierarchy...\n if the top-level category_customization matches, this is simple\n if the cusotmizations are existing (ie: already saved), this is straightforward\n if the customizations are new (ie: not saved), this is icky\n :param fn: fn to use to find customization\n :param current_model_customization: customizations to check\n :return: QCategoryCustomization\n \"\"\"\n # RECALL THAT AS 
OF v0.16.0.0 INSTEAD OF PASSING A COMPLEX NESTED DICTIONARY\n # I AM JUST PASSING A SINGLE model_customization INSTANCE W/ ALL ITS M2M FIELDS ALREADY COMPLETE\n\n category_customization = find_in_sequence(\n fn,\n current_model_customization.category_customizations(manager=\"allow_unsaved_category_customizations_manager\").all()\n )\n if category_customization:\n return category_customization\n\n if current_model_customization.is_new():\n return get_customization_by_fn_recusively(\n fn,\n current_model_customization.property_customizations(manager=\"allow_unsaved_property_customizations_manager\").all(),\n CustomizationTypes.CATEGORY,\n )\n else: # current_model_customization.is_existing()\n return find_in_sequence(\n fn,\n QCategoryCustomization.objects.filter(\n model_customization__project=current_model_customization.project,\n name=current_model_customization.name,\n )\n )\n\n\ndef get_property_customization_by_fn(fn, current_model_customization):\n \"\"\"\n returns the 1st matching QPropertyCustomization in the customization hierarchy...\n if the top-level property_customization matches, this is simple\n if the cusotmizations are existing (ie: already saved), this is straightforward\n if the customizations are new (ie: not saved), this is icky\n :param fn: fn to use to find customization\n :param current_model_customization: customizations to check\n :return: QPropertyCustomization\n \"\"\"\n # RECALL THAT AS OF v0.16.0.0 INSTEAD OF PASSING A COMPLEX NESTED DICTIONARY\n # I AM JUST PASSING A SINGLE model_customization INSTANCE W/ ALL ITS M2M FIELDS ALREADY COMPLETE\n\n property_customization = find_in_sequence(\n fn,\n current_model_customization.property_customizations(manager=\"allow_unsaved_property_customizations_manager\").all()\n )\n if property_customization:\n return property_customization\n\n if current_model_customization.is_new():\n return get_customization_by_fn_recusively(\n fn,\n current_model_customization.property_customizations(manager=\"allow_unsaved_property_customizations_manager\").all(),\n CustomizationTypes.PROPERTY,\n )\n else: # current_model_customization.is_existing()\n return find_in_sequence(\n fn,\n QPropertyCustomization.objects.filter(\n model_customization__project=current_model_customization.project,\n name=current_model_customization.name,\n )\n )\n\n\ndef get_customization_by_fn_recusively(fn, current_property_customizations, customization_type, **kwargs):\n \"\"\"\n used in conjunction w/ the \"get__customization_by_fn\" fns above\n recursively goes through the customization hierarchy (of unsaved customizations)\n returns the first customization that returns True for fn\n :param fn: fn to call\n :param property_customizations: the property customizations from which to begin checking\n :param customization_type: the type of customization to check\n :return: either QModelCustomization or QCategoryCustomization or QPropertyCustomization or None\n \"\"\"\n\n previously_recursed_customizations = kwargs.pop(\"previously_recursed_customizations\", set())\n\n for property_customization in current_property_customizations:\n property_customization_key = property_customization.get_key()\n if property_customization_key not in previously_recursed_customizations:\n if customization_type == CustomizationTypes.PROPERTY and fn(property_customization):\n return property_customization\n\n if property_customization.use_subforms():\n target_model_customizations = 
property_customization.relationship_target_model_customizations(manager=\"allow_unsaved_relationship_target_model_customizations_manager\").all()\n for target_model_customization in target_model_customizations:\n\n if customization_type == CustomizationTypes.MODEL:\n if fn(target_model_customization):\n return target_model_customization\n\n elif customization_type == CustomizationTypes.CATEGORY:\n target_category_customization = find_in_sequence(\n fn,\n target_model_customization.category_customizations(manager=\"allow_unsaved_category_customizations_manager\").all()\n\n )\n if target_category_customization:\n return target_category_customization\n\n else: # customization_type == CustomizationTypes.PROPERTY\n pass # (this will already have been checked above)\n\n previously_recursed_customizations.add(property_customization_key) # only tracking property_customizations b/c those are the only recursive things\n matching_customization = get_customization_by_fn_recusively(\n fn,\n target_model_customization.property_customizations(manager=\"allow_unsaved_property_customizations_manager\").all(),\n customization_type,\n previously_recursed_customizations=previously_recursed_customizations,\n )\n if matching_customization:\n return matching_customization\n\ndef recurse_through_customizations(fn, current_model_customization, customization_types, **kwargs):\n \"\"\"\n recursively applies fn recursively to all customization types\n :param fn: fn to call\n :param current_model_customization: the model customization from which to begin checking\n :param customization_type: the type of customizations to check\n :return: either QModelCustomization or QCategoryCustomization or QPropertyCustomization or None\n \"\"\"\n\n previously_recursed_customizations = kwargs.pop(\"previously_recursed_customizations\", set())\n\n if CustomizationTypes.MODEL in customization_types:\n fn(current_model_customization)\n\n for category_customization in current_model_customization.category_customizations(manager=\"allow_unsaved_category_customizations_manager\").all():\n if CustomizationTypes.CATEGORY in customization_types:\n fn(category_customization)\n\n for property_customization in current_model_customization.property_customizations(manager=\"allow_unsaved_property_customizations_manager\").all():\n property_customization_key = property_customization.get_key()\n if property_customization_key not in previously_recursed_customizations:\n if CustomizationTypes.PROPERTY in customization_types:\n fn(property_customization)\n\n if property_customization.use_subforms():\n target_model_customizations = property_customization.relationship_target_model_customizations(manager=\"allow_unsaved_relationship_target_model_customizations_manager\").all()\n for target_model_customization in target_model_customizations:\n previously_recursed_customizations.add(property_customization_key) # only tracking property_customizations b/c those are the only recursive things\n recurse_through_customizations(\n fn,\n target_model_customization,\n customization_types,\n previously_recursed_customizations=previously_recursed_customizations,\n )\n\ndef set_name(model_customization, new_name):\n recurse_through_customizations(\n lambda c: c.set_name(new_name),\n model_customization,\n [CustomizationTypes.MODEL, CustomizationTypes.CATEGORY, CustomizationTypes.PROPERTY],\n )\n\n\ndef set_owner(model_customization, new_owner):\n recurse_through_customizations(\n lambda c: c.set_owner(new_owner),\n model_customization,\n [CustomizationTypes.MODEL],\n 
)\n\n\ndef set_shared_owner(model_customization, new_owner):\n recurse_through_customizations(\n lambda c: c.set_shared_owner(new_owner),\n model_customization,\n [CustomizationTypes.MODEL],\n )\n\n\n##############\n# base class #\n##############\n\nclass QCustomization(models.Model):\n\n class Meta:\n app_label = APP_LABEL\n abstract = True\n verbose_name = \"_Questionnaire Customization\"\n verbose_name_plural = \"_Questionnaire Customizations\"\n\n guid = models.UUIDField(default=uuid4, editable=False) # unique=True)\n created = models.DateTimeField(auto_now_add=True, editable=False)\n modified = models.DateTimeField(auto_now=True, editable=False)\n\n # all customizations share a name\n # (this makes finding related customizations simple: \".filter(project=parent.project, name=parent.name)\" )\n name = models.CharField(\n max_length=LIL_STRING,\n blank=False,\n verbose_name=\"Customization Name\",\n validators=[validate_no_bad_chars, validate_no_spaces, validate_no_reserved_words, validate_no_profanities],\n help_text=\"A unique name for this customization. Spaces or the following characters are not allowed: \\\"%s\\\".\" % BAD_CHARS_LIST,\n )\n\n def __eq__(self, other):\n if isinstance(other, QCustomization):\n return self.guid == other.guid\n return NotImplemented\n\n def __ne__(self, other):\n equality_result = self.__eq__(other)\n if equality_result is NotImplemented:\n return equality_result\n return not equality_result\n\n @classmethod\n def get_field(cls, field_name):\n \"\"\"\n convenience fn for getting the Django Field instance from a model class\n note that this is a classmethod; when called from an instance it will just convert that instance to its class\n \"\"\"\n try:\n field = cls._meta.get_field_by_name(field_name)\n return field[0]\n except FieldDoesNotExist:\n return None\n\n def get_fully_qualified_key(self, parent_key=None):\n msg = \"{0} must define a custom 'get_fully_qualified_key' method.\".format(self.__class__.__name__)\n raise NotImplementedError(msg)\n\n def get_key(self):\n # convert UUID to str b/c UUID does not play nicely w/ JSON\n return str(self.guid)\n\n def is_existing(self):\n return self.pk is not None\n\n def is_new(self):\n return self.pk is None\n\n def reset(self):\n msg = \"{0} must define a custom 'reset' method.\".format(self.__class__.__name__)\n raise NotImplementedError(msg)\n\n def get_unique_together(self):\n \"\"\"\n 'unique_together' validation is only enforced if all the unique_together fields appear in the ModelForm\n this fn returns the fields to check for manual validation\n \"\"\"\n unique_together = self._meta.unique_together\n return list(unique_together)\n\n#######################\n# model customization #\n#######################\n\nclass QModelCustomizationQuerySet(models.QuerySet):\n \"\"\"\n As of Django 1.7 I can use custom querysets as managers\n to ensure that these custom methods are chainable\n whoo-hoo\n \"\"\"\n\n def documents(self):\n return self.filter(proxy__stereotype__iexact=\"document\")\n\n def owned_documents(self, user):\n return self.documents().filter(owner=user)\n\n def shared_documents(self, user):\n return self.documents().filter(shared_owners__in=[user.pk])\n\n\nclass QModelCustomization(QCustomization):\n\n class Meta:\n app_label = APP_LABEL\n abstract = False\n ordering = (\"order\", )\n verbose_name = \"_Questionnaire Model Customization\"\n verbose_name_plural = \"_Questionnaire Model Customizations\"\n\n class _QModelCustomizationUnsavedRelatedManager(QUnsavedRelatedManager):\n\n def 
get_unsaved_related_field_name(self):\n _field = QModelCustomization.get_field(\"relationship_source_property_customization\")\n _related_field_name = _field.related.name\n _unsaved_related_field_name = \"_unsaved_{0}\".format(_related_field_name)\n return _unsaved_related_field_name\n\n # custom managers...\n objects = QModelCustomizationQuerySet.as_manager()\n allow_unsaved_relationship_target_model_customizations_manager = _QModelCustomizationUnsavedRelatedManager()\n\n project = models.ForeignKey(\"QProject\", blank=False, related_name=\"model_customizations\")\n ontology = models.ForeignKey(\"QOntology\", blank=False, null=False)\n proxy = models.ForeignKey(\"QModelProxy\", blank=False, null=False)\n\n owner = models.ForeignKey(User, blank=False, null=True, related_name=\"owned_customizations\", on_delete=models.SET_NULL)\n shared_owners = models.ManyToManyField(User, blank=True, related_name=\"shared_customizations\")\n\n description = models.TextField(\n blank=True,\n help_text=\"An explanation of how this customization is intended to be used. This information is for informational purposes only.\",\n verbose_name=\"Customization Description\",\n )\n\n order = models.PositiveIntegerField(\n blank=True,\n null=True\n )\n\n is_default = models.BooleanField(\n blank=False,\n null=False,\n default=False,\n help_text=\"Every CIM Document Type must have one default customization. If this is the first customization you are creating, please ensure this checkbox is selected.\",\n verbose_name=\"Is Default Customization?\"\n )\n\n model_title = models.CharField(\n max_length=BIG_STRING,\n verbose_name=\"Name that should appear on the Document Form\",\n blank=False, null=True\n )\n model_description = models.TextField(\n blank=True,\n null=True,\n help_text=\"This text will appear as documentation in the editing form. Inline HTML formatting is permitted. 
The initial documentation comes from the ontology.\",\n verbose_name=\"A description of the document\",\n )\n model_show_all_categories = models.BooleanField(\n default=False,\n verbose_name=\"Display empty categories?\",\n help_text=\"Include categories in the editing form for which there are no (visible) properties associated with\",\n )\n\n # this fk is just here to provide the other side of the relationship to property_customization\n # I only ever access \"property_customization.relationship_target_model_customizations\"\n relationship_source_property_customization = models.ForeignKey(\"QPropertyCustomization\", blank=True, null=True, related_name=\"relationship_target_model_customizations\")\n\n synchronization = models.ManyToManyField(\"QSynchronization\", blank=True)\n\n def __str__(self):\n return pretty_string(self.name)\n\n def set_name(self, new_name):\n # used w/ \"recurse_through_customization\" in global fn \"set_name\" above\n self.name = new_name\n\n def set_owner(self, new_owner):\n # used w/ \"recurse_through_customization\" in global fn \"set_owner\" above\n self.owner = new_owner\n\n def set_shared_owner(self, new_shared_owner):\n # used w/ \"recurse_through_customization\" in global fn \"set_shared_owner\" above\n self.shared_owners.add(new_shared_owner)\n\n def clean(self, *args, **kwargs):\n\n other_customizers = QModelCustomization.objects.filter(\n proxy=self.proxy,\n project=self.project,\n ).exclude(pk=self.pk)\n\n # there can be only 1 \"default\" customization for each project/proxy/ontology combination\n if self.is_default:\n if other_customizers.filter(is_default=True).count() != 0:\n raise ValidationError({\n \"is_default\": _(\"A default customization already exists. There can be only one default customization per project.\")\n })\n\n if self.proxy.is_document():\n\n if other_customizers.filter(proxy__stereotype__iexact=\"document\", name=self.name).count() != 0:\n raise ValidationError({\n \"name\": _(\"A customization for this proxy and project with this name already exists.\"),\n # \"proxy\": _(\"A customization for this proxy and project with this name already exists.\"),\n # \"project\": _(\"A customization for this proxy and project with this name already exists.\"),\n })\n\n super(QModelCustomization, self).clean(*args, **kwargs)\n\n def get_fully_qualified_key(self, prefix=None):\n fully_qualified_key = \"{0}.{1}\".format(self.proxy.get_fully_qualified_key(), self.guid)\n if prefix:\n return \"{0}.{1}\".format(prefix, fully_qualified_key)\n return fully_qualified_key\n\n def is_synchronized(self):\n return self.synchronization.count() == 0\n\n def is_unsynchronized(self):\n return not self.is_synchronized()\n\n def reset(self):\n proxy = self.proxy\n\n self.order = proxy.order\n\n self.model_title = pretty_string(proxy.name)\n self.model_description = proxy.documentation\n self.model_show_all_categories = False\n\n def save(self, *args, **kwargs):\n # force all (custom) \"clean\" methods to run\n self.full_clean()\n super(QModelCustomization, self).save(*args, **kwargs)\n\n ###########################################\n # some fns which are called from handlers #\n ###########################################\n\n def updated_ontology(self):\n\n property_customizers = list(self.property_customizers.all()) # the list fn forces immediate qs evaluation\n for property_customizer in property_customizers:\n\n if property_customizer.field_type == QPropertyTypes.RELATIONSHIP:\n # recurse through subforms...\n for target_model_customizer in 
property_customizer.target_model_customizers.all():\n target_model_customizer.updated_ontology()\n\n property_proxy = property_customizer.proxy\n # TODO: DOUBLE-CHECK _ALL_ THE WAYS THAT THE ONTOLOGY COULD HAVE BEEN CHANGED\n if property_proxy.required and not property_customizer.required:\n property_customizer.required = True\n property_customizer.save()\n\n##########################\n# category customization #\n##########################\n\nclass QCategoryCustomizationQuerySet(models.QuerySet):\n \"\"\"\n As of Django 1.7 I can use custom querysets as managers\n to ensure that these custom methods are chainable\n whoo-hoo\n \"\"\"\n\n def get_by_key(self, key):\n if isinstance(key, basestring):\n key = generate_uuid(key)\n # TODO: THERE IS THE CHANCE OF MULTIPLE CUSTOMIZATIONS W/ THE SAME KEY B/C OF RECURSION\n # TODO: THIS MAKES SURE TO ONLY EVER RETURN THE 1ST ONE\n # TODO: IN THE LONG-TERM, I SHOULD FIX THIS FROM HAPPENING\n # return self.get(guid=key)\n matching_category_customizations = self.filter(guid=key)\n if matching_category_customizations:\n return matching_category_customizations[0]\n return None\n\n\nclass QCategoryCustomization(QCustomization):\n\n class Meta:\n app_label = APP_LABEL\n abstract = False\n ordering = (\"order\", )\n verbose_name = \"_Questionnairee Category Customization\"\n verbose_name_plural = \"_Questionnairee Category Customizations\"\n\n class _QCategoryCustomizationUnsavedRelatedManager(QUnsavedRelatedManager):\n\n def get_unsaved_related_field_name(self):\n _field = QCategoryCustomization.get_field(\"model_customization\")\n _related_field_name = _field.related.name\n _unsaved_related_field_name = \"_unsaved_{0}\".format(_related_field_name)\n return _unsaved_related_field_name\n\n # custom managers...\n objects = QCategoryCustomizationQuerySet.as_manager()\n allow_unsaved_category_customizations_manager = _QCategoryCustomizationUnsavedRelatedManager()\n\n proxy = models.ForeignKey(\"QCategoryProxy\", blank=False, null=False)\n\n model_customization = models.ForeignKey(\"QModelCustomization\", blank=False, related_name=\"category_customizations\")\n\n category_title = models.CharField(max_length=TINY_STRING, blank=False, validators=[validate_no_profanities, ])\n documentation = models.TextField(blank=True, null=True)\n order = models.PositiveIntegerField(blank=True, null=True)\n\n def __str__(self):\n return pretty_string(self.category_title)\n\n def get_fully_qualified_key(self, prefix=None):\n fully_qualified_key = \"{0}.{1}\".format(self.proxy.get_fully_qualified_key(), self.guid)\n if prefix:\n return \"{0}.{1}\".format(prefix, fully_qualified_key)\n return fully_qualified_key\n\n def has_property(self, property_customization):\n return property_customization in self.property_customizations.all()\n\n def set_name(self, new_name):\n # used w/ \"recurse_through_customization\" in global fn \"set_name\" above\n self.name = new_name\n\n def reset(self):\n\n proxy = self.proxy\n\n self.category_title = proxy.name\n self.documentation = proxy.documentation\n self.order = proxy.order\n\n###########################\n# property customizations #\n###########################\n\n\nclass QPropertyCustomization(QCustomization):\n\n class Meta:\n app_label = APP_LABEL\n abstract = False\n ordering = (\"order\", )\n verbose_name = \"_Questionnaire Property Customization\"\n verbose_name_plural = \"_Questionnaire Property Customization\"\n\n class _QPropertyCustomizationUnsavedRelatedManager(QUnsavedRelatedManager):\n\n def get_unsaved_related_field_name(self):\n 
_field = QPropertyCustomization.get_field(\"model_customization\")\n _related_field_name = _field.related.name\n _unsaved_related_field_name = \"_unsaved_{0}\".format(_related_field_name)\n return _unsaved_related_field_name\n\n class _QCategoryCustomizationUnsavedRelatedManager(QUnsavedRelatedManager):\n\n def get_unsaved_related_field_name(self):\n _field = QPropertyCustomization.get_field(\"category\")\n _related_field_name = _field.related.name\n _unsaved_related_field_name = \"_unsaved_{0}\".format(_related_field_name)\n return _unsaved_related_field_name\n\n # custom managers...\n # according to Django [https://docs.djangoproject.com/en/1.9/topics/db/managers/#custom-managers-and-model-inheritance], the 1st manager specified is the default manager; so I must explicitly reset \"objects\" here\n objects = models.Manager()\n allow_unsaved_property_customizations_manager = _QPropertyCustomizationUnsavedRelatedManager()\n allow_unsaved_categories_manager = _QCategoryCustomizationUnsavedRelatedManager()\n\n proxy = models.ForeignKey(\"QPropertyProxy\", blank=False, null=False)\n\n model_customization = models.ForeignKey(\"QModelCustomization\", blank=False, related_name=\"property_customizations\")\n category = models.ForeignKey(\"QCategoryCustomization\", blank=True, null=True, related_name=\"property_customizations\")\n\n # ALL fields...\n property_title = models.CharField(max_length=LIL_STRING, blank=False, validators=[validate_no_profanities, ])\n is_required = models.BooleanField(default=True, blank=True, verbose_name=\"Is this property required?\")\n is_required.help_text = _(\n \"All required properties must be completed prior to publication. \"\n \"A property that is defined as required in the CIM or a CV cannot be made optional.\"\n )\n is_hidden = models.BooleanField(default=True, blank=True, verbose_name=\"Should this property not be displayed?\")\n is_hidden.help_text = _(\n \"A property that is defined as required in an ontology cannot be hidden.\"\n )\n is_editable = models.BooleanField(default=True, verbose_name=\"Can this property be edited?\")\n is_nillable = models.BooleanField(default=True, verbose_name=\"Should nillable options be allowed?\")\n is_nillable.help_text = \\\n \"A nillable property can be intentionally left blank for several reasons: {0}.\".format(\n \", \".join([nr[0] for nr in NIL_REASONS])\n )\n documentation = models.TextField(\n blank=True,\n null=True,\n verbose_name=_(\n \"What is the help text to associate with this property?\"\n \"
Any initial help text comes from the CIM Schema or a CIM Controlled Vocabulary.<br/>\"\n \"<br/>Note that basic HTML tags are supported.<br/>
\"\n )\n )\n inline_help = models.BooleanField(default=False, blank=True, verbose_name=\"Should the help text be displayed inline?\")\n order = models.PositiveIntegerField(blank=True, null=True)\n field_type = models.CharField(max_length=BIG_STRING, blank=False, choices=[(ft.get_type(), ft.get_name()) for ft in QPropertyTypes])\n\n # ATOMIC fields...\n atomic_default = models.CharField(\n max_length=BIG_STRING,\n blank=True,\n null=True,\n verbose_name=_(\n \"What is the default value of this property?\"\n \"
Note that this only applies to new and not existing documents<br/>
\"\n )\n )\n atomic_type = models.CharField(\n max_length=BIG_STRING,\n blank=False,\n verbose_name=\"How should this field be rendered?\",\n choices=[(ft.get_type(), ft.get_name()) for ft in QAtomicPropertyTypes],\n default=QAtomicPropertyTypes.DEFAULT.get_type(),\n help_text = \"By default, all fields are rendered as strings. However, a field can be customized to accept longer snippets of text, dates, email addresses, etc.\",\n )\n atomic_suggestions = models.TextField(\n blank=True,\n null=True,\n validators=[validate_no_bad_suggestion_chars],\n verbose_name=\"Are there any suggestions you would like to offer as auto-completion options?\",\n help_text=\"Please enter a \\\"|\\\" separated list of words or phrases. (These suggestions will only take effect for text fields.)\",\n )\n\n # ENUMERATION fields...\n enumeration_open = models.BooleanField(blank=False, default=False, verbose_name='Can a user can specify a custom \"OTHER\" value?')\n\n # RELATIONSHIP fields...\n relationship_show_subform = models.BooleanField(\n default=False,\n blank=True,\n verbose_name=_(\n \"Should this property be rendered in its own subform?\"\n \"
Note that a relationship to another CIM Document cannot use subforms, while a relationship to anything else must use subforms.<br/>
\"\n ),\n help_text=_(\n \"Checking this will cause the property to be rendered as a nested subform within the parent form;\"\n \"All properties of this model will be available to view and edit in that subform.\"\n \"Unchecking it will cause the attribute to be rendered as a reference widget.\"\n )\n )\n\n # using the reverse of the fk defined on model_customization instead of this field\n # (so that I can use a custom manager to cope w/ unsaved instances)\n # relationship_target_model_customizations = models.ManyToManyField(\"QModelCustomization\", blank=True, related_name=\"+\")\n\n def __str__(self):\n return pretty_string(self.proxy.name)\n\n def get_fully_qualified_key(self, prefix=None):\n fully_qualified_key = \"{0}.{1}\".format(self.proxy.get_fully_qualified_key(), self.guid)\n if prefix:\n return \"{0}.{1}\".format(prefix, fully_qualified_key)\n return fully_qualified_key\n\n def reset(self):\n\n proxy = self.proxy\n\n self.field_type = proxy.field_type\n\n # ALL field types...\n self.property_title = pretty_string(proxy.name)\n self.order = proxy.order\n self.is_required = proxy.is_required()\n self.is_hidden = False\n self.is_editable = True\n self.documentation = proxy.documentation\n self.inline_help = False\n\n assert self.category is not None # even \"uncategorized\" properties should use the \"UncategorizedCategory\"\n\n # ATOMIC fields...\n if self.field_type == QPropertyTypes.ATOMIC:\n self.atomic_default = proxy.atomic_default\n self.atomic_type = proxy.atomic_type\n self.atomic_suggestions = \"\"\n\n # ENUMERATION fields...\n elif self.field_type == QPropertyTypes.ENUMERATION:\n self.enumeration_open = proxy.enumeration_open\n # TODO: DO I NEED TO DEAL W/ \"enumeration_choices\" OR \"enumeration_default\" ?\n\n # RELATIONSHIP fields...\n else: # self.field_type == QPropertyTypes.RELATIONSHIP:\n self.relationship_show_subform = not self.use_references()\n\n\n def set_name(self, new_name):\n # used w/ \"recurse_through_customization\" in global fn \"set_name\" above\n self.name = new_name\n\n def use_references(self):\n \"\"\"\n As of v0.14 all RELATIONSHIPS to a CIM Document _must_ use a reference\n :return: Boolean\n \"\"\"\n if self.field_type == QPropertyTypes.RELATIONSHIP:\n target_models_are_documents = [tm.is_document() for tm in self.proxy.relationship_target_models.all()]\n # double-check that all targets are the same type of class...\n assert len(set(target_models_are_documents)) == 1\n return all(target_models_are_documents)\n return False\n\n def use_subforms(self):\n \"\"\"\n As of v0.14 all RELATIONSHIPS to a CIM Entity (non-Document) _must_ use a subform\n :return: Boolean\n \"\"\"\n if self.field_type == QPropertyTypes.RELATIONSHIP:\n target_models_are_documents = [tm.is_document() for tm in self.proxy.relationship_target_models.all()]\n # double-check that all targets are the same type of class...\n assert len(set(target_models_are_documents)) == 1\n return not any(target_models_are_documents)\n return False\n","sub_path":"Q/questionnaire/models/models_customizations.py","file_name":"models_customizations.py","file_ext":"py","file_size_in_byte":46222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"183915314","text":"# -*- coding: utf-8 -*-\nimport os\nimport pytz\nfrom datetime import datetime, timedelta\n\ndef path(*args):\n return os.path.abspath(os.path.join(os.path.dirname(__file__), '..', *args))\n\nSECRET_KEY = 
'\\x8f\\xb2KTt\\xd0\\xe7\\xf1\"\\xd2\\x86:\\xb9\\xdcF\\xf1\\x18\\x1c\\x92\\xb6\\xbbk\\xa0\\x02'\n\nSITE_NAME = u'Менеджер сайтов'\n\nIMAGES = ['png', 'jpg', 'jpeg']\n\nMAX_CONTENT_LENGTH = 32 * 1024 * 1024\n\nUPLOADS_FOLDER = path('media')\n\nMEDIA_FOLDER = path('media')\n\nMEDIA_URL = '/media/'\n\nSITE_URL = '/'","sub_path":"app/settings/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"263305265","text":"from datetime import datetime\nimport os\nfrom pathlib import Path\nimport subprocess\nimport copy\n\n# Note: Run preprocess_data.py file in the main repository directory or the preproc directory of the repository.\n\n\nurls_download = [\"https://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_sprot.dat.gz\",\n \"https://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_trembl.dat.gz\"]\n\nprint(f\"{datetime.now()} - Start downloading files.\")\npath_current_dir = os.path.abspath(os.path.dirname(__file__))\npath_data_dir = path_current_dir.split(\"/preproc\")[0]+\"/data\"\npaths_raw_data = []\nfor url in urls_download:\n print(f\"{datetime.now()} - Start downloading from: {url}\")\n Path(path_data_dir).mkdir(exist_ok=True)\n subprocess.run([\"wget\", url, \"-P\", path_data_dir])\n paths_raw_data.append(path_data_dir+\"/\"+url.split(\"/\")[-1])\n print(f\"{datetime.now()} - Download finished.\")\n\n\nprint(f\"{datetime.now()} - Decompress downloaded files.\")\npaths_data = []\nfor path_raw in paths_raw_data:\n path_decomp = path_raw.split(\".gz\")[0]\n print(f\"{datetime.now()} - Decompress: {path_raw}\")\n subprocess.run([\"gunzip\", path_raw, path_decomp])\n paths_data.append(path_decomp)\n print(f\"{datetime.now()} - Decompressed to: {path_decomp}\")\n\n\nprint(f\"{datetime.now()} - Preprocessing files and saving to csv.\")\n# Raw data setup see user manual: https://web.expasy.org/docs/userman.html\nlinetype_conversion = {\n \"ID\": \"id\",\n \"AC\": \"accn\", # accession number\n \"DT\": \"date\",\n \"DE\": \"desc\", # DEscription\n \"GN\": \"gene\", # Gene Name\n \"OS\": \"spec\", # Organism Species\n \"OG\": \"orga\", # OrGanelle\n \"OC\": \"clas\", # Organism Classification\n \"OX\": \"taxo\", # Organism taxonomy cross-reference\n \"OH\": \"host\", # Organism Host\n \"RN\": \"refn\", # Reference Number\n \"RP\": \"refp\", # Reference Position\n \"RC\": \"refc\", # Reference Comment\n \"RX\": \"refx\", # Reference cross-reference\n \"RG\": \"refg\", # Reference Group\n \"RA\": \"refa\", # Reference Author\n \"RT\": \"reft\", # Reference Title\n \"RL\": \"refl\", # Reference Location\n \"CC\": \"text\", # free text comments\n \"DR\": \"xdb\", # Database cross-Reference\n \"FT\": \"xns\", # Cross-references to the nucleotide sequence database # RECHECK\n \"PE\": \"exist\", # Protein existence\n \"KW\": \"kw\", # KeyWord\n \"FT\": \"ft\", # Feature Table\n \"SQ\": \"seqh\", # SeQuence header)\n \" \": \"seq\",\n}\n\npreprocessing_fields = [\"id\",\"accn\",\"date\",\"desc\",\"gene\",\"spec\",\"orga\",\"clas\",\"taxo\",\"host\",\"refn\", \"refp\", \"refc\", \"refx\", \"refg\", \"refa\", \"reft\", \"refl\", \"text\",\"xdb\",\"ft\",\"exist\",\"kw\",\"seqh\",\"seq\"]\n\ndef get_csv(path, fields):\n path_out = path.split(\".\")[0]+\".csv\"\n print(f\"{datetime.now()} - Processing: {path}\")\n print(f\"{datetime.now()} - Saving to: {path_out}\")\n print(\"Processing file line:\")\n \n i = 0\n 
data = {k: \"\" for k in fields}\n with open(path, 'r') as rf, open(path_out, 'w') as wf:\n while True:\n if i == 0: # write header to csv\n header = \",\".join(fields)+\"\\n\"\n wf.write(header)\n \n if i % 1_000_000 == 0:\n print(i, end=\", \")\n i += 1\n \n rline = rf.readline()\n \n if rline.startswith(\"CC ----\") or \\\n rline.startswith(\"CC Copy\") or \\\n rline.startswith(\"CC Dist\"):\n continue\n elif rline == \"\": # EOF is empty string\n print(f\"\\n{datetime.now()} - Processing complete.\")\n break\n \n elif rline.startswith(\"//\"): # end of entry, save line to csv file\n for key in data.keys(): \n if key == \"seq\":\n data[key] = data[key].replace(\" \",\"\") # remove spaces in AA sequence\n \n wline = \",\".join([x.replace(\",\",\";\") for x in data.values()])+\"\\n\"\n wf.write(wline)\n data = {k: \"\" for k in fields} # create new empty data dict\n continue\n \n key = linetype_conversion[rline[:2]] # get line key\n content = \" \".join(rline[5:].split()) # get line content\n data[key] += content if data[key] == \"\" else \" \"+content\n return path_out\n\npaths_csv = []\nfor path in paths_data:\n path_out = get_csv(path, fields=preprocessing_fields)\n paths_csv.append(path_out)\n print(f\"{datetime.now()} - Preprocessed file saved to: {path_out}\")\n\n\n#print(f\"{datetime.now()} - Getting string lengths for every column.\")\n#cols = copy.deepcopy(preprocessing_fields)\n#cols.append(\"text_all\")\n#\n#def get_cols_len_csv(path, cols):\n# path_out = path.split(\".\")[0]+\"_len.csv\"\n# print(f\"Processing: {path}\")\n# print(f\"Saving to: {path_out}\")\n# i = 0\n# with open(path, 'r') as rf, open(path_out, 'w') as wf:\n# while True:\n# if i % 1_000_000 == 0:\n# print(i, end=\", \")\n# i += 1\n#\n# line = rf.readline()\n# if line == \"\": # EOF is an empty string\n# break\n#\n# line = line.replace(\"\\n\",\"\").split(\",\")\n#\n# if i == 1: # get index values for the wanted columns\n# idx = dict()\n# for c in cols:\n# if c == \"text_all\":\n# continue\n# idx[c] = line.index(c)\n#\n# wline = \",\".join(cols)+\"\\n\" # write header\n# wf.write(wline)\n# continue\n#\n# out = []\n# text_all = 0\n# for c in cols:\n# if c == \"id\":\n# out.append(line[idx[c]].split(\" \")[0])\n# elif c == \"text_all\":\n# out.append(str(text_all))\n# else:\n# length = len(line[idx[c]])\n# text_all += length\n# out.append(str(length))\n#\n# wline = \",\".join(out)+\"\\n\"\n# wf.write(wline)\n# return path_out\n#\n#for path in paths_csv:\n# path_out = get_cols_len_csv(path, cols)\n# print(f\"{datetime.now()} - String lengths data saved to: {path_out}\")\n\n\nprint(f\"{datetime.now()} - Merging preprocessed csv files into one csv file.\")\npath_csv_full = path_data_dir+\"/uniprot_full.csv\"\nsubprocess.run([\"cat\", paths_csv[0], f\"<(tail +2 {paths_csv[1]})\", \">\", path_csv_full])\nprint(f\"{datetime.now()} - Merged files saved to: {path_csv_full}\")\n\n\nprint(f\"{datetime.now()} - Data preprocessing done.\")\n\n","sub_path":"preproc/preprocess_data.py","file_name":"preprocess_data.py","file_ext":"py","file_size_in_byte":6548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"525222213","text":"\nimport avango\nimport avango.gua\nimport avango.script\nfrom avango.script import field_has_changed\n\n\nfrom Text import TextField\n\n\nclass Legend:\n\n counter = 0\n\n\n def __init__(self):\n self.group_node = avango.gua.nodes.TransformNode(Name = \"Legend\" + str(self.counter))\n self.trimesh_loader = avango.gua.nodes.TriMeshLoader()\n\n 
self.labels = []\n self.materials = []\n\n Legend.counter += 1\n\n\n def get_group_node(self):\n return self.group_node\n\n\n def add_item(self, text, material):\n quad = self.trimesh_loader.create_geometry_from_file(\"quad\" + str(len(self.labels)), \"data/objects/quad.obj\", material, avango.gua.LoaderFlags.DEFAULTS)\n quad.Transform.value = avango.gua.make_trans_mat(0.5, -0.5 - len(self.labels), 0.0) * avango.gua.make_scale_mat(0.8)\n self.group_node.Children.value.append(quad)\n textfield = TextField()\n textfield.group_node.Transform.value = avango.gua.make_trans_mat(1.5, -0.5 - len(self.labels), 0.0)\n textfield.Anchor.value = TextField.ANCHOR_CENTER_LEFT\n textfield.Text.value = text\n self.group_node.Children.value.append(textfield.create())\n self.labels.append(text)\n self.materials.append(material)\n","sub_path":"infovis/Legend.py","file_name":"Legend.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"136358445","text":"# MIT License\n\n# Copyright (c) [2019] [Coburn Wightman]\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport logging\n\nimport vxi11\n\nlogger = logging.getLogger(__name__)\n\nclass InstrumentDevice(object):\n '''Base class for Instrument Devices.\n\n All devices should inherit from this class overriding the methods\n that make sense for the intended device. Since each method of this base class is\n expected to respond rationally, a very simple device might override one method only.\n\n See the \"VXI-11 TCP/IP Instrument Protocol Specification\" for details on\n each device_xxx procedure. 
The procedures are from the host perspective, ie\n a device write is a write to the device and device read is a read from the device.\n '''\n def __init__(self, device_name):\n self.device_name = device_name\n self.srq_enabled = False\n \n def name(self):\n return self.device_name\n \n def has_lock(self):\n return False\n \n def device_abort(self):\n error = vxi11.ERR_NO_ERROR\n return error\n \n def device_write(self, opaque_data): # 11\n \"The device_write RPC is used to write data to the specified device\"\n error = vxi11.ERR_NO_ERROR\n\n if False:\n error = vxi11.ERR_IO_TIMEOUT\n elif False:\n error = vxi11.ERR_IO_ERROR\n elif False:\n error = vxi11.ERR_ABORT\n else:\n error = vxi11.ERR_OPERATION_NOT_SUPPORTED\n \n return error\n \n def device_read(self): #= 12\n \"The device_read RPC is used to read data from the device to the controller\"\n error = vxi11.ERR_NO_ERROR\n opaque_data = \"\"\n \n if False:\n error = vxi11.ERR_IO_TIMEOUT\n elif False:\n error = vxi11.ERR_IO_ERROR\n elif False:\n error = vxi11.ERR_ABORT\n else:\n error = vxi11.ERR_OPERATION_NOT_SUPPORTED\n \n result = error, opaque_data\n return result\n\n def device_readstb(self, link_id, flags, lock_timeout, io_timeout): # 13, generic params\n \"The device_readstb RPC is used to read a device's status byte.\"\n error = vxi11.ERR_NO_ERROR\n stb = 0\n\n if False:\n error = vxi11.ERR_IO_TIMEOUT\n elif False:\n error = vxi11.ERR_IO_ERROR\n elif False:\n error = vxi11.ERR_ABORT\n else:\n error = vxi11.ERR_OPERATION_NOT_SUPPORTED\n \n return error, stb\n\n def device_trigger(self, link_id, flags, lock_timeout, io_timeout): # 14, generic params\n \"The device_trigger RPC is used to send a trigger to a device.\"\n error = vxi11.ERR_NO_ERROR\n \n if False:\n error = vxi11.ERR_IO_TIMEOUT\n elif False:\n error = vxi11.ERR_IO_ERROR\n elif False:\n error = vxi11.ERR_ABORT\n else:\n error = vxi11.ERR_OPERATION_NOT_SUPPORTED\n \n return error\n\n def device_clear(self, link_id, flags, lock_timeout, io_timeout): # 15, generic params\n \"The device_clear RPC is used to send a device clear to a device\"\n error = vxi11.ERR_NO_ERROR\n \n if False:\n error = vxi11.ERR_IO_TIMEOUT\n elif False:\n error = vxi11.ERR_IO_ERROR\n elif False:\n error = vxi11.ERR_ABORT\n else:\n error = vxi11.ERR_OPERATION_NOT_SUPPORTED\n \n return error\n\n def device_remote(self, link_id, flags, lock_timeout, io_timeout): # 16, generic params\n \"The device_remote RPC is used to place a device in a remote state wherein all programmable local controls are disabled\"\n error = vxi11.ERR_NO_ERROR\n \n if False:\n error = vxi11.ERR_IO_TIMEOUT\n elif False:\n error = vxi11.ERR_IO_ERROR\n elif False:\n error = vxi11.ERR_ABORT\n else:\n error = vxi11.ERR_OPERATION_NOT_SUPPORTED\n \n return error\n\n def device_local(self, link_id, flags, lock_timeout, io_timeout): # 17, generic params\n \"The device_local RPC is used to place a device in a local state wherein all programmable local controls are enabled\"\n error = vxi11.ERR_NO_ERROR\n \n if False:\n error = vxi11.ERR_IO_TIMEOUT\n elif False:\n error = vxi11.ERR_IO_ERROR\n elif False:\n error = vxi11.ERR_ABORT\n else:\n error = vxi11.ERR_OPERATION_NOT_SUPPORTED\n \n return error\n\n def device_lock(self, ): # = 18\n \"The device_lock RPC is used to acquire a device's lock.\"\n error = vxi11.ERR_NO_ERROR\n \n if False:\n error = vxi11.ERR_IO_TIMEOUT\n elif False:\n error = vxi11.ERR_IO_ERROR\n elif False:\n error = vxi11.ERR_ABORT\n else:\n error = vxi11.ERR_OPERATION_NOT_SUPPORTED\n \n return error\n\n def 
device_unlock(self): # = 19\n \"The device_unlock RPC is used to release locks acquired by the device_lock RPC.\"\n error = vxi11.ERR_NO_ERROR\n \n if False:\n error = vxi11.ERR_IO_TIMEOUT\n elif False:\n error = vxi11.ERR_IO_ERROR\n elif False:\n error = vxi11.ERR_ABORT\n else:\n error = vxi11.ERR_OPERATION_NOT_SUPPORTED\n \n return error\n\n def device_enable_srq(self, enable, handle): # = 20\n \"The device_enable_srq RPC is used to enable or disable the sending of device_intr_srq RPCs by thenetwork instrument server\"\n error = vxi11.ERR_NO_ERROR\n \n if enable == True:\n self.srq_handle = handle\n self.srq_enabled = True\n else:\n self.srq_enabled = False\n \n return error\n\n def device_docmd(self, flags, io_timeout, lock_timeout, cmd, network_order, data_size, opaque_data_in): # = 22\n \"The device_docmd RPC allows a variety of operations to be executed\"\n error = vxi11.ERR_NO_ERROR\n \n if False:\n error = vxi11.ERR_IO_TIMEOUT\n elif False:\n error = vxi11.ERR_IO_ERROR\n elif False:\n error = vxi11.ERR_ABORT\n else:\n error = vxi11.ERR_OPERATION_NOT_SUPPORTED\n\n opaque_data_out = \"\"\n return error, opaque_data_out\n \nclass DefaultInstrumentDevice(InstrumentDevice):\n '''The default device is the device registered with the name of \"inst0\".\n\n The vxi-11 spec expects the default device to respond to the *IDN? command.\n If a custom default_device_handler is not specified when the InstrumentServer is\n initialized, this is the one that will be used.\n\n Many instruments have only one device, the \"inst0\" device. copy this class \n to YourDeviceHandler, use as boilerplate, and register it when the InstrumentServer\n is initialized.\n '''\n def __init__(self, device_name):\n super(DefaultInstrumentDevice, self).__init__(device_name)\n #self.device_name = 'inst0'\n self.idn = 'python-vxi11-server', 'bbb', '1234', '567'\n self.result = 'empty'\n \n def device_write(self, opaque_data):\n error = vxi11.ERR_NO_ERROR\n\n if opaque_data == '*IDN?':\n mfg, model, sn, fw = self.idn\n self.result = \"{} {} {} {}\".format(mfg, model, sn, fw)\n elif opaque_data == '*DEVICE_LIST?':\n devs = self.device_list()\n self.result = ''\n isFirst = True\n for dev in devs:\n if isFirst:\n self.result = '{}'.format(dev)\n isFirst = False\n else:\n self.result = '{}, {}'.format(self.result, dev)\n else:\n self.result = 'invalid'\n \n #logger.info(\"%s: device_write(): %s\", self.name(), opaque_data)\n return error\n \n def device_read(self):\n error = vxi11.ERR_NO_ERROR\n return error, self.result\n","sub_path":"vxi11_server/instrument_device.py","file_name":"instrument_device.py","file_ext":"py","file_size_in_byte":9035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"652151292","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n@author: Quoc-Tuan Truong \n\"\"\"\n\n\ndef test_with_ratio_split():\n from ...data import reader\n from ...eval_strategies import RatioSplit\n from ...models import PMF\n from ...metrics import MAE, RMSE, Recall, FMeasure\n from ..experiment import Experiment\n\n data = reader.txt_to_uir_triplets('./cornac/data/tests/data.txt')\n exp = Experiment(eval_strategy=RatioSplit(data, verbose=True),\n models=[PMF(1, 0)],\n metrics=[MAE(), RMSE(), Recall(1), FMeasure(1)],\n verbose=True)\n exp.run()\n\n assert (1, 4) == exp.avg_results.shape\n\n assert 1 == len(exp.user_results)\n assert 4 == len(exp.user_results['PMF'])\n assert 2 == len(exp.user_results['PMF']['MAE'])\n assert 2 == len(exp.user_results['PMF']['RMSE'])\n assert 2 == 
len(exp.user_results['PMF']['Recall@1'])\n assert 2 == len(exp.user_results['PMF']['F1@1'])\n\n try:\n Experiment(None, None, None)\n except ValueError:\n assert True\n\n try:\n Experiment(None, [PMF(1, 0)], None)\n except ValueError:\n assert True","sub_path":"cornac/experiment/tests/test_experiment.py","file_name":"test_experiment.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"637806177","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# pylint: disable-msg=C0103\n\n##***** BEGIN LICENSE BLOCK *****\n##Version: MPL 1.1\n##\n##The contents of this file are subject to the Mozilla Public License Version\n##1.1 (the \"License\"); you may not use this file except in compliance with\n##the License. You may obtain a copy of the License at\n##http:##www.mozilla.org/MPL/\n##\n##Software distributed under the License is distributed on an \"AS IS\" basis,\n##WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License\n##for the specific language governing rights and limitations under the\n##License.\n##\n##The Original Code is the AllegroGraph Java Client interface.\n##\n##The Original Code was written by Franz Inc.\n##Copyright (C) 2006 Franz Inc. All Rights Reserved.\n##\n##***** END LICENSE BLOCK *****\n\nfrom __future__ import absolute_import\n\nfrom franz import miniclient\nfrom ..exceptions import InitializationException, IllegalArgumentException,\\\n ServerException\nfrom ..model import URI, ValueFactory\nfrom .repositoryconnection import RepositoryConnection\nimport threading\nimport urllib\n\n\n# * A Sesame repository that contains RDF data that can be queried and updated.\n# * Access to the repository can be acquired by opening a connection to it.\n# * This connection can then be used to query and/or update the contents of the\n# * repository. Depending on the implementation of the repository, it may or may\n# * not support multiple concurrent connections.\n# *
\n# * Please note that a repository needs to be initialized before it can be used\n# * and that it should be shut down before it is discarded/garbage collected.\n# * Forgetting the latter can result in loss of data (depending on the Repository\n# * implementation)!\nclass Repository:\n RENEW = 'RENEW'\n ACCESS = 'ACCESS'\n OPEN = 'OPEN'\n CREATE = 'CREATE'\n REPLACE = 'REPLACE'\n\n def __init__(self, catalog, database_name, access_verb, multi_threaded_mode=False):\n self.catalog = catalog\n self.mini_catalog = catalog.mini_catalog\n self.mini_repository = None\n self.database_name = database_name\n self.access_verb = access_verb.upper()\n self.multi_threaded_mode=multi_threaded_mode\n ## system state fields:\n self.connection = None\n self.value_factory = None\n self.is_initialized = False\n self.federated_triple_stores = None\n self.mapped_predicates = {}\n self.mapped_datatypes = {}\n ## make sure that ValueFactory is initialized:\n #self.getConnection()\n #self.getValueFactory()\n \n def getDatabaseName(self):\n \"\"\"\n Return the name of the database (remote triple store) that this repository is\n interfacing with.\n \"\"\" \n return self.database_name\n \n def _create_triple_store(self, quotedDbName):\n miniCat = self.mini_catalog\n if self.federated_triple_stores:\n miniCat.federateTripleStores(quotedDbName, [urllib.quote_plus(ts) for ts in self.federated_triple_stores])\n else:\n miniCat.createTripleStore(quotedDbName)\n \n def _attach_to_mini_repository(self):\n \"\"\"\n Create a mini-repository and execute a RENEW, OPEN, CREATE, or ACCESS.\n \n TODO: FIGURE OUT WHAT 'REPLACE' DOES\n \"\"\"\n def remove_double_quotes(name):\n if name.startswith('\"'):\n name = name[1:-1]\n return name\n #clearIt = False\n quotedDbName = urllib.quote_plus(self.database_name)\n miniCat = self.mini_catalog\n repositoryNames = [remove_double_quotes(name) for name in miniCat.listTripleStores()]\n if self.access_verb == Repository.RENEW:\n if quotedDbName in repositoryNames:\n ## not nice, since someone else probably has it open:\n miniCat.deleteTripleStore(quotedDbName)\n self._create_triple_store(quotedDbName) \n elif self.access_verb == Repository.CREATE:\n if quotedDbName in repositoryNames:\n raise ServerException(\n \"Can't create triple store named '%s' because a store with that name already exists.\",\n quotedDbName)\n self._create_triple_store(quotedDbName)\n elif self.access_verb == Repository.OPEN:\n if not quotedDbName in repositoryNames:\n raise ServerException(\n \"Can't open a triple store named '{0}' because there is none.\".format(quotedDbName))\n elif self.access_verb == Repository.ACCESS:\n if not quotedDbName in repositoryNames:\n self._create_triple_store(quotedDbName) \n self.mini_repository = miniCat.getRepository(quotedDbName)\n# ## we are done unless a RENEW requires us to clear the store\n# if clearIt:\n# self.mini_repository.deleteMatchingStatements(None, None, None, None)\n\n def _get_mini_repository(self):\n return self.mini_repository\n \n def initialize(self):\n \"\"\"\n Initializes this repository. A repository needs to be initialized before\n it can be used. Return 'self' (so that we can chain this call if we like).\n \"\"\"\n if self.is_initialized:\n raise InitializationException(\"A repository cannot be initialized twice.\")\n self._attach_to_mini_repository()\n ## EXPERIMENTATION WITH INITIALIZING AN ENVIRONMENT. 
DIDN'T LOOK RIGHT - RMM\n# self.environment = self.mini_repository.createEnvironment()\n# print \"ENV\", self.environment\n# self.mini_repository.deleteEnvironment(self.environment)\n# print \"ENV AfTER\", self.mini_repository.listEnvironments()\n ## tricky: 'Literal' can be called before ValueFactory is initialized, causing\n ## it to break. Here we make sure its up:\n self.getValueFactory()\n self.is_initialized = True\n return self \n \n def addFederatedTripleStores(self, tripleStoreNames):\n \"\"\"\n Make this repository a federated store that includes the stores named in\n 'tripleStoreNames'. This call must precede the call to 'initialize'. It\n may be called multiple times. \n \"\"\"\n if self.is_initialized:\n raise InitializationException(\"Federated triples stores must be added prior to calling 'initialize'.\")\n if not self.access_verb in [Repository.CREATE, Repository.RENEW]:\n raise InitializationException(\"Adding federated triple stores requires a CREATE or RENEW access option.\\n\" +\n \"The current access is set to '%s'.\" % self.access_verb)\n if not self.federated_triple_stores:\n self.federated_triple_stores = set([])\n for ts in tripleStoreNames:\n self.federated_triple_stores.add(ts)\n return self\n \n def indexTriples(self, all=False, asynchronous=False):\n \"\"\"\n Index the newly-added triples in the store. This should be done after every \n significant-sized load of triples into the store.\n If 'all', re-index all triples in the store. If 'asynchronous', spawn\n the indexing task as a separate thread, and don't wait for it to complete.\n Note. Upon version 4.0, calling this will no longer be necessary. \n \"\"\"\n self._get_mini_repository().indexStatements(all=all)\n\n def updateFreeTextIndexing(self):\n return self._get_mini_repository().updateFreeTextIndexing()\n\n def listFreeTextPredicates(self):\n return self._get_mini_repository().listFreeTextPredicates()\n\n def registerFreeTextPredicate(self, uri=None, namespace=None, localname=None):\n \"\"\"\n Register a predicate 'uri' (or 'namespace'+'localname'), telling the RDF store to index\n text keywords belonging to strings in object position in the corresponding\n triples/statements. This is needed to make the fti:match operator\n work properly.\n \"\"\"\n uri = str(uri) or (namespace + localname)\n if not uri.startswith('<'):\n uri = '<' + uri + '>'\n self._get_mini_repository().registerFreeTextPredicate(uri)\n \n def _translate_inlined_type(self, type):\n if type == \"int\": return \"int\"\n elif type == \"datetime\": return \"date-time\"\n elif type == \"float\": return \"double-float\"\n else:\n raise IllegalArgumentException(\"Unknown inlined type '%s'\\n. Legal types are \" +\n \"'int', 'float', and 'datetime'\")\n \n def registerDatatypeMapping(self, predicate=None, datatype=None, nativeType=None):\n \"\"\"\n Register an inlined datatype. 
If 'predicate', then object arguments to triples\n with that predicate will use an inlined encoding of type 'nativeType' in their \n internal representation.\n If 'datatype', then typed literal objects with a datatype matching 'datatype' will\n use an inlined encoding of type 'nativeType'.\n \"\"\"\n predicate = predicate.getURI() if isinstance(predicate, URI) else predicate\n datatype = datatype.getURI() if isinstance(datatype, URI) else datatype\n if predicate:\n if not nativeType:\n raise IllegalArgumentException(\"Missing 'nativeType' parameter in call to 'registerDatatypeMapping'\")\n lispType = self._translate_inlined_type(nativeType)\n #mapping = [predicate, lispType, \"predicate\"]\n self.mapped_predicates[predicate] = lispType\n elif datatype:\n lispType = self._translate_inlined_type(nativeType or datatype)\n #mapping = [datatype, lispType, \"datatype\"]\n self.mapped_datatypes[datatype] = lispType\n if predicate:\n self._get_mini_repository().addMappedPredicate(\"<%s>\" % predicate, lispType) \n else:\n self._get_mini_repository().addMappedType(\"<%s>\" % datatype, lispType)\n \n def shutDown(self):\n \"\"\"\n Shuts the store down, releasing any resources that it keeps hold of.\n Once shut down, the store can no longer be used.\n \n TODO: WE COULD PRESUMABLY ADD SOME LOGIC TO MAKE A RESTART POSSIBLE, ALTHOUGH\n THE ACCESS OPTION MIGHT NOT MAKE SENSE THE SECOND TIME AROUND (KILLING THAT IDEA!)\n \"\"\"\n self.mini_catalog = None\n self.mini_repository = None\n\n def isWritable(self):\n \"\"\"\n Checks whether this store is writable, i.e. if the data contained in\n this store can be changed. The writability of the store is\n determined by the writability of the Sail that this store operates\n on.\n \"\"\"\n return self._get_mini_repository().is_writable()\n\n def getConnection(self):\n \"\"\"\n Opens a connection to this store that can be used for querying and\n updating the contents of the store. Created connections need to be\n closed to make sure that any resources they keep hold of are released. 
The\n best way to do this is to use a try-finally-block \n \"\"\"\n if not self.connection:\n self.connection = RepositoryConnection(self)\n return self.connection\n\n def getValueFactory(self):\n \"\"\"\n Return a ValueFactory for this store\n \"\"\"\n if not self.value_factory:\n self.value_factory = ValueFactory(self)\n return self.value_factory\n\n \n \n \n","sub_path":"SemanticWebApproach/RoboWriter/allegrordf-1.0.1/franz/openrdf/repository/repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":11493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"651205624","text":"from itertools import groupby\nfrom typing import Any, Tuple\n\nfrom framework.tasks import BaseTask\n\nfrom .task_operator import TaskOperator\n\n\nclass MultipleTaskOperator(TaskOperator):\n '''\n group_idが付与されたinputsを、group_idごとにグルーピングしてタスクを実行する\n '''\n\n # Overrided\n def execute_task(self, task: BaseTask, inputs: Tuple[Any, ...]) -> Tuple[Any, ...]:\n outputs_list = []\n groups = [list(value) for key, value in groupby(inputs, lambda x: x['group_id'])]\n # e.g.\n # groups:\n # [{'group_id': 1}, {'group_id': 1}]\n for group in groups:\n outputs = task.execute(group)\n # group_idの付与\n group_id = group[0]['group_id']\n for x in outputs:\n x['group_id'] = group_id\n outputs_list.append(outputs)\n # e.g.\n # outputs_list:\n # [[{'group_id': 1}, {'group_id': 1}], [{'group_id': 2}]]\n # return_value:\n # [{'group_id': 1}, {'group_id': 1}, {'group_id': 2}]\n return tuple(flatten for inner in outputs_list if inner for flatten in inner)\n","sub_path":"modules/framework/airflow_extentions/multiple_task_operator.py","file_name":"multiple_task_operator.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"369626278","text":"from flask import g, render_template, request\n\nfrom app import flask_app, SENTIMENT_ANALYZE\n\n\n@flask_app.route('/')\n@flask_app.route('/index')\ndef index():\n # sent = 'I love this movies.'\n # label,score = SENTIMENT_ANALYZE.classify(sent)\n # print(label,' => ',score)\n # data = {'class_label':label, 'score':score}\n return render_template('index.html')\n\n\n@flask_app.route('/results', methods=['POST', 'GET'])\ndef results():\n if request.method == 'POST':\n review = request.form['review']\n label, score = SENTIMENT_ANALYZE.classify(review)\n # print(label,' => ',score)\n data = {'prediction':label, 'probability':round(score*100, 2), 'content':review}\n return render_template('result.html', data=data)\n else:\n return render_template('index.html')\n\n\n@flask_app.route('/save-review', methods=['POST', 'GET'])\ndef save_review():\n if request.method == 'POST':\n review = request.get_json()\n SENTIMENT_ANALYZE.train(review['text'], int(review['prediction']))\n SENTIMENT_ANALYZE.save_sentiment_to_db(review['text'], int(review['prediction']))\n data = {'prediction': '', 'probability': round(0 * 100, 2), 'content': ''}\n return render_template('result.html', data=data)\n else:\n return render_template('error.html')\n\n\n@flask_app.teardown_appcontext\ndef close_connection(exception):\n db = getattr(g, '_database', None)\n if db is not None:\n db.close()\n\n@flask_app.errorhandler(404)\ndef page_not_found(e):\n return render_template(\"404.html\")","sub_path":"app/view/Routes.py","file_name":"Routes.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} 
+{"seq_id":"57178672","text":"'''\nCreated on 9 de may. de 2016\n\n@author: Daniela Sanchez\n'''\nimport re\n\nimport json\nimport pandas as pd\nimport numpy as np\nfrom ete3 import Tree\nimport statsmodels.formula.api as sm\n\n\nclass ParseJplace():\n \n def __init__(self, jfileComp, jfileMinus):\n self.jfileComp = jfileComp\n self.jfileMinus = jfileMinus\n self.perfil_comp = None \n self.perfil_minus = None\n self.dir = None\n self.df= None\n \n \n def parseTree(self,tree, minus_sequence, edge_num):\n dict1 = {}\n win_id = []\n for i in edge_num:\n #add a name to nodes\n patt = re.compile(r'\\)\\:\\d\\.[0-9e\\-]+\\{%s\\}' %i)\n obj_search = patt.search(tree)\n if obj_search is not None:\n pat_sub = re.compile(r'(\\:\\d\\.[0-9e\\-]+\\{%s\\})' %i)\n tree = re.sub(pat_sub, r'%s\\1' %i, tree) \n #extract the id of each branch or nodes\n patt1 = re.compile(r'(\\w+)\\:\\d\\.[0-9e\\-]+\\{%s\\}' %i)\n obj1 = patt1.search(tree)\n dict1[i] = str(obj1.group(1))\n #Remove \"{number}\" from the tree \n pattern=re.compile(r'\\{\\d+\\}')\n new_tree = str(pattern.sub('',tree))\n nw = Tree(new_tree, format=1)\n #extract branch names for each node\n for node in nw.traverse(\"postorder\"):\n if not node.is_leaf() and node.up: # For all internal nodes\n if node.name != '':\n dict1[int(node.name)] = node.get_leaf_names()\n \n for y, x in dict1.items():\n if y in edge_num:\n if type(x) is list:\n for k in x:\n win_id.append(k)\n else:\n win_id.append(x)\n \n dist_fin =[]\n self.df = pd.read_csv('distance.csv', index_col = 0) \n for a in win_id:\n p_dist = self.df.loc[minus_sequence,a]\n dist_fin.append(p_dist) \n return dist_fin\n \n def parseMinus(self):\n dict1 = {}\n pattern = re.compile(r'_')\n pattern1 = re.compile(r'\\w+_\\w+_minus(.*)\\.jplace')\n keys = ['n_place', 'phy_dist', 'maxdist'] #, 'pendant_len']\n self.dir = 'pplacer/'\n for x in self.jfileMinus:\n f_exp = pattern1.search(x)\n minus_win = f_exp.group(1)\n jason = '%s%s' %(self.dir, x)\n with open(jason) as data_file:\n data = json.load(data_file)\n \n for i in data['placements']: \n for a in i['nm']:\n id_win = a[0]\n id_seq = pattern.split(id_win)[0]\n \n if id_seq == minus_win:\n n_place = len(i['p'])\n values = [n_place]\n if n_place == 1:\n values.append(0)\n for y in i['p']:\n #values.append(y[0])\n #pendant\n #values.append(y[5])\n #Append max distance\n edgeVal= [y[1]]\n distance = self.parseTree(data['tree'], minus_win, edgeVal)\n distMax = max(distance)\n values.append(distMax) \n else:\n #APPEND DIST\n edge_val = [y[1] for y in i['p']]\n dist = self.parseTree(data['tree'], minus_win, edge_val)\n meanDist = np.mean(dist)\n values.append(meanDist)\n #print edge_val\n #d_len = np.mean([y[0] for y in i['p']])\n #p_len = np.mean([y[5] for y in i['p']]) \n #values.append(d_len)\n #values.append(p_len)\n #add max distance\n maxDist = max(dist)\n values.append(maxDist)\n #print values \n dict1[id_win]= dict(zip(keys, values)) \n \n self.perfil_minus = pd.DataFrame.from_dict(dict1, orient = 'index')\n self.perfil_minus.to_csv('%sperfil_minus.csv' %self.dir)\n\n \n def parseSimple(self):\n self.parseMinus()\n dict1 = {}\n pattern = re.compile(r'_')\n keys = ['n_place', 'phy_dist', 'maxdist'] #, 'pendant_len']\n for x in self.jfileComp:\n jason = '%s%s' %(self.dir, x)\n with open(jason) as data_file:\n data = json.load(data_file)\n \n for i in data['placements']:\n n_place = len(i['p'])\n for a in i['nm']:\n values = [n_place]\n id_win = a[0]\n id_tree = pattern.split(id_win)[0]\n #print id_tree\n \n if n_place > 1:\n edge_val = [y[1] for y 
in i['p']]\n                        dist = self.parseTree(data['tree'], id_tree, edge_val)\n                        meanDist = np.mean(dist)\n                        values.append(meanDist)\n                        #print edge_val\n                        #d_len = np.mean([y[0] for y in i['p']])\n                        #p_len = np.mean([y[5] for y in i['p']]) \n                        #values.append(d_len)\n                        #values.append(p_len) \n                        \n                        \n                    else:\n                        values.append(0)\n                        #for y in i['p']:\n                            #values.append(y[0])\n                            #values.append(y[5])\n                            ##Add max distance\n                            ##values.append(0)\n                        \n                    #Add max distance\n                    distance= list(self.df.loc[id_tree,])\n                    maxDist = max(distance)\n                    values.append(maxDist)\n                    \n                    #print values \n                    dict1[id_win]= dict(zip(keys, values))\n            \n        self.perfil_comp = pd.DataFrame.from_dict(dict1, orient = 'index')\n        self.perfil_comp.to_csv('%sperfil_completo.csv' %self.dir)\n\n    \n    def correlation(self):\n        self.parseSimple()\n        dict_cor = {}\n        for i in self.perfil_comp.index.tolist():\n            comp = list(self.perfil_comp.loc[i,])\n            minus = list(self.perfil_minus.loc[i,])\n            df = pd.DataFrame({\"A\":minus, \"B\":comp})\n            result = sm.ols(formula=\"A ~ B\", data=df).fit()\n            inter = result.params.Intercept\n            dict_cor[i] = inter\n        corr_coef = pd.DataFrame.from_dict(dict_cor, orient = 'index')\n        corr_coef.columns=['intercept']\n        corr_coef.to_csv('%sintercepts.csv' %self.dir) \n        return corr_coef\n    ","sub_path":"jplace.py","file_name":"jplace.py","file_ext":"py","file_size_in_byte":7129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"18272711","text":"import select\nimport socket\nimport queue\nserver = socket.socket()\nserver.bind(('localhost',9999))\nserver.listen(5)\nserver.setblocking(False) # set to non-blocking\n\ninputs=[server] # handed over to select to manage\nmsg_queuses={}\noutputs=[]\n\nwhile True:\n    r_list,w_list,exception_list=select.select(inputs,outputs,inputs) # let select manage them\n    # what the kernel returns to select is a complete list: if you have 500 connections it returns all 500, but select has handled it for us and filtered out which of those 500 are ready\n    # what is returned to my user program is r_list, w_list, exception_list\n    # so when the user program then loops over this r_list, everything it sees is already ready!!!\n    # as long as r_list returns anything, it is guaranteed to have data! select has already done the filtering for you\n\n    for s in r_list:\n        if s is server:\n            conn,addr = s.accept()\n            print('client info',conn,addr)\n\n            inputs.append(conn) # add what the client sends into select's handling; the select call above is already written, so just append\n\n            msg_queuses[conn] = queue.Queue() # create a queue keyed by conn\n        else:\n            try:\n                data = s.recv(1024)\n                print(\"recv data from [%s]:[%s]\" %(s.getpeername(),data.decode()))\n                msg_queuses[s].put(data)\n                if s not in outputs:\n                    outputs.append(s)\n            except ConnectionResetError as e:\n                print(\"conn closed \",s.getpeername(),e)\n\n                inputs.remove(s)\n                if s in outputs:\n                    outputs.remove(s)\n                del msg_queuses[s]\n\n    for s in w_list:\n        try:\n            data = msg_queuses[s].get_nowait()\n            s.send(data.upper())\n        except queue.Empty as e:\n            outputs.remove(s)","sub_path":"untitled1/dream/day-11/socket_sever (select).py","file_name":"socket_sever (select).py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"388196318","text":"import matplotlib.pyplot as plt\n\n\nclass ScatterPlot:\n\n    def __init__(self,dataExtractor,buffered=False):\n        self.buffered=buffered\n        self.fig,self.subp=plt.subplots()\n        self.plots=dict()\n        self.minX=float('Inf')\n        self.maxX=-float('Inf')\n        self.minY=float('Inf')\n        self.maxY=-float('Inf')\n        plt.grid(True)\n        self.lastSample=None\n        self.startTime=None\n        self.dataY=dict()\n        self.dataX=dict()\n        self.extractor=dataExtractor\n        self.setYLabel(self.extractor.getYLabel())\n        self.setXLabel(self.extractor.getXLabel())\n        plt.ion()\n        plt.show()\n\n    def setYLabel(self,label):\n        plt.ylabel(label)\n\n    def setXLabel(self,label):\n        plt.xlabel(label)\n\n    def setTitle(self,title):\n        plt.suptitle(title)\n\n    def adjustLimits(self):\n        for key in self.dataY.keys():\n            if self.dataY[key][-1]>self.maxY:\n                self.maxY=self.dataY[key][-1]\n            if self.dataY[key][-1]<self.minY:\n                self.minY=self.dataY[key][-1]\n            if self.dataX[key][-1]>self.maxX:\n                self.maxX=self.dataX[key][-1]\n            if self.dataX[key][-1]H\", fobj.read(2)); length2 += 2; length2a += 2\n    fobj.read(2); length2 += 2; length2a += 2; length2b += 2\n    ans[\"NumberOfPrimaryVideoStreamEntries\"], = struct.unpack(\">B\", fobj.read(1)); length2 += 1; length2a += 1; length2b += 1\n    ans[\"NumberOfPrimaryAudioStreamEntries\"], = struct.unpack(\">B\", fobj.read(1)); length2 += 1; length2a += 1; length2b += 1\n    ans[\"NumberOfPrimaryPGStreamEntries\"], = struct.unpack(\">B\", fobj.read(1)); length2 += 1; length2a += 1; length2b += 1\n    ans[\"NumberOfPrimaryIGStreamEntries\"], = struct.unpack(\">B\", fobj.read(1)); length2 += 1; length2a += 1; length2b += 1\n    ans[\"NumberOfSecondaryAudioStreamEntries\"], = struct.unpack(\">B\", fobj.read(1)); length2 += 1; length2a += 1; length2b += 1\n    ans[\"NumberOfSecondaryVideoStreamEntries\"], = struct.unpack(\">B\", fobj.read(1)); length2 += 1; length2a += 1; length2b += 1\n    ans[\"NumberOfSecondaryPGStreamEntries\"], = struct.unpack(\">B\", fobj.read(1)); length2 += 1; length2a += 1; length2b += 1\n    fobj.read(5); length2 += 5; length2a += 5; length2b += 5\n\n    # Loop over stream list names ...\n    for name in [\"PrimaryVideoStreamEntries\", \"PrimaryAudioStreamEntries\", \"PrimaryPGStreamEntries\", \"SecondaryPGStreamEntries\", \"PrimaryIGStreamEntries\", \"SecondaryAudioStreamEntries\", \"SecondaryVideoStreamEntries\"]:\n        # Loop over entries and add to list ...\n        ans[name] = []\n        for i in range(ans[\"NumberOf{0:s}\".format(name)]):\n            tmp = {}\n            res, length2, length2a, length2b, length2c = load_StreamEntry(fobj, length2, length2a, length2b)\n            tmp[\"StreamEntry\"] = res\n            res, length2, length2a, length2b, length2c = load_StreamAttributes(fobj, length2, 
length2a, length2b)\n tmp[\"StreamAttributes\"] = res\n ans[name].append(tmp)\n\n # Pad out the read ...\n if length2b != ans[\"Length\"]:\n l = ans[\"Length\"] - length2b # [B]\n fobj.read(l); length2 += l; length2a += l; length2b += l\n\n # Return answer ...\n return ans, length2, length2a, length2b\n","sub_path":"MPLS/load_STNTable.py","file_name":"load_STNTable.py","file_ext":"py","file_size_in_byte":3274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"162981516","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nipl_df = pd.read_csv('data/ipl_dataset.csv', index_col=None)\n\n\n# Solution\ndef plot_matches_by_team():\n ipl_df['count'] = 1\n data = ipl_df[['batting_team','match_code','count']]\n matches = data.groupby(['batting_team','match_code']).agg(np.unique)\n teams = matches['count'].groupby(['batting_team']).sum()\n bar_plot = teams.plot(kind = 'bar')\n plt.show()\n","sub_path":"q02_plot_matches_by_team/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"275853880","text":"from django.shortcuts import render\nfrom django.contrib.auth.models import User\n\n# Create your views here.\n\n\ndef index(request):\n print(request.user)\n if request.user.is_authenticated:\n social = request.user.social_auth.get(provider='fitbit')\n token = social.extra_data['access_token']\n print(token)\n return render(request, 'temp/index.html')\n\n# https://www.fitbit.com/oauth2/authorize?client_id=22D3H4&redirect_uri=http://localhost:8000/complete/fitbit/&state=dqOuEqrXhT7nxrTyxK0fpGvgOklUQDo2&response_type=code&scope=activity+heartrate+profile+sleep+weight+profile\n","sub_path":"django-social-auth-trial/tutorial/temp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"27926862","text":"\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef MeanData(arr):\r\n size=len(arr)\r\n sum=0\r\n \r\n for i in range(size):\r\n sum=sum+arr[i]\r\n \r\n return(sum/size)\r\n\r\ndef MLHeadBrain(Name):\r\n dataset=pd.read_csv(Name)\r\n print(\"Size of our dataset is:\",dataset.shape)\r\n \r\n X=dataset[\"Head Size(cm^3)\"].values\r\n Y=dataset[\"Brain Weight(grams)\"].values\r\n \r\n print(\"Lenght of X:\",len(X))\r\n print(\"Lenght of Y:\",len(Y))\r\n \r\n Mean_X=0\r\n Mean_Y=0\r\n Mean_X=MeanData(X)\r\n Mean_Y=MeanData(Y)\r\n print(type(Mean_X))\r\n\r\n print(\"Mean of independent variable is\", Mean_X)\r\n print(\"Mean of dependent variable is\", Mean_Y)\r\n #m=(Sum(X-Xb)*(Y-Yb))/Sum(X-Xb)^2\r\n numerator=0\r\n denomenator=0\r\n \r\n for i in range(len(X)):\r\n numerator=numerator+int((X[i] - Mean_X)*(Y[i] - Mean_Y))\r\n denomenator=denomenator+(X[i]-Mean_X)**2\r\n \r\n m=numerator/denomenator\r\n print(\"Value of m is\",m)\r\n \r\n c=Mean_Y-(m*Mean_X)\r\n print(\"Value of c is: \",c)\r\n \r\n X_Start=np.min(X)-200\r\n X_End=np.max(X)+200\r\n \r\n x=np.linspace(X_Start,X_End)\r\n y=m*x+c\r\n \r\n plt.plot(x,y,color='r',label=\"Line of Regression\")\r\n plt.scatter(X,Y,color='b', label=\"Data Plot\")\r\n \r\n plt.xlabel(\"Head size\")\r\n plt.ylabel(\"Brain Weight\")\r\n \r\n plt.legend()\r\n plt.show()\r\n\r\ndef main():\r\n \r\n #print(\"Enter the name of the dataset\")\r\n #name=input()\r\n MLHeadBrain(\"HeadBrain.csv\")\r\n\r\nif 
__name__==\"__main__\":\r\n main()\r\n","sub_path":"Head Brain Case Study/LinearReg_OwnFunc.py","file_name":"LinearReg_OwnFunc.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"209392593","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\n# #########################################################################\n# Copyright (c) 2015, UChicago Argonne, LLC. All rights reserved. #\n# #\n# Copyright 2015. UChicago Argonne, LLC. This software was produced #\n# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #\n# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #\n# U.S. Department of Energy. The U.S. Government has rights to use, #\n# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #\n# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #\n# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #\n# modified to produce derivative works, such modified software should #\n# be clearly marked, so as not to confuse it with the version available #\n# from ANL. #\n# #\n# Additionally, redistribution and use in source and binary forms, with #\n# or without modification, are permitted provided that the following #\n# conditions are met: #\n# #\n# * Redistributions of source code must retain the above copyright #\n# notice, this list of conditions and the following disclaimer. #\n# #\n# * Redistributions in binary form must reproduce the above copyright #\n# notice, this list of conditions and the following disclaimer in #\n# the documentation and/or other materials provided with the #\n# distribution. #\n# #\n# * Neither the name of UChicago Argonne, LLC, Argonne National #\n# Laboratory, ANL, the U.S. Government, nor the names of its #\n# contributors may be used to endorse or promote products derived #\n# from this software without specific prior written permission. #\n# #\n# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #\n# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #\n# POSSIBILITY OF SUCH DAMAGE. 
#\n# #########################################################################\n\n'''\nAuthor: Walan Grizolli\n\n\n'''\n\n# %%% imports cell\n\nimport sys\n\nif len(sys.argv) != 1:\n import matplotlib\n matplotlib.use('Agg')\n\nimport numpy as np\nimport matplotlib\nimport os\n\nimport matplotlib.pyplot as plt\n\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfrom wavepy.utils import easyqt\nimport wavepy.utils as wpu\n\nfrom scipy.ndimage.filters import uniform_filter1d\nfrom matplotlib.patches import Rectangle\n\nwpu._mpl_settings_4_nice_graphs(otheroptions={'axes.formatter.use_mathtext':True,\n 'axes.formatter.limits': '-3, 4'})\n\n\n# %%\ndef _n_profiles_H_V(arrayH, arrayV, virtual_pixelsize,\n zlabel=r'z',\n titleH='Horiz', titleV='Vert',\n nprofiles=5, filter_width=0,\n remove2ndOrder=False,\n saveFileSuf='',\n saveFigFlag=True):\n\n xxGrid, yyGrid = wpu.grid_coord(arrayH, virtual_pixelsize)\n\n fit_coefs = [[], []]\n data2saveH = None\n data2saveV = None\n labels_H = None\n labels_V = None\n\n plt.rcParams['lines.markersize'] = 4\n plt.rcParams['lines.linewidth'] = 2\n\n # Horizontal\n if np.all(np.isfinite(arrayH)):\n\n plt.figure(figsize=(12, 12*9/16))\n\n xvec = xxGrid[0, :]\n data2saveH = np.c_[xvec]\n header = ['x [m]']\n\n ls_cycle, lc_jet = wpu.line_style_cycle(['-'], ['o', 's', 'd', '^'],\n ncurves=nprofiles,\n cmap_str='gist_rainbow_r')\n\n lc = []\n labels_H = []\n for i, row in enumerate(np.linspace(filter_width//2,\n np.shape(arrayV)[0]-filter_width//2-1,\n nprofiles + 2, dtype=int)):\n\n if i == 0 or i == nprofiles + 1:\n continue\n\n\n if filter_width != 0:\n yvec = arrayH[row-filter_width:row+filter_width, :]\n yvec = np.sum(yvec, 0)/filter_width/2\n else:\n yvec = arrayH[row, :]\n\n lc.append(next(lc_jet))\n p01 = np.polyfit(xvec, yvec, 1)\n fit_coefs[0].append(p01)\n\n if remove2ndOrder:\n yvec -= p01[0]*xvec + p01[1]\n\n plt.plot(xvec*1e6, yvec, next(ls_cycle), color=lc[i-1],\n label=str(row))\n\n if not remove2ndOrder:\n plt.plot(xvec*1e6, p01[0]*xvec + p01[1], '--',\n color=lc[i-1], lw=3)\n\n data2saveH = np.c_[data2saveH, yvec]\n header.append(str(row))\n labels_H.append(str(row))\n\n if remove2ndOrder:\n titleH = titleH + ', 2nd order removed'\n plt.legend(title='Pixel Y', loc=0, fontsize=12)\n\n plt.xlabel(r'x [$\\mu m$]', fontsize=18)\n plt.ylabel(zlabel, fontsize=18)\n plt.title(titleH + ', Filter Width = {:d} pixels'.format(filter_width),\n fontsize=20)\n\n if saveFigFlag:\n wpu.save_figs_with_idx(saveFileSuf + '_H')\n\n plt.show(block=False)\n\n header.append(zlabel + ', Filter Width = {:d} pixels'.format(filter_width))\n\n wpu.save_csv_file(data2saveH,\n wpu.get_unique_filename(saveFileSuf +\n '_WF_profiles_H', 'csv'),\n headerList=header)\n\n plt.figure(figsize=(12, 12*9/16))\n plt.imshow(arrayH, cmap='RdGy',\n vmin=wpu.mean_plus_n_sigma(arrayH, -3),\n vmax=wpu.mean_plus_n_sigma(arrayH, 3))\n plt.xlabel('Pixel')\n plt.ylabel('Pixel')\n plt.title(titleH + ', Profiles Position')\n\n currentAxis = plt.gca()\n\n _, lc_jet = wpu.line_style_cycle(['-'], ['o', 's', 'd', '^'],\n ncurves=nprofiles,\n cmap_str='gist_rainbow_r')\n\n for i, row in enumerate(np.linspace(filter_width//2,\n np.shape(arrayV)[0]-filter_width//2-1,\n nprofiles + 2, dtype=int)):\n\n if i == 0 or i == nprofiles + 1:\n continue\n\n currentAxis.add_patch(Rectangle((-.5, row - filter_width//2 - .5),\n np.shape(arrayH)[1], filter_width,\n facecolor=lc[i-1], alpha=.5))\n plt.axhline(row, color=lc[i-1])\n\n if saveFigFlag:\n wpu.save_figs_with_idx(saveFileSuf + '_H')\n\n plt.show(block=True)\n\n # 
Vertical\n if np.all(np.isfinite(arrayV)):\n\n plt.figure(figsize=(12, 12*9/16))\n\n xvec = yyGrid[:, 0]\n data2saveV = np.c_[xvec]\n header = ['y [m]']\n\n\n ls_cycle, lc_jet = wpu.line_style_cycle(['-'], ['o', 's', 'd', '^'],\n ncurves=nprofiles,\n cmap_str='gist_rainbow_r')\n\n lc = []\n labels_V = []\n for i, col in enumerate(np.linspace(filter_width//2,\n np.shape(arrayH)[1]-filter_width//2-1,\n nprofiles + 2, dtype=int)):\n\n if i == 0 or i == nprofiles + 1:\n continue\n\n if filter_width != 0:\n yvec = arrayV[:, col-filter_width:col+filter_width]\n yvec = np.sum(yvec, 1)/filter_width/2\n else:\n yvec = arrayV[:, col]\n\n lc.append(next(lc_jet))\n p10 = np.polyfit(xvec, yvec, 1)\n fit_coefs[1].append(p10)\n\n if remove2ndOrder:\n yvec -= p10[0]*xvec + p10[1]\n\n plt.plot(xvec*1e6, yvec, next(ls_cycle), color=lc[i-1],\n label=str(col))\n\n if not remove2ndOrder:\n plt.plot(xvec*1e6, p10[0]*xvec + p10[1], '--',\n color=lc[i-1], lw=3)\n\n data2saveV = np.c_[data2saveV, yvec]\n header.append(str(col))\n labels_V.append(str(col))\n\n if remove2ndOrder:\n titleV = titleV + ', 2nd order removed'\n\n plt.legend(title='Pixel X', loc=0, fontsize=12)\n\n plt.xlabel(r'y [$\\mu m$]', fontsize=18)\n plt.ylabel(zlabel, fontsize=18)\n\n plt.title(titleV + ', Filter Width = {:d} pixels'.format(filter_width),\n fontsize=20)\n if saveFigFlag:\n wpu.save_figs_with_idx(saveFileSuf + '_Y')\n plt.show(block=False)\n\n header.append(zlabel + ', Filter Width = {:d} pixels'.format(filter_width))\n\n wpu.save_csv_file(data2saveV,\n wpu.get_unique_filename(saveFileSuf +\n '_WF_profiles_V', 'csv'),\n headerList=header)\n\n plt.figure(figsize=(12, 12*9/16))\n plt.imshow(arrayV, cmap='RdGy',\n vmin=wpu.mean_plus_n_sigma(arrayV, -3),\n vmax=wpu.mean_plus_n_sigma(arrayV, 3))\n plt.xlabel('Pixel')\n plt.ylabel('Pixel')\n plt.title(titleV + ', Profiles Position')\n\n currentAxis = plt.gca()\n\n for i, col in enumerate(np.linspace(filter_width//2,\n np.shape(arrayH)[1]-filter_width//2-1,\n nprofiles + 2, dtype=int)):\n\n if i == 0 or i == nprofiles + 1:\n continue\n\n\n currentAxis.add_patch(Rectangle((col - filter_width//2 - .5, -.5),\n filter_width, np.shape(arrayV)[0],\n facecolor=lc[i-1], alpha=.5))\n plt.axvline(col, color=lc[i-1])\n\n if saveFigFlag:\n wpu.save_figs_with_idx(saveFileSuf + '_Y')\n\n plt.show(block=True)\n\n return data2saveH, data2saveV, labels_H, labels_V, fit_coefs\n\n\n# %%\n\ndef integrate_DPC_cumsum(data_DPC, wavelength,\n grazing_angle=0.0, projectionFromDiv=1.0,\n labels=[],\n xlabel='x', ylabel='Height',\n titleStr='', saveFileSuf=''):\n\n ls_cycle, lc_cycle = wpu.line_style_cycle(['-'], ['o', 's', 'd', '^'],\n ncurves=data_DPC.shape[1] - 1,\n cmap_str='gist_rainbow_r')\n\n if grazing_angle//.00001 > 0:\n projection = 1/np.sin(grazing_angle)*projectionFromDiv\n else:\n projection = projectionFromDiv\n\n xvec = data_DPC[:, 0]*projection\n\n plt.figure(figsize=(12, 12*9/16))\n list_integrated = [xvec]\n\n header = [xlabel + ' [m]']\n\n for j_line in range(1, data_DPC.shape[1]):\n\n integrated = (np.cumsum(data_DPC[:, j_line] - np.mean(data_DPC[:, j_line]))\n *(xvec[1] - xvec[0]))\n\n integrated *= -1/2/np.pi*wavelength*np.abs(projection)\n\n # TODO: check here!!\n\n if j_line == 1:\n factor_x, unit_x = wpu.choose_unit(xvec)\n factor_y, unit_y = wpu.choose_unit(integrated)\n\n list_integrated.append(integrated)\n header.append(labels[j_line - 1])\n\n plt.plot(xvec*factor_x,\n integrated*factor_y,\n next(ls_cycle), c=next(lc_cycle),\n label=labels[j_line - 1])\n\n marginx = 
0.1*np.ptp(xvec*factor_x)\n plt.xlim([np.min(xvec*factor_x)-marginx,\n np.max(xvec*factor_x)+marginx])\n plt.xlabel(xlabel + r' [$' + unit_x + ' m$]')\n plt.ylabel(ylabel + r' [$' + unit_y + ' m$]')\n plt.legend(loc=0, fontsize=12)\n\n if grazing_angle//.00001 > 0:\n\n plt.title(titleStr + 'Mirror Height,\\n' +\n 'grazing angle {:.2f} mrad,\\n'.format(grazing_angle*1e3) +\n 'projection due divergence = ' +\n r'$ \\times $ {:.2f}'.format(projectionFromDiv))\n else:\n plt.title(titleStr + 'Integration Cumulative Sum')\n\n plt.tight_layout()\n wpu.save_figs_with_idx(saveFileSuf)\n plt.show()\n\n data2saveV = np.asarray(list_integrated).T\n\n header.append(ylabel + ' [m]')\n\n if grazing_angle//.00001 > 0:\n header.append('grazing_angle = {:.4g}'.format(grazing_angle))\n\n if projectionFromDiv//1 != 1:\n header.append('projection due divergence = ' +\n '{:.2f}x'.format(projectionFromDiv))\n\n wpu.save_csv_file(data2saveV,\n wpu.get_unique_filename(saveFileSuf +\n '_integrated_' + xlabel, 'csv'),\n headerList=header)\n\n return np.asarray(list_integrated).T\n\n\n# %%\n\ndef curv_from_height(height, virtual_pixelsize,\n grazing_angle=0.0, projectionFromDiv=1.0,\n labels=[],\n xlabel='x', ylabel='Curvature',\n titleStr='', saveFileSuf=''):\n\n ls_cycle, lc_cycle = wpu.line_style_cycle(['-'], ['o', 's', 'd', '^'],\n ncurves=height.shape[1] - 1,\n cmap_str='gist_rainbow_r')\n\n if grazing_angle//.00001 > 0:\n projection = 1/np.sin(grazing_angle)*projectionFromDiv\n else:\n projection = projectionFromDiv\n\n projected_pixel = virtual_pixelsize*projection\n xvec = wpu.realcoordvec(height.shape[0]-2, projected_pixel)\n\n print('projected_pixel')\n print(projected_pixel)\n\n plt.figure(figsize=(12, 12*9/16))\n list_curv = [xvec]\n\n header = [xlabel + ' [m]']\n\n for j_line in range(1, height.shape[1]):\n\n curv = np.diff(np.diff(height[:, j_line]))/projected_pixel**2\n\n if j_line == 1:\n factor_x, unit_x = wpu.choose_unit(xvec)\n\n #factor_y, unit_y = wpu.choose_unit(curv)\n\n list_curv.append(curv)\n header.append(labels[j_line - 1])\n\n plt.plot(xvec*factor_x, curv,\n next(ls_cycle), c=next(lc_cycle),\n label=labels[j_line - 1])\n\n marginx = 0.1*np.ptp(xvec*factor_x)\n plt.xlim([np.min(xvec*factor_x)-marginx,\n np.max(xvec*factor_x)+marginx])\n plt.xlabel(xlabel + r' [$' + unit_x + ' m$]')\n plt.ylabel(ylabel + r'[$m^{-1}$]')\n plt.legend(loc=0, fontsize=12)\n\n if grazing_angle//.00001 > 0:\n\n plt.title(titleStr + 'Mirror Curvature,\\n' +\n 'grazing angle {:.2f} mrad,\\n'.format(grazing_angle*1e3) +\n 'projection due divergence = ' +\n r'$ \\times $ {:.2f}'.format(projectionFromDiv))\n else:\n plt.title(titleStr + 'Curvature')\n\n plt.tight_layout()\n wpu.save_figs_with_idx(saveFileSuf)\n plt.show()\n\n data2saveV = np.asarray(list_curv).T\n\n header.append(ylabel + ' [1/m]')\n\n if grazing_angle//.00001 > 0:\n header.append(', grazing_angle = {:.4g}'.format(grazing_angle))\n\n if projectionFromDiv//1 != 1:\n header.append('projection due divergence = ' +\n '{:.2f}x'.format(projectionFromDiv))\n\n wpu.save_csv_file(data2saveV,\n wpu.get_unique_filename(saveFileSuf +\n '_curv_' + xlabel, 'csv'),\n headerList=header)\n\n return np.asarray(list_curv).T\n\n# %%\n\n\ndef _intial_gui_setup(sys_argv):\n\n global inifname # name of .ini file\n inifname = os.curdir + '/.' 
+ os.path.basename(__file__).replace('.py', '.ini')\n\n for i, argv in enumerate(sys_argv):\n print('arg {}: '.format(i) + argv)\n\n if len(sys_argv) == 1:\n\n default_ini = wpu.load_ini_file(inifname)\n p0 = float(default_ini['Parameters']['Photon Energy [eV]'])\n p1 = float(default_ini['Parameters']['grazing angle [mrad]'])\n p2 = int(default_ini['Parameters']['n profiles'])\n p3 = int(default_ini['Parameters']['filter width'])\n p4 = float(default_ini['Parameters']['projection From Divergence'])\n\n if easyqt.get_yes_or_no('Load new files?\\n' +\n '[ESC load file(s) of previous run]'):\n\n fnameH = easyqt.get_file_names(title='Select DPC Horizontal\\n' +\n '(and Vertical if you want)')\n fnameV = None\n\n if len(fnameH) == 1:\n fnameH = fnameH[0]\n wpu.print_blue('MESSAGE: Horiz DPC: Loading ' + fnameH)\n elif len(fnameH) == 0:\n fnameH = None\n elif len(fnameH) == 2:\n [fnameH, fnameV] = fnameH\n wpu.print_blue('MESSAGE: Horiz DPC: Loading ' + fnameH)\n wpu.print_blue('MESSAGE: Vert DPC: Loading ' + fnameV)\n\n if fnameV is None:\n fnameV = easyqt.get_file_names(title='Select DPC Vertical')\n\n if len(fnameV) == 1:\n fnameV = fnameV[0]\n wpu.print_blue('MESSAGE: Vert DPC: Loading ' + fnameV)\n\n elif len(fnameV) == 0:\n fnameV = None\n\n else:\n fnameH = default_ini['Files']['dpc H']\n fnameV = default_ini['Files']['dpc V']\n\n wpu.print_blue('MESSAGE: Horiz DPC: Loading ' + fnameH)\n wpu.print_blue('MESSAGE: Vert DPC: Loading ' + fnameV)\n\n if fnameH == 'None':\n fnameH = None\n if fnameV == 'None':\n fnameV = None\n\n phenergy = easyqt.get_float(\"Enter Photon Energy [KeV]\",\n title='Experimental Values',\n default_value=p0*1e-3)*1e3\n\n grazing_angle = easyqt.get_float('Grazing angle [mrad]\\n' +\n '[0.0 to ignore projection]',\n title='Experimental Values',\n default_value=p1)*1e-3\n\n projectionFromDiv = easyqt.get_float('projection From Divergence\\n' +\n '[Multiplication factor]',\n title='Experimental Values',\n default_value=p4)\n\n nprofiles = easyqt.get_int(\"Number of profiles to plot\",\n title='Experimental Values',\n default_value=p2)\n\n filter_width = easyqt.get_int(\"Width fo uniform filter [pixels]\",\n title='Experimental Values',\n default_value=p3)\n\n remove2ndOrder = easyqt.get_yes_or_no(\"Remove 2nd Order?\",\n title='Experimental Values')\n\n elif len(sys_argv) == 8:\n\n if 'none' in sys_argv[1].lower():\n fnameH = None\n else:\n fnameH = sys_argv[1]\n\n if 'none' in sys_argv[2].lower():\n fnameV = None\n else:\n fnameV = sys_argv[2]\n\n phenergy = float(sys_argv[3])*1e3\n nprofiles = int(sys_argv[4])\n filter_width = int(sys_argv[5])\n grazing_angle = float(sys_argv[6])*1e-3\n projectionFromDiv = float(sys_argv[7])\n\n else:\n\n print('ERROR: wrong number of inputs: {} \\n'.format(len(argv)-1) +\n 'Usage: \\n'\n '\\n' +\n os.path.basename(__file__) + ' : (no inputs) load dialogs \\n'\n '\\n' +\n os.path.basename(__file__) + ' [args] \\n'\n '\\n'\n 'arg1: file name DPC Horiz (type \"None\" '\n ' to ignore it)\\n'\n 'arg2: file name DPC Vert (type \"None\" '\n ' to ignore it)\\n'\n 'arg3: Photon Energy [KeV]\\n'\n 'arg4: Number of profiles to plot\\n'\n 'arg5: Width of uniform filter [pixels]\\n'\n 'arg6: Grazing angle to project coordinates to mirror [mrad], use zero to ignore\\n'\n 'arg7: Projection From Divergence, use 1 to ignore'\n '\\n')\n\n exit(-1)\n\n wpu.set_at_ini_file(inifname, 'Files', 'DPC H', fnameH)\n wpu.set_at_ini_file(inifname, 'Files', 'DPC V', fnameV)\n wpu.set_at_ini_file(inifname, 'Parameters', 'Photon Energy [eV]', phenergy)\n 
wpu.set_at_ini_file(inifname, 'Parameters',\n 'grazing angle [mrad]', grazing_angle*1e3)\n wpu.set_at_ini_file(inifname, 'Parameters',\n 'projection From Divergence', projectionFromDiv)\n wpu.set_at_ini_file(inifname, 'Parameters', 'n profiles', nprofiles)\n wpu.set_at_ini_file(inifname, 'Parameters', 'filter width', filter_width)\n\n wpu.set_at_ini_file(inifname, 'Parameters', 'Remove 2nd Order', remove2ndOrder)\n\n return (fnameH, fnameV,\n phenergy, grazing_angle, projectionFromDiv,\n nprofiles, remove2ndOrder, filter_width)\n\n\n# %% Main functions to be used from the outside\n\ndef dpc_profile_analysis(fnameH, fnameV,\n phenergy,\n grazing_angle=0.0, projectionFromDiv=1.0,\n nprofiles=1,\n remove2ndOrder=False, filter_width=0):\n\n wavelength = wpu.hc/phenergy\n\n if fnameH is not None:\n diffPhaseH, virtual_pixelsize, _ = wpu.load_sdf_file(fnameH)\n\n if fnameV is not None:\n diffPhaseV, virtual_pixelsize, _ = wpu.load_sdf_file(fnameV)\n\n if fnameH is None:\n diffPhaseH = diffPhaseV*np.nan\n\n if fnameV is None:\n diffPhaseV = diffPhaseH*np.nan\n saveFileSuf = fnameH.rsplit('/', 1)[0] + '/profiles/' +\\\n fnameH.rsplit('/', 1)[1]\n saveFileSuf = saveFileSuf.rsplit('_X')[0] + '_profiles'\n else:\n saveFileSuf = fnameV.rsplit('/', 1)[0] + '/profiles/' +\\\n fnameV.rsplit('/', 1)[1]\n saveFileSuf = saveFileSuf.rsplit('_Y')[0] + '_profiles'\n\n if not os.path.exists(saveFileSuf.rsplit('/', 1)[0]):\n os.makedirs(saveFileSuf.rsplit('/', 1)[0])\n\n (dataH, dataV,\n labels_H, labels_V,\n fit_coefs) = _n_profiles_H_V(diffPhaseH,\n diffPhaseV,\n virtual_pixelsize,\n 'DPC [rad/m]',\n titleH='WF DPC Horz',\n titleV='WF DPC Vert',\n saveFileSuf=saveFileSuf,\n nprofiles=nprofiles,\n remove2ndOrder=remove2ndOrder,\n filter_width=filter_width)\n\n fit_coefsH = np.array(fit_coefs[0])\n fit_coefsV = np.array(fit_coefs[1])\n\n print(fit_coefsH)\n print(fit_coefsV)\n\n if __name__ == '__main__':\n wpu.log_this(preffname=saveFileSuf, inifname=inifname)\n\n if fnameH is not None:\n\n radii_fit_H = (2*np.pi/wavelength/fit_coefsH[:, 0])\n\n wpu.print_blue('MESSAGE: Radius H from fit profiles: ')\n print(radii_fit_H)\n wpu.log_this('radius fit Hor = ' + str(radii_fit_H))\n\n integratedH = integrate_DPC_cumsum(dataH, wavelength,\n #grazing_angle=grazing_angle,\n xlabel='x',\n labels=labels_H,\n titleStr='Horizontal, ',\n saveFileSuf=saveFileSuf + '_X')\n\n curv_H = curv_from_height(integratedH, virtual_pixelsize[0],\n #grazing_angle=grazing_angle,\n #projectionFromDiv=projectionFromDiv,\n xlabel='x',\n labels=labels_H,\n titleStr='Horizontal, ',\n saveFileSuf=saveFileSuf + '_X')\n\n if fnameV is not None:\n\n radii_fit_V = (2*np.pi/wavelength/fit_coefsV[:, 0])\n\n wpu.print_blue('MESSAGE: Radius V from fit profiles: ')\n print(radii_fit_V)\n wpu.log_this('radius fit Vert = ' + str(radii_fit_V))\n\n integratedV = integrate_DPC_cumsum(dataV, wavelength,\n grazing_angle=grazing_angle,\n projectionFromDiv=projectionFromDiv,\n xlabel='y',\n labels=labels_V,\n titleStr='Vertical, ',\n saveFileSuf=saveFileSuf + '_Y')\n\n curv_V = curv_from_height(integratedV, virtual_pixelsize[1],\n grazing_angle=grazing_angle,\n projectionFromDiv=projectionFromDiv,\n xlabel='y',\n labels=labels_V,\n titleStr='Vertical, ',\n saveFileSuf=saveFileSuf + '_Y')\n\nif __name__ == '__main__':\n\n (fnameH, fnameV,\n phenergy, grazing_angle, projectionFromDiv,\n nprofiles, remove2ndOrder, filter_width) = _intial_gui_setup(sys.argv)\n\n dpc_profile_analysis(fnameH, fnameV,\n phenergy, grazing_angle,\n projectionFromDiv, nprofiles, 
remove2ndOrder, filter_width)\n\n\n\n\n","sub_path":"wavepytools/imaging/single_grating/dpc_profile_analysis_no_filter.py","file_name":"dpc_profile_analysis_no_filter.py","file_ext":"py","file_size_in_byte":26715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"497301702","text":"import pytest\nimport allure\nimport json\nimport requests\nimport time\nfrom selenium import webdriver\n\n\n@allure.step(\"Waiting for resource availability {url}\")\ndef url_data(url, timeout=10):\n while timeout:\n response = requests.get(url)\n if not response.ok:\n time.sleep(1)\n timeout -= 1\n else:\n if 'video' in url:\n return response.content\n else:\n return response.text\n return None\n\n\n@pytest.hookimpl(tryfirst=True, hookwrapper=True)\ndef pytest_runtest_makereport(item):\n outcome = yield\n rep = outcome.get_result()\n if rep.outcome != 'passed':\n item.status = 'failed'\n else:\n item.status = 'passed'\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--browser_name\",\n action=\"store\",\n default=\"chrome\",\n help=\"Choose browser: chrome or firefox\")\n parser.addoption(\n \"--url\",\n action=\"store\",\n default=\"https://demo.opencart.com/\",\n help=\"Enter url\")\n parser.addoption(\n \"--executor\",\n action=\"store\",\n default=\"127.0.0.1\",\n help=\"Enter your host\")\n parser.addoption(\n \"--browserVersion\",\n action=\"store\",\n default=\"87.0\",\n help=\"Enter browser version\")\n parser.addoption(\n \"--vnc\",\n action=\"store_true\",\n default=False,\n help=\"Choose True or False for option\")\n parser.addoption(\n \"--logs\",\n action=\"store_true\",\n default=False,\n help=\"Choose True or False for option\")\n parser.addoption(\n \"--videos\",\n action=\"store_true\",\n default=False,\n help=\"Choose True or False for option\")\n\n\n@pytest.fixture()\ndef url(request):\n return request.config.getoption(\"--url\")\n\n\n@pytest.fixture()\ndef browser(request):\n \"\"\" Launch the driver depending on the selected browser \"\"\"\n browser_name = request.config.getoption(\"--browser_name\")\n executor = request.config.getoption(\"--executor\")\n bversion = request.config.getoption(\"--browserVersion\")\n vnc = request.config.getoption(\"--vnc\")\n logs = request.config.getoption(\"--logs\")\n videos = request.config.getoption(\"--videos\")\n\n test_name = request.node.name\n module_name = request.module.__name__\n\n caps = {\n \"browserName\": browser_name,\n \"browserVersion\": bversion,\n \"screenResolution\": \"1440x900\",\n \"name\": test_name + module_name,\n \"selenoid:options\": {\n \"enableVNC\": vnc,\n \"enableVideo\": videos,\n \"enableLog\": logs\n }}\n\n browser = webdriver.Remote(command_executor=f\"http://{executor}:4444/wd/hub\",\n desired_capabilities=caps)\n\n # Attach browser data\n allure.attach(\n name=browser.session_id,\n body=json.dumps(browser.desired_capabilities),\n attachment_type=allure.attachment_type.JSON)\n\n # Add environment info to allure-report\n with open(\"allure-report/environment.xml\", \"w+\") as file:\n file.write(f\"\"\"<environment>\n <parameter>\n <key>Browser</key>\n <value>{browser_name}</value>\n </parameter>\n <parameter>\n <key>Browser.Version</key>\n <value>{bversion}</value>\n </parameter>\n <parameter>\n <key>Executor</key>\n <value>http://{executor}:4444/wd/hub</value>\n </parameter>\n </environment>\n \"\"\")\n\n browser.maximize_window()\n yield browser\n browser.quit()\n if request.node.status != 'passed':\n if logs:\n allure.attach(\n name=\"selenoid_log_\" + browser.session_id,\n body=url_data(f\"{executor}/logs/{browser.session_id}.log\"),\n attachment_type=allure.attachment_type.TEXT)\n if videos:\n allure.attach(\n 
body=url_data(f\"http://{executor}:8080/video/{browser.session_id}.mp4\"),\n name=\"video_for_\" + browser.session_id,\n attachment_type=allure.attachment_type.MP4)\n\n if videos and url_data(f\"http://{executor}:8080/video/{browser.session_id}.mp4\"):\n requests.delete(url=f\"http://{executor}:8080/video/{browser.session_id}.mp4\")\n if logs and url_data(f\"{executor}/logs/{browser.session_id}.log\"):\n requests.delete(url=f\"{executor}/logs/{browser.session_id}.log\")\n","sub_path":"lesson18/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":4492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"583121584","text":"import csv\nimport obot\nimport commands\nimport sqlite3\nimport time\nimport random\nimport praw\nimport sys\nimport re\nfrom random import randint\nimport random\nfrom datetime import datetime\nfrom pytz import timezone\n\nshutdown = False\n\n\ndef load_previous_comments():\n db = sqlite3.connect('Rdatabase.db')\n cursor = db.cursor()\n coms = set()\n cursor.execute(\"SELECT commentid FROM Rdatabase where Time >= (SELECT max(time) from Rdatabase) - 172800\")\n results = cursor.fetchall()\n for row in results:\n coms.add(row[0])\n return coms\n\n\ndef load_ai_phrase():\n with open('thoughts.txt', 'r') as f:\n aiphrase = [line.strip() for line in f]\n return aiphrase\n\n\ndef load_random_phrase():\n with open('Commands.txt', 'r', encoding='utf8') as f:\n randomcmd = [line.strip() for line in f]\n return randomcmd\n\n\ndef load_ignore():\n with open('ignore.txt', 'r', encoding='utf8') as f:\n ignorelist = [line.strip() for line in f]\n return ignorelist\n\n\ndef load_pignore():\n with open('pyrrhaignore.txt', 'r', encoding='utf8') as f:\n ignorelist = [line.strip() for line in f]\n return ignorelist\n\n\ndef get_my_cake_day(username):\n try:\n redditor = r.redditor(username)\n return time.strftime(\"%m%d\", time.gmtime(redditor.created_utc))\n except praw.errors.NotFound:\n print(\"No Cake Day\")\n\n\ndef ship_search(name1, name2):\n response = \"I have no data on \" + name1 + \" X \" + name2 + \". [You could fill in the shipsheet?](https://docs.google.com/spreadsheets/d/1JpinKp5XW6htsPAri0kRMGKrxQwi458YU6HY734wuwE/edit#gid=0)\"\n with open('Main.csv', 'r') as f:\n reader = csv.reader(f)\n columnlist = next(reader)\n if name2 in columnlist:\n x = columnlist.index(name2)\n try:\n for row in reader:\n if str(row[0]) == name1:\n if str(row[x]).isspace():\n response1 = \"BLANK DATA. [You could fill in the shipsheet?](https://docs.google.com/spreadsheets/d/1JpinKp5XW6htsPAri0kRMGKrxQwi458YU6HY734wuwE/edit#gid=0)\"\n else:\n response1 = (row[x])\n print(row[x])\n response = name1 + \" X \" + name2 + \" is called: \" + response1\n except:\n pass\n return response\n\n\ndef ot3_search(name1, name2, name3):\n try:\n with open('OT3.csv', 'r') as f:\n reader = csv.reader(f, delimiter=',')\n fulllist = (list(reader))\n for sublist in fulllist:\n if name1 in sublist:\n if name2 in sublist:\n if name3 in sublist:\n othervalue = str(sublist).encode('utf-8').strip()\n othervalue = str(othervalue.decode('utf-8'))\n commaindex = othervalue.index(',') - 1\n othervalue = othervalue[2:commaindex]\n response = othervalue\n response = name1 + \" X \" + name2 + \" X \" + name3 + \" is called: \" + response\n except:\n response = \"I have no data on \" + name1 + \" X \" + name2 + \" X \" + name3 + \". 
[You could fill in the shipsheet?](https://docs.google.com/spreadsheets/d/1JpinKp5XW6htsPAri0kRMGKrxQwi458YU6HY734wuwE/edit#gid=0)\"\n return response\n\n\ndef proccess_comments(current, comment, choice):\n # Add to the suggestion text file\n if current.startswith(\"suggestion\"):\n reply = \"[You can make Suggestions here!](https://docs.google.com/forms/d/e/1FAIpQLScp8yibCZRKqNcvvUT69VfEs2inp4DvNFvakGWubIAyv8D4EA/viewform?usp=sf_link) \\n \\n Pennybot has saved this comment as well! \\n \\n ^^^^^^^^/u/weerdo5255 \"\n filesug = open(\"Suggestions.txt\", \"a\")\n filesug.write(str(comment.body) + \"FROM:\" + str(comment.author) + \"\\n\")\n filesug.close()\n\n # Access the AI Function\n elif current.startswith(\"thoughts\"):\n thoughtstring = \" \"\n randomnum = (random.randint(0, 3))\n try:\n x = 0\n phrases = load_ai_phrase()\n while (x <= randomnum):\n thoughtstring = thoughtstring + random.choice(phrases) + \"\\n\"\n x += 1\n except:\n thoughtstring = (\"I don't have any thoughts at the moment.\")\n\n reply = thoughtstring\n\n elif current.startswith(\"shutdown\") or current.startswith(\"shut down\"):\n print(comment.author)\n if comment.author in mods or comment.author == \"Weerdo5255\":\n print(\"Emergency Shutdown Initiated! At: \" + time.asctime(time.localtime(\n time.time())))\n sys.exit()\n else:\n reply = \"You are not Pyrrha!\"\n\n elif current.startswith(\"statistics\"):\n db = sqlite3.connect(\"Rdatabase.db\")\n db.text_factory = str\n cursor = db.cursor()\n cursor.execute(\n \"SELECT COUNT(Body), Body FROM Rdatabase WHERE Body is not null AND Author = 'PennyBotV2' GROUP BY Body ORDER BY COUNT(Body) DESC\")\n rows = cursor.fetchall()\n rows_result = [row for row in rows]\n rows_result = rows_result[1:6]\n print(rows_result)\n stringout = \"\"\n # reply = '\\n \\n '.join(str(p.replace(\"(\")) for p in rows_result)\n for i in rows_result:\n string = str(i)\n string = string.replace('(', \"\", 1)\n string = string.replace('\"', \"\", 1)\n string = string.replace(\"'\", \"\", 1)\n string = string.replace(',', \"|\", 1)\n\n string = string[:-2]\n stringout += (string + \"\\n\")\n reply = \"Count | Response \\n :--|:-- \\n\" + stringout\n db.close()\n\n elif current.startswith(\"meat person list\"):\n db = sqlite3.connect(\"Cdatabase.db\")\n db.text_factory = str\n cursor = db.cursor()\n cursor.execute(\n \"SELECT Distinct Comment_Author, count(Body) FROM Cdatabase WHERE Body LIKE '%pennybot,%' OR Body LIKE '%Pennybot,%' OR Body LIKE '%PennyBot,%' OR Body LIKE '%PB,%' OR Body LIKE '%pb,%' OR Body LIKE '%pennybotv2,%' GROUP BY Comment_Author ORDER BY count(Body) DESC;\")\n rows = cursor.fetchall()\n db.close()\n rows_result = [row for row in rows]\n rows_result = rows_result[3:10]\n print(rows_result)\n stringout = \"\"\n # reply = '\\n \\n '.join(str(p.replace(\"(\")) for p in rows_result)\n for i in rows_result:\n string = str(i)\n string = string.replace('(', \"\", 1)\n string = string.replace('\"', \"\", 1)\n string = string.replace(\"'\", \"\", 1)\n string = string.replace(\"'\", \"\", 1)\n string = string.replace(',', \"|\", 1)\n\n string = string[:-1]\n stringout += (string + \"\\n\")\n reply = \"Meat Person | Rank \\n :--|:-- \\n\" + stringout\n\n\n elif current.startswith(\"ignore\") or current.startswith(\"mute\"):\n submission = comment.submission.id\n print(submission)\n postauthor = comment.submission.author\n print(postauthor)\n if str(comment.author) in mods or str(comment.author) == \"Weerdo5255\":\n reply = \"Sorry! 
I'll be quiet!\"\n file = open(\"ignore.txt\", \"a\")\n file.write(str(comment.submission.id) + \"\\n\")\n file.close()\n elif str(comment.author) == str(postauthor):\n reply = \"Sorry! I'll be quiet!\"\n file = open(\"ignore.txt\", \"a\")\n file.write(str(comment.submission.id) + \"\\n\")\n file.close()\n else:\n reply = \"You do not have sufficient privileges for that action.\"\n\n elif current.startswith(\"pennycheck\"):\n lookingfor = \"pennycheck\"\n indexcount = current.index(lookingfor) + 10\n current = current.lstrip(current[:indexcount])\n current = current.strip()\n reply = \"[Here are the Images I could Find!](http://iqdb.org/?url=\" + current + \")\"\n\n elif current.startswith(\"pennykarma\"):\n reply = \"Here is the [Karma Decay](http://karmadecay.com/\" + str(\n comment.submission.url) + \")\"\n\n elif current.startswith(\"random\"):\n phrases = load_random_phrase()\n current = str(random.choice(phrases))\n current = current.lower()\n print(current)\n reply = commands.penny_commands(current)\n reply = \"My \" + current + \" Command: \" + reply\n\n elif current.startswith(\"<\"):\n try:\n index1 = current.index(' ')\n index2 = current.index(' ', index1 + 1)\n index3 = current.index(' ', index2 + 1)\n index4 = current.index('>')\n name1 = current[1:index1]\n name2 = current[index2 + 1:index3]\n name3 = current[index3 + 3:index4]\n reply = ot3_search(name1.title(), name2.title(), name3.title())\n except:\n index1 = current.index(' ')\n index2 = current.index(' ', index1 + 1)\n index4 = current.index('>')\n name1 = current[1:index1]\n name2 = current[index2 + 1:index4]\n reply = ship_search(name1.title(), name2.title())\n\n else:\n reply = commands.penny_commands(current, choice)\n\n return reply\n\n\ndef find_mispelling(comment, sid):\n reply = \"\"\n respond = False\n pyrrhaignoreposts = load_pignore()\n if sid not in pyrrhaignoreposts:\n if \"phyrra\" in comment:\n reply = \"[Phyrra](http://streamable.com/c0fel)? Do you mean Pyrrha?\"\n respond = True\n elif \"pyrah\" in comment:\n reply = \"[Pyrah](https://streamable.com/rpnvt)? Do you mean Pyrrha?\"\n respond = True\n elif \"phyrrah\" in comment:\n reply = \"[Phyrrah](http://streamable.com/jsf47)? Do you mean Pyrrha?\"\n respond = True\n elif \"phyrrha\" in comment:\n reply = \"[Phyrrha](https://streamable.com/60hdz)? Do you mean Pyrrha?\"\n respond = True\n elif \"phryrra\" in comment:\n reply = \"[Phryrra](http://streamable.com/jc9af)? Do you mean Pyrrha?\"\n respond = True\n elif \"pyhrra\" in comment:\n reply = \"[Pyhrra](http://streamable.com/11tag)? Do you mean Pyrrha?\"\n respond = True\n elif \"pyrrah\" in comment:\n reply = \"[Pyrrah](http://streamable.com/ks8mf)? Do you mean Pyrrha?\"\n respond = True\n elif \"phrrya\" in comment:\n reply = \"[Phrrya](http://streamable.com/t4hg5)? Do you mean Pyrrha?\"\n respond = True\n elif \"pyrhha\" in comment:\n reply = \"[Pyrhha](http://streamable.com/ovdli)? Do you mean Pyrrha?\"\n respond = True\n elif \"pirrah\" in comment:\n reply = \"[Pirrah](http://streamable.com/nm2lz)? Do you mean Pyrrha?\"\n respond = True\n elif \"piera\" in comment:\n reply = \"[Piera](http://streamable.com/8aken)? Do you mean Pyrrha?\"\n respond = True\n elif \"pyra\" in comment:\n reply = \"[Pyra](http://streamable.com/ys90o)? Do you mean Pyrrha?\"\n respond = True\n elif \"pyhra\" in comment:\n reply = \"[Pyhra](http://streamable.com/q4vm1)? Do you mean Pyrrha?\"\n respond = True\n elif \"pierra\" in comment:\n reply = \"[Pierra](http://streamable.com/h8qxx)? 
Do you mean Pyrrha?\"\n respond = True\n elif \"pierah\" in comment:\n reply = \"[Pierah](http://streamable.com/gkd5o)? Do you mean Pyrrha?\"\n respond = True\n elif \"priah\" in comment:\n reply = \"[Priah](http://streamable.com/qcp0p)? Do you mean Pyrrha?\"\n respond = True\n elif \"phyrria\" in comment:\n reply = \"[Phyrria](http://streamable.com/8hqps)? Do you mean Pyrrha?\"\n respond = True\n elif \"pyrra\" in comment:\n reply = \"[Pyrra](http://streamable.com/d4nnu)? Do you mean Pyrrha?\"\n respond = True\n elif \"pyrhaa\" in comment:\n reply = \"[Pyrhaa](http://streamable.com/iiz8c)? Do you mean Pyrrha?\"\n respond = True\n elif \"pyyra\" in comment:\n reply = \"[Pyyra](http://streamable.com/ww1gk)? Do you mean Pyrrha?\"\n respond = True\n elif \"pyrrea\" in comment:\n reply = \"[Pyrrea](http://streamable.com/cyehb)? Do you mean Pyrrha?\"\n respond = True\n elif \"pureha\" in comment:\n reply = \"[Pureha](http://streamable.com/inysv)? Do you mean Pyrrha?\"\n respond = True\n elif \"pharah\" in comment:\n reply = \"[Pharah](http://streamable.com/i0ttw)? Do you mean Pyrrha?\"\n respond = True\n elif \"pharaoh\" in comment:\n reply = \"[Pharaoh](http://streamable.com/v12ah)? Do you mean Pyrrha?\"\n respond = True\n elif \"pyhhra\" in comment:\n reply = \"[Pyhhra](http://streamable.com/clfwa)? Do you mean Pyrrha?\"\n respond = True\n elif \"pyrhha\" in comment:\n reply = \"[Pyrhha](http://streamable.com/rmn9d)? Do you mean Pyrrha?\"\n respond = True\n elif \"pyhraa\" in comment:\n reply = \"[Pyhraa](http://streamable.com/we8bd)? Do you mean Pyrrha?\"\n respond = True\n elif \"pyyrah\" in comment:\n reply = \"[Pyyrah](http://streamable.com/lsjn2)? Do you mean Pyrrha?\"\n respond = True\n elif \"phyyra\" in comment:\n reply = \"[Phyyra](http://streamable.com/x8i9j)? Do you mean Pyrrha?\"\n respond = True\n elif \"pryyha\" in comment:\n reply = \"[Pryyha](http://streamable.com/5wbug)? Do you mean Pyrrha?\"\n respond = True\n elif \"pyyrha\" in comment:\n reply = \"[Pyyrha](http://streamable.com/34og7)? Do you mean Pyrrha?\"\n respond = True\n elif \"phyra\" in comment:\n reply = \"[Phyra](https://streamable.com/3nbyt)? Do you mean Pyrrha?\"\n respond = True\n elif \"prryha\" in comment:\n reply = \"[Prryha](http://streamable.com/0sj7t)? Do you mean Pyrrha?\"\n respond = True\n elif \"pyraah\" in comment:\n reply = \"[Pyraah](http://streamable.com/srreq)? Do you mean Pyrrha?\"\n respond = True\n elif \"pearhat\" in comment:\n reply = \"[Pearhat](http://streamable.com/i8z81)? Do you mean Pyrrha?\"\n respond = True\n elif \"pyyrahe\" in comment:\n reply = \"[Pyyrahe](http://streamable.com/upyvf)? Do you mean Pyrrha?\"\n respond = True\n elif \"purra\" in comment:\n reply = \"[Purra](http://streamable.com/pwx3t)? Do you mean Pyrrha?\"\n respond = True\n elif \"prhhya\" in comment:\n reply = \"[Prhhya](http://streamable.com/8c471)? Do you mean Pyrrha?\"\n respond = True\n elif \"pyrrahe\" in comment:\n reply = \"[Pyrrahe](http://streamable.com/woxdj)? 
Do you mean Pyrrha?\"\n respond = True\n elif \"ilya\" in comment:\n reply = \"[What, oh.](https://i.imgur.com/dWoj8oX.gifv) Do you mean Ilia?\"\n respond = True\n elif \"ileah\" in comment:\n reply = \"[What, oh.](https://i.imgur.com/dWoj8oX.gifv) Do you mean Ilia?\"\n respond = True\n elif \"ilea\" in comment:\n reply = \"[What, oh.](https://i.imgur.com/dWoj8oX.gifv) Do you mean Ilia?\"\n respond = True\n elif \"iliah\" in comment:\n reply = \"[What, oh.](https://i.imgur.com/dWoj8oX.gifv) Do you mean Ilia?\"\n respond = True\n if respond is True:\n file = open(\"pyrrhaignore.txt\", \"a\")\n file.write(sid + \"\\n\")\n file.close()\n return reply\n\n\ndef find_penny(comment):\n replied = False\n ignoreposts = load_ignore()\n author = str(comment.author)\n sid = comment.submission.id\n if sid not in ignoreposts:\n response = []\n commentlist = str(comment.body).splitlines(True)\n if \"PennyBotV2\" in author:\n print(\"My Own Comment, ignoring.\")\n else:\n for current in commentlist:\n # Look at each line of the comment body, lower-case it, and strip out 'v2'\n current = current.lower()\n current = current.replace(\"v2\", \"\")\n # Scan for mention of pennybot and respond\n if \"pennybot,\" in current:\n lookingfor = \"pennybot,\"\n # Remove 'pennybot' from the string and start scanning for the response\n indexcount = current.index(lookingfor) + 9\n current = current.lstrip(current[:indexcount])\n current = current.strip()\n try:\n choice = int(re.search(r'\\d+', current).group())\n print(choice)\n except:\n choice = (randint(0, 9))\n replied = True\n txtreply = proccess_comments(current, comment, choice)\n response.append(txtreply)\n elif \"pb,\" in current:\n lookingfor = \"pb,\"\n # Remove 'pb' from the string and start scanning for the response\n indexcount = current.index(lookingfor) + 3\n current = current.lstrip(current[:indexcount])\n current = current.strip()\n try:\n choice = int(re.search(r'\\d+', current).group())\n print(choice)\n except:\n choice = (randint(0, 9))\n replied = True\n txtreply = proccess_comments(current, comment, choice)\n response.append(txtreply)\n else:\n txtreply = find_mispelling(current, sid)\n if txtreply != \"\":\n replied = True\n response.append(txtreply)\n\n if replied:\n cake = get_my_cake_day(str(comment.author))\n now_utc = datetime.now(timezone('UTC'))\n now_pacific = now_utc.astimezone(timezone('US/Pacific'))\n nowp = now_pacific.strftime(\"%m%d\")\n if nowp == cake:\n string = \"\"\n for x in response:\n if x is not None:\n string += x + \" \\n \\n\"\n try:\n if string != \"\":\n comment.reply(string + \"\\n \\n Pennybot wishes you a Happy Cake Day as well!\")\n print(\n \"Found a Penny comment at: \" + time.asctime(time.localtime(\n time.time())) + \"\\nIn thread: \" + comment.submission.shortlink + \" \\nI responded with:\" + \"\\n\" + string)\n except:\n print(\"Couldn't respond at: \" + time.asctime(time.localtime(\n time.time())) + \"\\nIn thread: \" + comment.submission.shortlink)\n else:\n string = \"\"\n for x in response:\n if x is not None:\n string += str(x) + \" \\n \\n\"\n try:\n comment.reply(string)\n print(\n \"Found a Penny comment at: \" + time.asctime(time.localtime(\n time.time())) + \"\\nIn thread: \" + comment.submission.shortlink + \" \\nI responded with:\" + \"\\n\" + string)\n except:\n print(\"Couldn't respond at: \" + time.asctime(time.localtime(\n time.time())) + \"\\nIn thread: \" + comment.submission.shortlink)\n\n\n\ndef com_stream(subreddit, comdone):\n for comment in subreddit.stream.comments():\n if not 
str(comment.id) in comdone:\n db = sqlite3.connect(\"Rdatabase.db\")\n db.text_factory = str\n cursor = db.cursor()\n cursor.execute('INSERT INTO Rdatabase VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',\n (\n str(comment.submission.title), str(comment.submission.id),\n str(comment.submission.shortlink),\n str(comment.submission.score), str(comment.id), str(comment.score),\n int(comment.created_utc),\n str(comment.body), str(comment.author)))\n db.commit()\n comdone.add(comment.id)\n db = sqlite3.connect(\"Cdatabase.db\")\n db.text_factory = str\n cursor = db.cursor()\n cursor.execute('INSERT INTO Cdatabase VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',\n (str(comment.submission.title), str(comment.submission.id), str(comment.submission.permalink),\n str(comment.submission.shortlink), str(comment.submission.author), str(comment.author), str(comment.id),\n int(comment.created_utc), str(comment.body), int(comment.score),\n str(comment.author_flair_css_class), int(comment.created_utc)))\n db.commit()\n # print(str(comment.body).encode(\"utf-8\"))\n find_penny(comment)\n\n\n\nwhile True:\n try:\n mods = []\n r = obot.login()\n subredditlist = ['fnki','rwby']\n for sub in subredditlist:\n for moderator in r.subreddit(sub).moderator():\n mods.append(str(moderator))\n subjoin = \"+\"\n subreddit = r.subreddit(subjoin.join(subredditlist))\n print(\"The \" + str(subreddit) + \" Moderators: \" + str(mods))\n com_stream(subreddit, load_previous_comments())\n except Exception as e:\n print(e)\n print(\"Waiting 20 seconds to restart\")\n time.sleep(20)\n pass\n","sub_path":"run_comments.py","file_name":"run_comments.py","file_ext":"py","file_size_in_byte":21512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"26943909","text":"import threading\nimport CrawlerRunner\n\n\ndef run(begin_index, length, filename):\n CrawlerRunner.CrawlerRunner.main_function(begin_index, length, filename)\n\n\ndef create_thread(target, begin_index, length, filename):\n t = threading.Thread(target=target, args=(begin_index, length, filename))\n t.start()\n\n\ndef start(number_of_threading, first_index, each_data_length):\n \"\"\"Create and start the worker threads\"\"\"\n for i in range(0, number_of_threading):\n first_index_for_each_one = each_data_length * i + first_index # compute the starting index for each thread\n create_thread(run, first_index_for_each_one, each_data_length, './data/'+str(first_index)+\"/Descriptor\"+str(i)+\".csv\")\n\n\nif __name__ == '__main__':\n number_of_threading = input(\"How many threads do you want? (10~100): \")\n first_index = input(\"Which 'MC number' in the molecule library do you want to start from? (10000~500000): \")\n each_data_length = input(\"How many molecules should each thread fetch before stopping? (10~100, keep it under 1000: larger values make the CSV hard to read and write): \")\n start(int(number_of_threading), int(first_index), int(each_data_length))\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"326575063","text":"import numpy as np\nimport tensorflow as tf\n\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Activation\nfrom keras.optimizers import SGD\nfrom keras.backend import get_session\n\nmodel = Sequential()\nmodel.add(Dense(100, input_shape=(3,)))\nmodel.add(Activation('relu'))\nmodel.add(Dense(100, input_shape=(3,)))\nmodel.add(Activation('relu'))\nmodel.add(Dense(100, input_shape=(3,)))\nmodel.add(Activation('relu'))\nmodel.add(Dense(100, 
input_shape=(3,)))\nmodel.add(Activation('relu'))\nmodel.add(Dense(100, input_shape=(3,)))\nmodel.add(Activation('relu'))\nmodel.add(Dense(1))\n\nmodel.summary()\n\nmodel.compile(loss='mean_squared_error', optimizer=\"adam\")\n\n# Must be in this order to enable automatic discovery\nmodel.model._make_train_function()\nmodel.model._make_predict_function()\n\n# Make sure weight assign placeholders are created\nweights = model.get_weights()\nmodel.set_weights(weights)\n\ntf.train.write_graph(get_session().graph.as_graph_def(), './', 'NN1.pb', as_text=False)\n","sub_path":"TCC/Results/Models/NN1.py","file_name":"NN1.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"56785428","text":"#!/usr/bin/env python\n# https://codingcompetitions.withgoogle.com/codejam/round/0000000000876ff1\n\ndef solve(TC):\n\n R, C = TC.split(' ')\n S = \"\\n\"\n for r in range(int(R)):\n S1 = \"\"\n S2 = \"\"\n for c in range(int(C)):\n if c == 0 and r == 0:\n S1 = \"..\" \n S2 = \"..\"\n else:\n S1 += \"+-\"\n S2 += \"|.\"\n S += S1+\"+\\n\"\n S += S2+\"|\\n\"\n\n for c in range(int(C)):\n S += \"+-\"\n S += \"+\"\n\n return S\n\nif __name__ == \"__main__\":\n T = int(input())\n for t in range(1, T+1):\n TC = input()\n print(\"Case #%i: %s\" % (t, solve(TC)))\n","sub_path":"2022/qualification/punched_cards.py","file_name":"punched_cards.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"603107218","text":"# Copyright 2017 AT&T Corporation.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_log import log\n\nfrom tempest.common import utils\nfrom tempest.lib.common.utils import data_utils\nfrom tempest.lib.common.utils import test_utils\nfrom tempest.lib import decorators\n\nfrom patrole_tempest_plugin import rbac_exceptions\nfrom patrole_tempest_plugin import rbac_rule_validation\nfrom patrole_tempest_plugin.tests.api.network import rbac_base as base\n\nLOG = log.getLogger(__name__)\n\n\nclass NetworkSegmentsRbacTest(base.BaseNetworkRbacTest):\n\n @classmethod\n def skip_checks(cls):\n super(NetworkSegmentsRbacTest, cls).skip_checks()\n if not utils.is_extension_enabled('multi-provider', 'network'):\n msg = \"multi-provider extension not enabled.\"\n raise cls.skipException(msg)\n\n @classmethod\n def resource_setup(cls):\n super(NetworkSegmentsRbacTest, cls).resource_setup()\n # Find the network type that is supported by the current cloud by\n # checking which network type other networks currently have. 
This is\n # done because there is no tempest.conf option enumerating supported\n # network types.\n networks = cls.networks_client.list_networks()['networks']\n network_types = [n['provider:network_type'] for n in networks\n if n['provider:network_type'] != 'flat']\n if not network_types:\n raise cls.skipException(\n 'Could not find network with provider:network_type that is '\n 'not \"flat\".')\n cls.network_type = network_types[0]\n\n def _create_network_segments(self):\n segments = [{'provider:network_type': self.network_type},\n {'provider:network_type': self.network_type}]\n\n body = self.networks_client.create_network(\n name=data_utils.rand_name(self.__class__.__name__),\n segments=segments)\n network = body['network']\n self.addCleanup(test_utils.call_and_ignore_notfound_exc,\n self.networks_client.delete_network,\n network['id'])\n return network\n\n @rbac_rule_validation.action(service=\"neutron\",\n rules=[\"create_network\",\n \"create_network:segments\"])\n @decorators.idempotent_id('9e1d0c3d-92e3-40e3-855e-bfbb72ea6e0b')\n def test_create_network_segments(self):\n \"\"\"Create network with segments.\n\n RBAC test for the neutron create_network:segments policy\n \"\"\"\n with self.override_role():\n self._create_network_segments()\n\n @rbac_rule_validation.action(service=\"neutron\",\n rules=[\"get_network\", \"update_network\",\n \"update_network:segments\"],\n expected_error_codes=[404, 403, 403])\n @decorators.idempotent_id('0f45232a-7b59-4bb1-9a91-db77d0a8cc9b')\n def test_update_network_segments(self):\n \"\"\"Update network segments.\n\n RBAC test for the neutron update_network:segments policy\n \"\"\"\n network = self._create_network_segments()\n new_segments = [{'provider:network_type': self.network_type}]\n\n with self.override_role():\n self.networks_client.update_network(network['id'],\n segments=new_segments)\n\n @rbac_rule_validation.action(service=\"neutron\",\n rules=[\"get_network\",\n \"get_network:segments\"],\n expected_error_codes=[404, 403])\n @decorators.idempotent_id('094ff9b7-0c3b-4515-b19b-b9d2031337bd')\n def test_show_network_segments(self):\n \"\"\"Show network segments.\n\n RBAC test for the neutron get_network:segments policy\n \"\"\"\n network = self._create_network_segments()\n\n with self.override_role():\n body = self.networks_client.show_network(network['id'],\n fields='segments')\n response_network = body['network']\n\n # If user does not have access to the network segments attribute,\n # no NotFound or Forbidden exception are thrown. Instead,\n # the response will have an empty network body only.\n if not response_network:\n LOG.info(\"NotFound or Forbidden exception are not thrown when \"\n \"role doesn't have access to the endpoint. 
Instead, \"\n \"the response will have an empty network body.\")\n raise rbac_exceptions.RbacEmptyResponseBody()\n","sub_path":"patrole_tempest_plugin/tests/api/network/test_network_segments_rbac.py","file_name":"test_network_segments_rbac.py","file_ext":"py","file_size_in_byte":5209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"556676273","text":"from django.conf.urls import include, url\nfrom blog.views import post_list, post_list_category, post_detail\nfrom django.views.generic.base import RedirectView\n\nurlpatterns = [\n\n # Make a list of all the posts\n url(r'^blog/$', RedirectView.as_view(url='pagina/1'), name='blog'),\n url(r'^blog/pagina/$', RedirectView.as_view(url='1'), name='blog'),\n url(r'^blog/pagina/(?P[0-9]+)$', post_list),\n\n # Make a list of all the posts in a category\n url(r'^blog/(?P[\\w\" \"-]+)/$', RedirectView.as_view(url='pagina/1'), name='blog'),\n url(r'^blog/(?P[\\w\" \"-]+)/pagina/$', RedirectView.as_view(url='1'), name='blog'),\n url(r'^blog/(?P[\\w\" \"-]+)/pagina/(?P[0-9]+)$', post_list_category),\n\n # Print the post detail\n url(r'^blog/(?P[\\w\" \"-]+)/(?P[\\w-]+)/$', post_detail, name='post-detail'),\n]","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"442046907","text":"import os\nimport sys\nimport json\nimport os\nfrom urllib.parse import urlparse\nget_version_cmd =\"cat ./resource-helmChart/version\"\nchart_version = open(\"./resource-helmChart/version\").read()\nget_metadata_cmd = \"cat ./resource-helmChart/metadata.json\"\nwith open('./resource-helmChart/metadata.json') as f:\n meta_data_json = json.load(f)\nfor item in meta_data_json:\n if item[\"name\"] == \"repository\":\n helm_server= item[\"value\"]\n if item[\"name\"] == \"chart\":\n chartname= item[\"value\"]\nparsed_url = urlparse(helm_server).hostname\nprint(parsed_url)\napi_end_point = parsed_url + \"/api/v2.0\"\nhelm_server = helm_server.replace(parsed_url,api_end_point)\nprint(helm_server)\nurl = helm_server + \"/charts/\" + chartname + \"/\" + str(chart_version).strip() + \"/labels\"\n\nprint (\"url: \",url)\ncommand = \"curl -k -v -u admin:VMware@vida12345 -X POST \\\"\" + url + \"\\\" -H 'accept: application/json' -H 'Content-Type: application/json' -d '{\\\"id\\\": 1}'\"\nos.system(command)\nprint(command)\n","sub_path":"add-tag-vmware.py","file_name":"add-tag-vmware.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"264431586","text":"import time\r\nfrom random import randrange\r\n\r\n#welcoming the user\r\nname = input(\"\\t HANGMAN GAME\"+\"\\n\"+\"What is your name? 
\")\r\n\r\nprint(\"Hi \" + name.capitalize(), \"Let's start the game!\"+\"\\n\"+\"Good Luck!\")\r\nx=randrange(0,5,1)\r\nprint (\"\")\r\n\r\n#wait for 1 second\r\ntime.sleep(1)\r\n\r\n#print(\"come on, start guessing\")\r\nguess1 =[\"mahesh\",\"university\",\"warden\",\"cricket\",\"library\"]\r\n#for motivating the players\r\nquote=[\"Come on! You got this\",\"Good try though\",\"You can do it!\"]\r\nword=guess1[x]\r\n\r\n#creates a variable with an empty value\r\nguesses = ''\r\n\r\n#remaining guesses\r\nremguess = 10\r\n#check if the guesses left are more than zero\r\nwhile remguess > 0:\r\n #for characters left count\r\n charleft = 0\r\n\r\n # for every character in the word\r\n for char in word:\r\n\r\n # see if the character is in the players guess\r\n if char in guesses:\r\n print(char.upper(),end=\" \")\r\n else:\r\n # to print the number char left\r\n print(\"_\",end=\" \")\r\n charleft += 1\r\n\r\n # print You Won\r\n if charleft == 0:\r\n print(\"Congrats You won boii!\")\r\n #exit\r\n break\r\n print()\r\n # ask the user to guess a character\r\n guess = input(\"Your Guess:\")\r\n\r\n # set the players guess to guesses\r\n guesses += guess\r\n #for backspace\r\n bck='\\b'\r\n y=randrange(0,3,1)\r\n f=quote[y]\r\n\r\n # when character guessed is not present in the word\r\n if guess not in word:\r\n remguess -= 1\r\n print(\"Wrong\",end=\" \")\r\n if remguess>0:\r\n print(f)\r\n #show the guesses left\r\n print(\"You have \" + str(remguess)+\" more guesses left\")\r\n print()\r\n if remguess == 0:\r\n\r\n # user lost\r\n print(\"You Lost! Well tried\")\r\n print(\"The word is \"+word)\r\n else:\r\n print()\r\n","sub_path":"HANGMAN_GAME.py","file_name":"HANGMAN_GAME.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"561444118","text":"x = int(input())\nmasuk = list(map(int, input().split()))\n \nfor i in range(x):\n for j in range(x):\n if i==j:\n print(masuk[i],end=\"\")\n else:\n print('0', end=\"\")\n if j < x-1:\n print(' ',end='')\n else:\n print()\n","sub_path":"26.Matriks_Diagonal.py","file_name":"26.Matriks_Diagonal.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"602242518","text":"import threading\nfrom queue import Queue\nimport time\nimport socket\nimport sys\nimport os\nimport subprocess\n\n'''def checkid():\n\n\tprint(\"This scan requires root privilege!\")\n\tsys.exit(1)\n'''\nfrom scapy.all import *\n# a print_lock is what is used to prevent \"double\" modification of shared variables.\n# this is used so while one thread is using a variable, others cannot access\n# it. 
Once done, the thread releases the print_lock.\n# to use it, you want to specify a print_lock per thing you wish to print_lock.\nprint_lock = threading.Lock()\n\n\n\ntarget = sys.argv[1]\n#ip = socket.gethostbyname(target)\n\n\ndef stealth_scan(port):\n\n\tip = IP(dst=target)\n\tTCP_SYN = TCP(sport=RandShort(),dport=int(port),flags='S',seq=40) \n\tTCP_SYNACK = sr1(ip/TCP_SYN,timeout=0.3,verbose=0) # send packet and wait for the first reply\n\tif not TCP_SYNACK or TCP_SYNACK.getlayer(TCP).flags != 0x12: # 0x12 = SYN+ACK flag bits\n\t\tpass#print \"\\n\"+str(port)+\":closed\\n\" # response from our target aka hostip - expect RST\n\telse:\n\t\tprint(\"\\n\"+str(port)+\":open\\n\") # response from our target aka hostip - expect SYN-ACK\n\n\n\t\n\n\n\n\n\n# The threader thread pulls a worker from the queue and processes it\ndef threader():\n\twhile True:\n # gets a worker from the queue\n\t\tworker = q.get()\n\n # Run the scan job with the available worker in the queue (thread)\n\t\tstealth_scan(worker)\n\n # completed with the job\n\t\tq.task_done()\n\n\n\n \n\n# Create the queue and threader \nq = Queue()\n\n# how many threads are we going to allow for\nfor x in range(40):\n\tt = threading.Thread(target=threader)\n\n # classifying as a daemon, so they will die when the main dies\n\tt.daemon = True\n\n # begins, must come after daemon definition\n\tt.start()\n\n\nstart = time.time()\n\n# queue one job per port in the requested range.\nfor worker in range(int(sys.argv[2]),int(sys.argv[3])):\n\tq.put(worker)\n\n# wait until all queued ports have been processed.\nq.join()\n\n\nprint(('Entire job took:',time.time() - start))\n","sub_path":"stealth_scan.py","file_name":"stealth_scan.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"170338024","text":"#!/usr/bin/python3\n\nfrom board_driver_simulator import open, close, set_port, get_port\nimport time \n\n'''\n2. Modify the set_point function so that it uses the\noutput port. Use a bit shift.\nTest: functionality from the previous task\n(cyclically shifts the light point (0,1,2,3,0,1,...)\nabout every 
¼ second)\n'''\n\ndef set_point(position):\n\tif position==0:\n\t\tset_port(1<<13)\n\telif position==1:\n\t\tset_port(1<<5)\n\telif position==2:\n\t\tset_port(1<<9)\n\telif position==3:\n\t\tset_port(1<<17)\n\telse:\n\t\tset_port(0)\n\ntry: \n open() \n#--------------------- \n while(True): \n set_point(0)\n time.sleep(0.25)\n set_point(1)\n time.sleep(0.25)\n set_point(2)\n time.sleep(0.25)\n set_point(3)\n time.sleep(0.25)\n#--------------------- \nfinally: \n close()\n","sub_path":"Part III and IV/cz4_02.py","file_name":"cz4_02.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"268973208","text":"#!/usr/bin/env python3\n\nT = int(input())\n\ndef group(s):\n res = [s[0]]\n for state in s:\n if state != res[-1]:\n res.append(state)\n return res\n\ndef count(s):\n res = 0\n for state in s:\n if state == '-':\n res += 2\n if s[0] == '-':\n res -= 1\n return res\n\nfor i in range(T):\n s = input()\n sanitized = group(s)\n print('Case #{}: {}'.format(i+1, count(sanitized)))\n","sub_path":"codes/CodeJamCrawler/16_0_2/jiangz/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"566058531","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 18 15:22:03 2021\n\n@author: jesel\n\"\"\"\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\ntfd = tfp.distributions\ntfk = tfp.math.psd_kernels\n\nimport unittest\nimport time\n\nfrom gpflow.config import default_float\n\nfrom point.helper import get_lrgp, method\nfrom point.misc import Space\n\nrng = np.random.RandomState(40)\n\n\n\ndef expandedSum(x, n =0):\n z1 = tf.expand_dims(x, 1)\n z2 = tf.expand_dims(x, 0)\n return (z1 + z2, z1 - z2)\n\n\n\nclass Test_Quadratic_Term(unittest.TestCase):\n # test \n # out = w.T @ M @ w\n # grad = w.T @ gradM @ w\n # where w = ones(4)\n \n def setUp(self):\n self.v = tf.Variable(1, dtype=default_float(), name='sig')\n self.l = tf.Variable([0.2], dtype=default_float(), name='l')\n self.gamma = 1 / (2 * self.l **2 )\n self.G = tf.constant([[1, 0.5]], dtype=default_float(), name='w')\n self.verbose = True\n\n #@unittest.SkipTest\n def test(self):\n\n lrgp = get_lrgp(method = method.RFF_NO_OFFSET, n_features = 1, variance = self.v, length_scale = self.l, space = Space([-1,1]), n_components = 2, random_state = rng)\n lrgp._G = self.G\n lrgp.fit(sample = False)\n \n cache = tf.ones((2 * lrgp.n_components,1), dtype=default_float())\n lrgp._latent = cache\n \n #### TF GRADIENT\n t0 = time.time()\n with tf.GradientTape() as tape: \n lrgp.fit(sample = False)\n intM = tf.transpose(lrgp._latent) @ lrgp.M() @ lrgp._latent\n grad_tf = tape.gradient(intM, lrgp.trainable_variables) \n \n adjuster = 1/ lrgp.gradient_adjuster\n grad_tf_l = tf.expand_dims(grad_tf[0],1)*adjuster[0]\n grad_tf_l = grad_tf_l[0]\n grad_tf_v = grad_tf[1]*adjuster[1]\n \n if self.verbose :\n print(\"TF loss:= %f - in [%f] \" % (intM,time.time() - t0))\n print(grad_tf_v[0])\n print(grad_tf_l[0])\n print(\"\")\n \n #### IMPLEMENTED GRADIENT\n t0 = time.time()\n (intD, grad) = lrgp.integral(get_grad = True)\n \n\n grad_v = grad[1][0]\n grad_l = grad[0][0]\n \n if self.verbose :\n print(\"Implementation loss:= %f - in [%f] \" % (intD, time.time() - t0))\n print(grad_v)\n print(grad_l)\n\n intM = intM[0][0].numpy()\n \n #### TEST\n #test loss values\n self.assertAlmostEqual(intM, intD.numpy(), places=7)\n 
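# the hard-coded reference values below assume the fixed seed (rng = RandomState(40)) and the fixed G set in setUp\n 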
self.assertAlmostEqual(intM, 2.478777715283165, places=7)\n \n #test gradient variance\n self.assertAlmostEqual(grad_v.numpy(),grad_tf_v[0].numpy() , places=7)\n self.assertAlmostEqual(grad_v.numpy(), 2.4787777152831656, places=7)\n \n #test gradient l\n self.assertAlmostEqual(grad_l.numpy(),grad_tf_l[0].numpy() , places=7)\n self.assertAlmostEqual(grad_l.numpy(), 10.405324731885159, places=7)\n \n \nclass Test_Offset_Term(unittest.TestCase):\n \n\n def setUp(self):\n self.v = tf.Variable(2.0, dtype=default_float(), name='sig')\n self.l = tf.Variable([0.2], dtype=default_float(), name='l')\n self.gamma = 1 / (2 * self.l **2 )\n self.G = tf.constant([[1, 0.5]], dtype=default_float(), name='w')\n self.verbose = True\n\n @unittest.SkipTest\n def test(self):\n\n lrgp = get_lrgp(method = method.RFF_NO_OFFSET, n_features = 1, variance = self.v, length_scale = self.l, space = Space([-1,1]), n_components = 2, random_state = rng)\n lrgp._G = self.G\n lrgp.fit(sample = False)\n \n cache = tf.ones((2 * lrgp.n_components,1), dtype=default_float())\n lrgp._latent = cache\n \n #### TF GRADIENT\n t0 = time.time()\n with tf.GradientTape() as tape: \n lrgp.fit(sample = False)\n loss = tf.transpose(lrgp.m()) @ lrgp._latent\n grad_tf = tape.gradient(loss, lrgp.trainable_variables) \n\n adjuster = 1/ lrgp.gradient_adjuster\n grad_tf_l = grad_tf[0]*adjuster[0]\n grad_tf_v = grad_tf[1]*adjuster[1]\n \n if self.verbose :\n print(\"TF offset.loss:= %f - in [%f] \" % (loss,time.time() - t0))\n print(grad_tf_v)\n print(grad_tf_l)\n print(\"\")\n \n #### IMPLEMENTED GRADIENT\n t0 = time.time()\n (m, grad) = lrgp.m(get_grad = True)\n out = tf.transpose(lrgp.m()) @ lrgp._latent\n\n grad_l = tf.transpose(grad[0]) @ lrgp._latent\n grad_v = tf.transpose(grad[1]) @ lrgp._latent\n \n if self.verbose :\n print(\"Implementation offset.loss:= %f - in [%f] \" % (out, time.time() - t0))\n print(grad_v[0][0])\n print(grad_l[0][0])\n \n #### TEST\n #test loss values\n out = out[0][0].numpy()\n self.assertAlmostEqual(out, loss.numpy(), places=7)\n self.assertAlmostEqual(out, 0.09520800541790975, places=7)\n \n #test gradient variance\n grad_v = grad_v[0][0].numpy()\n self.assertAlmostEqual(grad_v, grad_tf_v[0].numpy() , places=7)\n self.assertAlmostEqual(grad_v, 0.023802001354477437, places=7)\n \n #test gradient l\n grad_l = grad_l[0][0].numpy()\n self.assertAlmostEqual(grad_l, grad_tf_l[0].numpy(), places=7)\n self.assertAlmostEqual(grad_l, 5.650854327926622, places=7)\n\n \n\n \nclass Test_features_der(unittest.TestCase):\n \n def setUp(self):\n self.v = tf.Variable(1, dtype=default_float(), name='sig')\n self.l = tf.Variable([0.2], dtype=default_float(), name='l')\n self.gamma = 1 / (2 * self.l **2 )\n self.G = tf.constant([[1, 0.5]], dtype=default_float(), name='w')\n \n X = np.array( [[-1.37923991], \n [ 0.02771165], \n [-0.84617041], \n [-1.3370345 ], \n [-1.4243213] , \n [ 0.07479864], \n [ 1.05240778], \n [ 0.07683154], \n [ 0.5529944 ], \n [ 0.00898941]])\n \n self.X = tf.convert_to_tensor(X, dtype=default_float())\n self.verbose = True\n \n @unittest.SkipTest\n def test(self):\n \n lrgp = get_lrgp(method = method.RFF_NO_OFFSET, n_features = 1, variance = self.v, length_scale = self.l, space = Space([-1,1]), n_components = 2, random_state = rng)\n lrgp._G = self.G\n lrgp.fit(sample = False)\n\n #TF : compute the quadratic term ones x features x ones\n N = self.X.shape[0]\n cache1 = tf.expand_dims(tf.experimental.numpy.hstack([tf.ones(N, dtype=default_float())]) ,0)\n cache2 = 
tf.expand_dims(tf.experimental.numpy.hstack([tf.ones(2 * lrgp.n_components, dtype=default_float())]) ,1)\n \n t0 = time.time()\n with tf.GradientTape() as tape: \n lrgp.fit(sample = False)\n loss_tf = cache1 @ lrgp.feature(self.X) @ cache2\n grad_tf = tape.gradient(loss_tf, lrgp.trainable_variables) \n \n if self.verbose is True :\n print(\"TF loss:= %f - in [%f] \" % (loss_tf, time.time() - t0))\n print( grad_tf[0])\n print( grad_tf[1])\n \n #Recalculation \n grad_adj = lrgp.gradient_adjuster\n out, grads = lrgp.feature(self.X, get_grad = True)\n \n loss = cache1 @ out @ cache2\n dl = grad_adj[0] * (cache1 @ grads[0] @ cache2)\n dv = grad_adj[1] * (cache1 @ grads[1] @ cache2)\n \n if self.verbose :\n print(\"\")\n print(\"Implementation loss:= %f - in [%f] \" % (loss, time.time() - t0))\n print(dl[0][0])\n print(dv[0][0])\n \n #### TEST\n self.assertAlmostEqual(loss_tf[0], loss[0], places=7)\n self.assertAlmostEqual(loss[0][0].numpy(), 4.880451286990073 , places=7)\n \n self.assertAlmostEqual(grad_tf[1].numpy(), dv[0][0].numpy(), places=7)\n self.assertAlmostEqual(dv[0][0].numpy(), 1.5425167974338596 , places=7)\n\n self.assertAlmostEqual(dl[0][0].numpy(), grad_tf[0][0].numpy(), places=7)\n self.assertAlmostEqual(dl[0][0].numpy(), 6.745985740267533 , places=7)\n\n\n\n\nif __name__ == '__main__':\n unittest.main()\n\n\n\n\n\n\n\n\n","sub_path":"test/test_lowr_grad_rff_1D.py","file_name":"test_lowr_grad_rff_1D.py","file_ext":"py","file_size_in_byte":8449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"369508166","text":"from django import forms\nfrom django.contrib import admin\nfrom .models import Report, Target\n\nclass ReportAdminForm(forms.ModelForm):\n class Meta:\n model = Report\n fields = '__all__'\n widgets = {\n 'reporter_name': forms.TextInput(attrs={'size': '50'}),\n 'title': forms.TextInput(attrs={'size': '50'}),\n }\n\n@admin.register(Report)\nclass ReportAdmin(admin.ModelAdmin):\n list_display = ('title', 'date_updated', 'status', 'type')\n ordering = ('-date_updated',)\n list_filter = ('status',)\n search_fields = ('title',)\n form = ReportAdminForm\n\nclass TargetAdminForm(forms.ModelForm):\n class Meta:\n model = Target\n fields = '__all__'\n widgets = {\n 'name': forms.TextInput(attrs={'size': '50'})\n }\n\n@admin.register(Target)\nclass TargetAdmin(admin.ModelAdmin):\n list_display = ('name', 'is_active')\n ordering = ('name',)\n list_filter = ('is_active',)\n search_fields = ('name',)\n form = TargetAdminForm\n","sub_path":"bugbounty/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"606624621","text":"import pytest\nimport torch\n\nfrom sentiment.explainer import explainer\nfrom sentiment.explainer.explainers import gradient_shap\nfrom sentiment.model.model import bert\n\n\n@pytest.mark.integration\ndef test_gradient_shap():\n text = \"This movie was boring :-(\"\n\n result = explainer.explain(text=text, target=4, explainer=\"gradient_shap\")\n\n assert len(result.explanation) == 7\n assert result.explanation[0][0] == \"This\"\n assert result.explanation[4][0] == \":\"\n assert \"delta\" in result.meta\n\n\n@pytest.mark.integration\ndef test_that_partial_model_is_faithful():\n\n model = bert.model\n tokenizer = bert.tokenizer\n\n def full_forward(model_input):\n pred = model(model_input)\n return torch.softmax(pred[0], dim=1)\n\n partial_forward, _ = 
gradient_shap.create_partial_model(model)\n\n for text in [\"This movie was boring :-(\",\n \"Die Sonne hat gelacht, was für ein Tag!\",\n \"Leider war der Urlaub zu schnell vorbei...\"]:\n model_input = torch.tensor([tokenizer.encode(text, add_special_tokens=False)],\n dtype=torch.int64)\n\n embedded_input = model.bert.embeddings(model_input)\n\n assert torch.all(torch.eq(full_forward(model_input), partial_forward(embedded_input)))\n","sub_path":"review-sentiment/sentiment-backend/tests/explainer/explainers/test_gradient_shap.py","file_name":"test_gradient_shap.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"285936789","text":"from datetime import date,datetime\r\n\r\nauthorList = 'author.txt'\r\nbookList = 'books.txt'\r\ntransactionList = 'transactions.txt'\r\n\r\n\r\nclass Author():\r\n def __init__(self,name=None,dob=None,nationality=None):\r\n self.name = name\r\n self.dob = dob\r\n self.nationality = nationality\r\n \r\n def get_age(self):\r\n today = date.today()\r\n datee = self.dob\r\n datee= datee.split('/')\r\n year = int(datee[0])\r\n \r\n return today.year - year \r\n \r\n def __str__(self):\r\n return 'Author info- Name:'+ self.name +' Nationality:'+ self.nationality + ' Age:'+str(self.get_age())\r\n def __add__(self,other):\r\n self.name = other.name\r\n self.dob = other.dob\r\n self.nationality = other.nationality\r\n return Author(self.name,self.dob,self.nationality)\r\n \r\n def load_authors():\r\n inFile = open(authorList, 'r') \r\n doc = inFile.readlines()\r\n author_list = [x.strip() for x in doc]\r\n return author_list\r\n \r\n def author_to_text(self):\r\n file = open('author.txt','a')\r\n file.write(self.name+','+self.dob+','+self.nationality +'\\n')\r\n \r\n def del_author(name):\r\n inFile = open(authorList, 'r') \r\n doc = inFile.readlines()\r\n print (doc)\r\n inFile.close()\r\n inFile = open(authorList, 'w')\r\n for line in doc:\r\n if name not in line:\r\n inFile.write(line)\r\n inFile.close()\r\n \r\n def search_author(name):\r\n ''' name(string) of the author to be searched. This function returns \r\n the details of the author. 
This function has to be called as a method of \r\n the class when used outside the class '''\r\n \r\n inFile = open(authorList, 'r') \r\n doc = inFile.readlines()\r\n inFile.close()\r\n for line in doc:\r\n if name in line:\r\n break\r\n #return Author(name=line[:-1])\r\n line=line.split(',')\r\n return Author(name=line[0],dob=line[1],nationality=line[2])\r\n \r\n def search_by_nationality(nationality):\r\n inFile = open(authorList, 'r') \r\n doc = inFile.readlines()\r\n inFile.close()\r\n temp = []\r\n for line in doc:\r\n if nationality in line:\r\n pos = line.find(',')\r\n temp.append(line[:pos])\r\n return Author(temp)\r\n \r\na = Author('mohamed','1993/1/1','egyptian')\r\nb = Author('fahmy','1993/1/1','canadian')\r\nc=a+b\r\nprint(a)\r\nprint(c)\r\nc.author_to_text()\r\n\r\n\r\n \r\nclass Book():\r\n def __init__(self,book_name=None,author_name=None,publisher_name=None):\r\n self.book_name = str(book_name)\r\n self.author_name = str(author_name)\r\n self.publisher_name = str(publisher_name)\r\n self.author = Author.search_author(author_name)\r\n \r\n def __str__(self):\r\n return 'Book Info- Book Name:'+ self.book_name +' Publisher:'+ self.publisher_name + '\\n'+ self.author.__str__()+'\\n'\r\n \r\n def __add__(self,other):\r\n self.book_name = other.book_name\r\n self.author_name = other.author_name\r\n self.publisher_name = other.publisher_name\r\n return Book(self.book_name,self.author_name,self.publisher_name)\r\n \r\n def load_books():\r\n inFile = open(bookList, 'r') \r\n doc = inFile.readlines()\r\n book_list = [x.strip() for x in doc]\r\n return book_list\r\n \r\n def book_to_text(self):\r\n file = open('books.txt','w+')\r\n file.write(self.book_name+','+self.author_name+','+self.publisher_name +'\\n')\r\n \r\n def del_book(name):\r\n inFile = open(bookList, 'r') \r\n doc = inFile.readlines()\r\n print (doc)\r\n inFile.close()\r\n inFile = open(bookList, 'w')\r\n for line in doc:\r\n if name not in line:\r\n inFile.write(line)\r\n inFile.close()\r\n \r\n def search_book(name):\r\n ''' name(string) of the book to be searched. This function returns \r\n the details of the book. 
This function has to be called as a method of \r\n the class when used outside the class '''\r\n \r\n inFile = open(bookList, 'r') \r\n doc = inFile.readlines()\r\n inFile.close()\r\n for line in doc:\r\n if name in line:\r\n break\r\n line=line.split(',')\r\n return Book(book_name=line[0],author_name=line[1],publisher_name=line[2])\r\n \r\n def search_by_author(name=None, nationality=None, age=None):\r\n inFile = open(bookList, 'r') \r\n doc = inFile.readlines()\r\n inFile.close()\r\n temp = []\r\n if name != None:\r\n for line in doc:\r\n if name in line:\r\n pos = line.find(',')\r\n temp.append(line[:pos])\r\n if nationality != None:\r\n name = Author.search_by_nationality(nationality)\r\n for line in doc:\r\n for line2 in name:\r\n if line2 in line:\r\n pos = line.find(',')\r\n if line[:pos] not in temp:\r\n temp.append(line[:pos])\r\n if age != None:\r\n name = Author.search_by_age(age)\r\n for line in doc:\r\n for line2 in name:\r\n if line2 in line:\r\n pos = line.find(',')\r\n if line[:pos] not in temp:\r\n temp.append(line[:pos])\r\n return temp\r\n \r\n\r\na = Book('circuits','mohamed','company1')\r\nb = Book('circuits','fahmy','company2')\r\nc=a+b\r\nprint(a)\r\nprint(c)\r\n\r\nauthor = Author.search_author('segun')\r\nprint(type(author))\r\nprint(author)\r\n\r\n\r\nclass Transaction():\r\n def __init__(self,bookname=None,username=None,t_type=None):\r\n self.bookname = str(bookname)\r\n self.username = str(username)\r\n self.date_time = datetime.now()\r\n self.t_type = int(t_type)\r\n \r\n def load_transactions():\r\n inFile = open(transactionList, 'r') \r\n doc = inFile.readlines()\r\n transaction_list = [x.strip() for x in doc]\r\n return transaction_list\r\n \r\n def add_transaction(self):\r\n file = open(transactionList,'a+')\r\n file.write(self.bookname+','+self.username+','+str(self.date_time) +','+str(self.t_type)+'\\n')\r\n \r\n def del_transaction(bookname = None,username = None, timedate= None):\r\n inFile = open(transactionList, 'r') \r\n doc = inFile.readlines()\r\n inFile.close()\r\n inFile = open(transactionList, 'w')\r\n for line in doc:\r\n if bookname not in line and username not in line and str(timedate) not in line:\r\n inFile.write(line)\r\n inFile.close()\r\n \r\n def search_transaction(tr_date=None, username=None, bookname=None): # date has to be inputted as a string\r\n inFile = open(transactionList, 'r') \r\n doc = inFile.readlines()\r\n inFile.close()\r\n temp = []\r\n if tr_date != None:\r\n for line in doc:\r\n if str(tr_date) in line:\r\n temp.append(line[:-1])\r\n if username != None:\r\n for line in doc:\r\n if username in line and line[:-1] not in temp:\r\n temp.append(line[:-1])\r\n if bookname != None:\r\n for line in doc:\r\n if bookname in line and line[:-1] not in temp:\r\n temp.append(line[:-1])\r\n return temp\r\n \r\nt1 = Transaction('Advanced Physics','Abhinav',1)\r\nt2=Transaction('FACTs','Bhavin',1)\r\nt3 = Transaction('PS modelling','Gurjeet',0)\r\nt4 = Transaction('Networking principles','Karanbir',1)\r\n\r\nt1.add_transaction()\r\nt2.add_transaction()\r\nt3.add_transaction()\r\nt4.add_transaction()\r\nTransaction.search_transaction(username='mohamed')\r\nprint(Transaction.load_transactions())\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Assignment-4.py","file_name":"Assignment-4.py","file_ext":"py","file_size_in_byte":7937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"337044756","text":"from typing import Any, Mapping, NamedTuple, 
Optional, Sequence\n\nfrom dagster import check\nfrom dagster.core.definitions.events import AssetKey\n\n\nclass AssetIn(\n NamedTuple(\n \"_AssetIn\",\n [\n (\"asset_key\", Optional[AssetKey]),\n (\"metadata\", Optional[Mapping[str, Any]]),\n (\"namespace\", Optional[Sequence[str]]),\n ],\n )\n):\n def __new__(\n cls,\n asset_key: Optional[AssetKey] = None,\n metadata: Optional[Mapping[str, Any]] = None,\n namespace: Optional[Sequence[str]] = None,\n ):\n check.invariant(\n not (asset_key and namespace),\n (\"Asset key and namespace cannot both be set on AssetIn\"),\n )\n\n # if user inputs a single string, coerce to list\n namespace = [namespace] if isinstance(namespace, str) else namespace\n\n return super(AssetIn, cls).__new__(\n cls,\n asset_key=check.opt_inst_param(asset_key, \"asset_key\", AssetKey),\n metadata=check.opt_inst_param(metadata, \"metadata\", Mapping),\n namespace=check.opt_list_param(namespace, \"namespace\", str),\n )\n","sub_path":"python_modules/dagster/dagster/core/asset_defs/asset_in.py","file_name":"asset_in.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"407177400","text":"import difflib\nimport pprint\nfrom unittest import TestCase\n\n\nclass ExtendedTestCase(TestCase):\n\n def assertSeqAlmostEqual(self, seq1, seq2, places=None, msg=None, delta=None):\n \"\"\"An equality assertion for ordered sequences (like lists and tuples).\n\n For the purposes of this function, a valid ordered sequence type is one\n which can be indexed and has a length.\n\n Args:\n seq1: The first sequence to compare.\n seq2: The second sequence to compare.\n places: Number of decimals to match.\n delta: Maximum difference. Supersedes places.\n msg: Optional message to use on failure instead of a list of\n differences.\n \"\"\"\n seq_type_name = type(seq1).__name__\n\n differing = None\n try:\n len1 = len(seq1)\n except (TypeError, NotImplementedError):\n differing = f'First {seq_type_name} has no length. Non-sequence?'\n\n if differing is None:\n try:\n len2 = len(seq2)\n except (TypeError, NotImplementedError):\n differing = f'Second {seq_type_name} has no length. 
Non-sequence?'\n\n if differing is None:\n if seq1 == seq2:\n return\n\n differing = ''\n\n for i in range(min(len1, len2)):\n try:\n item1 = seq1[i]\n except (TypeError, IndexError, NotImplementedError):\n differing += f'\\nUnable to index element {i} of first {seq_type_name}\\n'\n break\n\n try:\n item2 = seq2[i]\n except (TypeError, IndexError, NotImplementedError):\n differing += f'\\nUnable to index element {i} of second {seq_type_name}\\n'\n break\n\n try:\n self.assertAlmostEqual(item1, item2, places=places, delta=delta, msg=msg)\n except AssertionError as e:\n differing += f'\\nFirst differing element {i}:\\n{e}\\n'\n break\n else:\n if len1 == len2:\n return\n\n if len1 > len2:\n differing += f'\\nFirst {seq_type_name} contains {len1 - len2} additional elements.\\n'\n elif len1 < len2:\n differing += f'\\nSecond {seq_type_name} contains {len2 - len1} additional elements.\\n'\n\n standardMsg = differing + f\"\\n{seq2}\\n\"\n diffMsg = '\\n' + '\\n'.join(\n difflib.ndiff(pprint.pformat(seq1).splitlines(),\n pprint.pformat(seq2).splitlines()))\n\n standardMsg = self._truncateMessage(standardMsg, diffMsg)\n msg = self._formatMessage(msg, standardMsg + diffMsg)\n self.fail(msg)\n","sub_path":"meyer/unit_test/ExtendedTestCase.py","file_name":"ExtendedTestCase.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"607515166","text":"import math\n\nimport numpy as np\n\nfrom PyEngine3D.Utilities import *\n\n\nclass StaticActor:\n def __init__(self, name, **object_data):\n self.name = name\n self.selected = False\n self.model = None\n self.has_mesh = False\n\n # transform\n self.transform = TransformObject()\n self.transform.set_pos(object_data.get('pos', [0, 0, 0]))\n self.transform.set_rotation(object_data.get('rot', [0, 0, 0]))\n self.transform.set_scale(object_data.get('scale', [1, 1, 1]))\n\n self.set_model(object_data.get('model'))\n\n self.instance_pos = RangeVariable(**object_data.get('instance_pos',\n dict(min_value=Float3(-10.0, 0.0, -10.0),\n max_value=Float3(10.0, 0.0, 10.0))))\n self.instance_rot = RangeVariable(**object_data.get('instance_rot', dict(min_value=FLOAT3_ZERO)))\n self.instance_scale = RangeVariable(**object_data.get('instance_scale', dict(min_value=1.0)))\n self.instance_pos_list = object_data.get('instance_pos_list', [])\n self.instance_rot_list = object_data.get('instance_rot_list', [])\n self.instance_scale_list = object_data.get('instance_scale_list', [])\n\n self.instance_matrix = None\n self.instance_radius_offset = object_data.get('instance_radius_offset', 0.0)\n self.instance_radius_scale = object_data.get('instance_radius_scale', 1.0)\n\n self.instance_count = object_data.get('instance_count', 1)\n self.set_instance_count(self.instance_count)\n\n self.attributes = Attributes()\n\n def delete(self):\n pass\n\n def set_model(self, model):\n if model:\n self.model = model\n self.has_mesh = model.mesh is not None\n\n def get_save_data(self):\n save_data = dict(\n name=self.name,\n model=self.model.name if self.model else '',\n pos=self.transform.pos.tolist(),\n rot=self.transform.rot.tolist(),\n scale=self.transform.scale.tolist(),\n instance_count=self.instance_count,\n instance_pos=self.instance_pos.get_save_data(),\n instance_rot=self.instance_rot.get_save_data(),\n instance_scale=self.instance_scale.get_save_data(),\n instance_pos_list=self.instance_pos_list,\n instance_rot_list=self.instance_rot_list,\n instance_scale_list=self.instance_scale_list,\n )\n 
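# NOTE (added comment): these keys largely mirror the __init__(**object_data) kwargs,\n        # so a saved actor can be rebuilt by passing this dict straight back in.\n        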
return save_data\n\n def set_instance_count(self, count):\n self.instance_count = count\n\n if 1 < count:\n self.instance_pos_list = [self.instance_pos.get_uniform() for i in range(count)]\n self.instance_rot_list = [self.instance_rot.get_uniform() for i in range(count)]\n self.instance_scale_list = [self.instance_scale.get_uniform() for i in range(count)]\n\n self.instance_matrix = np.zeros(count, (np.float32, (4, 4)))\n\n offset_max = FLOAT32_MIN\n scale_max = FLOAT32_MIN\n self.instance_radius_offset = 0.0\n self.instance_radius_scale = 1.0\n\n for i in range(count):\n uniform_scale = self.instance_scale_list[i]\n offset_max = max(offset_max, max(np.abs(self.instance_pos_list[i])))\n scale_max = max(scale_max, np.abs(uniform_scale))\n\n self.instance_matrix[i][...] = MATRIX4_IDENTITY\n matrix_scale(self.instance_matrix[i], uniform_scale, uniform_scale, uniform_scale)\n matrix_rotate(self.instance_matrix[i], *self.instance_rot_list[i])\n matrix_translate(self.instance_matrix[i], *self.instance_pos_list[i])\n self.instance_radius_offset = offset_max\n self.instance_radius_scale = scale_max\n else:\n self.instance_matrix = None\n\n def get_attribute(self):\n self.attributes.set_attribute('name', self.name)\n self.attributes.set_attribute('pos', self.transform.pos)\n self.attributes.set_attribute('rot', self.transform.rot)\n self.attributes.set_attribute('scale', self.transform.scale)\n self.attributes.set_attribute('model', self.model.name if self.model else '')\n self.attributes.set_attribute('instance_count', self.instance_count)\n self.attributes.set_attribute('instance_pos', self.instance_pos.get_save_data())\n self.attributes.set_attribute('instance_rot', self.instance_rot.get_save_data())\n self.attributes.set_attribute('instance_scale', self.instance_scale.get_save_data())\n return self.attributes\n\n def set_attribute(self, attribute_name, attribute_value, parent_info, attribute_index):\n item_info_history = []\n parent_attribute_name = attribute_name\n while parent_info is not None:\n parent_attribute_name = parent_info.attribute_name\n item_info_history.insert(0, parent_info)\n parent_info = parent_info.parent_info\n\n if 1 < len(item_info_history) or 'instance_scale' == parent_attribute_name:\n attribute = getattr(self, item_info_history[0].attribute_name)\n if attribute is not None and isinstance(attribute, RangeVariable):\n if 'min_value' == attribute_name:\n attribute.set_range(attribute_value, attribute.value[1])\n elif 'max_value' == attribute_name:\n attribute.set_range(attribute.value[0], attribute_value)\n self.set_instance_count(self.instance_count)\n else:\n if attribute_name == 'pos':\n self.transform.set_pos(attribute_value)\n elif attribute_name == 'rot':\n self.transform.set_rotation(attribute_value)\n elif attribute_name == 'scale':\n self.transform.set_scale(attribute_value)\n elif attribute_name == 'instance_count':\n self.set_instance_count(attribute_value)\n elif hasattr(self, attribute_name):\n setattr(self, attribute_name, attribute_value)\n\n def get_mesh(self):\n return self.model.mesh if self.has_mesh else None\n\n def get_geometries(self):\n return self.model.mesh.geometries if self.has_mesh else None\n\n def get_material_instance(self, index):\n return self.model.material_instances[index] if self.model else None\n\n def set_selected(self, selected):\n self.selected = selected\n\n def update(self, dt):\n self.transform.update_transform()\n\n\nclass SkeletonActor(StaticActor):\n def __init__(self, name, **object_data):\n StaticActor.__init__(self, name, 
**object_data)\n\n        self.animation_time = 0.0\n        self.animation_buffers = []\n        self.prev_animation_buffers = []\n\n        if self.has_mesh:\n            for animation in self.model.mesh.animations:\n                if animation:\n                    animation_buffer = animation.get_animation_transforms(0.0)\n                    # just initialize\n                    self.animation_buffers.append(animation_buffer.copy())\n                    self.prev_animation_buffers.append(animation_buffer.copy())\n\n    def get_prev_animation_buffer(self, index):\n        return self.prev_animation_buffers[index]\n\n    def get_animation_buffer(self, index):\n        return self.animation_buffers[index]\n\n    def update(self, dt):\n        self.transform.update_transform()\n\n        # update animation\n        if self.has_mesh:\n            for i, animation in enumerate(self.model.mesh.animations):\n                if animation:\n                    frame_count = animation.frame_count\n                    if frame_count > 1:\n                        self.animation_time = math.fmod(self.animation_time + dt, animation.animation_length)\n                        frame = animation.get_time_to_frame(self.animation_time)\n                    else:\n                        frame = 0.0\n                    self.prev_animation_buffers[i][...] = self.animation_buffers[i]\n                    self.animation_buffers[i][...] = animation.get_animation_transforms(frame)\n","sub_path":"PyEngine3D/Render/Actor.py","file_name":"Actor.py","file_ext":"py","file_size_in_byte":8129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"163726476","text":"#!/Users/BillyBuehl/anaconda3/bin/python\n# rename every file in a folder\n\nimport os, shutil, sys\n\nclass bcolors:\n    BLUE = '\\033[94m'\n    RED = '\\033[91m'\n    GREEN = '\\033[92m'\n    ENDC = '\\033[0m'\n\n# get cwd\ndirectory = os.getcwd()\nprint('Altering files in: ' + bcolors.BLUE + directory + bcolors.ENDC)\nprint(bcolors.RED + '\\'x\\'' + bcolors.ENDC + ' to quit.')\n\n# set filenames\nquitters = ['x', 'clear', 'quit']\nif len(sys.argv) > 1:\n    response = input('Use ' + bcolors.GREEN + sys.argv[1] + bcolors.ENDC + ' for filenames? (y/n): ')\n    if response in quitters:\n        sys.exit()\n    elif response == 'y':\n        filename = sys.argv[1]\n    else:\n        filename = input('Filename: ')\nelse:\n    filename = input('Enter name for files: ')\n\n# prompt for types you want to change\nprint()\nmutableTypes = []\nprint('File types to change:')\nprint('\\'\\' to break')\nwhile True:\n    fileType = input('filetype (ex. \\'.jpg\\'): ')\n    if fileType == '':\n        break\n    mutableTypes.append(fileType)\n\n\n# get files to change\nfiles2change = []\nfor item in os.listdir(directory):\n    if os.path.splitext(item)[1] in mutableTypes:\n        files2change.append(item)\n\n\n# change files\nprint()\nnum = 0\nfor item in files2change:\n    fileExt = os.path.splitext(item)[1]\n    newFile = filename + '_' + str(num) + fileExt\n    os.rename(item, newFile)\n    print(item + ' renamed to: ' + bcolors.GREEN + newFile + bcolors.ENDC)\n    num += 1\n\nprint('\\n' + bcolors.GREEN + 'Done.' 
+ bcolors.ENDC)","sub_path":"renameFiles.py","file_name":"renameFiles.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"188589205","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#Example script for downloading data from ECMWF\n#The data cover the whole globe and come in GRIB format.\n#First step: convert them to netCDF and turn the reduced Gaussian grid into a regular Gaussian grid\n#\n# cdo -R -f nc copy filegrib filenetCdf\n#\n#The grids then have to be remapped to a regular lonlat layout: we can use the griglia.txt file, keeping in mind\n# that the spatial resolution of ERA5 is 31km x 31km (about 0.25 degrees)\n#\n#cdo remapbil,griglia.txt fileRuotato fileLonLat \n\nfrom ecmwfapi import ECMWFDataServer\n\nserver = ECMWFDataServer()\n \nserver.retrieve({\n    'class'   : \"ea\", \n    'dataset' : \"era5\",\n    'stream'  : \"oper\",\n    'padding' : \"0\",\n    'time'    : \"0000/0100/0200/0300/0400/0500/0600/0700/0800/0900/1000/1100/1200/1300/1400/1500/1600/1700/1800/1900/2000/2100/2200/2300\",\n    'date'    : \"2015-01-01/to/2015-12-31\",\n    'type'    : \"an\",\n    'step'    : \"0\",\t\n    'levtype' : \"sfc\", \n    'param'   : \"166.128\",\n    'target'  : \"v10.nc\",\n    'format'  : \"netcdf\",\n    'grid'    : \"0.25/0.25\", \t\n    'area'    : \"50/4/35/21\" \t\n    })\n","sub_path":"script_python_donwdloadERA5/script_scarica_era5_wind_v.py","file_name":"script_scarica_era5_wind_v.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"261058767","text":"class SList:\r\n\r\n    def __init__(self):\r\n        self.head = None\r\n\r\n    def print_values(self):\r\n\r\n        runner = self.head\r\n        while(runner != None):\r\n            print(runner.value)\r\n            runner = runner.next\r\n        return self\r\n    \r\n    def add_to_front(self, value):\r\n\r\n        new_node = SLNode(value)\r\n        new_node.next = self.head\r\n        self.head = new_node\r\n        return self\r\n\r\n    def add_to_back(self,value):\r\n\r\n        if self.head == None:\r\n            self.add_to_front(value)\r\n            return self\r\n        \r\n        new_node = SLNode(value)\r\n        runner = self.head\r\n\r\n        while(runner.next != None):\r\n            runner = runner.next\r\n        \r\n        runner.next = new_node\r\n        return self\r\n    \r\n    def remove_from_front(self):\r\n        node_tobe_removed = self.head\r\n        self.head = node_tobe_removed.next\r\n        # node_tobe_removed.next = None\r\n        returned_value = node_tobe_removed.value\r\n        # del node_tobe_removed\r\n        return returned_value\r\n    \r\n    def remove_from_back(self):\r\n        runner = self.head\r\n        while(runner.next.next != None):\r\n            runner = runner.next\r\n        returned_value = runner.next.value\r\n        runner.next = None\r\n        return returned_value\r\n    \r\n    def value_at(self, value):\r\n        runner = self.head\r\n        counter = 0\r\n        while(runner != None):\r\n            if runner.value == value:\r\n                if runner.next != None:\r\n                    return counter\r\n                else:\r\n                    return -1 # last element\r\n            counter += 1\r\n            runner = runner.next    \r\n        return -2 # no value\r\n\r\n    def remove_val(self, value):\r\n        at = self.value_at(value)\r\n        if at > 0:\r\n            runner = self.head\r\n            i = 0\r\n            while(i < at-1):\r\n                runner = runner.next\r\n                i += 1\r\n            runner.next = runner.next.next\r\n\r\n        elif at == 0:\r\n            self.remove_from_front()  # the value sits at the head, so pop from the front\r\n\r\n        elif at == -1:\r\n            self.remove_from_back()\r\n        \r\n        elif at == -2:\r\n            return \"The list doesn't have this value\"\r\n    \r\n    def last_index(self):\r\n        i = 0\r\n        runner = self.head\r\n        while(runner.next != None):\r\n            runner = runner.next\r\n            i += 1\r\n        return i\r\n    \r\n    \r\n    def insert_at(self, value, n):\r\n        last_idx = self.last_index()\r\n\r\n        if n == last_idx:\r\n            self.add_to_back(value)\r\n            return self\r\n\r\n        elif n > last_idx:\r\n            print(\"Not Allowed\")\r\n            return self\r\n        
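# (added note) index 0 just makes the new node the head, so this branch reuses add_to_front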
\n elif n == 0:\n self.add_to_front(value)\n return self\n \n runner = self.head\n i = 0\n\n while(i < n-1 ):\n runner = runner.next\n i += 1 \n new_node = SLNode(value)\n new_node.next = runner.next\n runner.next = new_node \n return self\n\nclass SLNode:\n\n def __init__(self, value):\n self.value = value\n self.next = None\n \n\n\n ","sub_path":"_python/data_structures/singly_linked_lists.py","file_name":"singly_linked_lists.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"175116286","text":"import m3u8\n\nfrom fHDHR.tools import isint, isfloat\n\n\nclass OriginChannels():\n\n def __init__(self, fhdhr, origin):\n self.fhdhr = fhdhr\n self.origin = origin\n\n def get_channels(self):\n\n stations_url = 'https://api.locastnet.org/api/watch/epg/' + str(self.origin.location[\"DMA\"])\n url_headers = {'Content-Type': 'application/json', 'authorization': 'Bearer %s' % self.origin.token}\n\n try:\n stationsReq = self.fhdhr.web.session.get(stations_url, headers=url_headers)\n stationsReq.raise_for_status()\n except self.fhdhr.web.exceptions.SSLError as err:\n self.fhdhr.logger.error('Error while getting stations: %s' % err)\n return []\n except self.fhdhr.web.exceptions.HTTPError as err:\n self.fhdhr.logger.error('Error while getting stations: %s' % err)\n return []\n\n stationsRes = stationsReq.json()\n\n cleaned_channels = []\n for station_item in stationsRes:\n\n thumbnails = []\n for thumb_opt in [\"logo226Url\", \"logoUrl\"]:\n\n try:\n thumbnail = station_item[thumb_opt]\n except TypeError:\n thumbnail = None\n except KeyError:\n thumbnail = None\n if thumbnail:\n thumbnails.append(thumbnail)\n if not len(thumbnails):\n thumbnails = [None]\n\n clean_station_item = {\n \"name\": station_item[\"name\"],\n \"id\": station_item[\"id\"],\n \"thumbnail\": thumbnails[0]\n }\n\n # Typically this will be `2.1 KTTW` but occasionally Locast only provides a channel number here\n # fHDHR device.channels will provide us a number if that is the case\n if (isint(str(station_item['callSign']).split(\" \")[0])\n or isfloat(str(station_item['callSign']).split(\" \")[0])):\n clean_station_item[\"number\"] = str(station_item['callSign']).split(\" \")[0]\n clean_station_item[\"callsign\"] = str(\" \".join(station_item['callSign'].split(\" \")[1:]))\n else:\n clean_station_item[\"callsign\"] = str(station_item['callSign'])\n\n cleaned_channels.append(clean_station_item)\n return cleaned_channels\n\n def get_channel_stream(self, chandict):\n videoUrl = ('https://api.locastnet.org/api/watch/station/' +\n str(chandict[\"origin_id\"]) + '/' +\n self.origin.location['latitude'] + '/' +\n self.origin.location['longitude']\n )\n videoUrl_headers = {\n 'Content-Type': 'application/json',\n 'authorization': 'Bearer %s' % self.origin.token,\n 'User-Agent': \"curl/7.64.1\"}\n\n try:\n videoUrlReq = self.fhdhr.web.session.get(videoUrl, headers=videoUrl_headers)\n videoUrlReq.raise_for_status()\n except self.fhdhr.web.exceptions.HTTPError as err:\n self.fhdhr.logger.error('Error while getting station URL: %s' % err)\n return None\n\n videoUrlRes = videoUrlReq.json()\n\n if self.fhdhr.config.dict[\"origin\"][\"force_best\"]:\n streamurl = self.m3u8_beststream(videoUrlRes['streamUrl'])\n else:\n streamurl = videoUrlRes['streamUrl']\n return streamurl\n\n def m3u8_beststream(self, m3u8_url):\n bestStream = None\n videoUrlM3u = m3u8.load(m3u8_url)\n self.fhdhr.logger.info('force_best set in config. 
Checking for Best Stream')\n\n if len(videoUrlM3u.playlists) == 0 or not videoUrlM3u.is_variant:\n self.fhdhr.logger.info('No Stream Variants Available.')\n return m3u8_url\n\n for videoStream in videoUrlM3u.playlists:\n if not bestStream:\n bestStream = videoStream\n elif videoStream.stream_info.bandwidth > bestStream.stream_info.bandwidth:\n bestStream = videoStream\n\n if bestStream:\n self.fhdhr.logger.info('BestStream URL Found!')\n return bestStream.absolute_uri\n else:\n self.fhdhr.logger.info('No Stream Variant Found.')\n return m3u8_url\n","sub_path":"origin/origin_channels.py","file_name":"origin_channels.py","file_ext":"py","file_size_in_byte":4363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"330594212","text":"from __future__ import absolute_import\nfrom __future__ import print_function\n\n__author__ = \"monoDrive\"\n__copyright__ = \"Copyright (C) 2018 monoDrive\"\n__license__ = \"MIT\"\n__version__ = \"1.0\"\n\n\nimport logging\nimport socket\nimport threading\n\ntry:\n from Queue import Queue\n from Queue import Empty\nexcept:\n from queue import Queue # for Python 3\n from queue import Empty\n\nfrom monodrive.networking.messaging import Message\n\n\nclass BaseClient(object):\n \"\"\"\n BaseClient that communicates to Unreal Engine\n\n There is corresponding software within Unreal that will accept signals\n from a port using TCP.\n \"\"\"\n def __init__(self, endpoint, raw_message_handler):\n self.endpoint = endpoint\n self.raw_message_handler = raw_message_handler\n self.b_socket_connnected = False # if socket == None, means client is not connected\n self.wait_connected = threading.Event()\n self.sock = None\n\n # Start a thread to get data from the socket\n self.receiving_thread = threading.Thread(target=self.__receiving)\n self.receiving_thread.setDaemon(1)\n #self.receiving_thread.start()\n\n def connect(self, timeout = 1):\n \"\"\" Setup connection to the socket listening in Unreal. \"\"\"\n if not self.isconnected():\n try:\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n #adding this option so all ports are closed\n #self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.sock.connect(self.endpoint)\n #self.sock = s\n self.b_socket_connnected = True\n self.receiving_thread.start()\n #self.sock = s\n # logging.getLogger(\"network\").debug(\"connected to %s\" % self.endpoint)\n except Exception as e:\n # logging.getLogger(\"network\").error(\n # 'Can not connect to {0} \\n Is your game running? \\n Error {1}'.format(str(self.endpoint), e))\n self.b_socket_connnected = False\n\n def isconnected(self):\n \"\"\" Return if the connection is available. \"\"\"\n return self.b_socket_connnected\n\n def disconnect(self):\n \"\"\" Remove the connection with the client properly. \"\"\"\n if self.isconnected():\n logging.getLogger(\"network\").info(\"BaseClient, request disconnect from server in {0}\".format(\n threading.current_thread().name))\n self.b_socket_connnected = False\n\n #time.sleep(1)\n self.sock.shutdown(socket.SHUT_RDWR)\n # Because socket is on read in __receiving thread,\n # need to call shutdown to force it to close\n if self.sock: # This may also be set to None in the __receiving thread\n self.sock.close()\n self.sock = None\n #time.sleep(0.1) # this is tricky\n \n\n def __receiving(self):\n \"\"\" Method used within thread to retrieve information from the socket. 
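Runs until disconnect: each pass reads one Message from the socket and hands it to raw_message_handler; a failed read triggers disconnect. 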
\"\"\"\n # logging.getLogger(\"network\").info(\"TCPClient start receiver on {0}\".format(threading.current_thread().name))\n while self.isconnected():\n message = Message()\n try:\n message.read(self.sock)\n logging.getLogger(\"network\").debug(str(self.endpoint) + \" \" + str(message))\n except Exception as e:\n logging.getLogger(\"network\").error('Failed to receive message: %s' % str(e))\n logging.getLogger(\"network\").error(str(self.endpoint) + \" raw data:\" + str(message.raw_data))\n message = None\n\n if message is None:\n # logging.getLogger(\"network\").info('TCPClient: remote disconnected, no more message')\n self.disconnect()\n continue\n\n if self.raw_message_handler:\n self.raw_message_handler(message) # will block this thread\n else:\n pass\n \n\n def send(self, message):\n \"\"\" Return response from Unreal \"\"\"\n if self.isconnected():\n return message.write(self.sock)\n else:\n logging.getLogger(\"network\").error('Fail to send message, client is not connected')\n return False\n\n\nclass Client(object):\n \"\"\"\n Client is the public interface for the Unreal communcation.\n\n \"\"\"\n def __init__(self, endpoint):\n self.message_client = BaseClient(endpoint, self.__raw_message_handler)\n self.message_id = 0\n self.responses = Queue()\n\n\n self.isconnected = self.message_client.isconnected\n self.connect = self.message_client.connect\n self.disconnect = self.message_client.disconnect\n self.queue = Queue()\n\n self.data_ready = threading.Event()\n self.stop_event = threading.Event()\n self.main_thread = threading.Thread(target=self.worker, args=(self,))\n self.main_thread.setDaemon(1)\n self.main_thread.start()\n \n\n def __raw_message_handler(self, raw_message):\n self.responses.put(raw_message)\n\n def stop(self, timeout=2):\n logging.getLogger(\"network\").debug('stopping client')\n self.stop_event.set()\n self.main_thread.join(timeout=timeout)\n logging.getLogger(\"network\").debug('stopped client')\n\n def worker(self, _client):\n while not self.stop_event.is_set():\n if self.data_ready.wait(.1):\n self.data_ready.clear()\n while not self.queue.empty():\n task = self.queue.get()\n task()\n self.queue.task_done()\n\n def request(self, message, timeout=5):\n \"\"\" Return a response from Unreal \"\"\"\n def do_request():\n if not self.message_client.send(message):\n return None\n\n # request can only be sent in the main thread, do not support multi-thread submitting request togethergetting here 2\n if threading.current_thread().name == self.main_thread.name:\n do_request()\n else:\n self.queue.put(do_request)\n self.data_ready.set()\n\n response = None\n #while len(responses) < num_messages:\n try:\n response = self.responses.get(True, timeout)\n #responses.append(response)\n except Empty:\n logging.getLogger(\"network\").error('Can not receive a response from server. \\\n timeout after {:0.2f} seconds'.format(timeout))\n #responses.append(None)\n #self.message_id += 1 # Increment only after the request/response cycle finished\n\n #if num_messages == 1:\n # return responses[0]\n return response\n","sub_path":"monodrive/networking/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":6737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"637464201","text":"import cv2\r\nimport numpy as np\r\nfrom openpyxl import Workbook\r\n\r\n# for ms excel e.g. 'AZ' to 'BA'\r\ndef NextColumn(string):\r\n # ['A', 'B', 'C', .. 
etc]\r\n    alphaB = [chr(asciiNum) for asciiNum in range(65, 91)]\r\n\r\n    # extract alphabet from string\r\n    onlyAlpha = \"\"\r\n    for char in string:\r\n        if char.isalpha():\r\n            onlyAlpha += char\r\n\r\n    # if last character in 'onlyAlpha' is not 'Z'\r\n    if alphaB.index(onlyAlpha[-1]) + 1 <= alphaB.index('Z'):\r\n        # go to next letter e.g. 'A' to 'B'\r\n        onlyAlpha = onlyAlpha[:-1] + alphaB[alphaB.index(onlyAlpha[-1]) + 1]\r\n\r\n    # if 'Z'\r\n    else:\r\n\r\n        # change last character from 'Z' to 'A'\r\n        onlyAlpha = onlyAlpha[:-1] + 'A'\r\n\r\n        # assume all onlyAlpha characters from first character to previous last character [:-1]\r\n        # are 'Z'(s) >> if so, they will be changed to 'A'(s) and a new 'A' will be added to the end\r\n        isZs = True\r\n\r\n        # loop from first character to previous last character [:-1]\r\n        for pos in range(len(onlyAlpha) - 2, -1, -1):\r\n            # if 'Z'\r\n            if alphaB.index(onlyAlpha[pos]) + 1 > alphaB.index('Z') and isZs == True:\r\n                onlyAlpha = onlyAlpha[:pos] + 'A' + onlyAlpha[pos + 1:]\r\n\r\n            # if not 'Z'\r\n            else:\r\n                if isZs == True:\r\n                    # go to next letter e.g. 'A' to 'B' >> can only happen once >> after that, no more flips\r\n                    onlyAlpha = onlyAlpha[:pos] + alphaB[alphaB.index(onlyAlpha[pos]) + 1] + onlyAlpha[pos + 1:]\r\n                    isZs = False\r\n\r\n        # only when all characters in onlyAlpha are 'Z'(s)\r\n        if isZs == True:\r\n            onlyAlpha = onlyAlpha + 'A'\r\n\r\n    return onlyAlpha\r\n\r\n# for debugging (makes the pixels easier to read)\r\n# record pixels in an excel file >> each pixel column in the text photo is recorded in one excel column\r\ndef photoTextPixelToExcel(text, by_width_or_height):\r\n    wb = Workbook()\r\n    ws = wb.active\r\n\r\n    # prev: used to compare the width of the previous iteration with the current one\r\n    # + here store the width of the first element in the array as the first element of the iteration\r\n    prev = int(text[0][by_width_or_height])\r\n\r\n    # store the first element in excel\r\n    ws['A1'] = str(text[0][0]) + ' ' + str(text[0][1]) + ' ' + str(text[0][2])\r\n\r\n    currentExcelLetter = 'A'\r\n    currentExcelNum = 2\r\n\r\n    # loop from the 2nd element onwards\r\n    for x in range(1, text.size):\r\n        current = int(text[x][by_width_or_height])\r\n        if (current != prev):\r\n            # get the next excel column\r\n            currentExcelLetter = NextColumn(currentExcelLetter)\r\n            currentExcelNum = 1\r\n        ws[currentExcelLetter + str(currentExcelNum)] = str(text[x][0]) + ' ' + str(text[x][1]) + ' ' + str(text[x][2])\r\n        currentExcelNum += 1\r\n        prev = current\r\n\r\n    wb.save(\"empty_book.xlsx\")\r\n\r\n# extract the whole text's pixels from the photo : sorted by rows\r\ndef extractWholeTextFromPhoto(img, img_height, img_width):\r\n    # *** img should be already converted to black and white\r\n\r\n    dtype = np.dtype([('height', int, 1), ('width', int, 1), ('color', int, 1)])\r\n    text = np.array([(0, 0, 0)], dtype=dtype)\r\n\r\n    #img[row, column]\r\n    # searching pixels\r\n    # 1 . top to bottom >> range(img_height)\r\n    # 2 . 
left to right >> range(img_width)\r\n\r\n    for x in range(img_width):\r\n        for y in range(img_height):\r\n\r\n            # check if it is a character pixel (non-white)\r\n            if img[y, x] != 255:\r\n\r\n                # store characters\r\n                aText = np.array([(y, x, img[y, x])], dtype=dtype)\r\n                text = np.concatenate((text, aText), axis=0)\r\n\r\n    text = np.delete(text, 0, 0)\r\n    return text\r\n\r\n# extract text borders' pixels from photo : scanned column by column\r\ndef extractTextBorderFromPhotoByWidth(img, img_height, img_width):\r\n\r\n    dtype = np.dtype([('height', int, 1), ('width', int, 1), ('color', int, 1)])\r\n    text = np.array([(0, 0, 0)], dtype=dtype)\r\n\r\n\r\n    #img[row, column]\r\n    # searching pixels\r\n    # 1 . top to bottom >> range(img_height)\r\n    # 2 . left to right >> range(img_width)\r\n\r\n    for x in range(img_width):\r\n        # isText: True means we are ready to store the first pixel of the next text run\r\n        # in this column; it stays False until the run ends, so inner pixels are skipped\r\n        isText = True\r\n\r\n        # the first pixel of the column\r\n        prev = img[0, x]\r\n\r\n        # check if the first pixel of the column is a character pixel (non-white)\r\n        if prev != 255:\r\n            # store characters\r\n            aText = np.array([(0, x, img[0, x])], dtype=dtype)\r\n            text = np.concatenate((text,aText), axis=0)\r\n            isText = False\r\n\r\n        for y in range(1, img_height):\r\n            current = img[y, x]\r\n\r\n            # check if it is a character pixel (non-white)\r\n            if current != prev and current != 255 and isText == True:\r\n                # store characters\r\n                aText = np.array([(y, x, img[y, x])], dtype=dtype)\r\n                text = np.concatenate((text, aText), axis=0)\r\n                isText = False\r\n            else:\r\n                # the bottom text border that is followed by a white pixel\r\n                if current == 255 and prev != 255:\r\n                    aText = np.array([(y-1, x, prev)], dtype=dtype)\r\n                    text = np.concatenate((text, aText), axis=0)\r\n                    isText = True\r\n\r\n\r\n            prev = img[y, x]\r\n\r\n    text = np.delete(text, 0, 0)\r\n    return text\r\n\r\n\r\n# extract text borders' pixels from photo : scanned row by row\r\ndef extractTextBorderFromPhotoByHeight(img, img_height, img_width):\r\n\r\n    dtype = np.dtype([('height', int, 1), ('width', int, 1), ('color', int, 1)])\r\n    text = np.array([(0, 0, 0)], dtype=dtype)\r\n\r\n\r\n    #img[row, column]\r\n    # searching pixels\r\n    # 1 . left to right >> range(img_width)\r\n    # 2 . 
top to bottom >> range(img_height)\r\n\r\n    for y in range(img_height):\r\n        # isText: True means we are ready to store the first pixel of the next text run\r\n        # in this row; it stays False until the run ends, so inner pixels are skipped\r\n        isText = True\r\n\r\n        # the first pixel of the row\r\n        prev = img[y, 0]\r\n\r\n        # check if the first pixel of the row is a character pixel (non-white)\r\n        if prev != 255:\r\n            # store characters\r\n            aText = np.array([(y, 0, img[y, 0])], dtype=dtype)\r\n            text = np.concatenate((text,aText), axis=0)\r\n            isText = False\r\n\r\n        for x in range(1, img_width):\r\n            current = img[y, x]\r\n\r\n            # check if it is a character pixel (non-white)\r\n            if current != prev and current != 255 and isText == True:\r\n                # store characters\r\n                aText = np.array([(y, x, img[y, x])], dtype=dtype)\r\n                text = np.concatenate((text, aText), axis=0)\r\n                isText = False\r\n            else:\r\n                # the right-hand text border that is followed by a white pixel\r\n                if current == 255 and prev != 255:\r\n                    aText = np.array([(y, x-1, prev)], dtype=dtype)\r\n                    text = np.concatenate((text, aText), axis=0)\r\n                    isText = True\r\n\r\n\r\n            prev = img[y, x]\r\n\r\n    text = np.delete(text, 0, 0)\r\n    return text\r\n\r\n\r\n# extract the inner text line pixels from the photo\r\ndef extractInnerLineByWidth(img, img_text):\r\n\r\n    for border in range(0, img_text.size, 2):\r\n        top_border = img_text[border][0]\r\n        bottom_border = img_text[border + 1][0]\r\n\r\n\r\n        border_difference = bottom_border - (top_border - 1) # - 1 because: e.g. top_border is 83 and bottom_border is 85\r\n                                                             # 85 - 83 is 2, 
while the total difference is 83 (1) + 84 (1) + 85 (1)\r\n                                                             # = 3\r\n        half = border_difference / 2\r\n        #print(top_border, bottom_border, border_difference, half, end=\"\")\r\n\r\n        if border_difference % 2 == 0:\r\n            half += 1\r\n        else:\r\n            half += 0.5\r\n\r\n        half = int(half)\r\n\r\n        img[(top_border - 1) + half][img_text[border][1]] = 180\r\n\r\n        #print(\"\",half)\r\n    return img\r\n\r\n# extract the inner text line pixels from the photo\r\ndef extractInnerLineByHeight(img, img_text):\r\n\r\n    for border in range(0, img_text.size, 2):\r\n        top_border = img_text[border][1]\r\n        bottom_border = img_text[border + 1][1]\r\n\r\n\r\n        border_difference = bottom_border - (top_border - 1) # - 1 because: e.g. top_border is 83 and bottom_border is 85\r\n                                                             # 85 - 83 is 2, while the total difference is 83 (1) + 84 (1) + 85 (1)\r\n                                                             # = 3\r\n        half = border_difference / 2\r\n        #print(top_border, bottom_border, border_difference, half, end=\"\")\r\n\r\n        if border_difference % 2 == 0:\r\n            half += 1\r\n        else:\r\n            half += 0.5\r\n\r\n        half = int(half)\r\n\r\n        img[img_text[border][0]][(top_border - 1) + half] = 80\r\n\r\n        #print(\"\",half)\r\n    return img\r\n\r\n\r\ndef main():\r\n    np.set_printoptions(threshold=np.nan)\r\n\r\n    img = cv2.imread('test.png',cv2.IMREAD_GRAYSCALE)\r\n    height, width = img.shape\r\n\r\n    img_text = extractTextBorderFromPhotoByHeight(img, height, width)\r\n    img = extractInnerLineByHeight(img, img_text)\r\n\r\n    img_text = extractTextBorderFromPhotoByWidth(img, height, width)\r\n    img = extractInnerLineByWidth(img, img_text)\r\n\r\n    # order by_height = 0\r\n    # order by_width = 1\r\n    #photoTextPixelToExcel(img_text, 0)\r\n\r\n    print(img_text)\r\n    cv2.imwrite('result.png', img)\r\n    cv2.imshow('image',img)\r\n    cv2.waitKey(0)\r\n    cv2.destroyAllWindows()\r\n\r\nmain()","sub_path":"ATR - workshop.py","file_name":"ATR - workshop.py","file_ext":"py","file_size_in_byte":9920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"214442125","text":"import sys\nimport csv\n\nfile_contents = sys.stdin\n\nfor LINE in file_contents:\n\tline = LINE.strip()\n\tLIST = line.split(\",\")\n\tif LIST[0] != \"ball\":\n\t\tcontinue\n\tif(LIST[9] != '\"\"' and LIST[9] != \"run out\" and LIST[9] != \"retired hurt\" and LIST[9] != \"obstructing the field\"):\n\t\twickets = 1\n\telse:\n\t\twickets = 0 \n\tprint(LIST[4],LIST[6],wickets,1, sep=\",\") \t\t#Batsman, Bowler, No. of wickets, No. of Deliveries\n \n\t\n\t\t\n","sub_path":"adminmgr/media/code/python/map1/mapper1.py","file_name":"mapper1.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"246898938","text":"import numpy as np\nimport pandas as pd\n\nfrom xray.core import formatting\nfrom xray.core.pycompat import PY3\n\nfrom . 
import TestCase\n\n\nclass TestFormatting(TestCase):\n\n def test_get_indexer_at_least_n_items(self):\n cases = [\n ((20,), (slice(10),)),\n ((3, 20,), (0, slice(10))),\n ((2, 10,), (0, slice(10))),\n ((2, 5,), (slice(2), slice(None))),\n ((1, 2, 5,), (0, slice(2), slice(None))),\n ((2, 3, 5,), (0, slice(2), slice(None))),\n ((1, 10, 1,), (0, slice(10), slice(None))),\n ((2, 5, 1,), (slice(2), slice(None), slice(None))),\n ((2, 5, 3,), (0, slice(4), slice(None))),\n ((2, 3, 3,), (slice(2), slice(None), slice(None))),\n ]\n for shape, expected in cases:\n actual = formatting._get_indexer_at_least_n_items(shape, 10)\n self.assertEqual(expected, actual)\n\n def test_first_n_items(self):\n array = np.arange(100).reshape(10, 5, 2)\n for n in [3, 10, 13, 100, 200]:\n actual = formatting.first_n_items(array, n)\n expected = array.flat[:n]\n self.assertItemsEqual(expected, actual)\n\n with self.assertRaisesRegexp(ValueError, 'at least one item'):\n formatting.first_n_items(array, 0)\n\n def test_format_item(self):\n cases = [\n (pd.Timestamp('2000-01-01T12'), '2000-01-01T12:00:00'),\n (pd.Timestamp('2000-01-01'), '2000-01-01'),\n (pd.Timestamp('NaT'), 'NaT'),\n ('foo', \"'foo'\"),\n (u'foo', \"'foo'\" if PY3 else \"u'foo'\"),\n (b'foo', \"b'foo'\" if PY3 else \"'foo'\"),\n (1, '1'),\n (1.0, '1.0'),\n ]\n for item, expected in cases:\n actual = formatting.format_item(item)\n self.assertEqual(expected, actual)\n\n def format_array_flat(self):\n actual = formatting.format_array_flat(np.arange(100), 10),\n expected = '0 1 2 3 4 ...'\n self.assertEqual(expected, actual)\n\n actual = formatting.format_array_flat(np.arange(100.0), 10),\n expected = '0.0 1.0 ...'\n self.assertEqual(expected, actual)\n\n actual = formatting.format_array_flat(np.arange(100.0), 1),\n expected = '0.0 ...'\n self.assertEqual(expected, actual)\n\n actual = formatting.format_array_flat(np.arange(3), 5),\n expected = '0 1 2'\n self.assertEqual(expected, actual)\n","sub_path":"xray/test/test_formatting.py","file_name":"test_formatting.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"14189559","text":"import json\n\nwith open('s3-bucket-world-Get-policy.json', 'r') as json_file:\n data = json.load(json_file)\n for i in data:\n print('')\n print('ID: ' + i['id'])\n print('Name: ' + i['name'])\n pol = i['policy']\n\n for stat in pol['Statement']:\n if isinstance(stat['Action'], str):\n if \"Get\" in stat['Action']:\n print('ACTION = ', stat)\n elif isinstance(stat['Action'], list):\n for x in range(len(stat['Action'])):\n if \"Get\" in stat['Action'][x]:\n print('ACTION = ', stat)\n","sub_path":"python/parse_scripts/s3_bucket_policy_parse.py","file_name":"s3_bucket_policy_parse.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"127932171","text":"class Solution:\n\tdef maxProfit(self, prices: List[int]) -> int:\n\t\tnums = prices\n\t\tif not nums:\n\t\t\treturn 0\n\t\tres = 0\n\t\tmaxAfter = -1\n\t\tfor i in range(len(nums))[::-1]:\n\t\t\tmaxAfter = max(maxAfter, nums[i])\n\t\t\tres = max(res, maxAfter - nums[i])\n\t\treturn res\n\n","sub_path":"dynamic-programming/best-time-to-buy-and-sell-stock.py","file_name":"best-time-to-buy-and-sell-stock.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"244965632","text":"import os\nimport 
yaml\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\nfrom fastvision.utils.checkpoints import LoadStatedict, SqueezeModel\nfrom fastvision.loss import CrossEntropyLoss\nfrom fastvision.utils.sheduler import CosineLR, LinearLR, ExponentialLR\nfrom fastvision.metrics import Accuracy\n\nfrom data_gen import create_dataloader\nfrom utils.fit import Fit\nfrom models.model import resnet18\n\ndef dataloader_fn(args, device):\n    data_dict = yaml.safe_load(open(args.data_yaml, 'r'))\n\n    num_classes = data_dict['num_classes']\n    category_names = data_dict['categories']\n    assert (num_classes == len(category_names)), f\"num_classes {num_classes} must equal len(category_names) {len(category_names)}\"\n\n    train_dir = os.path.join(data_dict['data_root'], data_dict['train_dir'])\n    train_loader = create_dataloader(prefix='train', data_dir=train_dir, batch_size=args.batch_size, input_size=args.input_size, num_workers=args.num_workers, device=device, cache=args.cache_dir, use_cache=args.use_data_cache, shuffle=True, pin_memory=False, drop_last=False)\n\n    val_dir = os.path.join(data_dict['data_root'], data_dict['test_dir'])\n    val_loader = create_dataloader(prefix='test', data_dir=val_dir, batch_size=args.batch_size, input_size=args.input_size, num_workers=args.num_workers, device=device, cache=args.cache_dir, use_cache=args.use_data_cache, shuffle=True, pin_memory=False, drop_last=False)\n\n    # show_dataset(prefix='train', data_dir=train_dir, category_names=category_names, num_workers=num_workers, cache=cache, use_cache=use_cache)\n\n    args.num_classes = num_classes\n    args.category_names = category_names\n\n    return train_loader, val_loader, data_dict\n\ndef loss_fn(device):\n    loss = CrossEntropyLoss(reduction='mean')\n\n    if device.type == 'cuda':\n        loss = loss.cuda()\n\n    return loss\n\ndef optimizer_fn(model, lr, weight_decay):\n\n    # optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)\n    optimizer = torch.optim.SGD(model.parameters(), lr=lr, weight_decay=weight_decay)\n    return optimizer\n\ndef model_fn(args, device):\n\n    model = resnet18(in_channels=args.in_channels, num_classes=args.num_classes, including_top=True)\n\n    if args.pretrained_weights:\n        model = LoadStatedict(model=model, weights=args.pretrained_weights, device=device, strict=False)\n\n    if device.type == 'cuda':\n        print('Model : using cuda')\n        model = model.cuda()\n\n    if device.type == 'cuda' and args.DataParallel:\n        print('Model : using DataParallel')\n        model = nn.DataParallel(model)\n\n    if device.type == 'cuda' and args.DistributedDataParallel and args.SyncBatchNorm:\n        print('Model : using SyncBatchNorm')\n        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n\n    model = SqueezeModel(model, 'all', True)\n    model = SqueezeModel(model, ['fc'], True)\n\n    model.half().float()\n    return model\n\ndef Train(args, device):\n\n    # ======================= Data Loader ============================\n    train_loader, val_loader, data_dict = dataloader_fn(args, device=device)\n\n    # ======================= Model ============================\n    model = model_fn(args, device=device)\n\n    # ======================= Loss ============================\n    loss = loss_fn(device)\n\n    # ======================= Metrics ============================\n    metric = Accuracy()\n\n    # ======================= Optimizer ============================\n    optimizer = optimizer_fn(model=model, lr=1, weight_decay=args.weight_decay)  # lr has to be set to 1 here; the scheduler below scales it to the real learning rate\n    scheduler = CosineLR(optimizer=optimizer, 
steps=args.epochs * len(train_loader), initial_lr=args.initial_lr, last_lr=args.last_lr)\n # scheduler = ExponentialLR(optimizer=optimizer, steps=1 * len(train_loader), initial_lr=2e-6, last_lr=1e-4)\n\n est = Fit(\n model=model,\n optimizer=optimizer,\n scheduler=scheduler,\n loss=loss,\n metric=metric,\n\n start_epoch=0,\n end_epoch=args.epochs,\n\n device = device,\n\n train_loader=train_loader,\n val_loader=val_loader,\n )\n\n # est.find_lr()\n\n est.trainEpoches()","sub_path":"resnet/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"605592383","text":"# -*- coding: utf-8 -*-\nfrom house_renting.spider_settings import lianjia, a58\n\nBOT_NAME = 'house_renting'\n\nCOMMANDS_MODULE = 'house_renting.commands'\nSPIDER_MODULES = ['house_renting.spiders']\nNEWSPIDER_MODULE = 'house_renting.spiders'\n\nUSER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1 ' \\\n 'Safari/605.1.15 '\n\nROBOTSTXT_OBEY = False\n\nDOWNLOAD_DELAY = 10\n\nCONCURRENT_REQUESTS_PER_DOMAIN = 1\n\nCOOKIES_ENABLED = True\n\nTELNETCONSOLE_ENABLED = False\n\nDEFAULT_REQUEST_HEADERS = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'en',\n}\n\n# SPIDER_MIDDLEWARES = {\n# 'house_renting.middlewares.HouseRentingSpiderMiddleware': 543,\n# }\n\n# DOWNLOADER_MIDDLEWARES = {\n# 'house_renting.middlewares.MyCustomDownloaderMiddleware': 543,\n# }\n\nITEM_PIPELINES = {\n 'house_renting.pipelines.HouseRentingPipeline': 100,\n 'house_renting.pipelines.DuplicatesPipeline': 200,\n 'scrapy.pipelines.images.ImagesPipeline': 300,\n 'house_renting.pipelines.ESPipeline': 400,\n}\nIMAGES_STORE = '/house-renting/data/images'\nMEDIA_ALLOW_REDIRECTS = True\n\n# Enable and configure the AutoThrottle extension (disabled by default)\n# See http://doc.scrapy.org/en/latest/topics/autothrottle.html\nAUTOTHROTTLE_ENABLED = True\n# The initial download delay\nAUTOTHROTTLE_START_DELAY = 5\n# The maximum download delay to be set in case of high latencies\nAUTOTHROTTLE_MAX_DELAY = 10\n# The average number of requests Scrapy should be sending in parallel to\n# each remote server\nAUTOTHROTTLE_TARGET_CONCURRENCY = 1.0\n# Enable showing throttling stats for every response received:\nAUTOTHROTTLE_DEBUG = True\n\n# Enable and configure HTTP caching (disabled by default)\n# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings\n# HTTPCACHE_ENABLED = True\n# HTTPCACHE_EXPIRATION_SECS = 0\n# HTTPCACHE_DIR = 'httpcache'\n# HTTPCACHE_IGNORE_HTTP_CODES = []\n# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'\n\nLOG_LEVEL = 'DEBUG'\n\nSPIDER_SETTINGS = {\n 'lianjia': {\n 'cities': lianjia.cities,\n 'available_cities': lianjia.available_cities,\n 'available_cities_map': lianjia.available_cities_map,\n },\n '58': {\n 'cities': a58.cities,\n 'available_cities': a58.available_cities,\n 'available_cities_map': a58.available_cities_map,\n },\n}\n","sub_path":"crawler/house_renting/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"437661740","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 18 14:48:26 2017\n\nFind kth smallest value in BST\nFind kth largest value in BST\n\n\"\"\"\n\nclass Node(object):\n\n def 
__init__(self, key=None, left=None, right=None, parent=None):\r\n        self.key = key\r\n        self.left = left\r\n        self.right = right\r\n        self.parent = parent\r\n\r\n    def __repr__(self):\r\n        return '(k:{})'.format(self.key)\r\n\r\ndef CreateSampleBST():\r\n\r\n    # Not a complete binary tree\r\n    node0 = Node(16)\r\n    node1 = Node(8, None, None, node0)\r\n    node2 = Node(20, None, None, node0)\r\n    node3 = Node(7, None, None, node1)\r\n    node4 = Node(9, None, None, node1)\r\n    node5 = Node(19, None, None, node2)\r\n    node6 = Node(21, None, None, node2)\r\n    node7 = Node(13, None, None, node4)\r\n\r\n    node4.right = node7\r\n    node2.left = node5\r\n    node2.right = node6\r\n    node1.left = node3\r\n    node1.right = node4\r\n    node0.left = node1\r\n    node0.right = node2\r\n\r\n#    # Complete BST tree example\r\n#    node0 = Node(4)\r\n#    node1 = Node(2, None, None, node0)\r\n#    node2 = Node(6, None, None, node0)\r\n#    node3 = Node(1, None, None, node1)\r\n#    node4 = Node(3, None, None, node1)\r\n#    node5 = Node(5, None, None, node2)\r\n#    node6 = Node(7, None, None, node2)\r\n#\r\n#    node0.left = node1\r\n#    node0.right = node2\r\n#    node1.left = node3\r\n#    node1.right = node4\r\n#    node2.left = node5\r\n#    node2.right = node6\r\n#\r\n    return node0\r\n\r\ndef InOrderTraverseIt(root):\r\n    \r\n    if root.left:\r\n        InOrderTraverseIt(root.left)\r\n\r\n    print('{}, '.format(root.key), end='')\r\n\r\n    if root.right:\r\n        InOrderTraverseIt(root.right)\r\n\r\n\r\ndef FindTheKthSmallest(root, k, count, bFound, smallestk):\r\n    # Apply in-order traversal and count visited nodes until the kth smallest element is reached\r\n    \r\n    if bFound:\r\n        return bFound\r\n    \r\n    if root.left:\r\n#        print('1. bFound: {}'.format(bFound))\r\n        bFound = FindTheKthSmallest(root.left, k, count, bFound, smallestk)\r\n\r\n#    print('key: {} count[0]: {} bFound: {}'.format(root.key, count[0], bFound))\r\n    count[0] += 1\r\n    if k == count[0]:\r\n#        print('Found')\r\n        smallestk[0] = root.key\r\n        bFound = True\r\n        return bFound\r\n\r\n    if root.right:\r\n#        print('2. 
bFound: {}'.format(bFound))\r\n        bFound = FindTheKthSmallest(root.right, k, count, bFound, smallestk)\r\n\r\n    return bFound\r\n\r\ndef FindTheKthLargest(root, k, count, bFound, kthlargest):\r\n    \r\n    if bFound:\r\n        return bFound\r\n\r\n    if root.right:\r\n        bFound = FindTheKthLargest(root.right, k, count, bFound, kthlargest)\r\n\r\n#    print('key: {} count[0]: {} bFound: {}'.format(root.key, count[0], bFound))\r\n    count[0] += 1\r\n    if k == count[0]:\r\n#        print('Found')\r\n        kthlargest[0] = root.key\r\n        bFound = True\r\n        return bFound\r\n    \r\n    if root.left:\r\n        bFound = FindTheKthLargest(root.left, k, count, bFound, kthlargest)\r\n\r\n    return bFound\r\n\r\n\r\ndef FindKLargestElements(root, k, count, arr, bFinished):\r\n    \r\n    if bFinished:\r\n        return arr, bFinished\r\n    \r\n    if root.right:\r\n        arr, bFinished = FindKLargestElements(root.right, k, count, arr, bFinished)\r\n    \r\n#    print('key: {} k: {} count: {}'.format(root.key, k, count[0]))\r\n    count[0] += 1\r\n    if count[0] <= k:\r\n        arr.append(root.key)\r\n        if count[0] == k:\r\n            bFinished = True\r\n            return arr, bFinished\r\n    \r\n    if root.left:\r\n        arr, bFinished = FindKLargestElements(root.left, k, count, arr, bFinished)\r\n\r\n    return arr, bFinished\r\n\r\n# To find the smallest k elements, mirror this traversal and stop as soon as k items have been collected\r\n\r\n\r\ndef main():\r\n    \r\n    root = CreateSampleBST()\r\n    k = 3\r\n    print('\\nIn Order Traversal:')\r\n    InOrderTraverseIt(root)\r\n    print('\\n')\r\n\r\n    kthSmallest = [None]\r\n    FindTheKthSmallest(root, k, [0], False, kthSmallest )\r\n    print('\\n{}th Smallest Element: {}'.format(k, kthSmallest[0]))\r\n\r\n    kthlargest = [None]\r\n    FindTheKthLargest(root, k, [0], False, kthlargest )\r\n    print('\\n{}th Largest Element: {}'.format(k, kthlargest[0]))\r\n\r\n\r\n    arr, bFinished = FindKLargestElements(root, k, [0], [], False)\r\n    print('\\nK largest Elements: {}'.format(arr))\r\n\r\nif __name__=='__main__':\r\n    main()","sub_path":"mulakat/EPI15_BinarySearchTrees/findKthSmallestElementinBST.py","file_name":"findKthSmallestElementinBST.py","file_ext":"py","file_size_in_byte":4072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"86232342","text":"import socket\n\n\n# def is_open(ip, port):\n#     print(ip, port)\n#     s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n#     try:\n#         s.connect((ip, port))\n#         s.shutdown(2)\n#         print('%d is open' % port)\n#         return True\n#     except Exception, e:\n#         print('%d is down' % port)\n#         print(e)\n#         return False\n\ndef is_open(ip, port):\n    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    result = sock.connect_ex((ip, port))\n    if result == 0:\n        print(\"Port %d is already in use\" % port)\n        return True\n    else:\n        return False\n\n","sub_path":"example/asynchronous_racos/port_conflict.py","file_name":"port_conflict.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"410935081","text":"from rest_framework.test import APITestCase\n\nfrom eums.test.api.api_test_helpers import create_user\nfrom eums.test.config import BACKEND_URL\n\n\nENDPOINT_URL = BACKEND_URL + 'programme/'\n\n\nclass ProgrammeEndPointTest(APITestCase):\n    def test_should_create_programme(self):\n        focal_person = create_user(self)\n        programme_details = {'name': \"Programme 1\", 'focal_person': focal_person['id']}\n        response = self.client.post(ENDPOINT_URL, programme_details, format='json')\n\n        self.assertEqual(response.status_code, 201)\n        self.assertDictContainsSubset(programme_details, 
response.data)","sub_path":"eums/test/api/test_programme_endpoint.py","file_name":"test_programme_endpoint.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"300609467","text":"#Problem Link : https://leetcode.com/problems/subtract-the-product-and-sum-of-digits-of-an-integer/\n\nclass Solution:\n def subtractProductAndSum(self, n: int) -> int:\n my_prod = 1\n my_sum = 0\n \n while n:\n n, remainder = divmod(n, 10)\n my_prod *= remainder \n my_sum += remainder\n \n return my_prod - my_sum\n","sub_path":"1281_Sum_product.py","file_name":"1281_Sum_product.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"567499521","text":"import sqlite3\n\n\nconnection = sqlite3.connect(\"hr.db\")\ncursor = connection.cursor()\n\n\nclass CliInterface:\n def __init__(self):\n pass\n\n def start(self):\n print(\"Welcome! You're the last HR :(\")\n\n while True:\n command = input(\"Enter command > \")\n try:\n self.__command_dispatcher(command)\n except Exit:\n break\n\n def __command_dispatcher(self, command):\n parts = command.split(\" \")\n\n if parts[0] == \"show_students\":\n show_students()\n\n elif parts[0] == \"show_courses\":\n print(\"All courses are: \")\n show_courses()\n\n elif parts[0] == \"courses_with_students\":\n print(\"For each student - list the courses he has attending\")\n courses_with_students()\n\n elif parts[0] == \"the_smartest_students\":\n the_smartest_students()\n\n elif parts[0] == 'exit':\n raise Exit\n\n elif parts[0] == \"help\":\n print(\"These are all possible commands:\\nshow_students,\\n\\\nshow_courses,\\ncourses_with_students,\\nthe_smartest_students,\\nexit\")\n\n else:\n print('Not a valid command')\n\n\nclass Exit(Exception):\n pass\n\nstudents_query = \"\"\"\n SELECT students.name, github\n FROM Students\n\"\"\"\n\n\ndef show_students():\n cursor.execute(students_query)\n rows = cursor.fetchall()\n for row in rows:\n print(row[0], row[1])\n\n\ncourses_query = \"\"\"\n SELECT courses.name\n FROM Courses\n\"\"\"\n\n\ndef show_courses():\n cursor.execute(courses_query)\n rows = cursor.fetchall()\n for row in rows:\n print(row[0])\n\n\nstudent_courses_query = \"\"\"\n SELECT students.name, course_id, courses.name\n FROM Students\n JOIN Students_to_Courses\n ON Students.id = student_id\n JOIN Courses\n ON Courses.id = course_id\n\"\"\"\n\n\ndef courses_with_students():\n cursor.execute(student_courses_query)\n rows = cursor.fetchall()\n dictionary = {}\n for row in rows:\n if row[0] not in dictionary:\n dictionary[row[0]] = []\n dictionary[row[0]].append(row[2])\n\n for element in dictionary:\n print(element + ': ' + ', '.join(dictionary[element]))\n\nthe_most_courses = \"\"\"\n SELECT students.name, COUNT(course_id) AS course_count, github\n FROM Students\n JOIN Students_to_Courses\n ON Students.id = student_id\n GROUP BY id\n ORDER BY course_count DESC\n\"\"\"\n\n\ndef the_smartest_students():\n cursor.execute(the_most_courses)\n rows = cursor.fetchmany(5)\n for row in rows:\n print(row[0], row[2] + ', courses: ' + str(row[1]))\n\n\ndef main():\n new = CliInterface()\n new.start()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"week8/1/queries.py","file_name":"queries.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"325566657","text":"from tkinter import *\n\n\n# 
Root window configuration\nroot = Tk()\n\"\"\"\n# Dynamic variables\ntexto = StringVar()\ntexto.set(\"Un Nuevo texto\")\n\n\nLabel(root, text=\"Hola Mundo\").pack(anchor=\"nw\")\nlabel = Label(root, text=\"Otra etiqueta\")\nlabel.pack(anchor=\"center\")\nLabel(root, text=\"Tres etiqueta\").pack(anchor=\"se\")\n\n\nlabel.config(bg=\"green\", fg=\"blue\", font=(\"Verdana\", 24))\nlabel.config(textvariable=texto)\n\"\"\"\nimagen = PhotoImage(file=\"imagen.gif\")\nLabel(root, image=imagen, bd=0).pack(side=\"left\")\n# Application main loop\nroot.mainloop()","sub_path":"Fase 4 - Temas avanzados/Tema 13 - Interfaces gráficas con tkinter/label.py","file_name":"label.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"116819019","text":"#!/usr/bin/python\n\nimport time\nimport json\nfrom elasticsearch import Elasticsearch # install via \"$ sudo pip install elasticsearch\"\nes = Elasticsearch()\n\nindex=\"records-fresh-ie-20180316\"\n\ndef searchIp(ip):\n    query = \"doc.ip: {}\".format(ip)\n    result = es.search(index = index,\n                       q=query,\n                       _source=False)\n    # print(result)\n    return result\n\ndef searchP443Fingerprint(fp):\n    query = \"doc.p443.data.http.response.request.tls_handshake.server_certificates.certificate.parsed.subject_key_info.fingerprint_sha256.keyword: {}\".format(fp)\n    result = es.search(index = index,\n                       q=query,\n                       _source=False)\n    # print(result)\n    return result\n\ndef searchP443FingerprintGroupByCipherSuite(fp):\n    body={\n        \"query\": {\n            \"bool\": {\n                \"must\": [\n                    {\n                        \"query_string\": {\n                            \"query\": \"doc.p443.data.http.response.request.tls_handshake.server_certificates.certificate.parsed.subject_key_info.fingerprint_sha256.keyword: {}\".format(fp),\n                            \"analyze_wildcard\": True,\n                            \"default_field\": \"*\"\n                        }\n                    }\n                ]\n            }\n        },\n        \"aggs\": {\n            \"cipherSuite\": {\n                \"terms\": {\n                    \"field\": \"doc.p443.data.http.response.request.tls_handshake.server_hello.cipher_suite.name.keyword\",\n                    \"size\": 100,\n                    \"order\": {\n                        \"_count\": \"desc\"\n                    }\n                }\n            }\n        }\n    }\n    result = es.search(index = index,\n                       body=body,\n                       _source=False)\n    # print(result)\n    return result\n\ndef searchIpTest():\n    print(\"Search IP Test:\")\n    avgTime = 0\n    ip = \"34.240.5.183\"\n    for i in range(5):\n        startTime=time.time()\n        searchIp(ip)\n        endTime=time.time()\n        timeTaken=endTime-startTime\n        print(\"  {}: {}s\".format(i, timeTaken))\n        avgTime=((avgTime*i)+timeTaken)/(i+1)\n    print(\"  Avg time taken: {}s\".format(avgTime))\n\ndef searchP443FingerprintTest():\n    print(\"Search P443 Fingerprint Test:\")\n    avgTime = 0\n    fp = \"9f0050378fa2a1389b35cf74e0f1063ad42eaebc5a324b10c6aacf3ab08f7a94\"\n    for i in range(5):\n        startTime=time.time()\n        result = searchP443Fingerprint(fp)\n        endTime=time.time()\n        timeTaken=endTime-startTime\n        print(\"  {}: {}s {} matches found\".format(i, timeTaken, result[\"hits\"][\"total\"]))\n        avgTime=((avgTime*i)+timeTaken)/(i+1)\n    print(\"  Avg time taken: {}s\".format(avgTime))\n\ndef searchP443FingerprintGroupByCipherSuiteTest():\n    print(\"Search P443 Fingerprint, Group By Cipher Suite Test:\")\n    avgTime = 0\n    fp = \"9f0050378fa2a1389b35cf74e0f1063ad42eaebc5a324b10c6aacf3ab08f7a94\"\n    for i in range(5):\n        startTime=time.time()\n        result = searchP443FingerprintGroupByCipherSuite(fp)\n        endTime=time.time()\n        timeTaken=endTime-startTime\n        print(\"  {}: {}s {} matches found\".format(i, timeTaken, result[\"hits\"][\"total\"]))\n        cipherSuites=result[\"aggregations\"][\"cipherSuite\"][\"buckets\"]\n        for suite in cipherSuites:\n            
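# (added note) each aggregation bucket pairs a cipher-suite name with its doc_count\n            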
print(\" {}: {}\".format(suite[\"key\"], suite[\"doc_count\"]))\n avgTime=((avgTime*i)+timeTaken)/(i+1)\n print(\" Avg time taken: {}s\".format(avgTime))\n\ndef runTests():\n # searchIpTest()\n # searchP443FingerprintTest()\n searchP443FingerprintGroupByCipherSuiteTest()\n\nrunTests()","sub_path":"Code/Evaluation/Elasticsearch.py","file_name":"Elasticsearch.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"217571934","text":"# coding: utf-8\n\n# app: mx.ine.tlx.apps.commons\n# módulo: módulos\n# descripción: Modelos relacionadas con los Módulos de Atención Ciudadana\n# autor: Javier Sanchez Toledano\n# fecha: lunes, 5 de marzo de 2018\n\nfrom django.db import models\nfrom apps.commons.behaviors import Authorable, Trackable, Timestampable\n\n\nB = 1\nB1 = 2\nB2 = 3\nB3 = 4\nB4 = 5\nB5 = 6\nB6 = 7\nB7 = 8\nB8 = 9\nB9 = 10\n\nESTACIONES = (\n (B, 'Básico'),\n (B1, 'Básico + 1'),\n (B2, 'Básico + 2'),\n (B3, 'Básico + 3'),\n (B4, 'Básico + 4'),\n (B5, 'Básico + 5')\n)\n\nFIJO = 1\nFIJO_ADICIONAL = 2\nSEMIFIJO = 3\nMOVIL = 4\nURBANO = 5\n\nTIPO_MAC = (\n (FIJO, 'Fijo Distrital'),\n (FIJO_ADICIONAL, 'Fijo Adicional'),\n (SEMIFIJO, 'Semifijo'),\n (MOVIL, 'Móvil'),\n (URBANO, 'Urbano Itinerante')\n)\n\n\nclass Modulo(Trackable, Authorable, Timestampable):\n \"\"\"Modelo para registro de macs.\"\"\"\n distrito = models.PositiveSmallIntegerField()\n modulo = models.CharField('Módulo', max_length=6, help_text='Clave completa del MAC a 6 dígitos')\n\n class Meta:\n \"\"\"Metadatos del modelo Modulo\"\"\"\n unique_together = (('distrito', 'modulo'), )\n\n def __str__(self):\n return self.modulo\n\n @property\n def actual(self):\n \"\"\"Regresa la revisión actual.\"\"\"\n return self.historialmodulo.first()\n\n\nclass HistorialModulo(Trackable, Authorable, Timestampable):\n \"\"\"Historial de configuraciones de módulo.\"\"\"\n modulo = models.ForeignKey(Modulo, on_delete=models.CASCADE, related_name=\"historialmodulo\")\n tipo = models.PositiveSmallIntegerField('Tipo de Módulo', choices=TIPO_MAC)\n estaciones = models.PositiveSmallIntegerField('Estaciones', choices=ESTACIONES)\n doble_turno = models.BooleanField('Doble Turno', default=False)\n fecha_inicio = models.DateField(\"Inicio de operaciones\")\n fecha_termino = models.DateField(\"Fecha de término de operaciones\")\n horario = models.TextField(help_text='Escribe el horario del módulo')\n observaciones = models.TextField(help_text='Describe brevemente la razón del cambio')\n\n class Meta:\n \"\"\"Metadatos del modelo HistorialModulo.\"\"\"\n ordering = ['-fecha_inicio']\n get_latest_by = 'fecha_inicio'\n\n def __str__(self):\n return \"%s - %s: %s (%s)\" % (self.modulo, self.modulo.historialmodulo.last().get_tipo_display(), self.modulo.historialmodulo.last().get_estaciones_display(), self.fecha_inicio)\n","sub_path":"src/apps/commons/modulos.py","file_name":"modulos.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"214067549","text":"from matplotlib.image import imread\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n\ndef read_image(file_name):\n image_matrix = imread(file_name)\n print(image_matrix.shape)\n return image_matrix\n\n\ndef output_image(image_arr):\n plt.imshow(image_arr, interpolation='nearest')\n plt.show()\n\n\ndef patch_sampling(image_arr, patch_width, patch_heigh, patch_distance):\n image_heigh = image_arr.shape[0]\n 
image_width = image_arr.shape[1]\n    result = []\n    for line_index in range(len(image_arr)):\n        if line_index % patch_distance == 0:\n            for each_pixel_index in range(len(image_arr[line_index])):\n                if line_index + patch_heigh <= image_heigh and each_pixel_index + patch_width < image_width:\n                    if each_pixel_index % patch_distance == 0:\n                        patch_arr = image_arr[line_index:line_index+patch_heigh, each_pixel_index:each_pixel_index+patch_width]\n                        result.append((patch_arr, (line_index, each_pixel_index)))\n    return result\n\nif __name__ == '__main__':\n    patch_width = 224\n    patch_heigh = 224\n    patch_distance = 8\n    data_size = 40\n    result = []\n    for i in range(1, 41):\n        image_data = read_image(\"graph_data/Images/test_{}.png\".format(i))\n        annotation_data = read_image(\"graph_data/Annotation/test_{}.png\".format(i))\n        image_patch_result = patch_sampling(image_data, patch_width, patch_heigh, patch_distance)\n        annotation_patch_result = patch_sampling(annotation_data, patch_width, patch_heigh, patch_distance)\n        result.append((image_patch_result, annotation_patch_result))\n    # result: an array, element: (origin_data, annotation_data), length=number of origin image\n    # origin_data/annotation_data: an array, element: (patch_data, index_of_left_top_pixel), length=number of patch\n    # patch_data: three dimension array\n    # each patch_data in origin data should relate to the same index patch_data in annotation_data\n\n","sub_path":"patch_samping.py","file_name":"patch_samping.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"234199493","text":"from selenium.webdriver.common.by import By\n\nfrom web_selenium.base.basepage import BasePage\n\n\nclass MainPage(BasePage):\n\n    _base_url = 'https://testerhome.com/'\n    # _base_url = 'https://www.baidu.com/'\n\n    shequ = {\n        'locat': (\n            By.LINK_TEXT, '社区'\n        ),\n        'action': 'click'\n    }\n\n    bug = {\n        'locat': (\n            By.LINK_TEXT, 'Bug 曝光台'\n        ),\n        'action': 'click'\n    }\n\n    wenda = {\n        'locat': (\n            By.LINK_TEXT, '问答'\n        ),\n        'action': 'click'\n    }\n\n    shetuan = {\n        'locat': (\n            By.LINK_TEXT, '社团'\n        ),\n        'action': 'click'\n    }\n\n    zhaopin = {\n        'locat': (\n            By.LINK_TEXT, '招聘'\n        ),\n        'action': 'click'\n    }\n\n    wiki = {\n        'locat': (\n            By.LINK_TEXT, 'Wiki'\n        ),\n        'action': 'click'\n    }\n\n    kaiyuan = {\n        'locat': (\n            By.LINK_TEXT, '开源项目'\n        ),\n        'action': 'click'\n    }\n\n    kuzhan = {\n        'locat': (\n            By.LINK_TEXT, '酷站'\n        ),\n        'action': 'click'\n    }\n\n    bangdan = {\n        'locat': (\n            By.LINK_TEXT, 'TTF榜单'\n        ),\n        'action': 'click'\n    }\n\n    def go_shequ(self):\n        self.steps(**self.shequ)\n        return self\n\n    def go_bug(self):\n        self.steps(**self.bug)\n        return self\n\n    def go_wenda(self):\n        self.steps(**self.wenda)\n        return self\n\n    def go_shetuan(self):\n        self.steps(**self.shetuan)\n        return self\n\n    def go_zhaopin(self):\n        self.steps(**self.zhaopin)\n        return self\n\n    def go_wiki(self):\n        self.steps(**self.wiki)\n        return self\n\n    def go_kaiyuan(self):\n        self.steps(**self.kaiyuan)\n        return self\n\n    def go_kuzhan(self):\n        self.steps(**self.kuzhan)\n        return self\n\n    def go_bangdan(self):\n        self.steps(**self.bangdan)\n        return self\n\n    def go_home(self):\n        self.driver.get(self._base_url)","sub_path":"tests/main_process/main_page.py","file_name":"main_page.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"489144350","text":"import numpy as np\nfrom icecream import ic\n\ndef det(matrix):\n    return np.linalg.det(matrix)\n\ndef main():\n    print('input lenght of matrix')\n    lenght = int(input())\n    matrix = np.zeros((lenght,lenght))\n    for i in range(lenght):\n        print(f'input row num.{i} with space')\n        row = [int(i) for i in input().split()]\n        matrix[i] = row\n    ic(matrix)\n    return np.linalg.det(matrix)\n\nif __name__ == '__main__':\n    ic(main())\n","sub_path":"det.py","file_name":"det.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"40022868","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/11/18 22:10\n# @Author : alvin\n# @File : test01.py\n# @Software: PyCharm\nimport pytest\nlogin_data=[(\"admin\",\"111111\"),(\"admin\",\"\")]\ndef login(user,psw):\n    print(\"登录的账户:%s,登录密码:%s\"%(user,psw))\n    if psw:\n        return True\n    else:\n        return False\n@pytest.mark.parametrize(\"user,psw\",login_data)\ndef test_login(user,psw):\n    result = login(user,psw)\n    assert result == True\n\nif __name__ == \"__main__\":\n    pytest.main([\"-s\",\"test01.py\"])","sub_path":"pytest_yoyo/one/oneonethree/test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"19874647","text":"\"\"\"\nExtract embeddings using a trained model.\nUsage:\n    python3 get_embeddings.py config.xvector-NB.yaml\n\"\"\"\nimport sys\nimport lidbox.api\n\ndef main(config_path):\n    split2meta, labels, config = lidbox.api.load_splits_from_config_file(config_path)\n    split2ds = lidbox.api.create_datasets(split2meta, labels, config)\n    split2numpy_ds, target2label = lidbox.api.extract_embeddings_as_numpy_data(split2ds, labels)\n    train = split2numpy_ds[\"train\"]\n    print(train[\"X\"].shape)\n    print(train[\"y\"].shape)\n    print(train[\"ids\"].shape)\n\nif __name__ == \"__main__\":\n    assert len(sys.argv) == 2, \"first argument should be config yaml path\"\n    main(sys.argv[1])\n","sub_path":"examples/common-voice/scripts/get_embeddings.py","file_name":"get_embeddings.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"319154072","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom dateutil.parser import *\n\nimport gdata.photos.service\nimport gdata.media\nimport gdata.geo\n\nclass PicasaHandler:\n    pwa = \"\"\n\n    def __init__(self, user, password):\n        self.pwa = gdata.photos.service.PhotosService()\n        self.pwa.email = user \n        self.pwa.password = password \n        self.pwa.source = 'python'\n        self.pwa.ProgrammaticLogin() \n\n    def upload(self,path,nameAlbum):\n        albumId = self.getAlbumId(nameAlbum)\n        if albumId is None: albumId = self.createAlbum(nameAlbum)\n        album_url = '/data/feed/api/user/default/albumid/%s' % (albumId)\n        nome = path.rsplit('/',1)[1] #Split path from the file name\n        nome = nome.split('.')[0] #Remove the extension\n        photo = self.pwa.InsertPhotoSimple(album_url, nome, '', path, content_type='image/jpeg') \n        return photo.published.text\n    \n    def getAlbumId(self,nameAlbum):\n        if nameAlbum is None:\n            albumId = \"default\"\n        else:\n            album = self.searchAlbum(nameAlbum)\n            albumId = None if album is None else album.gphoto_id.text\n        return albumId\n\n    def createAlbum(self,nameAlbum):\n        album = self.pwa.InsertAlbum(title=nameAlbum, summary=nameAlbum)\n        return album\n\n    def searchAlbum(self,nameAlbum):\n        albums = self.pwa.GetUserFeed()\n        for album in albums.entry:\n            if nameAlbum == album.title.text: return album\n\n    def listAlbums(self):\n        lista = []\n        albums = self.pwa.GetUserFeed()\n        for album in albums.entry:\n            lista.append(album.title.text)\n        return lista\n\n    def listPhotosAlbum(self,nameAlbum):\n        albumId = self.getAlbumId(nameAlbum)\n        photos = self.pwa.GetFeed('/data/feed/api/user/default/albumid/%s?kind=photo&imgmax=d' % (albumId))\n        listPhotos = []\n        for photo in photos.entry:\n            listPhotos.append(photo.content.src)\n        return listPhotos\n\n    def getUpdate(self,limitDate):\n        photos = self.pwa.GetFeed('/data/feed/api/user/default?kind=photo&imgmax=d')\n        listPhotos = []\n        limitDate = parse(limitDate)\n        for photo in photos.entry:\n            photoDate = photo.published.text\n            if(parse(photoDate) > limitDate):\n                listPhotos.append(photo.content.src)\n            else:\n                break\n        return listPhotos\n","sub_path":"picasa_handler.py","file_name":"picasa_handler.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"555822713","text":"import rbdl\nimport numpy as np\n\ndef get_model():\n\n    model = rbdl.Model()\n\n    m1 = 1\n    m2 = 1\n    l1 = 1\n    l2 = 1\n    model.gravity = np.array([0, -9.81, 0])\n    joint_rot_y = rbdl.Joint.fromJointType(\"JointTypeRevoluteZ\")\n    xtrans = rbdl.SpatialTransform()\n\n    xtrans.r = np.array([0., -3., 0.])\n\n    link0 = rbdl.Body.fromMassComInertia(0,\n        np.array([0., 0, 0.]),\n        np.diag([0.0, 0.0, 0.0])\n    )\n\n    link1 = rbdl.Body.fromMassComInertia(m1,\n        np.array([0., -l1 * 0.5, 0.]),\n        np.diag([m1 * l1 * l1 / 3., m1 * l1 * l1 / 30., m1 * l1 * l1 / 3.])\n    )\n\n    link2 = rbdl.Body.fromMassComInertia(m2,\n        np.array([0., -l2 * 0.5, 0.]),\n        np.diag([m2 * l2 * l2 / 3., m2 * l2 * l2 / 30., m1 * l2 * l2 / 3.])\n    )\n\n    a = model.AppendBody(rbdl.SpatialTransform(), joint_rot_y, link0)\n    b = model.AppendBody(xtrans, joint_rot_y, link1)\n    #c = model.AppendBody(xtrans, joint_rot_y, link2)\n\n    return model\n\n\ndef finite_differences(model, x, u):\n    \"\"\" calculate gradient of plant dynamics using finite differences\n    x np.array: the state of the system\n    u np.array: the control signal\n    \"\"\"\n    dof = u.shape[0]\n    num_states = model.q_size\n\n    A = np.zeros((num_states, num_states))\n    B = np.zeros((num_states, dof))\n\n    eps = 1e-4  # finite differences epsilon\n    for ii in range(num_states):\n        # calculate partial differential w.r.t. x\n        inc_x = x.copy()\n        inc_x[ii] += eps\n        state_inc, _ = runge_integrator(model, inc_x, u.copy())\n        dec_x = x.copy()\n        dec_x[ii] -= eps\n        state_dec, _ = runge_integrator(model, dec_x, u.copy())\n        A[:, ii] = (state_inc - state_dec) / (2 * eps)\n\n    for ii in range(dof):\n        # calculate partial differential w.r.t. u\n        inc_u = u.copy()\n        inc_u[ii] += eps\n        state_inc, _ = runge_integrator(model, x.copy(), inc_u)\n        dec_u = u.copy()\n        dec_u[ii] -= eps\n        state_dec, _ = runge_integrator(model, x.copy(), dec_u)\n        B[:, ii] = (state_inc - state_dec) / (2 * eps)\n\n    return A, B\n\n\ndef runge_integrator(model, t, y, h, tau):\n\n    k1 = rhs(model, y, tau)\n    k2 = rhs(model, y + 0.5 * h * k1, tau)\n    k3 = rhs(model, y + 0.5 * h * k2, tau)\n    k4 = rhs(model, y + h * k3, tau)\n\n    return (k1 + 2. * k2 + 2. * k3 + k4)/6.0\n\n\ndef rhs(model, y, tau):\n\n    dim = model.dof_count\n    res = np.zeros(dim * 2)\n    Q = np.zeros(model.q_size)\n    QDot = np.zeros(model.qdot_size)\n    QDDot = np.zeros(model.qdot_size)\n    Tau = np.zeros(model.qdot_size)\n    Tau[0] = tau\n    for i in range(0, dim):\n        Q[i] = y[i]\n        QDot[i] = y[i + dim]\n\n    rbdl.ForwardDynamics(model, Q, QDot, Tau, QDDot)\n    for i in range(0, dim):\n        res[i] = QDot[i]\n        res[i + dim] = QDDot[i]\n\n    return res","sub_path":"Testing/get_lin_test.py","file_name":"get_lin_test.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"187805068","text":"\"\"\"Create dataframe with messages required to run attitude tests.\n\nStore topics required for attitude tests.\nAdd missing messages to the dataframe which are required for attitude tests.\n\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport argparse\nimport os\nimport pyulog\nfrom pyulgresample import ulogconv as conv\nfrom pyulgresample import mathpandas as mpd\nfrom pyulgresample import loginfo\nfrom pyulgresample.ulogdataframe import DfUlg, TopicMsgs\n\nimport matplotlib\n\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nparser = argparse.ArgumentParser(description=\"Script to process attitude\")\nparser.add_argument(\"filename\", metavar=\"file.ulg\", help=\"ulog file\")\n\n\ndef add_roll_pitch_yaw(df):\n    \"\"\"Compute roll, pitch and yaw angle and add them to the dataframe.\n\n    Arguments:\n    df -- dataframe containing messages from the required topics\n\n    \"\"\"\n    roll, pitch, yaw = mpd.series_quat2euler(\n        df[\"T_vehicle_attitude_0__F_q_0\"],\n        df[\"T_vehicle_attitude_0__F_q_1\"],\n        df[\"T_vehicle_attitude_0__F_q_2\"],\n        df[\"T_vehicle_attitude_0__F_q_3\"],\n    )\n    df[\"T_vehicle_attitude_0__NF_roll\"] = roll.values\n    df[\"T_vehicle_attitude_0__NF_pitch\"] = pitch.values\n    df[\"T_vehicle_attitude_0__NF_yaw\"] = yaw.values\n\n\ndef add_euler_error(df):\n    \"\"\"Compute orientation error as euler angles and add them to the dataframe.\n\n    Arguments:\n    df -- dataframe containing messages from the required topics\n\n    \"\"\"\n    df[\"T_vehicle_attitude_setpoint_0__NF_e_roll\"] = mpd.angle_wrap(\n        df[\"T_vehicle_attitude_setpoint_0__F_roll_body\"]\n        - df[\"T_vehicle_attitude_0__NF_roll\"]\n    )\n    df[\"T_vehicle_attitude_setpoint_0__NF_e_pitch\"] = mpd.angle_wrap(\n        df[\"T_vehicle_attitude_setpoint_0__F_pitch_body\"]\n        - df[\"T_vehicle_attitude_0__NF_pitch\"]\n    )\n    df[\"T_vehicle_attitude_setpoint_0__NF_e_yaw\"] = mpd.angle_wrap(\n        df[\"T_vehicle_attitude_setpoint_0__F_yaw_body\"]\n        - df[\"T_vehicle_attitude_0__NF_yaw\"]\n    )\n\n\ndef add_vehicle_z_axis(df):\n    \"\"\"Compute the body z axis in world coordinate system and add it to the dataframe.\n\n    Arguments:\n    df -- dataframe containing messages from the required topics\n\n    \"\"\"\n    x = pd.Series(\n        np.zeros(df.shape[0]),\n        index=df[\"timestamp\"],\n        name=\"T_vehicle_attitude_0__NF_body_z_axis_x\",\n    )\n    y = pd.Series(\n        np.zeros(df.shape[0]),\n        index=df[\"timestamp\"],\n        name=\"T_vehicle_attitude_0__NF_body_z_axis_y\",\n    )\n    z = pd.Series(\n        np.ones(df.shape[0]),\n        index=df[\"timestamp\"],\n        name=\"T_vehicle_attitude_0__NF_body_z_axis_z\",\n    )\n    x, y, z = mpd.series_quatrot(\n        x,\n        y,\n        z,\n        df[\"T_vehicle_attitude_0__F_q_0\"],\n        df[\"T_vehicle_attitude_0__F_q_1\"],\n        df[\"T_vehicle_attitude_0__F_q_2\"],\n        df[\"T_vehicle_attitude_0__F_q_3\"],\n    )\n\n    df[x.name] = x.values\n    df[y.name] = y.values\n    df[z.name] = z.values\n\n\ndef add_desired_tilt(df):\n    \"\"\"Compute desired tilt angle and add it to the dataframe.\n\n    Arguments:\n    df -- dataframe containing messages from the required topics\n\n    \"\"\"\n    if \"T_vehicle_attitude_setpoint_0__NF_body_z_axis_sp_x\" not in df:\n        add_desired_z_axis(df)\n\n    x = pd.Series(np.zeros(df.shape[0]), index=df[\"timestamp\"], name=\"x\")\n    y = pd.Series(np.zeros(df.shape[0]), index=df[\"timestamp\"], name=\"y\")\n    z = pd.Series(np.ones(df.shape[0]), index=df[\"timestamp\"], name=\"z\")\n\n    tilt = mpd.series_dot(\n        x,\n        y,\n        z,\n        df[\"T_vehicle_attitude_setpoint_0__NF_body_z_axis_sp_x\"],\n        df[\"T_vehicle_attitude_setpoint_0__NF_body_z_axis_sp_y\"],\n        df[\"T_vehicle_attitude_setpoint_0__NF_body_z_axis_sp_z\"],\n    )\n    tilt.where(\n        tilt < 1, 1, inplace=True\n    )  # ensure that angle 1 is never exceeded\n    df[\"T_vehicle_attitude_setpoint_0__NF_tilt_desired\"] = tilt.values\n    df[\"T_vehicle_attitude_setpoint_0__NF_tilt_desired\"] = df[\n        \"T_vehicle_attitude_setpoint_0__NF_tilt_desired\"\n    ].apply(np.arccos)\n\n\ndef add_tilt(df):\n    \"\"\"Compute tilt angle and add it to the dataframe.\n\n    Arguments:\n    df -- dataframe containing messages from the required topics\n\n    \"\"\"\n    if \"T_vehicle_attitude_0__NF_body_z_axis_x\" not in df:\n        add_vehicle_z_axis(df)\n\n    x = pd.Series(np.zeros(df.shape[0]), index=df[\"timestamp\"], name=\"x\")\n    y = pd.Series(np.zeros(df.shape[0]), index=df[\"timestamp\"], name=\"y\")\n    z = pd.Series(np.ones(df.shape[0]), index=df[\"timestamp\"], name=\"z\")\n\n    tilt = mpd.series_dot(\n        x,\n        y,\n        z,\n        df[\"T_vehicle_attitude_0__NF_body_z_axis_x\"],\n        df[\"T_vehicle_attitude_0__NF_body_z_axis_y\"],\n        df[\"T_vehicle_attitude_0__NF_body_z_axis_z\"],\n    )\n    tilt.where(\n        tilt < 1, 1, inplace=True\n    )  # ensure that angle 1 is never exceeded\n    df[\"T_vehicle_attitude_0__NF_tilt\"] = tilt.values\n    df[\"T_vehicle_attitude_0__NF_tilt\"] = df[\n        \"T_vehicle_attitude_0__NF_tilt\"\n    ].apply(np.arccos)\n\n\ndef add_vehicle_inverted(df):\n    \"\"\"Check if the vehicle is tilted more than 90 degrees and add that information to the dataframe.\n\n    Arguments:\n    df -- dataframe containing messages from the required topics\n\n    \"\"\"\n    if \"T_vehicle_attitude_0__NF_body_z_axis_z\" not in df:\n        add_vehicle_z_axis(df)\n\n    df[\n        \"T_vehicle_attitude_0__NF_tilt_more_90\"\n    ] = df.T_vehicle_attitude_0__NF_body_z_axis_z.values\n    df[df[[\"T_vehicle_attitude_0__NF_tilt_more_90\"]] >= 0] = 0\n    df[df[[\"T_vehicle_attitude_0__NF_tilt_more_90\"]] < 0] = 1\n\n\ndef add_desired_z_axis(df):\n    \"\"\"Compute the desired body z axis in world coordinate system and add it to the dataframe.\n\n    Arguments:\n    df -- dataframe containing messages from the required topics\n\n    \"\"\"\n    x = pd.Series(\n        np.zeros(df.shape[0]),\n        index=df[\"timestamp\"],\n        name=\"T_vehicle_attitude_setpoint_0__NF_body_z_axis_sp_x\",\n    )\n    y = pd.Series(\n        np.zeros(df.shape[0]),\n        index=df[\"timestamp\"],\n        name=\"T_vehicle_attitude_setpoint_0__NF_body_z_axis_sp_y\",\n    )\n    z = pd.Series(\n        np.ones(df.shape[0]),\n        index=df[\"timestamp\"],\n        name=\"T_vehicle_attitude_setpoint_0__NF_body_z_axis_sp_z\",\n    )\n\n    x, y, z = mpd.series_quatrot(\n        x,\n        y,\n        z,\n        df[\"T_vehicle_attitude_setpoint_0__F_q_d_0\"],\n        df[\"T_vehicle_attitude_setpoint_0__F_q_d_1\"],\n        df[\"T_vehicle_attitude_setpoint_0__F_q_d_2\"],\n        df[\"T_vehicle_attitude_setpoint_0__F_q_d_3\"],\n    )\n    df[x.name] = x.values\n    df[y.name] = y.values\n    df[z.name] = z.values\n\n\ndef plot_time_series(df, plt):\n    \"\"\"Plot a time series.\n\n    Arguments:\n    df -- dataframe containing messages from the required topics\n    plt -- plot\n\n    \"\"\"\n    # Remove the plot frame lines\n    delta = (df[\"timestamp\"].max() - df[\"timestamp\"].min()) / 10\n    plt.xticks(\n        np.arange(\n            df[\"timestamp\"].min(),\n            df[\"timestamp\"].max(),\n            step=np.around(delta, decimals=1),\n        )\n    )\n    plt.grid()\n\n\ndef main():\n    \"\"\"Call methods and create pdf with plots showing relevant data.\"\"\"\n    args = parser.parse_args()\n    # create dataframe-ulog class for Attitude/Attiutde-setpoint topic\n    att = DfUlg.create(\n        args.filename, topics=[\"vehicle_attitude\", \"vehicle_attitude_setpoint\"]\n    )\n\n    with PdfPages(\"attitude.pdf\") as pdf:\n\n        # roll pitch and yaw error\n        add_roll_pitch_yaw(att.df)\n        add_euler_error(att.df)\n\n        plt.figure(0, figsize=(20, 13))\n        df_tmp = att.df[\n            [\n                \"timestamp\",\n                \"T_vehicle_attitude_setpoint_0__NF_e_roll\",\n                \"T_vehicle_attitude_setpoint_0__NF_e_pitch\",\n                \"T_vehicle_attitude_setpoint_0__NF_e_yaw\",\n            ]\n        ].copy()\n        df_tmp.plot(x=\"timestamp\", linewidth=0.8)\n        plot_time_series(df_tmp, plt)\n        plt.title(\"Roll-Pitch-Yaw-Error\")\n        plt.ylabel(\"rad\")\n        pdf.savefig()\n        plt.close(0)\n\n        # inverted\n        add_vehicle_z_axis(att.df)\n        add_vehicle_inverted(att.df)\n        plt.figure(1, figsize=(20, 13))\n        df_tmp = att.df[\n            [\"timestamp\", \"T_vehicle_attitude_0__NF_tilt_more_90\"]\n        ].copy()\n        df_tmp.plot(x=\"timestamp\", linewidth=0.8)\n        plot_time_series(df_tmp, plt)\n        plt.title(\"Inverted\")\n        plt.ylabel(\"boolean\")\n        pdf.savefig()\n        plt.close(1)\n\n        # tilt and desired tilt\n        add_desired_z_axis(att.df)\n        add_desired_tilt(att.df)\n        add_tilt(att.df)\n\n        pos_tilt = loginfo.get_param(att.ulog, \"MPC_TILTMAX_AIR\", 0)\n        man_tilt = loginfo.get_param(att.ulog, \"MPC_MAN_TILT_MAX\", 0)\n        plt.figure(2, figsize=(20, 13))\n        df_tmp = att.df[\n            [\n                \"timestamp\",\n                \"T_vehicle_attitude_0__NF_tilt\",\n                \"T_vehicle_attitude_setpoint_0__NF_tilt_desired\",\n            ]\n        ].copy()\n        df_tmp[\"MPC_TILTMAX_AIR\"] = pos_tilt * np.pi / 180\n        df_tmp[\"MPC_MAN_TILT_MAX\"] = man_tilt * np.pi / 180\n        df_tmp.plot(x=\"timestamp\", linewidth=0.8, style=[\"-\", \"-\", \"--\", \"--\"])\n\n        plot_time_series(df_tmp, plt)\n        plt.title(\"Tilt / Desired Tilt\")\n        plt.ylabel(\"rad\")\n        pdf.savefig()\n        plt.close(2)\n\n    print(\"attitude.pdf was created\")\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"examples/attitude.py","file_name":"attitude.py","file_ext":"py","file_size_in_byte":9424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"370134407","text":"# 숫자 정사각형\nimport sys\n\nN, M = map(int, sys.stdin.readline().rstrip().split())\nrec = []\nfor _ in range(N):\n    rec.append(sys.stdin.readline().rstrip())\n\nmax_size = 1\n\n\ndef solve():\n    global rec, max_size, N, M\n    for size in range(2, N + 1):\n        find_size = False\n        for y in range(size - 1, N):\n            for x in range(size - 1, M):\n                if 0 <= y - (size - 1) < N and 0 <= x - (size - 1) < M:\n                    # print(x - (size - 1), y - (size - 1), x, y)\n                    if rec[y - (size - 1)][x - (size - 1)] == rec[y - (size - 1)][x] \\\n                            == rec[y][x - (size - 1)] == rec[y][x]:\n                        max_size = size * size\n                        find_size = True\n                        break\n            if find_size:\n                break\n\n\nif N > 1 or M > 1:\n    solve()\n\nprint(max_size)\n","sub_path":"SsangWoo/python/baekjoon/1051.py","file_name":"1051.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"551277943","text":"import json\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseBadRequest, HttpResponseRedirect\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.contrib.auth.decorators import login_required \n\nfrom album.forms import PhotoForm\nfrom authentification.models import BusinessUser\nfrom myshop.decorators import ajax_required, is_businessuser\n\n\n@login_required\ndef album_view(request, pk):\n    if not BusinessUser.objects.filter(pk=pk).exists():\n        return HttpResponseRedirect(reverse_lazy('welcome'))\n    businessuser = BusinessUser.objects.get(pk=pk)\n    photos = businessuser.album.photos.all()\n    title = \"{}'s album\".format(businessuser.user.username)\n    return render(request, 'album/album.html', {'photos': photos, 'businessuser': businessuser, 'title': title})\n\n@login_required\n@is_businessuser\n@ajax_required\ndef new_photo_view(request):\n    if request.method == 'POST':\n        form = PhotoForm(request.POST, request.FILES)\n\n        if form.is_valid():\n            obj = form.save(commit=False)\n            obj.album = request.user.businessuser.album\n            obj.save()\n            return HttpResponse(json.dumps({'success': True, 'id': obj.id}), content_type='application/json')\n    return HttpResponseBadRequest()\n\n\n@login_required\n@is_businessuser\n@ajax_required\ndef delete_photo_view(request, pk):\n    if request.method == 'POST':\n        request.user.businessuser.album.photos.get(pk=pk).delete()\n        return HttpResponse(json.dumps({'success': True}), content_type='application/json')\n    return HttpResponseBadRequest()","sub_path":"album/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"448222564","text":"# Copyright (c) 2015 Ultimaker B.V.\n# Uranium is released under the terms of the AGPLv3 or higher.\n\nfrom UM.Job import Job\nfrom UM.Application import Application\nfrom UM.Message import Message\n\nimport os.path\n\nfrom UM.i18n import i18nCatalog\ni18n_catalog = i18nCatalog(\"uranium\")\n\n## A Job subclass that performs mesh loading.\n#\n# The result of this Job is a MeshData object.\nclass ReadMeshJob(Job):\n def __init__(self, filename):\n super().__init__()\n self._filename = filename\n self._handler = Application.getInstance().getMeshFileHandler()\n self._device = Application.getInstance().getStorageDevice(\"LocalFileStorage\")\n\n def getFileName(self):\n return self._filename\n\n def run(self):\n loading_message = Message(i18n_catalog.i18nc(\"Loading mesh message, {0} is file name\", \"Loading {0}\".format(self._filename)))\n loading_message.show()\n self.setResult(self._handler.read(self._filename, self._device))\n loading_message.hide()\n result_message = Message(i18n_catalog.i18nc(\"Finished loading mesh message, {0} is file name\", \"Loaded {0}\".format(self._filename)))\n result_message.show()\n","sub_path":"UM/Mesh/ReadMeshJob.py","file_name":"ReadMeshJob.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"414259072","text":"import logging.config\nimport os\nfrom configparser import ConfigParser\n\nimport yaml\nfrom cassandra.cqlengine import connection\n\nfrom se.domain2.account.account import AccountRepo\nfrom se.domain2.domain import BeanContainer\nfrom se.domain2.time_series.time_series import TSFunctionRegistry, TimeSeriesRepo\nfrom se.infras.ib import IBMinBar, IBTick, IBAdjustedDailyBar, IBMarketData\nfrom se.infras.repos import TimeSeriesRepoImpl, AccountRepoImpl\n\nBeanContainer.register(TimeSeriesRepo, TimeSeriesRepoImpl())\nBeanContainer.register(AccountRepo, AccountRepoImpl())\n\nif not os.getenv(\"config.dir\"):\n raise RuntimeError(\"没有配置config.dir\")\n\n# 初始化日志配置\nlog_config = \"{}/log.yaml\".format(os.getenv(\"config.dir\"))\nif os.getenv(\"config.log\"):\n log_config = os.getenv(\"config.log\")\n\nif os.path.exists(log_config):\n logging.config.dictConfig(yaml.load(open(log_config), Loader=yaml.SafeLoader))\n logging.info(\"初始化日志配置成功\")\nelse:\n logging.basicConfig(level=logging.INFO)\n logging.info(\"没有log的配置文件,将使用默认配置\")\n\n# 初始化应用配置\nconfig_file = \"{}/config_default.ini\".format(os.getenv(\"config.dir\"))\nif not os.path.exists(config_file):\n raise RuntimeError(\"需要配置文件config_default.ini\")\nconfig = ConfigParser()\nconfig.read(config_file)\n# 如果运行目录存在config.ini的话,则替换默认的配置\nif os.path.exists(\"config.ini\"):\n config.read(\"config.ini\")\nlogging.info(\"初始化应用配置成功\")\n\n# 初始化DB连接\nconnection.setup(config.get(\"cassandra\", \"contact_points\").split(\",\"),\n config.get(\"cassandra\", \"session_keyspace\"), protocol_version=3,\n port=config.getint(\"cassandra\", \"port\"))\n\n# 注册时序类型\nTSFunctionRegistry.register(IBMinBar(config.get(\"ib_data\", \"host\"), config.getint(\"ib_data\", 'port'),\n config.getint('ib_data', 'client_id')))\nTSFunctionRegistry.register(IBTick(config.get(\"ib_data\", \"host\"), config.getint(\"ib_data\", 'port'),\n config.getint('ib_data', 'client_id')))\nTSFunctionRegistry.register(IBMarketData(config.get(\"ib_data\", \"host\"), config.getint(\"ib_data\", 'port'),\n config.getint('ib_data', 'client_id')))\nTSFunctionRegistry.register(IBAdjustedDailyBar(config.get(\"ib_data\", \"host\"), 
config.getint(\"ib_data\", 'port'),\n config.getint('ib_data', 'client_id')))\n","sub_path":"se/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"223560832","text":"from mlib.boot.stream import arr, ints\nfrom mlib.file import File\nfrom mlib.math import nopl_high\nfrom mlib.proj.struct import vers\nfrom qrsalg.PeakDetectionAlg import PeakDetectionAlg, find_local_maxima\nclass ManualPeakDetection(PeakDetectionAlg):\n MANUAL_INPUT_FILE = File('_data/EP1163_10min_ManInput.mat')\n def preprocess(self, ecg, Fs):\n return nopl_high(ecg, Fs)\n def rpeak_detect(self, ecg_raw, Fs, ecg_flt, ecg_raw_nopl_high):\n # just fixpeaks\n qrs = arr(self.MANUAL_INPUT_FILE['heartbeatevents']['py'][0]).flatten()[0].flatten()\n qrs = ints(qrs)\n qrs = qrs[qrs >= self.rawslice.start]\n qrs = qrs[qrs < self.rawslice.stop]\n if vers(self.version) >= vers(1):\n qrs = find_local_maxima(\n qrs,\n ecg_flt,\n AUTO=True,\n CROP_FIRST_LAST=True\n )\n return qrs\n","sub_path":"qrsalg/Manual.py","file_name":"Manual.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"461429735","text":"'''\nCreated on Sep 20, 2017\n\n@author: micha\n'''\n#https://coinmarketcap.com/\n\n\nimport csv\nimport pandas as pd\nfrom datetime import datetime\nfrom bs4 import BeautifulSoup\nimport urllib.request\n\n\n\ndef init():\n #df = marketcap()\n\n\n df2 = pctvolume()\n print(df2.head())\n\n\n\ndef marketcap():\n url = 'https://coinmarketcap.com/'\n df = pd.DataFrame()\n\n with urllib.request.urlopen(url) as response:\n html = response.read()\n \n soup = BeautifulSoup(html, 'html.parser')\n\n\n namelist = soup.find_all('span', attrs={'class': 'currency-symbol'})\n mc = soup.find_all('td', attrs={'class': 'no-wrap market-cap text-right'})\n vol = soup.find_all('a', attrs={'class': 'volume'})\n\n\n symbols = []\n markcaps = []\n volumes = []\n coins = []\n names = []\n for i in range(len(namelist)):\n startindex = str(namelist[i]).index('s/')\n endindex = str(namelist[i]).index('/\"')\n name = str(namelist[i])[startindex +2 :endindex].upper()\n\n names.append(name)\n coins.append(namelist[i])\n symbols.append(namelist[i].text.strip())\n markcaps.append(mc[i].text.strip())\n volumes.append(vol[i].text.strip())\n\n\n\n df['name'] = names\n df['coin'] = symbols\n df['markcap'] = markcaps\n df['volume24hr'] = volumes\n\n return df\n\n\n\n\ndef pctvolume():\n url = 'https://coinmarketcap.com/currencies/volume/24-hour/'\n df = pd.DataFrame()\n\n with urllib.request.urlopen(url) as response:\n html = response.read()\n \n soup = BeautifulSoup(html, 'html.parser')\n\n pctvol = soup.find_all('h3', attrs={'class': 'volume-header'})\n\n pctvols = []\n for i in range(100):\n pctvols.append(str(pctvol[i].text.strip()).upper())\n\n\n\n df['pct'] = pctvols\n\n return df\n \n\n\n\ndef gainersloosers():\n return ''\n\n\n\n\n\n\ninit()\n\n","sub_path":"src/PyCrypto/coinmarketcap.py","file_name":"coinmarketcap.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"162081757","text":"import sys\nfrom os.path import join, isfile, isdir\nfrom os import makedirs\nimport mock\nimport pytest\nfrom PySide2 import QtWidgets\n\nfrom Tests import save_gui_path\nfrom pyleecan.GUI.Dialog.DMachineSetup.DMachineSetup import DMachineSetup\nfrom 
pyleecan.GUI.Dialog.DMachineSetup.SSimu.SSimu import SSimu\nfrom pyleecan.GUI.Dialog.DMachineSetup.SPreview.SPreview import SPreview\nfrom pyleecan.definitions import DATA_DIR as data_test, MAIN_DIR\nfrom pyleecan.Functions.load import load_matlib\n\nmatlib_path = join(data_test, \"Material\")\nmachine_path = join(MAIN_DIR, \"Data\", \"Machine\")\n\nSCIM_dict = {\n \"file_path\": join(machine_path, \"Railway_Traction.json\").replace(\"\\\\\", \"/\"),\n \"table\": [\n (\"Machine Type\", \"SCIM\"),\n (\"Stator slot number\", \"36\"),\n (\"Rotor slot number\", \"28\"),\n (\"Pole pair number\", \"3\"),\n (\"Topology\", \"Internal Rotor\"),\n (\"Stator phase number\", \"3\"),\n (\"Stator winding resistance\", \"0.02392 Ohm\"),\n (\"Machine total mass\", \"342.8 kg\"),\n (\"Stator lamination mass\", \"143.6 kg\"),\n (\"Stator winding mass\", \"59.06 kg\"),\n (\"Rotor lamination mass\", \"97.54 kg\"),\n (\"Rotor winding mass\", \"21.12 kg\"),\n (\"Shaft mass\", \"21.51 kg\"),\n ],\n \"Nrow\": 13,\n}\nIPMSM_dict = {\n \"file_path\": join(machine_path, \"Toyota_Prius.json\").replace(\"\\\\\", \"/\"),\n \"table\": [\n (\"Machine Type\", \"IPMSM\"),\n (\"Stator slot number\", \"48\"),\n (\"Pole pair number\", \"4\"),\n (\"Topology\", \"Internal Rotor\"),\n (\"Stator phase number\", \"3\"),\n (\"Stator winding resistance\", \"0.03595 Ohm\"),\n (\"Machine total mass\", \"33.38 kg\"),\n (\"Stator lamination mass\", \"15.78 kg\"),\n (\"Stator winding mass\", \"4.001 kg\"),\n (\"Rotor lamination mass\", \"5.006 kg\"),\n (\"Rotor magnet mass\", \"1.236 kg\"),\n (\"Shaft mass\", \"7.355 kg\"),\n ],\n \"Nrow\": 12,\n}\nload_preview_test = [SCIM_dict, IPMSM_dict]\n\n# python -m pytest ./Tests/GUI/Dialog/DMachineSetup/test_SPreview.py\nclass TestSPreview(object):\n def setup_method(self):\n \"\"\"Setup the workspace and the GUI\"\"\"\n # MatLib widget\n material_dict = load_matlib(matlib_path=matlib_path)\n self.widget = DMachineSetup(\n material_dict=material_dict, machine_path=machine_path\n )\n\n @classmethod\n def setup_class(cls):\n \"\"\"Start the app for the test\"\"\"\n print(\"\\nStart Test TestSPreview\")\n if not QtWidgets.QApplication.instance():\n cls.app = QtWidgets.QApplication(sys.argv)\n else:\n cls.app = QtWidgets.QApplication.instance()\n\n @classmethod\n def teardown_class(cls):\n \"\"\"Exit the app after all the test\"\"\"\n cls.app.quit()\n\n @pytest.mark.parametrize(\"test_dict\", load_preview_test)\n def test_load(self, test_dict):\n \"\"\"Check that you can load a machine\"\"\"\n assert isfile(test_dict[\"file_path\"])\n\n return_value = (test_dict[\"file_path\"], \"Json (*.json)\")\n with mock.patch(\n \"PySide2.QtWidgets.QFileDialog.getOpenFileName\", return_value=return_value\n ):\n # To trigger the slot\n self.widget.b_load.clicked.emit()\n\n # Check loaded machine is fully defined\n assert type(self.widget.w_step) is SSimu\n # select preview step\n self.widget.nav_step.setCurrentRow(self.widget.nav_step.count() - 2)\n assert type(self.widget.w_step) is SPreview\n # Check the table\n assert self.widget.w_step.tab_machine.tab_param.rowCount() == test_dict[\"Nrow\"]\n for ii, content in enumerate(test_dict[\"table\"]):\n assert (\n self.widget.w_step.tab_machine.tab_param.item(ii, 0).text()\n == content[0]\n )\n assert (\n self.widget.w_step.tab_machine.tab_param.item(ii, 1).text()\n == content[1]\n )\n # Check Draw FEMM\n FEMM_dir = join(save_gui_path, \"Draw_FEMM\")\n if not isdir(FEMM_dir):\n makedirs(FEMM_dir)\n femm_path = join(FEMM_dir, self.widget.machine.name + \".fem\")\n 
assert not isfile(femm_path)\n\n return_value = (\n femm_path,\n \"FEMM (*.fem)\",\n )\n with mock.patch(\n \"PySide2.QtWidgets.QFileDialog.getSaveFileName\", return_value=return_value\n ):\n self.widget.w_step.tab_machine.b_FEMM.clicked.emit()\n assert isfile(femm_path)\n\n\nif __name__ == \"__main__\":\n a = TestSPreview()\n a.setup_class()\n a.setup_method()\n for test_dict in load_preview_test:\n a.test_load(test_dict)\n a.teardown_class()\n print(\"Done\")\n","sub_path":"Tests/GUI/Dialog/DMachineSetup/test_SPreview.py","file_name":"test_SPreview.py","file_ext":"py","file_size_in_byte":4658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"116593324","text":"#Runtime 139ms, Beats 46.44%\n#Basic idea: Two pointers. Search other 2 elements based on needs instead of thorough search.\n\nclass Solution(object):\n def threeSumClosest(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: int\n \"\"\"\n nums.sort()\n result = nums[0]+nums[1]+nums[2]\n for i in xrange(len(nums)-2):\n j = i+1\n k = len(nums)-1\n while(jabs(sum(temp)-target):\n self.result = sum(temp)\n return\n for i in xrange(idx,len(nums)):\n temp.append(nums[i])\n self.search(nums,target,i+1,temp,k-1)\n void = temp.pop()\n","sub_path":"Array/*16.3Sum Closest.py","file_name":"*16.3Sum Closest.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"310302609","text":"from django.contrib import messages\nfrom django.core.mail import BadHeaderError , send_mail\nfrom django.http import HttpResponseRedirect , QueryDict\nfrom django.shortcuts import redirect , get_object_or_404 , render\nfrom django.utils import timezone\nfrom django.urls import reverse,path\nfrom django.contrib.auth.views import LoginView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\nimport datetime,pytz,random, string\n\nfrom django.views.generic.base import TemplateView\nfrom . 
import views\nfrom django.views.generic.edit import CreateView , UpdateView , FormView\nfrom django.views.generic import ListView\nfrom django import forms\nfrom .forms import RequestForm , RequestIdForm , RequestPasswordForm , RequestGetForm , RequestSendForm\nfrom .forms import CustomerForm, AdminLoginForm\n\nfrom .models import Request , Customer\nimport copy\n\nclass RequestMainView(TemplateView):\n template_name = 'management_system/request_main.html'\n\n# 入館申請画面 (UC-01)\nclass RequestAddView(FormView):\n model = Request\n template_name = 'management_system/request_add.html'\n success_url = '/management_system/add/finish/'\n form_class = RequestSendForm\n\n def get_form(self, form_class=None):\n form = super().get_form(form_class=form_class)\n # form.fields['name'].widget = forms.CheckboxSelectMultiple()\n print(form.errors)\n return form\n\n def form_valid(self,form):\n context = {\n 'form' : form\n }\n if self.request.POST.get('next', '') == 'confirm':\n req = RequestForm(self.request.POST).save(commit=False)\n # 申請されたものの入館時間より早い入館時間を持ち、遅い退館時間を持つもの\n # 及び退館時間も同様\n # 以上の二点に該当するものをfilterで持ってくる\n requests = list(filter(lambda x:True if(req.scheduled_entry_datetime >= x.scheduled_entry_datetime and req.scheduled_entry_datetime < x.scheduled_exit_datetime) else False ,Request.objects.all()))\n requests += list(filter(lambda x:True if(req.scheduled_exit_datetime > x.scheduled_entry_datetime and req.scheduled_exit_datetime <= x.scheduled_exit_datetime) else False ,Request.objects.all()))\n print(requests)\n hit = 0\n cus_hit = 0\n if(len(requests) != 0):\n for req in requests:\n if(req.approval == 1):\n print('承認済みの申請と時間が被りました')\n # listの判別\n cus = req.email\n if(cus.email == self.request.POST.get('email') and cus.name == self.request.POST.get('name') and cus.organization_name == self.request.POST.get('organization_name') and cus.tell_number == self.request.POST.get('tell_number')):\n # そのままcustomerを使う\n print(str(cus.id)+' 全件一致しました')\n print('あなたの申請がこの時間に入っています')\n customer = cus\n cus_hit = 1\n else:\n # Customerを入力のものと置き換える\n print(str(cus.id)+' このデータは全件一致しませんでした')\n hit = 1\n if(hit == 1):\n messages.success(self.request, 'すでに申請されている時間帯なのでこの時間は申請できません')\n context['form_message'] = '重複'\n print('すでに申請されている時間帯なのでこの時間は申請できません')\n print(req)\n elif(cus_hit == 1):\n messages.success(self.request, 'あなたの申請がこの時間に入っています')\n return render(self.request, 'management_system/request_add_check.html', context)\n\n if self.request.POST.get('next', '') == 'back':\n return render(self.request, 'management_system/request_add.html', context)\n if self.request.POST.get('next', '') == 'create':\n # 入力されたものに対してオブジェクトを生成\n req = RequestForm(self.request.POST).save(commit=False)\n cust = CustomerForm(self.request.POST).save(commit=False)\n # 入館時間が現在時刻よりも前の場合は申請を受け付けない\n if req.scheduled_entry_datetime < timezone.localtime():\n messages.success(self.request, '過去の時間に入館申請はできません')\n return render(self.request, 'management_system/request_add.html', context)\n # 入館時間よりも退館時間の方が前の時、申請を受け付けない\n if req.scheduled_entry_datetime >= req.scheduled_exit_datetime:\n messages.success(self.request, '入力時間が正しくありません')\n return render(self.request, 'management_system/request_add.html', context)\n # 時間の判定\n # 承認済みの時間にかぶせて申請が入った場合のみはじく\n\n # emailに該当するものをすべて取得\n customers = list(Customer.objects.filter(email=self.request.POST.get('email')))\n # listの判別\n hit = 0\n for cus in customers:\n # print(cus.tell_number)\n if(cus.name == cust.name and cus.organization_name == self.request.POST.get('organization_name') and cus.tell_number == 
self.request.POST.get('tell_number')):\n # そのままcustomerを使う\n print(str(cus.id)+' 全件一致しました')\n customer = cus\n hit = 1\n else:\n # Customerを入力のものと置き換える\n print(str(cus.id)+' このデータは全件一致しませんでした')\n \n # 一致しなかったときにustomerをRequestに保持させる\n if(hit == 0):\n customer = cust\n # カスタマーの追加とそれを引数に渡す\n\n customer.save()\n req.password = ''.join([random.choice(string.digits) for i in range(4)])\n req.email = customer\n req.request_datetime = timezone.localtime()\n req.save()\n\n self.sendMail(form,req)\n return super().form_valid(form)\n else:\n # 正常動作ではここは通らない。エラーページへの遷移でも良い\n return redirect(reverse_lazy('base:main'))\n\n def sendMail(self, form,req):\n print('保存しました')\n\n subject = ' W社DC利用申請受領のお知らせ'\n massage = req.email.organization_name+' '+ req.email.name + '様\\n\\n'+'お世話になっております。\\nW社でございます。\\n\\n以下の内容でのデータセンターの利用申請を受け付け致しました。\\n申請の承認につきましては、管理者が確認後再度連絡させていただきます。\\n\\n------利用申請内容------\\n申請日時 : ' + req.request_datetime.strftime('%Y/%m/%d %H:%M:%S') + '\\n入館予定日時 : '+req.scheduled_entry_datetime.strftime('%Y/%m/%d %H:%M:%S') +'\\n退館予定日時 : ' + req.scheduled_exit_datetime.strftime('%Y/%m/%d %H:%M:%S') + '\\n------------------------------\\n\\nそれに伴い' + req.email.name + '様の申請番号とパスワードを以下に記載いたします。\\n\\n申請番号 : ' + str(req.pk) + '\\nパスワード : '+ str(req.password) +'\\n\\n申請番号は入退館時に必要になりますので厳重に保管下さい。\\n\\nまた、利用申請が承認されていない状態であれば下記URLで申請内容の修正、取消が行えます。\\n\\nURL : http://t17cs015.pythonanywhere.com/management_system/fix/login/\\n\\n------------------------------\\nW社 DCセンター管理部\\ndbcenterw01@gmail.com\\n------------------------------\\n\\n本メールは”データセンター入退館管理システム”からの自動送信です。\\n'\n from_email = 'dbcenterw1@gmail.com'\n recipient_list = [\n req.email.__str__()\n ]\n print('send mail')\n send_mail(subject,massage,from_email,recipient_list)\n # messages.success(self.request, '申請を受理しました')\n\n return 0\n\n # 元addcheck\n # カレンダー関係のだけ残ってます\n\n # def post(self, request, *args, **kwargs): \n \n # print(request.POST.get('scheduled_entry_datetime')) \n # print(type(request.POST.get('scheduled_entry_datetime')))\n\n # entrys = request.POST.get('scheduled_entry_datetime')\n # exits = request.POST.get('scheduled_exit_datetime')\n\n # entryt= timezone.datetime.strptime(entrys,'%Y-%m-%dT%H:%M')\n # exitt = timezone.datetime.strptime(exits,'%Y-%m-%dT%H:%M')\n\n # jp = pytz.timezone('Asia/Tokyo')\n # print(jp.localize(exitt))\n\n # req1.scheduled_entry_datetime = jp.localize(entryt)\n # obj1.scheduled_exit_datetime = jp.localize(exitt)\n\n# 申請送信完了後の画面(UC-01)\nclass RequestAddFinishView(TemplateView):\n template_name = 'management_system/request_add_finish.html'\n success_url = '/management_system/add'\n \n# 実績入力完了後の画面(UC-02)\nclass RequestPerformanceFinishView(TemplateView):\n template_name = 'management_system/request_performance_check.html'\n success_url = '/management_system/login'\n\n def get_context_data(self, **kwarg):\n context = super().get_context_data(**kwarg)\n context['time'] = timezone.localtime()\n print('time')\n print(context['time'])\n return context\n\n def post(self, request, *args, **kwargs):\n return render(self.request, 'management_system/request_login.html', context)\n\n# 実績入力画面 (UC-02)\nclass RequestLoginView(TemplateView):\n model = Request\n template_name = 'management_system/request_login.html'\n\n def post(self, request, *args, **kwargs):\n request_id = self.request.POST.get('request_id')\n requests = list(Request.objects.filter(id=request_id))\n if len(requests) == 0:\n print('このidは存在しません')\n messages.success(self.request, 'idとパスワードが一致しません')\n return HttpResponseRedirect(reverse('login'))\n\n request = 
get_object_or_404(Request, pk=request_id)\n print('入力されたpas : ' + self.request.POST.get('password'))\n print('本来のpas : ' + str(request.password))\n print('入館時間 : ' + str(request.entry_datetime))\n print('退館時間 : ' + str(request.exit_datetime))\n \n if(int(self.request.POST.get('password')) != request.password ):\n print('login fail')\n messages.success(self.request, 'idとパスワードが一致しません')\n return HttpResponseRedirect(reverse('login'))\n\n elif(request.entry_datetime!=None and request.exit_datetime!=None):\n print('この申請は既に退館済みです')\n messages.success(self.request, 'この申請は既に退館済みです')\n return HttpResponseRedirect(reverse('login'))\n elif(request.approval == 0):\n print('この申請は承認前のため入館できません')\n messages.success(self.request, 'この申請は承認前のため入館できません')\n return HttpResponseRedirect(reverse('login'))\n else:\n print('login sucsess')\n print('loginId:' + request_id)\n return HttpResponseRedirect(reverse('performance', kwargs = {'pk':request_id}))\n \n def get_context_data(self, **kwarg):\n print('make forms')\n context = super().get_context_data(**kwarg)\n context['form_id'] = RequestIdForm()\n context['form_password'] = RequestPasswordForm()\n return context\n\n# 実績入力画面 (UC-02)\nclass RequestPerformanceView(TemplateView):\n model = Request\n template_name = 'management_system/request_performance.html'\n success_url = ''\n\n def post(self, request, *args, **kwargs):\n \n print('entry')\n print('self')\n print(self)\n print('request')\n print(request)\n print('args')\n print(args)\n print('kwargs')\n print(kwargs)\n\n request = get_object_or_404(Request,pk=kwargs.get('pk'))\n if(request.entry_datetime==None):\n request.entry_datetime = timezone.localtime()\n elif(request.exit_datetime==None):\n request.exit_datetime = timezone.localtime()\n else:\n print('already logined')\n return HttpResponseRedirect(reverse('login'))\n request.save()\n print (request)\n \n\n return HttpResponseRedirect(reverse('performancefinish'))\n\n def get_context_data(self, **kwarg):\n context = super().get_context_data(**kwarg)\n print('getRequest')\n\n if( kwarg.get('pk') == None ):\n print('get false')\n messages.success(self.request, 'idに一致するものが存在しませんでした')\n return context\n else: \n print('getSucsess')\n request = get_object_or_404(Request,pk=kwarg.get('pk'))\n customer = get_object_or_404(Customer,pk=request.email.pk)\n print(request)\n\n \n context['form_id'] = {'request_id':kwarg.get('pk')}\n context['form_request'] = request\n context['form_customer'] = customer\n if(request.entry_datetime == None):\n context['form_message'] = '入館'\n elif(request.exit_datetime==None):\n context['form_message'] = '退館'\n else:\n print('already logined')\n\n return context\n \n# 実績修正画面ログイン (UC-05)\nclass RequestFixLoginView(TemplateView):\n model = Request\n template_name = 'management_system/request_login.html'\n\n def post(self, request, *args, **kwargs):\n request_id = self.request.POST.get('request_id')\n requests = list(Request.objects.filter(id=request_id))\n if len(requests) == 0:\n print('このidは存在しません')\n messages.success(self.request, 'idとパスワードが一致しません')\n return HttpResponseRedirect(reverse('fixlogin'))\n\n request = get_object_or_404(Request, pk=request_id)\n print('入力されたpas : ' + self.request.POST.get('password'))\n print('本来のpas : ' + str(request.password))\n print('入館時間 : ' + str(request.entry_datetime))\n print('退館時間 : ' + str(request.exit_datetime))\n \n if(int(self.request.POST.get('password')) != request.password ):\n print('login fail')\n messages.success(self.request, 'idとパスワードが一致しません')\n return 
HttpResponseRedirect(reverse('fixlogin'))\n \n # ここは未承認かの判定にしたい\n if(request.approval!=0):\n print('この申請は既に承認済みです')\n messages.success(self.request, 'この申請は既に承認済みのため修正できません')\n return HttpResponseRedirect(reverse('fixlogin'))\n \n else:\n print('login sucsess')\n print('loginId:' + request_id)\n return HttpResponseRedirect(reverse('fix', kwargs = {'pk':request_id}))\n \n def get_context_data(self, **kwarg):\n print('make forms')\n context = super().get_context_data(**kwarg)\n context['form_id'] = RequestIdForm()\n context['form_password'] = RequestPasswordForm()\n return context\n\n# 実績修正画面 (UC-05)\nclass RequestFixView(UpdateView):\n model = Request\n template_name = 'management_system/request_fix.html'\n success_url = '../fix/login'\n # form = RequestSendForm\n fields = ['scheduled_entry_datetime', 'scheduled_exit_datetime', 'purpose_admission']\n cust = Customer\n\n def get_context_data(self, **kwarg):\n print('make forms:RexestFix')\n context = super().get_context_data(**kwarg)\n self.cust = Customer(context['object'].email)\n \n return context\n\n def form_valid(self,form):\n context = {\n 'form' : form\n }\n\n self.object = self.get_object()\n print(self.object.email.email)\n print(self.request.POST.get('email'))\n if(self.object.email.email == self.request.POST.get('email')):\n if(self.object.email.name == self.request.POST.get('name')):\n if(self.object.email.organization_name == self.request.POST.get('organization_name')):\n if(self.object.email.tell_number == self.request.POST.get('tell_number')):\n print('以前申請したものと全件一致しました')\n self.sendMail(self.object)\n return super().form_valid(form)\n \n print('顧客情報が修正されたのでDBを確認します')\n\n # emailに該当するものをすべて取得\n customers = list(Customer.objects.filter(email=self.request.POST.get('email')))\n # listの判別\n hit = 0\n for cus in customers:\n # print(cus.tell_number)\n if(cus.name == self.request.POST.get('name') and cus.organization_name == self.request.POST.get('organization_name') and cus.tell_number == self.request.POST.get('tell_number')):\n # そのままcustomerを使う\n print(str(cus.id)+' 全件一致しました')\n customer = cus\n hit = 1\n else:\n # Customerを入力のものと置き換える\n print(str(cus.id)+' このデータは全件一致しませんでした')\n \n # 一致しなかったときにcustomerをRequestに保持させる\n if(hit == 0):\n # customer = self.cust\n customer = CustomerForm(self.request.POST).save()\n \n # カスタマーの追加とそれを引数に渡す\n print(hit)\n self.object = form.save(commit=False)\n self.object.email = customer\n print(self.object.email.id)\n print(self.object)\n self.object.save()\n\n self.sendMail(self.object)\n \n return HttpResponseRedirect(self.get_success_url())\n \n def sendMail(self, req):\n print('保存しました')\n\n subject = ' W社DC利用修正受領のお知らせ'\n massage = req.email.organization_name+' '+ req.email.name + '様\\n\\n'+'お世話になっております。\\nW社でございます。\\n\\n以下の内容でのデータセンターの利用修正を受け付け致しました。\\n申請修正の承認につきましては、管理者が確認後再度連絡させていただきます。\\n\\n------利用申請内容------\\n申請日時 : ' + req.request_datetime.strftime('%Y/%m/%d %H:%M:%S') + '\\n入館予定日時 : '+req.scheduled_entry_datetime.strftime('%Y/%m/%d %H:%M:%S') +'\\n退館予定日時 : ' + req.scheduled_exit_datetime.strftime('%Y/%m/%d %H:%M:%S') + '\\n------------------------------\\n\\nそれに伴い' + req.email.name + '様の申請番号、パスワードは以前発行したとおりです\\n' + '\\n\\n申請番号、パスワードは入退館時に必要になりますので厳重に保管下さい。\\n\\nまた、利用申請が承認されていない状態であれば下記URLで申請内容の修正、取消が行えます。\\n\\nURL : http://t17cs015.pythonanywhere.com/management_system/fix/login/\\n\\n------------------------------\\nW社 DCセンター管理部\\ndbcenterw01@gmail.com\\n------------------------------\\n\\n本メールは”データセンター入退館管理システム”からの自動送信です。\\n'\n from_email = 'dbcenterw1@gmail.com'\n recipient_list = [\n 
req.email.__str__()\n ]\n print('send mail')\n send_mail(subject,massage,from_email,recipient_list)\n messages.success(self.request, '修正を受理しました')\n\n return 0\n\nclass AdminLoginView(LoginView):\n form_class = AdminLoginForm\n next = 'admin_list'\n template_name = 'management_system/admin_login.html'\n\nclass RequestListView(LoginRequiredMixin, ListView):\n model = Request\n login_url = 'admin_login'\n template_name = 'management_system/admin_list.html'\n ascending_order = False\n searched_string = \"\"\n\n def get_context_data(self, **kwargs):\n context = super(RequestListView, self).get_context_data(**kwargs)\n\n context[\"ascending_order\"] = \"true\" if self.ascending_order == True else \"false\"\n context[\"searched_string\"] = self.searched_string\n\n return context\n\n def get_queryset(self):\n results = self.model.objects.all()\n\n q_name = self.request.GET.get('name')\n q_order = self.request.GET.get('order')\n\n if q_name is not None:\n results = results.filter(email__organization_name__contains=q_name)\n self.searched_string = q_name\n else:\n self.searched_string = \"\"\n\n if q_order == \"desce\":\n self.ascending_order = False\n results = results.order_by(\"request_datetime\").reverse()\n elif q_order == \"asce\":\n self.ascending_order = True\n results = results.order_by(\"request_datetime\")\n\n return results\n\n# 申請承認画面 \nclass AdminApprovalView(TemplateView):\n model = Request\n template_name = 'management_system/admin_approval.html'\n success_url = ''\n\n def form_valid(self,form):\n print('form')\n print(form)\n return super().form_valid(form)\n\n def form_invalid(self,form):\n print('form')\n print(form)\n return super().form_invalid(form)\n\n def post(self, request, *args, **kwargs):\n print('self')\n print(self)\n print('request')\n print(request)\n print(request.POST)\n print('args')\n print(args)\n print('kwargs')\n print(kwargs)\n \n # 承認データの取得\n req = get_object_or_404(Request,pk=kwargs.get('pk'))\n\n # 承認がクリックされた場合の処理\n if 'approval' in request.POST:\n requests = list(filter(lambda x:True if(req.scheduled_entry_datetime >= x.scheduled_entry_datetime and req.scheduled_entry_datetime < x.scheduled_exit_datetime) else False ,Request.objects.all()))\n requests += list(filter(lambda x:True if(req.scheduled_exit_datetime > x.scheduled_entry_datetime and req.scheduled_exit_datetime <= x.scheduled_exit_datetime) else False ,Request.objects.all()))\n print(requests)\n if(len(requests) != 0):\n for requ in requests:\n if(requ.approval == 1):\n print('すでに申請されている時間帯なのでこの時間は申請できません')\n print(requ)\n messages.success(self.request, 'すでに申請されている時間帯なのでこの時間は申請できません')\n return HttpResponseRedirect(reverse('admin_approval' , kwargs={'pk':kwargs.get('pk')}))\n\n print('承認します')\n req.approval = 1\n messages.success(self.request, 'id:'+str(kwargs.get('pk'))+'の申請を承認しました')\n # 拒否がクリックされた場合の処理\n if 'noapproval' in request.POST:\n req.approval = 0\n messages.success(self.request, 'id:'+str(kwargs.get('pk'))+'の申請を拒否しました')\n print('拒否します')\n\n\n req.save()\n print (req)\n return HttpResponseRedirect(reverse('admin_list'))\n \n def get_context_data(self, **kwarg):\n context = super().get_context_data(**kwarg)\n print('getRequest')\n\n if( kwarg.get('pk') == None ):\n print('get false')\n messages.success(self.request, 'idに一致するものが存在しませんでした')\n return context\n else: \n print('getSucsess')\n request = get_object_or_404(Request,pk=kwarg.get('pk'))\n customer = get_object_or_404(Customer,pk=request.email.pk)\n print(request)\n\n context['form_id'] = {'request_id':kwarg.get('pk')}\n 
context['form_request'] = request\n            context['form_customer'] = customer\n            context['form_message'] = 'Approval'\n        \n        return context\n","sub_path":"management_system/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":24896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"285735167","text":"import json\nimport maya.cmds as cmds\nimport maya.mel as mel\n\nfrom Qt import QtWidgets, QtCore\nfrom hbtools.cute.widgets import MayaWidgetHB, AccordionWidget, FloatSliderWidget\nfrom hbtools.maya import cute_utils as cu\nfrom hbtools.maya.olm.olm_model import VertexModel\n\n\nclass OlmWidget(MayaWidgetHB):\n    _TITLE = \"Blendshape Controller\"\n    _WIDTH = 400\n\n    signal_refresh_blendshape = QtCore.Signal()\n    signal_set_blendshape = QtCore.Signal()\n\n    signal_selection_mode = QtCore.Signal()\n    signal_add_row = QtCore.Signal()\n    signal_remove_row = QtCore.Signal()\n\n    signal_import_selection = QtCore.Signal()\n    signal_export_selection = QtCore.Signal()\n\n    signal_calculate = QtCore.Signal()\n\n    signal_vertex_sphere_size = QtCore.Signal(float)\n    signal_control_sphere_size = QtCore.Signal(float)\n\n    def __init__(self, stay_top=True, parent=None):\n        super(OlmWidget, self).__init__(parent)\n        self._blendshape_combox = None\n        self._set_button = None\n        self._table_view = None\n        self._table_model = None\n        self._init(stay_top)\n\n    def _get_object_name(self):\n        return \"OLM_Widget\"\n\n    def _get_window_title(self):\n        return \"Blendshape Controller\"\n\n    # Interface #\n\n    def _init(self, stay_top):\n        self.setWindowTitle(self._TITLE)\n        # self.setFixedWidth(self._WIDTH)  # TODO; fix in mixin init.\n        if stay_top:\n            self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)\n\n        self.setLayout(QtWidgets.QVBoxLayout())\n\n        accordion = AccordionWidget(self)\n        accordion.setRolloutStyle(accordion.Maya)\n        accordion.setSpacing(0)\n        self.layout().addWidget(accordion)\n\n        widget, self._blendshape_combox, self._set_button = self._create_blendshape_widget()\n        accordion.addItem(\"Blendshape\", widget)\n\n        widget, self._table_view, self._table_model = self._create_vertex_widget()\n        accordion.addItem(\"Vertex Controllers\", widget)\n\n        widget = self._create_sphere_widget()\n        accordion.addItem(\"Sphere Options\", widget)\n\n        widget = self._create_simulation_widget()\n        accordion.addItem(\"Simulation\", widget)\n\n        widget = self._create_import_export_widget()\n        accordion.addItem(\"Import | Export\", widget)\n\n    def _create_blendshape_widget(self):\n        grid_layout = QtWidgets.QGridLayout()\n        grid_layout.setColumnStretch(0, 1)\n        grid_layout.setColumnStretch(1, 2)\n        grid_layout.setColumnStretch(2, 1)\n\n        label = QtWidgets.QLabel(\"Blendshapes\")\n        grid_layout.addWidget(label, 0, 0)\n\n        blendshape_combox = QtWidgets.QComboBox()\n        blendshape_combox.addItems(self._get_blendshapes())\n        grid_layout.addWidget(blendshape_combox, 0, 1)\n\n        button = QtWidgets.QPushButton()\n        button.setIcon(self.style().standardIcon(QtWidgets.QStyle.SP_BrowserReload))\n        button.setIconSize(QtCore.QSize(20, 20))\n        button.clicked.connect(self.signal_refresh_blendshape.emit)\n        grid_layout.addWidget(button, 0, 2)\n\n        button = QtWidgets.QPushButton(\"Use Blendshape\")\n        button.clicked.connect(self.signal_set_blendshape.emit)\n        grid_layout.addWidget(button, 1, 0, 1, 3)\n\n        widget = QtWidgets.QWidget()\n        widget.setLayout(grid_layout)\n        return widget, blendshape_combox, button\n\n    def _create_vertex_widget(self):\n        table_model = VertexModel()\n\n        def highlight(index):\n            self._table_model.highlight(index.row())\n\n
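        # build the table view: row clicks are routed to the model's highlight handler and selection is limited to one row at a time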
table_view = QtWidgets.QTableView()\n table_view.setModel(table_model)\n table_view.clicked[QtCore.QModelIndex].connect(highlight)\n table_view.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)\n table_view.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)\n\n for i in range(table_view.horizontalHeader().count()):\n table_view.horizontalHeader().setSectionResizeMode(i, QtWidgets.QHeaderView.Stretch)\n\n layout = QtWidgets.QVBoxLayout()\n layout.addWidget(table_view)\n\n space = QtWidgets.QSpacerItem(16, 16, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n layout.addItem(space)\n\n button = QtWidgets.QPushButton(\"Enter Vertex Selection\")\n button.clicked.connect(self.signal_selection_mode.emit)\n layout.addWidget(button)\n\n button = QtWidgets.QPushButton(\"Add Vertex\")\n button.clicked.connect(self.signal_add_row.emit)\n layout.addWidget(button)\n\n space = QtWidgets.QSpacerItem(16, 16, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n layout.addItem(space)\n\n button = QtWidgets.QPushButton(\"Delete Vertex\")\n button.clicked.connect(self.signal_remove_row.emit)\n layout.addWidget(button)\n\n widget = QtWidgets.QWidget()\n widget.setLayout(layout)\n\n return widget, table_view, table_model\n\n def _create_sphere_widget(self):\n layout = QtWidgets.QVBoxLayout()\n\n vertex_sphere_size = FloatSliderWidget(title=\"Vertex Scale\", min_value=0.0, max_value=1.0, start_value=self._table_model._SPHERE_DEF_SIZE)\n vertex_sphere_size.signal_value_changed.connect(self.signal_vertex_sphere_size.emit)\n layout.addWidget(vertex_sphere_size)\n\n control_sphere_size = FloatSliderWidget(title=\"Controller Scale\", min_value=0.0, max_value=1.0, start_value=self._table_model._SPHERE_DEF_SIZE)\n control_sphere_size.signal_value_changed.connect(self.signal_control_sphere_size.emit)\n layout.addWidget(control_sphere_size)\n\n widget = QtWidgets.QWidget()\n widget.setLayout(layout)\n return widget\n\n def _create_simulation_widget(self):\n # TODO; expand with more options (Constant evaluation, method)\n button = QtWidgets.QPushButton(\"Calculate\")\n button.clicked.connect(self.signal_calculate.emit)\n return button\n\n def _create_import_export_widget(self):\n layout = QtWidgets.QVBoxLayout()\n\n button = QtWidgets.QPushButton(\"Import\")\n button.clicked.connect(self._import)\n layout.addWidget(button)\n\n button = QtWidgets.QPushButton(\"Export\")\n button.clicked.connect(self._export)\n layout.addWidget(button)\n\n widget = QtWidgets.QWidget()\n widget.setLayout(layout)\n return widget\n\n # Data #\n\n @classmethod\n def _get_blendshapes(cls):\n \"\"\" Returns a string list of blendshapes names. 
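        Subclasses are expected to override this; the base implementation raises NotImplementedError.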
\"\"\"\n raise NotImplementedError()\n\n # Close #\n\n def closeEvent(self, event):\n # TODO; does not work with the Mixin in Maya 2017.\n self._table_model.delete()\n return super(OlmWidget, self).closeEvent(event)\n","sub_path":"hbtools/maya/olm/olm_view.py","file_name":"olm_view.py","file_ext":"py","file_size_in_byte":6690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"16034357","text":"import json\nimport nlp.bonus as bonus\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nfrom nltk.chunk import ne_chunk\nfrom nltk.tag import pos_tag\nfrom nltk.downloader import download\nfrom nltk import data\nfrom nlp.Person import Person\nfrom nlp.recognize import recognize\n\n# resources downloaded for nltk\ntry:\n data.find('tokenizers/punkt')\nexcept LookupError:\n download('punkt')\ntry:\n data.find('taggers/averaged_perceptron_tagger')\nexcept LookupError:\n download('averaged_perceptron_tagger')\ntry:\n data.find('corpora/words')\nexcept LookupError:\n download('words')\ntry:\n data.find('chunkers/maxent_ne_chunker')\nexcept LookupError:\n download('maxent_ne_chunker')\n\n\ndef extractArticle(article):\n \"\"\"\\\n extractArticle returns a 3-tuple that contains:\n - the map of first names to last names (\"DISCARD\" as value for duplicate first name)\n - the map of sur names to first names (\"DISCARD\" as value for duplicate last name)\n - the map of full names (tuple) to their corresponding Person objects\n The first name and sur name maps are used for incomplete name token lookup in extractSentence; they are\n built from the full names detected by nltk (first + surname)\n \"\"\"\n firstNames = {}\n surNames = {}\n localProfileMap = {}\n fir = \"\"\n sur = \"\"\n for paragraph in article.Paragraphs:\n tokenized = ne_chunk(pos_tag(word_tokenize(paragraph)))\n for element in tokenized.subtrees():\n if element.label() == 'PERSON':\n # case: Firstname Lastname\n if len(element.leaves()) == 1 or len(element.leaves()) == 2:\n # case: single name, needs special recognition\n if len(element.leaves()) == 1:\n fir = \"\"\n sur = element.leaves()[0][0]\n fir = recognize(fir, sur)\n else:\n fir = element.leaves()[0][0]\n sur = element.leaves()[1][0]\n fir = recognize(fir, sur)\n if fir != \"DISCARD\":\n # add a person with new name\n if not localProfileMap.get((fir, sur)):\n localProfileMap[(fir, sur)] = Person((fir, sur))\n\n if not firstNames.get(fir):\n firstNames[fir] = sur\n elif firstNames.get(fir) == sur or firstNames.get(fir) == \"DISCARD\":\n continue\n else:\n firstNames[fir] = \"DISCARD\"\n\n if not surNames.get(sur):\n surNames[sur] = fir\n elif surNames.get(sur) == fir or surNames.get(sur) != \"DISCARD\":\n continue\n else:\n surNames[sur] = \"DISCARD\"\n\n # case: Firstname (Middlename) Lastname (Sr./Jr./II/III/IV)\n elif len(element.leaves()) > 2:\n if element.leaves()[-1] == \"Sr.\" or element.leaves()[-1] == \"Jr.\" or element.leaves()[-1] == \"II\" or element.leaves()[-1] == \"III\" or element.leaves()[-1] == \"IV\":\n fir = element.leaves()[0][0]\n sur = element.leaves()[-2][0]\n if not localProfileMap.get((fir, sur)):\n localProfileMap[(fir, sur)] = Person((fir, sur))\n\n if not firstNames.get(fir):\n firstNames[fir] = sur\n elif firstNames.get(fir) == sur or firstNames.get(fir) == \"DISCARD\":\n continue\n else:\n firstNames[fir] = \"DISCARD\"\n\n if not surNames.get(sur):\n surNames[sur] = fir\n elif surNames.get(sur) == fir or surNames.get(sur) == \"DISCARD\":\n continue\n else:\n surNames[sur] = \"DISCARD\"\n return 
firstNames, surNames, localProfileMap\n\n\ndef extractSentence(sentence, firstNames, surNames):\n \"\"\"\\\n extractSentence returns the full names of people in the sentence.\n If the person's name in the sentence is incomplete (only last name or first name), look up its full name\n by firstNames and surNames map, provided that either sur or first name is unambiguous (refer to only one individual)\n \"\"\"\n res = set()\n tokenized = ne_chunk(pos_tag(word_tokenize(sentence)))\n\n fir = \"\"\n sur = \"\"\n potentialFir = \"\"\n potentialSur = \"\"\n for element in tokenized.subtrees():\n if element.label() == 'PERSON':\n isValidPerson = True\n if len(element.leaves()) == 1:\n potentialFir = surNames.get(element.leaves()[0][0])\n potentialSur = firstNames.get(element.leaves()[0][0])\n if potentialFir != None and potentialFir != \"DISCARD\":\n fir = potentialFir\n sur = element.leaves()[0][0]\n elif potentialSur != None and potentialSur != \"DISCARD\":\n fir = element.leaves()[0][0]\n sur = potentialSur\n else:\n print(\"Unrecognized name: \" + element.leaves()[0][0])\n isValidPerson = False\n elif len(element.leaves()) == 2:\n fir = element.leaves()[0][0]\n sur = element.leaves()[1][0]\n fir = recognize(fir, sur)\n if fir == \"DISCARD\":\n isValidPerson = False\n elif len(element.leaves()) > 2:\n if element.leaves()[-1] == \"Sr.\" or element.leaves()[-1] == \"Jr.\" or element.leaves()[-1] == \"II\" or element.leaves()[-1] == \"III\" or element.leaves()[-1] == \"IV\":\n fir = element.leaves()[0][0]\n sur = element.leaves()[-2][0]\n else:\n fir = element.leaves()[0][0]\n sur = element.leaves()[-1][0]\n fir = recognize(fir, sur)\n if fir == \"DISCARD\":\n isValidPerson = False\n\n else:\n isValidPerson = False\n\n if isValidPerson:\n res.add((fir, sur))\n print((fir, sur))\n return res\n\n\ndef printArticleInfo(article, firstNames, surNames, titlenames):\n \"\"\"\\\n Print out article extraction result in terminal\n \"\"\"\n print(\"IN TITLE: [\", end=\"\")\n for x in titlenames:\n print(\" \", end=\"\")\n print(x, end=\"\")\n print(\" ]\")\n print(\"\\nFIRSTNAME - LASTNAME: \")\n for x in firstNames.keys():\n print(x, firstNames.get(x))\n print(\"\\nLASTNAME - FIRSTNAME: \")\n for x in surNames.keys():\n print(x, surNames.get(x))\n print(\"\\n\")\n\n\ndef merge(profileMap, localProfileMap, url, title):\n \"\"\"\\\n Update global profileMap with the result of local profileMap.\n Append url, title of the article to the global profileMap\n \"\"\"\n for localProfile in localProfileMap.values():\n if localProfile.mention == 0:\n continue\n globalProfile = profileMap.get(localProfile.nametuple)\n if not globalProfile:\n globalProfile = Person(localProfile.nametuple)\n profileMap[localProfile.nametuple] = globalProfile\n globalProfile.boostMention(localProfile.mention)\n for assocName, assocTuple in localProfile.association.items():\n globalAssocTuple = globalProfile.association.get(assocName)\n if not globalAssocTuple:\n globalProfile.association[assocName] = (\n assocTuple[0], set([(url, title)]))\n else:\n globalAssocTuple[1].add((url, title))\n globalProfile.association[assocName] = (\n globalAssocTuple[0] +\n assocTuple[0], globalAssocTuple[1]\n )\n\n\ndef boostAssociation(person, fullnames, value):\n \"\"\"\n Given a person and a set of people, update his association value with everyone else\n \"\"\"\n for associate in fullnames.difference(set([person.nametuple])):\n # we don't need the second part of the tuple yet in localProfileMap\n if not person.association.get(associate):\n 
person.association[associate] = (\n                value, )\n        else:\n            assocTuple = person.association[associate]\n            person.association[associate] = (\n                assocTuple[0] + value, )\n\n\ndef updateBySentence(localProfileMap, name, fullnames):\n    currPerson = localProfileMap.get(name)\n    if not currPerson:\n        currPerson = Person(name)\n        localProfileMap[name] = currPerson\n    # update association strength of each person with others\n    boostAssociation(currPerson, fullnames, bonus.C_SAME_SENTENCE)\n\n\ndef updateByPara(currPara, localProfileMap, name, fullnames):\n    currPerson = localProfileMap.get(name)\n    # in the future currPara could be used for context lookup; right now it is just a dummy flag\n    if currPara == 0:\n        currPerson.boostMention(bonus.M_FIRST_PARA_BONUS)\n    currPerson.boostMention(bonus.M_PARA_BONUS)\n    boostAssociation(currPerson, fullnames, bonus.C_SAME_PARA)\n\n\ndef updateByTitle(localProfileMap, name, fullnames):\n    currPerson = localProfileMap.get(name)\n    # if somehow a person in the title is not recorded in our lookup map\n    if not currPerson:\n        currPerson = Person(name)\n        localProfileMap[name] = currPerson\n    currPerson.boostMention(bonus.M_TITLE_BONUS)\n    boostAssociation(currPerson, fullnames, bonus.C_SAME_TITLE)\n\n\ndef updateMap(profileMap, article):\n    \"\"\"\\\n    Modifies the profileMap passed in with each article's information.\n    Retrieves all names in the article first, then scans the title and paragraphs to update\n    the corresponding association strength and mention count\n    \"\"\"\n    print(\">>> extracting names...\")\n    firstNames, surNames, localProfileMap = extractArticle(article)\n    print(\">>> scanning title...\")\n    fullnames = extractSentence(article.Title, firstNames, surNames)\n    titlenames = fullnames\n    for name in fullnames:\n        updateByTitle(localProfileMap, name, fullnames)\n    print(\">>> scanning paragraphs...\")\n    # set that contains all people in a paragraph\n    pplInPara = set()\n    fullnames = set()\n    currParagraph = 0\n    for para in article.Paragraphs:\n        listOfSentence = sent_tokenize(para)\n        # get full name tuples from each sentence in the paragraph and update each person's per-sentence associations\n        for sent in listOfSentence:\n            fullnames = extractSentence(sent, firstNames, surNames)\n            # update association strength of each person with others\n            # problem: for three people a, b, c, a relating to b and c doesn't imply that b is related to c\n            for name in fullnames:\n                # unnecessary if the assumption holds that every named entity in a sentence is already in the lookup map\n                pplInPara.add(name)\n                updateBySentence(localProfileMap, name, fullnames)\n        # update association by paragraph\n        for name in pplInPara:\n            updateByPara(currParagraph, localProfileMap, name, pplInPara)\n        pplInPara = set()\n        currParagraph += 1\n    merge(profileMap, localProfileMap, article.URL, article.Title)\n    printArticleInfo(article, firstNames, surNames, list(titlenames))\n","sub_path":"py/nlp/updateMap.py","file_name":"updateMap.py","file_ext":"py","file_size_in_byte":11507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"327776942","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\n__author__ = 'Nb'\n\nfrom NbHelper.Experimental.Parser.LexicalAnalyser import TokenStream\nfrom NbHelper.Experimental.Parser.Token import *\nfrom NbHelper.Experimental.Parser.PythonSyntax import *\n\n\nclass SyntaxStream():\n    \"\"\"Stream of syntax.\"\"\"\n\n    SEEKING = 0\n    PENDING = 1\n    INTERRUPTED = 2\n\n    def __init__(self, token_stream: TokenStream, construct_empty_instance=False):\n        if not construct_empty_instance:\n
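            # normal construction: keep a reference to the token stream and parse it immediately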
self.token_stream = token_stream\n self.stream = []\n self._parse()\n else:\n self.stream = []\n\n def __str__(self):\n return '\\n'.join([str(syntax) for syntax in self.stream])\n\n def _parse(self):\n STATE = self.PENDING\n container = []\n for token in self.token_stream.stream:\n if STATE == self.SEEKING:\n if not isinstance(token, EndOfLine):\n container.append(token.string)\n if STATE == self.PENDING:\n if isinstance(token, Plus):\n self.stream.append(GeneralBiOperator(GeneralPlus))\n if isinstance(token, Equal):\n self.stream.append(GeneralBiOperator(GeneralAssign))\n if isinstance(token, DoubleEqual):\n self.stream.append(GeneralBiOperator(GeneralEqual))\n if isinstance(token, Not):\n self.stream.append(GeneralBiOperator(GeneralNot))\n if isinstance(token, String):\n self.stream.append(GeneralString(token.content))\n if isinstance(token, Number):\n if isinstance(token.content, int):\n self.stream.append(GeneralInteger(token.content))\n else:\n self.stream.append(GeneralFloat(token.content))\n if isinstance(token, Divide):\n self.stream.append(GeneralBiOperator(GeneralDivide))\n if isinstance(token, EndOfLine):\n self.stream.append(GeneralEndOfLine(token.content))\n if isinstance(token, Multiply):\n self.stream.append(GeneralOperator(GeneralMultiply))\n if isinstance(token, Comment):\n STATE = self.SEEKING\n if isinstance(token, (EndOfFile, EndOfLine)):\n if STATE == self.SEEKING:\n STATE = self.INTERRUPTED\n if STATE == self.INTERRUPTED:\n self.stream.append(GeneralComment(''.join(container)))\n self.stream.append(GeneralEndOfLine('\\n'))\n container = []\n STATE = self.PENDING\n\n # noinspection PyTypeChecker\n @staticmethod\n def new():\n return SyntaxStream([], construct_empty_instance=True)\n\n\nclass SyntaxTree():\n def __init__(self, syntax_stream: SyntaxStream):\n self.syntax_stream = syntax_stream.stream\n self._parse_operator()\n\n def __str__(self):\n return '\\n'.join([str(syntax) for syntax in self.syntax_stream])\n\n def _parse_operator(self):\n for i in range(len(self.syntax_stream)):\n syntax = self.syntax_stream[i]\n if isinstance(syntax, GeneralBiOperator):\n left = self.syntax_stream[i - 1]\n right = self.syntax_stream[i + 1]\n if isinstance(syntax.content, GeneralAssign):\n if isinstance(left, GeneralString):\n if isinstance(right, GeneralValue):\n self.syntax_stream[i].left = GeneralName(left.content)\n self.syntax_stream[i].right = right\n self.syntax_stream[i - 1] = None\n self.syntax_stream[i + 1] = None\n elif isinstance(syntax.content, (GeneralPlus, GeneralEqual, GeneralMultiply, GeneralDivide)):\n if type(left) == type(right):\n self.syntax_stream[i].left = left\n self.syntax_stream[i].right = right\n self.syntax_stream[i - 1] = None\n self.syntax_stream[i + 1] = None\n self.syntax_stream = [syntax for syntax in self.syntax_stream if syntax is not None]\n\n\ndef TokeniseSyntaxStream(syntax_stream: SyntaxStream) -> TokenStream:\n result = TokenStream.new()\n token_stream = result.stream\n for syntax in syntax_stream.stream:\n if isinstance(syntax, GeneralComment):\n token_stream.append(Comment('/', 0, 0))\n token_stream.append(String(syntax.content, 0, 0))\n elif isinstance(syntax, GeneralBiOperator):\n if isinstance(syntax.content, GeneralAssign):\n if isinstance(syntax.right, GeneralNumber):\n token_stream.append(Name(syntax.left.content, 0, 0))\n token_stream.append(Equal('=', 0, 0))\n token_stream.append(Number(str(syntax.right.content), 0, 0))\n elif isinstance(syntax.right, GeneralString):\n token_stream.append(Name(syntax.left.content, 0, 0))\n 
token_stream.append(Equal('=', 0, 0))\n token_stream.append(String(syntax.right.content, 0, 0))\n elif isinstance(syntax.right, GeneralKeyword):\n token_stream.append(Name(syntax.left.content, 0, 0))\n token_stream.append(Equal('=', 0, 0))\n token_stream.append(String(str(syntax.right.content), 0, 0))\n if not isinstance(syntax, GeneralEndOfLine):\n token_stream.append(EndOfLine('\\n', 0, 0))\n token_stream[-1] = EndOfFile('\\n', 0, 0)\n return result\n\n\ndef ParseKeywordFromDict(keyword: dict, syntax_tree: SyntaxStream) -> SyntaxStream:\n for i in range(len(syntax_tree.stream)):\n syntax = syntax_tree.stream[i]\n if isinstance(syntax, GeneralString):\n if syntax.content in keyword.keys():\n syntax_tree.stream[i] = keyword.get(syntax.content)(syntax.content)\n return syntax_tree","sub_path":"Experimental/Parser/Parser.py","file_name":"Parser.py","file_ext":"py","file_size_in_byte":6087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"578588514","text":"import sys\nn = int(input().strip())\n\ndictionary = dict([])\nfor i in range(n):\n # arr = [(key, value) for (key, value) in input().strip().split(' ')]\n (key, value) = input().strip().split(' ')\n dictionary[key] = value\nfor query in sys.stdin:\n query = query.strip()\n if query in dictionary:\n print(query + \"=\" + dictionary[query])\n else:\n print(\"Not found\")\n","sub_path":"30_days_of_code_challenges/day8-dictionaries_and_maps.py","file_name":"day8-dictionaries_and_maps.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"104831161","text":"# -*- coding:utf-8 -*-\nfrom typing import List\n\n\nclass Solution:\n def sortColors(self, nums: List[int]) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n count_list = [0] * 3\n for num in nums:\n count_list[num] += 1\n print(count_list)\n for index, count in enumerate(count_list):\n for i in range(count):\n nums[sum(count_list[:index])+i] = index\n","sub_path":"src/Algorithms/75颜色分类/75.py","file_name":"75.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"156613627","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('web', '0013_auto_20150123_0940'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='taskcenter',\n name='groups',\n field=models.ManyToManyField(default=None, to='web.Group', verbose_name='\\u9009\\u62e9\\u7ec4'),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='taskcenter',\n name='hosts',\n field=models.ManyToManyField(default=None, to='web.Host', verbose_name='\\u9009\\u62e9\\u4efb\\u52a1\\u4e3b\\u673a'),\n preserve_default=True,\n ),\n ]\n","sub_path":"任务编排系统/TaskPlanner/web/migrations/0014_auto_20150123_0942.py","file_name":"0014_auto_20150123_0942.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"174837495","text":"from collections import defaultdict\nimport random\nimport time\nimport numpy as np\nimport copy\nimport random\nimport collections\nfrom game import Board, Game\nfrom statistics import Statistic\nfrom players.vanilla_uct_player import Vanilla_UCT\nfrom players.scripts.random_script import RandomPlayer\nfrom 
players.scripts.lelis_script import LelisPlayer\nfrom players.scripts.DSL import DSL\nimport sys\n\n\nif __name__ == \"__main__\": \n \n random = RandomPlayer()\n lelis = LelisPlayer()\n \n# import importlib\n# module = importlib.import_module('players.scripts.generated.Script1')\n# class_ = getattr(module, 'Script1')\n# instance = class_()\n \n dsl = DSL()\n dsl.generateRandomScript(1)\n\n victories1 = 0\n victories2 = 0\n for _ in range(1):\n game = Game(n_players = 2, dice_number = 4, dice_value = 3, column_range = [2,6],\n offset = 2, initial_height = 1)\n uct1 = Vanilla_UCT(c = 1, n_simulations = 10)\n uct2 = Vanilla_UCT(c = 10, n_simulations = 1)\n \n is_over = False\n who_won = None\n \n infinite_loop = 0\n current_player = game.player_turn\n while not is_over:\n# print('Player: ', current_player)\n moves = game.available_moves()\n if game.is_player_busted(moves):\n# print('Player ', current_player, ' busted!')\n if current_player == 1:\n current_player = 2\n else:\n current_player = 1\n continue\n else:\n if game.player_turn == 1:\n# chosen_play = uct1.get_action(game)\n chosen_play = lelis.get_action(game)\n else:\n# chosen_play = instance.get_action(game)\n chosen_play = random.get_action(game)\n if chosen_play == 'n':\n if current_player == 1:\n current_player = 2\n else:\n current_player = 1\n print('Chose: ', chosen_play)\n game.print_board()\n game.play(chosen_play)\n game.print_board()\n \n print()\n who_won, is_over = game.is_finished()\n \n print(who_won, is_over)\n if who_won == 1:\n victories1 += 1\n if who_won == 2:\n victories2 += 1\n print(victories1, victories2)\n print('Player 1: ', victories1 / (victories1 + victories2))\n print('Player 2: ', victories2 / (victories1 + victories2))\n \n \n #main()","sub_path":"main-xai.py","file_name":"main-xai.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"575609183","text":"import re\n\n\n# WTF is re??\ndef domain_name(url):\n \"\"\"\n parse the domain name from a given url\n :param url: the url given\n :type url: str\n :return: the domain name\n :rtype: str\n\n >>> domain_name(\"http://github.com/carbonfive/raygun\")\n 'github'\n >>> domain_name(\"http://www.zombie-bites.com\")\n 'zombie-bites'\n >>> domain_name(\"https://www.cnet.com\")\n 'cnet'\n >>> domain_name('ww.xakep.r')\n 'xakep'\n \"\"\"\n if 'ww.' 
not in url[: url.find('.') + 1]:\n if '//' in url:\n return url[url.find('//') + 2: url.find('.')]\n else:\n return url[: url.find('.')]\n else:\n return url[url.find('.') + 1: url.find('.', url.find('.') + 1)]\n\n\ndef soln_domain_name(url):\n return re.search('(https?://)?(www\\d?\\.)?(?P[\\w-]+)\\.', url).group(\n 'name')\n","sub_path":"Python/kyu_5/!_extract_domain_name_from_url.py","file_name":"!_extract_domain_name_from_url.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"597466041","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.7-x86_64/egg/notetool/crawler/movies.py\n# Compiled at: 2020-01-06 03:23:12\n# Size of source mod 2**32: 3273 bytes\nimport re, requests\nfrom lxml import html\n\ndef get_html(keywd, url):\n param = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0'}\n Url = url % keywd\n html = requests.get(Url, params=param).content.decode('utf8')\n return html\n\n\ndef get_movielink(text):\n tree = html.fromstring(text)\n ctree = tree.xpath('//div[@class=\"clearfix search-item\"]')\n link = []\n for item in ctree:\n print(item.xpath('em/text()')[0], item.xpath('div[2]/div/a/strong/text()')[0], ':', item.xpath('div[2]/div/a/@href')[0])\n link.append((item.xpath('div[2]/div/a/@href')[0], item.xpath('em/text()')[0]))\n\n return link\n\n\ndef get_downloadlink(link):\n if type_link == '电视剧':\n from_url = 'http://www.zimuzu.tv/resource/index_json/rid/%s/channel/tv' % link.split('/')[(-1)]\n else:\n from_url = 'http://www.zimuzu.tv/resource/index_json/rid/%s/channel/movie' % link.split('/')[(-1)]\n param = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0', \n 'Referer':'http://www.zimuzu.tv%s' % link}\n data = requests.get(from_url, params=param).content.decode('utf8')\n data = ''.join(data.split('=')[1:])\n print(data)\n pattern = '
followers_b:\n        is_bigger = True\n    else:\n        if followers_b > followers_a:\n            is_bigger = True\n    return is_bigger\n\n\n# Starting game function\ndef start(last_person:dict=None, winnings=0, first_time=True):\n    # Show logo\n    print(logo)\n\n    # Print the winnings in case it is not the first time\n    if not first_time:\n        print(f\"You're right! Current score: {winnings}.\")\n\n    # Choose samples\n    people = data[:]\n    if last_person == None:\n        person_a = choice(people)\n    else:\n        person_a = last_person\n    person_b = choose_person(person_a, people)\n\n    # Present samples\n    print(f\"Compare A: {person_a['name']}, a {person_a['description']}, from {person_a['country']}.\")\n    print(vs)\n    print(f\"Against B: {person_b['name']}, a {person_b['description']}, from {person_b['country']}.\")\n\n    # Ask the User\n    question = \"Who has more followers? Type 'A' or 'B': \"\n    answer = get_answer(question, \"a\", \"b\")\n\n    # Compare samples\n    answer_is_right = compare(answer, person_a, person_b, people)\n\n    if answer_is_right:\n        clear()\n        winnings += 1\n        start(person_b, winnings, False)\n    else:\n        clear()\n        print(logo)\n        print(f\"Sorry, that's wrong. Final score: {winnings}.\")\n        text_restart = \"Do you want to play again? Type (y)es or (n)o: \"\n        restart = get_answer(text_restart, \"y\", \"n\")\n        if restart == \"y\":\n            start()\n        else:\n            return\n\n\n# Application\nstart()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"145893601","text":"#coding=utf-8\r\n\r\nfrom .models import article,comment\r\nfrom django import forms\r\n\r\nclass articleForm(forms.ModelForm):\r\n    class Meta:\r\n        model=article\r\n        fields =['title','body','abstract']\r\n        widgets = {\r\n            'title': forms.TextInput(attrs={'class': 'blog_input','placeholder':\"Please enter a title\"}),\r\n            'body': forms.Textarea(attrs={'placeholder':\"Please enter the body text\",'class':\"textarea\"}),\r\n            'abstract': forms.Textarea(attrs={'placeholder':\"Please enter an abstract (200 characters max)\",'class':\"abstract\"}),\r\n        }\r\n\r\nclass comForm(forms.ModelForm):\r\n    class Meta:\r\n        model=comment\r\n        fields=['user_com','body_com']\r\n        widgets={\r\n            'user_com':forms.TextInput(attrs={'class': 'com_user','placeholder':\"Please enter a title\"}),\r\n            'body_com':forms.Textarea(attrs={'class':\"my_com_body\",'placeholder':\"Please enter the comment text\"}),\r\n        }","sub_path":"个人代码/myblog/myadmin/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
{"seq_id":"91831548","text":"# search path for measurement sets and images to load\ndatapath = [ ]\n\n# location of geodetic and ephemera data\nmeasurespath = \"~/.casa/measures\"\n\n# location of the cachedir\ncachedir = '~/.casa'\n\n# location of the optional user's startup.py\nstartupfile = '~/.casa/startup.py'\n\n# automatically update measures data if not current (measurespath must be user-writable)\nmeasures_update = True\n\n# log file path/name\nlogfile='casa-%s.log' % _time.strftime(\"%Y%m%d-%H%M%S\", _time.gmtime())\n\n# do not create a log file when True. If True, then any logfile value is ignored and there is no log file\nnologfile = False\n\n# print log output to terminal when True (in addition to any logfile and CASA logger)\nlog2term = False\n\n# do not start the CASA logger when True\nnologger = False\n\n# avoid starting GUI tools when True. If True then the CASA logger is not started even if nologger is False\nnogui = False\n\n# the IPython prompt color scheme. 
Must be one of \"Neutral\", \"NoColor\", \"Linux\" or \"LightBG\", default \"Neutral\"\ncolors = \"Neutral\"\n\n# startup without a graphical backend if True\nagg = False\n\n# attempt to load the pipeline modules and set other options appropriate for pipeline use if True\npipeline = False\n\n# create and use an IPython log using the iplogfile path\niplog = False\n\n# the IPython log file path name to be used when iplog is True\niplogfile='ipython-%s.log' % _time.strftime(\"%Y%m%d-%H%M%S\", _time.gmtime())\n\n# allow anonymous usage reporting\ntelemetry_enabled = True\n\n# location to place telemetry data prior to reporting\ntelemetry_log_directory = '~/.casa/telemetry'\n\n# maximum size of telemetry recording\ntelemetry_log_limit = 20480\n\n# telemetry recording size that triggers a report\ntelemetry_log_size_interval = 60\n\n# telemetry recording report frequency\ntelemetry_submit_interval = 604800\n\n# allow anonymous crash reporting\ncrashreporter_enabled = True\n\n# include the user's local site-packages in the python path if True. May conflict with CASA modules\nuser_site = False\n","sub_path":"casaconfig/private/config_defaults_static.py","file_name":"config_defaults_static.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"439344015","text":"from selenium import webdriver\nimport time\nfrom bs4 import BeautifulSoup\nimport re\nimport urllib.parse\nimport DatacolctVo as dv\nimport psycopg2\n\n#CLST 테이블 URL URL 정보 호출\ndef tblDataCstQery():\n try:\n keyword_query=[]\n # Default port =1200\n conn_string = \"host='121.160.17.80' dbname ='EcoBank' user='dev' password='nie12345' port='12000'\"\n conn = psycopg2.connect(conn_string)\n cur = conn.cursor()\n\n\n taget_query =\"\"\"\n SELECT data_colct_url \n FROM data_scraping_analysis.tbl_data_clst \n Where data_clst_orig_no = '4' \n \"\"\" \n cur.execute(taget_query)\n rows = cur.fetchall()\n for row in rows:\n keyword_query.append(row)\n conn.close()\n return keyword_query \n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n#데이터 중복 체크\ndef urlOverlap(url):\n urlData=existingURL()\n for urlList in urlData:\n urlList=str(urlList)\n urlList=re.sub(\",\",\"\",urlList)\n urlList=urlList.replace(\")\",\"\")\n urlList=urlList.replace(\"(\",\"\")\n urlList=urlList.replace(\"'\",\"\")\n if url == urlList:\n url=None\n return url\n\ndef existingURL():\n try:\n keyword_query=[]\n # Default port =1200\n conn_string = \"host='121.160.17.80' dbname ='EcoBank' user='dev' password='nie12345' port='12000'\"\n conn = psycopg2.connect(conn_string)\n cur = conn.cursor()\n\n taget_query =\"\"\"\n SELECT \n master.news_colct_url AS \"newsColctUrl\"\n FROM \n data_scraping_analysis.tbl_news_colct AS master \n LEFT JOIN\n data_scraping_analysis.tbl_data_clst AS sub\n ON\n master.news_clst_no = sub.data_clst_no\n LEFT JOIN\n data_scraping_analysis.tbl_data_clkw AS sub_clkw\n ON\n sub.data_clst_kwrd_no = sub_clkw.data_clkw_no\n LEFT JOIN\n data_scraping_analysis.tbl_data_clor AS sub_clor\n ON\n sub.data_clst_orig_no = sub_clor.data_clor_no\n\n where sub_clor.data_clor_ttle = '중앙일보' \n \"\"\" \n cur.execute(taget_query)\n rows = cur.fetchall()\n for row in rows:\n keyword_query.append(row)\n conn.close()\n return keyword_query \n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n\n# def keyword(urlquery): \n# 
Keyword=urllib.parse.quote(urlquery)\n#     return Keyword\n\n\n# parse the detail page\ndef detail_page(href,browser_path):\n    tmp = href.split('/')\n    domain = tmp[2]\n    class_name='article_body fs1 mg'\n    res=urllib.request.urlopen(href).read()\n    html = BeautifulSoup(res, \"html.parser\")\n    context = html.find_all(\"div\", class_=class_name)\n    content_list=[]\n    \n    context[0:9000]\n    for co in context:\n        co = co.text.strip()\n        content_list.append(co)\n    return content_list\n# parse the date\ndef datemethod(st_list_count,html,browser):\n    date_list=[]\n    try:\n        for dates in html.find_all(\"span\", class_='byline'):\n            dates=dates.text.strip()\n            date_list.append(re.search('\\d{4}.\\d{2}.\\d{2} \\d{2}:\\d{2}',dates).group())\n        return date_list[st_list_count-1]\n    except:\n        date_list=[]\n        browserdate=browser.find_element_by_xpath('//*[@id=\"content\"]/div[2]/div[2]/ul/li['+str(st_list_count)+']/div/span[2]/em[2]').text\n        return browserdate\n\n# parse the href\ndef hrefmethod(st_list_count,html):\n    href_list=[]\n    for i in html.find_all(\"strong\", class_=\"headline mg\"):\n        try:\n            href_list.append(i.find('a').get('href'))\n        except:\n            pass\n    return href_list[st_list_count-1]\n\n# parse the title\ndef titlemethod(st_list_count,html):\n    title_list=[]\n    search_news = html.find_all(\"strong\", class_=\"headline mg\")\n    for search_news_st in search_news:\n        for i in search_news_st.find_all(\"a\"):\n            title_list.append(i.text.strip())\n    return title_list[st_list_count-1]\n\n# parse the media info\ndef bread_crumbsmethod(st_list_count,html):\n    bread_list=[]\n    for bread in html.find_all(\"span\", class_='byline'):\n        bread = bread.text.strip()\n        bread=bread.replace('|\\n','')\n        bread_list.append(re.sub('\\d{4}.\\d{2}.\\d{2} \\d{2}:\\d{2}','',bread))\n\n    return bread_list[st_list_count-1]\n\n# main entry point (pknumber = unique collection id, urlquery = collection keyword, browser_path = PhantomJS install path, order_input = start page, order = end page, delay = page load wait time)\ndef centermagazine(pknumber,urlquery,browser_path,order_input,order,delay):\n    try:\n        news_flag='Centermagazine'\n        # browser_path=\"C:/Users/seo/Desktop/chromedrive/chromedriver.exe\"\n        browser=webdriver.PhantomJS(browser_path)\n#         order_input=1\n#         order=3\n\n        Keyword=urllib.parse.quote(urlquery)\n        urllist=[]\n        title_list=[]\n        media_list=[]\n        media_data_list=[]\n        context_list=[]\n        href_list=[]\n\n\n        for order_input in range(order_input,order):\n#             url=\"https://search.joins.com/TotalNews?page=\"+str(order_input)+\"&Keyword=\"+str(keyword)+\"&SortType=New&SearchCategoryType=TotalNews\"\n            # database Environment Setting\n            url=\"https://search.joins.com/TotalNews?page=\"+str(order_input)+\"&Keyword=\"+str(Keyword)+\"&SortType=New&SearchCategoryType=TotalNews\"\n            browser.get(url)\n            browser.implicitly_wait(delay)\n\n            source = browser.page_source\n            html = BeautifulSoup(source, \"html.parser\")\n\n            st_list_count=1\n            dt_list_count=10\n\n\n            print(url)\n            for st_list_count in range(st_list_count,dt_list_count):\n\n                title=titlemethod(st_list_count,html)\n                title_list.append(title)\n                dv.DatacolctVo.set_title_list(dv,title)\n\n                media=bread_crumbsmethod(st_list_count,html)\n                media_list.append(media)\n                dv.DatacolctVo.set_media_data_list(dv,media)\n\n                href=hrefmethod(st_list_count,html)\n                if href == None:\n                    print(\"duplicate data\")\n                    raise\n                href_list.append(href)\n                dv.DatacolctVo.set_href_list(dv,href)\n\n                media_data=datemethod(st_list_count,html,browser)\n                media_data_list.append(media_data)\n\n                context=detail_page(href,browser_path)\n                context_list.append(context)\n                dv.DatacolctVo.set_context_list(dv,context)\n\n                dv.DatacolctVo.set_st_list_count(dv,st_list_count)\n
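            # also record the page-level context (URL and page index) on the shared value object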
dv.DatacolctVo.set_url_data(dv,url)\n dv.DatacolctVo.set_order_input(dv,order_input)\n time.sleep(delay)\n return title_list,context_list,href_list,media_data_list,st_list_count,url,order_input,pknumber\n except Exception as e :\n print(\"Last page\")\n print(e)\n return title_list,context_list,href_list,media_data_list,st_list_count,url,order_input,pknumber\n browser.close()\n\n\n","sub_path":"newsCrawler/joongangnews.py","file_name":"joongangnews.py","file_ext":"py","file_size_in_byte":7669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"484261722","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn import preprocessing\nimport argparse\nimport numpy as np\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-train\", \"--trainingdataset\", required=True)\nap.add_argument(\"-test\", \"--testingdataset\")\nap.add_argument(\"-attri\", \"--attributes\", type=str, required=True)\nap.add_argument(\"-cat\", \"--category\", type=str)\nap.add_argument(\"-c\", \"--class\", type=int, required=True)\nargs = vars(ap.parse_args())\nle = preprocessing.LabelEncoder()\n\nbalance_data = pd.read_csv(args['trainingdataset'], sep=',')\n\nX = balance_data.values[:, int(args['attributes'].split(':')[0]):int(args['attributes'].split(':')[1])]\nY = balance_data.values[:, args['class']]\n# Y = Y.astype('int')\n\nif args['category']:\n for i in range(int(args['category'].split(':')[0]), int(args['category'].split(':')[1])):\n col = int(args['category'].split(':')[0])\n le.fit(list(set(X[:, i - col])))\n X[:, i - col] = le.transform(X[:, i - col])\n\nif args['testingdataset']:\n testing_data = pd.read_csv(args['testingdataset'], sep=',')\n X_test = testing_data.values[:, int(args['attributes'].split(':')[0]):int(args['attributes'].split(':')[1])]\n y_test = testing_data.values[:, args['class']]\n X_train = X\n y_train = Y\nelse:\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=100)\nclf = KNeighborsClassifier(n_neighbors=3)\nclf.fit(X_train, y_train)\nX_pre = np.array([1] * 18).reshape(1, 18)\nprint(clf.predict(X_pre))\nprint(clf.kneighbors(X_pre, n_neighbors=3))\nprint(y_train[835], y_train[383], y_train[829])\n","sub_path":"Tradeoff between Performance and Explanation of Classification Models/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"487677864","text":"import os\nimport platform\nimport textwrap\n\nfrom jinja2 import Template\n\nfrom conans.errors import ConanException\nfrom conans.util.files import normalize\n\nsh_activate = textwrap.dedent(\"\"\"\\\n #!/usr/bin/env sh\n\n {%- for it in modified_vars %}\n export CONAN_OLD_{{it}}=\"${{it}}\"\n {%- endfor %}\n\n while read -r line; do\n LINE=\"$(eval echo $line)\";\n export \"$LINE\";\n done < \"{{ environment_file }}\"\n\n export CONAN_OLD_PS1=\"$PS1\"\n export PS1=\"({{venv_name}}) $PS1\"\n\"\"\")\n\nsh_deactivate = textwrap.dedent(\"\"\"\\\n #!/usr/bin/env sh\n export PS1=\"$CONAN_OLD_PS1\"\n unset CONAN_OLD_PS1\n\n {% for it in modified_vars %}\n export {{it}}=\"$CONAN_OLD_{{it}}\"\n unset CONAN_OLD_{{it}}\n {%- endfor %}\n {%- for it in new_vars %}\n unset {{it}}\n {%- endfor %}\n\"\"\")\n\nbat_activate = textwrap.dedent(\"\"\"\\\n @echo off\n\n {%- for it in modified_vars %}\n SET \"CONAN_OLD_{{it}}=%{{it}}%\"\n {%- endfor %}\n\n FOR /F 
\"usebackq tokens=1,* delims==\" %%i IN (\"{{ environment_file }}\") DO (\n CALL SET \"%%i=%%j\"\n )\n\n SET \"CONAN_OLD_PROMPT=%PROMPT%\"\n SET \"PROMPT=({{venv_name}}) %PROMPT%\"\n\"\"\")\n\nbat_deactivate = textwrap.dedent(\"\"\"\\\n @echo off\n\n SET \"PROMPT=%CONAN_OLD_PROMPT%\"\n SET \"CONAN_OLD_PROMPT=\"\n\n {% for it in modified_vars %}\n SET \"{{it}}=%CONAN_OLD_{{it}}%\"\n SET \"CONAN_OLD_{{it}}=\"\n {%- endfor %}\n {%- for it in new_vars %}\n SET \"{{it}}=\"\n {%- endfor %}\n\"\"\")\n\nps1_activate = textwrap.dedent(\"\"\"\\\n {%- for it in modified_vars %}\n $env:CONAN_OLD_{{it}}=$env:{{it}}\n {%- endfor %}\n\n foreach ($line in Get-Content \"{{ environment_file }}\") {\n $var,$value = $line -split '=',2\n $value_expanded = $ExecutionContext.InvokeCommand.ExpandString($value)\n Set-Item env:\\\\$var -Value \"$value_expanded\"\n }\n\n function global:_old_conan_prompt {\"\"}\n $function:_old_conan_prompt = $function:prompt\n function global:prompt {\n write-host \"({{venv_name}}) \" -nonewline; & $function:_old_conan_prompt\n }\n\"\"\")\n\nps1_deactivate = textwrap.dedent(\"\"\"\\\n $function:prompt = $function:_old_conan_prompt\n remove-item function:_old_conan_prompt\n\n {% for it in modified_vars %}\n $env:{{it}}=$env:CONAN_OLD_{{it}}\n Remove-Item env:CONAN_OLD_{{it}}\n {%- endfor %}\n {%- for it in new_vars %}\n Remove-Item env:{{it}}\n {%- endfor %}\n\"\"\")\n\n\nBAT_FLAVOR = \"bat\"\nPS1_FLAVOR = \"ps1\"\nSH_FLAVOR = \"sh\"\n\n\ndef _variable_placeholder(flavor, name, append_with_spaces):\n \"\"\"\n :param flavor: flavor of the execution environment\n :param name: variable name\n :return: placeholder for the variable name formatted for a certain execution environment.\n (e.g., cmd, ps1, sh).\n \"\"\"\n if flavor == BAT_FLAVOR:\n return \"%{}%\".format(name)\n if flavor == PS1_FLAVOR:\n return \"$env:%s\" % name\n # flavor == sh\n return \"${%s+ $%s}\" % (name, name) if append_with_spaces else \"${%s+:$%s}\" % (name, name)\n\n\ndef _format_values(flavor, variables, append_with_spaces):\n \"\"\"\n Formats the values for the different supported script language flavors.\n :param flavor: flavor of the execution environment\n :param variables: variables to be formatted\n :return:\n \"\"\"\n\n if flavor in [BAT_FLAVOR, PS1_FLAVOR]:\n path_sep, quote_elements = \";\", False\n else:\n path_sep, quote_elements = \":\", True\n\n for name, value in variables:\n # activate values\n if isinstance(value, list):\n append_space = name in append_with_spaces\n placeholder = _variable_placeholder(flavor, name, append_space)\n if append_space:\n # Variables joined with spaces look like: CPPFLAGS=\"one two three\"\n value = \" \".join(value+[placeholder])\n value = \"\\\"%s\\\"\" % value if quote_elements else value\n else:\n # Quoted variables joined with pathset may look like:\n # PATH=\"one path\":\"two paths\"\n # Unquoted variables joined with pathset may look like: PATH=one path;two paths\n value = [\"\\\"%s\\\"\" % v for v in value] if quote_elements else value\n if flavor == SH_FLAVOR:\n value = path_sep.join(value) + placeholder\n else:\n value = path_sep.join(value + [placeholder])\n else:\n # single value\n value = \"\\\"%s\\\"\" % value if quote_elements else value\n if platform.system() != \"Windows\":\n value = value.replace(\"\\\\\", \"\\\\\\\\\")\n\n # deactivate values\n existing = name in os.environ\n yield name, value, existing\n\n\ndef _files(env_vars, vars_with_spaces, flavor, activate_tpl, deactivate_tpl, venv_name,\n env_filepath):\n ret = list(_format_values(flavor, 
env_vars.items(), vars_with_spaces))\n modified_vars = [name for name, _, existing in ret if existing]\n new_vars = [name for name, _, existing in ret if not existing]\n\n activate_content = activate_tpl.render(environment_file=env_filepath,\n modified_vars=modified_vars, new_vars=new_vars,\n venv_name=venv_name)\n deactivate_content = deactivate_tpl.render(modified_vars=modified_vars, new_vars=new_vars)\n\n environment_lines = [\"{}={}\".format(name, value) for name, value, _ in ret]\n # This blank line is important, otherwise the script doens't process last line\n environment_lines.append('')\n\n if flavor == SH_FLAVOR:\n # replace CRLF->LF guarantee it is always LF, irrespective of current .py file\n activate_content = activate_content.replace(\"\\r\\n\", \"\\n\")\n deactivate_content = deactivate_content.replace(\"\\r\\n\", \"\\n\")\n environment = \"\\n\".join(environment_lines)\n else:\n activate_content = normalize(activate_content)\n deactivate_content = normalize(deactivate_content)\n environment = os.linesep.join(environment_lines)\n\n return activate_content, deactivate_content, environment\n\n\ndef env_files(env_vars, vars_with_spaces, flavor, folder, name, venv_name):\n env_filename = \"environment{}.{}.env\".format(name, flavor)\n activate_filename = \"activate{}.{}\".format(name, flavor)\n deactivate_filename = \"deactivate{}.{}\".format(name, flavor)\n\n templates = {SH_FLAVOR: (sh_activate, sh_deactivate),\n BAT_FLAVOR: (bat_activate, bat_deactivate),\n PS1_FLAVOR: (ps1_activate, ps1_deactivate)}\n try:\n activate, deactivate = templates[flavor]\n except KeyError:\n raise ConanException(\"Unrecognized flavor: %s\" % flavor)\n activate_tpl, deactivate_tpl = Template(activate), Template(deactivate)\n\n env_filepath = os.path.abspath(os.path.join(folder, env_filename))\n activate, deactivate, envfile = _files(env_vars, vars_with_spaces, flavor, activate_tpl,\n deactivate_tpl, venv_name, env_filepath)\n\n result = {activate_filename: activate,\n deactivate_filename: deactivate,\n env_filename: envfile}\n return result\n","sub_path":"conans/client/envvars/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":7097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"137558378","text":"# Work with Python 3.6\nimport discord\nimport point_counter as pc\n\nTOKEN = \"This isn't supposed to be shared.\"\n\nclient = discord.Client()\n\nprint('Starting point counter...')\npoint_counter = pc.Point_Counter(\"data.dat\")\nprint('Done.')\n\n@client.event\nasync def on_message(message):\n if message.content == 'f':\n msg = '{0.author.mention} has paid their respects.'.format(message)\n await client.send_message(message.channel, msg)\n if message.content.startswith('!p'):\n print(\"Point Counter message recieved.\")\n msg = point_counter.handle(message.content)\n await client.send_message(message.channel, msg)\n\n@client.event\nasync def on_ready():\n print('Logged in as')\n print(client.user.name)\n print(client.user.id)\n print('------')\n\nclient.run(TOKEN)\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"103993695","text":"from abc import ABC, abstractmethod\nfrom typing import List\nimport math\n\n\nclass Model(ABC):\n @staticmethod\n @abstractmethod\n def get_domain(name) -> list:\n ...\n\n @property\n @abstractmethod\n def is_positive(self) -> bool:\n ...\n\n 
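    # concrete models must also expose per-attribute lookup; make_branches relies on get_value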
@abstractmethod\n def get_value(self, key):\n ...\n\n\ndef make_branches(attr, domain, instances):\n # branches shape:\n # dict(\n # attribute_name:\n # List[\n # positive_number,\n # negative_number,\n # List[positive_instances],\n # List[negative_instances]\n # ]\n # )\n branches = dict([(d, [0, 0, [], []]) for d in domain])\n for ins in instances:\n key = ins.get_value(attr)\n if ins.is_positive:\n branches.get(key)[0] += 1\n branches.get(key)[2].append(ins)\n else:\n branches.get(key)[1] += 1\n branches.get(key)[3].append(ins)\n return branches\n\n\ndef entropy(instances):\n \"\"\"\n count positive, negative instances and calc entropy\n :param instances: List[Model]\n :return: entropy - float\n \"\"\"\n pos = 0\n neg = 0\n for ins in instances:\n if ins.is_positive:\n pos += 1\n else:\n neg += 1\n return entropy_zero(pos, neg, len(instances))\n\n\ndef entropy_zero(pos, neg, _all):\n \"\"\"\n gets pos, neg, all value and calc below formula\n Ppos * log2(Ppos) - Pneg * log2(Pneg)\n :param pos: positive number - int\n :param neg: negative number - int\n :param _all: all instances number - int [often equal to pos + neg]\n :return: entropy value\n \"\"\"\n if _all == 0:\n return 0\n pp = pos / _all\n pn = neg / _all\n return -pp * (math.log2(pp) if pp != 0 else 0) - pn * (math.log2(pn) if pn != 0 else 0)\n\n\ndef information_gain(ant, instances, attr, domain: list) -> float:\n \"\"\"\n gets entropy, instances, attribute, domain and return information gain\\n\n entropy - sigma(1, len(domain)) ((posi+negi)/len(instances)) * entropy_zero(posi, negi, posi+negi)\\n\n :param ant: entropy float\n :param instances: List[Model]\n :param attr: attribute\n :param domain: list attribute domain\n :return:\n \"\"\"\n if ant is None:\n ant = entropy(instances)\n instances: List[Model] = instances\n branches = make_branches(attr, domain, instances)\n for pos, neg, pl, nl in branches.values():\n ant -= (pos + neg) / len(instances) * entropy_zero(pos, neg, pos + neg)\n return ant\n","sub_path":"supervised/Decision_Tree/decision.py","file_name":"decision.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"630002307","text":"import time\nimport sys\nfrom psychopy import visual,event,core\n \nwin = visual.Window([400,400],color=\"black\", units=\"pix\")\nsquare = visual.Rect(win,lineColor=\"black\",fillColor=\"blue\",size=[100,100], pos=[0,0])\n\nwhile True:\n\tif event.getKeys('left'):\n\t\tsquare.size += (10, 0) \n\tif event.getKeys('right'):\n\t\tsquare.size -= (10, 0)\n\n\tsquare.draw()\n\twin.flip()\n\n\tif event.getKeys('q'):\n\t\tbreak\n\nsys.exit()\n","sub_path":"exercise_1_10.py","file_name":"exercise_1_10.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"250045492","text":"from multiprocessing import Process\nfrom xml.dom import minidom\nimport wget\nimport os\nfrom dbSchema import drop_cpe_collection, db_insert\nfrom dbSchema import db_insert_bulk, get_cpes_count\nimport zipfile\nimport glob\n\ncpe_latest_zip = 'http://static.nvd.nist.gov/feeds/xml/cpe/dictionary/official-cpe-dictionary_v2.3.xml.zip'\n\ndef extract_cpe_zip(cpe_latest_xml_zip):\n with zipfile.ZipFile(cpe_latest_xml_zip, \"r\") as z:\n z.extractall(\".\")\n\n# Remove downloaded filesand temporary files\ndef cleanup():\n for current in glob.glob(os.path.join('.', '*.xml')):\n os.remove(current)\n for current in glob.glob(os.path.join('.', 
'*.zip')):\n os.remove(current)\n for current in glob.glob(os.path.join('.', '*.tmp')):\n os.remove(current)\n\ndef update_cpe_db():\n drop_cpe_collection()\n get_cpe_data(download_cpe_xml_zip(cpe_latest_zip))\n cleanup()\n\n# Download CPE Update definitions\ndef download_cpe_xml_zip(cpe_latest_url):\n # Cleanup before downloads\n cleanup()\n cpe_latest_xml_zip = wget.download(cpe_latest_url, bar=wget.bar_adaptive)\n extract_cpe_zip(cpe_latest_xml_zip)\n cpe_latest_xml = \"official-cpe-dictionary_v2.3.xml\"\n return cpe_latest_xml\n\ndef make_post(cpe_uri_string):\n row = {}\n cpe_string = cpe_uri_string\n cpe_string_components = cpe_string.split(\":\")\n row['cpe_full_uri'] = cpe_string\n row['cpe_uri_begin'] = cpe_string_components[0]\n row['cpe_schema'] = cpe_string_components[1]\n row['cpe_part'] = cpe_string_components[2]\n row['cpe_vendor'] = cpe_string_components[3]\n row['cpe_product'] = cpe_string_components[4]\n row['cpe_version'] = cpe_string_components[5]\n row['cpe_update'] = cpe_string_components[6]\n row['cpe_edition'] = cpe_string_components[7]\n row['cpe_language'] = cpe_string_components[8]\n row['cpe_sw_edition'] = cpe_string_components[9]\n row['cpe_target_sw'] = cpe_string_components[10]\n row['cpe_target_hw'] = cpe_string_components[11]\n row['cpe_other'] = cpe_string_components[12]\n return row\n\n# Read XML and add records into database\ndef get_cpe_data(cpe_latest_xml):\n print(\"[+] Extracting\")\n xmldoc = minidom.parse(cpe_latest_xml)\n cpe_2_3_items = xmldoc.getElementsByTagName('cpe-23:cpe23-item')\n print(\"[+] Extraction done!\")\n cpe_items_to_db = list()\n for cpe_2_3_item in cpe_2_3_items:\n # Temporay holder before bulk inserts\n cpe_items_to_db.append(make_post(cpe_2_3_item.attributes['name'].value))\n # Bulk Insert Call\n records = db_insert_bulk(cpe_items_to_db)\n # Uncomment the below to print mongo document ids\n # print(records)\n\nif __name__ == \"__main__\":\n update_cpe_db()\n","sub_path":"netscan/nvdcpes.py","file_name":"nvdcpes.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"17029773","text":"import os\nimport sys\nsys.path.append('.')\nimport cv2\nimport torch\nimport numpy as np\nfrom models.adacofnet import AdaCoFNet\nfrom tqdm import tqdm\nfrom skimage.metrics import peak_signal_noise_ratio as psnr\nfrom skimage.metrics import structural_similarity as ssim\n# import time\n\ndef main():\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = AdaCoFNet(kernel_size=5, dilation=1)\n checkpoint = torch.load('checkpoint/kernelsize_5/ckpt.pth', map_location=torch.device('cpu'))\n model.load_state_dict(checkpoint['state_dict'])\n # state = torch.load('../checkpoint/kernelsize_5/ckpt.pth')\n # model.load_state_dict(state,strict=True)\n model.eval()\n model.to(device)\n\n # path = '../../data/UCF101/ucf101_interp_ours/'\n path = '/data/codec/zhangdy/video_interpolation/source_data/ucf101_interp_ours/'\n dirs = os.listdir(path)\n\n psnr_list = []\n ssim_list = []\n time_list = []\n # print('=========>Start Calculate PSNR and SSIM')\n for d in tqdm(dirs):\n img0 = (path + d + '/frame_00.png')\n img1 = (path + d + '/frame_02.png')\n gt = (path + d + '/frame_01_gt.png')\n img0 = (torch.tensor(cv2.imread(img0).transpose(2, 0, 1) / 255.)).to(device).float().unsqueeze(0)\n img1 = (torch.tensor(cv2.imread(img1).transpose(2, 0, 1) / 255.)).to(device).float().unsqueeze(0)\n gt = (torch.tensor(cv2.imread(gt).transpose(2, 0, 1) / 
255.)).to(device).float().unsqueeze(0)\n\n        # inference\n        pred = model(img0, img1)[0]\n        pred = torch.clamp(pred, 0, 1)\n        # Calculate indicators\n        out = pred.detach().cpu().numpy().transpose(1, 2, 0)\n        out = np.round(out * 255) / 255.\n        gt = gt[0].cpu().numpy().transpose(1, 2, 0)\n        psnr = compute_psnr(gt, out)\n        ssim = compute_ssim(gt, out)\n        psnr_list.append(psnr)\n        ssim_list.append(ssim)\n        # print(\"Avg PSNR: {} SSIM: {}\".format(np.mean(psnr_list), np.mean(ssim_list)))\n    # print('=========>Start Calculate Inference Time')\n\n    # inference time\n    for i in range(100):\n        start = torch.cuda.Event(enable_timing=True)\n        end = torch.cuda.Event(enable_timing=True)\n        start.record()\n        pred = model(img0, img1)[0]\n        end.record()\n        torch.cuda.synchronize()\n        time_list.append(start.elapsed_time(end))\n    time_list.remove(min(time_list))\n    time_list.remove(max(time_list))\n    print(\"Avg PSNR: {} SSIM: {} Time: {}\".format(np.mean(psnr_list), np.mean(ssim_list), np.mean(time_list) / 100))\n\n\ndef compute_psnr(im1, im2):\n    p = psnr(im1, im2)\n    return p\n\ndef compute_ssim(im1, im2):\n    isRGB = len(im1.shape) == 3 and im1.shape[-1] == 3\n    s = ssim(im1, im2, K1=0.01, K2=0.03, gaussian_weights=True, sigma=1.5, use_sample_covariance=False,\n             multichannel=isRGB)\n    return s\n\nif __name__ =='__main__':\n    main()","sub_path":"UCF101/AdaCoF_UCF101.py","file_name":"AdaCoF_UCF101.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"63805766","text":"import socket\r\nimport sys\r\nimport time\r\nimport os\r\nfrom os import path\r\n\r\nfrom threading import Thread\r\nfrom socketserver import ThreadingMixIn\r\nfrom divideFile import divideFile\r\nnum_peer=0\r\npeer_list=[('127.0.0.1',4322)]\r\nmyIp='127.0.0.1'\r\nmyPort=4321\r\n\r\ndef tracker_connect(TCP_IP,PORT):\r\n\ts_tracker = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\ts_tracker.connect((TCP_IP, PORT))\r\n\twhile(True):\r\n\t\tpeer_list= s_tracker.recv(1024)\r\n\t\tnum_peer=len(peer_list)\r\n\t\ttime.sleep(20)\r\n\r\ndef listenPeers():\r\n\ttcpsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\ttcpsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n\ttcpsock.bind((myIp, myPort))\r\n\tif not os.path.exists('Backup_Store'):\r\n\t\tos.mkdir('Backup_Store')\r\n\twhile(True):\r\n\t\ttcpsock.listen(5)\r\n\t\t(conn, (ip,port)) = tcpsock.accept()\r\n\t\tprint(conn)\r\n\t\tdata1=conn.recv(30)\r\n\t\tdata1=str(data1,'utf-8')\r\n\t\tif(data1=='Backup'):\r\n\r\n\t\t\tif not os.path.exists('Backup_Store/'+ip):\r\n\t\t\t\tos.mkdir('Backup_Store/'+ip)\r\n\t\t\tdata=conn.recv(1024)\r\n\t\t\tdata=str(data,'utf-8')\r\n\t\t\tprint(data)\r\n\t\t\t#print(\"look at me!!!\")\r\n\t\t\tfileName=data.split(\"#\",1)[0]\r\n\t\t\tchunkData=data.split(\"#\",1)[1]\r\n\t\t\tf = open('Backup_Store/'+ip+'/'+fileName, \"w\")\r\n\t\t\tf.write(chunkData)\r\n\t\t\tf.close()\r\n\r\n\t\telif(data1=='Retrieve'):\r\n\t\t\tfileName=str(conn.recv(1024),'utf-8')\r\n\t\t\tf=open('Backup_Store/'+ip+'/'+fileName, \"r\")\r\n\t\t\tdata=f.read()\r\n\t\t\tf.close()\r\n\t\t\tconn.send(bytes(data,'utf-8'))\r\n\r\n\r\ndef connect_peer(ip,port):\r\n\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\tprint(ip,port)\r\n\ts.connect((ip, port))\r\n\tprint(s)\r\n\treturn s,ip, port\r\n\t\t \r\n\r\ndef backup(socks,chunks, fileName):\r\n\tcount=0\r\n\tfor chunk in chunks:\r\n\t\tfor sock in 
socks:\r\n\t\t\tchunk_id=count\r\n\t\t\tcount+=1\r\n\t\t\tchunk=fileName+\"#\"+chunk\r\n\t\t\tmydata.append(tuple((fileName,chunk_id,sock[1],sock[2])))\r\n\t\t\tnewThread=sendFile(fileName, chunk, sock)\r\n\t\t\tnewThread.start()\r\n\t\t\tthreads.append(newThread)\r\n\t\t\t\r\nclass sendFile(Thread):\r\n \r\n    def __init__(self,fileName, chunk,sock):\r\n        Thread.__init__(self)\r\n        self.ip = sock[1]\r\n        self.port = sock[2]\r\n        self.sock = sock[0]\r\n        self.fileName=fileName\r\n        self.chunk=chunk\r\n        print (\" New thread started for Peer (\"+self.ip+\":\"+str(self.port)+\")\")\r\n \r\n    def run(self):\r\n    \tbackup=bytes('Backup','utf-8')\r\n    \tself.sock.send(backup)\r\n    \tdata=bytes(self.chunk,'utf-8')\r\n    \tprint(data)\r\n    \tself.sock.send(data)\r\n    \tprint(self.sock)\r\n    \t#self.sock.close()\r\n\r\ndef retrieveFile(fileName):\r\n\tfileMetadata=[]\r\n\tfor data in mydata:\r\n\t\tif (data[0]==fileName):\r\n\t\t\tfileMetadata.append((data[0], data[2], data[3], data[1])) # fileName, ip, port, chunk_id\r\n\treturn fileMetadata\r\n\r\nclass getChunk(Thread):\r\n \r\n    def __init__(self, ip, port, chunkId,fileName):\r\n        Thread.__init__(self)\r\n        self.ip = ip\r\n        self.port = port\r\n        self.chunkId=chunkId\r\n        self.fileName=fileName\r\n        print (\" New thread started for Peer (\"+ip+\":\"+str(port)+\")\")\r\n\r\n \r\n    def run(self):\r\n    \ts_getChunk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n    \ts_getChunk.connect((self.ip, self.port))\r\n    \ts_getChunk.send(bytes('Retrieve','utf-8'))\r\n    \ts_getChunk.send(bytes(self.fileName,'utf-8'))\r\n    \tdata_retrieved.append(s_getChunk.recv(1024))\r\n\r\ndef getFile(fileMetadata):\r\n\tfor data in fileMetadata:\r\n\t\tip=data[1]\r\n\t\tport=data[2]\r\n\t\tchunkId=data[3]\r\n\t\tfileName=data[0]\r\n\t\tnewThread=getChunk(ip, port, chunkId,fileName)\r\n\t\tnewThread.start()\r\n\t\trThreads.append(newThread)\r\n\r\n\r\nthreads=[]\r\nrThreads=[]\r\ndata_retrieved=[]\r\n#tracker ip and port\r\nTCP_IP='127.0.0.1'\r\nTCP_PORT=3454\r\n#th = Thread(target=tracker_connect,args=(TCP_IP,TCP_PORT))\r\n#th.start()\r\nmydata=[]\r\nth_listen=Thread(target=listenPeers)\r\nth_listen.start()\r\nwhile(True):\r\n\tprint('Menu:\\n')\r\n\tprint('1. Backup\\n')\r\n\tprint('2. Retrieve\\n')\r\n\tchoice=input(\"List your choice\")\r\n\tif(choice=='1'):\r\n\t\t#print(\"hihihihiih\")\r\n\t\tnum_peer_instant=len(peer_list)\r\n\t\tsockets_list=[]\r\n\t\tfile_name=input(\"Enter the filename:\")\r\n\t\tfor i in range(0, num_peer_instant):\r\n\t\t\tip=peer_list[i][0]\t\t#getting ip and port of peer\r\n\t\t\tport=int(peer_list[i][1])\r\n\t\t\tsockets_list.append(connect_peer(ip,port)) \r\n\t\tconnections=len(sockets_list)\r\n\t\tchunk_list=divideFile(file_name,connections)\r\n\t\tbackup(sockets_list,chunk_list,file_name)\r\n\r\n\telif(choice=='2'):\r\n\t\tfileName=input(\"Enter the file name:\")\r\n\t\tflag=0\r\n\t\tfor data in mydata:\r\n\t\t\tif(fileName==data[0]):\r\n\t\t\t\tflag=1\r\n\t\tif(flag==0):\r\n\t\t\tprint(\"Error!!! File not backed up\")\r\n\t\t\tcontinue\r\n\t\tfileMetadata=retrieveFile(fileName)\r\n\t\tfileRetrieved=getFile(fileMetadata)\r\n\t\tchunkIds=[]\r\n\t\tfor data in fileMetadata:\r\n\t\t\tchunkIds.append(data[3])\r\n\t\tzipped_pairs = zip(chunkIds, data_retrieved) \r\n\t\tz = [x for _, x in sorted(zipped_pairs)]\r\n\t\tprint(z)\r\n\t\t\r\n\t\t\t\r\n","sub_path":"P2P file transfer python/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"346742032","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\nfrom .forms import GeolocationForm, DeviceValidityForm, \\\n    FrequencyRangeForm, DeviceDescriptorForm, DeviceOwnerForm, SpectrumForm\n\nfrom .models import DeviceDescriptor, Geolocation, SpectrumSpec, DeviceValidity, \\\n    Spectrum, Frequency, RulsetInfo\nfrom django.views.decorators.csrf import csrf_exempt\n\n# Create your views here.\ndef index(request):\n    respuesta = {}\n    return render(request, \"paws/index.html\", respuesta)\n\ndef documentacion_registro(request):\n    return render(request, \"paws/recomendacion_registro.html\", {})\n\ndef dispositivos_validados(request):\n    \"\"\"Queries the list of validated and non-validated devices\"\"\"\n    devicevalidity = DeviceValidity.objects.all()\n    devices = devicevalidity.values(\"deviceDesc\", \"isValid\", \"reason\")\n    for data in devices:\n        id_deviceDesc = data[\"deviceDesc\"]\n        devicesDesc = DeviceDescriptor.objects.get(pk=id_deviceDesc)\n        data.update({\"geolocation\": devicesDesc.geolocation , \"serial\":devicesDesc.serial_Number})\n        # print(devicesDesc.geolocation)\n    # print(devices)\n    return render(request, \"paws/dispositivos_validados.html\", {\"devices\": devices})\n\ndef canales_regiones(request):\n    \"\"\" Useful information on the client side; for the master\n    a separate process is needed\n    \"\"\"\n    spectrum_form = SpectrumForm() # this form shows the list of regions based on the DANE code\n    if request.POST:\n        spectrum = Spectrum.objects.filter(geolocation=request.POST[\"geolocation\"]) # filters the spectrum according to the DANE code\n        spectrum = spectrum.values(\"operation\", \"channels\")\n        geolocation = Geolocation.objects.get(pk=request.POST[\"geolocation\"])\n        # print(geolocation.region)\n        datos = {\"canales_ocupados\": spectrum, \"spectrum_form\": spectrum_form,\n                 \"ciudad\": geolocation.city, \"departamento\": geolocation.region}\n    else:\n        datos = {\"spectrum_form\": spectrum_form}\n    return render(request, \"paws/canales_regiones.html\",datos)\n\ndef register(request):\n    \"\"\" This function performs the \n    REGISTRATION_REQ through a web form\n    \"\"\"\n    freq_range = FrequencyRangeForm()\n    device_descriptor = DeviceDescriptorForm()\n    device_owner = DeviceOwnerForm()\n    device_validity = DeviceValidityForm()\n    print(request.POST)\n    if request.POST:\n        freq_range = FrequencyRangeForm(request.POST)\n        device_descriptor = DeviceDescriptorForm(request.POST)\n        device_owner = DeviceOwnerForm(request.POST)\n        device_validity = DeviceValidityForm(request.POST)\n\n        if device_validity.is_valid() or freq_range.is_valid() and device_descriptor.is_valid() and device_owner.is_valid():\n\n            device_descriptor = device_descriptor.save(commit=False)\n            device_descriptor.device_capabilities = freq_range.save()\n            device_descriptor.save()\n\n            device_owner = device_owner.save(commit=False)\n            device_owner.device_descriptor = device_descriptor\n            
device_owner.save()\n\n            device_validity = device_validity.save(commit=False)\n            device_validity.deviceDesc = device_descriptor\n            device_validity.save()\n\n            return render(request, \"paws/registro_exitoso.html\", {\"registro\": \"Registro exitoso de dispositivo\", \"info_reg\":request.POST})\n\n    respuesta = {\"freq_range\": freq_range,\n                 \"device_descriptor\": device_descriptor,\n                 \"device_owner\": device_owner,\n                 \"device_validity\":device_validity,}\n\n    return render(request, \"paws/register.html\", respuesta)\n\n# Protocol operations between the master and the slave\n@csrf_exempt\ndef init_req(request):\n    \"\"\" This is the function that starts the PAWS protocol \"\"\"\n    master_data = request.POST \n    device = DeviceDescriptor.objects.filter(serial_Number=master_data[\"serial_Number\"]).filter(model_Id=master_data[\"model_Id\"])\n    if len(device) >= 1:\n        spectrumspec = SpectrumSpec.objects.filter(geolocation=master_data[\"dane_code\"])\n        ruleset_id = spectrumspec.values(\"rulset_Info\")\n        ruleset_id = ruleset_id[0]\n        ruleset_id = ruleset_id[\"rulset_Info\"]\n        print(ruleset_id)\n        ruleset_info = RulsetInfo.objects.filter(pk=ruleset_id)\n        ruleset_info = list(ruleset_info.values(\"authority\", \"rulsetId\"))\n        INIT_RESP = {\"ruleset_info\":ruleset_info}\n        return JsonResponse(INIT_RESP)\n\n\n@csrf_exempt\ndef avail_spectrum(request):\n    \"\"\"This function returns AVAIL_SPECTRUM_RESP\n    in response to the master's requests\n    \"\"\"\n    master_data = request.POST # where the master's requests are found\n    # databases queried according to the master's requests\n    device = DeviceDescriptor.objects.filter(serial_Number=master_data[\"serial_Number\"]).filter(ruleset_Ids=master_data[\"ruleset_Ids\"]).filter(model_Id=master_data[\"model_Id\"])\n    # print(device)\n    \n    if len(device)==1:\n        device = device[0]\n        # print(device.geolocation.pk)\n        # look up the channels occupied per region\n        spectrum = Spectrum.objects.filter(geolocation=device.geolocation.pk)\n        ocuppied_channels = [] # list of the occupied channels\n        for data in spectrum:\n            start_freq = Frequency.objects.get(pk=data.channels.pk)\n            ocuppied_channels.append(start_freq.frequency)\n        # query all channels in order to select the free ones\n        all_channels = Frequency.objects.all()\n        all_channels = all_channels.values(\"frequency\")\n        all_channels = list(map(lambda table: table[\"frequency\"], all_channels))\n        # compare channels to extract the free ones\n        free_channels = []\n        for i in all_channels:\n            if i in ocuppied_channels:\n                pass\n            else:\n                free_channels.append(i)\n    else:\n        print(\"informacion repetida o inexistente\")\n\n    # spectrum database queried and filtered according to the master's\n    # georeferenced information\n\n    # build the AVAIL_SPECTRUM_RESP response\n    avail_spectrum_resp = {\"serial_Number\": device.serial_Number,\n                           \"manufacturer_Id\":device.manufacturer_Id,\n                           \"model_Id\": device.model_Id, \"ruleset_Ids\": device.ruleset_Ids,\n                           \"free_spectra\":free_channels,\n                           }\n    return JsonResponse(avail_spectrum_resp)\n\n@csrf_exempt\ndef spectrum_use_resp(request):\n    msg_master = request.POST\n    # validate that the device exists\n    device = DeviceDescriptor.objects.filter(serial_Number=msg_master[\"serial_Number\"]).filter(ruleset_Ids=msg_master[\"ruleset_Ids\"]).filter(model_Id=msg_master[\"model_Id\"]).count()\n    if device == 1 :\n        # information related to the spectrum\n        spectra_use = float(msg_master[\"spectra_use\"])\n        frequency 
= Frequency.objects.filter(frequency=spectra_use)\n frequency = frequency.values(\"channels\", \"frequency\")\n if len(frequency)==1:\n frequency = frequency[0]\n channels = frequency[\"channels\"]\n last_pk = Spectrum.objects.all().latest('pk')\n spectrum = Spectrum.objects.create(pk=last_pk.id+1, operation=msg_master[\"operation\"], channels_id= channels, geolocation_id=msg_master[\"dane_code\"])\n msg_to_master = {\"info\": \"information ok\"}\n else:\n msg_to_master = {\"info\": \"not valid information\"}\n else:\n msg_to_master = {\"info\": \"not valid information\"}\n return JsonResponse(msg_to_master)\n \n@csrf_exempt\ndef delete_channel_paws(request):\n msg_from_master = request.POST\n pk_channel = Frequency.objects.filter(frequency=msg_from_master[\"freq_used\"])\n pk_channel = pk_channel[0].pk\n spectrum = Spectrum.objects.filter(channels=pk_channel).filter(geolocation=msg_from_master[\"dane_code\"])\n if len(spectrum)==1:\n spectrum.delete()\n return JsonResponse({\"delete\": \"delete ok\"})\n\n","sub_path":"src/project_module3/paws/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"653400761","text":"#!/usr/bin/env python3\n\nfrom pathlib import Path\nfrom typing import List, Tuple\nimport subprocess\nfrom enum import Enum\n\nimport svgwrite\nfrom sympy.ntheory.primetest import isprime\n\nfrom code.utils import SvgGrid, SIZE, DIST_DIR\n\n\ndef save_prime_poster(path: Path,\n size:Tuple[int, int]=SIZE['SQUARE'],\n background_fill=None,\n numbers_per_line:int=100,\n with_border:bool=True):\n\n font_size = 2\n font_family = 'Space Mono'\n ordinary_prime_fill = 'black'\n twin_prime_fill = '#F10056'\n text_fill = 'white'\n width = size[0]\n height = size[1]\n\n if with_border:\n padding = width/20.0\n else:\n padding = 0\n\n grid = SvgGrid(size=size,\n n_rows=numbers_per_line,\n n_columns=numbers_per_line,\n padding=padding)\n\n number = 1\n for cell in grid:\n if isprime(number):\n if isprime(number+2) or isprime(number-2):\n circle_fill = twin_prime_fill\n else:\n circle_fill = ordinary_prime_fill\n grid.draw_circle(cell, fill_color=circle_fill)\n grid.draw_text(cell,\n text=str(number),\n color=text_fill,\n font_size=font_size,\n font_family=font_family)\n number += 1\n\n grid.save(path)\n\n\nif __name__ == '__main__':\n svg_path = DIST_DIR/'primes.svg'\n save_prime_poster(svg_path, numbers_per_line=100)\n subprocess.run(['convert', str(svg_path), str(svg_path.with_suffix('.png'))])\n","sub_path":"code/primes.py","file_name":"primes.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"441596411","text":"from sys import argv\n\nscript, user_name = argv\n\nprint(f\"Hi {user_name}, I'm the {script} script.\")\n\nprompt1 = 'name the friend you would like to be reminded to call: '\n\nprompt2 = 'name the next friend you would like to call: '\n\n\n\n\n\n\n\n\n\n#friends = ['sally on jan'+ str(1), 'marge on feb' + str(1), 'karen on mar' + str(1), 'audrey on apr' + str(1), 'delia on may' + str(1)]\n\nfriend1 = input(prompt1)\n\nnext_friend = input(prompt2)\n\nfriends = [friend1, next_friend, next_friend]\n\n\nnext_friend = input(prompt2) # at this point, why doesn't friend2 point to the new value input when input(prompt2) function is called for the second time and variable is reassigned?\n\ndef reminder(friends):\n\tprint(\"I'd like to help you remember to stay in 
touch with your friends by creating a reminder schedule of whom to call when.\")\n\tfor friend in friends:\n\t\tprint(f\"call {friend}\")\n\t\t\nreminder(friends)\n\n\n\n\n\n\n\t\n\n","sub_path":"Python_Hard_Way_W2/function_practice.py","file_name":"function_practice.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"414700764","text":"import requests\r\nfrom urllib.parse import quote\r\n\r\nfrom Spider_ksd.head import *\r\n\r\n# from .oss_utils import *\r\nfrom Spider_ksd.utils.oss_utils import OSSUtils\r\n\r\n\r\nclass SnapshotUtils(object):\r\n\r\n    def __init__(self):\r\n        self.browser = webdriver.PhantomJS()\r\n\r\n        # browser window size\r\n        self.browser.set_window_size(1000, 1024)\r\n\r\n        # set the page load timeout\r\n        self.browser.set_page_load_timeout(15)\r\n\r\n    def get_png(self, request_url):\r\n        \"\"\"\r\n        :param: request_url\r\n        :return: MD5 name of the image, image content\r\n        \"\"\"\r\n        img_name = None\r\n        img_content = None\r\n        try:\r\n            self.browser.get(request_url)\r\n            img_content = self.browser.get_screenshot_as_png()\r\n\r\n            # compute the image name (MD5)\r\n            img_name = hashlib.md5(request_url.encode('utf-8')).hexdigest()\r\n            # print(img_name)\r\n            time.sleep(0.5)\r\n        except Exception as e:\r\n            logging.warning('============ timeout****{} ============'.format(str(e)))\r\n        return img_name, img_content\r\n\r\n    def close_browser(self):\r\n        self.browser.close()\r\n\r\n\r\ndef get_splash_png(request_url, is_post=False, body=None):\r\n    \"\"\"\r\n    Take a screenshot via the Splash service; supports async use\r\n    :param request_url\r\n    :return: MD5 name of the image, image content\r\n    \"\"\"\r\n    img_name = None\r\n    img_content = None\r\n    method = \"POST\" if is_post else \"GET\"\r\n    lua = \"\"\"\r\n    function main(splash, args)\r\n        splash:set_viewport_full()\r\n        splash:go(\"http://www.ahtba.org.cn/Notice/NoticeDetail?id=%s\")\r\n        splash:wait(1)\r\n        return splash:png()\r\n    end\r\n    \"\"\"\r\n    try:\r\n        url = 'http://localhost:8050/execute?lua_source=' + quote(lua % (str(body)))\r\n        # print(lua%body)\r\n        response = requests.get(url)\r\n        img_content = response.content\r\n\r\n        # compute the image name (MD5)\r\n        img_sou = request_url + body\r\n        img_name = hashlib.md5(img_sou.encode('utf-8')).hexdigest()\r\n        # OSSUtils.upload('{}.png'.format(img_name), img_content)\r\n        print(img_name)\r\n    except Exception as e:\r\n        logging.warning('============ timeout****{} ============'.format(e))\r\n    return img_name, img_content\r\n\r\n    # def close_browser(self):\r\n    #     self.browser.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n    item = SnapshotUtils()\r\n    # img_name, img_content = get_splash_png(request_url='http://www.ahtba.org.cn/Notice/NoticeContent',\r\n    #                                        is_post=True, body='691710')\r\n    # img_name, img_content = item.get_png(\"http://www.baidu.com\")\r\n\r\n    # url = 'http://localhost:8050/render.png?url=http://www.ahtba.org.cn/Notice/NoticeDetail?id=691680&width=1000&height=700'\r\n    # response = requests.get(url)\r\n    # print(img_content)\r\n    # with open('./t.png', 'wb') as f:\r\n    #     f.write(img_content)\r\n\r\n    while True:\r\n        keyword = RedisPipeline.poper('bid_url')\r\n        print(keyword)\r\n        img_name, img_content = item.get_png(keyword)\r\n        if img_name and img_content:\r\n            OSSUtils.upload('{}.png'.format(img_name), img_content)\r\n\r\n            print(img_name)\r\n\r\n            break\r\n","sub_path":"Spider/Spider_ksd/utils/snapshot_utils.py","file_name":"snapshot_utils.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"439749952","text":"# EXERCISE PAGE 51\nstudents = [\n    (\"John\", 
[\"CompSci\", \"Physics\"]),\n (\"Vusi\", [\"Maths\", \"CompSci\", \"Stats\"]),\n (\"Jess\", [\"CompSci\", \"Accounting\", \"Economics\", \"Management\"]),\n (\"Sara\", [\"InfSys\", \"Accounting\", \"Economics\", \"Comlaw\"]),\n (\"Zuki\", [\"Sociology\", \"Economica\", \"Law\", \"Stats\", \"Music\"])\n ]\n\n# Print All Students with a count of their courses.\n\nfor name, subject in students:\n print(name, \"takes\", len(subject), \"courses\")\n","sub_path":"list_students.py","file_name":"list_students.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"487770790","text":"import argparse\n\n\ndef get_args():\n argparser = argparse.ArgumentParser(description=__doc__)\n\n argparser.add_argument(\n '-c', '--config',\n dest='config',\n metavar='C',\n default='None',\n help='The Configuration file')\n \n argparser.add_argument(\n '-m','--model',\n dest='model',\n metavar=\"M\",\n default='None',\n help='The model to use. Select one of the following: Conv, dillution, lstm')\n args = argparser.parse_args()\n return args\n","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"216740464","text":"\"\"\"\nTest the 'memory find' command.\n\"\"\"\n\nfrom __future__ import print_function\n\n\nimport lldb\nfrom lldbsuite.test.lldbtest import *\nimport lldbsuite.test.lldbutil as lldbutil\nfrom lldbsuite.test.decorators import *\n\n\nclass MemoryFindTestCase(TestBase):\n\n mydir = TestBase.compute_mydir(__file__)\n\n def setUp(self):\n # Call super's setUp().\n TestBase.setUp(self)\n # Find the line number to break inside main().\n self.line = line_number('main.cpp', '// break here')\n\n def test_memory_find(self):\n \"\"\"Test the 'memory find' command.\"\"\"\n self.build()\n exe = self.getBuildArtifact(\"a.out\")\n self.runCmd(\"file \" + exe, CURRENT_EXECUTABLE_SET)\n\n # Break in main() after the variables are assigned values.\n lldbutil.run_break_set_by_file_and_line(\n self, \"main.cpp\", self.line, num_expected_locations=1, loc_exact=True)\n\n self.runCmd(\"run\", RUN_SUCCEEDED)\n\n # The stop reason of the thread should be breakpoint.\n self.expect(\"thread list\", STOPPED_DUE_TO_BREAKPOINT,\n substrs=['stopped', 'stop reason = breakpoint'])\n\n # The breakpoint should have a hit count of 1.\n self.expect(\"breakpoint list -f\", BREAKPOINT_HIT_ONCE,\n substrs=[' resolved, hit count = 1'])\n\n # Test the memory find commands.\n\n self.expect(\n 'memory find -s \"in const\" `stringdata` `stringdata+(int)strlen(stringdata)`',\n substrs=[\n 'data found at location: 0x',\n '69 6e 20 63',\n 'in const'])\n\n self.expect(\n 'memory find -e \"(uint8_t)0x22\" `&bytedata[0]` `&bytedata[15]`',\n substrs=[\n 'data found at location: 0x',\n '22 33 44 55 66'])\n\n self.expect(\n 'memory find -e \"(uint8_t)0x22\" `&bytedata[0]` `&bytedata[2]`',\n substrs=['data not found within the range.'])\n\n self.expect('memory find -s \"nothere\" `stringdata` `stringdata+5`',\n substrs=['data not found within the range.'])\n\n self.expect('memory find -s \"nothere\" `stringdata` `stringdata+10`',\n substrs=['data not found within the 
range.'])\n","sub_path":"packages/Python/lldbsuite/test/functionalities/memory/find/TestMemoryFind.py","file_name":"TestMemoryFind.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"635644531","text":"from django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\n\nfrom operator import itemgetter\n\nimport slack\n\n\nclass SlackWorkspace(models.Model):\n access_token = models.CharField(max_length=255)\n team_name = models.CharField(max_length=255)\n team_id = models.CharField(max_length=255, unique=True)\n team_domain = models.CharField(max_length=255, blank=True)\n\n class Meta:\n verbose_name = \"Slack Workspace\"\n verbose_name_plural = \"Slack Workspaces\"\n\n def __str__(self):\n return f\"{self.team_name}\"\n \n def update_team_info(self):\n \"\"\"\n Update team information\n \"\"\"\n client = slack.WebClient(token=self.access_token)\n response = client.team_info()\n team_domain = response.get(\"team\", {}).get(\"domain\")\n \n self.team_domain = team_domain\n self.save()\n \n def update_channels(self):\n \"\"\"\n Fetch all channels from Slack and create them\n \"\"\"\n client = slack.WebClient(token=self.access_token)\n response = client.conversations_list(\n exclude_archived=\"true\",\n limit=500,\n )\n\n channels = [SlackChannel(\n workspace=self,\n name=c.get(\"name\"),\n channel_id=c.get(\"id\")\n ) for c in sorted(response.get('channels', []), key=itemgetter('name'))]\n \n SlackChannel.objects.bulk_create(channels, ignore_conflicts=True)\n\n\nclass SlackChannel(models.Model):\n workspace = models.ForeignKey(\"SlackWorkspace\", related_name=\"channels\", on_delete=models.CASCADE)\n name = models.CharField(max_length=255)\n channel_id = models.CharField(max_length=255)\n\n class Meta:\n verbose_name = \"Channel\"\n verbose_name_plural = \"Channels\"\n unique_together = ('workspace', 'channel_id')\n\n def __str__(self):\n return f\"{self.workspace.team_name} - {self.name}\"\n \n def send_question_message(self, message):\n \"\"\"\n Sends a Yes/No question Slack message to this channel\n \"\"\"\n payload = [\n {\n \t\t\t\"type\": \"section\",\n \t\t\t\"text\": {\n \t\t\t\t\"type\": \"mrkdwn\",\n \t\t\t\t\"text\": message,\n \t\t\t}\n \t\t},\n {\n \t\t\t\"type\": \"actions\",\n \t\t\t\"elements\": [\n \t\t\t\t{\n \t\t\t\t\t\"type\": \"button\",\n \t\t\t\t\t\"text\": {\n \t\t\t\t\t\t\"type\": \"plain_text\",\n \t\t\t\t\t\t\"emoji\": True,\n \t\t\t\t\t\t\"text\": \"Yes\"\n \t\t\t\t\t},\n \t\t\t\t\t\"style\": \"primary\",\n \t\t\t\t\t\"value\": \"meeting-notes-yes\"\n \t\t\t\t},\n \t\t\t\t{\n \t\t\t\t\t\"type\": \"button\",\n \t\t\t\t\t\"text\": {\n \t\t\t\t\t\t\"type\": \"plain_text\",\n \t\t\t\t\t\t\"emoji\": True,\n \t\t\t\t\t\t\"text\": \"No\"\n \t\t\t\t\t},\n \t\t\t\t\t\"style\": \"danger\",\n \t\t\t\t\t\"value\": \"meeting-notes-no\"\n \t\t\t\t}\n \t\t\t]\n \t\t}\n ]\n \n self.send_message(message, blocks=payload)\n \n def send_ephemeral_message(self, user_id, message, blocks=None):\n \"\"\"\n Sends an ephemeral Slack message to the user\n \"\"\"\n client = slack.WebClient(token=self.workspace.access_token)\n\n response = client.chat_postEphemeral(\n channel=self.channel_id,\n user=user_id,\n text=message,\n blocks=blocks\n )\n \n def send_message(self, message, blocks=None):\n \"\"\"\n Sends a Slack message to this channel\n \"\"\"\n client = slack.WebClient(token=self.workspace.access_token)\n\n response = 
client.chat_postMessage(\n channel=self.channel_id,\n text=message,\n blocks=blocks,\n link_names=True\n )\n\n def create_file(self, title):\n \"\"\"\n Creates empty editable text file in channel\n \"\"\"\n client = slack.WebClient(token=self.workspace.access_token)\n\n response = client.files_upload(\n channels=self.channel_id,\n content=title,\n filetype=\"txt\",\n title=title\n )\n\n def get_all_channel_members(self):\n \"\"\"\n Returns all members of the channel.\n \"\"\"\n client = slack.WebClient(token=self.workspace.access_token)\n\n r = client.conversations_members(\n channel=self.channel_id,\n )\n\n member_ids = r.data[\"members\"]\n members = []\n\n for member_id in member_ids:\n response = client.users_info(user=member_id)\n members.append(\n {\n \"id\": response.data[\"user\"][\"id\"],\n \"name\": response.data[\"user\"][\"name\"]\n }\n )\n\n return members\n","sub_path":"mubasir/slack/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"503425264","text":"import logging\nimport os\nimport random\nimport threading\nimport time\nimport traceback\nimport math\n\nimport rpyc\nfrom rpyc.utils import helpers\nfrom rpyc.core import AsyncResultTimeout\nimport socket\n\nimport sys\n\n\nclass NodeRef:\n def __init__(self, name, hostAddr, port):\n self.name = name\n self.host = hostAddr\n self.port = port\n\n\n'''\nA RAFT RPC server class.\n\nPlease keep the signature of the is_leader() method unchanged (though\nimplement the body of that function correctly. You will need to add\nother methods to implement ONLY the leader election part of the RAFT\nprotocol.\n'''\n\n\nclass RaftNode(rpyc.Service):\n # used to calculate the seconds to wait before calling an election,\n # based on typical round-trip communication delay for contacting 1 active node\n ELECTION_TIMEOUT_BASELINE = 0.3\n # how long to wait when making a connection to another node before giving up and assuming the other node is down\n # also used to limit the delay if an RPC somehow got stuck partway through, but that's highly unlikely\n # since RPC execution time (on the other node) is 2-5 ms\n CONNECTION_TIMEOUT = 0.35\n\n NODE_STATE_FOLDER = \"node_states\"\n NODE_LOGS_FOLDER = \"node_logs\"\n\n BACKUP_SEPARATOR = \":\"\n TERM_BACKUP_KEY = \"term\"\n VOTE_BACKUP_KEY = \"vote\"\n CURR_LEADER_BACKUP_KEY = \"currLeader\"\n\n \"\"\"\n Initialize the class using the config file provided and also initialize\n any datastructures you may need.\n \"\"\"\n\n def __init__(self, configFilePath, nodeIdentityIndex):\n self.identityIndex = nodeIdentityIndex\n\n self.isCandidate = False\n self.currTerm = 0\n self.voteTarget = None # who the node is voting for in the current term\n self.currLeader = None # who has been elected leader in the current term\n\n # should these not be reentrant?\n self.stateFileLock = threading.RLock()\n self.stateLock = threading.RLock()\n\n self.nodeStateFilePath = os.path.join(RaftNode.NODE_STATE_FOLDER, \"node\" + str(self.identityIndex) + \".txt\")\n\n # set up logging\n nodeName = \"raftNode\" + str(nodeIdentityIndex)\n self.nodeLogger = logging.getLogger(nodeName)\n self.nodeLogger.setLevel(logging.DEBUG)\n\n if not os.path.exists(RaftNode.NODE_LOGS_FOLDER):\n os.makedirs(RaftNode.NODE_LOGS_FOLDER)\n\n logFilePath = os.path.join(RaftNode.NODE_LOGS_FOLDER, nodeName + \".log\")\n logFileHandler = logging.FileHandler(logFilePath)\n logFileHandler.setLevel(logging.DEBUG)\n 
consoleHandler = logging.StreamHandler()\n consoleHandler.setLevel(logging.WARN)\n formatter = logging.Formatter('%(asctime)s - %(threadName)s - %(levelname)s - %(message)s')\n logFileHandler.setFormatter(formatter)\n consoleHandler.setFormatter(formatter)\n self.nodeLogger.addHandler(logFileHandler)\n self.nodeLogger.addHandler(consoleHandler)\n\n if os.path.exists(self.nodeStateFilePath):\n with open(self.nodeStateFilePath, mode=\"r\") as nodeBackup:\n nodeStateBackup = self._load_node_backup(nodeBackup)\n\n storedTermStr = nodeStateBackup.get(RaftNode.TERM_BACKUP_KEY)\n if storedTermStr is not None:\n storedTermVal = int(storedTermStr)\n self.currTerm = storedTermVal\n\n storedVoteStr = nodeStateBackup.get(RaftNode.VOTE_BACKUP_KEY)\n if storedVoteStr is not None:\n storedVoteVal = int(storedVoteStr)\n if storedVoteVal >= 0:\n self.voteTarget = storedVoteVal\n\n storedCurrLeaderStr = nodeStateBackup.get(RaftNode.CURR_LEADER_BACKUP_KEY)\n if storedCurrLeaderStr is not None:\n storedCurrLeaderVal = int(storedCurrLeaderStr)\n if storedCurrLeaderVal >= 0:\n self.currLeader = storedCurrLeaderVal\n\n self.nodeLogger.info(\"loading backup of prior node state from disk:\\n\"\n \"term %d; voteTarget %d (-1 standing for None); \"\n \"current leader %d (-1 standing for None)\", self.currTerm,\n self.voteTarget or -1, self.currLeader or -1)\n\n self.otherNodes = []\n with open(configFilePath) as nodesConfigFile:\n nodesConfigFile.readline() # ignore first line with node count\n\n for nodeInd, nodeLine in enumerate(nodesConfigFile):\n if nodeInd != nodeIdentityIndex:\n otherNodeTerms = nodeLine.split(\":\")\n otherNodeName = otherNodeTerms[0].strip()\n otherNodeHost = otherNodeTerms[1].strip()\n otherNodePort = otherNodeTerms[2].strip()\n otherNodePort = int(otherNodePort)\n otherNode = NodeRef(otherNodeName, otherNodeHost, otherNodePort)\n self.otherNodes.append(otherNode)\n\n numOtherNodes = len(self.otherNodes)\n numNodes = 1+ numOtherNodes\n\n\n #subtracting 1 because this node already provides itself with 1 vote when it's a candidate\n if numNodes % 2 == 0:\n numVotesNeeded = numNodes/2 + 1 -1\n else:\n numVotesNeeded = math.ceil(numNodes/2) -1\n # based on worst-case where only a bare majority of nodes are still alive & one or more of those live nodes\n # is after all of the dead ones in the list\n minimumElectionTimeout = (numOtherNodes-numVotesNeeded)*RaftNode.CONNECTION_TIMEOUT + \\\n RaftNode.ELECTION_TIMEOUT_BASELINE*numVotesNeeded\n #todo try 0.5-1.5 rather than 1-2 or 0.75-1.75\n self.electionTimeout = (1 + random.random())*minimumElectionTimeout\n self._restart_timer()\n\n self.heartbeatInterval = 0.5*minimumElectionTimeout\n\n self.nodeLogger.critical(\"I am node %d (election timeout %f) and I just finished being constructed, with %d fellow nodes\",\n self.identityIndex, self.electionTimeout, len(self.otherNodes))\n for otherNodeDesc in self.otherNodes:\n self.nodeLogger.debug(\"other node %s is at host %s and port %d\", otherNodeDesc.name, otherNodeDesc.host,\n otherNodeDesc.port)\n\n def _save_node_state(self):\n '''writes node state to disk'''\n # saveStartTime = time.time()\n if not os.path.exists(RaftNode.NODE_STATE_FOLDER):\n os.makedirs(RaftNode.NODE_STATE_FOLDER)\n\n self.stateFileLock.acquire()\n\n with open(self.nodeStateFilePath, mode=\"w\") as nodeStateStorageFile:\n termLine = RaftNode.TERM_BACKUP_KEY + RaftNode.BACKUP_SEPARATOR + str(self.currTerm) + \"\\n\"\n nodeStateStorageFile.write(termLine)\n voteTargetIndex = self.voteTarget if self.voteTarget is not None else -1\n 
voteLine = RaftNode.VOTE_BACKUP_KEY + RaftNode.BACKUP_SEPARATOR + str(voteTargetIndex) + \"\\n\"\n nodeStateStorageFile.write(voteLine)\n\n currLeaderIndex = self.currLeader or -1\n currLeaderLine = RaftNode.CURR_LEADER_BACKUP_KEY + RaftNode.BACKUP_SEPARATOR + str(currLeaderIndex) + \"\\n\"\n nodeStateStorageFile.write(currLeaderLine)\n\n nodeStateStorageFile.flush()\n os.fsync(nodeStateStorageFile.fileno())\n\n self.stateFileLock.release()\n\n # saveDuration = time.time() - saveStartTime\n # self.nodeLogger.debug(\"saving node state took %f seconds\", saveDuration)\n\n def _load_node_backup(self, backupFile):\n '''reads a list of key-value pairs from the file containing a backup of the node's state\n :return: that list of key-value pairs of state information'''\n backupDict = {}\n\n for backupLine in backupFile:\n if backupLine != \"\":\n lineTokens = backupLine.split(RaftNode.BACKUP_SEPARATOR)\n if len(lineTokens) == 2:\n currKey = lineTokens[0].strip()\n currVal = lineTokens[1].strip()\n\n backupDict[currKey] = currVal\n else:\n self.nodeLogger.error(\"malformed line in node backup file: %s\", backupLine)\n\n return backupDict\n\n def _restart_timer(self):\n '''resets the election timer'''\n self.lastContactTimestamp = time.time()\n # electionTimerStartupStartTime = time.time()\n electionTimer = threading.Timer(self.electionTimeout, self.check_for_election_timeout)\n electionTimer.start()\n\n # electionTimerStartupDuration = time.time() - electionTimerStartupStartTime\n # self.nodeLogger.debug(\"starting up an election timer took %f seconds\", electionTimerStartupDuration)\n\n\n\n def exposed_is_leader(self):\n '''returns whether this node is a leader\n Meant to be called as RPC\n :return boolean: whether this node is a leader\n '''\n return self.currLeader == self.identityIndex\n\n def exposed_append_entries(self, leaderTerm, leaderIndex):\n '''tries to reset this node's election timer on behalf of an RPC-caller node that thinks it's the current leader\n Meant to be called as RPC\n :param leaderTerm int: the term which the caller thinks is most recent\n :param leaderIndex int: the index of the node which thinks it's the leader & is sending a heartbeat to this node\n :return tuple(int, boolean): what term this node thinks is most recent and whether this node is recognizing\n the caller of the RPC as the leader\n '''\n willAppendEntries = False\n\n appendEntriesStartTime = time.time()\n\n self.nodeLogger.debug(\"about to acquire LOCK to execute append_entries RPC for leader node %d \"\n \"which was in term %d\", leaderIndex, leaderTerm)\n self.stateLock.acquire()\n self.nodeLogger.debug(\"successfully acquired LOCK to execute append_entries RPC for leader node %d \"\n \"which was in term %d\", leaderIndex, leaderTerm)\n\n\n termAtStartOfAppendEntries = self.currTerm\n\n if leaderTerm < self.currTerm:\n self.nodeLogger.info(\n \"while in term %d, received append_entries() from stale leader %d which thought it was in term %d\",\n self.currTerm, leaderIndex, leaderTerm)\n else:\n self.nodeLogger.debug(\n \"while in term %d, executing append_entries on behalf of node %d, the leader in term %d\",\n self.currTerm, leaderIndex, leaderTerm)\n\n self._restart_timer()\n\n if leaderTerm > self.currTerm:\n if self.voteTarget is not None:\n self.nodeLogger.warning(\"was in election for term %d, voting for candidate node %d, \"\n \"when received request to append entries in later term %d\", self.currTerm,\n self.voteTarget, leaderTerm)\n self.voteTarget = None\n\n self.nodeLogger.critical(\"was in 
term %d with candidate status %s and current leader index %d when \"\n \"received heartbeat from leader node %d in higher term %d\", self.currTerm,\n self.isCandidate, self.currLeader or -1, leaderIndex, leaderTerm)\n self.isCandidate = False\n self.currLeader = None\n self.currTerm = leaderTerm\n\n if self.currLeader != leaderIndex:\n self.nodeLogger.critical(\"acknowledging node %d as the leader for term %d\", leaderIndex, self.currTerm)\n\n self.voteTarget = None\n self.currLeader = leaderIndex\n self._save_node_state()\n willAppendEntries = True\n\n self.nodeLogger.debug(\"releasing LOCK after executing append_entries RPC for leader node %d \"\n \"which was in term %d\", leaderIndex, leaderTerm)\n self.stateLock.release()\n\n appendEntriesDuration = time.time() - appendEntriesStartTime\n self.nodeLogger.debug(\"while starting in term %d, executing append_entries for leader node %d \"\n \"which was in term %d took %f seconds\", termAtStartOfAppendEntries, leaderIndex,\n leaderTerm, appendEntriesDuration)\n\n return (self.currTerm, willAppendEntries)\n\n def call_append_entries(self, otherNodeDesc):\n '''send an append_entries/heartbeat 'message' to another node by calling that RPC on that node\n :param otherNodeDesc NodeRef: a description of the other node\n :return tuple(int, boolean): what term the other node thinks is most recent and whether the other node accepts\n this node as the leader\n '''\n # assert self.exposed_is_leader() this might not always be true because of concurrency\n appendEntriesRetVal = None\n\n heartbeatRpcStartTime = time.time()\n\n try:\n nodeConnStream = rpyc.SocketStream.connect(otherNodeDesc.host, otherNodeDesc.port,\n timeout= RaftNode.CONNECTION_TIMEOUT, attempts= 1)\n nodeConn = rpyc.connect_stream(nodeConnStream)\n otherNodeRoot = nodeConn.root\n timedAppendEntriesProxy = helpers.timed(otherNodeRoot.append_entries, RaftNode.CONNECTION_TIMEOUT)\n appendEntriesPromise = timedAppendEntriesProxy(self.currTerm, self.identityIndex)\n appendEntriesRetVal = appendEntriesPromise.value\n except AsyncResultTimeout:\n self.nodeLogger.info(\"connection timed out while leader node %d in term %d tried to send append_entries \"\n \"to node %s\", self.identityIndex, self.currTerm, otherNodeDesc.name)\n except (socket.timeout, ConnectionRefusedError):\n self.nodeLogger.info(\"leader node %d in term %d was unable to connect to another node %s\",\n self.identityIndex, self.currTerm, otherNodeDesc.name)\n except EOFError:\n self.nodeLogger.info(\"leader node %d in term %d lost connection to another node %s\",\n self.identityIndex, self.currTerm, otherNodeDesc.name)\n except Exception as e:\n self.nodeLogger.error(\"Exception for leader node %d in term %d: %s\\n%s\\n%s\",\n self.identityIndex, self.currTerm, e.__doc__, str(e), traceback.format_exc())\n\n heartbeatRpcDuration = time.time() - heartbeatRpcStartTime\n self.nodeLogger.debug(\"sending append_entries to other node %s took %f seconds\", otherNodeDesc.name,\n heartbeatRpcDuration)\n\n\n return appendEntriesRetVal\n\n def exposed_request_vote(self, candidateTerm, candidateIndex):\n '''tries to get this node's vote in an election on behalf of an RPC-caller node which is a candidate in that election\n Meant to be called as RPC\n\n :param candidateTerm: the term which the caller/candidate node thinks is most recent & which its election is in\n :param candidateIndex: the index of that caller/candidate node\n :return tuple(int, boolean): what term this node thinks is most recent and whether this node is voting for the\n 
caller candidate node\n '''\n willVote = False\n\n voteRequestStartTime = time.time()\n\n self.nodeLogger.debug(\"about to acquire LOCK to execute request_vote RPC for candidate node %d \"\n \"which was in term %d\", candidateIndex, candidateTerm)\n self.stateLock.acquire()\n self.nodeLogger.debug(\"successfully acquired LOCK to execute request_vote RPC for candidate node %d \"\n \"which was in term %d\", candidateIndex, candidateTerm)\n\n termAtStartOfVoteRequest = self.currTerm\n\n if candidateTerm < self.currTerm:\n self.nodeLogger.info(\"while in term %d, received request_vote() from stale candidate %d \"\n \"which thought it was in term %d\", self.currTerm, candidateIndex, candidateTerm)\n else:\n self.nodeLogger.debug(\n \"while in term %d, executing request_vote on behalf of node %d, a candidate in term %d\",\n self.currTerm, candidateIndex, candidateTerm)\n if candidateTerm > self.currTerm:\n if self.voteTarget is not None:\n self.nodeLogger.warning(\"was in election for term %d, voting for candidate node %d, \"\n \"when received request for vote in later term %d\", self.currTerm,\n self.voteTarget, candidateTerm)\n self.voteTarget = None\n\n self.nodeLogger.critical(\"was in term %d with candidate status %s and current leader index %d when \"\n \"received request for vote in higher term %d\", self.currTerm, self.isCandidate,\n self.currLeader or -1, candidateTerm)\n self.isCandidate = False\n self.currLeader = None\n self.currTerm = candidateTerm\n self._save_node_state()\n self._restart_timer()\n else:\n if self.exposed_is_leader():\n self.nodeLogger.warning(\"elected leader %d received request_vote() from candidate %d \"\n \"when both are in term %d\", self.identityIndex, candidateIndex,\n self.currTerm)\n elif self.isCandidate:\n self.nodeLogger.warning(\"candidate node %d received request_vote() from other candidate %d \"\n \"when both are in term %d\", self.identityIndex, candidateIndex,\n self.currTerm)\n elif self.currLeader is not None:\n self.nodeLogger.warning(\"follower node %d received request_vote() from candidate node %d when both \"\n \"are in term %d but another node %d has already been elected leader\",\n self.identityIndex, candidateIndex, self.currTerm, self.currLeader)\n\n if not self.isCandidate and self.currLeader is None and self.voteTarget is None:\n self.nodeLogger.critical(\"casting vote for candidate node %d in term %d\", candidateIndex, self.currTerm)\n self._restart_timer()\n\n self.voteTarget = candidateIndex\n self._save_node_state()\n willVote = True\n\n self.nodeLogger.debug(\"releasing LOCK after executing request_vote RPC for candidate node %d \"\n \"which was in term %d\", candidateIndex, candidateTerm)\n self.stateLock.release()\n\n voteRequestDuration = time.time() - voteRequestStartTime\n self.nodeLogger.debug(\"while starting in term %d, executing request_vote for candidate node %d which was in term %d \"\n \"took %f seconds\", termAtStartOfVoteRequest, candidateIndex, candidateTerm, voteRequestDuration)\n\n return (self.currTerm, willVote)\n\n def call_request_vote(self, otherNodeDesc):\n '''sends a vote request message to some other node by calling that RPC on that node\n :param otherNodeDesc NodeRef: a description of the other node\n :return tuple(int, boolean): what term the other node thinks is most recent and\n whether the other node will vote for this one\n '''\n assert self.isCandidate\n requestVoteRetVal = None\n\n voteRequestRpcStartTime = time.time()\n\n try:\n nodeConnStream = rpyc.SocketStream.connect(otherNodeDesc.host, 
otherNodeDesc.port,\n timeout=RaftNode.CONNECTION_TIMEOUT, attempts=1)\n nodeConn = rpyc.connect_stream(nodeConnStream)\n otherNodeRoot = nodeConn.root\n timedRequestVoteProxy = helpers.timed(otherNodeRoot.request_vote, RaftNode.CONNECTION_TIMEOUT)\n voteRequestPromise = timedRequestVoteProxy(self.currTerm, self.identityIndex)\n requestVoteRetVal = voteRequestPromise.value\n except AsyncResultTimeout:\n self.nodeLogger.info(\"connection timed out while candidate node %d in term %d tried to send request_vote \"\n \"to node %s\", self.identityIndex, self.currTerm, otherNodeDesc.name)\n except (socket.timeout, ConnectionRefusedError):\n self.nodeLogger.info(\"candidate node %d in term %d was unable to connect to another node %s\",\n self.identityIndex, self.currTerm, otherNodeDesc.name)\n except EOFError:\n self.nodeLogger.info(\"candidate node %d in term %d lost connection to another node %s\",\n self.identityIndex, self.currTerm, otherNodeDesc.name)\n except Exception as e:\n self.nodeLogger.error(\"Exception for candidate node %d in term %d: %s\\n%s\\n%s\",\n self.identityIndex, self.currTerm, e.__doc__, str(e), traceback.format_exc())\n\n voteRequestRpcDuration = time.time() - voteRequestRpcStartTime\n self.nodeLogger.debug(\"sending vote request to node %s took %f seconds\", otherNodeDesc.name,\n voteRequestRpcDuration)\n\n return requestVoteRetVal\n\n def check_for_election_timeout(self):\n '''checks whether the election timer has actually expired and if so starts an election in a new term'''\n self.nodeLogger.debug(\"about to acquire LOCK to check for election timeout\")\n self.stateLock.acquire()\n self.nodeLogger.debug(\"successfully acquired LOCK to check for election timeout\")\n\n if self.exposed_is_leader():\n self.nodeLogger.info(\"this node is ignoring an election timeout because it's the leader and so releases the LOCK\")\n self.stateLock.release()\n else:\n self.nodeLogger.debug(\"checking whether election should be started\")\n if (time.time() - self.lastContactTimestamp) > self.electionTimeout:\n self.isCandidate = True\n self.currLeader = None\n self.voteTarget = self.identityIndex\n self.currTerm += 1\n self._save_node_state()\n self._restart_timer()\n\n self.nodeLogger.critical(\"starting election for the new term %d\", self.currTerm)\n electionTerm = self.currTerm\n\n\n numVotes = 1\n numNodes = 1 + len(self.otherNodes)\n\n nodesToContact = self.otherNodes.copy()\n\n self.nodeLogger.debug(\"about to contact the %d other nodes\", len(nodesToContact))\n\n self.nodeLogger.debug(\"releasing the LOCK after starting election for term %d\", self.currTerm)\n self.stateLock.release()\n\n while len(nodesToContact) > 0:\n currOtherNode = nodesToContact.pop(0)\n\n self.nodeLogger.debug(\"about to acquire LOCK to send vote request to node %s\", currOtherNode.name)\n self.stateLock.acquire()\n self.nodeLogger.debug(\"successfully acquired LOCK to send vote request to node %s\", currOtherNode.name)\n\n if not self.isCandidate or self.currLeader is not None or electionTerm != self.currTerm:\n self.nodeLogger.debug(\"releasing LOCK (before contacting node %s) as part of terminating the \"\n \"election which was running for term %d\", currOtherNode.name, electionTerm)\n self.stateLock.release()\n break\n\n self.nodeLogger.debug(\"releasing LOCK just before requesting vote from node %s\", currOtherNode.name)\n self.stateLock.release()\n\n\n self.nodeLogger.debug(\"sending vote request to node %s, with %d more nodes \"\n \"to be contacted afterwards\", currOtherNode.name, 
len(nodesToContact))\n nodeVoteResponse = self.call_request_vote(currOtherNode)\n\n\n self.nodeLogger.debug(\"acquiring LOCK in order to process results of requesting vote from node %s\",\n currOtherNode.name)\n self.stateLock.acquire()\n self.nodeLogger.debug(\"successfully acquired LOCK in order to process results of requesting vote from node %s\",\n currOtherNode.name)\n\n if not self.isCandidate or self.currLeader is not None or electionTerm != self.currTerm:\n self.nodeLogger.debug(\"releasing LOCK (after contacting node %s) as part of terminating the \"\n \"election which was running for term %d\", currOtherNode.name, electionTerm)\n self.stateLock.release()\n break\n\n\n if nodeVoteResponse is None:\n nodesToContact.append(currOtherNode)\n elif nodeVoteResponse[1]:\n numVotes += 1\n self.nodeLogger.critical(\"received vote from other node %s in term %d\", currOtherNode.name,\n self.currTerm)\n else:\n responderTerm = nodeVoteResponse[0]\n if responderTerm > self.currTerm:\n self.nodeLogger.critical(\"terminating election for term %d because a vote request response\"\n \" informed this node of higher term %d\", self.currTerm, responderTerm)\n self.isCandidate = False\n self.voteTarget = None\n self.currLeader = None\n self.currTerm = responderTerm\n self._save_node_state()\n self._restart_timer()\n\n # possible race condition with _leaderStatus?\n if numVotes > numNodes / 2.0:\n self.nodeLogger.critical(\"becoming the leader for the term %d with %d votes!!!\",\n self.currTerm, numVotes)\n self.isCandidate = False\n self.voteTarget = None\n self.currLeader = self.identityIndex\n self._save_node_state()\n\n #releases lock before control flow leaves this function\n self.nodeLogger.debug(\"releasing the LOCK after winning election for term %d\", self.currTerm)\n self.stateLock.release()\n\n self.send_heartbeats()\n\n #handles lock releasing in all cases except the one where this node just won an election\n if numVotes <= numNodes / 2.0:\n self.nodeLogger.debug(\"releasing LOCK after contacting a node %s\", currOtherNode.name)\n self.stateLock.release()\n else:\n self.nodeLogger.debug(\"releasing LOCK after finding that it isn't time for an election yet\")\n self.stateLock.release()\n\n def send_heartbeats(self):\n '''as leader, send heartbeat/append_entries messages to all other nodes\n so that they don't start elections in new terms'''\n self.stateLock.acquire()\n\n if not self.exposed_is_leader():\n self.nodeLogger.warning(\"in term %d, node attempted to send heartbeats out despite not being the leader (and releases the LOCK)\",\n self.currTerm)\n self.stateLock.release()\n else:\n heartbeatTimer = threading.Timer(self.heartbeatInterval, self.send_heartbeats)\n heartbeatTimer.start()\n\n leaderTerm = self.currTerm\n\n nodesToContact = self.otherNodes.copy()\n\n self.nodeLogger.debug(\"release the LOCK just before contacting the %d other nodes\", len(nodesToContact))\n\n self.stateLock.release()\n\n while len(nodesToContact) > 0:\n currOtherNode = nodesToContact.pop(0)\n\n self.nodeLogger.debug(\"acquiring the LOCK to send heartbeat to node %s\", currOtherNode.name)\n self.stateLock.acquire()\n self.nodeLogger.debug(\"successfully acquired the LOCK to send heartbeat to node %s\", currOtherNode.name)\n\n if not self.exposed_is_leader():\n self.nodeLogger.info(\"former leader (from term %d) is releasing the LOCK rather than try to send any more heartbeats (before trying to contact node %s)\", leaderTerm, currOtherNode.name)\n self.stateLock.release()\n break\n\n 
self.nodeLogger.debug(\"releasing LOCK just before sending heartbeat to node %s\", currOtherNode.name)\n                self.stateLock.release()\n\n\n                self.nodeLogger.debug(\"sending heartbeat to node %s, with %d more nodes to be contacted \"\n                                      \"afterwards\", currOtherNode.name, len(nodesToContact))\n                nodeHeartbeatResponse = self.call_append_entries(currOtherNode)\n\n\n                self.nodeLogger.debug(\"acquiring the LOCK to process node %s 's response to a heartbeat\", currOtherNode.name)\n                self.stateLock.acquire()\n                self.nodeLogger.debug(\"successfully acquired the LOCK to process node %s 's response to a heartbeat\", currOtherNode.name)\n\n                if not self.exposed_is_leader():\n                    self.nodeLogger.info(\"former leader (from term %d) is releasing the LOCK rather than try to send any more heartbeats (after trying to contact node %s)\", leaderTerm, currOtherNode.name)\n                    self.stateLock.release()\n                    break\n\n\n                if nodeHeartbeatResponse is None:\n                    nodesToContact.append(currOtherNode)\n                else:\n                    responderTerm = nodeHeartbeatResponse[0]\n                    if responderTerm > self.currTerm:\n                        self.nodeLogger.critical(\"this node was leader in term %d but is abandoning that status because \"\n                                                 \"a heartbeat response informed it of a higher term %d\",\n                                                 self.currTerm, responderTerm)\n                        self.currLeader = None\n                        self.currTerm = responderTerm\n                        self._restart_timer()\n\n                self.nodeLogger.debug(\"releasing the LOCK after sending heartbeat to node %s\", currOtherNode.name)\n                self.stateLock.release()\n\n\nif __name__ == '__main__':\n    from rpyc.utils.server import ThreadPoolServer\n\n    nodeNum = -1\n    currNodePort = -1\n\n    configFileName = sys.argv[1]\n\n    currNodeIndexStr = sys.argv[2]\n    currNodeIndex = int(currNodeIndexStr)\n\n    with open(configFileName) as configFile:\n        nodeNumLine = configFile.readline()\n        if nodeNumLine[:2] == \"N:\":\n            nodeNumStr = nodeNumLine[2:]\n            nodeNumStr = nodeNumStr.strip()\n            nodeNum = int(nodeNumStr)\n        else:\n            print(\"invalid config file- bad initial node count line: %s\" % nodeNumLine)\n            raise Exception(\"bad config file\")\n\n        if currNodeIndex < nodeNum:\n            nodeDescriptions = configFile.readlines()\n            if len(nodeDescriptions) == nodeNum:\n                currNodeLine = nodeDescriptions[currNodeIndex]\n                nodeTerms = currNodeLine.split(\":\")\n                nodePortStr = nodeTerms[2].strip()\n                currNodePort = int(nodePortStr)\n            else:\n                print(\"invalid config file- wrong number of lines of node descriptions %s\" % nodeNumLine)\n                raise Exception(\"bad config file\")\n        else:\n            print(\"unacceptably high index %d for node system which only has %d nodes\" % (currNodeIndex, nodeNum))\n            raise Exception(\"bad node index\")\n\n    if currNodePort > 0:\n        server = ThreadPoolServer(RaftNode(configFileName, currNodeIndex), port=currNodePort)\n        server.start()\n","sub_path":"raftnode.py","file_name":"raftnode.py","file_ext":"py","file_size_in_byte":32612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"584724606","text":"#!/usr/bin/env python3\n\nimport sys\nfrom cpu import *\n\n\n \nif len(sys.argv) != 2:\n    print(f\"usage: {sys.argv[0]} filename\", file = sys.stderr)\n    sys.exit(1)\n\ncpu = CPU()\ncpu.load(sys.argv[1])\ncpu.run()","sub_path":"ls8/ls8.py","file_name":"ls8.py","file_ext":"py","file_size_in_byte":193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"69585448","text":"# coding=utf-8\n# https://leetcode-cn.com/problems/subsets\n\n\nclass Solution(object):\n    def subsets(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: List[List[int]]\n        \"\"\"\n        ans = []\n        for i in 
range(len(nums) - 1, -1, -1):\n            tmp = nums[i]\n            if ans:\n                for j in range(0, len(ans), 1):\n                    ans.append(ans[j] + [tmp])\n                ans.append([tmp])\n            else:\n                ans = [[tmp]]\n        ans.append([])\n        return ans\n","sub_path":"78.py","file_name":"78.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"302146737","text":"o=open(\"a.txt\",\"r\")\nnl=0\nnw=0\nnc=0\nfor line in o:\n\tnl += 1\n\tline=line.strip(\"\\n\")\n\twords=line.split()\n\tnw+=len(words)\n\tnc+=len(line.replace(\" \",\"\"))\no.close()\nprint(\"line\",nl,\"words\",nw,\"char\",nc)","sub_path":"Sem3/Python/Scripts/Unit 4/countword.py","file_name":"countword.py","file_ext":"py","file_size_in_byte":196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"95868931","text":"#coding:utf-8\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport jieba\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nimport re\r\n\r\n# label reviews as positive or negative based on the star rating\r\ndef add_label(star):\r\n    if star>'3':\r\n        return 1\r\n    else:\r\n        return 0\r\n\r\n# tokenize with jieba\r\ndef chinese_word_cut(data):\r\n    return \" \".join(jieba.cut(data))  # join tokens with spaces so CountVectorizer can split them\r\n\r\n# read the stopword list\r\ndef get_custom_stopwords(filename):\r\n    with open(filename,'r',encoding='utf-8') as f:\r\n        stopwords = f.read()\r\n    stopwords_list = stopwords.split('\\n')\r\n    custom_stopwords_list = [i for i in stopwords_list]\r\n    return custom_stopwords_list\r\n\r\n# build the word count vectorizer\r\ndef creat_words_vector(filename):\r\n    stopwords = get_custom_stopwords(filename)\r\n    vector = CountVectorizer(max_df = 0.8, \r\n                             min_df = 3, \r\n                             token_pattern=u'(?u)\\\\b[^\\\\d\\\\W]\\\\w+\\\\b', \r\n                             stop_words=frozenset(stopwords))\r\n    return vector\r\n    \r\ndef main():\r\n    data_filename = 'data.csv'\r\n    stopwords_filename = 'stopwords.txt'\r\n    data = pd.read_csv(data_filename,encoding='utf-8',dtype=str)\r\n    data = data.astype(str)\r\n    # add a column marking positive vs. negative reviews\r\n    data['sentiment'] = data.star.apply(add_label)\r\n    # add a column holding the tokenized comments\r\n    data['cut_comment'] = data.comment.apply(chinese_word_cut)\r\n    # build the vectorizer from the stopword list\r\n    vector = creat_words_vector(stopwords_filename)\r\n    x = data['cut_comment']\r\n    y = data['sentiment']\r\n    # split into training and test sets\r\n    x_train,x_test,y_train,y_test = train_test_split(x, y, test_size=0.2, random_state=22)\r\n    nb = MultinomialNB()\r\n    x_train_vect = vector.fit_transform(x_train)\r\n    nb.fit(x_train_vect, y_train)\r\n    x_test_vect = vector.transform(x_test)\r\n    print(nb.score(x_test_vect, y_test)) \r\n    \r\nmain()\r\n","sub_path":"sentiment_analysis.py","file_name":"sentiment_analysis.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"123182988","text":"######################################\n#\n# This script takes a prerecorded dataset of RSS values from listenSomeLinks_gen.py\n# and runs it through the hmm border crossing localization system. 
The script\n# writes to file the times at which the system detects a short segment crossing.\n\n\n#####################################\n# Import libraries\n#####################################\nimport numpy as np\nimport hmm_class_v11 as anHmmClass\nimport circ_buff_class as aCircBuff\nimport accuracy_class as aAccuracyClass\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n########################\n# Import data\n########################\n\n# True Crossing Data - holds the time and state of the true crossings\ntrue_file = 'data/xing_data/lola/crossing_times_mod_2015_07_21.txt'\n \n# experimental crossing RSS data\nf_rss_all = open('data/rss_data/lola/original/rss_data_2015_07_21_all.txt','r')\n\n\n# # True Crossing Data - holds the time and state of the true crossings\n# true_file = 'data/xing_data/park/crossing_times_mod_2015_07_23.txt'\n# \n# # experimental crossing RSS data\n# f_rss_all = open('data/rss_data/park/rss_data_2015_07_23_all.txt','r')\n\n\n######################################\n# Crossing Experiment\n######################################\nnumNodes = 6\nnumStates = numNodes\nuseChNum = [1] #[0,1,2,3]\nnumCh = len(useChNum)\nnum_links = numCh*numNodes*(numNodes-1)/2\n \n#######################################\n# HMM buffer sizes\n#######################################\nltb_len = 51 # length of the long term buffer\nstate_len = 5 # length of the long term buffer\n \n################################\n# Parameters for the HMM\n################################\nA = np.zeros((numStates,numStates))\nA[0,0] = 0.75\nA[0,1:] = (1-A[0,0])/(numStates-1)\nA[1:,0] = 0.4\nfor ii in range(1,int(numStates-1)+1):\n A[ii,ii] = 0.6\npi = np.zeros((numStates,1))\npi[0,0] = 0.9\npi[1:,0] = (1-pi[0,0])/(numStates-1)\nV = np.array(range(-105, -20) + [127]) # possible RSS values\nmin_p = 0.0001\np127 = 0.03\nbuff_len = 10\n\nmy_hmm = anHmmClass.myHmmClass(A,pi,V,min_p,p127,buff_len,num_links,numCh,ltb_len,state_len)\n\n# create an accuracy object\npm_time = 3.0\naccObj = aAccuracyClass.myAccuracyClass(pm_time)\n\n################################\n# Other parameters for the program\n################################\nxing_times = [] # a list of the crossing times\ntimes_all = []\nxing_states = []\nstates_all = []\nrss_link = []\n \n################################\n# Loop to get the first calibration.\n# During this time, the most likely \n# state is nan\n################################\nfor line in f_rss_all:\n \n lineList = [float(i) for i in line.split()]\n time_ms = lineList.pop(-1) # remove last element\n rss = np.array(lineList)[1::2]\n \n # use only the specified link\n rss = rss[num_links*useChNum[0] + np.arange(num_links)]\n \n if rss[5] == 127:\n rss_link.append(np.nan)\n else:\n rss_link.append(rss[5])\n \n my_hmm.observe(rss)\n states_all.append(my_hmm.get_fb_state())\n times_all.append(time_ms)\n \n if np.logical_not(np.isnan(my_hmm.get_fb_state())) & (my_hmm.get_fb_state() != 0):\n xing_times.append(time_ms)\n xing_states.append(my_hmm.get_fb_state())\n\nxing_data = np.loadtxt(true_file)\n\naccObj.get_accuracy(np.array([xing_times,xing_states]).T, true_file, np.array(times_all))\naccObj.print_summary()\n\nf, axarr = plt.subplots(2, 
sharex=True)\n\naxarr[0].plot(times_all,states_all)\naxarr[0].plot(xing_data[:,0],xing_data[:,1],'rx')\naxarr[0].grid()\naxarr[1].plot(times_all,rss_link)\nplt.show()\n\n\n\n\n\n\n\n\n\n","sub_path":"run_hmm_for_xing_times_v2.py","file_name":"run_hmm_for_xing_times_v2.py","file_ext":"py","file_size_in_byte":3550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"446239978","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/9/2 4:38 PM\n# @Author : Criss Chan\n# __from__='EmmaTools https://github.com/crisschan/EMMATools'\n# @File : image_chage.py\n# @Software: PyCharm\n# @instruction:所有图片的转换类\n\nfrom PIL import Image\nimport os\nclass ImageChange(object):\n def __init__(self,image_inpath,image_outpath):\n '''\n\n :param image_path: 图片的路径\n '''\n self.image_inpath=image_inpath\n self.image_outpath=image_outpath\n\n def IsValidImage(self,img_path):\n '''\n 判断文件是否为有效(完整)的图片\n :param img_path:图片路径\n :return:True:有效 False:无效\n '''\n bValid = True\n try:\n Image.open(img_path).verify()\n except:\n bValid = False\n return bValid\n def ChangeImage(self):\n '''\n 转换图片格式\n :param img_path:图片路径\n :return: True:成功 False:失败\n '''\n if self.IsValidImage(self.image_inpath):\n try:\n str = self.image_inpath.rsplit(\".\", 1)\n output_img_path = str[0] + \".jpg\"\n im = Image.open(self.image_inpath)\n im.save(self.image_outpath)\n return True\n except:\n return False\n else:\n return False\n#\n# if __name__ == '__main__':\n# ff = os.getcwd()\n# for root, dirs, files in os.walk(ff+'/in/'):\n# for afile in files:\n# ic = ImageChange(ff+'/in/'+afile,ff+'/out/')\n# ic.ChangeImage()","sub_path":"image_change.py","file_name":"image_change.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"83138219","text":"# (C) 2015 Kyoto University Mechatronics Laboratory\n# Released under the GNU General Public License, version 3\n\"\"\"\nServer for Yozakura motor commands.\n\nAfter connecting with the client, the server receives requests and responds\naccordingly. Can read joystick input, and connect to multiple clients\nsimultaneously.\n\n\"\"\"\nimport logging\nimport pickle\nimport socket\nimport socketserver\nimport time\n\nclass Handler(socketserver.BaseRequestHandler):\n \"\"\"\n A handler for connection requests.\n\n It gets called by the server automatically whenever a new client connects.\n\n Attributes\n ----------\n request : socket\n Handles communication with the client\n wheels_single_stick : bool\n Whether the wheels are controlled by only the left analog stick.\n reverse mode : bool\n Whether reverse mode is engaged. In reverse mode, the x- and y- inputs\n are both inverted.\n\n \"\"\"\n def __init__(self, request, client_address, server):\n self._logger = logging.getLogger(\"{client_ip}_handler\".format(\n client_ip=client_address[0]))\n self._logger.debug(\"New handler created\")\n super().__init__(request, client_address, server)\n\n def handle(self):\n \"\"\"\n Handle the requests to the server.\n\n Once connected to the client, the handler loops and keeps listening for\n requests. 
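(For\n        illustration, a client could open a TCP connection, send b"speeds",\n        and unpickle the reply bytes.)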
This allows us to find out when the client is disconnected,\n and also allows for a much higher communication rate with the robot.\n\n Pickle is used on the server and client sides to transfer Python\n objects.\n\n Requests handled:\n - state : Reply with the state of the controller.\n - inputs : Reply with the raw input data from the state.\n - speeds : Perform calculations and send the required motor speed\n data.\n - echo : Reply with what the client has said.\n - print : ``echo``, and print to ``stdout``.\n\n \"\"\"\n self._logger.info(\"Connected to client\")\n self.request.settimeout(0.5) # seconds\n self.wheels_single_stick = False\n self.reverse_mode = False\n self._sticks_timestamp = self._reverse_timestamp = time.time()\n\n # TODO(murata): Remove everything related to _sensors_client and the\n # try/finally block once you add your udp server.\n self._sensors_client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self._sensors_client.bind((\"\", 9999))\n\n try:\n while True:\n try:\n data = self.request.recv(64).decode().strip()\n except socket.timeout:\n self._logger.warning(\"Lost connection to robot\")\n self._logger.info(\"Robot will shut down motors\")\n continue\n self._logger.debug('Received: \"{}\"'.format(data))\n\n if data == \"\": # Client exited safely.\n self._logger.info(\"Terminating client session\")\n break\n\n if data == \"state\":\n state = self.server.controllers[\"main\"].get_state()\n reply = pickle.dumps(state)\n\n elif data == \"inputs\":\n state = self.server.controllers[\"main\"].get_state()\n dpad, lstick, rstick, buttons = state.data\n reply = pickle.dumps(((dpad.x, dpad.y),\n (lstick.x, lstick.y),\n (rstick.x, rstick.y),\n buttons.buttons))\n\n elif data == \"speeds\":\n state = self.server.controllers[\"main\"].get_state()\n reply = pickle.dumps(self._get_needed_speeds(state))\n\n elif data.split()[0] == \"echo\":\n reply = \" \".join(data.split()[1:])\n\n elif data.split()[0] == \"print\":\n reply = \" \".join(data.split()[1:])\n self._logger.info('Client says: \"{}\"'.format(reply))\n\n else:\n reply = 'Unable to parse command: \"{}\"'.format(data)\n self._logger.debug(reply)\n\n try:\n self.request.sendall(str.encode(reply))\n except TypeError: # Already bytecode\n self.request.sendall(reply)\n\n # Receive sensor data\n raw_data, address = self._sensors_client.recvfrom(64)\n self._logger.debug(\"{}\".format(pickle.loads(raw_data)))\n\n finally:\n self._sensors_client.close()\n\n def _get_needed_speeds(self, state):\n \"\"\"\n Get required speeds based on controller state and system state.\n\n Inputs handled:\n - L1, L2 : Rotate left flipper.\n - R1, R2 : Rotate right flipper.\n - lstick : x- and y-axes control wheels in single-stick mode;\n y-axis controls left-side wheels in dual-stick mode.\n - rstick : y-axis controls right-side wheels in dual-stick\n mode.\n - L3 : Toggle the control mode between single and dual sticks.\n - R3 : Toggle reverse mode\n\n Parameters\n ----------\n state : State\n Represents the controller states.\n\n Returns\n -------\n float\n The speed inputs for each of the four motors, with values\n between -1 and 1. 
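(Illustrative case: in forward dual-stick mode, holding both stick\n            y-axes at -1 with no flipper buttons gives (1, -1, 0, 0) after\n            the sign flips in the return statement.)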
The four motors are:\n - Left motor\n - Right motor\n - Left flipper\n - Right flipper\n\n \"\"\"\n # TODO(masasin): Handle select : Synchronize flipper positions.\n # TODO(masasin): Handle start : Move flippers to forward position.\n dpad, lstick, rstick, buttons = state.data\n\n if buttons.is_pressed(\"L3\"):\n self._switch_control_mode()\n if buttons.is_pressed(\"R3\"):\n self._engage_reverse_mode()\n\n if self.reverse_mode:\n # Wheels\n if self.wheels_single_stick:\n self._logger.debug(\"lx: {:9.7} \".fromat(lstick.x) +\n \"ly: {:9.7}\".format(lstick.y))\n if abs(lstick.y) == 0: # Rotate in place\n lmotor = -lstick.x\n rmotor = lstick.x\n else:\n l_mult = (1 - lstick.x) / (1 + abs(lstick.x))\n r_mult = (1 + lstick.x) / (1 + abs(lstick.x))\n lmotor = lstick.y * l_mult\n rmotor = lstick.y * r_mult\n else:\n self._logger.debug(\"ly: {:9.7} \".fromat(lstick.y) +\n \"ry: {:9.7}\".format(rstick.y))\n lmotor = rstick.y\n rmotor = lstick.y\n\n # Flippers\n if buttons.all_pressed(\"L1\", \"L2\"):\n rflipper = 0\n elif buttons.is_pressed(\"L1\"):\n rflipper = 1\n elif buttons.is_pressed(\"L2\"):\n rflipper = -1\n else:\n rflipper = 0\n\n if buttons.all_pressed(\"R1\", \"R2\"):\n lflipper = 0\n elif buttons.is_pressed(\"R1\"):\n lflipper = 1\n elif buttons.is_pressed(\"R2\"):\n lflipper = -1\n else:\n lflipper = 0\n\n else: # Forward mode\n # Wheels\n if self.wheels_single_stick:\n self._logger.debug(\"lx: {:9.7} \".format(lstick.x) +\n \"ly: {:9.7}\".format(lstick.y))\n if abs(lstick.y) == 0: # Rotate in place\n lmotor = lstick.x\n rmotor = -lstick.x\n else:\n l_mult = (1 + lstick.x) / (1 + abs(lstick.x))\n r_mult = (1 - lstick.x) / (1 + abs(lstick.x))\n lmotor = -lstick.y * l_mult\n rmotor = -lstick.y * r_mult\n else:\n self._logger.debug(\"ly: {:9.7} \".format(lstick.y) +\n \"ry: {:9.7}\".format(rstick.y))\n lmotor = -lstick.y\n rmotor = -rstick.y\n\n # Flippers\n if buttons.all_pressed(\"L1\", \"L2\"):\n lflipper = 0\n elif buttons.is_pressed(\"L1\"):\n lflipper = 1\n elif buttons.is_pressed(\"L2\"):\n lflipper = -1\n else:\n lflipper = 0\n\n if buttons.all_pressed(\"R1\", \"R2\"):\n rflipper = 0\n elif buttons.is_pressed(\"R1\"):\n rflipper = 1\n elif buttons.is_pressed(\"R2\"):\n rflipper = -1\n else:\n rflipper = 0\n\n return lmotor, -rmotor, -lflipper, rflipper\n\n def _switch_control_mode(self):\n \"\"\"\n Toggle the control mode between single and dual analog sticks.\n\n Ignores the toggle directive if the mode has been switched within the\n last second.\n\n \"\"\"\n current_time = time.time()\n\n if current_time - self._sticks_timestamp >= 1:\n if self.wheels_single_stick:\n self.wheels_single_stick = False\n self._logger.info(\"Control mode switched: Use \" +\n \"lstick and rstick to control robot\")\n else:\n self.wheels_single_stick = True\n self._logger.info(\"Control mode switched: Use \" +\n \"lstick to control robot\")\n self._sticks_timestamp = current_time\n\n def _engage_reverse_mode(self):\n \"\"\"\n Toggle the control mode between forward and reverse.\n\n In reverse mode, the regular inputs will cause the robot to move\n in reverse as if it were moving forward.\n\n Ignores the toggle directive if the mode has been switched within the\n last second.\n\n \"\"\"\n current_time = time.time()\n\n if current_time - self._reverse_timestamp >= 1:\n if self.reverse_mode:\n self.reverse_mode = False\n self._logger.info(\"Reverse mode disabled!\")\n else:\n self.reverse_mode = True\n self._logger.info(\"Reverse mode enabled!\")\n self._reverse_timestamp = current_time\n\n\nclass 
Server(socketserver.ForkingMixIn, socketserver.TCPServer):\n \"\"\"\n A TCP Server.\n\n Parameters\n ----------\n server_address : 2-tuple of (str, int)\n The address at which the server is listening. The elements are the\n server address and the port number respectively.\n handler_class : Handler\n The request handler. Each new request generates a separate process\n running that handler.\n\n Attributes\n ----------\n controllers : dict\n Contains all registered motors.\n\n **Dictionary format :** {name (str): controller (Controller)}\n\n Examples\n --------\n >>> server = Server((\"192.168.11.1\", 22), Handler)\n >>> server.serve_forever()\n\n \"\"\"\n allow_reuse_address = True # Can resume immediately after shutdown\n\n def __init__(self, server_address, handler_class):\n self._logger = logging.getLogger(\"{}_server\".format(server_address[0]))\n self._logger.debug(\"Creating server\")\n super().__init__(server_address, handler_class)\n self._logger.info(\"Listening to port {}\".format(server_address[1]))\n self.controllers = {}\n\n def serve_forever(self, poll_interval=0.5):\n \"\"\"\n Handle requests until an explicit ``shutdown()`` request.\n\n Parameters\n ----------\n poll_interval : float, optional\n The polling interval, in seconds.\n\n \"\"\"\n self._logger.info(\"Server started\")\n try:\n super().serve_forever(poll_interval)\n except (KeyboardInterrupt, SystemExit):\n pass\n\n def add_controller(self, controller):\n \"\"\"\n Register a controller.\n\n Parameters\n ----------\n controller : Controller\n The controller to be registered.\n\n \"\"\"\n self._logger.debug(\"Adding controller {}\".format(controller))\n self.controllers[controller.name] = controller\n\n def remove_controller(self, controller):\n \"\"\"Deregister a controller.\n\n Parameters\n ----------\n controller : Controller\n The controller to be deregistered.\n\n \"\"\"\n self._logger.debug(\"Removing controller {}\".format(controller))\n del self.controllers[controller.name]\n","sub_path":"operator_station/src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":12621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"462692620","text":"from django.db import models\n\nfrom teams.models import Team\n\n# Create your models here.\nclass IPLSeason(models.Model):\n season = models.IntegerField(default=2020, primary_key=True)\n\n def __str__(self):\n return str(self.season) + ' season'\n\n\nclass Matches(models.Model):\n\n options = (\n ('B', 'BAT'),\n ('F', 'FIELD'),\n )\n\n year = models.ForeignKey(IPLSeason, on_delete=models.CASCADE, default=2020)\n match_no = models.AutoField(primary_key=True)\n team_one = models.ForeignKey(Team, on_delete=models.CASCADE, blank=False, related_name=\"team_one\", default=\"\")\n team_two = models.ForeignKey(Team, on_delete=models.CASCADE, blank=False, related_name=\"team_two\", default=\"\")\n toss = models.ForeignKey(Team, on_delete=models.CASCADE, blank=False, related_name=\"toss\", default=\"\")\n elected = models.CharField(max_length=1, choices=options)\n first_inning_score = models.IntegerField(blank=False)\n first_inning_over = models.FloatField(blank=False)\n second_inning_score = models.IntegerField(blank=False)\n second_inning_over = models.FloatField(blank=False)\n match_won = models.ForeignKey(Team, on_delete=models.CASCADE, related_name=\"match_won\", default=\"\")\n\n def __str__(self):\n return str(self.match_no) + ' ' + str(self.team_one) + ' vs ' + 
str(self.team_two)","sub_path":"ipl/matches/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"550558049","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\naccs = [\"m32p32\", \"m32p64\", \"m32p96\", \"m64p64\"]\r\n\r\ndef find_res1(acc_name):\r\n fp = f\"res1_{acc_name}.csv\"\r\n data = np.loadtxt(fp, str, delimiter=\",\")\r\n data = data[1:, :]\r\n return data\r\n\r\ndef find_res2( acc_name):\r\n fp = f\"res2_{acc_name}.csv\"\r\n data = np.loadtxt(fp, str, delimiter=\",\")\r\n data = data[1:, :]\r\n return data\r\n\r\ndef find_conv_eff(acc_name):\r\n data = find_res1(acc_name)\r\n layers = list(data[:,0])\r\n effs = list(data[:,6])\r\n effs = [float(it) for it in effs]\r\n conv_layers, conv_effs = [], []\r\n for i in range(len(layers)):\r\n if \"Conv\" in layers[i]:\r\n conv_layers.append(layers[i])\r\n conv_effs.append(effs[i]/100)\r\n return conv_layers, conv_effs\r\n\r\ndef find_conv_throughput(acc_name):\r\n data = find_res1(acc_name)\r\n layers = list(data[:,0])\r\n throughputs = list(data[:,5])\r\n throughputs = [float(it) for it in throughputs]\r\n conv_layers, conv_throughputs = [], []\r\n for i in range(len(layers)):\r\n if \"Conv\" in layers[i]:\r\n conv_layers.append(layers[i])\r\n conv_throughputs.append(throughputs[i])\r\n return conv_layers, conv_throughputs\r\n\r\ndef find_normalized_latency(acc_name):\r\n data = find_res1(acc_name)\r\n layer_types = list(data[:,1])\r\n contribs = list(data[:,3])\r\n contribs = [float(it) for it in contribs]\r\n ret = {\r\n \"Conv\": 0, \"Fc\": 0, \"Pool\": 0, \"Add\": 0\r\n }\r\n for i in range(len(layer_types)):\r\n if \"Conv\" in layer_types[i]:\r\n ret[\"Conv\"] += contribs[i]/100\r\n if \"Pool\" in layer_types[i]:\r\n ret[\"Pool\"] += contribs[i]/100\r\n if \"Add\" in layer_types[i]:\r\n ret[\"Add\"] += contribs[i]/100\r\n if \"Fc\" in layer_types[i]:\r\n ret[\"Fc\"] += contribs[i]/100\r\n return ret\r\n\r\ndef find_conv_eff2():\r\n conv_effs = []\r\n for acc_name in accs:\r\n conv_layers, tmp = find_conv_eff(acc_name)\r\n conv_effs.append(tmp)\r\n return conv_layers, conv_effs\r\n\r\ndef find_conv_throughput2():\r\n conv_throughputs = []\r\n for acc_name in accs:\r\n conv_layers, tmp = find_conv_throughput(acc_name)\r\n conv_throughputs.append(tmp)\r\n return conv_layers, conv_throughputs\r\n\r\ndef find_normalized_latency2():\r\n ret = []\r\n for acc_name in accs:\r\n ret.append(find_normalized_latency(acc_name))\r\n return ret\r\n\r\ndef plot_conv_throughput():\r\n conv_layers, conv_throughputs = find_conv_throughput2()\r\n\r\n colors = ['#FF0000', '#008A00', '#0000FF', '#FF00FF']\r\n markers = [\"D\", \"o\", \"*\", \"d\"]\r\n spine_width = 3\r\n for i in range(len(accs)):\r\n plt.plot(range(len(conv_layers)), conv_throughputs[i], color=colors[i], marker=markers[i], markersize=10, linewidth=3)\r\n for i in range(len(accs)):\r\n plt.plot([0,len(conv_layers)-1], [819.2*(i+1), 819.2*(i+1)], color=colors[i], linestyle=\"-.\", linewidth=3)\r\n plt.ylim((0,3500))\r\n \r\n ax = plt.gca()\r\n ax.xaxis.set_visible(False)\r\n\r\n ax.tick_params(which='both', width=3, length=7, direction='in')\r\n ax.yaxis.set_tick_params(labelsize=20)\r\n plt.yticks([0,500,1000,1500,2000,2500,3000,3500], [\"$\\mathbf{0}$\",\"$\\mathbf{500}$\",\"$\\mathbf{1000}$\",\"$\\mathbf{1500}$\",\"$\\mathbf{2000}$\",\"$\\mathbf{2500}$\",\"$\\mathbf{3000}$\",\"$\\mathbf{3500}$\"])\r\n\r\n 
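# Thicken all four spines so the frame matches the bold tick marks.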
ax.spines['top'].set_linewidth(spine_width)\r\n ax.spines['bottom'].set_linewidth(spine_width)\r\n ax.spines['left'].set_linewidth(spine_width)\r\n ax.spines['right'].set_linewidth(spine_width)\r\n \r\n plt.subplots_adjust(right=0.99, top=0.97, bottom=0.03)\r\n \r\n plt.savefig(f\"throughput.png\")\r\n\r\ndef plot_normalized_latency():\r\n normalized_latencys = find_normalized_latency2()\r\n colors = ['c', 'm', 'y', \"#6A00FF\"]\r\n width = 0.75\r\n spine_width = 6\r\n\r\n plt.figure(figsize=(2.4,4.8))\r\n for i in range(len(accs)):\r\n normalized_latency = normalized_latencys[i]\r\n plt.bar([i], normalized_latency[\"Conv\"], bottom=0, color=colors[0], width=width)\r\n plt.bar([i], normalized_latency[\"Fc\"], bottom=normalized_latency[\"Conv\"], color=colors[1], width=width)\r\n plt.bar([i], normalized_latency[\"Pool\"], bottom=normalized_latency[\"Conv\"]+normalized_latency[\"Fc\"], color=colors[2], width=width)\r\n plt.bar([i], normalized_latency[\"Add\"], bottom=normalized_latency[\"Conv\"]+normalized_latency[\"Fc\"]+normalized_latency[\"Pool\"], color=colors[3], width=width)\r\n plt.xlim((-0.5,3.5))\r\n plt.ylim((0,1))\r\n\r\n ax = plt.gca()\r\n ax.yaxis.set_visible(False)\r\n ax.tick_params(which='both', width=6, length=10, direction='in')\r\n ax.xaxis.set_tick_params(labelsize=20)\r\n plt.xticks([0,1,2,3], [\"M32P32\",\"M32P64\", \"M32P96\", \"M64P64\"], rotation=60)\r\n\r\n ax.spines['top'].set_linewidth(0)\r\n ax.spines['bottom'].set_linewidth(spine_width)\r\n ax.spines['left'].set_linewidth(0)\r\n ax.spines['right'].set_linewidth(0)\r\n\r\n plt.subplots_adjust(left=0.03, right=0.97, top=0.97, bottom=0.25)\r\n\r\n plt.savefig(f\"normalized_latency.png\")\r\n\r\nplot_conv_throughput()\r\nplot_normalized_latency()","sub_path":"experimental_results/vgg11/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":5089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"506128978","text":"\nimport os\nimport re\nimport sys\nimport wave\n\nimport numpy\nimport numpy as np\nimport skimage.io # scikit-image\nimport librosa\nimport matplotlib\n\nfrom random import shuffle\nfrom six.moves import urllib\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nDATA_DIR = '../speech/'\npcm_path = \"../speech/\" # 8 bit\nwav_path = \"../speech/\" # 16 bit s16le\npath = pcm_path\nCHUNK = 4096\ntest_fraction=0.1 # 10% of data for test / verification\n\ndef mfcc_batch_generator(batch_size=10,numClass=10):\n batch_features = []\n labels = []\n files = os.listdir(path)\n while True:\n print(\"loaded batch of %d files\" % len(files))\n shuffle(files)\n i=0\n for wav in files:\n print(i)\n i=i+1\n if not wav.endswith(\".wav\"): continue\n wave, sr = librosa.load(path+wav, mono=True) \n #analyse fichier audio et retourne amplitude et frequence \n \n label=dense_to_one_hot(int(wav.split(\"_\")[0]),numClass) \n #label = tenseur correspondant au label du fichier audio\n \n labels.append(label) \n # ajoute le label du fichier au tableau de label\n\n\n mfcc=librosa.feature.mfcc(wave,sr) \n #renvoie un tableau representant le spctre d un son en fonction de son amplitude et de son echantillonage\n\n # print(np.array(mfcc).shape)\n if (len(mfcc[0]) > 80) : continue \n mfcc=np.pad(mfcc,((0,0),(0,80-len(mfcc[0]))), mode='constant', constant_values=0) \n # normaliser la taille des tableaux mfcc\n \n batch_features.append(np.array(mfcc)) \n # ajoute le tableau mfcc correspondant au fichier audio dans le tableau general batch features\n \n if 
len(batch_features) >= batch_size: \n # quand on a traiter et transformer batch-size fichier : \n # print(np.array(batch_features).shape)\n # yield np.array(batch_features), labels\n yield batch_features, labels \n # met dans le generateur le batch_features et les labels\n \n batch_features = [] \n # Reset for next batch\n \n labels = []\n\n\n\n# If you set dynamic_pad=True when calling tf.train.batch the returned batch will be automatically padded with 0s. Handy! A lower-level option is to use tf.PaddingFIFOQueue.\n# only apply to a subset of all images at one time\n\n\n# multi-label\n\ndef dense_to_one_hot(labels_dense, num_classes=10):\n \"\"\"Convert class labels from scalars to one-hot vectors.\"\"\"\n return numpy.eye(num_classes)[labels_dense]\n\n\n\n\n\n","sub_path":"SpeechRecognition/v1.5/speech_data3.py","file_name":"speech_data3.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"146738543","text":"\"\"\"\r\n-*- coding: utf-8 -*-\r\n\r\nitinerary_class.py\r\n\r\nInit creates a dataframe of itinerary information with a few required columns:\r\n['modern_name', 'day', 'month', 'year'] 'latitude' and 'longitude are also\r\nhelpful, but can also be filled in with a Gazetteer if desired.\r\n\r\nVariables List:\r\n self.itin_df - the pandas dataframe of data read in for the itinerary\r\n self.no_flag - a True/False value used as a gate if there are correct\r\n columns and other attributes in the dataframe\r\n self.error_checks - a List of trings that encode various smaller errors\r\n discovered in the dataframe (missing cells of data, bad date formats,\r\n lookup failures in gazetteer dataframes, etc.) Can be printed with\r\n the \"error_output\" function.\r\n self.latlong - tracks whether the itin_df includes latitude and longitude\r\n data; is a True/False value\r\n self.name - a truncated version of the input filename used for default\r\n output files and error message txt files.\r\n\r\nFunction List:\r\n fuzzy_gaz_name_match(self, gaz_df):\r\n This function adds a column to the itinerary dataframe that labels\r\n each name in the modern_name column with the highest matching ratio\r\n name in the gazetteer.\r\n attribute_lookup(self, gazetteer_dataframe, attributes):\r\n This function takes a separate gazetteer and identifies the named\r\n attribute in the gazetteer for each row in the itinerary. It creates\r\n a new column in the Itinerary with that information, leaving a None\r\n if no entry is found in the Gazetteer.\r\n format_dates(self):\r\n Takes the day, month, and year columns and creates a date(yyyy-mm-dd)\r\n cell in a new column for every row. The new dataframe drops any\r\n NaN rows missing date information\r\n itin_to_gaz(self):\r\n Takes every unique location in the Itinerary and creates a Gazetteer\r\n dataframe for export. If the itinerary includes Lat/Long or geo_ids\r\n these are included in the output dataframe.\r\n itin_to_trips(self, date_style='full_date'):\r\n Separates out all individual trips in the itinerary, ignoring blanks\r\n and repeated locations. The output dataframe has origin and\r\n destination columns for date, name, lat, long, and geo_id. The output\r\n can be done with full dates (which uses format_dates prior to running)\r\n or can be run leaving in day/month/year columns. 
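(Internally the rows are\r\n        sorted by date and consecutive rows sharing a modern_name are\r\n        collapsed, so only actual moves remain.)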
Using 'months' for\r\n date_style returns only months and days (not recommended for trips).\r\n Using full_date returns formatted dates; using 'all' returns formatted\r\n dates but also maintains the day/month/year columns.\r\n error_output(self, tofile=False, filename=None):\r\n This creates a txt file with all errors accumulated in running the\r\n various functions. It will record specific line errors for problems\r\n arrising from looking up data in a Gazetteer to formating dates. If\r\n tofile remains False, the output will be in the interactive python\r\n session. If filename remains None, a default based on the original\r\n itinerary file name is created.\r\n\r\nFunctions called by main Function List:\r\n def _max_lev(self, itin_name, gaz_df):\r\n Takes a name and a dataframe with a 'modern_name' column and compares\r\n the name to every entry in the modern_name column. It retuns the\r\n best ratio match in the column. If there is an exact match, instead\r\n of the name it enters 'exact match,' if there is no match better than\r\n 50% it enters a None.\r\n _gaz_lookup(self, gaz_df, attribute, row_index, column='modern_name'):\r\n Looks up the desired attribute where the row_index matches the\r\n modern_name column (or other column as desired - modern_name is the\r\n current default)\r\n _date_formater(self, row):\r\n takes the day, month, and year and returns a single date(yyyy-mm-dd)\r\n _trips_date_style(self, date_style):\r\n Determines whether the trips dataframe will be output with fully\r\n formatted dates or only month and year columns. (accepts 'month',\r\n 'full_date', or 'all').\r\n _undated_locations(self):\r\n Records all rows in which there is a location listed without a\r\n complete date. These locations are dropped if date_style is full_date\r\n but will be kept in the 'month' style.\r\n _distance_calc(self, trip_row):\r\n Returns a great circle distance between two pairs of lat/long\r\n coordinates, called as part of the trips dataframe.\r\n _verify_cols(self):\r\n Only checks if all columns needed in other functions exist and have\r\n the proper names - returns an error and prevents other functions\r\n from running if they are not.\r\n\r\nPossible inclusions:\r\n A) The Itinerary Analysis functions might be streamlined and\r\nincluded although currently they are not. The functions themselves are too\r\nlong and based largely on the 'modern_name' row, which is stripped out and\r\nacted on separately - reincorporating that to a pandas DF based class should\r\nbe the next step.\r\n B) Set a general \"reference column\" variable that can be used to match\r\ncolumns with other dataframes - right now this is basically taken by the\r\n'modern_name' column and is often fixed. It should be a transmitted variable\r\nthat can more easily be set in several functions. 
('modern_name' is the\r\nreference more than a dozen times...)\r\n\r\n@author: Adam Franklin-Lyons\r\n Marlboro College | Python 3.7\r\n\r\nCreated on Tue May 21 20:27:31 2019\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport datetime as dt\r\nfrom numpy import cos, sin, arcsin, sqrt, radians\r\n# from pyproj import Geod\r\nimport Levenshtein as lev\r\n\r\nclass Itinerary:\r\n\r\n def __init__(self, file_name, latlong=False):\r\n \"\"\"Import a gazetteer file into a Pandas DataFrame\"\"\"\r\n self.itin_df = pd.read_csv(file_name, error_bad_lines=False,\r\n encoding='utf-8-sig')\r\n self.name = file_name.split('.')[0]\r\n self.latlong = latlong\r\n self.no_flag, self.error_checks = self._verify_cols()\r\n\r\n def fuzzy_gaz_name_match(self, gaz_df):\r\n \"\"\"\r\n For each modern_name in the itinerary, this function adds a column\r\n to the itinerary dataframe that has the highest matching ratio\r\n name in the gazetteer.\r\n \"\"\"\r\n values = self.itin_df['modern_name'].notna()\r\n self.itin_df['gaz_match'] = self.itin_df.loc[values,\r\n 'modern_name'].apply(lambda x:\r\n self._max_lev(x, gaz_df))\r\n\r\n def _max_lev(self, itin_name, gaz_df):\r\n \"\"\"\r\n Takes a name and a dataframe with a 'modern_name' column and compares\r\n the name to every entry in the modern_name column. It retuns the\r\n best ratio match in the column. If there is an exact match, instead\r\n of the name it enters 'exact match,' if there is no match better than\r\n 50% it enters a None.\r\n \"\"\"\r\n name_series = gaz_df['modern_name'].apply(str)\r\n lev_series = name_series.apply(lambda x: lev.ratio(x.lower(),\r\n str(itin_name).lower()))\r\n max_lev = lev_series.max()\r\n if max_lev == 1:\r\n name = 'exact match'\r\n elif max_lev > 0.5:\r\n row_num = lev_series.idxmax()\r\n name = gaz_df.loc[row_num, 'modern_name']\r\n else:\r\n name = None\r\n return name\r\n\r\n def attribute_lookup(self, gaz_df, attributes):\r\n \"\"\"\r\n The input gazetteer needs to include a column that has matched names\r\n from the itinerary dataframe. Generally, this will be something like\r\n 'modern_name' or 'geo_id'. The function creates new columns for\r\n latitude and longitude in the itinerary dataframe, searches the\r\n gazetteer list for each element in the itinerary dataset, and inputs\r\n the data for all matches. 
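(For example,\r\n        attribute_lookup(gaz_df, ['latitude', 'longitude']) adds those two\r\n        columns, matched through the modern_name column.)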
Columns in the itinerary frame that do not\r\n find a match in the gazetteer are compiled as a list of strings that\r\n note the column and name of the location not found in the gazetteer.\r\n The function returns the modified itinerary dataframe along with the\r\n list of errors.\r\n Place Lat and Long as columns in the itin_frame from gaz_frame\r\n Return modified itin_frame\r\n \"\"\"\r\n blanks = self.itin_df[self.itin_df['modern_name'].isna()].index\r\n message = []\r\n # Attributes can be either string or list - this forces a list.\r\n try:\r\n attributes = attributes.split()\r\n except AttributeError:\r\n attributes = list(attributes)\r\n # Checks that the attributes match column names in the Gazetteer.\r\n for name in attributes:\r\n if name in gaz_df.columns: None\r\n else:\r\n attributes.remove(name)\r\n message.append('The gazetteer used for the attribute lookup'\r\n ' does not contain {}s.'.format(name))\r\n for name in attributes:\r\n message.append('Looking up {} in the gazetteer.'.format(name))\r\n self.itin_df[name] = self.itin_df['modern_name'].apply(\r\n lambda x: self._gaz_lookup(gaz_df, name, x))\r\n # Compiles the errors where the lookup function below failed.\r\n errors = self.itin_df[self.itin_df[name].isna()].index\r\n errors = errors.difference(blanks)\r\n if errors.empty:\r\n message.append('All {}s were found.'.format(name))\r\n else:\r\n message.append('The following {}s were not in the '\r\n 'Gazetteer:'.format(name))\r\n for i in errors:\r\n message.append('Error on line {}; {} \\n'.format(i,\r\n self.itin_df['modern_name'][i]))\r\n # If latitude and longitude looked up correctly, change class variable\r\n if {'latitude','longitude'}.issubset(attributes):\r\n self.latlong = True\r\n self.error_checks += message\r\n print('See the output text file for possibe errors.')\r\n return message\r\n\r\n def _gaz_lookup(self, gaz_df, attribute, row_ref, column='modern_name'):\r\n \"\"\"\r\n This function uses the modern name in the itinerary row (supplied as\r\n 'row_ref'), and finds the gazetteer row that matches that name from\r\n the supplied gaz_df. It then returns the value from the gazetteer\r\n row that matches the 'attribute' column. Missing names or other\r\n index errors return None. 
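(Sketch with a\r\n        made-up place name: self._gaz_lookup(gaz_df, 'latitude', 'Barcelona')\r\n        returns the latitude of the gazetteer row whose modern_name is\r\n        'Barcelona'.)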
This function could theoretically be also\r\n used to match other columns (like geo_id) in the gazetteer dataframe.\r\n \"\"\"\r\n try:\r\n return gaz_df.loc[gaz_df[column]==row_ref, attribute].values[0]\r\n except IndexError:\r\n return None\r\n\r\n def format_dates(self):\r\n \"\"\"\r\n Takes the columns: year, month, day\r\n If all three are present, the function creates a new column 'date'\r\n at the beginning of the dataframe.\r\n The 'date' column is in the datetime type but without times.\r\n All incomplete dates or bad date entries will be left empty and\r\n logged in the \"error_checks\" list.\r\n \"\"\"\r\n self._verify_cols()\r\n message = []\r\n # Structures dates as yyyy-mm-dd from three independent columns.\r\n self.itin_df['dates'] = self.itin_df.apply(lambda x:\r\n self._date_formater(x), axis=1)\r\n date_filter = self.itin_df[['day','month','year']].isna().any(axis=1)\r\n blanks = self.itin_df[date_filter].index\r\n message.append('The following dates are incomplete: \\n{}'.format(\r\n (blanks + 2).tolist()))\r\n blank_dates = self.itin_df.dates.isna()\r\n bad_dates = self.itin_df[blank_dates].index.difference(blanks)\r\n message.append('The following dates contain errors:\\n{}'.format(\r\n (bad_dates + 2).tolist()))\r\n cols = self.itin_df.columns.tolist()\r\n cols = cols[-1:] + cols[:-1]\r\n self.itin_df = self.itin_df[cols]\r\n self.error_checks += message\r\n\r\n def _date_formater(self, row):\r\n \"\"\"\r\n Requires a Series or Dataframe row containing 'day', 'month', and\r\n 'year' in its index or columns and attempts to return a formatted\r\n date type. All entries are coerced to integers. Any failed integer\r\n or bad date (ie: February 30th) returns None for the date.\r\n Example: {year:'1291', month:'8', day:'14'} returns\r\n datetime.date(1291, 8, 14)\r\n \"\"\"\r\n try:\r\n # Forces all values in the three columns to integer for dt.date\r\n row = pd.to_numeric(row[['day','month','year']],\r\n downcast='integer', errors='coerce')\r\n date = dt.date(row.year, row.month, row.day)\r\n return date\r\n # dt.date with NaN instead of numeric values throws a TypeError\r\n except TypeError:\r\n return None\r\n # Days out of range (July 34th, etc) throw a ValueError\r\n except ValueError:\r\n print('{} has a date value out of range.'.format(row.name))\r\n return None\r\n\r\n def itin_to_gaz(self):\r\n \"\"\"\r\n Converts an itinerary into a gazetteer. The function takes all unique\r\n entries in the itinerary, attaches their latitude and longitude (if\r\n present), drops date columns, and creates standard gazetteer columns\r\n (certainty, checked, modern_country). If there are no lat/long\r\n coordinates, the function prints a warning, but does not throw an\r\n error. 
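(The output keeps one row\r\n        per unique, non-null modern_name, sorted alphabetically.)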
Returns a pandas DataFrame.\r\n \"\"\"\r\n self._verify_cols()\r\n if not self.no_flag:\r\n print(\"This operation has failed\")\r\n return None\r\n if not self.latlong:\r\n print('Warning: This gazetteer lacks lat-long coordinates')\r\n gaz_x = self.itin_df['modern_name'].drop_duplicates().dropna().index\r\n gaz_df = self.itin_df.loc[gaz_x]\r\n gaz_df.drop(columns=['day','month','year'], inplace=True)\r\n gaz_df.sort_values('modern_name', inplace=True)\r\n cols1 = ['modern_name','modern_country', 'checked', \r\n 'certainty', 'latitude', 'longitude']\r\n if add_code: cols1 += ['itin_code']\r\n drop_col = ['modern_name']\r\n if self.latlong: drop_col += ['latitude', 'longitude']\r\n columns = cols1 + gaz_df.columns.drop(drop_col).tolist()\r\n gaz_df = pd.DataFrame(gaz_df, columns=columns)\r\n if 'dates' in columns: gaz_df.drop(columns='dates', inplace=True)\r\n if add_code:\r\n gaz_df.loc[:,'itin_code'] = itin_code\r\n return gaz_df\r\n\r\n def itin_to_trips(self, date_style='full_date'):\r\n \"\"\"\r\n Outputs a new dataframe with the original itinerary reorganized as\r\n a series of point A to point B trips. This program works best when\r\n there are not a lot of gaps in the data, as large gaps will appear in\r\n the output as single very long trips (if someone is in London on March\r\n 1 and arrives in Caterbury on April 2, this will return a \"trip\"\r\n taking a month from London to Caterbury, whether or not the person was\r\n actually in London for the remainder of that time.) The trips maintain\r\n the name, date, geo_id, latitude, and longitude columns when\r\n available. The csv may include other columns such as notes or\r\n original names or source references, but these will not appear in the\r\n output. The function requires 'latitude' and 'longitude' columns\r\n largely filled in and will stop the function and return an error\r\n message if they are not available. 'date_style' can be 'full_date'\r\n (the default), 'months' or 'all'. 'full_date' only uses entries with\r\n days, months and years available and drops all others. 'all' uses\r\n only full dates, but keeps the other columns. 
'month' which keeps\r\n all rows containing a month regardless of the day column.\r\n\r\n Output format -\r\n\r\n csv filw with the following headings:\r\n depart_date (yyyy-mm-dd)\r\n depart_loc (str) - based on modern_name from inputs\r\n depart_id (str)\r\n depart_lat (decimal)\r\n depart_long (decimal)\r\n arrive_date (yyyy-mm-dd)\r\n arrive_loc (str) - same as above\r\n arrive_id (str)\r\n arrive_lat (decimal)\r\n arrive_long (decimal)\r\n travel_days (int) - recorded as number of days\r\n distance (decimal) - the straight line distance in kilometers\r\n between the two points\r\n \"\"\"\r\n self._verify_cols()\r\n # Prevents function from running without latitude and longitude\r\n if (not self.latlong) or self.itin_df.latitude.empty:\r\n print('This Itinerary is lacking coordinates please create a '\r\n '\"latitude\" and \"longitude\" column before proceeding.')\r\n return None\r\n # determines whether to include full dates or day, month, year columns\r\n date_col, ref = self._trips_date_style(date_style)\r\n # If there are no date columns, the function fails...include message?\r\n if not date_col: return None\r\n # The 'ref' only drops columns with missing days rather than months\r\n dated_locs = self.itin_df[date_col[:ref] +\r\n ['modern_name']].notna().all(axis=1)\r\n columns = date_col + ['modern_name','latitude','longitude']\r\n if 'geo_id' in self.itin_df.columns: columns.append('geo_id')\r\n trip_df = self.itin_df[dated_locs].reindex(columns=columns)\r\n # Lines up the database in dated order so trips are contiguous.\r\n trip_df.sort_values(date_col[:ref], kind='mergesort', inplace=True)\r\n # This pair removes all repeated locations to leave trips only.\r\n orig_df = trip_df[trip_df.modern_name.shift(-1)!=trip_df.modern_name]\r\n dest_df = trip_df[trip_df.modern_name.shift(1)!=trip_df.modern_name]\r\n df_lst = [orig_df.reset_index(drop=True),\r\n dest_df.reset_index(drop=True).shift(-1)]\r\n trip_df = pd.concat(df_lst, axis=1).loc[df_lst[0].index[:-1]]\r\n # Relabeling columns for the 'trips' - origin and destination.\r\n origin_cols = ['origin_' + col for col in columns]\r\n dest_cols = ['dest_' + col for col in columns]\r\n trip_df.columns = origin_cols + dest_cols\r\n # months style prioritizes years and months and cannot calculate the\r\n # day vector between origin and destination dates.\r\n if date_style!='months':\r\n trip_df['travel_days'] = (trip_df['dest_dates'] -\r\n trip_df['origin_dates']).dt.days\r\n trip_df['distance'] = trip_df.apply(self._distance_calc, axis=1)\r\n return trip_df\r\n\r\n def _trips_date_style(self, date_style):\r\n \"\"\"\r\n Determines whether the trips dataframe will be output with fully\r\n formatted dates or only month and year columns.\r\n \"\"\"\r\n if date_style=='full_date' or date_style=='all':\r\n # Creates an integrated 'dates' field from the year-month-day.\r\n if 'dates' not in self.itin_df.columns:\r\n self.format_dates()\r\n message = self._undated_locations()\r\n self.error_checks += message\r\n if message:\r\n print('Some locations are missing due to missing dates - '\r\n 'check errors in error_checks or error output file.')\r\n if date_style=='full_date':\r\n date_col = ['dates']\r\n return date_col, 1\r\n elif date_style=='months':\r\n date_col = ['year','month','day']\r\n return date_col, 2\r\n elif date_style=='all':\r\n date_col = ['dates','year','month','day']\r\n return date_col, 1\r\n else:\r\n print('Attribute Error: the dates attribute has an out of '\r\n 'bounds value.')\r\n return None\r\n\r\n def 
_undated_locations(self):\r\n \"\"\"\r\n Records all rows in which there is a location listed without a\r\n complete date. These locations are dropped from the trips dataframe\r\n but recorded as a list in the errors output.\r\n \"\"\"\r\n message = []\r\n no_names = self.itin_df[self.itin_df['modern_name'].isna()].index\r\n no_dates = self.itin_df[self.itin_df['dates'].isna()].index\r\n # Does not flag locations without a name...obviously.\r\n missing_locs = no_dates.difference(no_names)\r\n places = self.itin_df.loc[missing_locs, 'modern_name'].unique()\r\n if places:\r\n message.append('the following places are listed with no dates:')\r\n for loc in places:\r\n message.append('{}, '.format(loc))\r\n message.append('the locations are in the following rows:')\r\n message.append('{}.'.format((missing_locs + 2).tolist()))\r\n return message\r\n\r\n def _distance_calc(self, trip_row):\r\n \"\"\"\r\n The old version uses a geo-calculator from pyproj to create a great \r\n circle distance given a series, array, or row of a dataframe and \r\n returns the distance:\r\n wgs84_geod = Geod(ellps='WGS84')\r\n lat1, long1 = trip_row.origin_latitude, trip_row.origin_longitude\r\n lat2, long2 = trip_row.dest_latitude, trip_row.dest_longitude\r\n az12,az21,dist = wgs84_geod.inv(long1,lat1,long2,lat2)\r\n kmdist = round((dist / 1000), 1)\r\n \r\n This new version is a direct calculation using the Haversine formula.\r\n \r\n The inputs from the series need to include depart_lat, depart_long,\r\n arrive_lat, and arrive_long. Distance is returned in kilometers\r\n \"\"\"\r\n lat1, long1 = trip_row.origin_latitude, trip_row.origin_longitude\r\n lat2, long2 = trip_row.dest_latitude, trip_row.dest_longitude\r\n long1, lat1, long2, lat2 = map(radians, [long1, lat1, long2, lat2])\r\n dlong = long2 - long1 \r\n dlat = lat2 - lat1 \r\n angle = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlong/2)**2\r\n arc_dist = 2 * arcsin(sqrt(angle)) \r\n km_dist = 6367 * arc_dist\r\n return km_dist\r\n\r\n def error_output(self, tofile=False, filename=None):\r\n \"\"\"\r\n Takes the errors gathered together at any point in the use of the\r\n specific Itinerary and outputs them either as basic print statements\r\n in an interactive session or prints to a txt file. For a txt file\r\n print, enter: 'tofile=True' as an argument. The default file name\r\n is the same as the input itinerary with '_errors.txt' added. For a\r\n different label, use the argument: filename='desired_name'\r\n \"\"\"\r\n output = pd.unique(self.error_checks).tolist()\r\n output.append('Have a nice day!')\r\n if tofile:\r\n if filename:\r\n error_file = filename\r\n else:\r\n error_file = self.name + '_errors.txt'\r\n with open(error_file, 'w') as f:\r\n f.writelines(\"{}\\n\".format(line) for line in output)\r\n else:\r\n for line in output:\r\n print(line)\r\n return None\r\n\r\n def _verify_cols(self):\r\n \"\"\"\r\n Verify checks to make sure there are column names required by other\r\n functions. If latitude ad longitude are present, it sets the class\r\n variable latlong to True which allows other processing. 
Otherwise,\r\n it returns a message that the itinerary lacks coordinates.\r\n \"\"\"\r\n no_flag = True\r\n message = []\r\n columns = self.itin_df.columns\r\n required_cols = ['modern_name', 'day', 'month', 'year']\r\n for col in required_cols:\r\n if col not in columns:\r\n message.append('\"{}\" does not appear in the itinerary columns'\r\n '\\n Please fix before continuing.'.format(col))\r\n no_flag = False\r\n if not message:\r\n message.append(\"The itinerary has the proper column names.\")\r\n if {'latitude','longitude'}.issubset(columns):\r\n self.latlong = True\r\n else:\r\n self.latlong = False\r\n message.append('Note: This itinerary lacks Lat-Long coordinates.')\r\n # print(message)\r\n return no_flag, message\r\n","sub_path":"Itinerary-Project-Code/itinerary_class.py","file_name":"itinerary_class.py","file_ext":"py","file_size_in_byte":24832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"334865443","text":"#!/usr/bin/env python\n\n\nclass Solution(object):\n def postorderTraversal(self, root):\n if not root: return []\n\n stack, order = [root], []\n prev = curr = None\n while stack:\n curr = stack[-1]\n # traverse down\n if not prev or prev.left == curr or prev.right == curr:\n if curr.left:\n stack.append(curr.left)\n elif curr.right:\n stack.append(curr.right)\n elif curr.left == prev:\n if curr.right:\n stack.append(curr.right)\n else:\n order.append(curr.val)\n stack.pop()\n prev = curr\n","sub_path":"145.Binary_Tree_Postorder_Traversal.py","file_name":"145.Binary_Tree_Postorder_Traversal.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"387027070","text":"from scipy.optimize import fsolve\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator\nimport numpy as np\n\ndef equation_solver(x_1,x_2):\n x_3_pos = (2*x_1-5*x_2+((27*(x_1**2)+2)**(1/2)))/2\n x_3_neg = (2*x_1-5*x_2-((27*(x_1**2)+2)**(1/2)))/2\n return [x_3_pos,x_3_neg]\n\nx = np.linspace(-1, 1, 300)\ny = np.linspace(-1, 1, 300)\n\nX, Y = np.meshgrid(x, y)\nZ = equation_solver(X,Y)[0]\n\n\nfig = plt.figure()\nax = plt.axes(projection=\"3d\")\nax.plot_wireframe(X, Y, Z, color='green')\n\nplt.show()\n\nZ = equation_solver(X,Y)[1]\n\nfig = plt.figure()\nax = plt.axes(projection=\"3d\")\nax.plot_wireframe(X, Y, Z, color='green')\n\nplt.show()\n\n","sub_path":"3.1. fixed.py","file_name":"3.1. 
fixed.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"128946044","text":"from math import log\nimport numpy as np\nimport tensorflow as tf\nfrom config import LEARNING_RATE\nfrom tensorflow.keras import Input\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.regularizers import l2\nfrom tensorflow.keras.layers import LSTM, Embedding\nfrom preprocessing import clean_text\nfrom tokenizers import Tokenizer \nfrom config import VOCAB_SIZE, MAXLEN\n\n# +++++++++++++++++++++++++++++++++ seq2seq model to refere layers with their names ++++++++++++++++++++++++++++++++\nencoder_inputs = Input(shape=(25,))\nencoder_embedding = Embedding(VOCAB_SIZE, 100, input_length=MAXLEN)\n\ndecoder_embedding = Embedding(VOCAB_SIZE, 100, input_length=MAXLEN)\nencoder_embeddings = encoder_embedding(encoder_inputs)\nencoder_lstm = LSTM(256, return_state=True, kernel_regularizer=l2(0.0000001), activity_regularizer=l2(0.0000001))\nLSTM_outputs, state_h, state_c = encoder_lstm(encoder_embeddings)\n\nencoder_states = [state_h, state_c]\n\ndecoder_inputs = Input(shape=(25,), name='decoder_inputs')\ndecoder_lstm = LSTM(256, return_sequences=True, return_state=True, name='decoder_lstm', kernel_regularizer=l2(0.0000001), activity_regularizer=l2(0.0000001))\ndecoder_embeddings = decoder_embedding(decoder_inputs)\ndecoder_outputs, _, _ = decoder_lstm(decoder_embeddings, initial_state=encoder_states)\n\ndecoder_dense = Dense(5000, activation='softmax', name='decoder_dense')\ndecoder_outputs = decoder_dense(decoder_outputs)\n\nSeq2SeqModel = Model([encoder_inputs, decoder_inputs], decoder_outputs, name='model_encoder_training')\n\n\n# +++++++++++++++++++++++++++++++++ model for predictions +++++++++++++++++++++++++++++++++ \nencoder_model = Model(encoder_inputs, encoder_states)\n\ndecoder_state_input_h = Input(shape=(256,))\ndecoder_state_input_c = Input(shape=(256,))\ndecoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]\n\n\ndecoder_inputs = Input(shape=(1,))\nembedded = decoder_embedding(decoder_inputs)\ndecoder_outputs, state_h, state_c = decoder_lstm(embedded, initial_state=decoder_states_inputs)\ndecoder_states = [state_h, state_c]\ndecoder_outputs = decoder_dense(decoder_outputs)\ndecoder_model = Model(\n [decoder_inputs] + decoder_states_inputs,\n [decoder_outputs] + decoder_states)\n\n\n# +++++++++++++++++++++++++++++++++ Predict Class +++++++++++++++++++++++++++++++++ \nclass Predict():\n def __init__(self, model, tokenizer):\n self.model = model\n self.tokenizer = tokenizer\n\n def create_response(self, question):\n question = np.expand_dims(self.tokenizer.tokenize_sequence(clean_text(question)), axis=0)\n result = self.predict_sentence(question)\n return result \n\n \n def predict_sentence(self, input_seq):\n with tf.device('/cpu:0'):\n states_value = encoder_model.predict(input_seq)\n\n target_seq = np.zeros((1, 1))\n target_seq[0, 0] = self.tokenizer.tokenizer.word_index['']\n output_sentence = []\n\n for _ in range(MAXLEN):\n output_tokens, h, c = decoder_model.predict([target_seq] + states_value)\n idx = np.argmax(output_tokens)\n\n if self.tokenizer.tokenizer.index_word[idx] == '':\n\n break\n\n output_sentence.append(idx)\n target_seq[0, 0] = idx\n states_value = [h, c]\n\n return 
self.tokenizer.decode_sequence(output_sentence)\n\n","sub_path":"neuralnet/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":3456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"228397058","text":"from apps.bot.classes.Consts import Role, Platform\nfrom apps.bot.classes.Exceptions import PWarning\nfrom apps.bot.classes.common.CommonCommand import CommonCommand\n\n\nclass Conference(CommonCommand):\n name = \"конфа\"\n names = [\"конференция\", \"беседа\"]\n help_text = \"назвать конфу\"\n help_texts = [\"(название конфы) - называет конфу\"]\n conversation = True\n priority = 90\n platforms = [Platform.VK, Platform.TG]\n\n def accept(self, event):\n if event.chat and (event.chat.name is None or event.chat.name == \"\"):\n return True\n return super().accept(event)\n\n def start(self):\n if self.event.command not in self.full_names:\n raise PWarning(\"Не задано имя конфы, задайте его командой /конфа (название конфы)\")\n if self.event.args:\n try:\n self.check_sender(Role.CONFERENCE_ADMIN)\n same_chats = self.bot.chat_model.filter(name=self.event.original_args)\n if len(same_chats) > 0:\n raise PWarning(\"Конфа с таким названием уже есть. Придумайте другое\")\n self.event.chat.name = self.event.original_args\n self.event.chat.save()\n return f\"Поменял название беседы на {self.event.original_args}\"\n except PWarning as e:\n if self.event.chat.admin is None:\n msg = \"Так как администратора конфы не было, то теперь вы стали администратором конфы!\"\n self.event.chat.admin = self.event.sender\n same_chats = self.bot.chat_model.filter(name=self.event.original_args)\n if len(same_chats) > 0:\n msg += \"\\nКонфа с таким названием уже есть. Придумайте другое\"\n return msg\n self.event.chat.name = self.event.original_args\n self.event.chat.save()\n msg += f\"\\nПоменял название беседы на {self.event.original_args}\"\n return msg\n else:\n return str(e)\n\n else:\n if self.event.chat.name and self.event.chat.name != \"\":\n return f\"Название конфы - {self.event.chat.name}\"\n else:\n raise PWarning(\"Конфа не имеет названия\")\n","sub_path":"apps/bot/commands/Conference.py","file_name":"Conference.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"514746823","text":"# Copyright 2020 The Private Cardinality Estimation Framework Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Plot utilities for visualizing cardinality estimation and errors.\"\"\"\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\ndef boxplot_relative_error(df, num_sets, relative_error):\n \"\"\"Boxplot for relative error by number of sets.\n\n Args:\n df: a pd.DataFrame that has columns of the number of sets and the relative\n error from one or more runs, specified by num_sets and relative_error.\n num_sets: a column name in df specifying the number of sets.\n relative_error: a column name in df specifying the 
relative_error.\n\n Returns:\n A matplotlib.axes.Axes object of boxplot.\n \"\"\"\n if not set([num_sets, relative_error]).issubset(df.columns):\n raise ValueError('num_sets or relative_error not found in df.')\n _, ax = plt.subplots()\n sns.boxplot(x=num_sets, y=relative_error, data=df, ax=ax)\n ax.plot(ax.get_xlim(), (0, 0), '--m')\n ax.set_ylabel('Relative error')\n ax.set_xlabel('Number of sets')\n return ax\n","sub_path":"src/common/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"215067276","text":"# File: main.py\n# Date: 21-Dec-20\n# HamzaKhairy/python-tic-tac-toe\n# This program is a simple text-based tic-tac-toe game. Playable in both single player or multiplayer mode.\n\n# Import the random module & Initialize variables.\nimport random\nCurrentPlayer = \"X\"\nAgainChoice = \" \"\nPlayer1, Player2 = \"X\", \"O\"\nGamesPlayed, position, playcount = 0, 0, 0\nPlayer1Score, Player2Score, TieGames = 0, 0, 0\nValid, PlayAgain, FirstPlay = True, True, True\nGameEnd = False\n\n# Initialize the empty board, index 0 is empty as references to the list range from 1 to 9.\nBoard = [\"\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\"]\n# 10 motivational phrases to be displayed at random in single player mode.\nPhrases = [\"Nice move!\", \"Smart\", \"Good job\", \"Nice!\", \"good move\", \"Smart move!\", \"Good one\", \"Well played\",\n \"clever\", \"\"]\n\n\n# Game introduction & mode selection (single player or multiplayer).\ndef intro():\n global Player1, Player2\n print(\"\\n ~~ Welcome to Hamza's tic-tac-toe. Have fun! ~~ \\n\")\n\n print(\"The grid is organized as shown below, you will enter the respective number to make a move. \\n\")\n\n print(\"1 | 2 | 3\\n--+---+--\")\n print(\"4 | 5 | 6\\n--+---+--\")\n print(\"7 | 8 | 9\\n\")\n\n Player1 = input(\"Enter Player 1's name (X): \")\n Player2 = input(\"\\n* Type 'pc' for single player * \\nEnter Player 2's name (O): \")\n print(\"\\nFirst game starts now, good luck!\\n\")\n\n\ndef display_board():\n print(\"\\n\" + Board[1] + \" | \" + Board[2] + \" | \" + Board[3] + \"\\n--+---+--\")\n print(Board[4] + \" | \" + Board[5] + \" | \" + Board[6] + \"\\n--+---+--\")\n print(Board[7] + \" | \" + Board[8] + \" | \" + Board[9] + \"\\n\")\n\n\n# Handles the pc's move for single player mode.\ndef pc_move():\n global position\n while Board[position] != \"-\":\n position = int(random.randint(1, 9))\n Board[position] = \"O\"\n\n\ndef turn_flip():\n global CurrentPlayer\n if CurrentPlayer == \"X\":\n CurrentPlayer = \"O\"\n else:\n CurrentPlayer = \"X\"\n\n\n# Checks for winning conditions after the 4th move.\ndef check_game_status():\n if playcount > 3:\n global GameEnd, TieGames\n if (\"-\" != Board[1] == Board[2] == Board[3]) or (\"-\" != Board[4] == Board[5] == Board[6]) \\\n or (\"-\" != Board[7] == Board[8] == Board[9]):\n if (CurrentPlayer == \"O\") and (Player2 == \"pc\"):\n print(CurrentPlayer, \"has made a horizontal win. You lose :(\")\n else:\n print(CurrentPlayer, \"has filled a horizontal row. Congratulations, you won!\")\n update_score()\n\n elif (\"-\" != Board[1] == Board[4] == Board[7]) or (\"-\" != Board[2] == Board[5] == Board[8]) \\\n or (\"-\" != Board[3] == Board[6] == Board[9]):\n if (CurrentPlayer == \"O\") and (Player2 == \"pc\"):\n print(CurrentPlayer, \"has made a vertical win. You lose :(\")\n else:\n print(CurrentPlayer, \"has filled a vertical column. 
Congratulations, you won!\")\n update_score()\n\n elif (\"-\" != Board[1] == Board[5] == Board[9]) or (\"-\" != Board[7] == Board[5] == Board[3]):\n if (CurrentPlayer == \"O\") and (Player2 == \"pc\"):\n print(CurrentPlayer, \"has made a diagonal win. You lose :(\")\n else:\n print(CurrentPlayer, \" has filled a diagonal line. Congratulations, you won!\")\n update_score()\n\n elif \"-\" not in Board:\n print(\" It's a tie!\")\n TieGames += 1\n GameEnd = True\n\n\n# Checks if the position input by either player (or the pc) is valid.\ndef validate_position():\n global position\n while (position > 9) or (position < 1):\n position = int(input(\"Invalid! Please input an integer from 1 to 9: \"))\n while Board[position] != \"-\":\n position = int(input(\"Occupied! Please select an empty position: \"))\n Board[position] = CurrentPlayer\n\n\n# Handles the plays that happen each turn for both game modes.\ndef turns():\n global FirstPlay, CurrentPlayer, position, playcount\n if FirstPlay:\n if (CurrentPlayer == \"O\") and (Player2 == \"pc\"):\n print(\"O goes first\")\n pc_move()\n else:\n position = input(CurrentPlayer + \" goes first: \")\n position = int(position)\n validate_position()\n display_board()\n FirstPlay = False\n turn_flip()\n\n if (CurrentPlayer == \"O\") and (Player2 == \"pc\"):\n if Player2 == \"pc\":\n print(Phrases[random.randint(0, 9)], \"\\n\")\n print(\"O's play:\")\n pc_move()\n else:\n position = input(CurrentPlayer + \"'s turn: \")\n position = int(position)\n validate_position()\n playcount += 1\n print(playcount)\n display_board()\n check_game_status()\n\n\ndef display_scoreboard():\n global GamesPlayed\n GamesPlayed = Player1Score + Player2Score + TieGames\n print(\"------------------------- \\n ~ Scoreboard ~\")\n print(\"|\", Player1, \":\", Player1Score, \"||\", Player2, \":\", Player2Score, \"|\")\n print(\"Games Played:\", GamesPlayed, \"| Tied Games:\", TieGames, \"\\n------\")\n\n\ndef update_score():\n global GameEnd\n global Player1Score, Player2Score\n if CurrentPlayer == \"X\":\n Player1Score += 1\n elif CurrentPlayer == \"O\":\n Player2Score += 1\n GameEnd = True\n\n\n# Asks the user if they want to play again ('Y' starts a new game while any other string exits).\ndef new_game():\n global AgainChoice, PlayAgain, FirstPlay, Board, GameEnd\n AgainChoice = input(\"Type 'Y' to play again, or anything else to exit: \")\n if AgainChoice.upper() == \"Y\":\n Board = [\"\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\"]\n GameEnd, FirstPlay = False, True\n # Makes the winning player (the one who moved last) start first in the new game.\n turn_flip()\n\n if Player2 != \"pc\":\n print(\"\\nGame\", (GamesPlayed + 1), \"starting now. Good luck \" + Player1 + \" and \" + Player2 + \"!\\n\")\n else:\n print(\"\\nGame\", (GamesPlayed + 1), \"starting now. Good luck \" + Player1 + \"!\\n\")\n else:\n PlayAgain = False\n print(\"Goodbye! 
I hope you enjoyed.\")\n\n\nintro()\nwhile PlayAgain:\n display_board()\n while not GameEnd:\n turns()\n turn_flip()\n display_scoreboard()\n new_game()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"484649145","text":"# -*- coding: utf-8 -*-\n\nimport logging\n\nimport numpy as np\n\nfrom scilpy.image.utils import volume_iterator\nfrom scilpy.gradientsampling.save_gradient_sampling import (save_gradient_sampling_fsl,\n save_gradient_sampling_mrtrix)\n\nDEFAULT_B0_THRESHOLD = 20\n\n\ndef is_normalized_bvecs(bvecs):\n \"\"\"\n Check if b-vectors are normalized.\n\n Parameters\n ----------\n bvecs : (N, 3) array\n input b-vectors (N, 3) array\n\n Returns\n -------\n True/False\n \"\"\"\n\n bvecs_norm = np.linalg.norm(bvecs, axis=1)\n return np.all(np.logical_or(np.abs(bvecs_norm - 1) < 1e-3,\n bvecs_norm == 0))\n\n\ndef normalize_bvecs(bvecs, filename=None):\n \"\"\"\n Normalize b-vectors\n\n Parameters\n ----------\n bvecs : (N, 3) array\n input b-vectors (N, 3) array\n filename : string\n output filename where to save the normalized bvecs\n\n Returns\n -------\n bvecs : (N, 3)\n normalized b-vectors\n \"\"\"\n\n bvecs_norm = np.linalg.norm(bvecs, axis=1)\n idx = bvecs_norm != 0\n bvecs[idx] /= bvecs_norm[idx, None]\n\n if filename is not None:\n logging.info('Saving new bvecs: {}'.format(filename))\n np.savetxt(filename, np.transpose(bvecs), \"%.8f\")\n\n return bvecs\n\n\ndef check_b0_threshold(force_b0_threshold, bvals_min):\n \"\"\"Check if the minimal bvalue is under zero or over the default threshold.\n If `force_b0_threshold` is true, don't raise an error even if the minimum\n bvalue is suspiciously high.\n\n Parameters\n ----------\n force_b0_threshold : bool\n If True, don't raise an error.\n bvals_min : float\n Minimum bvalue.\n\n Raises\n ------\n ValueError\n If the minimal bvalue is under zero or over the default threshold, and\n `force_b0_threshold` is False.\n \"\"\"\n if bvals_min != 0:\n if bvals_min < 0 or bvals_min > DEFAULT_B0_THRESHOLD:\n if force_b0_threshold:\n logging.warning(\n 'Warning: Your minimal bval is {}. This is highly '\n 'suspicious. The script will nonetheless proceed since '\n '--force_b0_threshold was specified.'.format(bvals_min))\n else:\n raise ValueError('The minimal bval is lesser than 0 or '\n 'greater than {}. This is highly ' +\n 'suspicious.\\n'\n 'Please check your data to ensure everything '\n 'is correct.\\n'\n 'Value found: {}\\n'\n 'Use --force_b0_threshold to execute '\n 'regardless.'\n .format(DEFAULT_B0_THRESHOLD, bvals_min))\n else:\n logging.warning('Warning: No b=0 image. 
Setting b0_threshold to '\n 'the minimum bval: {}'.format(bvals_min))\n\n\ndef get_shell_indices(bvals, shell, tol=10):\n \"\"\"\n Get shell indices\n\n Parameters\n ----------\n bvals: array (N,)\n array of bvals\n shell: list\n list of bvals\n tol: int\n tolerance to accept a bval\n\n Returns\n -------\n numpy.ndarray where shells are found\n \"\"\"\n\n return np.where(\n np.logical_and(bvals < shell + tol, bvals > shell - tol))[0]\n\n\ndef fsl2mrtrix(fsl_bval_filename, fsl_bvec_filename, mrtrix_filename):\n \"\"\"\n Convert a fsl dir_grad.bvec/.bval files to mrtrix encoding.b file.\n\n Parameters\n ----------\n fsl_bval_filename: str\n path to input fsl bval file.\n fsl_bvec_filename: str\n path to input fsl bvec file.\n mrtrix_filename : str\n path to output mrtrix encoding.b file.\n\n Returns\n -------\n \"\"\"\n\n shells = np.loadtxt(fsl_bval_filename)\n points = np.loadtxt(fsl_bvec_filename)\n bvals = np.unique(shells).tolist()\n\n if not points.shape[0] == 3:\n points = points.transpose()\n logging.warning('WARNING: Your bvecs seem transposed. ' +\n 'Transposing them.')\n\n shell_idx = [int(np.where(bval == bvals)[0]) for bval in shells]\n save_gradient_sampling_mrtrix(points,\n shell_idx,\n bvals,\n mrtrix_filename)\n\n\ndef mrtrix2fsl(mrtrix_filename, fsl_bval_filename=None,\n fsl_bvec_filename=None):\n \"\"\"\n Convert a mrtrix encoding.b file to fsl dir_grad.bvec/.bval files.\n\n Parameters\n ----------\n mrtrix_filename : str\n path to mrtrix encoding.b file.\n fsl_bval_filename: str\n path to the output fsl bval file. Default is\n mrtrix_filename.bval.\n fsl_bvec_filename: str\n path to the output fsl bvec file. Default is\n mrtrix_filename.bvec.\n Returns\n -------\n \"\"\"\n\n mrtrix_b = np.loadtxt(mrtrix_filename)\n if not len(mrtrix_b.shape) == 2 or not mrtrix_b.shape[1] == 4:\n raise ValueError('mrtrix file must have 4 columns')\n\n points = np.array([mrtrix_b[:, 0], mrtrix_b[:, 1], mrtrix_b[:, 2]])\n shells = np.array(mrtrix_b[:, 3])\n\n bvals = np.unique(shells).tolist()\n shell_idx = [int(np.where(bval == bvals)[0]) for bval in shells]\n\n save_gradient_sampling_fsl(points,\n shell_idx,\n bvals,\n filename_bval=fsl_bval_filename,\n filename_bvec=fsl_bvec_filename)\n\n\ndef identify_shells(bvals, threshold=40.0):\n \"\"\"\n Guessing the shells from the b-values. Returns the list of shells and, for\n each b-value, the associated shell.\n\n Starting from the first shell as holding the first b-value in bvals,\n the next b-value is considered on the same shell if it is closer than\n threshold, or else we consider that it is on another shell. This is an\n alternative to K-means considering we don't already know the number of\n shells K.\n\n Note. This function should be added in Dipy soon.\n\n Parameters\n ----------\n bvals: array (N,)\n Array of bvals\n threshold: float\n Limit value to consider that a b-value is on an existing shell. Above\n this limit, the b-value is placed on a new shell.\n remove_b0\n\n Returns\n -------\n centroids: array (K)\n Array of centroids. Each centroid is a b-value representing the shell.\n K is the number of identified shells.\n shell_indices: array (N,)\n For each bval, the associated centroid K.\n \"\"\"\n if len(bvals) == 0:\n raise ValueError('Empty b-values.')\n\n # Finding centroids\n bval_centroids = [bvals[0]]\n for bval in bvals[1:]:\n diffs = np.abs(np.asarray(bval_centroids) - bval)\n if not len(np.where(diffs < threshold)[0]):\n # Found no bval in bval centroids close enough to the current one.\n # Create new centroid (i.e. 
new shell)\n            bval_centroids.append(bval)\n    centroids = np.array(bval_centroids)\n\n    # Identifying shells\n    bvals_for_diffs = np.tile(bvals.reshape(bvals.shape[0], 1),\n                              (1, centroids.shape[0]))\n\n    shell_indices = np.argmin(np.abs(bvals_for_diffs - centroids), axis=1)\n\n    return centroids, shell_indices\n\n\ndef extract_dwi_shell(dwi, bvals, bvecs, bvals_to_extract, tol=20,\n                      block_size=None):\n    \"\"\"Extracts the DWI volumes that are on specific b-value shells. Many\n    shells can be extracted at once by specifying multiple b-values. The\n    extracted volumes are in the same order as in the original file.\n\n    If the b-values of a shell are not all identical, use the --tolerance\n    argument to adjust the accepted interval. For example, a b-value of 2000\n    and a tolerance of 20 will extract all volumes with b-values from 1980 to\n    2020.\n\n    Files that are too large to be loaded in memory can still be processed by\n    setting the --block-size argument. A block size of X means that X DWI\n    volumes are loaded at a time for processing.\n\n    Parameters\n    ----------\n    dwi : nib.Nifti1Image\n        Original multi-shell volume.\n    bvals : ndarray\n        The b-values in FSL format.\n    bvecs : ndarray\n        The b-vectors in FSL format.\n    bvals_to_extract : list of int\n        The list of b-values to extract.\n    tol : int\n        The tolerated gap between the b-values to extract and the actual\n        b-values.\n    block_size : int\n        Loads the data using this block size. Useful when the data is too\n        large to be loaded in memory.\n\n    Returns\n    -------\n    indices : ndarray\n        Indices of the volumes corresponding to the provided b-values.\n    shell_data : ndarray\n        Volumes corresponding to the provided b-values.\n    output_bvals : ndarray\n        Selected b-values.\n    output_bvecs : ndarray\n        Selected b-vectors.\n\n    \"\"\"\n    indices = [get_shell_indices(bvals, shell, tol=tol)\n               for shell in bvals_to_extract]\n    indices = np.unique(np.sort(np.hstack(indices)))\n\n    if len(indices) == 0:\n        raise ValueError(\"There are no volumes that have the supplied b-values\"\n                         \": {}\".format(bvals_to_extract))\n\n    logging.info(\n        \"Extracting shells [{}], with number of images per shell [{}], \"\n        \"from {} images from {}.\"\n        .format(\" \".join([str(b) for b in bvals_to_extract]),\n                \" \".join([str(len(get_shell_indices(bvals, shell)))\n                          for shell in bvals_to_extract]),\n                len(bvals), dwi.get_filename()))\n\n    if block_size is None:\n        block_size = dwi.shape[-1]\n\n    # Load the shells by iterating through blocks of volumes. This approach\n    # is slower for small files, but allows very big files to be split\n    # with less memory usage.\n    shell_data = np.zeros((dwi.shape[:-1] + (len(indices),)))\n    for vi, data in volume_iterator(dwi, block_size):\n        in_volume = np.array([i in vi for i in indices])\n        in_data = np.array([i in indices for i in vi])\n        shell_data[..., in_volume] = data[..., in_data]\n\n    output_bvals = bvals[indices].astype(int)\n    output_bvals.shape = (1, len(output_bvals))\n    output_bvecs = bvecs[indices, :]\n\n    return indices, shell_data, output_bvals, output_bvecs\n\n\ndef flip_mrtrix_gradient_sampling(gradient_sampling_filename,\n                                  gradient_sampling_flipped_filename, axes):\n    \"\"\"\n    Flip Mrtrix gradient sampling on an axis\n\n    Parameters\n    ----------\n    gradient_sampling_filename: str\n        Gradient sampling filename\n    gradient_sampling_flipped_filename: str\n        Gradient sampling flipped filename\n    axes: list of int\n        List of axes to flip (e.g. 
[0, 1])\n    \"\"\"\n    gradient_sampling = np.loadtxt(gradient_sampling_filename)\n    for axis in axes:\n        gradient_sampling[:, axis] *= -1\n\n    np.savetxt(gradient_sampling_flipped_filename,\n               gradient_sampling,\n               \"%.8f %.8f %.8f %0.6f\")\n\n\ndef flip_fsl_gradient_sampling(bvecs_filename, bvecs_flipped_filename, axes):\n    \"\"\"\n    Flip FSL bvecs on an axis\n\n    Parameters\n    ----------\n    bvecs_filename: str\n        Bvecs filename\n    bvecs_flipped_filename: str\n        Bvecs flipped filename\n    axes: list of int\n        List of axes to flip (e.g. [0, 1])\n    \"\"\"\n    bvecs = np.loadtxt(bvecs_filename)\n    for axis in axes:\n        bvecs[axis, :] *= -1\n\n    np.savetxt(bvecs_flipped_filename, bvecs, \"%.8f\")\n","sub_path":"scilpy/utils/bvec_bval_tools.py","file_name":"bvec_bval_tools.py","file_ext":"py","file_size_in_byte":11512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"359024559","text":"def binary(value, sorted_list):\n    \"\"\"\n    takes a value and a sorted list and searches the list \n    for the value using the classic binary search algorithm.\n    Returns the position of the value in the list if found else\n    -1\n    \"\"\"\n    start_index = 0\n    end_index = len(sorted_list) - 1\n\n    while(start_index <= end_index):\n        mid_index = start_index + (end_index - start_index) // 2\n        if value < sorted_list[mid_index]:\n            end_index = mid_index - 1\n        elif value > sorted_list[mid_index]:\n            start_index = mid_index + 1\n        else:\n            return mid_index\n    return -1\n","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"84975063","text":"#\n# Copyright (c) 2015, Prometheus Research, LLC\n#\n\n\nfrom contextlib import contextmanager\n\nfrom rex.core import Extension\n\nfrom ..util import global_scope\nfrom .calculationmethod import CalculationMethod\n\n\n__all__ = (\n    'CalculationScopeAddon',\n    'global_calculation_scope',\n)\n\n\nclass CalculationScopeAddon(Extension):\n    \"\"\"\n    An extension that allows a developer to expand the scope of values and/or\n    functions that are available in the context of a CalculationSet\n    calculation.\n    \"\"\"\n\n    #: The name in the calculation scope that the value will be assigned to.\n    name = None\n\n    #: A list containing the CalculationMethod names that this scope addon can\n    #: be applied to.\n    allowed_methods = []\n\n    @classmethod\n    def enabled(cls):\n        return cls is not CalculationScopeAddon \\\n            and cls.name is not None \\\n            and len(cls.allowed_methods) > 0\n\n    @classmethod\n    def sanitize(cls):\n        if cls.__name__ != 'CalculationScopeAddon':\n            assert \\\n                cls.get_scope_value != CalculationScopeAddon.get_scope_value, \\\n                'abstract method %s.get_scope_value()' % cls\n\n    @classmethod\n    def signature(cls):  # pragma: no cover\n        return cls.name\n\n    @classmethod\n    def get_addon_scope(cls, method, assessment):\n        \"\"\"\n        Returns a dictionary containing the custom values to add to the scope\n        of a calculation.\n\n        :param method: the name of the CalculationMethod that is being executed\n        :type method: string\n        :param assessment:\n            the Assessment that the calculation is being executed on\n        :type assessment: Assessment\n        :rtype: dict\n        \"\"\"\n\n        scope = {}\n        for addon in cls.all():\n            if method in addon.allowed_methods:\n                scope[addon.name] = addon.get_scope_value(assessment)\n        return scope\n\n    
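# Aggregates get_addon_scope() results for every registered calculation method.\n    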
@classmethod\n    def get_all_addon_scopes(cls, assessment):\n        \"\"\"\n        Returns a dictionary containing the custom values to add to the scope\n        of a calculation, organized by the calculation method they're\n        applicable to.\n\n        :param assessment:\n            the Assessment that the calculation is being executed on\n        :type assessment: Assessment\n        :rtype: dict of dicts\n        \"\"\"\n\n        scopes = {}\n        for method in CalculationMethod.mapped():\n            scopes[method] = cls.get_addon_scope(method, assessment)\n        return scopes\n\n    @classmethod\n    def get_scope_value(cls, assessment):\n        \"\"\"\n        Returns the value to assign to this scope's name.\n\n        Must be implemented by concrete classes.\n\n        :param assessment:\n            the Assessment that the calculation is being executed on\n        :type assessment: Assessment\n        \"\"\"\n\n        raise NotImplementedError()\n\n\n@contextmanager\ndef global_calculation_scope(assessment):\n    \"\"\"\n    A context manager that will initialize all defined CalculationScopeAddons\n    and inject those variables into the global Python scope.\n\n    :param assessment: the Assessment to create the calculation scope for\n    :type assessment: Assessment\n    \"\"\"\n\n    scope_additions = CalculationScopeAddon.get_addon_scope(\n        method='python',\n        assessment=assessment,\n    )\n\n    with global_scope(scope_additions):\n        yield\n\n","sub_path":"src/rex.instrument/src/rex/instrument/interface/calculationscope.py","file_name":"calculationscope.py","file_ext":"py","file_size_in_byte":3342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"439224105","text":"import re, sys\nfrom lxml import etree\nimport ConfigParser\n\nsettings = ConfigParser.ConfigParser()\nsettings.read('assessment.conf')\n\ndef evaluateReport(report_file):\n    alerts = []\n    report = etree.parse(report_file)\n    find_text = etree.XPath(\"/SECANT/SSH_AUTH_TEST/text()\")\n\n    try:\n        ssh_test_result = find_text(report)[0]\n    except (ValueError,IndexError):\n        return alerts\n\n    regex = re.search('is\\sallowed', ssh_test_result)\n    if regex:\n        alerts.append(ssh_test_result)\n    return alerts","sub_path":"external_tests/ssh_authentication_test/ssh_authentication_test.py","file_name":"ssh_authentication_test.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"374083574","text":"#!/usr/bin/python \n\n#\n#\tfunctions defined by user\n#\ndef isReal(txt):\n    try:\n        float(txt)\n        return True\n    except ValueError:\n        return False\n\n#\n#\timporting python libraries\n#\nimport sys\nfrom string import *\n\n#\n# reading lammps data (input)\n#\nfi=sys.stdin\n\natom_list = []\natmflg=0\n\nfor line in fi.readlines():\n# separating numbers with single space if needed\n    if line != \"\\n\":\n        ret=line.find(\"-\")\n        if ret>=0:\n            eret=line.find(\"e-\")\n            if eret<0:\n                line=line.replace(\"-\",\" -\")\n# find number of atoms\n    ret=line.find(\"atoms\")\n    if ret>=0:\n        split=str.split(line)\n        anumber=split[0]\n# box range (x)\n    ret=line.find(\"xlo\")\n    if ret>=0:\n        split=str.split(line)\n        xlo=float(split[0])\n        xhi=float(split[1])\n# box range (y)\n    ret=line.find(\"ylo\")\n    if ret>=0:\n        split=str.split(line)\n        ylo=float(split[0])\n        yhi=float(split[1])\n# box range (z)\n    ret=line.find(\"zlo\")\n    if ret>=0:\n        split=str.split(line)\n        zlo=float(split[0])\n        zhi=float(split[1])\n# find coordinates of atoms\n    ret=line.find(\"Atoms\")\n    if ret>=0:\n        atmflg=1\n    if atmflg==1:\n        split=str.split(line)\n#-----------> start\n#\n# REMARK: this part needs to be modified with your lammps setting.\n#\n        if len(split)>5: # 2011-04-17\n            if split[1]==\"1\":\n                atomic_weight=\"234\"\n                atomic_name=\"U\"\n            elif split[1]==\"2\":\n                
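# atom type 2: oxygen\n                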
atomic_weight=\"016\"\n                atomic_name=\"O\"\n            elif split[1]==\"3\":\n                atomic_weight=\"050\"\n                atomic_name=\"Cu\"\n            elif split[1]==\"4\":\n                atomic_weight=\"010\"\n                atomic_name=\"Bo\"\n            else: # 2011-04-17\n                atomic_weight=\"000\" # 2011-04-17\n                atomic_name=\"?\" # 2011-04-17\n#-----------> end\n# the box is supposed to be a rectangular parallelepiped.\n            x=(float(split[3])-xlo)/(xhi-xlo) # 2011-04-17\n            y=(float(split[4])-ylo)/(yhi-ylo) # 2011-04-17\n            z=(float(split[5])-zlo)/(zhi-zlo) # 2011-04-17\n            el=[atomic_weight,atomic_name,str(x),str(y),str(z),\"0\",\"0\",\"0\"]\n            atom_list.append(el)\n        \n#\n# printing cfg file (output) - for atomeye\n#\nfw=sys.stdout\n\nfw.write(\"Number of particles = \"+anumber+\" \\n\")\nfw.write(\"#\\n\")\nfw.write(\"\\n\")\nfw.write(\"A = 1.0 Angstrom\\n\")\nfw.write(\"#\\n\")\nfw.write(\"\\n\")\nfw.write(\" H0(1,1) = \"+str(xhi-xlo)+\" A\\n\") # 2011-04-17\nfw.write(\" H0(1,2) = 0 A\\n\")\nfw.write(\" H0(1,3) = 0 A\\n\")\nfw.write(\"#\\n\")\nfw.write(\"\\n\")\nfw.write(\" H0(2,1) = 0 A\\n\")\nfw.write(\" H0(2,2) = \"+str(yhi-ylo)+\" A\\n\") # 2011-04-17\nfw.write(\" H0(2,3) = 0 A\\n\")\nfw.write(\"#\\n\")\nfw.write(\"\\n\")\nfw.write(\" H0(3,1) = 0 A\\n\")\nfw.write(\" H0(3,2) = 0 A\\n\")\nfw.write(\" H0(3,3) = \"+str(zhi-zlo)+\" A\\n\") # 2011-04-17\nfw.write(\"#\\n\")\nfw.write(\"\\n\")\nfw.write(\"eta(1,1) = 0\\n\")\nfw.write(\"eta(1,2) = 0\\n\")\nfw.write(\"eta(1,3) = 0\\n\")\nfw.write(\"eta(2,2) = 0\\n\")\nfw.write(\"eta(2,3) = 0\\n\")\nfw.write(\"eta(3,3) = 0\\n\")\nfw.write(\"#\\n\")\nfw.write(\"\\n\")\nfw.write(\"#\\n\")\nfw.write(\"#\\n\")\nfw.write(\"#\\n\")\nfw.write(\"\\n\")\nfw.write(\"#\\n\")\nfw.write(\"#\\n\")\nfw.write(\"#\\n\")\nfw.write(\"#\\n\")\nfw.write(\"\\n\")\nfw.write(\"#\\n\")\nfw.write(\"#\\n\")\nfw.write(\"#\\n\")\nfw.write(\"\\n\")\n\n#printing atoms coordinates\nfor i in range(0, len(atom_list)):\n    el=atom_list[i]\n    s=join((el[0],el[1],el[2],el[3],el[4],el[5],el[6],el[7],\"\\n\"))\n    fw.write(s)","sub_path":"dat2cfg.py","file_name":"dat2cfg.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"232561118","text":"#!/usr/bin/env python\n\n__author__ = 'vinayakkarnataki'\n\nimport tdclient\nimport sys, os, argparse\nimport Functions\n\nif __name__ == '__main__':\n    apikey = os.getenv(\"TD_API_KEY\")\n\n    # the stdout can be directed to a file by uncommenting the next line\n    #sys.stdout = open('stdout_query.txt', 'w')\n\n    if (len(sys.argv) < 5):\n        print(\"Usage: %s -d database -t table\" % sys.argv[0])\n        print(\"Insufficient arguments provided. Exiting the program\")\n        sys.exit(1)\n\n    try:\n        parser = Functions.createParser(argparse.ArgumentParser())\n        args = parser.parse_args()\n    except:\n        print(\"Unrecognized parameter passed. 
Please check the usage\")\n sys.exit(1)\n\n for arg in vars(args):\n if arg == 'd':\n dbase = getattr(args, arg)\n elif arg == 't':\n table = getattr(args, arg)\n elif arg == 'c':\n columns = getattr(args, arg)\n elif arg == 'm':\n mintime = getattr(args, arg)\n elif arg == 'M':\n maxtime = getattr(args, arg)\n elif arg == 'l':\n limit = getattr(args, arg)\n elif arg == 'f':\n file = getattr(args, arg)\n elif arg == 'e':\n engine = getattr(args, arg)\n else:\n print(\"Undefined argument passed: %s\" % getattr(args, arg))\n sys.exit(1)\n\n Functions.validateString(table)\n Functions.validateString(dbase)\n Functions.validateString(columns)\n Functions.validateTime(mintime,maxtime)\n Functions.validateInt(limit)\n Functions.validateFile(file)\n Functions.validateEngine(engine)\n\n\n try:\n if file == 'csv':\n filetype = Functions.initCSVfile()\n else:\n filetype = Functions.initTabfile()\n except IOError:\n print(\"Error creating file for output. Exiting the program\")\n sys.exit(1)\n\n query = \"SELECT \" + columns + \" FROM \" + table + \" WHERE \" + \"TD_TIME_RANGE(time, \" + str(mintime) + \",\" + str(maxtime) + \")\" + \" LIMIT \" + str(limit)\n\n with tdclient.Client(apikey) as client:\n try:\n data = client.query(dbase, query)\n #wait of job to complete\n data.wait()\n # Check for empty resultset\n if data.result_size > 20:\n for line in data.result():\n if file == 'csv':\n Functions.writeCSV(filetype, list(line))\n else:\n Functions.writeTSV(filetype, list(line))\n else:\n print(\"No matching records found based on the given input\")\n sys.exit(1)\n\n except Exception as ex:\n template = \"An exception of type {0} occured. Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n print(message)\n sys.exit(1)\n\n sys.exit(0)\n\n","sub_path":"PythonScripts/td_sample.py","file_name":"td_sample.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"530576043","text":"# -*- coding: utf-8 -*-\nfrom django.contrib import admin\n\nfrom .models import CharacterInfo, SheetDetail, SheetHeader\n\n\n@admin.register(SheetHeader)\nclass SheetHeaderAdmin(admin.ModelAdmin):\n list_display = ('id', 'name', 'user', 'character_info')\n list_filter = ('user', 'character_info')\n search_fields = ('name',)\n\n\n@admin.register(CharacterInfo)\nclass CharacterInfoAdmin(admin.ModelAdmin):\n list_display = (\n 'id',\n 'name',\n 'age',\n 'height',\n 'weight',\n 'hair_color',\n 'eye_color',\n 'height_measurement_system',\n 'WEIGHT_MEASUREMENT_SYSTEM',\n )\n search_fields = ('name',)\n\n\n@admin.register(SheetDetail)\nclass SheetDetailAdmin(admin.ModelAdmin):\n list_display = (\n 'id',\n 'name',\n 'start_value',\n 'rollable',\n 'dice_class',\n 'dice_number',\n 'misc_bonus',\n 'extra_bonus_1',\n 'extra_bonus_2',\n 'sheet',\n )\n list_filter = ('rollable', 'sheet')\n search_fields = ('name',)\n","sub_path":"sheet/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"444983399","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 02 09:20:05 2016\r\n\r\n\r\n@author: JOHN\r\n\"\"\"\r\n\r\nimport pylab as plt\r\nimport numpy as np\r\n\r\n\r\ndef SaveFigureAsImage(fileName,fig=None,**kwargs):\r\n ''' Save a Matplotlib figure as an image without borders or frames.\r\n Args:\r\n fileName (str): String that ends in .png etc.\r\n \r\n fig (Matplotlib figure instance): figure 
you want to save as the image\r\n Keyword Args:\r\n orig_size (tuple): width, height of the original image used to maintain \r\n aspect ratio.\r\n '''\r\n fig_size = fig.get_size_inches()\r\n w,h = fig_size[0], fig_size[1]\r\n fig.patch.set_alpha(0)\r\n if kwargs.has_key('orig_size'): # Aspect ratio scaling if required\r\n w,h = kwargs['orig_size']\r\n w2,h2 = fig_size[0],fig_size[1]\r\n fig.set_size_inches([(w2/w)*w,(w2/w)*h])\r\n fig.set_dpi((w2/w)*fig.get_dpi())\r\n a=fig.gca()\r\n a.set_frame_on(False)\r\n a.set_xticks([]); a.set_yticks([])\r\n plt.axis('off')\r\n plt.xlim(0,h); plt.ylim(w,0)\r\n fig.savefig(fileName, transparent=True, bbox_inches='tight', \\\r\n pad_inches=0)\r\nif __name__ == '__main__':\r\n im = plt.imread( r'E:\\Dropbox\\Tmp\\073.png') \r\n plt.imshow(im)\r\n plt.plot(2,2,'r*')\r\n SaveFigureAsImage('test',plt.gcf(),orig_size=(im.shape[0],im.shape[1]))","sub_path":"Utils/CRLM_Piwigo/savefig.py","file_name":"savefig.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"460175420","text":"import argparse\nimport os\nimport random\nfrom datetime import datetime\nimport torch\nfrom torch import optim\n\nfrom experiment.train_eval import evaluateInput, GreedySearchDecoder, trainIters, eval_batch, plot_training_results\nfrom global_settings import device, FILENAME, SAVE_DIR, PREPRO_DIR, TRAIN_FILE, TEST_FILE, EXPERIMENT_DIR, LOG_FILE\nfrom model.model import EncoderLSTM, DecoderLSTM\nfrom utils.prepro import read_lines, preprocess_pipeline, load_cleaned_data, save_clean_data\nfrom utils.tokenize import build_vocab, batch2TrainData\n\nfrom global_settings import DATA_DIR\nfrom utils.utils import split_data, filter_pairs, max_length, plot_grad_flow\n\n\ndef str2bool(v):\n #https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\n\ndef str2float(s):\n try:\n return float(s)\n except ValueError:\n return None\n\n\n\n\n\nif __name__ == '__main__':\n\n ##### ArgumentParser ###########\n\n parser = argparse.ArgumentParser(description='PyTorch Vanilla LSTM Machine Translator')\n #parser.add_argument('--data', type=str, default='./data/',\n # help='location of the data corpus. Default in ./data/')\n ### Embedding size ####\n parser.add_argument('--emb', type=int, default=256,\n help='size of word embeddings')\n ### Hidden size ####\n parser.add_argument('--hid', type=int, default=256,\n help='number of hidden units per layer')\n\n ### Number of layers ####\n parser.add_argument('--nlayers', type=int, default=1,\n help='number of layers')\n\n ### Learning rate ###\n parser.add_argument('--lr', type=float, default=0.003,\n help='initial learning rate')\n\n ### Gradient clipping ###\n parser.add_argument('--clip', type=str2float, default=\"\",\n help='gradient clipping. 
Provided as a float number or an empty string \\\" \\\", if no clipping should happen.')\n\n ### Number of iterations ###\n parser.add_argument('--iterations', type=int, default=10000,\n help='number of iterations')\n\n ### Batch size ###\n parser.add_argument('--batch_size', type=int, default=64, help='batch size')\n\n ### Teacher forcing ratio ###\n parser.add_argument('--teacher', type=float, default=0.9, help=\"Teacher forcing ration during training phase\")\n\n ### How many data ###\n parser.add_argument('--limit', type=int, help='Reduce dataset to N samples')\n\n ### Decoder learning rate ###\n parser.add_argument('--dec_lr', type=int, default=1, help=\"Decoder learning rate decay. This must be provided as integer, as it is multiplied by the learning rate (lr)\")\n\n ### Compute vocabulary on all dataset or only training samples ###\n parser.add_argument('--voc_all', type=str2bool, default=\"False\",\n help=\"Get vocabulary from all dataset (true) or only from training data (false).\\n\"\n \"Possible inputs: 'yes', 'true', 't', 'y', '1' OR 'no', 'false', 'f', 'n', '0'\")\n\n ### Truncated Backprop through time ###\n parser.add_argument('--tbptt', type=str2bool, default=\"False\",\n help=\"Set how to perform truncation in backpropagation. If 'true', every time 'detach()' is applied on the hidden states. \"\n \"If 'false', 'detach()' is not applied.\\n\"\n \"Possible inputs: 'yes', 'true', 't', 'y', '1' OR 'no', 'false', 'f', 'n', '0'\")\n ### Dropout ###\n parser.add_argument('--dropout', type=float, default=0.0,\n help='dropout applied to layers (0.0 = no dropout). Values range allowed: [0.0 - 1.0]')\n\n ### Seed ###\n parser.add_argument('--seed', type=int, default=1111,\n help='random seed')\n\n ### Run program on cuda ###\n parser.add_argument('--cuda', type=str2bool, default=\"true\", help=\"use CUDA.\\n\"\n \"Possible inputs: 'yes', 'true', 't', 'y', '1' OR 'no', 'false', 'f', 'n', '0'\")\n\n ### Logging interval ###\n parser.add_argument('--log_interval', type=int, default=100, help='report interval')\n\n parser.add_argument('--max_len', type=int, default=10, help='max sentence length in the dataset. Sentences longer than max_len are trimmed. Provide 0 for no trimming!')\n\n parser.add_argument('--optim', type=str, default='adam', help=\"Training optimizer. Possible values: 'adamax', 'adam', 'adagrad', 'sgd'\")\n\n #### Start #####\n\n # Read arguments\n args = parser.parse_args()\n\n print(\"Expreiment settings:\")\n for arg in vars(args):\n print(arg, getattr(args, arg))\n\n # Set the random seed manually for reproducibility.\n torch.manual_seed(args.seed)\n if torch.cuda.is_available():\n if not args.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\n device = torch.device(\"cuda\" if args.cuda else \"cpu\")\n\n ###### Starting the program #####\n\n experiment_execution_time = datetime.now()\n\n start_root = \".\"\n src_lang = \"eng\"\n trg_lang = \"deu\"\n exp_contraction = True\n\n limit = args.limit\n\n voc_all = args.voc_all\n\n ### Setup preprocessing file ####\n\n max_sent_len = args.max_len\n if max_sent_len > 0:\n cleaned_file = \"%s-%s_cleaned\" % (src_lang, trg_lang) + \"_{}\".format(max_sent_len) +\".pkl\"\n else:\n cleaned_file = \"%s-%s_cleaned\" % (src_lang, trg_lang) + \"_full\" + \".pkl\"\n\n ### Check if data has already been preprocessed, if not, preprocess it ####\n\n if os.path.isfile(os.path.join(PREPRO_DIR, cleaned_file)):\n print(\"File already preprocessed! 
Loading file....\")\n pairs = load_cleaned_data(PREPRO_DIR, filename=cleaned_file)\n else:\n print(\"No preprocessed file found. Starting data preprocessing...\")\n pairs = read_lines(os.path.join(start_root, DATA_DIR), FILENAME)\n pairs, path = preprocess_pipeline(pairs, cleaned_file, exp_contraction, max_len = max_sent_len) #data/prepro/eng-deu_cleaned_full.pkl\n\n ### Get sample ###\n print(\"Sample from data:\")\n print(random.choice(pairs))\n\n src_sents, trg_sents = [], []\n\n\n # limit = None\n\n if limit:\n pairs = pairs[:limit]\n print(\"Limit set: %s\" % str(limit))\n\n train_data = pairs\n\n\n print(\"Total samples in the dataset:\", len(train_data))\n\n src_sents = [item[0] for item in train_data]\n trg_sents = [item[1] for item in train_data]\n\n max_src_l = max_length(src_sents)\n max_trg_l = max_length(trg_sents)\n\n print(\"Max sentence length in source sentences:\", max_src_l)\n print(\"Max sentence length in source sentences:\", max_trg_l)\n\n input_lang = build_vocab(src_sents, \"eng\")\n output_lang = build_vocab(trg_sents, \"deu\")\n\n print(\"Source vocabulary:\", input_lang.num_words)\n print(\"Target vocabulary:\", output_lang.num_words)\n\n\n # Configure models\n hidden_size = args.hid\n encoder_n_layers = args.nlayers\n decoder_n_layers = args.nlayers\n batch_size = args.batch_size\n input_size = input_lang.num_words\n output_size = output_lang.num_words\n embedding_size = args.emb\n dropout = args.dropout\n tbptt = args.tbptt\n clip = args.clip\n teacher_forcing_ratio = args.teacher\n learning_rate = args.lr\n decoder_learning_ratio = args.dec_lr\n n_iteration = args.iterations\n val_iteration = n_iteration\n print_every = args.log_interval\n\n cell_type = \"lstm\" ### default, as GRU implementation still needs changes\n if cell_type not in [\"lstm\", \"gru\"]:\n cell_type = \"lstm\"\n print(\"{} cell type not allowed. Cell type has been set to default value 'lstm'\".format(args.cell))\n\n save_every = 500\n\n optimizer = args.optim.lower()\n if optimizer not in ['adam', 'adamax', 'adagrad', 'sgd']:\n optimizer = 'adamax'\n print(\"Provided optimizer is not supported. 
Standard optimizer is used.\")\n\n    model_name = ''\n    model_name += 'dry_run_simple_nmt_model' + str(limit) if limit else 'dry_run_simple_nmt_model_full_' + str(len(pairs))\n    model_name += \"_teacher_{}\".format(str(teacher_forcing_ratio)) if teacher_forcing_ratio > 0.0 else \"_no_teacher\"\n    model_name += \"\" if voc_all else \"_train_voc\"\n    model_name += \"_clip-{}\".format(clip) if clip else \"\"\n    model_name += \"_tbptt\" if tbptt else \"\"\n    model_name += \"_\"+optimizer\n    model_name += \"_lr-{}-{}\".format(learning_rate, decoder_learning_ratio)\n\n    print(\"Model name:\", model_name)\n    print('Building encoder and decoder ...')\n    encoder = EncoderLSTM(input_size=input_size, emb_size=embedding_size, hidden_size=hidden_size,\n                          n_layers=encoder_n_layers, dropout=dropout, cell_type=cell_type)\n    decoder = DecoderLSTM(output_size=output_size, emb_size=embedding_size, hidden_size=hidden_size,\n                          n_layers= decoder_n_layers, dropout=dropout, cell_type=cell_type)\n\n    assert encoder.cell_type == decoder.cell_type\n    assert encoder.n_layers == decoder.n_layers\n\n    encoder = encoder.to(device)\n    decoder = decoder.to(device)\n    print('Models built:')\n    print(encoder)\n    print(decoder)\n\n    # Initialize optimizers\n    print('Building optimizers ...')\n    encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)\n    decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate * decoder_learning_ratio)\n    print(encoder_optimizer, decoder_optimizer)\n\n    # Run training iterations\n    print(\"Starting Training!\")\n    start_time = datetime.now()\n    val_loss, directory, train_history, val_statistics, _, _ = \\\n        trainIters(model_name, input_lang, output_lang, train_data, None, encoder, decoder, encoder_optimizer,\n                   decoder_optimizer, encoder_n_layers, decoder_n_layers, SAVE_DIR, n_iteration, batch_size,\n                   print_every, save_every, clip, FILENAME, val_iteration, tbptt=tbptt)\n\n    end_time = datetime.now()\n    duration = end_time-start_time\n    print('Training duration: {}'.format(duration))\n\n    # print(\"Performing evaluation on test set...\")\n    # test_loss = eval_batch(test_batches, encoder, decoder, 1)\n    # print(\"Test loss:\", test_loss)\n\n    print(\"Checkpoints saved in %s\" %(directory))\n\n    #Log file name\n    try:\n        with open(os.path.join(start_root, EXPERIMENT_DIR, LOG_FILE), encoding=\"utf-8\", mode=\"w\") as f:\n            #Logging to last_experiment.txt\n            f.write(str(directory))\n        with open(os.path.join(start_root, EXPERIMENT_DIR, \"log_history.txt\"), encoding=\"utf-8\", mode=\"a\") as hf:\n            #Logging in the history document\n            hf.write(\"Execution date: %s\" % str(experiment_execution_time))\n            hf.write(\"\\nDRY_RUN\\n\")\n            hf.write(\"\\nExperiment name:\\n\")\n            hf.write(model_name)\n            hf.write(\"\\nDirectory:\\n\")\n            hf.write(str(directory))\n            hf.write(\"\\n Number of samples: %s\" %str(len(pairs)))\n            if voc_all:\n                hf.write(\"\\nVocabularies built on the full dataset\")\n            else:\n                hf.write(\"\\nVocabularies built only on the train set\")\n            hf.write(\"\\nSource vocabulary: %s\" % str(input_lang.num_words))\n            hf.write(\"\\nTarget vocabulary: %s\" % str(output_lang.num_words))\n            hf.write(\"\\nMax src length %s\" % max_src_l)\n            hf.write(\"\\nMax trg length %s\" % max_trg_l)\n            hf.write(\"\\nLearning rate: %s\" % str(learning_rate))\n            hf.write(\"\\nBatch size: %s\" % str(batch_size))\n            hf.write(\"\\nEmbedding size: %s\" % str(embedding_size))\n            hf.write(\"\\nHidden size: %s\" % str(hidden_size))\n            hf.write(\"\\nAverage validation loss: %s\" %str(val_loss))\n            #hf.write(\"\\nTest loss: %s\" %str(test_loss))\n            
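#Iteration count and wall-clock duration close out this history entry\n            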
hf.write(\"\\nTraining iterations:\")\n            hf.write(str(n_iteration))\n            hf.write(\"\\n Training duration:\")\n            hf.write(str(duration))\n            hf.write(\"\\n**********************************\\n\")\n    except (IOError, TypeError, RuntimeError):\n        print(\"Log to file failed!\")\n\n    print(\"Plotting results...\")\n    try:\n        plot_training_results(model_name, train_history, val_statistics, SAVE_DIR, FILENAME, decoder_n_layers, embedding_size, hidden_size, batch_size, learning_rate,\n                          n_iterations = n_iteration, live_show=False)\n        print(\"Plots stored!\")\n\n    except (IOError, RuntimeError):\n        pass","sub_path":"dry_run.py","file_name":"dry_run.py","file_ext":"py","file_size_in_byte":12659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"445624918","text":"#!/usr/bin/env python3 -B\n\nimport os\nimport sys\nimport bonobo\nimport importlib\nfrom cromulent import vocab\n\nimport settings\n\nif len(sys.argv) < 2:\n\tprint(\"python3 ./run.py project [dot] [*args]\")\n\tsys.exit()\nelse:\n\tproject = sys.argv[1]\n\tpipe = importlib.import_module(f'pipeline.projects.{project}')\n\tPipeline = pipe.Pipeline\n\tsys.argv = [sys.argv[0], *sys.argv[2:]]\n\n### Run the Pipeline\n\nif __name__ == '__main__':\n\tif settings.DEBUG:\n\t\tLIMIT\t\t= int(os.environ.get('GETTY_PIPELINE_LIMIT', 1))\n\t\tPACK_SIZE = 1\n\telse:\n\t\tLIMIT\t\t= int(os.environ.get('GETTY_PIPELINE_LIMIT', 10000000))\n\t\tPACK_SIZE = 10000000\n\n\tvocab.add_linked_art_boundary_check()\n\n\tprint_dot = False\n\tif 'dot' in sys.argv[1:]:\n\t\tprint_dot = True\n\t\tsys.argv[1:] = [a for a in sys.argv[1:] if a != 'dot']\n\tparser = bonobo.get_argument_parser()\n\twith bonobo.parse_args(parser) as options:\n\t\ttry:\n\t\t\tpipeline = Pipeline(\n\t\t\t\toutput_path=settings.output_file_path,\n\t\t\t\tmodels=settings.arches_models,\n\t\t\t\tpack_size=PACK_SIZE,\n\t\t\t\tlimit=LIMIT,\n\t\t\t\tdebug=settings.DEBUG\n\t\t\t)\n\t\t\tif print_dot:\n\t\t\t\tprint(pipeline.get_graph()._repr_dot_())\n\t\t\telse:\n\t\t\t\tpipeline.run(**options)\n\t\texcept RuntimeError:\n\t\t\traise ValueError()\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"125413268","text":"from common import *\nimport autograd.numpy.random as rng\nimport cov\nimport matplotlib.pyplot as plt\nfrom numpy.random import multivariate_normal as rmvn\nfrom autograd import grad\nimport vanilla\n\n##############################################\n## This module currently assumes 1D inputs. ##\n##############################################\n\n\n\n# General-purpose inference function factory. Creates functions to compute the\n# log-marginal likelihood of kernel+noise parameters, compute the posterior\n# predictive mean and the posterior marginal variance. Note that \"grid_mu\" and\n# \"grid_s2\" are faster than the general posterior distributions computed using\n# \"posterior\".\n#\n# Inputs:\n# y - observed data. (N)\n# fcov - covariance function. (function)\n# w - sampling period. (scalar)\n# s2nh - observation noise s2n := log(1 + exp(s2nh)). (scalar)\n# n_trunc - number of truncation points. 
(scalar)\n#\n# Outputs:\n# LML - function to compute the log marginal likelihood.\n# posterior - function to compute functions that compute posterior mu and var.\n# grid_mu - computes the posterior mean on the grid data X.\n# grid_s2 - computes the posterior variance on the grid data X.\n#\ndef infer(X, y, fcov, w, s2nh, n_trunc):\n\n # Ensure input is appropriately shaped and that X and y are the same length.\n if np.ndim(y) != 1 or np.ndim(X) != 1:\n raise ValueError('X and y must be 1-dimensional.')\n N = y.shape[0]\n if y.shape[0] != X.shape[0]:\n raise ValueError('X and y must be the same length.')\n\n # Extend X and y with more data and zeros respectively. Compute ifft of\n # extended y to get ytilde.\n ytilde_ext = ifft(np.hstack([np.zeros(n_trunc), y, np.zeros(n_trunc)]))\n X_ext = np.hstack([np.linspace(X[0] - n_trunc * w, X[0], n_trunc),\\\n X, np.linspace(X[-1], X[-1] + n_trunc * w, n_trunc)])\n\n # Compute eigenvalues of approximate covariance matrix.\n s2n, shift = np.log1p(np.exp(s2nh)), np.int64(np.floor(N / 2.0 + n_trunc))\n Ktop = np.roll(fcov(X_ext - X_ext[shift], Z=np.array([0.0]), flat=1), shift)\n gamma = np.real(fft(Ktop)) * np.sqrt(Ktop.shape[0])\n gammapluss2n = gamma + s2n\n\n def approxkernel():\n ext = fft(np.dot(np.diag(gammapluss2n), ifft(np.eye(N + 2 * n_trunc))))\n return np.real(ext[n_trunc:-n_trunc,n_trunc:-n_trunc])\n def multiplyvec(y):\n y_ext = np.hstack([y, np.zeros(2 * n_trunc)])\n return np.real(fft(gammapluss2n * ifft(y_ext))[:-2*n_trunc])\n \n y = rng.randn(N)\n exact_kernel = fcov(X,X) + s2n * np.eye(N)\n z_exact = np.dot(exact_kernel, y)\n z_apprx = multiplyvec(y)\n plt.plot(z_exact - z_apprx)\n print('Exact - approx quad form', np.dot(y, z_exact) - np.dot(y, z_apprx))\n plt.show()\n \n \n\n print('Showing kernels.')\n Gamma = ComputeSpectrum(fcov, X) + s2n\n plt.figure()\n plt.imshow(exact_kernel - approxkernel())\n plt.colorbar()\n plt.title('Extend approx.')\n plt.figure()\n plt.imshow(exact_kernel - ConstructCircCov(Gamma))\n plt.colorbar()\n plt.title('Circ approx')\n plt.show()\n\n\n\n # Function to compute the log marginal likelihood.\n infer.ydiv = None\n def lml():\n if infer.ydiv is None:\n infer.ydiv = sqrabs(ytilde) / gammapluss2n\n\n return -0.5 * (np.sum(np.log(2 * np.pi * gammapluss2n)) + np.sum(infer.ydiv)) \n\n # Function to compute the gradient of the lml w.r.t. the kernel parameters.\n def dlmlkernel():\n if infer.ydiv is None:\n infer.ydiv = sqrabs(ytilde) / gammapluss2n\n\n dk = grad(lambda tau : fcov(tau, flatten=True))\n return 0.5 * np.sum(fft(dk))\n\n # Function to generate two functions which share expensive computation. 
The\n    # first (mu) computes the posterior mean at each x, whilst the second (s2)\n    # computes the marginal variance for each x.\n    infer.post_mean_sol = None\n    def posterior(x):\n\n        # Lazy evaluation of the computation for the posterior mean.\n        if infer.post_mean_sol is None:\n            infer.post_mean_sol =\\\n                np.real(fft(ytilde_ext / gammapluss2n))[n_trunc:-n_trunc]\n\n        # Compute cross-covariance matrix.\n        KXx = fcov(X, x)\n\n        # Define + return functions to compute the posterior mean and variance at x.\n        def mu():\n            return np.dot(infer.post_mean_sol, KXx)\n        def s2():\n            Ktilde, Kself = ifft(KXx), fcov(x, diag=True)\n            return Kself - np.sum(sqrabs(Ktilde) / np.expand_dims(gammapluss2n, 1), 0)\n\n        return mu, s2\n\n    # Return functions to compute quantities of interest.\n    return lml, posterior\n\n\ndef main():\n\n    # Define the covariance matrix.\n    print('Define covariance function.')\n    pars = {'l2h' : 0.0, 's2h' : np.log(np.exp(1.0) - 1.0)}\n    fcov = cov.factory(cov.eq, pars)\n\n    # Generate some data.\n    print('Generate toy data.')\n    rng.seed(15485863)\n    lb, ub, N, s2n = 0.0, 10.0, 100, 0.1\n    X = np.linspace(lb, ub, N)\n    y = rmvn(np.zeros(N), fcov(X, X) + s2n * np.eye(N))\n\n    # Perform inference.\n    print('Perform inference.')\n    lml, posterior =\\\n        infer(X, y, fcov, ub / N, np.log(np.exp(s2n) - 1.0), 25)\n    return\n    van_lml, van_posterior, van_mu, van_s2 =\\\n        vanilla.infer(X, ifft(y), fcov, ub / N, np.log(np.exp(s2n) - 1.0))\n\n    # Compute posterior distribution.\n    print('Compute posterior mean and variance.')\n    Xh = np.linspace(lb - 10.0, ub + 10.0, 10 * N)\n    mu, s2 = posterior(Xh)\n    muXh, sXh2 = mu(), 2 * np.sqrt(s2())\n\n    # Compute posterior distribution using tricks for input data.\n    print('Compute posterior mean + variance at data using tricks.')\n    gmu, g2s = van_mu(), 2 * np.sqrt(van_s2())\n    plt.plot(X, y, 'rx',\\\n        Xh, muXh, 'b', Xh, muXh+sXh2, 'b--', Xh, muXh-sXh2, 'b--',\\\n        X, gmu, 'g', X, gmu + g2s, 'g--', X, gmu - g2s, 'g--')\n    plt.show()\n\n    # Compute + show log marginal likelihood.\n    #print('Computing lml.')\n    #print(lml())\n    \n\nif __name__ == '__main__':\n    main()\n","sub_path":"exp/circgp/Archive/extenddomain.py","file_name":"extenddomain.py","file_ext":"py","file_size_in_byte":5664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"84254485","text":"from tkinter import *\n\n\nclass demineur :\n\tdef __init__(self):\n\t\tself.nbr_case = int()\n\t\tself.i = 0\n\t\tself.j = 0\n\t\tself.liste_button_i = []\n\t\tself.liste_button_j = []\n\t\tself.liste_label_i = []\n\t\tself.liste_label_j = []\n\t\tself.liste_variables_label = []\n\t\tself.fenetre = Tk()\n\tdef create_label(self,x) :\n\t\tself.nbr_case = x\n\t\tcount = 0\n\t\tfor i in range(self.nbr_case) :\n\t\t\tself.liste_label_j = []\n\t\t\tfor j in range(self.nbr_case) :\n\t\t\t\t#self.liste_variables_label.insert(count, StringVar())\n\t\t\t\tlabel = Label(self.fenetre,text = count)#,textvariable = self.liste_variables_label[count])\n\t\t\t\tself.liste_label_j.insert(j,label)\n\t\t\t\tlabel.grid(row = i,column = j,ipadx = 10,ipady = 2)\n\t\t\t\tcount += 1\n\t\t\tself.liste_label_i.insert(i,self.liste_label_j)\n\tdef create_button(self) :\n\t\t
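# overlay a Button widget on every Label cell of the grid\n\t\t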
count = 0\n\t\tfor i in range(self.nbr_case) : \n\t\t\tself.liste_button_j = []\n\t\t\tfor j in range(self.nbr_case) :\n\t\t\t\tprint(i,\" \",j)\n\t\t\t\tbutton = Button(self.liste_label_i[i][j])\n\t\t\t\tself.liste_button_j.insert(j,button)\n\t\t\t\tbutton.grid(row = i,column = j,ipadx = 10,ipady = 2)\n\t\t\tself.liste_button_i.insert(i,self.liste_button_j)\n\tdef create_destroy_button(self) :\n\t\tButton(self.fenetre,text = \"delete the buttons \",command = self.destroy_button).grid(row = self.nbr_case,column = 0,columnspan = 5)\n\tdef destroy_button(self) :\n\t\tprint(\"i = \",self.i,\"j = \",self.j)\n\t\tif self.i < self.nbr_case :\n\t\t\tif self.j < self.nbr_case :\n\t\t\t\tself.liste_button_i[self.i][self.j].destroy()\n\t\t\t\tself.j += 1\n\t\t\telif self.j == self.nbr_case :\n\t\t\t\tself.j = 0\n\t\t\t\tself.i += 1\n\t\t\t\tif self.i != self.nbr_case and self.j != self.nbr_case :\n\t\t\t\t\tself.liste_button_i[self.i][self.j].destroy()\n\t\t\t\t\tself.j += 1\n\t\t\t\telse :\n\t\t\t\t\tself.i,self.j = 0,0\n\t\t\t\t\tself.create_button()\n\tdef self_destroy_button(self,nom) :\n\t\tpass\n\n\n\nif __name__ == '__main__' :\n\twindow = demineur()\n\twindow.create_label(3)\n\twindow.create_button()\n\twindow.fenetre.mainloop()\n","sub_path":"Projets/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"271355236","text":"n = int(input())\nstudent_marks = {}\naverages = {}\nfor _ in range(n):\n\tname, *line = input().split()\n\n\t# Could be split like this\n\t# line = input().split()\n\t# name, scores = line[0], line[1:]\n\n\tscores = list(map(float, line))\n\tstudent_marks[name] = scores\n\taverages[name] = sum(scores) / float(len(scores))\n\nquery_name = input()\nprint(\"%.2f\" % averages[query_name])","sub_path":"02 Basic Data Types/finding_the_percentage.py","file_name":"finding_the_percentage.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"431836028","text":"import os\nimport argparse\nimport shutil\nimport pandas as pd\n\n\nif __name__ == '__main__':\n\n    parser = argparse.ArgumentParser(description='Specify output directory.')\n    parser.add_argument('output_directory',type=str)\n    parser.add_argument('-r','--resume',default=False)\n    args = parser.parse_args()\n    output_directory = args.output_directory\n    output_directory = os.path.abspath(output_directory)\n    resume = args.resume\n\n    with open('stack.csv', 'r') as f:\n        header = f.readline()\n        stack_name = header.split('\\n')[0]\n        stack = pd.read_csv(f)\n\n    stack_directory = output_directory + '/' + stack_name\n\n    print('hello')\n    print(stack_directory)\n    print('hello')\n\n    if os.path.exists(stack_directory):\n        if not resume:\n            raise RuntimeError(stack_directory + ' already exists but resume = False.')\n    else:\n        if not os.path.exists(output_directory):\n            os.mkdir(output_directory)\n        os.mkdir(stack_directory)\n        shutil.copyfile('stack.csv', stack_directory+'/stack.csv')\n        shutil.copyfile('slowdown_liouvillian_alt.py', stack_directory + '/slowdown_liouvillian_alt.py')\n        shutil.copyfile('workstation_liouvillian_changwoo.py', stack_directory+'/workstation_liouvillian_changwoo.py')\n\n    os.chdir(stack_directory)\n\n    command = 'python slowdown_liouvillian_alt.py ' + stack_directory + ' -e True'\n    cwd = os.getcwd()\n\n    print('cwd: ' + cwd)\n    print('running in bash: ' + command)\n\n    os.system(command)\n","sub_path":"scripts/workstation_liouvillian_changwoo.py","file_name":"workstation_liouvillian_changwoo.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"181027817","text":"'''\nCreated on 2018年7月26日\n\n@author: cloud\n'''\nimport logging\nfrom threading import Lock\nfrom pandas import DataFrame,ExcelWriter\nimport settings\nfrom settings.template import server_url\nimport json,datetime,time\nimport copy\n\nlogger = logging.getLogger()\n\nclass ServerResponseDataMap():\n ''''''\n def __basetranslate(self,datamap,arg):\n if arg in datamap:\n return datamap[arg]\n else:\n return arg\n \n def matchStatus(self,arg):\n datamap = {\n 1:\"主动\", #proactive\n 2:\"被动\" #Passive\n }\n return self.__basetranslate(datamap, arg)\n \n def matchedUserGender(self,arg):\n datamap = {\n 1:\"male\", # 男\n 2:\"female\" # 女\n }\n return self.__basetranslate(datamap, arg)\n \n def matchedUserLevelName(self,arg):\n return arg\n \n def matchEroticismBehavior(self,arg):\n datamap = {\n True:\"是\",\n False:\"否\"\n }\n return self.__basetranslate(datamap, arg)\n \n def matchedUserChannel(self,arg):\n datamap = {\n 0:\"默认\",\n 1:\"自然渠道\",\n 2:\"install\",\n 3:\"AEO or InApp\"\n }\n return self.__basetranslate(datamap, arg)\n \n def circleGirl(self,arg):\n return self.matchEroticismBehavior(arg)\n \n def signGirl(self,arg):\n return self.matchEroticismBehavior(arg)\n \n def goddessGirl(self,arg):\n return self.matchEroticismBehavior(arg)\n \n def target(self,arg):\n datamap = {\n 0:\"其他\",\n 1:\"女神\",\n }\n return self.__basetranslate(datamap, arg)\n \n def matchMode(self,arg):\n datamap = {\n 1:\"global\", # 全球\n 2:\"country\" # 定向国家\n }\n return self.__basetranslate(datamap, arg)\n \n def createTime(self,arg):\n# logger.info(\"createTime %s\"%time.strftime(\"%Y-%m-%d-%H_%M_%S\",time.localtime(arg/1000)))\n try:\n# return time.strftime(\"%Y-%m-%d %H:%M:%S\",time.localtime(arg/1e3))\n return datetime.datetime.utcfromtimestamp(int(arg/1e3))\n except Exception as e:\n logger.warning(\"invalid time stamp,[%s]\"%e)\n return arg\n\n def matchArea(self,arg):\n datamap = {\n 0: 0,\n 1: \"泰国\",\n 2: \"菲律宾\",\n 3: \"印度尼西亚\",\n 4: \"东南亚\",\n 5: \"沙特阿拉伯\",\n 6: \"阿拉伯联合酋长国\",\n 7: \"科威特\",\n 8: \"海湾国家\",\n 9: \"埃及\",\n 10: \"土耳其\",\n 11: \"印度\"\n }\n return self.__basetranslate(datamap, arg)\n \n def matchAreaName(self,arg):\n return arg\n \n def matchGender(self,arg):\n# logger.info(\"matchGender %s\"%self.matchedUserGender(arg))\n return self.matchedUserGender(arg)\n \n def matchUserPay(self,arg):\n datamap={\n 0: \"未付费\",\n 1: \"付费\"\n }\n return self.__basetranslate(datamap, arg)\n\nresponse_data_map = ServerResponseDataMap()\n\nclass PerfReport():\n def __init__(self):\n self.__columns = [\n \"Account\",\n \"UserID\",\n \"Identity\",\n \"ClientMatchGender\",\n \"ClientMatchRegion\",\n \"RequestTimes\",\n \"MatchResult\",\n \"VideoResult\",\n \"MatchSpacingTime\",\n \"VideoSpacingTime\",\n \"ChannelID\",\n \"MatchedUserID\",\n \"MatchStatus\",\n \"MatchedUserGender\",\n \"MatchedUserLevelName\",\n \"MatchedEroticismBehavior\",\n \"MatchedUserChannel\",\n \"CircleGirl\",\n \"SignGirl\",\n \"GoddessGirl\",\n \"MatchMode\",\n \"CreateTime\",\n \"MatchAreaName\",\n \"Target\",\n \"MatchUserPay\",\n \"TrueUser\"\n ]\n self.__df = DataFrame(columns=self.__columns)\n self.__lock = Lock()\n self.__httpclient = settings.http_client\n self.__resp_data_row_map = {\n \"matchStatus\": \"MatchStatus\",\n \"matchedUserGender\": \"MatchedUserGender\",\n \"matchedUserLevelName\": \"MatchedUserLevelName\",\n \"matchEroticismBehavior\": \"MatchedEroticismBehavior\",\n \"matchedUserChannel\": \"MatchedUserChannel\",\n \"circleGirl\": \"CircleGirl\",\n \"signGirl\": \"SignGirl\",\n 
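# fields below map 1:1 from the match-service response onto report columns\n            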
\"goddessGirl\": \"GoddessGirl\",\n \"matchMode\": \"MatchMode\",\n \"createTime\": \"CreateTime\",\n \"matchAreaName\": \"MatchAreaName\",\n \"target\": \"Target\",\n \"matchUserPay\": \"MatchUserPay\"\n }\n \n '''\n append a row to DataFrame.\n support multi-threading\n '''\n def append(self,row,sleep_time=1.5):\n data = copy.deepcopy(row)\n time.sleep(sleep_time)\n self.get_remote(data)\n if self.__row_check(data):\n self.__lock.acquire()\n self.__df=self.__df.append(DataFrame(data,index=[0]), ignore_index=True,sort=False)\n self.__lock.release()\n else:\n logger.error(\"Invalid row : %s\"%str(row))\n \n def get_remote(self,row):\n if row[\"MatchedUserID\"] and [\"UserID\"]:\n try:\n logger.info(\"start request [%s]\"%server_url.render(server=settings.server,UserID = row[\"UserID\"],MatchedUserID = row[\"MatchedUserID\"]))\n req = self.__httpclient.request(\"GET\",server_url.render(server=settings.server, UserID = row[\"UserID\"], MatchedUserID = row[\"MatchedUserID\"]))\n remote_user_info = json.loads(req.data.decode(\"utf-8\"))\n logger.info(remote_user_info)\n self.check_response(remote_user_info)\n for item in self.__resp_data_row_map:\n# logger.info(\"get response key[%s]\"%item)\n if hasattr(response_data_map, item):\n row[self.__resp_data_row_map[item]] = getattr(response_data_map, item)(remote_user_info[item])\n else:\n logger.warning(\"invalid column name %s\")\n except Exception as e:\n logger.error(\"get remote user information error. reason: %s\"%e)\n \n def check_response(self,data):\n for key in self.__resp_data_row_map:\n if key not in data:\n logger.warn(\"field '%s' is not in response data,will set to null.\"%key)\n data[key]=None\n \n def __row_check(self,row):\n if isinstance(row, dict):\n for key in row:\n if key in self.__columns:\n pass\n else :\n return False\n else:\n return False\n return True\n \n @property\n def to_dataframe(self):\n return self.__df\n \n @property\n def columns(self):\n return self.__columns\n \n def average(self,account,column):\n return self.__df.loc[self.__df[\"Account\"]==account][column].mean()\n \n def count(self,account,column,condition):\n return self.__df.loc[self.__df[\"Account\"]==account].loc[self.__df[column]==condition][column].count()\n \n def sum(self,account,column):\n return (self.__df.loc[self.__df[\"Account\"]==account])[column].sum()\n \n def save_to_excel(self,file_name):\n report_writer = ExcelWriter(file_name)\n self.__df.loc[self.__df[\"MatchedUserID\"]>500000000,[\"TrueUser\"]] = \"否\"\n self.__df.loc[self.__df[\"MatchedUserID\"]<500000000,[\"TrueUser\"]] = \"是\"\n self.__df.to_excel(excel_writer=report_writer, sheet_name=\"Detail\", encoding=\"gbk\")\n #\"MeanSpacingTime\"\n average_spacing_df = self.__df.loc[:,[\"Account\",\"UserID\",\"Identity\",\"ClientMatchGender\",\"ClientMatchRegion\"]].drop_duplicates()\n average_spacing_df[\"MeanMatchSpacingTime\"] = None\n average_spacing_df[\"MeanVideoSpacingTime\"] = None\n average_spacing_df[\"RequestTimesSum\"] = None\n average_spacing_df[\"MatchedUser\"] = None\n average_spacing_df[\"MatchedUserFalse\"] = None\n average_spacing_df[\"MatchedUserTrue\"] = None\n average_spacing_df[\"MatchedMale\"] = None\n average_spacing_df[\"MatchedFemale\"] = None\n average_spacing_df[\"PayUser\"] = None\n \n accounts = self.__df[\"Account\"].drop_duplicates()\n for account in accounts:\n average_spacing_df.loc[average_spacing_df[\"Account\"] == account,[\"MeanMatchSpacingTime\"]] = self.average(account,\"MatchSpacingTime\")\n average_spacing_df.loc[average_spacing_df[\"Account\"] == 
account,[\"MeanVideoSpacingTime\"]] = self.average(account,\"VideoSpacingTime\") \n average_spacing_df.loc[average_spacing_df[\"Account\"] == account,[\"RequestTimesSum\"]] = self.sum(account,\"RequestTimes\")\n average_spacing_df.loc[average_spacing_df[\"Account\"] == account,[\"MatchedUser\"]] = (self.__df.loc[self.__df[\"Account\"]==account])[\"Account\"].count()\n average_spacing_df.loc[average_spacing_df[\"Account\"] == account,[\"MatchedUserFalse\"]] = (self.__df.loc[self.__df[\"Account\"]==account])[\"MatchedUserID\"].dropna(axis=0, how='any', inplace=False).loc[self.__df[\"MatchedUserID\"]>500000000].count()\n average_spacing_df.loc[average_spacing_df[\"Account\"] == account,[\"MatchedUserTrue\"]] = (self.__df.loc[self.__df[\"Account\"]==account])[\"MatchedUserID\"].dropna(axis=0, how='any', inplace=False).loc[self.__df[\"MatchedUserID\"]<500000000].count() \n average_spacing_df.loc[average_spacing_df[\"Account\"] == account,[\"MatchedMale\"]] = (self.__df.loc[self.__df[\"Account\"]==account])[\"MatchedUserGender\"].dropna(axis=0, how='any', inplace=False).loc[self.__df[\"MatchedUserGender\"]==\"male\"].count()\n average_spacing_df.loc[average_spacing_df[\"Account\"] == account,[\"MatchedFemale\"]] = (self.__df.loc[self.__df[\"Account\"]==account])[\"MatchedUserGender\"].dropna(axis=0, how='any', inplace=False).loc[self.__df[\"MatchedUserGender\"]==\"female\"].count()\n average_spacing_df.loc[average_spacing_df[\"Account\"] == account,[\"PayUser\"]] = (self.__df.loc[self.__df[\"Account\"]==account])[\"MatchUserPay\"].dropna(axis=0, how='any', inplace=False).loc[self.__df[\"MatchUserPay\"]==\"付费\"].count()\n \n average_spacing_df.reset_index(drop=True).to_excel(excel_writer=report_writer, sheet_name=\"statistics\", encoding=\"gbk\") \n report_writer.save()\n \nclass PornographicClosureReport():\n ''''''\n def __init__(self):\n self.__columns = [\n \"Account\",\n \"UserID\",\n \"Identity\",\n \"VideoStartTime\",\n \"FirstSnapshortTime\",\n \"LastSnapshotTime\",\n \"VideoTimes\",\n \"SnapshotTimes\",\n \"IsClosure\",\n \"ClosureTime\",\n \"IsUnlock\",\n \"CoinsBeforeUnlock\",\n \"CoinsAfterUnlock\",\n \"ClosureTimes\",\n \"UnlockPrice\",\n \"DeductionResult\",\n \"EroticismBehavior\"\n ]\n self.__df = DataFrame(columns=self.__columns)\n self.__lock = Lock()\n self.__httpclient = settings.http_client\n self.__resp_data_row_map = {\n# \"matchStatus\": \"MatchStatus\",\n# \"matchedUserGender\": \"MatchedUserGender\",\n# \"matchedUserLevelName\": \"MatchedUserLevelName\",\n \"matchEroticismBehavior\": \"MatchedEroticismBehavior\",\n# \"matchedUserChannel\": \"MatchedUserChannel\",\n# \"circleGirl\": \"CircleGirl\",\n# \"signGirl\": \"SignGirl\",\n# \"goddessGirl\": \"GoddessGirl\",\n# \"matchMode\": \"MatchMode\",\n# \"createTime\": \"CreateTime\",\n# \"matchAreaName\": \"MatchAreaName\",\n# \"target\": \"Target\",\n# \"matchUserPay\": \"MatchUserPay\"\n }\n '''\n append a row to DataFrame.\n support multi-threading\n '''\n def append(self,row,sleep_time=1.5):\n data = copy.deepcopy(row)\n time.sleep(sleep_time)\n self.get_remote(data)\n data.pop(\"MatchedUserID\")\n if self.__row_check(data):\n self.__lock.acquire()\n self.__df=self.__df.append(DataFrame(data,index=[0]), ignore_index=True,sort=False)\n self.__lock.release()\n else:\n logger.error(\"Invalid row : %s\"%str(row))\n\n def get_remote(self,row):\n if row[\"MatchedUserID\"] and [\"UserID\"]:\n try:\n logger.info(\"start request [%s]\"%server_url.render(server=settings.server,UserID = row[\"MatchedUserID\"],MatchedUserID = 
row[\"UserID\"]))\n req = self.__httpclient.request(\"GET\",server_url.render(server=settings.server, UserID = row[\"MatchedUserID\"],MatchedUserID = row[\"UserID\"]))\n remote_user_info = json.loads(req.data.decode(\"utf-8\"))\n logger.info(remote_user_info)\n self.check_response(remote_user_info)\n row[\"EroticismBehavior\"] = remote_user_info[\"matchEroticismBehavior\"]\n except Exception as e:\n logger.error(\"get remote user information error. reason: %s\"%e)\n \n def check_response(self,data):\n for key in self.__resp_data_row_map:\n if key not in data:\n logger.warn(\"field '%s' is not in response data,will set to null.\"%key)\n data[key]=None\n\n def __row_check(self,row):\n if isinstance(row, dict):\n for key in row:\n if key in self.__columns:\n pass\n else :\n return False\n else:\n return False\n return True\n \n def save_to_excel(self,filename):\n report_writer = ExcelWriter(filename)\n self.__df.to_excel(excel_writer=report_writer, sheet_name=\"sheet1\", encoding=\"gbk\")\n report_writer.save()\n \n @property\n def to_dataframe(self):\n return self.__df\nif __name__==\"__main__\":\n pass","sub_path":"helper/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":14051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"251998422","text":"# Copyright 2021 VMware, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\nimport time\n\nimport argparse\nimport pandas as pd\nfrom github import Github\nfrom github.GithubException import RateLimitExceededException\n\n\nclass GithubDataExtractor:\n def __init__(self, access_token):\n \"\"\"\n Constructor requires an access token to start a Github session, and specifies instance variables\n \"\"\"\n self.g_ses = Github(access_token) # Github object is used as a channel to the Github API\n self.current_repo = None # Current Opended Repo\n self.reaction_flag = False\n self.repo_opened = False # Flag to store state of repo as opened (True) or closed (False)\n self.repo_name = \"\"\n self.organization = \"\"\n\n def openRepo(self, organization, repo_name):\n \"\"\"\n Method to open (access) repository with given organization and repository name (reponame).\n Parameters: username - owner of the repository, repo_name - name of repo to be opened\n \"\"\"\n self.current_repo = self.g_ses.get_repo(organization + \"/\" + repo_name) # Open repo\n self.repo_opened = True\n self.repo_name = repo_name\n self.organization = organization\n print(\"Opened repo {} - {}\".format(repo_name, organization))\n\n def getAllPulls(self, name=\"\", reaction_flag=False, export_to_csv=True):\n \"\"\"\n Method to form a dataframe containing pull information. Parameters: name - name of exported csv file,\n export - if the dataframe should be exported to csv. 
Returns: Dataframe with pull data\n \"\"\"\n self.reaction_flag = reaction_flag\n if self.repo_opened: # Verify if a repo has been opened\n pull_data = []\n pull_data.extend(self.getPullsByState('open')) # Access all open pulls\n pull_data.extend(self.getPullsByState('closed')) # Access all closed pulls\n pull_df = pd.DataFrame(pull_data) # Convert list of dictionaries to dataframe\n if export_to_csv: # Export to csv if flag is true\n if not os.path.exists('exports'):\n os.mkdir('exports')\n if name == \"\": # Check if name is provided\n pull_df.to_csv(\"exports/\" + self.organization + \"_\" + self.repo_name + \".csv\")\n else:\n pull_df.to_csv(\"exports/\" + name)\n return pull_df\n print(\"Please open a Repo\")\n\n def getPullsByState(self, state):\n \"\"\"\n Extract pulls with given state. Parameters: state - state of the pull (open or closed)\n Return: list of dictionaries containing data regardining each pull\n \"\"\"\n pull_data = []\n try: # Call the Github api to get all pulls\n pulls = self.current_repo.get_pulls(state=state, sort='create')\n except RateLimitExceededException as e: # If token has reached limit\n print(\"Token rate limit exceeded. Waiting for 1 hour\", e)\n time.sleep(60 * 60) # Wait for 1 hour (time required to reset)\n pulls = self.current_repo.get_pulls(state=state, sort='create') # Get all pulls from Github API again\n # Iterate over each pull\n for pull in pulls:\n try: # Call to extract features for each pull\n pull_data.append(self.getPullFeatures(pull))\n except RateLimitExceededException as e: # getPullFeatures accesses the Github API so provisino for rate limit\n print(\"Token rate limit exceeded. Waiting for 1 hour\", e)\n time.sleep(60 * 60)\n pull_data.append(self.getPullFeatures(pull))\n return pull_data\n\n def listOfComments(self, comments):\n \"\"\"\n Method to form a list of json strings rerpesenting comments (reviews or issue).\n Parameters: comments - list of comment objects. Returns: List of json strings\n \"\"\"\n list_comments = []\n\n # Iterate over each comment\n for comment in comments:\n # Record reactions if Flag is True\n if self.reaction_flag:\n reactions = []\n raw_reactions = []\n\n try: # Call to extract all raw reactions\n raw_reactions = comment.get_reactions()\n except RateLimitExceededException as e: # get_reactions accesses the Github API so provisino for rate limit\n print(\"Token rate limit exceeded. 
Waiting for 1 hour\", e)\n time.sleep(60 * 60)\n raw_reactions = comment.get_reactions()\n\n for reaction in raw_reactions:\n # Extract information regarding each reaction\n try:\n reactions.append((reaction.content, str(reaction.created_at), reaction.user.name))\n except:\n reactions.append((reaction.content, str(reaction.created_at), None))\n\n # Extract information regarding each comment\n try:\n list_comments.append(\n str({\"Created_At\": str(comment.created_at), \"User\": comment.user.name, \"Body\": comment.body,\n \"Updated_At\": str(comment.updated_at), \"Reactions\": reactions}))\n except:\n list_comments.append(str({\"Created_At\": str(comment.created_at), \"User\": None, \"Body\": comment.body,\n \"Updated_At\": str(comment.updated_at), \"Reactions\": reactions}))\n else:\n try:\n list_comments.append(\n str({\"Created_At\": str(comment.created_at), \"User\": comment.user.name, \"Body\": comment.body,\n \"Updated_At\": str(comment.updated_at)}))\n except:\n list_comments.append(str({\"Created_At\": str(comment.created_at), \"User\": None, \"Body\": comment.body,\n \"Updated_At\": str(comment.updated_at)}))\n return list_comments\n\n def getPullFeatures(self, pull):\n \"\"\"\n Method to get all data for a particular pull. Parameters: pull - object representing a pull\n Returns: dictionary containing all data of a pull\n \"\"\"\n pull_dict = {}\n pull_dict[\"Number\"] = pull.number\n pull_dict[\"Title\"] = pull.title\n try:\n pull_dict[\"User\"] = pull.user.name\n except:\n pull_dict[\"User\"] = None\n pull_dict[\"URL\"] = pull.url\n pull_dict[\"State\"] = pull.state\n pull_dict[\"Body\"] = pull.body\n pull_dict[\"Additions\"] = pull.additions\n pull_dict[\"Deletions\"] = pull.deletions\n pull_dict[\"Comments_Num\"] = pull.comments\n pull_dict[\"Commits_Num\"] = pull.commits\n pull_dict[\"Created_At\"] = pull.created_at\n pull_dict[\"Closed_At\"] = pull.closed_at\n pull_dict[\"Merged\"] = pull.merged\n pull_dict[\"Merged_At\"] = pull.merged_at\n try: # Attribute merged_by might be none if it has not been merged\n pull_dict[\"Merged_By\"] = pull.merged_by.name\n except: # If not merged then none\n pull_dict[\"Merged_By\"] = None\n pull_dict[\"Review_Comments_Num\"] = pull.review_comments\n pull_dict[\"Updated_At\"] = pull.updated_at\n pull_dict[\"Comments\"] = self.listOfComments(pull.get_issue_comments())\n pull_dict[\"Review_Comments\"] = self.listOfComments(pull.get_review_comments())\n\n return pull_dict\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Create csv for all pulls in repo')\n parser.add_argument('organization', help='Organization the repo belongs to.')\n parser.add_argument('reponame', help='Name of repo')\n parser.add_argument('-reactions', action='store_true', default=False, help='Flag to extract reactions')\n parser.add_argument('--filename', help='Name of file')\n # parser.add_argument('accesstoken', help='Github access token')\n\n args = parser.parse_args()\n ACCESS_TOKEN = os.environ[\"GITACCESS\"] # Access Github token from environment for security purposes\n extractor = GithubDataExtractor(ACCESS_TOKEN) # Create object\n extractor.openRepo(args.organization, args.reponame) # Open repo\n\n # Extract all pulls and export them to .csv\n if args.filename:\n extractor.getAllPulls(args.filename, args.reactions)\n else:\n extractor.getAllPulls(\"\", 
args.reactions)\n","sub_path":"ml-conversational-analytic-tool/githubDataExtraction.py","file_name":"githubDataExtraction.py","file_ext":"py","file_size_in_byte":8484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"388875066","text":"#!/usr/bin/env python3\nimport operator\nimport pathlib\n\nimport allel\nimport numpy as np\nimport stdpopsim\n\nGFF_URL = (\n \"ftp://ftp.ensembl.org/pub/release-104/gff3/homo_sapiens/\"\n \"Homo_sapiens.GRCh38.104.gff3.gz\"\n)\nGFF_SHA256 = \"313ad46bd4af78b45b9f5d8407bbcbd3f87f4be0747060e84b3b5eb931530ec1\"\nOUTPUT_DIRECTORY = \"./intervals/HomSap\"\nCHROM_IDS = [chrom.id for chrom in stdpopsim.get_species(\"HomSap\").genome.chromosomes]\n\n\ndef merged(intervals, *, closed: bool):\n \"\"\"\n Merge overlapping and adjacent intervals.\n\n :param intervals: An iterable of (start, end) coordinates.\n :param bool closed: If True, [start, end] coordinates are closed,\n so [1, 2] and [3, 4] are adjacent intervals and will be merged.\n If False, [start, end) coordinates are half-open,\n so [1, 2) and [3, 4) are not adjacent and will not be merged.\n \"\"\"\n\n def iter_merged(intervals, *, closed: bool):\n \"\"\"\n Generate tuples of (start, end) coordinates for merged intervals.\n \"\"\"\n intervals = sorted(intervals, key=operator.itemgetter(0))\n if len(intervals) == 0:\n return\n start, end = intervals[0]\n for a, b in intervals[1:]:\n assert a <= b\n if a > end + closed:\n # No intersection with the current interval.\n yield start, end\n start, end = a, b\n else:\n # Intersects, or is contiguous with, the current interval.\n end = max(end, b)\n yield start, end\n\n return list(iter_merged(intervals, closed=closed))\n\n\ndef test_merged():\n rng = np.random.default_rng(1234)\n\n for closed in (True, False):\n assert merged([], closed=closed) == []\n assert merged([(10, 20), (15, 30)], closed=closed) == [(10, 30)]\n assert merged([(10, 20), (20, 30)], closed=closed) == [(10, 30)]\n assert merged([(10, 20), (22, 30)], closed=closed) == [(10, 20), (22, 30)]\n assert merged([(10, 20), (12, 16)], closed=closed) == [(10, 20)]\n assert merged([(12, 16), (10, 20), (13, 15)], closed=closed) == [(10, 20)]\n\n # Check merging is idempotent.\n for _ in range(100):\n starts = rng.integers(1, 1000, size=100)\n ends = starts + rng.integers(1, 100, size=len(starts))\n merged_intervals = merged(zip(starts, ends), closed=closed)\n assert merged_intervals == merged(merged_intervals, closed=closed)\n\n assert merged([(10, 20), (21, 30)], closed=True) == [(10, 30)]\n assert merged([(10, 20), (21, 30)], closed=False) == [(10, 20), (21, 30)]\n\n\ndef gff_recarray_to_stdpopsim_intervals(gff):\n \"\"\"\n Merge overlapping intervals and convert coordinates. GFF intervals are\n 1-based [i,j], but stdpopsim intervals are 0-based [i-1,j).\n \"\"\"\n intervals = np.array(merged(zip(gff.start, gff.end), closed=True))\n intervals[:, 0] = intervals[:, 0] - 1\n return intervals\n\n\ndef get_gff_recarray(url, sha256):\n local_path = pathlib.Path(url).name\n\n if not pathlib.Path(local_path).exists():\n print(f\"downloading {url}\")\n stdpopsim.utils.download(url, local_path)\n\n print(\"checking sha256\")\n local_sha256 = stdpopsim.utils.sha256(local_path)\n if local_sha256 != sha256:\n print(\n f\"{local_path}: sha256: expected {sha256}, but found {local_sha256}. 
\"\n \"Delete the file to download it again.\"\n )\n exit(1)\n\n print(f\"loading {local_path} into numpy recarray\")\n gff = allel.gff3_to_recarray(local_path)\n return gff\n\n\nif __name__ == \"__main__\":\n gff = get_gff_recarray(GFF_URL, GFF_SHA256)\n\n print(\"extracting exons\")\n exons = gff[\n np.where(np.logical_and(gff.source == \"ensembl_havana\", gff.type == \"exon\"))\n ]\n\n out_dir = pathlib.Path(OUTPUT_DIRECTORY)\n out_dir.mkdir(parents=True, exist_ok=True)\n print(f\"merging overlapping regions and dumping to {out_dir}/\")\n\n for chrom_id in CHROM_IDS:\n chrom_exons = exons[np.where(exons.seqid == chrom_id)]\n if len(chrom_exons) == 0:\n continue\n intervals = gff_recarray_to_stdpopsim_intervals(chrom_exons)\n # double check that the intervals can be used in stdpopsim\n stdpopsim.utils.check_intervals_validity(intervals)\n\n out_file = out_dir / f\"ensembl_havana_exons_{chrom_id}.txt\"\n np.savetxt(out_file, intervals, fmt=\"%d\")\n","sub_path":"get_intervals.py","file_name":"get_intervals.py","file_ext":"py","file_size_in_byte":4401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"464420472","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 4 16:28:38 2020\n\n@author: Brendan Non-Admin\n\"\"\"\n\nimport pandas as pd\n\ndef scrape_schiller_pe_ratio_data():\n url = 'https://www.multpl.com/shiller-pe/table/by-year'\n \n df = (pd.read_html(url, converters={'Date': pd.to_datetime})[0]\n .rename(columns={'Value Value': 'Schiller PE Ratio'})\n .assign(Year=lambda x: x.Date.dt.year)\n .to_dict(orient='records')\n )\n \n return df","sub_path":"metis_app/api/schiller_pe_ratio.py","file_name":"schiller_pe_ratio.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"417179618","text":"from PyQt5 import QtCore, QtGui, QtWidgets, QtWebEngineWidgets\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5.uic import loadUi\r\nimport os\r\nimport sys\r\nimport makememap\r\n\r\nclass Ui_MainWindow(QMainWindow):\r\n def setupUi(self, MainWindow):\r\n ##makememap.change_color_to_time()\r\n if not MainWindow.objectName():\r\n MainWindow.setObjectName(u\"MainWindow\")\r\n \r\n MainWindow.setEnabled(True)\r\n MainWindow.resize(900, 600)\r\n MainWindow.setMouseTracking(False)\r\n self.centralwidget = QWidget(MainWindow)\r\n self.centralwidget.setObjectName(u\"centralwidget\")\r\n \r\n self.verticalLayoutWidget = QWidget(self.centralwidget)\r\n self.verticalLayoutWidget.setObjectName(u\"verticalLayoutWidget\")\r\n self.verticalLayoutWidget.setGeometry(QtCore.QRect(30, 20, 531, 521))\r\n self.verticalLayout = QVBoxLayout(self.verticalLayoutWidget)\r\n self.verticalLayout.setObjectName(u\"verticalLayout\")\r\n self.verticalLayout.setContentsMargins(0, 0, 0, 0)\r\n\r\n # Add Map viewer to Verical Layout\r\n self.webEngineView = QtWebEngineWidgets.QWebEngineView(self.centralwidget)\r\n url = QtCore.QUrl('http://127.0.0.1:8050/')\r\n ##QtCore.QUrl().fromLocalFile(os.path.split(os.path.abspath(__file__))[0]+r'index.html'\r\n self.webEngineView.load(url)\r\n self.verticalLayout.addWidget(self.webEngineView)\r\n\r\n # Pressing button shows a dialog box\r\n self.pushButton = QtWidgets.QPushButton(self.centralwidget)\r\n self.pushButton.setObjectName(u\"pushButton\")\r\n self.pushButton.setGeometry(QtCore.QRect(600, 250, 200, 30))\r\n\r\n # self.horizontalSlider = QSlider(self.centralwidget)\r\n # 
self.horizontalSlider.setObjectName(u\"horizontalSlider\")\r\n # self.horizontalSlider.setGeometry(QtCore.QRect(590, 40, 261, 31))\r\n # self.horizontalSlider.setOrientation(QtCore.Qt.Horizontal)\r\n\r\n self.dateEdit = QDateTimeEdit(self.centralwidget)\r\n self.dateEdit.setGeometry(600, 40, 200, 30)\r\n self.dateEdit.setMinimumDate(QtCore.QDate.currentDate().addDays(-365))\r\n self.dateEdit.setMaximumDate(QtCore.QDate.currentDate().addDays(365))\r\n self.dateEdit.setDisplayFormat(\"dd.MM.yyyy hh:mm\")\r\n \r\n self.dateEdit = QDateTimeEdit(self.centralwidget)\r\n self.dateEdit.setGeometry(600, 100, 200, 30)\r\n self.dateEdit.setMinimumDate(QtCore.QDate.currentDate().addDays(-365))\r\n self.dateEdit.setMaximumDate(QtCore.QDate.currentDate().addDays(365))\r\n self.dateEdit.setDisplayFormat(\"dd.MM.yyyy hh:mm\")\r\n\r\n self.pushButton_2 = QPushButton(self.centralwidget)\r\n self.pushButton_2.setObjectName(u\"pushButton_2\")\r\n self.pushButton_2.setGeometry(QtCore.QRect(600, 300, 200, 30))\r\n self.comboBox = QComboBox(self.centralwidget)\r\n self.comboBox.addItem(\"\")\r\n self.comboBox.addItem(\"\")\r\n self.comboBox.addItem(\"\")\r\n self.comboBox.setObjectName(u\"comboBox\")\r\n self.comboBox.setGeometry(QtCore.QRect(600, 370, 200, 30))\r\n self.pushButton_3 = QPushButton(self.centralwidget)\r\n self.pushButton_3.setObjectName(u\"pushButton_3\")\r\n self.pushButton_3.setGeometry(QtCore.QRect(600, 509, 200, 30))\r\n MainWindow.setCentralWidget(self.centralwidget)\r\n self.pushButton.raise_()\r\n self.verticalLayoutWidget.raise_()\r\n # self.horizontalSlider.raise_()\r\n self.pushButton_2.raise_()\r\n self.comboBox.raise_()\r\n self.pushButton_3.raise_()\r\n self.menubar = QMenuBar(MainWindow)\r\n self.menubar.setObjectName(u\"menubar\")\r\n self.menubar.setGeometry(QtCore.QRect(0, 0, 955, 22))\r\n MainWindow.setMenuBar(self.menubar)\r\n self.statusbar = QStatusBar(MainWindow)\r\n self.statusbar.setObjectName(u\"statusbar\")\r\n MainWindow.setStatusBar(self.statusbar)\r\n QWidget.setTabOrder(self.pushButton_2, self.comboBox)\r\n # QWidget.setTabOrder(self.comboBox, self.horizontalSlider)\r\n # QWidget.setTabOrder(self.horizontalSlider, self.pushButton_3)\r\n QWidget.setTabOrder(self.pushButton_3, self.pushButton)\r\n\r\n self.retranslateUi(MainWindow)\r\n self.pushButton.clicked.connect(self.show_dialog)\r\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\r\n \r\n # setupUi\r\n\r\n def show_dialog(self):\r\n msg = QMessageBox()\r\n msg.setWindowTitle(\"Confirmation\")\r\n msg.setText(\"Are you sure?\")\r\n msg.setStandardButtons(QMessageBox.Yes|QMessageBox.No)\r\n\r\n x = msg.exec_()\r\n\r\n def selectionchange(self, i):\r\n ## S T R\r\n if i == 0:\r\n makememap.change_color_to_time()\r\n if i == 1:\r\n makememap.change_color_to_speed()\r\n if i == 2:\r\n makememap.change_color_to_risk()\r\n \r\n self.webEngineView.reload()\r\n\r\n def retranslateUi(self, MainWindow):\r\n MainWindow.setWindowTitle(QtCore.QCoreApplication.translate(\"MainWindow\", u\"MainWindow\", None))\r\n self.pushButton.setText(QtCore.QCoreApplication.translate(\"MainWindow\", u\"Toggle Outlier Selection\", None))\r\n self.pushButton_2.setText(QtCore.QCoreApplication.translate(\"MainWindow\", u\"Toggle Trajectory\", None))\r\n self.comboBox.setItemText(0, QtCore.QCoreApplication.translate(\"MainWindow\", u\"Time\", None))\r\n self.comboBox.setItemText(1, QtCore.QCoreApplication.translate(\"MainWindow\", u\"Speed\", None))\r\n self.comboBox.setItemText(2, QtCore.QCoreApplication.translate(\"MainWindow\", u\"Risk\", 
None))\r\n self.comboBox.currentIndexChanged.connect(self.selectionchange)\r\n\r\n self.pushButton_3.setText(QtCore.QCoreApplication.translate(\"MainWindow\", u\"Refresh\", None))\r\n # retranslateUi\r\n\r\nif __name__ == \"__main__\":\r\n\r\n app = QtWidgets.QApplication(sys.argv)\r\n Window = QtWidgets.QMainWindow()\r\n ui = Ui_MainWindow()\r\n ui.setupUi(Window)\r\n Window.show()\r\n app.exec_()\r\n","sub_path":"mockdesign.py","file_name":"mockdesign.py","file_ext":"py","file_size_in_byte":5996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"30356727","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy import integrate\r\nimport scipy.constants as const\r\nimport math\r\n#-------Cluster decay investigation within a modified Woods-Saxon potential------\r\n#----------------------------------------------------------------------------------------DON'T FORGET TO COMMENT\r\nprint(\"here is the data: \")\r\nprint(\"Ra(A=222; Z=88) --> Pb(A=208; Z=82) + C(A=14; Z=6) \")\r\n\r\n#constant\r\nhbareV = 6.582119569*(10**-16)\r\nheV = 4.135667696*(10**-15)\r\nc = 2.997924588*(10**8)\r\n\r\nNdata = 3000\r\nbase = 0\r\n#Make an array for plotting the data\r\nx = np.empty(Ndata+1)\r\ng=0\r\nx[g] = 4.5\r\nwhile(g=0):\r\n if(locked==0):\r\n locked=1\r\n iroot=iroot+1\r\n Mroot[iroot] = x[i]\r\n if (y[i]-Qtheory<0)or((x[i]>Mroot[1]+2)&(x[i]<(Mroot[1]+5))):\r\n locked=0\r\n i = i+1\r\n \r\n Rin=Mroot[1]\r\n Rout=Mroot[2]\r\n result[base]=lnT(Ap, Zp, Ad, Zd, Ac, Zc, 0, Rin, Rout)*(math.log(math.e,10))\r\n arrayerror[base]=error(base, result[base])\r\n base=base+1\r\n\r\ni=0\r\nwhile(i<22):\r\n print('Error',i,' = ',arrayerror[i],'; ')\r\n i=i+1\r\nprint(\"Error average = \", average(arrayerror))\r\n#--------------------------MAIN---------------------- \r\n\r\n\r\n#Below this point is for plotting only-------------------------------\r\nbase=1\r\nif(base==0):\r\n Ap = 222; Zp = 88; Ac = 14; Zc = 6;\r\n Ad = 208; Zd = 82; Qtheory = 33.049;\r\nif(base==1):\r\n #base 26206\r\n Ap = 224; Zp = 88; Ac = 14; Zc = 6\r\n Ad = 210; Zd = 82; Qtheory = 30.535;\r\nif(base==2):\r\n #base 99\r\n Ap = 226; Zp = 88; Ac = 14; Zc = 6\r\n Ad = 212; Zd = 82; Qtheory = 28.196;\r\nyVc = np.empty(Ndata+1)\r\nyVo = np.empty(Ndata+1)\r\nyVn = np.empty(Ndata+1)\r\n\r\n#Fill the Qtheory(qq) matrix to form a straight line\r\nqq = np.empty(Ndata+1)\r\ni = 0\r\nwhile(i Pb(A=208; Z=82) + C(A=14; Z=6)')\r\nplt.plot(x,qq,label='Q value')\r\nplt.plot(x,y,label='Interaction Potential')\r\nplt.plot(x,yVo,label='Vo constant')\r\nplt.plot(x,yVc,label='Vc Coulomb Potential')\r\nplt.plot(x,yVn,label='Vn Potential')\r\nplt.legend()\r\nplt.ion()\r\nplt.show()\r\nplt.pause(0.001)\r\n\r\nprint(Mroot)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"RBL benchmark paramset2 5base.py","file_name":"RBL benchmark paramset2 5base.py","file_ext":"py","file_size_in_byte":9233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"487867024","text":"from pathlib import Path\n\nfrom sphinx_testing import with_app\n\n\n@with_app(buildername=\"latex\", srcdir=\"doc_test/doc_build_latex\")\ndef test_doc_build_latex(app, status, warning):\n app.build()\n\n latex_file = Path(app.outdir, \"needstestdocs.tex\")\n assert latex_file\n\n latex_content = latex_file.read_text()\n\n # Check table generated by Sphinxneeds has correct caption\n assert (\n \"\\\\sphinxcaption{Table from sphinxneeds\\\\sphinxhyphen{}contrib \\\\textquotesingle\"\n 
\"{}needtable\\\\textquotesingle{} directive}\" in latex_content\n )\n","sub_path":"tests/test_doc_build_latex.py","file_name":"test_doc_build_latex.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"339306479","text":"import pandas as pd\nfrom sklearn.naive_bayes import GaussianNB\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport matplotlib\nfrom sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot as plt\nimport math\n\ndef main():\n\n filename = r'D:\\Uca\\Thesis\\NLP\\Dataset\\Wine1855_Category.csv'\n dataset = loadCSV(filename)\n dataset_train = dataset[200:1000]\n dataset_test = dataset[:7]\n #print(dataset_test)\n pro_oris = NormalDisMin(dataset_train,dataset_test)\n\n #Confusion_Matrix(pro_oris)\n\n\n#Load CSV File\ndef loadCSV(filename):\n dataset = pd.read_csv(filename)\n dataset.head()\n dataset = pd.DataFrame(dataset)\n dataset['Label'] = np.where(dataset['Score'] < 90, 0, 1)\n #Min-max normlization\n COLUMNS = dataset.shape[1]\n dataset_attributes = dataset.iloc[:,4:COLUMNS]\n normalized_df = (dataset_attributes - dataset_attributes.min()) / (dataset_attributes.max() - dataset_attributes.min())\n normalized_df['Label'] = dataset['Label']\n return normalized_df\n\n #return dataset\n\ndef Group_Class(dataset):\n class_N, class_Y = (g for _, g in dataset.groupby('Label'))\n\n # class_N, class_Y = dataset.groupby('Label')\n return [class_Y, class_N]\n\ndef Zero_Frequency_mean(dataset):\n for i in range(dataset.shape[0]-1):\n if dataset.iloc[i] == 0:\n dataset.iloc[i] = dataset.mean()\n\n return dataset\n\ndef Zero_Frequency_min(dataset):\n min = dataset[dataset > .01].min()\n for i in range(dataset.shape[0]):\n if dataset.iloc[i] == 0:\n dataset.iloc[i] = min\n\n return dataset\n\ndef NormalDisMin(train_set, test_set):\n train_set_Yes, train_set_No = Group_Class(train_set)\n train_set_Yes_X, train_set_Yes_Y = Split_X_y(train_set)\n train_set_No_X, train_set_No_Y = Split_X_y(test_set)\n\n train_set_Yes_X_mean = train_set_Yes_X.mean()\n train_set_Yes_X_var = train_set_Yes_X.var()\n train_set_No_X_mean = train_set_No_X.mean()\n train_set_No_X_var = train_set_No_X.var()\n\n\n # print(\"Mean in trainset 90+ class\")\n # print(train_set_Yes_X_mean)\n # print(\"Variance in train set 90+ Class\")\n # print(train_set_Yes_X_var)\n # print(\"Mean in trainset 90- class\")\n # print(train_set_No_X_mean)\n # print(\"Variance in train set 90- Class\")\n # print(train_set_No_X_var)\n # print()\n\n #Handle zero frequency by assign the average\n train_set_Yes_mean = Zero_Frequency_min(train_set_Yes_X_mean)\n train_set_Yes_var = Zero_Frequency_min(train_set_Yes_X_var)\n train_set_No_mean = Zero_Frequency_min(train_set_No_X_mean)\n train_set_No_var = Zero_Frequency_min(train_set_No_X_var)\n\n # print(\"Mean in trainset 90+ class\")\n # print(train_set_Yes_X_mean)\n # print(\"Variance in train set 90+ Class\")\n # print(train_set_Yes_X_var)\n # print(\"Mean in trainset 90- class\")\n # print(train_set_No_X_mean)\n # print(\"Variance in train set 90- Class\")\n # print(train_set_No_X_var)\n\n ROWS = test_set.shape[0]\n COLS = test_set.shape[1]-1\n pro_yes = [[0] * COLS] * ROWS\n pro_yes = pd.DataFrame(pro_yes, columns=test_set.columns[0:13], dtype=float)\n pro_no = [[0] * COLS] * ROWS\n pro_no = pd.DataFrame(pro_no, columns=test_set.columns[0:13], dtype=float)\n for i in range(test_set.shape[0]):\n for j in range(COLS):\n pro_yes.iloc[i][j] = 
(1/(math.sqrt(2*math.pi*train_set_Yes_var[j])))*(math.exp(-((test_set.iloc[i][j]-train_set_Yes_mean[j])**2)/(2*train_set_Yes_var[j])))\n pro_no.iloc[i][j] = (1 / (math.sqrt(2 * math.pi * train_set_No_var[j]))) * (math.exp(-((test_set.iloc[i][j] - train_set_No_mean[j]) ** 2) / (2 * train_set_No_var[j])))\n\n pro_label_yes = pro_yes.product(axis =1)\n pro_label_no = pro_no.product(axis =1)\n\n Total_sample = train_set.shape[0]\n\n num_Y = train_set_Yes.shape[0]\n num_N = train_set_No.shape[0]\n # Calculate class distrubution\n pro_Y = float(num_Y / Total_sample)\n pro_N = float(num_N / Total_sample)\n\n\n pros_Y = pro_label_yes * pro_Y\n pro_Ori = pd.DataFrame(pros_Y.values, columns=['Probability_Y'])\n pros_N = pro_N *pro_label_no\n pro_Ori['Probability_N'] = pros_N.values\n\n # Classify the test dataset\n test_size = pros_Y.shape[0]\n sample_class = [99] * test_size\n for i in range(test_size):\n if (pro_Ori.iloc[i, 0] >= pro_Ori.iloc[i, 1]):\n sample_class[i] = 1\n else:\n sample_class[i] = 0\n\n\n pro_Ori['Predict_Class'] = sample_class\n pro_Ori['Actual_Labels'] = test_set['Label'].values\n return pro_Ori\n\ndef Confusion_Matrix(pro_Ori):\n tp = 0\n fn = 0\n fp = 0\n tn = 0\n for i in range(pro_Ori.shape[0]):\n if((pro_Ori.iloc[i, 2] == 1)and (pro_Ori.iloc[i,3] == 1)):\n tp = tp+1\n elif((pro_Ori.iloc[i, 2] == 0)and (pro_Ori.iloc[i,3] == 1)):\n fn = fn+1\n elif ((pro_Ori.iloc[i, 2] == 1) and (pro_Ori.iloc[i, 3] == 0)):\n fp = fp+1\n else:\n tn = tn+1\n\n array = {'Predicted Class = 90+' :[tp,fp],\n 'Predicted Class = 90-' : [fn,tn]}\n\n actual_Class = ['Actual Class = 90+ ', 'Actual Class = 90-']\n#Put two list into a dataframe\n matrix = pd.DataFrame(array,actual_Class)\n #matrix.to_csv(r'D:\\Spring2019\\DataMining\\Output\\Con_Matrix.csv')\n print(\"Confusion Matrix: \")\n print(matrix)\n precision = 0\n recall = 0\n acc = float((tp+tn)/(tp+fn+fp+tn))\n print(\"Accuracy: {:.4f}\".format(acc))\n if(tp != 0):\n precision = float(tp/(tp+fp))\n recall = float(tp/(tp+fn))\n print(\"Precision: {:.4f}\".format(precision))\n print(\"Recall: {:.4f}\".format(recall))\n print()\n\n return acc, precision, recall\n\ndef Split_X_y(dataset):\n X = dataset.iloc[:, 0:13]\n y = dataset.iloc[:, 13]\n\n return X, y\n\n#main()\n","sub_path":"NLP/GaussianNB.py","file_name":"GaussianNB.py","file_ext":"py","file_size_in_byte":5777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"325869003","text":"from scraper import GradesPage\nfrom point_values import point_values\n\n\nclass UserData(object):\n\t'''Returns an object storing all class grades data as well\n\t as info for a specific user.'''\n\t\n\tdef __init__(self, name):\n\t\tself.name = name\n\t\tself.page = GradesPage()\n\t\tself.user_rows = self.page.grade_rows_table[3:]\n\t\tself.ranges = self.page.grade_ranges_table\n\t\tself.forum_scores = self.page.forum_scores\n\n\t@property\n\tdef user_row(self):\n\t\tfor row in self.user_rows:\n\t\t\tif self.name == row[0]:\n\t\t\t\tstart = 15\n\t\t\t\tstop = start + len(self.forum_scores)\n\t\t\t\trow[start: stop] = self.forum_scores\n\t\t\t\treturn row\n\n\tdef check_columns(self, start, stop):\t\n\t\tcount = 0\n\t\twhile start < stop:\n\t\t\ttry:\n\t\t\t\ttest = all(row[start] == 0 for row in self.user_rows)\n\t\t\t\tif test:\n\t\t\t\t\treturn count\n\t\t\t\tcount += 1\n\t\t\t\tstart += 1\n\t\t\texcept IndexError:\n\t\t\t\treturn count\n\t\treturn count\n\n\tdef completed_assignments(self):\n\t\treturn (\n\t\tself.check_columns(2, 
12),\n\t\tself.check_columns(12, 15),\n\t\tself.check_columns(15, 19),\n\t\tself.check_columns(19, 29),\n\t\tself.check_columns(29, 30)\n\t\t)\n\n\t@property\n\tdef possible_points(self):\n\t\tquizzes, tests, forums, labs, project = self.completed_assignments()\n\n\t\treturn int(\n\t\t\tpoint_values['Quiz'] * quizzes +\n\t\t\tpoint_values['Test'] * tests +\n\t\t\tpoint_values['posts'] * forums +\n\t\t\tpoint_values['Lab'] * labs +\n\t\t\tpoint_values['Project'] * project)\n\n\t@property\t\n\tdef current_assignments(self):\n\t\tassignment_strings = (\n\t\t\t'Quiz', 'Test', 'Forum', \n\t\t\t'Lab', 'Project'\n\t\t)\n\t\tassignment_worth = (3, 30, 20, 30, 60)\n\t\tassignment_nums = self.completed_assignments()\n\t\t\n\t\trow = self.user_row\n\t\t\n\t\tuser_scores = [\n\t\t\trow[2:12], row[12:15], row[15: 19],\n\t\t\trow[19: 29], row[29: 30]\n\t\t]\n\n\t\tcombined_data = zip(assignment_strings, assignment_nums,\n\t\t\tuser_scores, assignment_worth)\n\t\t\n\t\treturn combined_data\t\n\n\n","sub_path":"lib/formatter.py","file_name":"formatter.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"160704658","text":"import argparse\nimport os\nfrom scipy.stats import pearsonr\nimport numpy as np\nimport pickle\nimport itertools\nfrom itertools import permutations\nfrom copy import deepcopy\nfrom collections import defaultdict\nimport subprocess\n\n'''Script that creates predictions by averaging predictions of models'''\n\ndef create_arg_parser():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"-d\",\"--dev_pred\", required=True, type=str, help=\"Dev predictions folder\")\n\tparser.add_argument(\"-t\",\"--test_pred\", required=True, type=str, help=\"Test predictions folder\")\n\tparser.add_argument(\"-g\", \"--dev_gold\", required=True, type=str, help=\"Dir with gold dev data\")\n\tparser.add_argument(\"-od\", \"--orig_dev\", required=True, type=str, help=\"Dir with original test files\")\n\tparser.add_argument(\"-ot\", \"--orig_test\", required=True, type=str, help=\"Dir with original test files\")\n\tparser.add_argument(\"-o\",\"--out_dir\", required=True, type=str, help=\"Dir to write results to\")\n\tparser.add_argument(\"-ta\", \"--task_name\", choices = ['EI-reg', 'EI-oc', 'V-reg', 'V-oc'], type=str, help=\"Name of task - choose EI-reg, EI-oc, V-reg or V-oc\")\n\tparser.add_argument(\"-diff\", \"--difference\", default = 0.002, type=float, help=\"The amount the F-score should increase by before we discard a model (default 0.002)\")\n\tparser.add_argument(\"-c\", \"--clf\", action = 'store_true', help=\"Add this if it is a classification task -- important!\")\n \n\targs = parser.parse_args()\n\treturn args\n\n\ndef calculate_pearson(scores, real_y, weights=None, options=None, old_options=None):\n\tpred_y = []\n\n\tfor instance in scores:\n\t\tpred_y.append(sum(instance)/len(instance))\n\n\tif args.clf:\n\t\tscaled_pred_y = []\n\t\tfor item in pred_y:\n\t\t\tscaled_item = min(options, key=lambda x:abs(x-item))\n\t\t\tscaled_pred_y.append(old_options[options.index(scaled_item)])\n\t\tscore = round(pearsonr(scaled_pred_y, real_y)[0],4)\n\t\treturn score\n\n\telse:\n\t\tscore = round(pearsonr(pred_y, real_y)[0],4)\n\n\treturn score\n\n\ndef remove_if_better(predictions, gold_labels, original_score, model_order, extra_diff, options=None, old_options=None):\n\t'''Function that tries to remove a model from the averaging and see if it gets better. 
Do this a lot of times, e.g.\n\t first remove the worst model, then try again and see if we can remove one in the current set, etc'''\n\tprint (\"Try to remove models from averaging to see if score improves:\\n\")\n\tbest_diff = 0\n\tworst_model = -1\n\titerations = len(predictions[0])\n\tfor iteration in range(iterations):\n\t\tfor skip_idx in range(len(predictions[0])):\n\t\t\tnew_preds = [x[0:skip_idx] + x[skip_idx + 1:] for x in predictions]\n\t\t\tcur_score = calculate_pearson(new_preds, gold_labels, options=options, old_options = old_options)\n\t\t\t## Check if we score worse, and also if this model is really the worst\n\t\t\tif cur_score > (original_score + extra_diff) and cur_score - original_score > best_diff:\n\t\t\t\tbest_diff = cur_score - original_score\n\t\t\t\tworst_model = skip_idx\n\t\t\n\t\t## Set new best score we have to beat\n\t\toriginal_score = original_score + best_diff\n\t\tprint (\"New original score: {0}\".format(original_score))\n\t\t## Throw out worst model from predictions (if we found one), print which one it is \n\t\tif worst_model != -1:\n\t\t\tpredictions = [x[0:worst_model] + x[worst_model + 1:] for x in predictions]\n\t\t\tprint (\"Remove {0} from averaging\".format(model_order[worst_model]))\n\t\t\tdel model_order[worst_model]\n\t\telse:\n\t\t\tprint (\"\\nFound best model with score {0}, includes:\".format(original_score))\n\t\t\tfor m in model_order:\n\t\t\t\tprint (m)\n\t\t\tprint ('\\n')\n\t\t\tbreak #else stop trying, removing a model made it worse \n\t\t\n\t\tbest_diff = 0\n\t\tworst_model = -1\n\treturn original_score, model_order\n\n\ndef cat_to_int(pred):\n\t'''Convert predicted categories to numbers'''\n\tnew_pred = []\n\toptions = []\n\tfor idx, p in enumerate(pred):\n\t\tif p.startswith(\"'\"):\n\t\t\ttry:\n\t\t\t\tnew_value = int(p[1]) #predicted category looks something like this: '0: no se infieren niveles de enojo' -- so take second character as number\n\t\t\texcept ValueError:\n\t\t\t\tnew_value = int(p[1:3]) #predicted category looks something like this: '-1: no se infieren niveles de enojo' -- so take second + third character as number\n\t\telse:\n\t\t\ttry:\n\t\t\t\t# changed this because data is now \"'0: no se infieren etc.'\"\n\t\t\t\tnew_value = int(p[0]) #predicted category looks something like this: '0: no se infieren niveles de enojo' -- so take second character as number\n\t\t\texcept ValueError:\n\t\t\t\tnew_value = int(p[0:2]) #predicted category looks something like this: '-1: no se infieren niveles de enojo' -- so take second + third character as number\n\t\tnew_pred.append(new_value)\n\t\tif new_value not in options:\n\t\t\toptions.append(new_value) \n\t\n\treturn np.asarray(new_pred), sorted(options)\n\n\ndef rescale(Y, options):\n\t'''Rescale categories between 0 and 1'''\n\tsorted_options = sorted(options)\n\trange_divider = len(options) + 1\n\tnew_options = []\n\t\n\t## Scale options between 0 and 1 evenly\n\tfor idx, option in enumerate(options):\n\t\tnew_val = round((float(1) / float(range_divider)) * (idx+1), 5)\n\t\tnew_options.append(new_val)\n\t\n\t## Rewrite the vector by new options\n\tnew_Y = []\n\tfor y in Y:\n\t\tnew_Y.append(new_options[sorted_options.index(y)])\n\treturn new_Y, sorted(new_options)\n\n\ndef get_file_labels(f, ix):\n\t'''Get individual labels from file'''\n\tlabels = []\n\twith open(f, \"r\", encoding=\"utf-8\") as infile:\n\t\tdata = infile.readlines()\n\t\tfor row in data[1:]:\n\t\t\tif args.clf:\n\t\t\t\tif ix == 
-1:\n\t\t\t\t\tlabels.append(row.split(\"\\t\")[ix])\n\t\t\t\telse:\n\t\t\t\t\tlabels.append(float(row.split(\"\\t\")[ix]))\n\t\t\telse:\n\t\t\t\tlabels.append(float(row.split(\"\\t\")[ix].strip()))\n\treturn labels \n\n\ndef fetch_labels(dir, model_types, emotion=None, ix=-1):\n\t\"\"\"\" use gold_data_emotion to specify the subtask > necessary to get the right file in the gold_data dir\"\"\"\n\tif emotion:\n\t\tfiles = [os.path.join(dir,f) for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f)) and f.endswith(\".txt\") and emotion in f.lower()]\n\telse:\n\t\tfiles = [os.path.join(dir,f) for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f)) and f.endswith(\".txt\")]\n\tall_labels = []\n\tmodel_order = []\n\t\n\t## If we get all models\n\tif model_types:\n\t\tfor mod in model_types:\n\t\t\tfor f in files:\n\t\t\t\t# make sure order is always the same here and consistent with dir!\n\t\t\t\tif mod[0] in f:\n\t\t\t\t\tmodel_order.append(mod[1])\n\t\t\t\t\tlabels = get_file_labels(f, ix)\n\t\t\t\t\tall_labels.append(labels)\n\t\t\t\t\t\n\telse:\n\t\tlabels = get_file_labels(files[0], ix)\n\t\tall_labels.append(labels) \n\t\n\t## Getting gold data so only single file\n\tif emotion:\n\t\treturn all_labels[0], [], model_order\n\t## Multiple lists of labels\n\telse:\n\t\tif args.clf and ix == -1:\n\t\t\toptions = []\n\t\t\tfor labellist in all_labels:\n\t\t\t\t_, opt = cat_to_int(labellist)\n\t\t\t\toptions.append(opt)\n\t\t\treturn list(zip(*all_labels)), options, model_order\n\t\telse:\n\t\t\treturn list(zip(*all_labels)), [] , model_order\n\n\ndef averaging(all_predictions, indices):\n\tfinal_predictions = []\n\tprint (\"Taking average for {0} models\".format(len(indices)))\n\tfor instance in all_predictions:\n\t\tnew_instance = []\n\t\tfor index, item in enumerate(instance):\n\t\t\tif index in indices:\n\t\t\t\tnew_instance.append(item)\n\t\tnew_pred = sum(new_instance) / len(new_instance)\n\t\tfinal_predictions.append(new_pred)\n\n\treturn final_predictions\n\n\ndef reformat_predictions(final_predictions, test_labels, options, old_options):\n\t'''Reformat the predictions back to categories'''\n\trescaled_final_predictions = []\n\tunique_string_labels = list(set([model for instances in test_labels for model in instances if \"infiere\" in model]))\n\t#print(\"Unique labels:\", unique_string_labels)\n\tfor item in final_predictions:\n\t\tscaled_item = min(options, key=lambda x:abs(x-item))\n\t\treconverted_score = old_options[options.index(scaled_item)]\n\t\tfor item in unique_string_labels:\n\t\t\tif item.startswith(\"'\"+str(reconverted_score)):\n\t\t\t\t# to remove the redundant quotation marks\n\t\t\t\titem = item.strip()[1:-1]\n\t\t\t\trescaled_final_predictions.append(item)\n\treturn rescaled_final_predictions\t\t\t\n\n\ndef write_output(test_pred, out_dir, emotion, dev_test, original_test_file, task_name):\n\t'''Write predictions to file'''\n\tout_dir_full = \"{0}{1}/{2}/\".format(out_dir,task_name,dev_test)\n\tif not os.path.exists(out_dir_full):\n\t\tsubprocess.call([\"mkdir\", \"-p\", out_dir_full])\n\t\n\tif emotion == \"valence\":\n\t\tname = \"{0}{1}_es_pred.txt\".format(out_dir_full, task_name)\n\telse:\n\t\tname = \"{0}{1}_es_{2}_pred.txt\".format(out_dir_full,task_name, emotion)\n\n\twith open(original_test_file, 'r', encoding=\"utf-8\") as infile:\n\t\tinfile = infile.readlines()\n\t\tinfile = [x for x in infile if x]\n\t\tdata = [\"\\t\".join(line.split(\"\\t\")[:-1]) + \"\\t\" + str(test_pred[ix]) for ix, line in enumerate(infile[1:])]\n\t\twith open(name, 
'w', encoding=\"utf-8\") as out:\n\t\t\tout.write(infile[0])\n\t\t\tfor line in data:\n\t\t\t\tout.write(line)\n\t\t\t\tout.write(\"\\n\")\n\t\tout.close() \n\n\nif __name__ == \"__main__\":\n\targs = create_arg_parser()\n\tgold_labels_dir = args.dev_gold\n\temotions = [\"anger\", \"fear\", \"joy\", \"sadness\", \"valence\"]\n\tmodel_types = [[\"traindev_svm\", \"SVM Normal\"], [\"trans_svm\",\"SVM Translated\"], [\"feed_forward_normal\",\"Feed Forward Normal\"], [\"feed_forward_translated\",\"Feed Forward Translated\"], [\"lstm_normal\",\"LSTM normal\"], [\"lstm_translated\",\"LSTM translated\"],[\"lstm_silver\",\"LSTM silver\"], [\"feed_forward_silver\",\"Feed Forward Silver\"]] \n\t\n\tfor emotion in emotions:\n\t\t## Get correct models for dev/test predictions\n\t\tmodel_dir_dev = args.dev_pred + emotion + '/'\n\t\tmodel_dir_test = args.test_pred + emotion + \"/\"\n\t\tassert(model_dir_dev != model_dir_test), \"Dev and test folder can not be the same!\"\n\t\t\n\t\tif os.path.exists(model_dir_dev) and os.path.exists(model_dir_test):\n\t\t\tprint (\"\\nDoing tests for {0}:\\n\".format(emotion))\n\t\t\tif not args.clf:\n\t\t\t\t## First get predictions + gold labels: order of labels should be consistent\n\t\t\t\tall_pred_labels, _, model_order = fetch_labels(model_dir_dev, model_types)\n\t\t\t\tgold_labels, _, _ \t = fetch_labels(gold_labels_dir, [], emotion)\n\t\t\t\tall_test_predictions, _, _ \t\t = fetch_labels(model_dir_test, model_types)\n\t\t\t\t\n\t\t\t\t## Print individual scores of the models to see if some model stands out (very high/low score)\n\t\t\t\tprint (\"Individual scores of models\\n\")\n\t\t\t\tfor idx, model in enumerate(model_order):\n\t\t\t\t\tprint ('{0}: {1}'.format(model, round(pearsonr([x[idx] for x in all_pred_labels], gold_labels)[0],4)))\n\n\t\t\t\t## Then get the score by averaging\n\t\t\t\tavg_score = calculate_pearson(all_pred_labels, gold_labels)\n\t\t\t\tprint('\\nAveraging all models', avg_score, '\\n')\n\t\t\t\t\n\t\t\t\t## Keep removing models until it does not help anymore\n\t\t\t\tbest_score_removing, models = remove_if_better(all_pred_labels, gold_labels, avg_score, model_order, args.difference)\n\t\t\t\tflat_model_types = [model[1] for model in model_types] ## only keep models\n\t\t\t\tindices_models = [flat_model_types.index(model) for model in models]\n\t\t\t\t#print ('indices', indices_models)\n\t\t\t\t## Write predictions for dev and test\n\t\t\t\tfinal_predictions_dev = averaging(all_pred_labels, indices_models)\n\t\t\t\tfinal_predictions_test = averaging(all_test_predictions, indices_models)\n\t\t\t\t\n\t\t\t\t## Write output\n\t\t\t\toriginal_dev_file = [os.path.join(args.orig_dev ,f) for f in os.listdir(args.orig_dev) if os.path.isfile(os.path.join(args.orig_dev, f)) and f.endswith(\".txt\") and emotion in f.lower()]\n\t\t\t\toriginal_test_file = [os.path.join(args.orig_test,f) for f in os.listdir(args.orig_test) if os.path.isfile(os.path.join(args.orig_test, f)) and f.endswith(\".txt\") and emotion in f.lower()]\n\t\t\t\t\n\t\t\t\twrite_output(final_predictions_dev, args.out_dir, emotion, 'dev' ,original_dev_file[0], args.task_name)\n\t\t\t\twrite_output(final_predictions_test, args.out_dir, emotion, 'test',original_test_file[0], args.task_name)\n\t\t\t\t\n\t\t\telse:\n\t\t\t\t# Prediction labels for dev\n\t\t\t\tstring_pred_labels, options_pred, model_order = fetch_labels(model_dir_dev, model_types) \t\t# string pred labels\n\t\t\t\tscaled_pred_labels, _, _ \t\t\t\t\t = fetch_labels(model_dir_dev, model_types, ix=-2) # scaled 
predictions\n\t\t\t\tspecific_pred_labels, _, _ \t\t\t\t\t = fetch_labels(model_dir_dev, model_types, ix=-3) # most specific predictions\n\t\t\t\t## Gold labels for dev\n\t\t\t\tgold_labels, _, _ \t\t\t= fetch_labels(gold_labels_dir, [], emotion)\n\t\t\t\tnew_gold, old_options \t\t= cat_to_int(gold_labels)\n\t\t\t\tscaled_gold_labels, options = rescale(new_gold, old_options)\n\t\t\t\t## Prediction labels for test\n\t\t\t\ttest_labels, options_test, _ = fetch_labels(model_dir_test, model_types) \t # string predictions test\n\t\t\t\tscaled_test_labels, _, _ \t = fetch_labels(model_dir_test, model_types, ix=-2) # scaled predictions\n\t\t\t\tall_test_predictions, _, _ = fetch_labels(model_dir_test, model_types, ix=-3) # most specific predictions\n\n\t\t\t\tassert sorted(options_pred) == sorted(options_test), \"Options are not the same for pred and test, something is wrong\"\n\n\t\t\t\t## Print individual scores of the models to see if some model stands out (very high/low score) -- calculate this based on category data (so 4, -2, -1, 0, etc)\n\t\t\t\tprint (\"Individual scores of models\\n\")\n\t\t\t\tfor idx, model in enumerate(model_order):\n\t\t\t\t\tcur_labels = [x[idx] for x in string_pred_labels]\n\t\t\t\t\tcategory_int_labels, _ = cat_to_int(cur_labels)\n\t\t\t\t\tprint ('{0}: {1}'.format(model, round(pearsonr(category_int_labels, new_gold)[0],4)))\n\t\t\t\t\n\t\t\t\t## Print average score\n\t\t\t\tavg_score = calculate_pearson(specific_pred_labels, scaled_gold_labels, options=options, old_options = old_options)\n\t\t\t\tprint('\\nAveraging all models', avg_score, '\\n')\n\t\t\t\t\n\t\t\t\t## Keep removing models until it does not help anymore\n\t\t\t\tbest_score_removing, models = remove_if_better(specific_pred_labels, scaled_gold_labels, avg_score, model_order, args.difference, options = options, old_options = old_options)\n\t\t\t\tflat_model_types = [model[1] for model in model_types]\n\t\t\t\tindices_models = [flat_model_types.index(model) for model in models]\n\t\t\t\t\n\t\t\t\t## Get predictions for test and dev\n\t\t\t\tfinal_predictions_dev = averaging(specific_pred_labels, indices_models)\n\t\t\t\tfinal_predictions_test = averaging(all_test_predictions, indices_models)\n\t\t\t\t\n\t\t\t\t## Reformat them back to original categories when writing output\n\t\t\t\trescaled_pred_dev = reformat_predictions(final_predictions_dev, test_labels, options, old_options)\n\t\t\t\trescaled_pred_test = reformat_predictions(final_predictions_test, test_labels, options, old_options)\n\t\t\t\t\n\t\t\t\t## Write output to file\n\t\t\t\toriginal_dev_file = [os.path.join(args.orig_dev ,f) for f in os.listdir(args.orig_dev) if os.path.isfile(os.path.join(args.orig_dev, f)) and f.endswith(\".txt\") and emotion in f.lower()]\n\t\t\t\toriginal_test_file = [os.path.join(args.orig_test,f) for f in os.listdir(args.orig_test) if os.path.isfile(os.path.join(args.orig_test, f)) and f.endswith(\".txt\") and emotion in f.lower()]\n\t\t\t\t\n\t\t\t\twrite_output(rescaled_pred_dev, args.out_dir, emotion, 'dev', original_dev_file[0], args.task_name)\n\t\t\t\twrite_output(rescaled_pred_test, args.out_dir, emotion, 'test',original_test_file[0], args.task_name)\n","sub_path":"python_scripts/calc_weights.py","file_name":"calc_weights.py","file_ext":"py","file_size_in_byte":14892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"388426009","text":"# coding:utf-8\nimport json\nimport threading\n\nfrom volcengine.ApiInfo import ApiInfo\nfrom volcengine.Credentials 
import Credentials\nfrom volcengine.base.Service import Service\nfrom volcengine.ServiceInfo import ServiceInfo\n\n\nclass RtcService(Service):\n _instance_lock = threading.Lock()\n\n def __new__(cls, *args, **kwargs):\n if not hasattr(RtcService, \"_instance\"):\n with RtcService._instance_lock:\n if not hasattr(RtcService, \"_instance\"):\n RtcService._instance = object.__new__(cls)\n return RtcService._instance\n\n def __init__(self):\n self.service_info = RtcService.get_service_info()\n self.api_info = RtcService.get_api_info()\n super(RtcService, self).__init__(self.service_info, self.api_info)\n\n @staticmethod\n def get_service_info():\n service_info = ServiceInfo(\"open.volcengineapi.com\", {'Accept': 'application/json'},\n Credentials('', '', 'rtc', 'cn-north-1'), 5, 5)\n return service_info\n\n @staticmethod\n def get_api_info():\n api_info = {\n \"ListRooms\": ApiInfo(\"GET\", \"/\", {\"Action\": \"ListRooms\", \"Version\": \"2020-12-01\"}, {}, {}),\n \"ListIndicators\": ApiInfo(\"POST\", \"/\", {\"Action\": \"ListIndicators\", \"Version\": \"2020-12-01\"}, {}, {}),\n }\n return api_info\n\n def list_rooms(self, params):\n res = self.get(\"ListRooms\", params)\n if res == '':\n raise Exception(\"ListRooms: empty response\")\n res_json = json.loads(res)\n return res_json\n\n def list_indicators(self, body):\n res = self.json(\"ListIndicators\", {}, body)\n if res == '':\n raise Exception(\"ListIndicators: empty response\")\n res_json = json.loads(res)\n return res_json\n","sub_path":"volcengine/rtc/RtcService.py","file_name":"RtcService.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"590088272","text":"# -*- coding: utf-8 -*- #\n# Copyright 2018 Google LLC. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"services helper functions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom apitools.base.py import exceptions as apitools_exceptions\nfrom apitools.base.py import list_pager\nfrom googlecloudsdk.api_lib.services import exceptions\nfrom googlecloudsdk.api_lib.util import apis_internal\nfrom googlecloudsdk.core import log\nfrom googlecloudsdk.core import properties\nfrom googlecloudsdk.core.credentials import http as http_creds\n\n_PROJECT_RESOURCE = 'projects/%s'\n_PROJECT_SERVICE_RESOURCE = 'projects/%s/services/%s'\n_V1_VERSION = 'v1'\n_V1BETA1_VERSION = 'v1beta1'\n_V1ALPHA_VERSION = 'v1alpha'\n\n# Map of services which should be protected from being disabled by\n# prompting the user for confirmation\n_PROTECTED_SERVICES = {\n 'anthos.googleapis.com': ('Warning: Disabling this service will '\n 'also automatically disable any running '\n 'Anthos clusters.')\n}\n\n\ndef GetProtectedServiceWarning(service_name):\n \"\"\"Return the warning message associated with a protected service.\"\"\"\n return _PROTECTED_SERVICES.get(service_name)\n\n\ndef EnableApiCall(project, service):\n \"\"\"Make API call to enable a specific service.\n\n Args:\n project: The project for which to enable the service.\n service: The identifier of the service to enable, for example\n 'serviceusage.googleapis.com'.\n\n Raises:\n exceptions.EnableServicePermissionDeniedException: when enabling API fails.\n apitools_exceptions.HttpError: Another miscellaneous error with the service.\n\n Returns:\n The result of the operation\n \"\"\"\n client = _GetClientInstance()\n messages = client.MESSAGES_MODULE\n\n # TODO(b/78464430): use resource argument.\n request = messages.ServiceusageServicesEnableRequest(\n name=_PROJECT_SERVICE_RESOURCE % (project, service))\n try:\n return client.services.Enable(request)\n except (apitools_exceptions.HttpForbiddenError,\n apitools_exceptions.HttpNotFoundError) as e:\n exceptions.ReraiseError(e,\n exceptions.EnableServicePermissionDeniedException)\n\n\ndef BatchEnableApiCall(project, services):\n \"\"\"Make API call to batch enable services.\n\n Args:\n project: The project for which to enable the services.\n services: Iterable of identifiers of services to enable.\n\n Raises:\n exceptions.EnableServicePermissionDeniedException: when enabling API fails.\n apitools_exceptions.HttpError: Another miscellaneous error with the service.\n\n Returns:\n The result of the operation\n \"\"\"\n client = _GetClientInstance()\n messages = client.MESSAGES_MODULE\n\n request = messages.ServiceusageServicesBatchEnableRequest(\n batchEnableServicesRequest=messages.BatchEnableServicesRequest(\n serviceIds=services),\n parent=_PROJECT_RESOURCE % project)\n try:\n return client.services.BatchEnable(request)\n except (apitools_exceptions.HttpForbiddenError,\n apitools_exceptions.HttpNotFoundError) as e:\n exceptions.ReraiseError(e,\n 
exceptions.EnableServicePermissionDeniedException)\n\n\ndef DisableApiCall(project, service, force=False):\n \"\"\"Make API call to disable a specific service.\n\n Args:\n project: The project for which to enable the service.\n service: The identifier of the service to disable, for example\n 'serviceusage.googleapis.com'.\n force: disable the service even if there are enabled services which depend\n on it. This also disables the services which depend on the service to be\n disabled.\n\n Raises:\n exceptions.EnableServicePermissionDeniedException: when disabling API fails.\n apitools_exceptions.HttpError: Another miscellaneous error with the service.\n\n Returns:\n The result of the operation\n \"\"\"\n client = _GetClientInstance()\n messages = client.MESSAGES_MODULE\n\n request = messages.ServiceusageServicesDisableRequest(\n name=_PROJECT_SERVICE_RESOURCE % (project, service),\n disableServiceRequest=messages.DisableServiceRequest(\n disableDependentServices=force,),\n )\n try:\n return client.services.Disable(request)\n except (apitools_exceptions.HttpForbiddenError,\n apitools_exceptions.HttpNotFoundError) as e:\n exceptions.ReraiseError(e,\n exceptions.EnableServicePermissionDeniedException)\n except apitools_exceptions.HttpBadRequestError as e:\n log.status.Print('Provide the --force flag if you wish to disable '\n 'dependent services.')\n exceptions.ReraiseError(e, exceptions.Error)\n\n\ndef GetService(project, service):\n \"\"\"Get a service.\n\n Args:\n project: The project for which to get the service.\n service: The service to get.\n\n Raises:\n exceptions.GetServicePermissionDeniedException: when getting service fails.\n apitools_exceptions.HttpError: Another miscellaneous error with the service.\n\n Returns:\n The service configuration.\n \"\"\"\n client = _GetClientInstance()\n messages = client.MESSAGES_MODULE\n\n request = messages.ServiceusageServicesGetRequest(\n name=_PROJECT_SERVICE_RESOURCE % (project, service))\n try:\n return client.services.Get(request)\n except (apitools_exceptions.HttpForbiddenError,\n apitools_exceptions.HttpNotFoundError) as e:\n exceptions.ReraiseError(e, exceptions.GetServicePermissionDeniedException)\n\n\ndef IsServiceEnabled(service):\n client = _GetClientInstance()\n messages = client.MESSAGES_MODULE\n return service.state == messages.GoogleApiServiceusageV1Service.StateValueValuesEnum.ENABLED\n\n\ndef ListServices(project, enabled, page_size, limit):\n \"\"\"Make API call to list services.\n\n Args:\n project: The project for which to list services.\n enabled: List only enabled services.\n page_size: The page size to list.\n limit: The max number of services to display.\n\n Raises:\n exceptions.ListServicesPermissionDeniedException: when listing services\n fails.\n apitools_exceptions.HttpError: Another miscellaneous error with the service.\n\n Returns:\n The list of services\n \"\"\"\n client = _GetClientInstance()\n messages = client.MESSAGES_MODULE\n\n if enabled:\n service_filter = 'state:ENABLED'\n else:\n service_filter = None\n request = messages.ServiceusageServicesListRequest(\n filter=service_filter, parent=_PROJECT_RESOURCE % project)\n try:\n return list_pager.YieldFromList(\n client.services,\n request,\n limit=limit,\n batch_size_attribute='pageSize',\n batch_size=page_size,\n field='services')\n except (apitools_exceptions.HttpForbiddenError,\n apitools_exceptions.HttpNotFoundError) as e:\n exceptions.ReraiseError(e,\n exceptions.EnableServicePermissionDeniedException)\n\n\ndef GetOperation(name):\n \"\"\"Make API call to 
get an operation.\n\n Args:\n name: The name of operation.\n\n Raises:\n exceptions.OperationErrorException: when the getting operation API fails.\n apitools_exceptions.HttpError: Another miscellaneous error with the service.\n\n Returns:\n The result of the operation\n \"\"\"\n client = _GetClientInstance()\n messages = client.MESSAGES_MODULE\n request = messages.ServiceusageOperationsGetRequest(name=name)\n try:\n return client.operations.Get(request)\n except (apitools_exceptions.HttpForbiddenError,\n apitools_exceptions.HttpNotFoundError) as e:\n exceptions.ReraiseError(e, exceptions.OperationErrorException)\n\n\ndef GenerateServiceIdentity(project, service):\n \"\"\"Generate a service identity.\n\n Args:\n project: The project to generate a service identity for.\n service: The service to generate a service identity for.\n\n Raises:\n exceptions.GenerateServiceIdentityPermissionDeniedException: when generating\n service identity fails.\n apitools_exceptions.HttpError: Another miscellaneous error with the service.\n\n Returns:\n The email and uid of the generated service identity.\n \"\"\"\n client = _GetClientInstance(version=_V1BETA1_VERSION)\n messages = client.MESSAGES_MODULE\n\n request = messages.ServiceusageServicesGenerateServiceIdentityRequest(\n parent=_PROJECT_SERVICE_RESOURCE % (project, service))\n try:\n op = client.services.GenerateServiceIdentity(request)\n return _GetOperationResponseProperty(\n op, 'email'), _GetOperationResponseProperty(op, 'unique_id')\n except (apitools_exceptions.HttpForbiddenError,\n apitools_exceptions.HttpNotFoundError) as e:\n exceptions.ReraiseError(\n e, exceptions.GenerateServiceIdentityPermissionDeniedException)\n\n\ndef _GetOperationResponseProperty(op, key):\n return next((p.value.string_value\n for p in op.response.additionalProperties\n if p.key == key), None)\n\n\ndef _GetClientInstance(version='v1'):\n \"\"\"Get a client instance for service usage.\"\"\"\n # pylint:disable=protected-access\n # Specifically disable resource quota in all cases for service management.\n # We need to use this API to turn on APIs and sometimes the user doesn't have\n # this API turned on. We should always use the shared project to do this\n # so we can bootstrap users getting the appropriate APIs enabled. 
If the user\n # has explicitly set the quota project, then respect that.\n enable_resource_quota = (\n properties.VALUES.billing.quota_project.IsExplicitlySet())\n http_client = http_creds.Http(\n response_encoding=http_creds.ENCODING,\n enable_resource_quota=enable_resource_quota)\n return apis_internal._GetClientInstance(\n 'serviceusage', version, http_client=http_client)\n","sub_path":"google-cloud-sdk/lib/googlecloudsdk/api_lib/services/serviceusage.py","file_name":"serviceusage.py","file_ext":"py","file_size_in_byte":10146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} {"seq_id":"116845799","text":"# Write a pygame program\n# Get a deeper understanding of what an object is\nimport pygame\nimport sys\n# Since everything is an object, I will create two objects; one is the screen\nclass Screen():\n\tdef __init__(self):\n\t\tself.size=(480,700)\nscreen_nature=Screen()\n# The other is the ship, with its movement speed and movement behaviour\t\t\nclass Ship():\n\tdef __init__(self,image_rect,screen_rect):\n\t\tself.move_right=False\n\t\tself.move_left=False\n\t\tself.image_rect=image_rect\n\t\tself.screen_rect=screen_rect\n\tdef move_position(self):\n\t\tif self.move_right:\n\t\t\tif self.image_rect.right<=self.screen_rect.right:\n\t\t\t\tself.image_rect.centerx+=1\n\t\tif self.move_left:\n\t\t\tif self.image_rect.left>=self.screen_rect.left:\n\t\t\t\tself.image_rect.centerx-=1\t\t\ndef run_game():\n\tpygame.init()\n\tscreen=pygame.display.set_mode(screen_nature.size)\n\tpygame.display.set_caption('hello ship')\n # Load the image, then draw it below\n\timage_maliao=pygame.image.load('me1.png')\n\t# Get the rects of the screen and the image, to make positioning easier\n\tscreen_rect=screen.get_rect()\n\timage_background=pygame.image.load('background.png').convert()\n\timage_rect=image_maliao.get_rect()\n\tship=Ship(image_rect,screen_rect)\n\t# Put Mario at the bottom centre of the screen\n\timage_rect.centerx=screen_rect.centerx\n\timage_rect.bottom=screen_rect.bottom\n\twhile True:\n\t\tfor event in pygame.event.get():\n\t\t\t'''\n\t\t\tThis approach definitely does not work, because K_LEFT can only be detected after KEYDOWN has been detected first; otherwise it fails, so this code is wrong\n\t\t\tmove_sign=False\n\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\tmove_sign=True\n\t\t\tif event.type == pygame.KEYUP:\n\t\t\t\tmove_sign=False\n\t\t\twhile move_sign:\n\t\t\t\tif event.key == pygame.K_LEFT:\n\t\t\t\t\timage_rect.centerx-=1\n\t\t\t\tif \tevent.key == pygame.K_RIGHT:\n\t\t\t\t\timage_rect.centerx+=1\n\t\t\t\t\t'''\n\t\t\t'''\n\t\t\t# I will add keyboard events to move the character in all four directions, but I need it to keep moving while the key is held down\n\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\tif event.key == pygame.K_LEFT:\n\t\t\t\t\timage_rect.centerx-=1\n\t\t\t\tif event.key == pygame.K_RIGHT:\n\t\t\t\t\timage_rect.centerx+=1\n\t\t\t'''\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tsys.exit()\n\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\tif event.key == pygame.K_LEFT:\n\t\t\t\t\tship.move_left=True\n\t\t\t\tif event.key == pygame.K_RIGHT:\n\t\t\t\t\tship.move_right=True\n\t\t\tif event.type == pygame.KEYUP:\n\t\t\t\tif event.key == pygame.K_LEFT:\n\t\t\t\t\tship.move_left=False\n\t\t\t\tif event.key == pygame.K_RIGHT:\n\t\t\t\t\tship.move_right=False\n\t\tship.move_position()\n\t\tscreen.blit(image_background,(0,0))\n\t\tscreen.blit(image_maliao,ship.image_rect)\n\t\tpygame.display.update()\nrun_game()\n\t\t\t\t\n\n\n","sub_path":"射击子弹.py","file_name":"射击子弹.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} {"seq_id":"431062986","text":"from django.urls import path\nfrom . 
import views\nfrom .feeds import LatestPostsFeed, AtomSiteNewsFeed\n\n\n\nurlpatterns = [\n path(\"feed/rss\", LatestPostsFeed(), name=\"post_feed\"),\n path(\"feed/atom\", AtomSiteNewsFeed()),\n path('', views.index, name = 'kolture'),\n path('about/', views.about, name = 'about'),\n path('home/', views.PostList.as_view(), name='blog-home'),\n path('home/drafts/', views.post_draft_list, name='draft-post'),\n path('home/posts/', views.all_post, name='all-post'),\n path('home/add_post/', views.AddPostView.as_view(), name='add_post'),\n path('home/add_category/', views.AddCategoryView.as_view(), name='add_category'),\n path('tag//', views.tagged, name=\"tagged\"),\n path('home//', views.PostDetail.as_view(), name='post-detail'),\n path('like/', views.like_post, name='like_post'),\n path('home//comment/', views.add_comment, name='add_comment'),\n # path('home//', views.post_detail, name='post_detail'),\n path('home/edit//', views.UpdatePostView.as_view(), name='update_post'),\n path('home//remove', views.DeletePostView.as_view(), name='delete_post'),\n path('home/category//', views.CategoryView, name='category'),\n path('categorylist/', views.CategoryListView, name='category-list'),\n path('home/search/results/', views.search, name='search'),\n path('//',\n views.ArticleMonthArchiveView.as_view(month_format='%m'),\n name=\"post_archive_month\"),\n \n\n]\n\n\n","sub_path":"SiteProject/Kolture/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"579490077","text":"from os import environ as env\nfrom os import listdir\nfrom os.path import dirname, exists, isdir, isfile, join\nfrom string import capwords\nfrom typing import Any, TypeVar, Union\n\nimport tensorflow as tf\nimport tensorflowjs\nfrom flask import Flask, redirect, render_template, request, send_from_directory\nfrom flask_pbj import api, json, protobuf\n\nfrom .debug import _DEBUG, dprint, if_debug\nfrom .model_store import ModelStore\nfrom .types import (\n InferenceRequest,\n InferenceResponse,\n LoadModelRequest,\n LoadModelResponse,\n)\nfrom .types.error import Error, into_error\nfrom .types.metrics import Metrics\nfrom .types.model import Model, ModelHandle, convert_handle, convert_model, into_handle\nfrom .types.tensor import Tensors, pb_to_tflite_tensors, tflite_tensors_to_pb\n\n# convert: Foreign type -> Local type\n# into: Local type -> Foreign type\n\nHOST: str = env[\"HOST\"] if \"HOST\" in env else \"0.0.0.0\"\nPORT: int = int(env[\"PORT\"]) if \"PORT\" in env else 5000\nEX_DIR = join(dirname(__file__), \"..\", \"examples\")\nTEMPLATE_DIR = join(dirname(__file__), \"templates\")\n\napp = Flask(\n __name__,\n static_folder=EX_DIR,\n static_url_path=\"/examples/\",\n template_folder=TEMPLATE_DIR,\n)\nmodel_store: ModelStore\n\n# Not ideal, but good enough:\nResponse = Any\n\n# snake_case/kebab-case to Title Case\ndef name_to_title(name: str) -> str:\n return \" \".join(\n [capwords(word) for word in name.replace(\"_\", \" \").replace(\"-\", \" \").split()]\n )\n\n\n@app.route(\"/\")\ndef hello() -> Response:\n return redirect(\"ex\", code=302)\n\n\n@app.route(\"/ex/\")\ndef example_index_page() -> Response:\n examples = [\n (ex, name_to_title(ex))\n for ex in listdir(EX_DIR)\n if isdir(join(EX_DIR, ex))\n and (\n isfile(join(EX_DIR, ex, \"dist\", \"index.html\"))\n or isfile(join(EX_DIR, ex, \"demo\", \"dist\", \"index.html\"))\n )\n ]\n examples.sort()\n return 
render_template(\"example-index-page.html\", examples=examples)\n\n\n@app.route(\"/api/echo/\")\ndef echo(string: str) -> str:\n return string\n\n\n@app.route(\"/ex//\")\n@app.route(\"/ex//\", defaults={\"path\": \"index.html\"})\ndef serve_build_file(example_name: str, path: str) -> Response:\n p = join(\"dist\", path)\n\n if isfile(join(EX_DIR, example_name, p)):\n pass\n elif isfile(join(EX_DIR, example_name, \"demo\", p)):\n p = join(\"demo\", p)\n\n p = join(example_name, p)\n dprint(f\"Trying: {p}\")\n return send_from_directory(EX_DIR, p)\n\n\n@app.route(\"/api/load_model\", methods=[\"POST\"])\n@api(json, protobuf(receives=LoadModelRequest, sends=LoadModelResponse, to_dict=False))\ndef load_model() -> LoadModelResponse:\n pb_model: Model = request.received_message.model\n\n try:\n model: bytes = convert_model(pb_model)\n handle = model_store.load(model)\n\n return LoadModelResponse(handle=into_handle(handle))\n except Exception as e:\n return LoadModelResponse(error=into_error(e))\n\n\n@app.route(\"/api/inference\", methods=[\"POST\"])\n@api(json, protobuf(receives=InferenceRequest, sends=InferenceResponse, to_dict=False))\ndef run_inference() -> InferenceResponse:\n pb_tensor: Tensors = request.received_message.tensors\n pb_handle: ModelHandle = request.received_message.handle\n\n try:\n tensors = pb_to_tflite_tensors(pb_tensor)\n handle = model_store.get(convert_handle(pb_handle))\n\n tensors, metrics = handle.predict(tensors)\n\n return InferenceResponse(\n tensors=tflite_tensors_to_pb(tensors), metrics=metrics.into()\n )\n except Exception as e:\n return InferenceResponse(error=into_error(e))\n\n\ndef main() -> None:\n global model_store\n model_store = ModelStore()\n app.run(host=HOST, port=PORT, debug=_DEBUG)\n","sub_path":"server/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"643483738","text":"import math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\n\r\ndef plusMinus(arr):\r\n positive = 0\r\n negative = 0\r\n zero = 0\r\n for i in arr:\r\n if i > 0:\r\n positive = positive + 1\r\n elif i < 0:\r\n negative = negative + 1\r\n else:\r\n zero = zero + 1\r\n print(float(\"{0:.6f}\".format(positive/len(arr))))\r\n print(float(\"{0:.6f}\".format(negative/len(arr))))\r\n print(float(\"{0:.6f}\".format(zero/len(arr))))\r\n\r\nif __name__ == '__main__':\r\n n = int(input())\r\n\r\n arr = list(map(int, input().rstrip().split()))\r\n\r\n plusMinus(arr)\r\n","sub_path":"Warmup/plusMinus.py","file_name":"plusMinus.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"295664532","text":"# http://codeforces.com/problemset/problem/446/A\ndef fast_dzy_loves_sequence(a):\n a=[10**9]+a+[0]\n lens=[]\n left=[]\n right=[]\n c_left=0\n c_right=0\n for i in range(0,len(a)):\n if i > 0 and a[i-1]a[j]:\n c_right+=1\n else:\n c_right=1\n right.append(c_right)\n right.reverse()\n left[0]=0\n left[-1]=0\n right[0]=0\n right[-1]=0\n for k in range(1,len(a)-1):\n if a[k-1]+1 < a[k+1]:\n this_len=left[k-1]+1+right[k+1]\n else:\n this_len=max(left[k-1]+1,right[k+1]+1)\n lens.append(this_len) \n return max(lens)\nn=int(input())\na=[int(x) for x in input().split()]\nprint 
(fast_dzy_loves_sequence(a))","sub_path":"codeforces/446A.py","file_name":"446A.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"240200245","text":"#First import the netcdf4 library\nfrom netCDF4 import Dataset\n\n# Read en existing NetCDF file and create a new one\n# f is going to be the existing NetCDF file from where we want to import data\n# and g is going to be the new file.\n\ng=Dataset('/home/wms/Documents/ideal_dataset.nc','w',) # w if for creating a file\n # if the file exists it the \n # file will be deleted to write on it\n\n#Add global attributes for Datacite\n\ng.id='SAEON_dataset_001'\ng.creator_name='Kyle Cooper'\ng.creator_position='data curator'\ng.creator_address='1 address street, suburb, Province, City'\ng.creator_email='test@saeon.ac.za'\ng.creator_url='www.saeon.ac.za'\n\ng.publisher_name='John Smith'\ng.publisher_position='Chief Scientist'\ng.publisher_address='1 address street, suburb, Province, City'\ng.publisher_email='john@publisher.ac.za'\ng.publisher_url='www.publisher.ac.za'\n\ng.contributor_name='Jane Doe'\ng.contributor_position='Field Assistant'\ng.contributor_address='1 address street, suburb, Province, City'\ng.contributor_email='jane@contributor.ac.za'\ng.contributor_url='www.contributor.com'\n\ng.source='test data collected in Cape Town'\ng.history='nccopy'\ng.references='www.saeon.ac.za/dataproductionmethod'\ng.comment='go bokke'\ng.institution='South African Environmental Observation Network, 5th Floor, Foretrust Building, Martin Hammerschlag Way, CPT, Private Bag X2, Roggebaai, 8012'\ng.title='The Ideal datacite formatted NetCDF dataset for SAEON Import'\n\ng.date_issued='2017/06/01'\ng.keywords='datadiscovery,metadata,test'\ng.time_coverage_start='2015/01/01'\ng.time_coverage_end='2017/12/31'\ng.time_coverage_resolution='daily'\ng.conventions='CF Conventions v1.6,ACCD v1.6'\ng.Alternative_identifier='SAEON DOI issued'\ng.License='Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)'\ng.summary='The global attributes in this file are setup for the seamless import of metadata from the global attributes into the SAEON Metadata base'\ng.geospatial_lat_min=-10.00\ng.geospatial_lat_max=-5.00\ng.geospatial_lat_units='degrees south'\ng.geospatial_lat_resolution=0.5\ng.geospatial_lon_min=1.00\ng.geospatial_lon_max=15.00\ng.geospatial_lon_units='degrees east'\ng.geospatial_lon_resolution=0.5\ng.geospatial_vertical_min=0.00\ng.geospatial_vertical_max=100.00\ng.geospatial_vertical_positive='up'\ng.geospatial_vertical_units='m'\ng.geospatial_vertical_resolution=1.00\ng.acknowledgement='grant number 001'\ng.date_created='2017/01/01'\ng.date_modified='2017/02/30'\ng.OnlineResourceLink='http://196.21.191.71:8080/erddap'\n\n#g.repository='SAEON'\n#g.name='ideal_dataset.nc'\n#g.title='The Ideal datacite formatted NetCDF dataset for SAEON Import'\n#g.creators='Kyle Cooper'\n#g.contributor='Wim Hugo'\n#g.publisher='South African Environmental Network'\n#g.publicationdate='2017'\n#g.abstract='The global attributes in this file are setup for the seamless import of metadata from the global attributes into the SAEON Metadata base'\n#g.keywords='datadiscovery,metadata,test'\n#g.boundingbox='North:-5.00, South:-10.00, East:1.00, West: 15.00'\n#g.start_date='2015'\n#g.end_date='2017'\n#g.DataDownloadLink='http://196.21.191.71:8080/opendap/data/ideal_dataset.nc'\n#g.License='Attribution-ShareAlike 4.0 International (CC BY-SA 
4.0)'\n#g.RightsURL='https://creativecommons.org/licenses/by-sa/4.0/legalcode'\n#g.SystemKeywords='#SAEON'\n#g.Supplementaryinfo='http://www.saeon.ac.za'\n#g.OnlineResourceLink='http://196.21.191.71:8080/erddap'\n#g.MetaDataStandard='DataCite'\n#g.Portal='SAEON'\n \ng.close()\n","sub_path":"SAEON/ideal_nc_global_attributes.py","file_name":"ideal_nc_global_attributes.py","file_ext":"py","file_size_in_byte":3520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"520189082","text":"from sensor_msgs.point_cloud2 import read_points\nimport numpy as np\n\n\nclass SegmentationResult():\n\n def __init__(self, msg):\n \"\"\"\n Convert a Segmentation msg to a Python class\n Segmented object point clouds are converted to N by 3 numpy arrays\n\n Parameters\n ----------\n msg: Segmentation\n \"\"\"\n self.class_ids = np.array(msg.class_ids)\n self.class_names = np.array(msg.class_names)\n self.object_points = []\n for pc_msg in msg.object_points:\n pointgen = read_points(pc_msg)\n pc = np.array([p for p in pointgen])\n self.object_points.append(pc)\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"542473276","text":"from rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom ecoshop.models import Shop\nfrom ecoshop.serializers import ShopSerializer\n\n\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef ecoshop_list(request):\n data = request.data[\"location\"]\n\n addr1 = address_finder(data['1'], data['2'])\n addr2 = address_finder(data['3'], data['4'])\n address = address_finder(addr1, addr2)\n\n queryset = Shop.objects.filter(address__startswith=address)\n context = {\"request\": request}\n serializer = ShopSerializer(queryset, context=context, many=True)\n\n return Response(serializer.data)\n\n\ndef address_finder(addr1, addr2):\n result = \"\"\n len1, len2 = len(addr1), len(addr2)\n for i in range(len1):\n match = \"\"\n for j in range(len2):\n if i + j < len1 and addr1[i + j] == addr2[j]:\n match += addr2[j]\n else:\n if len(match) > len(result):\n result = match\n match = \"\"\n return result\n","sub_path":"ecoshop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"380468741","text":"import os\nimport doctest\nimport time\nimport traceback\n\nimport importlib, importlib.util\nfrom copy import deepcopy\nfrom collections import OrderedDict\n\n# programmatically import buggy implementations\n\nimport lab\nimportlib.reload(lab)\n\n# list different implementations\n# called by ui\ndef list_impls(d):\n return [\"lab\"]\n\nTESTDOC_FLAGS = doctest.NORMALIZE_WHITESPACE | doctest.REPORT_ONLY_FIRST_FAILURE\ndef testdoc(target):\n if target == \"lab\":\n results = doctest.testmod(lab, optionflags=TESTDOC_FLAGS, report=False)\n elif target == \"readme\":\n results = doctest.testfile(\"readme.md\", optionflags=TESTDOC_FLAGS, report=False)\n return results[0] == 0\n\ndef checkdoc(kind):\n tests = doctest.DocTestFinder(exclude_empty=False).find(lab)\n for test in tests:\n if test.name == \"lab\":\n continue\n if kind == \"docstrings\" and not test.docstring:\n return \"Oh no, '{}' has no docstring!\".format(test.name)\n if kind == \"doctests\" and not 
test.examples:\n return \"Oh no, '{}' has no doctests!\".format(test.name)\n return {\"docstrings\": \"All functions are documented; great!\",\n \"doctests\": \"All functions have tests; great!\"}[kind]\n\ndef ui_new_game_2d(d):\n return lab.new_game_2d(d['num_rows'], d['num_cols'], [tuple(i) for i in d['bombs']])\n\ndef ui_dig_2d(d):\n game, row, col = d[\"game\"], d[\"row\"], d[\"col\"]\n nb_dug = lab.dig_2d(game, row, col)\n status = game['state']\n return [game, status, nb_dug]\n\ndef ui_render_2d(d):\n g = d['game']\n x = d['xray']\n b = g['board']\n m = g['mask']\n r = d['our_renderer']\n if r:\n return [[ '_' if (not x) and (not m[r][c]) else ' ' if b[r][c] == 0 else str(b[r][c]) for c in range(d['num_cols'])] for r in range(d['num_rows'])]\n else:\n try:\n game = d['game']\n r = lab.render_2d(game, x)\n except:\n r = [['ERROR' for i in range(d['num_cols'])] for j in range(d['num_rows'])]\n return r\n\ncurrent_game_nd = None\n\ndef ui_new_game_nd(d):\n global current_game_nd\n current_game_nd = lab.new_game_nd(d[\"dimensions\"], [tuple(i) for i in d[\"bombs\"]])\n return\n\ndef ui_dig_nd(d):\n coordinates = d[\"coordinates\"]\n nd_dug = lab.dig_nd(current_game_nd, tuple(coordinates))\n status = current_game_nd['state']\n return [status, nd_dug]\n\ndef ui_render_nd(d):\n return lab.render_nd(current_game_nd, d['xray'])\n\n","sub_path":"lab5/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"303670654","text":"\"\"\"\n banana.maya.MGlobal\n ~~~~~~~~~~~~~~~~~~~\n \n Monkey patching of the `~maya.OpenMaya.MGlobal` class.\n \n :copyright: Copyright 2014 by Christopher Crouzet.\n :license: MIT, see LICENSE for details.\n\"\"\"\n\nimport re\n\nimport gorilla\nfrom maya import OpenMaya\n\nimport banana.maya._cache\n\n\n_SHORT_NAME_PATTERN = r'[a-zA-Z_][\\w]*'\n_SHORT_NAME_WILD_PATTERN = r'[a-zA-Z_\\*][\\w\\*]*'\n_NAME_PATTERN = r'(?:%s)(?:\\:%s)*' % ((_SHORT_NAME_PATTERN, ) * 2)\n_NAME_WILD_PATTERN = r'(?:%s)(?:\\:%s)*' % ((_SHORT_NAME_WILD_PATTERN, ) * 2)\n\n_RE_SHORT_NAME_PATTERN = re.compile(\n r'^%s$' % _SHORT_NAME_PATTERN)\n_RE_SHORT_NAME_WILD_PATTERN = re.compile(\n r'^%s$' % _SHORT_NAME_WILD_PATTERN)\n_RE_NAME_PATTERN = re.compile(\n r'^%s$' % _NAME_PATTERN)\n_RE_NAME_WILD_PATTERN = re.compile(\n r'^%s$' % _NAME_WILD_PATTERN)\n_RE_PATH_PATTERN = re.compile(\n r'^(?:\\|%s)+(?:->(?:\\|%s)+)*$' % ((_NAME_PATTERN, ) * 2))\n_RE_PATH_WILD_PATTERN = re.compile(\n r'^(?:\\*?\\|%s)+(?:->(?:\\|%s)+)*$' % ((_NAME_WILD_PATTERN, ) * 2))\n\n_RE_NAME_DUPLICATES = re.compile(\n r'([\\:])\\1+')\n_RE_NAME_DUPLICATES_WILD = re.compile(\n r'([\\*\\:])\\1+')\n_RE_NAME_STRIP_BEGIN = re.compile(\n r'^[^a-zA-Z_]+')\n_RE_NAME_STRIP_BEGIN_WILD = re.compile(\n r'^[^a-zA-Z_\\*]+')\n_RE_NAME_STRIP_END = re.compile(\n r'[^\\w\\:]+$')\n_RE_NAME_STRIP_END_WILD = re.compile(\n r'[^\\w\\*\\:]+$')\n_RE_NAME_INVALID_CHARACTERS = re.compile(\n r'[^\\w\\:]')\n_RE_NAME_INVALID_CHARACTERS_WILD = re.compile(\n r'[^\\w\\*\\:]')\n\n_RE_PATH_DUPLICATES = re.compile(\n r'([\\|\\:])\\1+')\n_RE_PATH_DUPLICATES_WILD = re.compile(\n r'([\\*\\|\\:])\\1+')\n_RE_PATH_PART_STRIP_BEGIN = re.compile(\n r'^[^a-zA-Z_\\|]+')\n_RE_PATH_PART_STRIP_BEGIN_WILD = re.compile(\n r'^[^a-zA-Z_\\*\\|]+')\n_RE_PATH_PART_STRIP_END = re.compile(\n r'[^\\w\\:]+$')\n_RE_PATH_PART_STRIP_END_WILD = re.compile(\n r'[^\\w\\*\\:]+$')\n_RE_PATH_INVALID_CHARACTERS = re.compile(\n r'[^\\w\\|\\:]')\n_RE_PATH_INVALID_CHARACTERS_WILD = 
re.compile(\n r'[^\\w\\*\\|\\:]')\n\n\n@gorilla.patch(OpenMaya)\nclass MGlobal(object):\n \n @classmethod\n def bnn_getFunctionSetFromType(cls, type):\n \"\"\"Retrieve the function set from a given type.\n \n Parameters\n ----------\n type : maya.OpenMaya.MFn.Type\n Type of the function set to look for.\n \n Returns\n -------\n class inheriting from maya.OpenMaya.MFnBase\n The function set found, None otherwise.\n \n Examples\n --------\n Initialize the appropriate function set according to the type returned\n by a DAG path object:\n \n >>> import banana.maya\n >>> banana.maya.patch()\n >>> from maya import OpenMaya, cmds\n >>> cmds.group(name='transform', empty=True)\n >>> dagPath = OpenMaya.MDagPath.bnn_get(pattern='transform')\n >>> type = dagPath.apiType()\n >>> cls = OpenMaya.MGlobal.bnn_getFunctionSetFromType(type)\n >>> transform = cls(dagPath)\n \n Note\n ----\n Use the `~maya.OpenMaya.MObject.getFunctionSet()` method whenever\n possible to also try to deduce the function set if the\n correspondance with its type is not yet known.\n \"\"\"\n return banana.maya._cache.FUNCTION_SET_FROM_TYPE.get(type, None)\n \n @classmethod\n def bnn_isValidShortNamePattern(cls, name, wildcards=False):\n \"\"\"Check if a short name has a valid pattern.\n \n That is, a short name that is strictly well-formed and that is\n guaranteed to be accepted by the more forgiving syntax checker of Maya.\n \n Short names are used within simple types such as attributes.\n \n Parameters\n ----------\n name : str\n Short name to check.\n wildcards : bool\n True to consider the wildcards `*` as valid characters.\n \n Returns\n -------\n bool\n True if the short name is valid and strictly well-formed.\n \"\"\"\n # The valid characters forming a name can be checked as follow:\n # >>> from maya import cmds\n # >>> tests = {'first chars': '', 'chars': '_'}\n # ... for test, append in tests.iteritems():\n # ... valid_characters = []\n # ... for i in range(128):\n # ... character = chr(i).decode('ascii')\n # ... node = cmds.group(name=append + character, empty=True)\n # ... if node == append + character:\n # ... valid_characters.append(character)\n # ... cmds.delete(node)\n # ... print('%s: %s' % (test, ''.join(valid_characters)))\n # chars: 0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz\n # first chars: ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\n \n if wildcards and not _RE_PATH_DUPLICATES_WILD.search(name):\n return True if _RE_SHORT_NAME_WILD_PATTERN.match(name) else False\n \n if not _RE_PATH_DUPLICATES.search(name):\n return True if _RE_SHORT_NAME_PATTERN.match(name) else False\n \n return False\n \n @classmethod\n def bnn_isValidNamePattern(cls, name, wildcards=False):\n \"\"\"Check if a name has a valid pattern.\n \n That is, a name that is strictly well-formed and that is guaranteed\n to be accepted by the more forgiving syntax checker of Maya.\n \n Names are used as identifiers for nodes. 
Namespaces are taken\n into account.\n \n Parameters\n ----------\n name : str\n Name to check.\n wildcards : bool\n True to consider the wildcards `*` as valid characters.\n \n Returns\n -------\n bool\n True if the name is valid and strictly well-formed.\n \"\"\"\n # ``maya.OpenMaya.MNamespace.validateName()`` is not usable here since\n # it would discard any name containing a non-existing namespace.\n \n if wildcards and not _RE_PATH_DUPLICATES_WILD.search(name):\n return True if _RE_NAME_WILD_PATTERN.match(name) else False\n \n if not _RE_PATH_DUPLICATES.search(name):\n return True if _RE_NAME_PATTERN.match(name) else False\n \n return False\n \n @classmethod\n def bnn_isValidPathPattern(cls, path, wildcards=False):\n \"\"\"Check if a DAG path has a valid pattern.\n \n That is, a DAG path that is strictly well-formed and that is\n guaranteed to be accepted by the more forgiving syntax checker of Maya.\n \n Namespaces and underworld are taken into account.\n \n Parameters\n ----------\n path : str\n DAG path to check.\n wildcards : bool\n True to consider the wildcards `*` as valid characters.\n \n Returns\n -------\n bool\n True if the path is a valid and strictly well-formed DAG path.\n \"\"\"\n if wildcards and not _RE_PATH_DUPLICATES_WILD.search(path):\n return True if _RE_PATH_WILD_PATTERN.match(path) else False\n \n if not _RE_PATH_DUPLICATES.search(path):\n return True if _RE_PATH_PATTERN.match(path) else False\n \n return False\n \n @classmethod\n def bnn_matchPath(cls, pattern, path):\n \"\"\"Check if a DAG path matches a given pattern.\n \n Both pattern and path should be strictly well-formed. Use\n `normalizePath()` if it is not the case.\n \n Parameters\n ----------\n pattern : str\n Pattern to match to. Wildcards are allowed.\n path : str\n DAG path to check.\n \n Returns\n -------\n bool\n True if the DAG path matches the given pattern.\n \n Raises\n ------\n ValueError\n Either the pattern or the path is not well-formed.\n \n Note\n ----\n For the pattern ``|*``, Maya matches only the top level nodes\n while ``node|shape->|*`` matches all the underworld nodes\n to be found at any depth. 
This is inconsistent and we assume\n here that ``node|shape->|*`` matches only the top level nodes\n from the underworld.\n \"\"\"\n if not cls.bnn_isValidPathPattern(pattern, wildcards=True):\n raise ValueError(\n \"The pattern '%s' is not well-formed, try to normalize it.\" %\n pattern)\n \n if not cls.bnn_isValidPathPattern(path):\n raise ValueError(\n \"The path '%s' is not well-formed, try to normalize it.\" %\n path)\n \n if pattern.startswith('*'):\n pattern = pattern.lstrip('*')\n search = True\n else:\n search = False\n \n underworldParts = pattern.split('->')\n for i in range(len(underworldParts)):\n underworldPart = underworldParts[i]\n underworldPart = underworldPart.replace('*', r'[\\w]*?')\n underworldPart = underworldPart.replace(':', r'\\:')\n underworldPart = underworldPart.replace('|', r'\\|')\n underworldParts[i] = underworldPart\n \n pattern = '->'.join(underworldParts)\n if search:\n return True if re.search(r'%s$' % pattern, path) else False\n \n return True if re.match(r'^%s$' % pattern, path) else False\n \n @classmethod\n def bnn_normalizeName(cls, name, wildcards=False):\n \"\"\"Normalize a name.\n \n This results in a strictly well-formed name.\n \n Parameters\n ----------\n name : str\n Name to normalize.\n wildcards : bool\n True to consider the wildcards `*` as valid characters.\n \n Returns\n -------\n str\n The normalized name.\n \n Raises\n ------\n ValueError\n The input name is invalid and can't be normalized.\n \"\"\"\n if not name:\n return '*'\n \n if wildcards:\n reNameDuplicates = _RE_NAME_DUPLICATES_WILD\n reNameStripBegin = _RE_NAME_STRIP_BEGIN_WILD\n reNameStripEnd = _RE_NAME_STRIP_END_WILD\n reNameInvalidCharacters = _RE_NAME_INVALID_CHARACTERS_WILD\n else:\n reNameDuplicates = _RE_NAME_DUPLICATES\n reNameStripBegin = _RE_NAME_STRIP_BEGIN\n reNameStripEnd = _RE_NAME_STRIP_END\n reNameInvalidCharacters = _RE_NAME_INVALID_CHARACTERS\n \n normalized = reNameDuplicates.sub(r'\\1', name)\n normalized = reNameStripBegin.sub('', normalized)\n normalized = reNameStripEnd.sub('', normalized)\n normalized = reNameInvalidCharacters.sub('_', normalized)\n if not cls.bnn_isValidNamePattern(normalized, wildcards=wildcards):\n raise ValueError(\"The input name '%s' is invalid\" % name)\n \n return normalized\n \n @classmethod\n def bnn_normalizePath(cls, path, wildcards=False):\n \"\"\"Normalize a DAG path.\n \n This results in a strictly well-formed DAG path.\n \n Parameters\n ----------\n path : str\n DAG path to normalize.\n wildcards : bool\n True to consider the wildcards `*` as valid characters.\n \n Returns\n -------\n str\n The normalized DAG path.\n \n Raises\n ------\n ValueError\n The input path is invalid and can't be normalized.\n \"\"\"\n if not path:\n return '*|*'\n \n if wildcards:\n rePathDuplicates = _RE_PATH_DUPLICATES_WILD\n rePathPartStripBegin = _RE_PATH_PART_STRIP_BEGIN_WILD\n rePathPartStripEnd = _RE_PATH_PART_STRIP_END_WILD\n rePathInvalidCharacters = _RE_PATH_INVALID_CHARACTERS_WILD\n else:\n rePathDuplicates = _RE_PATH_DUPLICATES\n rePathPartStripBegin = _RE_PATH_PART_STRIP_BEGIN\n rePathPartStripEnd = _RE_PATH_PART_STRIP_END\n rePathInvalidCharacters = _RE_PATH_INVALID_CHARACTERS\n \n normalized = rePathDuplicates.sub(r'\\1', path)\n underworldParts = normalized.split('->')\n for i in range(len(underworldParts)):\n underworldPart = underworldParts[i]\n pathParts = underworldPart.split('|')\n for j in range(len(pathParts)):\n pathPart = pathParts[j]\n pathPart = rePathPartStripBegin.sub('', pathPart)\n pathPart = 
rePathPartStripEnd.sub('', pathPart)\n pathPart = rePathInvalidCharacters.sub('_', pathPart)\n pathParts[j] = pathPart\n \n if i == 0:\n if pathParts[0] and pathParts[0] != '*':\n pathParts.insert(0, '*')\n elif pathParts[0]:\n pathParts.insert(0, '')\n \n if not pathParts[-1]:\n del pathParts[-1]\n \n underworldParts[i] = '|'.join(pathParts)\n \n normalized = '->'.join(underworldParts)\n if not cls.bnn_isValidPathPattern(normalized, wildcards=True):\n raise ValueError(\"The input path '%s' is invalid\" % path)\n \n return normalized\n","sub_path":"banana/maya/extensions/OpenMaya/MGlobal.py","file_name":"MGlobal.py","file_ext":"py","file_size_in_byte":13330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"72989424","text":"s = 'azcbobobegghakl'\n# s = 'qljjdyxliz'\n# s = 'zyxwvutsrqponmlkjihgfedcba'\n# s = 'eazwzdugel'\n# s = 'ykuwpnijsaajk'\n\nholder = s[0]\nseq = \"\"\n\nfor i in range(1,len(s)):\n if s[i] >= s[i-1]:\n holder = holder + s[i]\n else:\n if len(holder) > len(seq):\n seq = holder\n holder = s[i]\n\nif len(holder) > len(seq):\n seq = holder\n\nprint('Longest substring in alphabetical order is: ', seq)","sub_path":"Week_1-Python_Basics/Problem_Set_1/Problem_3_ref.py","file_name":"Problem_3_ref.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"574703028","text":"import os.path\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.keras.layers as kl\nimport tensorflow.keras.losses as kls\nimport tensorflow.keras.optimizers as ko\n\nfrom game import Action\nfrom game import Game\n\n\nclass ProbabilityDistribution(tf.keras.Model):\n def call(self, logits):\n return tf.squeeze(tf.random.categorical(logits, 1), axis=-1)\n\n\nclass Model(tf.keras.Model):\n def __init__(self):\n super(Model, self).__init__('a2c_model')\n self.all_possible_actions_in_game = [Action.UP, Action.DOWN, Action.LEFT, Action.RIGHT] # ignoring Action.NONE\n\n self.common_layers = []\n self.common_layers.append(kl.Conv2D(64, 3, padding='same', activation='relu', input_shape=(Game.HEIGHT+2, Game.WIDTH+2, 2)))\n self.common_layers.append(kl.Conv2D(64, 3, padding='same', activation='relu'))\n self.common_layers.append(kl.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n self.common_layers.append(kl.Conv2D(128, 3, padding='same', activation='relu'))\n self.common_layers.append(kl.Conv2D(128, 3, padding='same', activation='relu'))\n self.common_layers.append(kl.MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n self.common_layers.append(kl.Flatten())\n\n # actor branch\n # final layer should output logits\n self.actor_layers = []\n self.actor_layers.append(kl.Dense(1024, activation='relu'))\n self.actor_layers.append(kl.Dense(128, activation='relu'))\n self.actor_layers.append(kl.Dense(len(self.all_possible_actions_in_game), name='policy_logits'))\n\n # critic branch\n # final layer should output a single value (state value / expected reward)\n self.critic_layers = []\n self.critic_layers.append(kl.Dense(1024, activation='relu'))\n self.critic_layers.append(kl.Dense(128, activation='relu'))\n self.critic_layers.append(kl.Dense(1, name='value'))\n\n self.dist = ProbabilityDistribution()\n\n def call(self, inputs):\n output_from_common_layers = tf.convert_to_tensor(inputs)\n # forward pass through common layers\n for layer in self.common_layers:\n output_from_common_layers = layer(output_from_common_layers)\n\n actor_output = output_from_common_layers\n # 
forward pass through actor layers\n for layer in self.actor_layers:\n actor_output = layer(actor_output)\n\n critic_output = output_from_common_layers\n # forward pass through critic layers\n for layer in self.critic_layers:\n critic_output = layer(critic_output)\n return actor_output, critic_output\n\n def action_value(self, obs):\n logits, value = self.predict(obs)\n action = self.dist.predict(logits)\n return np.squeeze(action, axis=-1), np.squeeze(value, axis=-1)\n\n\nclass A2CAgent:\n def __init__(self, model):\n self.params = {\n 'gamma': 0.99,\n 'value': 0.5,\n 'entropy': 0.0001\n }\n self.model = model\n self.model.compile(\n optimizer=ko.Adam(lr=0.001),\n loss=[self._logits_loss, self._value_loss]\n )\n\n def train(self, env, batch_sz=32, updates=100):\n action_ids = np.empty((batch_sz,), dtype=np.int32)\n rewards, dones, values = np.empty((3, batch_sz))\n observations = np.empty((batch_sz,) + (Game.HEIGHT+2, Game.WIDTH+2, 2))\n\n ep_rews = [0.0]\n next_obs = env.reset()\n for _ in range(updates):\n for step in range(batch_sz):\n observations[step] = next_obs.copy()\n action_ids[step], values[step] = self.model.action_value(next_obs[None, :])\n next_obs, rewards[step], dones[step] = env.step(self._action_from_id(action_ids[step]))\n\n ep_rews[-1] += rewards[step]\n if dones[step]:\n ep_rews.append(0.0)\n next_obs = env.reset()\n\n _, next_value = self.model.action_value(next_obs[None, :])\n returns, advs = self._returns_advantages(rewards, dones, values, next_value)\n acts_and_advs = np.concatenate([action_ids[:, None], advs[:, None]], axis=-1)\n self.model.train_on_batch(observations, [acts_and_advs, returns])\n\n return ep_rews\n\n def select_action(self, obs):\n action_id, _ = self.model.action_value(obs[None, :])\n return self._action_from_id(action_id)\n\n def save_model(self):\n self.model.save_weights('saved_model/weights', save_format='tf')\n\n def load_model_if_previously_saved(self, env):\n if os.path.exists('saved_model'):\n self.train(env, updates=1) # needed to initialize the model\n self.model.load_weights('saved_model/weights')\n\n def load_pretrained_model(self, env):\n if os.path.exists('pretrained_model'):\n self.train(env, updates=1) # needed to initialize the model\n self.model.load_weights('pretrained_model/weights')\n\n def _returns_advantages(self, rewards, dones, values, next_value):\n returns = np.append(np.zeros_like(rewards), next_value, axis=-1)\n for t in reversed(range(rewards.shape[0])):\n returns[t] = rewards[t] + self.params['gamma'] * returns[t + 1] * (1 - dones[t])\n returns = returns[:-1]\n advantages = returns - values\n return returns, advantages\n\n def _value_loss(self, returns, value):\n return self.params['value'] * kls.mean_squared_error(returns, value)\n\n def _logits_loss(self, acts_and_advs, logits):\n actions, advantages = tf.split(acts_and_advs, 2, axis=-1)\n weighted_sparse_ce = kls.SparseCategoricalCrossentropy(from_logits=True)\n actions = tf.cast(actions, tf.int32)\n policy_loss = weighted_sparse_ce(actions, logits, sample_weight=advantages)\n entropy_loss = kls.categorical_crossentropy(logits, logits, from_logits=True)\n return policy_loss - self.params['entropy'] * entropy_loss\n\n def _action_from_id(self, action_id):\n return self.model.all_possible_actions_in_game[action_id]\n","sub_path":"src/a2c.py","file_name":"a2c.py","file_ext":"py","file_size_in_byte":6080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} {"seq_id":"295238509","text":"__author__ = 
'midorikawakeita'\n\"\"\"\n....... ....... ..Q.... ....... ...Q... ....... .......\n\n#.###..\n####..#\n##Q####\n######.\n###Q###\n..####.\n.###.##\n\n#Q#####\n#######\n##Q####\n######Q\n###Q###\nQ######\n####Q##\n\"\"\"\nclass N_queen:\n def __init__(self,input):\n self.base_map = input.split(\" \")\n for i in range(len(self.base_map)):\n self.base_map [i] = list(self.base_map[i])\n self.width = len(self.base_map[0])\n self.height = len(self.base_map)\n\n \"\"\"Find each Q and mark every square that the Q can reach\"\"\"\n def check(self,mapping):\n for i in range(self.height):\n for n in range(self.width):\n if mapping[i][n] == \"Q\":\n # replace the row (left/right) with #\n mapping[i] = list(\"#\" * self.width)\n # replace the column (up/down) with #\n for h in range(self.height):\n mapping[h][n] = \"#\"\n # replace the diagonals with #\n y = 1\n x = 1\n while True:\n count = 0\n if ((i - y) >= 0) and ((n + x) <= self.width - 1):# upper right\n mapping[i - y][n + x] = \"#\"\n count += 1\n if ((i + y) <= (self.height - 1)) and ((n + x) <= (self.width - 1)):# lower right\n mapping[i + y][n + x] = \"#\"\n count += 1\n if ((i - y) >= 0) and ((n - x) >= 0):# upper left\n mapping[i - y][n - x] = \"#\"\n count += 1\n if ((i + y) <= (self.height -1)) and ((n -x) >= 0):# lower left\n mapping[i + y][n - x] = \"#\"\n count += 1\n y += 1\n x += 1\n if count == 0:\n mapping[i][n] = \"q\"\n break\n return mapping\n\n\n\n \"\"\"Use for loops over where to place a Q in rows 0, 1, 3, 5 and 6\"\"\"\n def set_queen(self,c_map):\n map_list = []\n num_list = []\n for a in [1,5,6]:\n for b in [4,5]:\n for c in [0,5,6]:\n for d in [0,1,6]:\n for e in [0,4]:\n num_list.append([a,b,c,d,e])\n for i in range(len(num_list)):\n c_map[0][num_list[i][0]] = \"Q\"\n c_map[1][num_list[i][1]] = \"Q\"\n c_map[3][num_list[i][2]] = \"Q\"\n c_map[5][num_list[i][3]] = \"Q\"\n c_map[6][num_list[i][4]] = \"Q\"\n d_map = self.check(c_map)\n map_list.append(\"\".join([\"\".join(i) for i in d_map]))\n c_map[0][num_list[i][0]] = \".\"\n c_map[1][num_list[i][1]] = \".\"\n c_map[3][num_list[i][2]] = \".\"\n c_map[5][num_list[i][3]] = \".\"\n c_map[6][num_list[i][4]] = \".\"\n #print(map_list[i])\n q_num_list = []\n for i in range(len(map_list)):\n q_num_list.append(map_list[i].count(\"q\"))\n max_num = max(q_num_list)\n max_map_list = []\n for i in range(len(q_num_list)):\n if q_num_list[i] == max_num:\n max_map_list.append(map_list[i])\n\n #print(len(num_list))\n return max_map_list\n\nif __name__ == \"__main__\":\n a = N_queen(\"....... ....... ..Q.... ....... ...Q... ....... 
.......\")\n b = a.check(a.base_map)\n c = a.set_queen(b)\n for i in range(len(c)):\n for n in range(7):\n print(c[i][7 * n:7 * (n + 1)])\n","sub_path":"n_queen.py","file_name":"n_queen.py","file_ext":"py","file_size_in_byte":3624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"589329202","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import Float64\nfrom sensor_msgs.msg import Range\n\ndef switch_led():\n rospy.init_node('led_switch',anonymous=True)\n\n \n offset=0.08\n\n def callback(data):\n global state\n if data.range 0 and cuda_path != cuda_path_default:\n utils.print_warning(\n 'nvcc path != CUDA_PATH',\n 'nvcc path: %s' % cuda_path_default,\n 'CUDA_PATH: %s' % cuda_path)\n\n if os.path.exists(cuda_path):\n _cuda_path = cuda_path\n elif cuda_path_default is not None:\n _cuda_path = cuda_path_default\n elif os.path.exists('/usr/local/cuda'):\n _cuda_path = '/usr/local/cuda'\n else:\n _cuda_path = None\n\n return _cuda_path\n\n\ndef get_compiler_setting():\n cuda_path = get_cuda_path()\n\n include_dirs = []\n library_dirs = []\n define_macros = []\n\n if cuda_path:\n include_dirs.append(os.path.join(cuda_path, 'include'))\n if sys.platform == 'win32':\n library_dirs.append(os.path.join(cuda_path, 'bin'))\n library_dirs.append(os.path.join(cuda_path, 'lib', 'x64'))\n else:\n library_dirs.append(os.path.join(cuda_path, 'lib64'))\n library_dirs.append(os.path.join(cuda_path, 'lib'))\n if sys.platform == 'darwin':\n library_dirs.append('/usr/local/cuda/lib')\n\n if sys.platform == 'win32':\n nvtoolsext_path = os.environ.get('NVTOOLSEXT_PATH', '')\n if os.path.exists(nvtoolsext_path):\n include_dirs.append(os.path.join(nvtoolsext_path, 'include'))\n library_dirs.append(os.path.join(nvtoolsext_path, 'lib', 'x64'))\n else:\n define_macros.append(('CUPY_NO_NVTX', '1'))\n\n return {\n 'include_dirs': include_dirs,\n 'library_dirs': library_dirs,\n 'define_macros': define_macros,\n 'language': 'c++',\n }\n\n\ndef _match_output_lines(output_lines, regexs):\n # Matches regular expressions `regexs` against `output_lines` and finds the\n # consecutive matching lines from `output_lines`.\n # `None` is returned if no match is found.\n if len(output_lines) < len(regexs):\n return None\n\n matches = [None] * len(regexs)\n for i in range(len(output_lines) - len(regexs)):\n for j in range(len(regexs)):\n m = re.match(regexs[j], output_lines[i + j])\n if not m:\n break\n matches[j] = m\n else:\n # Match found\n return matches\n\n # No match\n return None\n\n\n_opencl_version = None\n\n\ndef check_opencl_version(compiler, settings):\n global _opencl_version\n\n # TODO(naoya.sakabe): implement this\n\n return True\n\n\ndef get_opencl_version():\n \"\"\"Return OpenCL Toolkit version cached in check_opencl_version().\"\"\"\n global _opencl_version\n if _opencl_version is None:\n msg = 'check_opencl_version() must be called first.'\n raise Exception(msg)\n return _opencl_version\n\n\ndef build_shlib(compiler, source, libraries=(),\n include_dirs=(), library_dirs=()):\n with _tempdir() as temp_dir:\n fname = os.path.join(temp_dir, 'a.cpp')\n with open(fname, 'w') as f:\n f.write(source)\n\n objects = compiler.compile([fname], output_dir=temp_dir,\n include_dirs=include_dirs)\n\n try:\n postargs = ['/MANIFEST'] if sys.platform == 'win32' else []\n compiler.link_shared_lib(objects,\n os.path.join(temp_dir, 'a'),\n libraries=libraries,\n library_dirs=library_dirs,\n extra_postargs=postargs,\n target_lang='c++')\n except Exception as e:\n msg 
= 'Cannot build a stub file.\\nOriginal error: {0}'.format(e)\n raise Exception(msg)\n\n\ndef build_and_run(compiler, source, libraries=(),\n include_dirs=(), library_dirs=()):\n with _tempdir() as temp_dir:\n fname = os.path.join(temp_dir, 'a.cpp')\n with open(fname, 'w') as f:\n f.write(source)\n\n objects = compiler.compile([fname], output_dir=temp_dir,\n include_dirs=include_dirs)\n\n try:\n postargs = ['/MANIFEST'] if sys.platform == 'win32' else []\n compiler.link_executable(objects,\n os.path.join(temp_dir, 'a'),\n libraries=libraries,\n library_dirs=library_dirs,\n extra_postargs=postargs,\n target_lang='c++')\n except Exception as e:\n msg = 'Cannot build a stub file.\\nOriginal error: {0}'.format(e)\n raise Exception(msg)\n\n try:\n out = subprocess.check_output(os.path.join(temp_dir, 'a'))\n return out\n\n except Exception as e:\n msg = 'Cannot execute a stub file.\\nOriginal error: {0}'.format(e)\n raise Exception(msg)\n","sub_path":"install/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":5897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"8188494","text":"import constraintSatisfier as cs\nimport mapgen as gen\nimport operator\nimport copy\n\ndef assignConstraints(edges, C):\n if len(edges) == 0:\n return C\n C.append((operator.ne, edges[0][0], edges[0][1]))\n return assignConstraints(edges[1:], C)\n\ndef assignDomain(values, domain, D):\n if len(values) == 0:\n return D\n D[values[0]] = set(domain)\n return assignDomain(values[1:], domain, D)\n\ndef runMapTrial(count, order, inference, trial, killCond=None):\n print(\"Map trial: count = \" + str(count) + \"\\torder = \" + order.__name__ + \"\\tinference = \" + inference.__name__ + \"\\ttrial = \" + str(trial))\n S = range(0, count)\n D = assignDomain(S, frozenset(['red', 'green', 'blue', 'yellow']), {})\n C = assignConstraints(list(gen.gen(count, trial)), [])\n csp = cs.CSP(S, D, C, order, inference)\n csp.killCond = killCond\n if cs.backtrack(csp) == None:\n return None\n csp.finalMetrics()\n return csp\n","sub_path":"project1b/src/mapcoloring.py","file_name":"mapcoloring.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"592116492","text":"#\r\n# This is the main code for the lexer.\r\n#\r\n\r\n\r\nimport re\r\n\r\nclass Lexer(object):\r\n\r\n\tdef __init__(self, source_code):\r\n\t\tself.source_code = source_code\r\n\r\n\tdef tokenize(self):\r\n\t\t\r\n\t\t# Where all lexer tokens are stored\r\n\t\ttokens = []\r\n\r\n\t\t# Creates a word list of the source code\r\n\t\tsource_code = self.source_code.split()\r\n\r\n\t\t# Keeps track of word index in source code\r\n\t\tsource_index = 0\r\n\r\n\t\t# Goes through every word in source code in order to generate tokens.\r\n\t\twhile source_index < len(source_code):\r\n\r\n\t\t\tword = (source_code[source_index])\r\n\r\n\t\t\t# Recognises variables and creates token for it\r\n\t\t\tif word == \"var\": tokens.append([\"VAR_DECLARATION\", word])\r\n\r\n\t\t\t# This will recognise a word and create an identifier token for it\r\n\t\t\telif re.match('[a-z]', word) or re.match('[A-Z]', word):\r\n\t\t\t\ttokens.append(['IDENTIFIER', word])\r\n\r\n\t\t\t# This will recognise an integer and create an identifier token for it\r\n\t\t\telif re.match('[0-9]', word):\r\n\t\t\t\ttokens.append(['INTEGER', word])\r\n\r\n\t\t\t# This will recognise an operator and create an identifier token for it\r\n\t\t\telif word in 
\"=/*=-+\":\r\n\t\t\t\ttokens.append(['OPERATOR', word])\r\n\r\n\t\t\t# This will recognise a semicolon and create a token for it\r\n\t\t\telif word in \";\":\r\n\t\t\t\ttokens.append(['SEMICOLON', word])\r\n\t\t\t\t\r\n\t\t\t# This will recognise a quote and create a token for it\r\n\t\t\telif word in '\"':\r\n\t\t\t\ttokens.append(['QUOTES', word])\r\n\r\n\t\t\t# Increases word index after checking it\r\n\t\t\tsource_index += 1\r\n\r\n\t\tprint(tokens)\r\n\r\n\t\treturn tokens\r\n","sub_path":"src/lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} {"seq_id":"426860628","text":"import numpy as np\r\nfrom numpy.linalg import norm\r\n\r\ndef k_mean(k,data,centroids = None, tol = 0):\r\n \"\"\"k-means algorithm with k centroids for the data set data\"\"\"\r\n\r\n # Generate k centroids\r\n if centroids is not None:\r\n centroides = centroids\r\n else:\r\n centroides = np.random.rand(k,len(data[0]))\r\n\r\n lista_datos = []\r\n for i in range(k):\r\n lista_datos.append([centroides[i],[]])\r\n\r\n # dist < maxima_distancia for L1 > L2\r\n maxima_distancia = len(data) + 1\r\n\r\n # Compute the distance from each data point to the centroids and\r\n # assign a centroid to each data point.\r\n\r\n for dato in data:\r\n minimo = maxima_distancia\r\n for centroide in range(k):\r\n posible_min = min(minimo, norm(dato-centroides[centroide],2))\r\n if posible_min < minimo:\r\n c_temp = centroide\r\n minimo = posible_min\r\n\r\n lista_datos[c_temp][1].append(dato)\r\n\r\n # Compute the new centroids.\r\n centroides_temp = []\r\n for centroide in range(k):\r\n # If any points were assigned to the centroid, it is updated\r\n if len(lista_datos[centroide][1]) != 0:\r\n c_temp = sum(lista_datos[centroide][1])/len(lista_datos[centroide][1])\r\n # the sum is computed vector-wise\r\n # Otherwise it is left unchanged\r\n else:\r\n c_temp=lista_datos[centroide][0]\r\n centroides_temp.append(c_temp)\r\n lista_datos[centroide][0] = c_temp\r\n\r\n # If the centroids are still moving, run the algorithm again\r\n\r\n if np.any(np.any(abs(np.array(centroides_temp)-np.array(centroides))> tol)):\r\n return k_mean(k,data,centroids = centroides_temp, tol = tol)\r\n\r\n # If the centres stay within the tolerance, the algorithm finishes\r\n return lista_datos\r\n","sub_path":"Correciones/T1/submissions/bahamondeswalterspablosebastián_49364_1768243_Bahamondes_Pablo_T1/k_mean_numpy.py","file_name":"k_mean_numpy.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} {"seq_id":"111903938","text":"from pymodbus.client.sync import ModbusTcpClient as ModbusClient\r\nfrom pymodbus.payload import BinaryPayloadDecoder\r\nfrom pymodbus.constants import Endian\r\nimport paho.mqtt.client as mqtt\r\nimport socket\r\nfrom multiprocessing import Process, Pipe, Value, Manager\r\nimport random\r\nimport json\r\nimport time\r\nfrom app import logger,db\r\nfrom models import User, modbus_address,mqtt_parameters,modbus_parameters,pub_mqtt_topics,read_mod_registers\r\nfrom datetime import datetime\r\nimport sparkplug_b as sparkplug\r\nfrom sparkplug_b import *\r\n############------------------------------------------------------################\r\n\r\ndef on_connect(client, userdata, flag,rc):\r\n print(\"Connected with result code \"+str(rc)) \r\n logger.info(\"Connected with result code 
\"+str(rc))\r\n    client.subscribe(\"Modbus\\Received\")\r\n Mqtt_Stat.value = rc\r\n\r\n\r\ndef ModReadJson(client,start,qty):\r\n response = client.read_holding_registers(start,qty,unit=0x1)\r\n return_dict = {}\r\n for i in range(qty):\r\n return_dict[\"%s\" %str(start+i)] = \"%s\" %str(response.registers[i])\r\n return json.dumps(return_dict)\r\n\r\ndef ModReadTopic(client,topic):\r\n value_dict = {} \r\n # ret_dict = {}\r\n for regis in topic.mod_addresses:\r\n response = client.read_holding_registers(regis.address,regis.qty,unit = regis.unit)\r\n \r\n if regis.pp:\r\n exec(regis.pp)\r\n # value = exec(regis.pp)\r\n # value_dict[regis.name] = value\r\n else:\r\n value_dict[regis.name] = response.registers\r\n # print(json.dumps(value_dict)) \r\n value_dict[\"ts\"] = str(datetime.now())\r\n return (topic, json.dumps(value_dict))\r\n \r\n\r\ndef ModWriteJson(client,json_data):\r\n print(json_data)\r\n pass\r\n\r\ndef validate_ip(s):\r\n a = s.split('.')\r\n if len(a) != 4:\r\n return False\r\n for x in a:\r\n if not x.isdigit():\r\n return False\r\n i = int(x)\r\n if i < 0 or i > 255:\r\n return False\r\n return True\r\n\r\ndef is_connected(hostname,port):\r\n try:\r\n # see if we can resolve the host name -- tells us if there is\r\n # a DNS listening\r\n if validate_ip(hostname):\r\n host = hostname\r\n else:\r\n host = socket.gethostbyname(hostname)\r\n # connect to the host -- tells us if the host is actually\r\n # reachable\r\n s = socket.create_connection((host, port), 2)\r\n s.close()\r\n del s\r\n return True\r\n except Exception:\r\n return False\r\n return False\r\n#################---------Ignition sparkplug functions ----------#################\r\n\r\n###--publish node birth ----####\r\ndef publishNodeBirth():\r\n \r\n payload = sparkplug.getNodeBirthPayload()\r\n\r\n addMetric(payload, \"Node Control/Next Server\", AliasMap.Next_Server, MetricDataType.Boolean, False)\r\n addMetric(payload, \"Node Control/Rebirth\", AliasMap.Rebirth, MetricDataType.Boolean, False)\r\n addMetric(payload, \"Node Control/Reboot\", AliasMap.Reboot, MetricDataType.Boolean, False)\r\n return bytearray(payload.SerializeToString())\r\n\r\n\r\n###--publish device birth ----####\r\n\r\ndef publishDeviceBirth(topics):\r\n print(\"Publishing Device Birth\")\r\n\r\n payload = sparkplug.getDeviceBirthPayload()\r\n\r\n addMetric(payload, \"input/Device Metric0\", AliasMap.Device_Metric0, MetricDataType.String, \"hello device\")\r\n addMetric(payload, \"input/Device Metric1\", AliasMap.Device_Metric1, MetricDataType.Boolean, True)\r\n addMetric(payload, \"output/Device Metric2\", AliasMap.Device_Metric2, MetricDataType.Int16, 16)\r\n addMetric(payload, \"output/Device Metric3\", AliasMap.Device_Metric3, MetricDataType.Boolean, True)\r\n template = initTemplateMetric(payload, \"My_Custom_Motor\", AliasMap.My_Custom_Motor, \"Custom_Motor\")\r\n templateParameter = template.parameters.add()\r\n templateParameter.name = \"Index\"\r\n templateParameter.type = ParameterDataType.String\r\n templateParameter.string_value = \"1\"\r\n addMetric(template, \"RPMs\", None, MetricDataType.Int32, 123) # No alias in UDT members\r\n addMetric(template, \"AMPs\", None, MetricDataType.Int32, 456) # No alias in UDT members\r\n\r\n return bytearray(payload.SerializeToString())\r\n\r\n\r\n#################---------------mqtt function ---------------------################\r\n\r\ndef Mqtt_process(Stat, MqConn,MqStatChild , MqDataChild,):\r\n mq = mqtt_parameters.query.get(1)\r\n PubTopics = 
pub_mqtt_topics.query.filter(pub_mqtt_topics.mod_addresses.any(read_mod_registers.address >= 0)).all()\r\n try:\r\n client = mqtt.Client(client_id = \"Proj_%s\" %(random.getrandbits(8)))\r\n client.on_connect = on_connect\r\n # Mqtt_Stat = client._handle_connack()\r\n if mq.mqtt_user_name and mq.mqtt_password :\r\n client.username_pw_set(mq.mqtt_user_name,mq.mqtt_password)\r\n elif mq.mqtt_access_token:\r\n client.username_pw_set(mq.mqtt_access_token)\r\n\r\n client.connect(mq.mqtt_ip,mq.mqtt_port,60)\r\n MqStatChild.send(\"Setted Client parameters\")\r\n logger.info(\"Setted Client parameters\")\r\n\r\n client.loop_start()\r\n MqStatChild.send(\"Loop Started & Connected to Server\")\r\n logger.info(\"Loop Started & Connected to Server\")\r\n Mqtt_Stat = Stat.value\r\n while (Mqtt_Stat > 0):\r\n time.sleep(1)\r\n\r\n if Mqtt_Stat == 0:\r\n pass\r\n\r\n elif Mqtt_Stat == 1: #---Connection refused - incorrect protocol version ---#\r\n client.loop_stop()\r\n MqStatChild.send(\"Connection refused - invalid client identifier\") \r\n logger.error(\"Connection refused - invalid client identifier\") \r\n\r\n elif Mqtt_Stat == 2 : #---Connection refused - invalid client identifier---#\r\n MqStatChild.send(\"Connection refused - invalid client identifier\")\r\n logger.error(\"Connection refused - invalid client identifier\")\r\n client.loop_stop()\r\n time.sleep(1)\r\n client = mqtt.Client(client_id = \"Proj_%s\" %random.getrandbits(8))\r\n MqStatChild.send(\"Changed another Client identifier\")\r\n logger.info(\"Changed another Client identifier\")\r\n client.loop_start()\r\n MqStatChild.send(\"Loop Started\")\r\n logger.info(\"Loop Started\")\r\n\r\n elif Mqtt_Stat == 3: #-- Connection refused - server unavailable ---#\r\n client.loop_stop()\r\n MqStatChild.send(\"Connection Unaviable Checck Internet\")\r\n logger.error(\"Connection Unaviable Checck Internet\")\r\n\r\n elif Mqtt_Stat == 4: #---Connection refused - bad username or password---#\r\n client.loop_stop()\r\n MqStatChild.send(\" Connection refused - bad username or password\")\r\n logger.error(\" Connection refused - bad username or password\")\r\n elif Mqtt_Stat == 5 : #---Connection refused - not authorised---#\r\n client.loop_stop()\r\n MqStatChild.send(\"Connection refused - not authorised\")\r\n logger.error(\"Connection refused - not authorised\")\r\n\r\n else :\r\n MqStatChild.send(\"Waiting for Connection or Not Connected or -->Mqtt_Stat - %s\" %Mqtt_Stat)\r\n logger.info(\"Waiting for Connection or Not Connected or -->Mqtt_Stat - %s\" %Mqtt_Stat)\r\n\r\n devbirthpayload = publishDeviceBirth(PubTopics)\r\n\r\n while True:\r\n if MqConn.poll():\r\n msg = MqConn.recv()\r\n client.publish(msg[0].topic, payload= msg[1], qos=msg[0].qos, retain=msg[0].retain)\r\n # client.publish(msg[\"topic\"], msg[\"value\"])\r\n MqDataChild.send(msg)\r\n\r\n except Exception as e :\r\n client.loop_stop()\r\n print(\"Mqtt error - {}\".format(e))\r\n MqStatChild.send(\"Mqtt Disconnected, mqtt Process Stopped\")\r\n MqStatChild.send(str(e))\r\n logger.exception(\"Got Exception\")\r\n\r\n\r\n####################------------------Modbus function -------------------#####################\r\ndef Mod_ReadWrite(ModConn, ModStatChild):\r\n mod = modbus_parameters.query.get(1)\r\n \r\n PubTopics = pub_mqtt_topics.query.filter(pub_mqtt_topics.mod_addresses.any(read_mod_registers.address >= 0)).all()\r\n\r\n try:\r\n while True:\r\n if is_connected(mod.modbus_ip,mod.modbus_port):\r\n ModStatChild.send(\"Modbus device Connection is UP\")\r\n logger.info(\"Modbus 
device Connection is UP\")\r\n break\r\n else:\r\n ModStatChild.send(\"Modbus device connection is DOWN\")\r\n logger.error(\"Modbus device connection is DOWN\")\r\n time.sleep(10)\r\n Modclient = ModbusClient(mod.modbus_ip, port = mod.modbus_port)\r\n msg = 0 \r\n while True :\r\n \r\n if ModStatChild.poll():\r\n msg = ModStatChild.recv()\r\n print(\"received Msg in modbus outer while loop {}\".format(msg))\r\n logger.info(\"received Msg in modbus outer while loop {}\".format(msg))\r\n if msg == 1:\r\n Modclient.connect()\r\n ModStatChild.send(\"Connected to Modbus device\")\r\n logger.info(\"Connected to Modbus device\")\r\n msg = 2\r\n while msg == 2:\r\n # GetModValues = ModReadJson(Modclient, 0 , 10) \r\n for topic in PubTopics:\r\n GetModValues = ModReadTopic(Modclient,topic)\r\n #########################\r\n ModConn.send(GetModValues)\r\n #########################\r\n # print(GetModValues)\r\n time.sleep(0.5)\r\n if ModStatChild.poll():\r\n msg = ModStatChild.recv()\r\n if msg == 1 :\r\n print(\"received Msg in modbus inner while loop {}\".format(msg))\r\n logger.info(\"received Msg in modbus inner while loop {}\".format(msg))\r\n msg = 2\r\n ModStatChild.send(\"Modbus device data Acquisition already running\")\r\n \r\n while msg == 3 :\r\n Modclient.close()\r\n msg = 0\r\n ModStatChild.send(\"Modbus device connection Closed\")\r\n \r\n # FlModChild.send(\"Disconnected from Controller\")\r\n # FlModChild.send(GetModValues)\r\n # if ModConn.poll():\r\n # msg = ModConn.recv()\r\n # if isinstance(msg, dict):\r\n # if \"ModWrite\" in msg:\r\n # if msg[\"ModWrite\"] == True:\r\n # ModWriteJson(ModbusClient,msg)\r\n except Exception as e:\r\n ModStatChild.send(\"Modbus Disconnected, Modbus process Stopped\")\r\n ModStatChild.send(str(e))\r\n print(e)\r\n############------------------------------------------------------################","sub_path":"process_ig.py","file_name":"process_ig.py","file_ext":"py","file_size_in_byte":10953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"314926544","text":"from typing import List\nfrom copy import deepcopy\n\nstack: List[List[str]] = []\n\nstack.append(['A'])\nstack.append(['B'])\nstack.append(['C'])\n\n# For\nfor item in stack[::-1]:\n print(item)\n\n# While\n# A função copy só funciona para dados mutáveis\n# stack_copy = stack.copy()\nstack_copy = deepcopy(stack)\nwhile stack_copy:\n item = stack_copy.pop()\n item += ['Manipulado']\n print(item)\n\nprint('Pilha:', stack)\n","sub_path":"aula02/pilhas02.py","file_name":"pilhas02.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"22357553","text":"class FileReader(object):\r\n\t\"\"\"docstring for FileReader\"\"\"\r\n\tdef __init__(self, path):\r\n\t\tself.path = path\r\n\r\n\tdef read(self):\r\n\t\t\"\"\"Return the file's content as a line\"\"\"\r\n\t\ttry:\r\n\t\t\twith open(self.path,'r') as f:\r\n\t\t\t\tread_data = f.read()\r\n\t\t\treturn read_data\r\n\t\t\r\n\t\texcept (IOError, OSError):\r\n\t\t\treturn \"\"\r\n\r\n# def _main():\r\n# \texample = FileReader(\"city.py\")\r\n# \tprint(example.read())\r\n\r\n# if __name__ == '__main__':\r\n# \t_main()\r\n\r\n","sub_path":"week301solution.py","file_name":"week301solution.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"281674283","text":"#!/usr/bin/env python\n\nimport signal\nimport time\nfrom 
sys import exit\nimport os.path\nimport random\n\ntry:\n    from PIL import Image\nexcept ImportError:\n    exit(\"This script requires the pillow module\\nInstall with: sudo pip install pillow\")\n\nimport unicornhathd\n\n\nprint(\"\"\"Unicorn HAT HD: Show a PNG image!\n\nThis basic example shows use of the Python Pillow library:\n\nThe tiny 16x16 bosses in lofi.png are from Oddball:\nhttp://forums.tigsource.com/index.php?topic=8834.0\n\nLicensed under Creative Commons Attribution-Noncommercial-Share Alike 3.0\nUnported License.\n\nPress Ctrl+C to exit!\n\n\"\"\")\n\nunicornhathd.rotation(90)\nunicornhathd.brightness(0.95)\n\nwidth, height = unicornhathd.get_shape()\nfdir = os.path.abspath(os.path.dirname(__file__))\nf = os.path.join(fdir, 'asya.png')\nimg = Image.open(f)\n\ntry:\n    while True:\n\n        o_x = random.choice([0,1])\n        o_y = 0\n        randi = random.randint(0, 10)  # randint takes two ints, not a list\n        if randi == 0:\n            for x in range(width):\n                for y in range(height):\n                    pixel = img.getpixel(((2*width)+y,(o_y*height)+x))\n                    r, g, b = int(pixel[0]),int(pixel[1]),int(pixel[2])\n                    if r or g or b:\n                        valid = True\n                        unicornhathd.set_pixel(x, y, r, g, b)\n            time.sleep(0.3)\n\n        valid = False\n        for x in range(width):\n            for y in range(height):\n                pixel = img.getpixel(((o_x*width)+y,(o_y*height)+x))\n                r, g, b = int(pixel[0]),int(pixel[1]),int(pixel[2])\n                if r or g or b:\n                    valid = True\n                    unicornhathd.set_pixel(x, y, r, g, b)\n        if valid:\n            unicornhathd.show()\n        time.sleep(6)\n\nexcept KeyboardInterrupt:\n    unicornhathd.off()\n\n","sub_path":"show-png.py","file_name":"show-png.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"494763049","text":"# _*_ coding:utf-8 _*_\r\n\r\nimport socket\r\nimport time\r\nfrom kodec import msg_type_pb2, logical_pb2\r\nfrom public import IPConver\r\nimport struct\r\nimport random\r\n\r\n# Read-aloud question\r\nclass ReadSentenceClass(object):\r\n    def __init__(self, userId):\r\n        self.userId = userId\r\n        self.questionId = \"\"\r\n        self.token = \"\"\r\n\r\n    # Logic handler for read-aloud questions\r\n    def readLogic(self, recData):\r\n        if recData.result_frame.code == 0:\r\n            if recData.head_frame.msg_type == msg_type_pb2.READ_SENTENCE_START_BROADCAST:\r\n                print(\"Received read-aloud question:\", recData.logical_frame.read_sentence_start_broadcast.question_id,\r\n                      recData.logical_frame.read_sentence_start_broadcast.text)\r\n                self.questionId = recData.logical_frame.read_sentence_start_broadcast.question_id\r\n                self.text = recData.logical_frame.read_sentence_start_broadcast.text\r\n                self.maxRecordTime = recData.logical_frame.read_sentence_start_broadcast.max_record_time  ## milliseconds\r\n                self.teacherId = recData.logical_frame.read_sentence_start_broadcast.teacher_id\r\n                return True\r\n            elif recData.head_frame.msg_type == msg_type_pb2.SENTENCE_NO_FINISH_P2P:\r\n                print(\"Unfinished read-aloud question:\", recData.logical_frame.sentence_no_finish_p2p.question_id,\r\n                      recData.logical_frame.sentence_no_finish_p2p.text)\r\n                self.questionId = recData.logical_frame.sentence_no_finish_p2p.question_id\r\n                self.text = recData.logical_frame.sentence_no_finish_p2p.text\r\n                self.maxRecordTime = recData.logical_frame.sentence_no_finish_p2p.max_record_time  ## milliseconds\r\n                self.teacherId = recData.logical_frame.sentence_no_finish_p2p.teacher_id\r\n                return True\r\n            elif recData.head_frame.msg_type == msg_type_pb2.READ_SENTENCE_STOP_BROADCAST:\r\n                print(\"Stop read-aloud question:\", recData.logical_frame.read_sentence_stop_broadcast.question_id)\r\n                return False\r\n            elif recData.head_frame.msg_type == msg_type_pb2.READ_SENTENCE_SUBMIT_RES:\r\n                print(\"Read-aloud submission finished:\", 
self.questionId)\r\n                return False\r\n    # Packs the read-aloud submission into a request\r\n    def pack_submitRead(self, token, audioUrl):\r\n        reqPack = logical_pb2.RequestPackage()\r\n        reqCommFrame = reqPack.head_frame\r\n        reqCommFrame.msg_type = msg_type_pb2.READ_SENTENCE_SUBMIT_REQ\r\n        reqCommFrame.msg_no = 'wk_tt_' + str(random.randint(1, 999999))  # use a random number\r\n        reqCommFrame.msg_from_user_id = self.userId\r\n        reqCommFrame.msg_to_user_id = \"\"\r\n        reqCommFrame.device_type = 0\r\n        reqCommFrame.version = 101000012\r\n        # reqCommFrame.timestamp = int(time.time() * 1000)\r\n        reqCommFrame.ip = IPConver.ip2int(socket.gethostbyname(socket.gethostname()))\r\n        reqCommFrame.client_info.os_name = \"windows\"\r\n        reqCommFrame.client_info.client_version = \"wkai2133\"\r\n        reqCommFrame.extended_fields['from'] = 'multiuser_test'\r\n\r\n        # Build the logical frame for the read-aloud submission request\r\n        req_message = logical_pb2.RequestMessage()\r\n        req_message.token = token\r\n        reqBody = req_message.read_sentence_submit\r\n        reqBody.question_id = self.questionId\r\n        reqBody.text = self.text\r\n        reqBody.record_time = 9000\r\n        reqBody.audio_url = audioUrl\r\n        reqBody.teacher_id = self.teacherId\r\n        # reqBody.err_code = random.sample([0, 1, 2, 3, 4], 1)[0]\r\n        reqBody.err_code = 0\r\n        # reqBody.err_msg = \"ok\"\r\n        if reqBody.err_code == 0:\r\n            reqBody.err_msg = \"\"\r\n        elif reqBody.err_code == 1:\r\n            reqBody.err_msg = \"Recording time too short\"\r\n            reqBody.record_time = 0\r\n            reqBody.audio_url = \"\"\r\n        elif reqBody.err_code == 2:\r\n            reqBody.err_msg = \"Recording permission not granted\"\r\n            reqBody.record_time = 0\r\n            reqBody.audio_url = \"\"\r\n        elif reqBody.err_code == 3:\r\n            reqBody.err_msg = \"Other\"\r\n            reqBody.record_time = 0\r\n            reqBody.audio_url = \"\"\r\n        elif reqBody.err_code == 4:\r\n            reqBody.err_msg = u\"Network error during upload\"\r\n            reqBody.record_time = 0\r\n            reqBody.audio_url = \"\"\r\n\r\n        # Serialize the answer request packet\r\n        reqPack.logical_frame = req_message.SerializeToString()\r\n        readMessage = reqPack.SerializeToString()\r\n\r\n        Msg_flag = int('0x0000', 16)\r\n        # Compute the length of the request packet\r\n        Msg_len = reqPack.ByteSize() + 2\r\n        reportMessage = struct.pack('!IH', Msg_len, Msg_flag) + readMessage\r\n        return reportMessage","sub_path":"liveTest/simulateSever/liveServiceMonitor/logical/ReadSentenceClass.py","file_name":"ReadSentenceClass.py","file_ext":"py","file_size_in_byte":4699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"826329","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 2 22:03:16 2020\n\n@author: asif\n\"\"\"\n\nimport numpy as np\n\nno_of_specs=8\nbase_name=\"spec\"\n\n\n\ndef Log_reader(base_name,no_of_specs): \n    \"\"\"Takes the base name and number of spectra as arguments. 
Returns Photon index and normalisation\n The data is of the format [lower lim, upper lim, -ve error, +ve error] \"\"\" \n ph_idx=[]\n norm=[]\n \n for i in range(no_of_specs): \n log_name=base_name+str(i)+\".log\"\n with open(log_name,'r') as reader:\n log=reader.readlines()\n \n line=log[-11]\n temp=line.strip().split(' ')\n temp=[i for i in temp if i !='']\n temp1=temp[-1][1:-1].split(',')\n ph_idx.append(temp[2:-1]+temp1)\n \n \n line=log[-7]\n temp=line.strip().split(' ')\n temp=[i for i in temp if i !='']\n temp1=temp[-1][1:-1].split(',')\n norm.append(temp[2:-1]+temp1)\n \n return [np.array(ph_idx,dtype=np.float64),np.array(norm,dtype=np.float64)]\n \nph,nm=Log_reader(base_name,no_of_specs)","sub_path":"log_reader.py","file_name":"log_reader.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"271863940","text":"from flask import Flask, render_template, redirect, url_for, request, session,flash, g\nfrom functools import wraps\nimport random\nimport sqlite3\n\napp = Flask(__name__)\n\napp.secret_key = \"f0a9fja09fjaf0aif90afi0a9fi90aif90aif09aif90aif09aifajf09ajf09ajf09ajf09ajf0a\"\napp.database = \"hangman.db\"\n\ndef login_required(f):\n @wraps(f)\n def wrap(*args, **kwargs):\n if 'logged_in' in session:\n return f(*args, **kwargs)\n else:\n flash('Please Log In')\n return redirect(url_for('login'))\n return wrap\n\n@app.route('/')\n@login_required\ndef start():\n #return app.send_static_file('index.html')\n return render_template('index.html')\n\ndef getTypes(type):\n g.db = connect_db()\n if type is None:\n qu = g.db.execute('select * from type')\n else:\n qu = g.db.execute('select * from type where type = '+ type)\n types = [dict(type=row[1], id=row[0]) for row in qu.fetchall()]\n g.db.close()\n return str(types)\n\n\n@app.route('/test')\ndef getPhrase():\n type = None\n if type is None:\n g.db = connect_db()\n qu = g.db.execute('select p.phrase, p.id, t.type from phrase p inner join type t on t.id = p.typeid')\n p = [dict(phrase=row[0], id=row[1], type=row[2]) for row in qu.fetchall()]\n g.db.close()\n else:\n typeid = getTypes(type)[0][\"id\"]\n g.db = connect_db()\n qu = g.db.execute('select p.phrase, p.id, t.type from phrase p inner join type t on t.id = p.typeid where t.id = ' + typeid)\n p = [dict(phrase=row[0], id=row[1], type=row[2]) for row in qu.fetchall()]\n g.db.close()\n return str(p[random.randint(0,len(p)-1)][\"phrase\"]) #str(len(p))\n\n@app.route('/classic')\ndef classic():\n return app.send_static_file('classic.html')\n\n@app.route('/login', methods=['GET','POST'])\ndef login():\n error = None;\n if request.method == 'POST':\n if request.form['username'] != 'admin' or request.form['password'] != 'admin':\n error = \"Nope\"\n else:\n session['logged_in'] = True\n flash('Welcome ' + request.form['username'])\n return redirect(url_for('start'))\n return render_template('login.html', error=error)\n\n@app.route('/logout')\n@login_required\ndef logout():\n session.pop('logged_in', None)\n flash(\"Logged out.\")\n return redirect(url_for('start'))\n\ndef connect_db():\n return sqlite3.connect(app.database)\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"819045","text":"import sys, json\nfrom collections import defaultdict\n\nANNOVAR_FILTERED_VARIANTS = sys.argv[1] # 
annovar_clinvar_hgmd_combined_for_annovar_with_scrape.variants.txt\nOUT_FILE = sys.argv[2]\n\npair_to_variants = defaultdict(list)\n\nwith open(ANNOVAR_FILTERED_VARIANTS) as f:\n next(f)\n for line in f:\n ln = line.strip().split('\\t')\n tuples = ln[-3].split(';')\n for t in tuples:\n mim, gene = t.split('/')[:2]\n mim = mim[5:]\n pair_to_variants[mim + '|' + gene].append(ln)\n\nwith open(OUT_FILE, 'w') as o:\n json.dump(pair_to_variants, o)\n","sub_path":"AMELIE/gen/make_pair_to_variants.py","file_name":"make_pair_to_variants.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"631485072","text":"from pycqed.scripts.Experiments.Five_Qubits import CZ_tuneup as czt\n# script for cphases run on 20170131\n\nmq_mod.measure_SWAPN(S5, 'DataT', swap_amps=np.arange(1.04, 1.06, 0.001))\n# amp obtained was 1.048 Vpp\nDataT.SWAP_amp(1.048)\n\n# next the cphase amplitude\nnested_MC.soft_avg(3)\nCZ_amps = np.linspace(1.03, 1.07, 21)\nAWG.ch4_amp(DataT.SWAP_amp())\ncorr_amps = np.arange(.0, .3, 0.01)\nMC.set_sweep_function(AWG.ch3_amp)\nMC.set_sweep_points(CZ_amps)\nd=czt.CPhase_cost_func_det(S5, DataT, AncT, nested_MC, corr_amps)\nMC.set_detector_function(d)\nMC.run('CZ_cost_function')\n# amplitude obtained was 1.056 Vpp\n\n# next the 1Q phase corrections\nAncT.CZ_channel_amp(1.056)\nAWG.ch3_amp(AncT.CZ_channel_amp())\nAWG.ch4_amp(DataT.SWAP_amp())\nscan_range = {'DataT': np.linspace(0, .15, 60),\n 'AncT': np.linspace(0, .21, 60)}\nfor s_q in ['DataT', 'AncT']:\n mq_mod.measure_SWAP_CZ_SWAP(S5, 'DataT', 'AncT',\n scan_range[s_q], sweep_qubit=s_q, excitations=0)\n\n# nice functions\ndef fix_phase_qcp():\n label = 'SWAP_CP_SWAP'\n a = ma.MeasurementAnalysis(label=label, auto=False)\n a.get_naming_and_values()\n x = a.sweep_points[:-4]\n cp_acq_weight = 0\n y = a.measured_values[cp_acq_weight, :-4]\n return a_tools.find_min(x, y)\n\n\ndef fix_phase_qs():\n label = 'SWAP_CP_SWAP'\n a = ma.MeasurementAnalysis(label=label, auto=False)\n a.get_naming_and_values()\n x = a.sweep_points[:-4]\n qs_acq_weigth = 1\n y = a.measured_values[qs_acq_weigth, :-4]\n return a_tools.find_min(x, y)\n\n\ndef fix_phase_2Q():\n label = 'CZ_cost_function'\n a = ma.MeasurementAnalysis(label=label, auto=False)\n a.get_naming_and_values()\n x = a.sweep_points[:]\n cp_acq_weight = 0\n y = a.measured_values[cp_acq_weight, :]\n return x[np.argmax(y)],y[np.argmax(y)]\n# return a_tools.find_min(x, -y)\n\n# all toghether\nqs_corr = AncT.CZ_corr_amp()\nqcp_corr = DataT.SWAP_corr_amp()\namp_2Q = AncT.CZ_channel_amp()\nprint('********************************')\nprint('CPhase Amp = %.3f Vpp'%amp_2Q)\nprint('qS phase corr = %.3f'%qs_corr)\nprint('qCP phase corr = %.3f'%qcp_corr)\nprint('********************************')\n\nCZ_amps = np.linspace(-0.002, .002, 5) + amp_2Q\nAWG.ch4_amp(DataT.SWAP_amp())\ncorr_amps = np.arange(.0, .3, 0.01)\nMC.set_sweep_function(AWG.ch3_amp)\nMC.set_sweep_points(CZ_amps)\nd=czt.CPhase_cost_func_det(S5, DataT, AncT, nested_MC, corr_amps)\nMC.set_detector_function(d)\nMC.run('CZ_cost_function')\namp_2Q = fix_phase_2Q()[0]\nAncT.CZ_channel_amp(amp_2Q)\nAWG.ch3_amp(AncT.CZ_channel_amp())\nprint('********************************')\nprint('CPhase Amp = %.3f Vpp'%amp_2Q)\nprint('qS phase corr = %.3f'%qs_corr)\nprint('qCP phase corr = %.3f'%qcp_corr)\nprint('********************************')\n\nmq_mod.measure_SWAP_CZ_SWAP(S5, 'DataT', 'AncT',\n np.linspace(0, .13, 60), sweep_qubit='DataT', excitations=0)\nqs_corr = 
fix_phase_qs()[0]\nDataT.SWAP_corr_amp(qs_corr)\nprint('********************************')\nprint('CPhase Amp = %.3f Vpp'%amp_2Q)\nprint('qS phase corr = %.3f'%qs_corr)\nprint('qCP phase corr = %.3f'%qcp_corr)\nprint('********************************')\nmq_mod.measure_SWAP_CZ_SWAP(S5, 'DataT', 'AncT',\n np.linspace(0, .18, 60), sweep_qubit='AncT', excitations=0)\nqcp_corr = fix_phase_qcp()[0]\nAncT.CZ_corr_amp(qcp_corr)\nprint('********************************')\nprint('CPhase Amp = %.3f Vpp'%amp_2Q)\nprint('qS phase corr = %.3f'%qs_corr)\nprint('qCP phase corr = %.3f'%qcp_corr)\nprint('********************************')\n\n\n#tomo\nfrom pycqed.analysis import tomography as tomo\n\nmq_mod.tomo2Q_bell(bell_state=0, device=S5, qS_name='DataT', qCZ_name='AncT',\n nr_shots=1024, nr_rep=1)\ntomo.analyse_tomo(MLE=False, target_bell=0)\n\n# plot sequence\n# vw = None\nvw.clear()\nfor i in [24, 4]:\n vw = viewer.show_element_pyqt(\n station.pulsar.last_elements[i], vw, color_idx=i%7)","sub_path":"pycqed/scripts/personal_folders/Ramiro/20170131_cphases.py","file_name":"20170131_cphases.py","file_ext":"py","file_size_in_byte":3888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"57474029","text":"from .measurements import MeasurementsBaseClass\r\nfrom .recurrence import BurstDetection\r\n\r\nfrom socialsim.utils import gini\r\n\r\nfrom collections import Counter\r\nfrom matplotlib.pyplot import cm\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport igraph as ig\r\n\r\nimport louvain\r\n\r\nfrom collections import defaultdict\r\n\r\nimport warnings\r\nimport matplotlib.pyplot as plt\r\n\r\nimport re\r\nimport time\r\nimport os\r\n\r\n# community detection algorithms\r\n# More algorithms: \r\n# https://igraph.org/c/doc/igraph-Community.html\r\n# https://github.com/vtraag/leidenalg\r\n\r\n\r\ndef louvain_method(user_interaction_graph):\r\n '''\r\n https://github.com/vtraag/louvain-igraph\r\n Fast unfolding of communities in large networks, Vincent D Blondel, Jean-Loup Guillaume, Renaud Lambiotte, Renaud Lefebvre, Journal of Statistical Mechanics: Theory and Experiment 2008(10), P10008 (12pp)\r\n :param user_interaction_graph: igraph Graph\r\n '''\r\n louvain.set_rng_seed(43)\r\n node_names = user_interaction_graph.vs\r\n return[[node_names[node]['name'] for node in community] for community in louvain.find_partition(user_interaction_graph, louvain.ModularityVertexPartition)]\r\n\r\n\r\nclass PersistentGroupsMeasurements(MeasurementsBaseClass):\r\n def __init__(self, dataset_df, configuration={}, metadata=None,\r\n id_col='nodeID', timestamp_col=\"nodeTime\", userid_col=\"nodeUserID\",\r\n platform_col=\"platform\", content_col=\"informationID\",\r\n log_file='group_formation_measurements_log.txt', selected_content=None,\r\n time_granularity='12H', parentid_col='parentID',\r\n community_detection_algorithm=louvain_method,\r\n plot=False, save_groups=False, plot_bursts=False, save_plots=False, plot_dir='./',\r\n save_predicted_gammas = False, save_predicted_gammas_to_fn='./predicted_gammas.csv'):\r\n\r\n \"\"\"\r\n :param dataset_df: dataframe containing all posts for all communities (Eg. coins for scenario 2) in all platforms\r\n :param timestamp_col: name of the column containing the time of the post\r\n :param id_col: name of the column containing the post id\r\n :param userid_col: name of the column containing the user id\r\n :param content_col: name of the column containing the content the simulation was done for eg. 
coin name\r\n :param community_detection_algorithm: a function that takes a networkx Graph as in input and returns a list of list i.e. [[users_in_group1], [users_in_group2], ...}\r\n \"\"\"\r\n super(PersistentGroupsMeasurements, self).__init__(dataset_df, configuration, log_file=log_file)\r\n self.dataset_df = dataset_df\r\n self.timestamp_col = timestamp_col\r\n self.id_col = id_col\r\n self.userid_col = userid_col\r\n self.platform_col = platform_col\r\n self.content_col = content_col\r\n self.measurement_type = 'persistent_groups'\r\n\r\n self.metadata = metadata\r\n self.plot = plot\r\n self.plot_bursts = plot_bursts\r\n self.plot_dir = plot_dir\r\n self.save_plots = save_plots\r\n\r\n\r\n if selected_content == 'all':\r\n self.selected_content = None\r\n elif selected_content is not None:\r\n self.selected_content = selected_content\r\n else:\r\n try:\r\n self.selected_content = self.metadata.node_list\r\n except:\r\n self.selected_content = None\r\n \r\n self.min_date = self.dataset_df[self.timestamp_col].min()\r\n self.max_date = self.dataset_df[self.timestamp_col].max()\r\n \r\n self.gammas = {k: {p: None for p in self.dataset_df[self.platform_col].unique()} for k in self.dataset_df[self.content_col].unique()}\r\n\r\n if not self.metadata is None:\r\n\r\n if self.metadata.use_info_data and 'gamma' in self.metadata.info_data.columns:\r\n \r\n for i, row in self.metadata.info_data[[self.content_col, self.platform_col, 'gamma']].iterrows():\r\n if row[self.content_col] in self.gammas.keys():\r\n self.gammas[row[self.content_col]][row[self.platform_col]] = row['gamma']\r\n\r\n self.gamma_filepath = 'temporary_predicted_gammas_file_{}.csv'.format(str(time.ctime()))\r\n with open(self.gamma_filepath, 'w') as f:\r\n f.write( '{},{},{}\\n'.format(self.content_col, self.platform_col, 'gamma'))\r\n\r\n self.time_granularity = time_granularity\r\n self.parentid_col = parentid_col\r\n self.community_detection_algorithm = community_detection_algorithm\r\n self.get_network_from_bursts(user_interaction_weight_threshold=2)\r\n if save_groups:\r\n self.save_groups_to_file()\r\n\r\n\r\n if not (self.metadata.use_info_data and 'gamma' in self.metadata.info_data.columns):\r\n # load gammas from temp file\r\n temp_gammas = pd.read_csv(self.gamma_filepath)\r\n for i, row in temp_gammas[[self.content_col, self.platform_col, 'gamma']].iterrows():\r\n if row[self.content_col] in self.gammas.keys():\r\n self.gammas[row[self.content_col]][row[self.platform_col]] = row['gamma']\r\n # update metadata info data with temp gammas\r\n self.metadata.info_data = temp_gammas.copy()\r\n # update user_info_data boolean flag\r\n self.metadata.use_info_data = True\r\n\r\n # remove temporary gammas file\r\n os.remove(self.gamma_filepath)\r\n # write gammas to file if specified\r\n if save_predicted_gammas:\r\n self.gammas[[self.content_col, self.platform_col, 'gamma']].to_csv(save_predicted_gammas_to_fn, index=False)\r\n\r\n\r\n def list_measurements(self):\r\n count = 0\r\n for f in dir(self):\r\n if not f.startswith('_'):\r\n func = getattr(self, f)\r\n if callable(func):\r\n doc_string = func.__doc__\r\n if not doc_string is None and 'Measurement:' in doc_string:\r\n desc = re.search('Description\\:([\\s\\S]+?)Input', doc_string).groups()[0].strip()\r\n print('{}) {}: {}\\n'.format(count + 1, f, desc))\r\n count += 1\r\n\r\n\r\n def get_network_from_bursts(self, bursts_count_threshold=1, user_interaction_weight_threshold=1):\r\n \"\"\"\r\n get bursts in activity for units of information, get network of connected users 
that parcipate in these bursts\r\n :param bursts_count_threshold: threshold for the number of bursts in activity for an information to be considered\r\n :param user_interaction_weight_threshold: threshold for the number of burts two users must participate in together in order for them to be connected\r\n \"\"\" \r\n def get_burst_user_connections_df(content_id, content_df, burst_interval, userid_col='from_id', timestamp_col='date'):\r\n '''connect users who participate in the same burst'''\r\n burst_df = content_df[(content_df[self.timestamp_col].between(burst_interval[0], burst_interval[1], inclusive=True)) & (~content_df[self.userid_col].isna())].copy()\r\n uids = list(sorted(burst_df[self.userid_col].unique())) \r\n if len(uids) < 2:\r\n return [] \r\n content_user_connections = []\r\n for i, uid1 in enumerate(uids):\r\n for uid2 in uids[i+1:]:\r\n content_user_connections.append({'content': content_id,\r\n 'uid1': uid1,\r\n 'uid2': uid2,\r\n 'weight': 1})\r\n return content_user_connections\r\n\r\n user_connections = []\r\n n_ids = self.dataset_df[self.content_col].nunique()\r\n max_plots_to_show = 5\r\n num_plots = 0\r\n for content_id, content_df in self.dataset_df.groupby(self.content_col):\r\n if num_plots < max_plots_to_show:\r\n show = True\r\n else:\r\n show = False\r\n \r\n if (self.selected_content is not None and content_id not in self.selected_content) and not (isinstance(self.selected_content,str) and self.selected_content == 'all'):\r\n continue\r\n burstDetection = BurstDetection(dataset_df=content_df, metadata=self.metadata, id_col=self.id_col,\r\n timestamp_col=self.timestamp_col, platform_col=self.platform_col, \r\n time_granularity=self.time_granularity,\r\n min_date=self.min_date, max_date=self.max_date,\r\n gamma_filepath=self.gamma_filepath)\r\n burst_intervals = burstDetection.detect_bursts(self.gammas[content_id])\r\n if len(burst_intervals) < bursts_count_threshold:\r\n continue\r\n\r\n if self.plot_bursts:\r\n plot_df = self.dataset_df.copy()\r\n plot_df.set_index(self.timestamp_col, inplace=True)\r\n new_df = plot_df.groupby(pd.Grouper(freq=self.time_granularity))[[self.content_col]].count()\r\n new_df.reset_index(inplace=True)\r\n plt.figure()\r\n plt.plot(new_df[self.timestamp_col], new_df[self.content_col])\r\n\r\n for burst_interval in burst_intervals:\r\n user_connections.extend(get_burst_user_connections_df(content_id, content_df, burst_interval))\r\n if self.plot_bursts:\r\n plt.axvspan(xmin=burst_interval[0], xmax=burst_interval[1], color=\"red\", alpha=0.25)\r\n if show and self.plot_bursts:\r\n plt.show()\r\n if self.plot_bursts and self.save_plots:\r\n plt.savefig(self.plot_dir + str(content_id) + \"_persistent_groups_with_bursts.png\", bbox_inches='tight')\r\n plt.close()\r\n num_plots += 1\r\n\r\n\r\n user_network_df = pd.DataFrame(user_connections)\r\n if 'uid1' not in user_network_df.columns:\r\n warnings.warn(\"No bursts detected in any information IDs. Persistent group measurements cannot be run. 
They will fail with uid1 KeyError.\")\r\n user_network_df = user_network_df.groupby(['uid1', 'uid2'])['weight'].sum().reset_index()\r\n \r\n self.user_network_df = user_network_df[user_network_df['weight']>=user_interaction_weight_threshold] \r\n \r\n edgelist = self.user_network_df[['uid1', 'uid2', 'weight']].apply(tuple, axis=1).tolist()\r\n if len(edgelist) == 0:\r\n return\r\n user_interaction_graph = ig.Graph.TupleList(edgelist, directed=False)\r\n self.groups = self.community_detection_algorithm(user_interaction_graph)\r\n print('Number of groups: ', len(self.groups))\r\n\r\n if self.plot:\r\n figsize = (10, 10)\r\n fig = plt.figure(figsize=figsize)\r\n ax = fig.add_subplot(1, 1, 1)\r\n ax.axis('off')\r\n ax.set_frame_on(False)\r\n colors = iter(cm.jet(np.linspace(0, 1, len(self.groups))))\r\n node_color = {}\r\n node_labels = {}\r\n for g_id, g in enumerate(self.groups):\r\n c = next(colors)\r\n for i in g:\r\n node_color[i] = c\r\n node_labels[i] = g_id\r\n # Draw graph of users by group membership\r\n node_c = [node_color[node['name']] for node in user_interaction_graph.vs]\r\n\r\n ig.plot(user_interaction_graph, layout=user_interaction_graph.layout_fruchterman_reingold(), vertex_size=30, edge_color='black', vertex_color=node_c)\r\n\r\n plt.show()\r\n if self.save_plots:\r\n plt.savefig(self.plot_dir+ \"network_of_users_by_groups.png\",bbox_inches='tight')\r\n\r\n def number_of_groups(self):\r\n \"\"\"\r\n Measurement: number_of_groups\r\n\r\n Description: How many different clusters of users are there?\r\n\r\n Input: Network\r\n\r\n Output: Int.\r\n\r\n \"\"\"\r\n return len(self.groups)\r\n \r\n def group_size_distribution(self):\r\n \"\"\"\r\n Measurement: group_size_distribution\r\n\r\n Description: How large are the groups of users? (population)\r\n\r\n Input: Network of groups\r\n\r\n Output: List of group sizes\r\n \"\"\"\r\n return [len(group_users) for group_users in self.groups]\r\n \r\n def distribution_of_content_discussion_over_groups(self):\r\n \"\"\"\r\n Measurement: distribution_of_content_discussion_over_groups\r\n\r\n Description: Do groups focus on individual information IDs or a larger set of info IDs?\r\n\r\n Input:\r\n\r\n Output: List\r\n \"\"\"\r\n\r\n content_ids = self.dataset_df.groupby(self.content_col)[self.id_col].count().reset_index()\r\n content_ids.columns = [self.content_col,'total_value']\r\n\r\n meas = []\r\n for i, group_users in enumerate(self.groups):\r\n group_df = self.dataset_df[self.dataset_df[self.userid_col].isin(group_users)]\r\n\r\n info_id_counts = group_df.groupby(self.content_col)[self.id_col].count().reset_index()\r\n info_id_counts.columns = [self.content_col,'value']\r\n info_id_counts = info_id_counts.merge(content_ids,on=self.content_col,how='right').fillna(0)\r\n\r\n info_id_counts = list(info_id_counts['value'].values)\r\n\r\n #inequality among information IDs within the group\r\n content_gini = gini(info_id_counts)\r\n\r\n meas.append(content_gini)\r\n\r\n return meas\r\n\r\n def internal_versus_external_interaction_rates(self):\r\n \"\"\"\r\n Measurement: external_to_internal_interaction_rate_ratio\r\n\r\n Description: How much do group members interact with each other versus non-group members?\r\n\r\n Input:\r\n\r\n Output: Float\r\n \"\"\"\r\n internal_links = 0\r\n external_links = 0\r\n for i, group_users in enumerate(self.groups):\r\n internal_links_df = self.user_network_df[(self.user_network_df['uid1'].isin(group_users)) & (self.user_network_df['uid2'].isin(group_users))]\r\n all_links_df = 
self.user_network_df[(self.user_network_df['uid1'].isin(group_users)) | (self.user_network_df['uid2'].isin(group_users))] # all links made by users in that group\r\n internal_links += sum(internal_links_df['weight'].values)\r\n external_links += sum(all_links_df['weight'].values) - sum(internal_links_df['weight'].values)\r\n return external_links / internal_links\r\n\r\n def group_versus_total_volume_of_activity(self,time_granularity=None):\r\n \"\"\"\r\n Measurement: group_versus_total_volume_of_activity\r\n\r\n Description: How much does the most prolific group dominate the discussion of a particular info ID over time?\r\n\r\n Input:\r\n\r\n Output: Dictionary of DataFrames\r\n \"\"\"\r\n ''''''\r\n\r\n if time_granularity is None:\r\n try:\r\n time_granularity = self.configuration['node']['group_versus_total_volume_of_activity']['measurement_args']['time_granularity']\r\n except:\r\n time_granularity = self.time_granularity\r\n\r\n\r\n dataset_counts_df = self.dataset_df.set_index(self.timestamp_col).\\\r\n groupby([pd.Grouper(freq=time_granularity), self.content_col]).size().reset_index(name='total_activity')\r\n\r\n\r\n group_content_timeseries = {} \r\n for content_id, content_df in self.dataset_df.groupby(self.content_col):\r\n\r\n #users in the group with the most posts related to this content ID\r\n prolific_group_users = self.groups[np.argmax(np.array([len(content_df[content_df[self.userid_col].isin(group_users)]) for group_users in self.groups]))]\r\n\r\n group_df = content_df[content_df[self.userid_col].isin(prolific_group_users)]\r\n\r\n group_counts_df = group_df.set_index(self.timestamp_col).\\\r\n groupby([pd.Grouper(freq=time_granularity), self.content_col]).size().reset_index(name='group_activity')\r\n\r\n merged_df = dataset_counts_df[dataset_counts_df[self.content_col]==content_id].merge(group_counts_df, how='outer', on=[self.content_col, self.timestamp_col]) #dataset_counts_df\r\n\r\n merged_df.fillna(0, inplace=True)\r\n merged_df['value'] = merged_df['group_activity'] / merged_df['total_activity']\r\n\r\n group_content_timeseries[content_id] = merged_df.drop(columns=[self.content_col, 'total_activity', 'group_activity'])\r\n return group_content_timeseries\r\n\r\n def seed_post_versus_response_actions_ratio(self):\r\n \"\"\"\r\n Measurement: seed_post_to_total_actions_ratio\r\n\r\n Description: How much does the group seed new content?\r\n\r\n Input:\r\n\r\n Output: List.\r\n \"\"\"\r\n group_seed_post_ratio = []\r\n for i, group_users in enumerate(self.groups):\r\n group_df = self.dataset_df[self.dataset_df[self.userid_col].isin(group_users)]\r\n idx = (group_df[self.parentid_col] == group_df[self.id_col]) | (group_df['actionType'].isin(['CreateEvent','IssuesEvent','PullRequestEvent']))\r\n group_seed_post_ratio.append(len(group_df[idx]) / float(len(group_df))) \r\n return group_seed_post_ratio \r\n\r\n def save_groups_to_file(self):\r\n groups = []\r\n for g_id, g in enumerate(self.groups):\r\n idx = [g_id]*len(g)\r\n groups.append(pd.DataFrame({\"group id\": idx, \"group member\": g}))\r\n group_df = pd.concat(groups)\r\n group_df.to_csv(\"./groups.csv\")\r\n","sub_path":"socialsim/measurements/persistent_groups.py","file_name":"persistent_groups.py","file_ext":"py","file_size_in_byte":17702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"510182681","text":"import streamlit as st\nfrom PIL import Image\nimport base64\nimport sqlite3\nconn = sqlite3.connect('data.db',check_same_thread=False)\ncur = 
conn.cursor()\ndef foodDonate():\n    main_bg = \"bg.jpg\"\n    main_bg_ext = \"jpg\"\n    # Inline CSS that sets the page background image (reconstructed; the original <style> markup was stripped during extraction)\n    st.markdown(\n        f\"\"\"\n        <style>\n        .stApp {{\n            background: url(data:image/{main_bg_ext};base64,{base64.b64encode(open(main_bg, \"rb\").read()).decode()});\n        }}\n        </style>\n        \"\"\",\n        unsafe_allow_html=True\n    )\n    \n    \n    st.markdown(\"<h1>Food Donation</h1>
\", unsafe_allow_html=True)\n \n img = Image.open(\"FoodDonation.jpg\")\n st.image(img, caption='Food Donation',width=500)\n st.title(\"Welcome to the food donation page, your donated food can bring hope in someones life of survival.\\nCome let us donate food for needy one.\\nYou don't have to walk and donate it you just have to register yourself and we will pick the food from your house address that will be provided.\")\n st.write(\"Here if you are willing to donate food\\n you have to register yourself.\")\n with st.form(key=\"Register for food donation\"):\n namefood = st.text_input(\"Enter your name : \")\n foodaddress = st.text_input(\"Enter your address please : \")\n food_phone = st.text_input(\"Enter your phone Number : \")\n \n foodsubmission = st.form_submit_button(label=\"Submit\")\n if foodsubmission==True:\n addData(namefood,foodaddress,food_phone)\n \n else:\n st.info(\"Please submit the form.\")\n\ndef addData(a,b,c):\n \n cur.execute(\"\"\"CREATE TABLE IF NOT EXISTS food(NAME TEXT(50), ADDRESS TEXT(50), PHONE_NO TEXT(15)); \"\"\") \n cur.execute(\"INSERT INTO food VALUES (?,?,?)\",(a,b,c))\n conn.commit()\n conn.close()\n st.success(\"Successfully inserted\")\n \n \n\n\n\n\n \n\n","sub_path":"donationApp/foodDonation.py","file_name":"foodDonation.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"164754160","text":"class BayesTheorem:\n\n def __init__(self):\n # chances of person having the cancer \n self.p_having_cancer = 1 / 100\n # probability of the +ve test that are true\n self.p_test_true = 90 / 100\n # not having cancer still got true\n self.p_false_Result = 8 / 100\n # probability of not having cancer is 1 - prob of having cancer\n self.p_not_having_cancer = 1 - self.p_having_cancer\n\n def prob_Cancer_When_Result_Positive(self):\n p_test_positive = self.p_having_cancer * self.p_test_true + self.p_not_having_cancer * self.p_false_Result\n p_cancer_given_positive: float = (self.p_having_cancer * self.p_test_true) / p_test_positive\n return p_cancer_given_positive\n\n\ntry:\n bayes_object = BayesTheorem()\n flag: bool = True\n while flag:\n bayes_object = BayesTheorem()\n # for a next time the value of having cancer will be the previously calculated\n print(\"The probability of having the cancer when you got the test +ve is %.2f\" %\n bayes_object.prob_Cancer_When_Result_Positive(), \"\\nTo exit press 0 else press any other number\")\n if input() == 0:\n flag = False\nexcept Exception as e:\n print(\"Process stopped because %s\" % e)\n","sub_path":"Week3/StatisticsAndProbablitiy/CancerUsingBayesTheorem.py","file_name":"CancerUsingBayesTheorem.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"509863655","text":"\"\"\"\nMethods examples.\n\nMethods are being stored in dictionary. 
Using a my_ prefix on functions so we\ndo not redefine built-in functions.\n\"\"\"\n\ndef inc(a):\n    return a + 1\n\nmy_max = lambda a, b: a if a > b else b\n\n\nobj = {\n    'inc': inc,\n    'my_max': my_max\n}\n\n\nprint('obj.inc.__name__:', obj['inc'].__name__)\nprint('obj.my_max.__name__:', obj['my_max'].__name__)\n\nprint('obj[\"inc\"](3):', obj['inc'](3))\nprint('obj[\"my_max\"](4, 1):', obj['my_max'](4, 1))\n","sub_path":"Python/7-method.py","file_name":"7-method.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"108889466","text":"import requests\nimport json\nimport pprint\n\n# the access_token is deliberately omitted here\nsymbols = [\"-\", \"*\", \"/\"]\nans_match = [\"+\", \"+\", \"+\", \"+\", \"+\", \"+\"]\nflag=0\nanswer = {'answer': '++++++'}\n\nget_question = requests.get(\n    'https://apiv2.twitcasting.tv/internships/2019/games?level=3',\n    headers = {'Authorization': 'Bearer {}'.format(access_token)})\npprint.pprint(get_question.json())\nquestion_id = get_question.json()[\"id\"]\n\npost_url = \"https://apiv2.twitcasting.tv/internships/2019/games/\" + question_id\n\npost_answer = requests.post(\n    post_url,\n    headers = {'Authorization': 'Bearer {}'.format(access_token)},\n    json = answer)\npprint.pprint(post_answer.json())\n\nwhile len(post_answer.json()) == 3:\n    miss_ans_posi = [index for index, symbol in enumerate(list(post_answer.json()[\"hints\"])) if symbol == '?']\n    \n    for i in miss_ans_posi:\n        ans_match[i] = symbols[flag]\n    \n    flag += 1\n    answer[\"answer\"] = \"\".join(ans_match)\n    \n    post_answer = requests.post(\n        post_url,\n        headers = {'Authorization': 'Bearer {}'.format(access_token)},\n        json = answer)\n    pprint.pprint(post_answer.json())\n","sub_path":"Botsu_Model.py","file_name":"Botsu_Model.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}
+{"seq_id":"456377244","text":"import unittest\n\nclass TreeNode(object):\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\nclass Codec:\n\n    def serialize(self, root):\n        \"\"\"Encodes a tree to a single string.\n\n        :type root: TreeNode\n        :rtype: str\n        \"\"\"\n        tree_repr = \"[\"\n        temp = self.pre_order_visit(root, [])\n        # need to implement a pass over the array\n        clean = []\n        # indexes to skip\n        to_skip = []\n        for i, val in enumerate(temp):\n            # for a parent at index p, the children are at index (p+1)*2 and (p+1)*2 + 1\n            if val == 'null' and i not in to_skip:\n                to_skip.append(i * 2)\n                to_skip.append(i * 2 + 1)\n            else:\n                pass\n\n\n\n        tree_repr += ','.join(temp)\n        tree_repr += \"]\"\n        return tree_repr\n\n    def pre_order_visit(self, root, tree: list):\n        if not root:\n            tree.append(\"null\")\n            return\n        else:\n            tree.append(\"{}\".format(root.val))\n            self.pre_order_visit(root.left, tree)\n            self.pre_order_visit(root.right, tree)\n        return tree\n\n\nclass TestSolution(unittest.TestCase):\n\n    def test_serialize(self):\n        root = TreeNode(1)\n        root.left = TreeNode(2)\n        root.right = TreeNode(3)\n        root.right.right = TreeNode(5)\n        root.right.left = TreeNode(4)\n        sol = Codec()\n        ans = sol.serialize(root)\n        print(ans)\n        self.assertTrue(ans == '[1,2,3,null,null,4,5]')\n\n    def test_serialize_deep(self):\n        root = TreeNode(1)\n        root.right = TreeNode(3)\n        root.right.right = TreeNode(5)\n        root.right.left = TreeNode(4)\n        sol = Codec()\n        ans = sol.serialize(root)\n        print(ans)\n        # self.assertTrue(ans == 
'[1,null,3,,4,5]')","sub_path":"interview/trees_and_graphs/serialize_deserialize.py","file_name":"serialize_deserialize.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"362427575","text":"\"\"\"Serveradmin - Query Committer\n\nCopyright (c) 2018 InnoGames GmbH\n\"\"\"\n\nimport json\nfrom itertools import chain\n\nfrom django.core.exceptions import PermissionDenied, ValidationError\nfrom django.db import IntegrityError, transaction\nfrom django.dispatch.dispatcher import Signal\n\nfrom adminapi.dataset import DatasetCommit\nfrom adminapi.request import json_encode_extra\nfrom serveradmin.serverdb.models import (\n Servertype,\n Attribute,\n Server,\n ServerAttribute,\n ServerRelationAttribute,\n ChangeAdd,\n ChangeCommit,\n ChangeUpdate,\n ChangeDelete,\n)\nfrom serveradmin.serverdb.query_materializer import (\n QueryMaterializer,\n get_default_attribute_values,\n)\n\npre_commit = Signal()\npost_commit = Signal()\n\n\nclass CommitError(ValidationError):\n pass\n\n\nclass CommitValidationFailed(CommitError):\n def __init__(self, message, violations=None):\n CommitError.__init__(self, message)\n if violations is None:\n violations = []\n\n self.violations = violations\n\n\nclass CommitNewerData(CommitError):\n def __init__(self, message, newer=None):\n CommitError.__init__(self, message)\n if newer is None:\n newer = []\n self.newer = newer\n\n\ndef commit_query(created=[], changed=[], deleted=[], app=None, user=None):\n \"\"\"The main function to commit queries\"\"\"\n\n if not user:\n user = app.owner\n\n pre_commit.send_robust(\n commit_query, created=created, changed=changed, deleted=deleted\n )\n\n entities = []\n if not user.is_superuser:\n entities.append(\n ('user', user, list(user.access_control_groups.all()))\n )\n if app and not app.superuser:\n entities.append(\n ('application', app, list(app.access_control_groups.all()))\n )\n\n # TODO: Find out which attributes we actually need\n attribute_lookup = {a.pk: a for a in Attribute.objects.all()}\n joined_attributes = {\n a: None\n for a\n in list(attribute_lookup.values()) + list(Attribute.specials.values())\n }\n\n with transaction.atomic():\n change_commit = ChangeCommit.objects.create(app=app, user=user)\n changed_servers = _fetch_servers(set(c['object_id'] for c in changed))\n changed_objects = _materialize(changed_servers, joined_attributes)\n\n deleted_servers = _fetch_servers(deleted)\n deleted_objects = _materialize(deleted_servers, joined_attributes)\n _validate(attribute_lookup, changed, changed_objects)\n\n # Changes should be applied in order to prevent integrity errors.\n _delete_attributes(attribute_lookup, changed, changed_servers, deleted)\n _delete_servers(changed, deleted, deleted_servers)\n created_servers = _create_servers(attribute_lookup, created)\n created_objects = _materialize(created_servers, joined_attributes)\n _update_servers(changed, changed_servers)\n _upsert_attributes(attribute_lookup, changed, changed_servers)\n changed_objects = _materialize(changed_servers, joined_attributes)\n\n _access_control(\n entities, created_objects, changed_objects, deleted_objects\n )\n _log_changes(change_commit, changed, created_objects, deleted_objects)\n\n post_commit.send_robust(\n commit_query, created=created, changed=changed, deleted=deleted\n )\n\n return DatasetCommit(\n list(created_objects.values()),\n list(changed_objects.values()),\n list(deleted_objects.values()),\n )\n\n\ndef _validate(attribute_lookup, 
changed, changed_objects):\n servertype_attributes = _get_servertype_attributes(changed_objects)\n\n # Attributes must be always validated\n violations_attribs = _validate_attributes(\n changed, changed_objects, servertype_attributes\n )\n violations_readonly = _validate_readonly(\n attribute_lookup, changed, changed_objects\n )\n violations_regexp = list(\n _validate_regexp(changed, changed_objects, attribute_lookup)\n )\n violations_required = _validate_required(\n changed, changed_objects, servertype_attributes\n )\n if (\n violations_attribs or violations_readonly or\n violations_regexp or violations_required\n ):\n error_message = _build_error_message(\n violations_attribs,\n violations_readonly,\n violations_regexp,\n violations_required,\n )\n raise CommitValidationFailed(\n error_message,\n violations_attribs +\n violations_readonly +\n violations_regexp +\n violations_required,\n )\n\n newer = _validate_commit(changed, changed_objects)\n if newer:\n raise CommitNewerData('Newer data available', newer)\n\n\ndef _delete_attributes(attribute_lookup, changed, changed_servers, deleted):\n # We first have to delete all of the relation attributes\n # to avoid integrity errors. Other attributes will just go away\n # with the servers.\n if deleted:\n (\n ServerRelationAttribute.objects\n .filter(server_id__in=deleted)\n .delete()\n )\n\n for changes in changed:\n object_id = changes['object_id']\n\n for attribute_id, change in changes.items():\n if attribute_id in Attribute.specials:\n continue\n\n server = changed_servers[object_id]\n attribute = attribute_lookup[attribute_id]\n action = change['action']\n\n if action == 'delete' or (\n action == 'update' and change['new'] is None\n ):\n server.get_attributes(attribute).delete()\n elif action == 'multi' and change['remove']:\n for server_attribute in server.get_attributes(attribute):\n value = server_attribute.get_value()\n if isinstance(value, Server):\n value = value.hostname\n if value in change['remove']:\n server_attribute.delete()\n\n\ndef _delete_servers(changed, deleted, deleted_servers):\n if not deleted:\n return\n\n try:\n for server in deleted_servers.values():\n server.delete()\n except IntegrityError as error:\n raise CommitError(\n 'Cannot delete servers because they are referenced by {0}'\n .format(', '.join(str(o) for o in error.protected_objects))\n )\n\n # We should ignore the changes to the deleted servers.\n for server_id in deleted:\n if server_id in changed:\n del changed[server_id]\n\n\ndef _create_servers(attribute_lookup, created):\n created_servers = {}\n for attributes in created:\n if 'hostname' not in attributes:\n raise CommitError('\"hostname\" attribute is required.')\n hostname = attributes['hostname']\n\n if 'servertype' not in attributes:\n raise CommitError('\"servertype\" attribute is required.')\n servertype = _get_servertype(attributes)\n\n if 'intern_ip' not in attributes:\n raise CommitError('\"intern_ip\" attribute is required.')\n intern_ip = attributes['intern_ip']\n\n attributes = dict(_get_real_attributes(attributes, attribute_lookup))\n _validate_real_attributes(servertype, attributes)\n\n server = _insert_server(hostname, intern_ip, servertype, attributes)\n\n created_server = {k.pk: v for k, v in attributes.items()}\n created_server['hostname'] = hostname\n created_server['servertype'] = servertype.pk\n created_server['intern_ip'] = intern_ip\n\n created_servers[server.server_id] = server\n\n return created_servers\n\n\ndef _update_servers(changed, changed_servers):\n really_changed = 
set()\n for changes in changed:\n object_id = changes['object_id']\n\n for attribute_id, change in changes.items():\n if attribute_id == 'object_id':\n continue\n\n if attribute_id not in Attribute.specials:\n continue\n\n assert change['action'] in ('new', 'update', 'multi')\n server = changed_servers[object_id]\n attribute = Attribute.specials[attribute_id]\n setattr(server, attribute.special.field, change.get('new'))\n really_changed.add(server)\n\n for server in really_changed:\n server.full_clean()\n server.save()\n\n\ndef _upsert_attributes(attribute_lookup, changed, changed_servers):\n for changes in changed:\n object_id = changes['object_id']\n\n for attribute_id, change in changes.items():\n if attribute_id in Attribute.specials:\n continue\n\n attribute = attribute_lookup[attribute_id]\n server = changed_servers[object_id]\n\n action = change['action']\n if action == 'multi':\n for value in change['add']:\n server.add_attribute(attribute, value)\n continue\n\n if action not in ('new', 'update'):\n continue\n if change['new'] is None:\n continue\n\n try:\n server_attribute = server.get_attributes(attribute).get()\n except ServerAttribute.get_model(attribute.type).DoesNotExist:\n server.add_attribute(attribute, change['new'])\n else:\n server_attribute.save_value(change['new'])\n\n\ndef _access_control(\n entities, created_objects, changed_objects, deleted_objects\n):\n for server in chain(\n created_objects.values(),\n changed_objects.values(),\n deleted_objects.values(),\n ):\n for entity_class, entity_name, groups in entities:\n if not any(\n _can_access_server(changed_objects, server, g) for g in groups\n ):\n raise PermissionDenied(\n 'Insufficient access rights to server \"{}\" for {} \"{}\"'\n .format(server['hostname'], entity_class, entity_name)\n )\n\n\ndef _can_access_server(changed_objects, new_object, acl):\n if not all(\n f.matches(new_object.get(a))\n for a, f in acl.get_filters().items()\n ):\n return False\n\n if new_object['object_id'] in changed_objects:\n old_object = changed_objects[new_object['object_id']]\n else:\n old_object = get_default_attribute_values(new_object['servertype'])\n\n attribute_ids = {a.pk for a in acl.attributes.all()}\n if not all(\n a in attribute_ids or v == old_object[a]\n for a, v in new_object.items()\n ):\n return False\n\n return True\n\n\ndef _log_changes(commit, changed, created_objects, deleted_objects):\n for updates in changed:\n ChangeUpdate.objects.create(\n commit=commit,\n server_id=updates['object_id'],\n updates_json=json.dumps(updates, default=json_encode_extra),\n )\n\n for attributes in deleted_objects.values():\n attributes_json = json.dumps(attributes, default=json_encode_extra)\n ChangeDelete.objects.create(\n commit=commit,\n server_id=attributes['object_id'],\n attributes_json=attributes_json,\n )\n\n for obj in created_objects.values():\n attributes_json = json.dumps(\n obj, default=json_encode_extra\n )\n ChangeAdd.objects.create(\n commit=commit,\n server_id=obj['object_id'],\n attributes_json=attributes_json,\n )\n\n\ndef _fetch_servers(object_ids):\n servers = {\n s.server_id: s\n for s\n in Server.objects.select_for_update().filter(server_id__in=object_ids)\n }\n for object_id in object_ids:\n if object_id in servers:\n continue\n raise CommitError('Cannot find object with id {}'.format(object_id))\n\n return servers\n\n\ndef _materialize(servers, joined_attributes):\n return {\n o['object_id']: o\n for o in QueryMaterializer(list(servers.values()), joined_attributes)\n }\n\n\ndef 
_get_servertype_attributes(servers):\r\n    servertype_attributes = dict()\r\n    for servertype_id in {s['servertype'] for s in servers.values()}:\r\n        servertype_attributes[servertype_id] = dict()\r\n        for sa in Servertype.objects.get(pk=servertype_id).attributes.all():\r\n            servertype_attributes[servertype_id][sa.attribute_id] = sa\r\n\r\n    return servertype_attributes\r\n\r\n\r\ndef _validate_attributes(changes, servers, servertype_attributes):\r\n    violations = []\r\n    for attribute_changes in changes:\r\n        object_id = attribute_changes['object_id']\r\n        server = servers[object_id]\r\n        attributes = servertype_attributes[server['servertype']]\r\n\r\n        for attribute_id, change in attribute_changes.items():\r\n            # If servertype is attempted to be changed, we immediately\r\n            # error out.\r\n            if attribute_id == 'servertype':\r\n                raise CommitValidationFailed('Cannot change servertype', [])\r\n\r\n            # We have no more checks for the special attributes.\r\n            if attribute_id in Attribute.specials:\r\n                continue\r\n\r\n            if (\r\n                # No such attribute.\r\n                attribute_id not in attributes or\r\n                # Attributes related via another one, cannot be changed.\r\n                attributes[attribute_id].related_via_attribute\r\n            ):\r\n                violations.append((object_id, attribute_id))\r\n\r\n    return violations\r\n\r\n\r\ndef _validate_readonly(attribute_lookup, changes, servers):\r\n    violations = []\r\n    for attribute_changes in changes:\r\n        object_id = attribute_changes['object_id']\r\n        server = servers[object_id]\r\n        for attr, change in attribute_changes.items():\r\n            if attr in Attribute.specials:\r\n                continue\r\n\r\n            if attribute_lookup[attr].readonly:\r\n                if attr in server and server[attr] != '':\r\n                    violations.append((object_id, attr))\r\n\r\n    return violations\r\n\r\n\r\ndef _validate_regexp(changes, servers, attribute_lookup):\r\n    for attribute_changes in changes:\r\n        object_id = attribute_changes['object_id']\r\n        for attribute_id, change in attribute_changes.items():\r\n            if attribute_id in Attribute.specials:\r\n                continue\r\n\r\n            attribute = attribute_lookup[attribute_id]\r\n            if not attribute.regexp:\r\n                continue\r\n\r\n            action = change['action']\r\n            if action == 'update' or action == 'new':\r\n                if change['new'] is None:\r\n                    continue\r\n                if not attribute.regexp_match(change['new']):\r\n                    yield object_id, attribute_id\r\n            elif action == 'multi':\r\n                for value in change['add']:\r\n                    if not attribute.regexp_match(value):\r\n                        yield object_id, attribute_id\r\n                        break\r\n\r\n\r\ndef _validate_required(changes, servers, servertype_attributes):\r\n    violations = []\r\n    for attribute_changes in changes:\r\n        object_id = attribute_changes['object_id']\r\n        server = servers[object_id]\r\n        for attribute_id, change in attribute_changes.items():\r\n            if attribute_id in Attribute.specials:\r\n                continue\r\n\r\n            sa = servertype_attributes[server['servertype']][attribute_id]\r\n            if change['action'] == 'delete' and sa.required:\r\n                violations.append((object_id, attribute_id))\r\n    return violations\r\n\r\n\r\ndef _validate_commit(changes, servers):\r\n    newer = []\r\n    for attribute_changes in changes:\r\n        object_id = attribute_changes['object_id']\r\n        server = servers[object_id]\r\n        for attr, change in attribute_changes.items():\r\n            if attr == 'object_id':\r\n                continue\r\n\r\n            action = change['action']\r\n            if action == 'new':\r\n                if attr in server:\r\n                    newer.append((object_id, attr, server[attr]))\r\n            elif action == 'update' or action == 'delete':\r\n                try:\r\n                    if str(server[attr]) != str(change['old']):\r\n                        newer.append((object_id, attr, server[attr]))\r\n                except KeyError:\r\n                    newer.append((object_id, attr, None))\r\n\r\n    return newer\r\n\r\n\r\ndef _build_error_message(violations_attribs, violations_readonly,\r\n                         violations_regexp, 
violations_required):\n\n violation_types = [\n (violations_attribs, 'Attribute not on servertype'),\n (violations_readonly, 'Attribute is read-only'),\n (violations_regexp, 'Regexp does not match'),\n (violations_required, 'Attribute is required'),\n ]\n\n message = []\n for violations, message_type in violation_types:\n seen = {}\n for server_id, vattr in violations:\n if vattr in seen:\n seen[vattr] += 1\n else:\n seen[vattr] = 1\n\n if seen:\n for vattr, num_affected in seen.items():\n message.append('{0}: {1} (#affected: {2})'.format(\n message_type, vattr, num_affected\n ))\n\n return '. '.join(message)\n\n\ndef _get_servertype(attributes):\n try:\n return Servertype.objects.get(pk=attributes['servertype'])\n except Servertype.DoesNotExist:\n raise CommitError('Unknown servertype: ' + attributes['servertype'])\n\n\ndef _get_real_attributes(attributes, attribute_lookup):\n for attribute_id, value in attributes.items():\n if attribute_id in Attribute.specials:\n continue\n\n attribute = attribute_lookup[attribute_id]\n value_multi = (\n isinstance(value, (list, set)) or\n hasattr(value, '_proxied_set')\n )\n\n if attribute.multi and not value_multi:\n raise CommitError(\n '{0} is a multi attribute, but {1} of type {2} given.'\n .format(attribute, repr(value), type(value).__name__)\n )\n if not attribute.multi and value_multi:\n raise CommitError(\n '{0} is not a multi attribute, but {1} of type {2} given.'\n .format(attribute, repr(value), type(value).__name__)\n )\n\n # Ignore nulls\n if not value_multi and value is None:\n continue\n\n # Ignore the virtual attribute types\n if attribute.type in ['reverse', 'supernet']:\n continue\n\n yield attribute, value\n\n\ndef _validate_real_attributes(servertype, real_attributes): # NOQA: C901\n violations_regexp = []\n violations_required = []\n servertype_attributes = set()\n for sa in servertype.attributes.all():\n attribute = sa.attribute\n servertype_attributes.add(attribute)\n\n # Ignore the related via attributes\n if sa.related_via_attribute:\n if sa.attribute in real_attributes:\n del real_attributes[attribute]\n continue\n\n # Handle not existing attributes (fill defaults, validate require)\n if attribute not in real_attributes:\n if attribute.multi:\n real_attributes[attribute] = sa.get_default_value()\n elif sa.required:\n if sa.default_value is not None:\n real_attributes[attribute] = sa.get_default_value()\n else:\n violations_required.append(attribute.pk)\n continue\n else:\n if sa.default_value is not None:\n real_attributes[attribute] = sa.get_default_value()\n else:\n continue\n\n value = real_attributes[attribute]\n\n if attribute.regexp:\n if attribute.multi:\n for val in value:\n if not attribute.regexp_match(str(val)):\n violations_regexp.append(attribute.pk)\n elif not attribute.regexp_match(value):\n violations_regexp.append(attribute.pk)\n\n # Check for attributes that are not defined on this servertype\n violations_attribs = []\n for attr in real_attributes:\n if attr not in servertype_attributes:\n violations_attribs.append(str(attr))\n\n handle_violations(\n violations_regexp,\n violations_required,\n violations_attribs,\n )\n\n\ndef _insert_server(hostname, intern_ip, servertype, attributes):\n\n if Server.objects.filter(hostname=hostname).exists():\n raise CommitError('Server with that hostname already exists')\n\n server = Server.objects.create(\n hostname=hostname,\n intern_ip=intern_ip,\n servertype=servertype,\n )\n server.full_clean()\n server.save()\n\n for attribute, value in attributes.items():\n if 
attribute.multi:\n for single_value in value:\n server.add_attribute(attribute, single_value)\n else:\n server.add_attribute(attribute, value)\n\n return server\n\n\ndef handle_violations(\n violations_regexp,\n violations_required,\n violations_attribs,\n):\n if violations_regexp or violations_required:\n if violations_regexp:\n regexp_msg = 'Attributes violating regexp: {0}. '.format(\n ', '.join(violations_regexp)\n )\n else:\n regexp_msg = ''\n if violations_required:\n required_msg = 'Attributes violating required: {0}.'.format(\n ', '.join(violations_required)\n )\n else:\n required_msg = ''\n\n raise CommitError(\n 'Validation failed. {0}{1}'.format(regexp_msg, required_msg),\n violations_regexp + violations_required,\n )\n if violations_attribs:\n raise CommitError(\n 'Attributes {0} are not defined on '\n 'this servertype. You can\\'t skip this validation!'\n .format(', '.join(violations_attribs)),\n violations_regexp + violations_required + violations_attribs,\n )\n","sub_path":"serveradmin/serverdb/query_committer.py","file_name":"query_committer.py","file_ext":"py","file_size_in_byte":21965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"339210333","text":"#\n# Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.\n# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl.\n#\n\nimport pandas as pd\nimport os\nimport json\nfrom os.path import dirname\n\nif __name__ == \"__main__\":\n\n data_dir = os.path.join(dirname(os.getcwd()), \"data/\")\n input_dir = os.path.join(data_dir, \"input/NCI109/\")\n graph_dir = os.path.join(data_dir, \"graph/NCI109/\")\n if not os.path.exists(input_dir):\n os.makedirs(input_dir)\n if not os.path.exists(graph_dir):\n os.makedirs(graph_dir)\n\n # sparse (block diagonal) adjacency matrix for all graphs\n f_edges = pd.read_csv(input_dir+\"NCI109_A.txt\", header=None, names=[\"src\", \"dst\"])\n\n # column vector of graph identifiers for all nodes of all graphs\n f_graph_id = pd.read_csv(input_dir+\"NCI109_graph_indicator.txt\", header=None, names=[\"graph_id\"])\n\n # column vector of node labels\n f_node_label = pd.read_csv(input_dir+\"NCI109_node_labels.txt\", header=None, names=[\"node_label\"])\n\n # information for all the nodes\n f_node_info = pd.merge(f_graph_id, f_node_label, left_index=True, right_index=True)\n\n # create the vertex file\n f_node_info.to_csv(graph_dir+\"NCI109_v.csv\", header=True, index=True, index_label=\"v_id\")\n\n # create the edge file\n f_edges.to_csv(graph_dir+\"NCI109_e.csv\", header=True, index=False)\n\n\n # create a configuration file\n config = {\n \"header\": True,\n \"vertex_id_column\": \"v_id\",\n \"edge_source_column\": \"src\",\n \"edge_destination_column\": \"dst\",\n \"format\": \"csv\",\n \"separator\": \",\",\n \"vertex_id_type\": \"int\",\n \"edge_uris\": [\"NCI109_e.csv\"],\n \"vertex_uris\": [\"NCI109_v.csv\"],\n \"vertex_props\": [\n {\"name\": \"graph_id\", \"type\": \"int\"},\n {\"name\": \"node_label\", \"type\": \"int\"}\n ]\n }\n\n # Save to file;\n with open(graph_dir+\"NCI109.json\", \"w\") as f:\n json.dump(config, f, indent=4)\n\n print(\"Created the graph representation in %s ready to load in PGX\" % graph_dir)","sub_path":"graphlet-representation/graph-creation/graph-creator.py","file_name":"graph-creator.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} 
+{"seq_id":"156366066","text":"def eh_primo(n):\n if n == 0 or n == 1:\n return False\n elif n%2==0 and n!=2:\n return False\n else:\n i = 3\n while i < n:\n if n%i==0:\n return False\n i += 2\n return True\nprimos = []\n\ndef primos_entre(a,b):\n for i in range(a,b+1):\n result = eh_primo(i)\n if result == True:\n primos.append(i)\n return primos\n","sub_path":"backup/user_170/ch51_2020_06_21_20_41_43_773561.py","file_name":"ch51_2020_06_21_20_41_43_773561.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"239037435","text":"import hashlib\nimport json\nfrom time import time\nfrom uuid import uuid4\nfrom datetime import date\n\nfrom flask import Flask, jsonify, request\nfrom flask_cors import CORS\n\n\nclass Blockchain:\n def __init__(self):\n self.chain = []\n self.current_transactions = []\n\n # create genesis block\n self.new_block(previous_hash=1, proof=100)\n \n def new_transaction(self, sender, recipient, amount):\n t_date = date.today().strftime(\"%m/%d/%Y\")\n self.current_transactions.append({\n 'sender': sender,\n 'recipient': recipient,\n 'date': t_date\n })\n return self.last_block['index'] + 1\n \n def new_block(self, proof, previous_hash=None):\n block = {\n 'index': len(self.chain) + 1,\n 'timestamp': time(),\n 'transactions': self.current_transactions,\n 'proof': proof,\n 'previous_hash': previous_hash or self.hash(self.last_block)\n }\n self.current_transactions = [] # resets current transactions for that block\n self.chain.append(block) # add block to the chain\n return block\n \n def hash(self, block):\n block_string = json.dumps(block, sort_keys=True) # creates string representation of previous block\n hash_of_block_string = hashlib.sha256(block_string.encode())\n new_hash = hash_of_block_string.hexdigest() # returns 32 byte hash\n return new_hash\n \n @staticmethod # doesn't need to be called, bound to the class, not object\n def valid_proof(block_string, proof):\n guess = f'{block_string}{proof}'.encode()\n guess_hash = hashlib.sha256(guess).hexdigest()\n\n return guess_hash[:6] == '000000'\n\n @property\n def last_block(self):\n return self.chain[-1]\n\n\napp = Flask(__name__)\nCORS(app)\n\nnode_id = str(uuid4()).replace('-', '')\n\nblockchain = Blockchain()\n\n@app.route('/test', methods=['GET'])\ndef test():\n return jsonify('hello'), 200\n\n@app.route('/mine', methods=['POST'])\ndef mine():\n # check to see that a proof and an identifier were sent\n data = request.get_json()\n if data['proof'] and data['id']:\n proof = data['proof']\n miner_id = data['id']\n\n # make a string of the last block on the chain\n last_block_string = json.dumps(blockchain.last_block, sort_keys=True)\n # run the blockstring and the submitted proof through the function\n valid_submission = blockchain.valid_proof(last_block_string, proof)\n if valid_submission:\n # make a new block, reward the miner\n blockchain.new_transaction(\"server\", miner_id, 1)\n previous_hash = blockchain.hash(blockchain.last_block)\n new_block = blockchain.new_block(proof, previous_hash)\n response = {\n 'message': 'New Block Forged',\n 'block': new_block\n }\n return jsonify(response), 200\n else:\n response = {\n 'message': 'Proof valid but already submitted'\n }\n return jsonify(response), 400\n else:\n response = {\n 'error': 'Invalid submission. 
Requires proof and miner_id'\n }\n return jsonify(response), 400\n\n@app.route('/last_block', methods=['GET'])\ndef last_block():\n response = {\n 'last_block': blockchain.last_block\n }\n return jsonify(response), 200\n\n@app.route('/chain', methods=['GET'])\ndef full_chain():\n response = {\n # TODO: Return the chain and its current length\n 'chain': blockchain.chain,\n 'length': len(blockchain.chain)\n }\n return jsonify(response), 200\n\n\n@app.route('/transactions/new', methods=['POST'])\ndef receive_transaction():\n data = request.get_json()\n required = ['sender', 'recipient', 'amount']\n\n if not all(k in data for k in required):\n ## error\n response = { 'message': 'Missing values'}\n return jsonify(response), 400\n \n index = blockchain.new_transaction(data['sender'], data['recipient'], data['amount'])\n response = { 'message': f'Transaction will be added to block at index {index}'}\n return jsonify(response), 201\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"91024807","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport pygame,sys\nfrom pygame.locals import *\npygame.init()\nfen = pygame.display.set_mode((800, 600))\nmonSmiley = pygame.image.load(\"asset/smiley.png\").convert_alpha()\nfen.blit(monSmiley,(400,300))\npygame.display.flip()\nwhile True :\n for event in pygame.event.get():\n if event.type==QUIT:\n pygame.quit()\n sys.exit()","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"78652123","text":"def same( pixel_A , pixel_B ) :\n (B_A,(AR,AG,AB)) = pixel_A\n (B_B,(BR,BG,BB)) = pixel_B\n \n #if (abs(B_A-B_B)) > 25 :\n # return False\n #return True\n R = abs((AR-BR))\n G = abs((AG-BG))\n B = abs((AB-BB))\n\n\n diff = 0.2126*R + 0.7152*G + 0.0722*B\n\n if diff < 15 :\n return True\n\n return False\n","sub_path":"compare_pixels.py","file_name":"compare_pixels.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"530557016","text":"import random as rnd\nimport json\nfrom sympy import symbols, pretty\n\ndef elementosListaEhDistinta(lista):\n for indiceLista in range(len(lista)):\n for indiceListaComparacao in range(len(lista)):\n if indiceLista == 4:\n return True\n elif lista[indiceLista] == lista[indiceListaComparacao] and indiceLista != indiceListaComparacao:\n return False\n\nenunciado = [None]*200\nk = 0\nwhile k < 200:\n\n questoes = open('questao{}-91b.json'.format(k+1), 'w')\n\n a, b, c, x, Y = symbols(\"a b c x Y\")\n\n a = rnd.randint(-100,100)\n x1 = 7 - rnd.randint(-1000,1000)\n x2 = 7 - rnd.randint(-1000,1000)\n\n while x1 == x2:\n x2 = 7 - rnd.randint(-1000,1000)\n\n b = - a * (x1 + x2)\n\n # Este '+1' foi utilizado para compensar o '-1' que o usuário tera de fazer na constante C.\n c = a * (x1 * x2) + 1\n\n listLetra = [\"A\",\"B\",\"C\",\"D\",\"E\"]\n\n questaoCerta = rnd.choice(listLetra)\n questaoInvertida = rnd.choice(listLetra)\n\n while(questaoCerta == questaoInvertida):\n questaoInvertida = rnd.choice(listLetra)\n \n resposta = \"{} e {}\".format(x1, x2)\n\n listAlternativas = [0,0,0,0,0]\n isCorrect = ['','','','','']\n howGenerated = ['','','','','']\n\n # Insere a resposta certa na letra escolhida para 
ser certa, uma letra recebe a questão invertida e o resto recebe números aleatórios\n while(elementosListaEhDistinta(listAlternativas) == False):\n possuiQuestaoInvertida = rnd.randint(0,1)\n\n for numLetra in range(0,5):\n if questaoCerta == listLetra[numLetra]:\n listAlternativas[numLetra] = resposta\n isCorrect[numLetra] = \"Sim\"\n howGenerated[numLetra] = \"nenhum\"\n elif questaoInvertida == listLetra[numLetra] and possuiQuestaoInvertida == 1:\n numRandomTemporario = rnd.randint(0,1)\n\n if numRandomTemporario == 0:\n\n listAlternativas[numLetra] = \"{} e {}\".format(x1 - 14, x2 - 14)\n isCorrect[numLetra] = \"Nao\"\n howGenerated[numLetra] = \"invertida e positiva\"\n else:\n\n listAlternativas[numLetra] = \"{} e {}\".format(-x1 - 14, -x2 - 14)\n isCorrect[numLetra] = \"Nao\"\n howGenerated[numLetra] = \"invertida e negativa\"\n else:\n numRandomTemporario = rnd.randint(0,1)\n\n if numRandomTemporario == 0:\n\n x1Aleatorio = rnd.randint(-1000, 1000)\n x2Aleatorio = rnd.randint(-1000, 1000)\n\n listAlternativas[numLetra] = \"{} e {}\".format(x1Aleatorio,x2Aleatorio)\n isCorrect[numLetra] = \"Nao\"\n howGenerated[numLetra] = \"gerada aleatoriamente e positiva\"\n else:\n\n x1Aleatorio = rnd.randint(-1000, 1000)\n x2Aleatorio = rnd.randint(-1000, 1000)\n\n listAlternativas[numLetra] = \"{} e {}\".format(-x1Aleatorio, -x2Aleatorio)\n isCorrect[numLetra] = \"Nao\"\n howGenerated[numLetra] = \"gerada aleatoriamente e negativa\"\n\n # Cria a variável que será convertida em um arquivo json\n dados = {\n 'equacaoExponencial' : [\n {\n 'a' : a,\n 'b' : b,\n 'c' : c,\n 'x1' : x1,\n 'x2' : x2,\n 'resposta': resposta\n }\n ],\n 'respostas': [\n {\n 'letra': 'a)',\n 'resposta': listAlternativas[0],\n 'correta': isCorrect[0],\n 'tipoerro': howGenerated[0]\n },\n {\n 'letra': 'b)',\n 'resposta': listAlternativas[1],\n 'correta': isCorrect[1],\n 'tipoerro': howGenerated[1]\n },\n {\n 'letra': 'c)',\n 'resposta': listAlternativas[2],\n 'correta': isCorrect[2],\n 'tipoerro': howGenerated[2]\n },{\n 'letra': 'd)',\n 'resposta': listAlternativas[3],\n 'correta': isCorrect[3],\n 'tipoerro': howGenerated[3]\n },{\n 'letra': 'e)',\n 'resposta': listAlternativas[4],\n 'correta': isCorrect[4],\n 'tipoerro': howGenerated[4]\n }\n ],\n 'atributosquestao': [\n {\n 'enunciado': 'Determine as raízes da equação em IR+: {} = Y'.format(pretty(Y ** (a*(x**2) + b*x + c))),\n 'corretaspossiveis': listAlternativas[isCorrect.index(\"Sim\")],\n 'corretas': isCorrect.count(\"Sim\"),\n 'aleatoriapositiva': howGenerated.count(\"gerada aleatoriamente e positiva\"),\n 'aleatorianegativa': howGenerated.count(\"gerada aleatoriamente e negativa\"),\n 'invertidapositiva': howGenerated.count(\"invertida e positiva\"),\n 'invertidanegativa': howGenerated.count(\"invertida e negativa\"),\n 'respostascorretas': listLetra[isCorrect.index(\"Sim\")]\n }\n ]\n }\n\n # Verifica os enunciados\n if dados['atributosquestao'][0]['enunciado'] in enunciado:\n continue\n else:\n \n # Armazena os enunciados\n enunciado[k] = dados['atributosquestao'][0]['enunciado']\n \n # Cria o arquivo JSON\n print(\"\\nquestao {}\\n\".format(k+1),json.dumps(dados))\n json.dump(dados, questoes, indent=4)\n k = k + 1\n\nquestoes.close()\n\n","sub_path":"src/json/GeradorQuest91b.py","file_name":"GeradorQuest91b.py","file_ext":"py","file_size_in_byte":5611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"309382967","text":"import tornado.web\nimport tornado.websocket\n\nfrom tornado.options import define, 
options\nimport tornado.ioloop\n\ndefine(\"debug\", default=False, help=\"run in debug mode\")\ndefine(\"port\", default=8080, help=\"port to run on\")\n\n\nclass GameHandler(tornado.web.RequestHandler):\n    def get(self):\n        self.render(\"game.html\")\n\n\nclass ConnectionHandler(tornado.websocket.WebSocketHandler):\n    def open(self):\n        print(\"hello!\")\n\n\ndef make_application():\n    return tornado.web.Application([\n        (r\"/\", GameHandler),\n        (r\"/conn\", ConnectionHandler),\n        (r\"/(.*)\", tornado.web.StaticFileHandler, {\"path\": \"static\"})\n    ], template_path=\"templates\", debug=options.debug)\n\n\ndef main():\n    tornado.options.parse_command_line()\n    application = make_application()\n\n    application.listen(options.port)\n    tornado.ioloop.IOLoop.instance().start()\n\n","sub_path":"catcity/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"287440927","text":"from django.db import models\nimport uuid\nfrom users.models import Profile\n\nUP_VOTE = 'up'\nDOWN_VOTE = 'down'\n\n\nclass BaseModelMixin(models.Model):\n    \"\"\"\n    Base model mixin to create 'created' and 'id' field for all models\n    \"\"\"\n    created = models.DateTimeField(auto_now_add=True)\n    id = models.UUIDField(default=uuid.uuid4, unique=True, primary_key=True, editable=False)\n\n    class Meta:\n        abstract = True\n\n\nclass Project(BaseModelMixin, models.Model):\n    \"\"\"\n    Project Model, to store information related to each project\n    \"\"\"\n    owner = models.ForeignKey(Profile, null=True, blank=True, on_delete=models.SET_NULL)\n    title = models.CharField(max_length=255)\n    description = models.TextField(null=True, blank=True)\n    featured_image = models.ImageField(null=True, blank=True, default='default.jpg')\n    demo_link = models.CharField(max_length=2000, null=True, blank=True)\n    source_link = models.CharField(max_length=2000, null=True, blank=True)\n    tags = models.ManyToManyField('Tag', blank=True)\n    vote_total = models.IntegerField(default=0, null=True, blank=True)\n    vote_ratio = models.IntegerField(default=0, null=True, blank=True)\n\n    @property\n    def reviewers(self):\n        \"\"\"Getting all reviewers that voted this project\"\"\"\n        return self.review_set.all().values_list('owner__id', flat=True)\n\n    @property\n    def get_vote_count(self):\n        \"\"\"getting total number of votes\"\"\"\n        reviews = self.review_set.all()\n        up_votes = reviews.filter(value=UP_VOTE).count()\n        total_votes = reviews.count()\n\n        # avoid ZeroDivisionError when the project has no reviews yet\n        ratio = (up_votes / total_votes) * 100 if total_votes else 0\n        self.vote_total = total_votes\n        self.vote_ratio = ratio\n        self.save()\n\n    def __str__(self):\n        return self.title\n\n    class Meta:\n        ordering = ('-vote_ratio', '-vote_total', 'title')\n\n\nclass Review(BaseModelMixin, models.Model):\n    \"\"\"\n    Review model, to store project reviews\n    \"\"\"\n\n    VOTE_TYPE = (\n        (UP_VOTE, 'Up Vote'),\n        (DOWN_VOTE, 'Down Vote'),\n    )\n\n    owner = models.ForeignKey(Profile, on_delete=models.CASCADE, null=True)\n    project = models.ForeignKey(Project, on_delete=models.CASCADE)\n    body = models.TextField(null=True, blank=True)\n    value = models.CharField(max_length=200, choices=VOTE_TYPE)\n\n    def __str__(self):\n        return self.value\n\n    class Meta:\n        unique_together = [['owner', 'project']]\n\n\nclass Tag(BaseModelMixin, models.Model):\n    \"\"\"\n    Tag model, to store project tags\n    \"\"\"\n    name = models.CharField(max_length=200)\n\n    def __str__(self):\n        return 
self.name\n","sub_path":"projects/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"201859375","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\n\nfrom start import mainEmbedProcess, mainExtractProcess\n\n\nclass Ui_MainWindow(QtWidgets.QMainWindow):\n\n\tdef __init__(self):\n\t\tsuper(Ui_MainWindow,self).__init__()\n\t\tself.setupUi(self)\n\t\tself.retranslateUi(self)\n\t\tself.filename = \"\"\n\t\tself.embedMessage = \"\"\n\t\tself.warningMesage = \"\"\n\n\tdef setupUi(self, MainWindow):\n\t\tMainWindow.setObjectName(\"MainWindow\")\n\t\tMainWindow.resize(690, 790)\n\t\tself.widget = QtWidgets.QWidget(MainWindow)\n\t\tself.retranslateUi(MainWindow)\n\n\t\tself.pushButton = QtWidgets.QPushButton(self.widget)\n\t\tself.pushButton.setGeometry(QtCore.QRect(10, 400, 280, 40))\n\t\tself.pushButton.setText(\"打开\")\n\n\t\tself.pushButton2 = QtWidgets.QPushButton(self.widget)\n\t\tself.pushButton2.setGeometry(QtCore.QRect(10, 450, 280, 40))\n\t\tself.pushButton2.setText(\"嵌入\")\n\n\t\tself.pushButton3 = QtWidgets.QPushButton(self.widget)\n\t\tself.pushButton3.setGeometry(QtCore.QRect(10, 500, 280, 40))\n\t\tself.pushButton3.setText(\"提取\")\n\n\t\tself.textEdit = QtWidgets.QPlainTextEdit(self.widget)\n\t\tself.textEdit.setGeometry(QtCore.QRect(10, 10, 280, 380))\n\t\tself.textEdit.setPlaceholderText(\"在这里输入嵌入信息\")\n\t\t# self.textEdit.setTextInteractionFlags(Qt.TextSelectableByMouse | Qt.TextSelectableByKeyboard)\n\n\t\t# self.textEdit.setAcceptRichText(True)\n\n\n\t\tself.warningLabel = QtWidgets.QPlainTextEdit(self.widget)\n\t\tself.warningLabel.setGeometry(QtCore.QRect(10, 550, 280, 230))\n\t\tself.warningLabel.setPlainText(\"在这里显示提示信息\\n\")\n\t\t# self.warningLabel.setAlignment(Qt.AlignLeft | Qt.AlignTop)\n\t\tself.warningLabel.setReadOnly(True)\n\n\n\t\tself.original = QtWidgets.QLabel(self.widget)\n\t\tself.original.setGeometry(QtCore.QRect(300, 10, 380, 380))\n\t\tself.original.setText(\"这里显示原来的图片\")\n\n\t\tself.changed = QtWidgets.QLabel(self.widget)\n\t\tself.changed.setGeometry(QtCore.QRect(300, 400, 380, 380))\n\t\tself.changed.setText(\"这里显示嵌入/提取之后的图片\")\n\n\n\n\t\tMainWindow.setCentralWidget(self.widget)\n\t\tQtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n\t\tself.action()\n\n\tdef action(self):\n\t\tself.pushButton.clicked.connect(self.openfile)\n\t\tself.pushButton2.clicked.connect(self.processEmbed)\n\t\tself.pushButton3.clicked.connect(self.processExtract)\n\n\n\tdef retranslateUi(self, MainWindow):\n\t\t_translate = QtCore.QCoreApplication.translate\n\t\tMainWindow.setWindowTitle(_translate(\"MainWindow\", \"保持灰度不变性的可逆图片隐藏\"))\n\n\n\tdef openfile(self):\n\t\topenfile_name = QFileDialog.getOpenFileName(self,'选择图片文件','','图片文件(*.png)')\n\t\tprint(openfile_name)\n\t\tself.filename = openfile_name[0]\n\t\tif self.filename:\n\t\t\tself.pix = QtGui.QPixmap(self.filename.encode('utf-8').decode('utf-8'))\n\t\t\t# 在l1里面,调用setPixmap命令,建立一个图像存放框,并将之前的图像png存放在这个框框里。\n\t\t\tself.original.setPixmap(self.pix)\n\t\t\tself.original.setScaledContents(True)\n\n\tdef processEmbed(self):\n\t\tself.embedMessage = self.textEdit.toPlainText()\n\t\tif not self.filename:\n\t\t\tself.warningMesage = \"请选择图片文件!\"\n\t\t\tself.warningLabel.setPlainText(self.warningMesage)\n\t\t\treturn\n\t\telif not self.embedMessage:\n\t\t\tself.warningMesage = 
\"请输入嵌入信息\"\n\t\t\tself.warningLabel.setPlainText(self.warningMesage)\n\t\t\treturn\n\n\t\tself.embedMessage = self.textEdit.toPlainText()\n\t\tself.warningMesage = \"嵌入信息:\" + self.embedMessage + \"\\n开始嵌入\"\n\t\tself.warningLabel.setPlainText(self.warningMesage)\n\t\ttry:\n\t\t\tmainEmbedProcess(Size=None, fig=self.filename, Dt=20, rhoT=0, msg=self.embedMessage)\n\t\t\tself.warningMesage += \"\\n嵌入成功!\"\n\t\t\tself.warningLabel.setPlainText(self.warningMesage)\n\t\texcept:\n\t\t\tself.warningMesage += \"\\n嵌入失败!\"\n\t\t\tself.warningLabel.setPlainText(self.warningMesage)\n\t\t\treturn\n\n\t\tself.resultfilename = '.'.join(self.filename.split('.')[:-1] + ['modified'] + self.filename.split('.')[-1:])\n\t\tself.warningMesage += \"\\n嵌入后文件保存为 \" + self.resultfilename\n\t\tself.warningLabel.setPlainText(self.warningMesage)\n\t\tprint(self.resultfilename)\n\n\t\tself.pixEmbed = QtGui.QPixmap(self.resultfilename)\n\t\tself.changed.setPixmap(self.pixEmbed)\n\t\tself.changed.setScaledContents(True)\n\n\tdef processExtract(self):\n\t\tself.embedMessage = self.textEdit.toPlainText()\n\t\tif not self.filename:\n\t\t\tself.warningMesage = \"请选择图片文件!\"\n\t\t\tself.warningLabel.setPlainText(self.warningMesage)\n\t\t\treturn\n\n\t\tself.warningMesage = \"开始提取信息\"\n\t\tself.warningLabel.setPlainText(self.warningMesage)\n\t\ttry:\n\t\t\tself.extractMessage = mainExtractProcess(fig=self.filename)\n\t\t\tself.warningMesage += \"\\n提取成功!\" + \"\\n提取信息为:\" + self.extractMessage\n\t\t\tself.warningLabel.setPlainText(self.warningMesage)\n\n\t\texcept:\n\t\t\tself.warningMesage += \"\\n提取失败!\"\n\t\t\tself.warningLabel.setPlainText(self.warningMesage)\n\t\t\treturn\n\n\t\tself.resultfilename = '.'.join(self.filename.split('.')[:-1] + ['extracted'] + self.filename.split('.')[-1:])\n\t\tself.warningMesage += \"\\n提取后文件保存为 \" + self.resultfilename\n\t\tself.warningLabel.setPlainText(self.warningMesage)\n\t\tprint(self.resultfilename)\n\t\t#\n\t\tself.pixEmbed = QtGui.QPixmap(self.resultfilename)\n\t\tself.changed.setPixmap(self.pixEmbed)\n\t\tself.changed.setScaledContents(True)\n\nif __name__ == \"__main__\":\n\timport sys\n\tapp = QtWidgets.QApplication(sys.argv)\n\tMainWindow = QtWidgets.QMainWindow()\n\tui = Ui_MainWindow()\n\tui.setupUi(MainWindow)\n\tMainWindow.show()\n\tsys.exit(app.exec_())","sub_path":"window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":5474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"36000756","text":"s=input()\r\n\r\nmayusculas = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\r\nminusculas = 'abcdefghijklmnopqrstuvwxyz'\r\na=\"\"\r\nfor i in s:\r\n if i in mayusculas:\r\n a=a+i.lower()\r\n elif i in minusculas:\r\n a=a+i.upper()\r\n else:\r\n a=a+i\r\n\r\nprint(a)\r\n","sub_path":"Python Intermedio/Clase 02/PI0212 - Intercambio.py","file_name":"PI0212 - Intercambio.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"440513769","text":"from array import array\nfrom collections import defaultdict\nimport io\nimport sys\n\nfrom rich.console import Console\nfrom rich.pretty import install, pretty_repr\n\n\ndef test_install():\n console = Console(file=io.StringIO())\n dh = sys.displayhook\n install(console)\n sys.displayhook(\"foo\")\n assert console.file.getvalue() == \"'foo'\\n\"\n assert sys.displayhook is not dh\n\n\ndef test_pretty():\n test = {\n \"foo\": [1, 2, 3, {4, 5, 6, (7, 8, 9)}, {}],\n \"bar\": {\"egg\": \"baz\", \"words\": 
[\"Hello World\"] * 10},\n False: \"foo\",\n True: \"\",\n \"text\": (\"Hello World\", \"foo bar baz egg\"),\n }\n\n result = pretty_repr(test, max_width=80)\n print(result)\n print(repr(result))\n expected = \"{\\n 'foo': [1, 2, 3, {(7, 8, 9), 4, 5, 6}, {}],\\n 'bar': {\\n 'egg': 'baz',\\n 'words': [\\n 'Hello World',\\n 'Hello World',\\n 'Hello World',\\n 'Hello World',\\n 'Hello World',\\n 'Hello World',\\n 'Hello World',\\n 'Hello World',\\n 'Hello World',\\n 'Hello World'\\n ]\\n },\\n False: 'foo',\\n True: '',\\n 'text': ('Hello World', 'foo bar baz egg')\\n}\"\n assert result == expected\n\n\ndef test_small_width():\n test = [\"Hello world! 12345\"]\n result = pretty_repr(test, max_width=10)\n expected = \"[\\n 'Hello world! 12345'\\n]\"\n assert result == expected\n\n\ndef test_broken_repr():\n class BrokenRepr:\n def __repr__(self):\n 1 / 0\n\n test = [BrokenRepr()]\n result = pretty_repr(test)\n expected = \"[]\"\n assert result == expected\n\n\ndef test_recursive():\n test = []\n test.append(test)\n result = pretty_repr(test)\n expected = \"[...]\"\n assert result == expected\n\n\ndef test_defaultdict():\n test_dict = defaultdict(int, {\"foo\": 2})\n result = pretty_repr(test_dict)\n assert result == \"defaultdict(, {'foo': 2})\"\n\n\ndef test_array():\n test_array = array(\"I\", [1, 2, 3])\n result = pretty_repr(test_array)\n assert result == \"array('I', [1, 2, 3])\"\n\n\ndef test_tuple_of_one():\n assert pretty_repr((1,)) == \"(1,)\"\n","sub_path":"tests/test_pretty.py","file_name":"test_pretty.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"580837183","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport socket\nimport socks\nimport time\nimport json\nimport threading\nimport string\nimport requests\nimport random\nimport os\nfrom core.alert import *\nfrom core.targets import target_type\nfrom core.targets import target_to_host\nfrom core.load_modules import load_file_path\nfrom lib.icmp.engine import do_one as do_one_ping\nfrom lib.socks_resolver.engine import getaddrinfo\nfrom core._time import now\nfrom core.log import __log_into_file\n\n\ndef extra_requirements_dict():\n return {\n \"pma_scan_http_method\": [\"GET\"],\n \"pma_scan_random_agent\": [\"True\"],\n \"pma_scan_list\": ['/admin/', '/accounts/login/', '/admin1.php/', '/admin.php/',\n '/admin.html/', '/admin1.php/', '/admin1.html/', '/login.php/', '/admin/cp.php/', '/cp.php/',\n '/administrator/index.php/', '/administrator/index.html/', '/administartor/', '/admin.login/',\n '/administrator/login.php/', '/administrator/login.html/', '/phpMyAdmin/', '/phpmyadmin/',\n '/PMA/', '/pma/', '/dbadmin/', '/mysql/', '/myadmin/', '/phpmyadmin2/', '/phpMyAdmin2/',\n '/phpMyAdmin-2/', '/php-my-admin/', '/phpMyAdmin-2.2.3/', '/phpMyAdmin-2.2.6/',\n '/phpMyAdmin-2.5.1/', '/phpMyAdmin-2.5.4/', '/phpMyAdmin-2.5.5-rc1/',\n '/phpMyAdmin-2.5.5-rc2/', '/phpMyAdmin-2.5.5/', '/phpMyAdmin-2.5.5-pl1/',\n '/phpMyAdmin-2.5.6-rc1/', '/phpMyAdmin-2.5.6-rc2/', '/phpMyAdmin-2.5.6/',\n '/phpMyAdmin-2.5.7/', '/phpMyAdmin-2.5.7-pl1/', '/phpMyAdmin-2.6.0-alpha/',\n '/phpMyAdmin-2.6.0-alpha2/', '/phpMyAdmin-2.6.0-beta1/', '/phpMyAdmin-2.6.0-beta2/',\n '/phpMyAdmin-2.6.0-rc1/', '/phpMyAdmin-2.6.0-rc2/', '/phpMyAdmin-2.6.0-rc3/',\n '/phpMyAdmin-2.6.0/', '/phpMyAdmin-2.6.0-pl1/', '/phpMyAdmin-2.6.0-pl2/',\n '/phpMyAdmin-2.6.0-pl3/', '/phpMyAdmin-2.6.1-rc1/', '/phpMyAdmin-2.6.1-rc2/',\n '/phpMyAdmin-2.6.1/', '/phpMyAdmin-2.6.1-pl1/', 
'/phpMyAdmin-2.6.1-pl2/',\n '/phpMyAdmin-2.6.1-pl3/', '/phpMyAdmin-2.6.2-rc1/', '/phpMyAdmin-2.6.2-beta1/',\n '/phpMyAdmin-2.6.2-rc1/', '/phpMyAdmin-2.6.2/', '/phpMyAdmin-2.6.2-pl1/',\n '/phpMyAdmin-2.6.3/', '/phpMyAdmin-2.6.3-rc1/', '/phpMyAdmin-2.6.3/',\n '/phpMyAdmin-2.6.3-pl1/', '/phpMyAdmin-2.6.4-rc1/', '/phpMyAdmin-2.6.4-pl1/',\n '/phpMyAdmin-2.6.4-pl2/', '/phpMyAdmin-2.6.4-pl3/', '/phpMyAdmin-2.6.4-pl4/',\n '/phpMyAdmin-2.6.4/', '/phpMyAdmin-2.7.0-beta1/', '/phpMyAdmin-2.7.0-rc1/',\n '/phpMyAdmin-2.7.0-pl1/', '/phpMyAdmin-2.7.0-pl2/', '/phpMyAdmin-2.7.0/',\n '/phpMyAdmin-2.8.0-beta1/', '/phpMyAdmin-2.8.0-rc1/', '/phpMyAdmin-2.8.0-rc2/',\n '/phpMyAdmin-2.8.0/', '/phpMyAdmin-2.8.0.1/', '/phpMyAdmin-2.8.0.2/', '/phpMyAdmin-2.8.0.3/',\n '/phpMyAdmin-2.8.0.4/', '/phpMyAdmin-2.8.1-rc1/', '/phpMyAdmin-2.8.1/', '/phpMyAdmin-2.8.2/',\n '/sqlmanager/', '/mysqlmanager/', '/p/m/a/', '/PMA2005/', '/pma2005/', '/phpmanager/',\n '/php-myadmin/', '/phpmy-admin/', '/webadmin/', '/sqlweb/', '/websql/',\n '/webdb/', '/mysqladmin/', '/mysql-admin/', '/mya/']\n }\n\n\ndef check(target, user_agent, timeout_sec, log_in_file, language, time_sleep, thread_tmp_filename, retries,\n http_method, socks_proxy, scan_id, scan_cmd):\n status_codes = [200, 401, 403]\n time.sleep(time_sleep)\n try:\n if socks_proxy is not None:\n socks_version = socks.SOCKS5 if socks_proxy.startswith('socks5://') else socks.SOCKS4\n socks_proxy = socks_proxy.rsplit('://')[1]\n if '@' in socks_proxy:\n socks_username = socks_proxy.rsplit(':')[0]\n socks_password = socks_proxy.rsplit(':')[1].rsplit('@')[0]\n socks.set_default_proxy(socks_version, str(socks_proxy.rsplit('@')[1].rsplit(':')[0]),\n int(socks_proxy.rsplit(':')[-1]), username=socks_username,\n password=socks_password)\n socket.socket = socks.socksocket\n socket.getaddrinfo = getaddrinfo\n else:\n socks.set_default_proxy(socks_version, str(socks_proxy.rsplit(':')[0]), int(socks_proxy.rsplit(':')[1]))\n socket.socket = socks.socksocket\n socket.getaddrinfo = getaddrinfo\n n = 0\n while 1:\n try:\n if http_method == \"GET\":\n r = requests.get(target, timeout=timeout_sec, headers=user_agent)\n elif http_method == \"HEAD\":\n r = requests.head(target, timeout=timeout_sec, headers=user_agent)\n content = r.content\n break\n except:\n n += 1\n if n is retries:\n warn(messages(language, 106).format(target))\n return 1\n if r.status_code in status_codes:\n info(messages(language, 38).format(target, r.status_code, r.reason))\n __log_into_file(thread_tmp_filename, 'w', '0', language)\n __log_into_file(log_in_file, 'a',\n json.dumps({'HOST': target_to_host(target), 'USERNAME': '', 'PASSWORD': '',\n 'PORT': \"\", 'TYPE': 'pma_scan',\n 'DESCRIPTION': messages(language, 38).format(target, r.status_code, r.reason),\n 'TIME': now(), 'CATEGORY': \"scan\", 'SCAN_ID': scan_id,\n 'SCAN_CMD': scan_cmd}) + '\\n', language)\n return True\n except:\n return False\n\n\ndef test(target, retries, timeout_sec, user_agent, http_method, socks_proxy, verbose_level, trying, total_req, total,\n num, language):\n if verbose_level > 3:\n info(messages(language, 72).format(trying, total_req, num, total, target_to_host(target), \"default_port\",\n 'pma_scan'))\n if socks_proxy is not None:\n socks_version = socks.SOCKS5 if socks_proxy.startswith('socks5://') else socks.SOCKS4\n socks_proxy = socks_proxy.rsplit('://')[1]\n if '@' in socks_proxy:\n socks_username = socks_proxy.rsplit(':')[0]\n socks_password = socks_proxy.rsplit(':')[1].rsplit('@')[0]\n socks.set_default_proxy(socks_version, 
str(socks_proxy.rsplit('@')[1].rsplit(':')[0]),\n int(socks_proxy.rsplit(':')[-1]), username=socks_username,\n password=socks_password)\n socket.socket = socks.socksocket\n socket.getaddrinfo = getaddrinfo\n else:\n socks.set_default_proxy(socks_version, str(socks_proxy.rsplit(':')[0]), int(socks_proxy.rsplit(':')[1]))\n socket.socket = socks.socksocket\n socket.getaddrinfo = getaddrinfo\n n = 0\n while 1:\n try:\n if http_method == \"GET\":\n r = requests.get(target, timeout=timeout_sec, headers=user_agent)\n elif http_method == \"HEAD\":\n r = requests.head(target, timeout=timeout_sec, headers=user_agent)\n return 0\n except:\n n += 1\n if n is retries:\n return 1\n\n\ndef start(target, users, passwds, ports, timeout_sec, thread_number, num, total, log_in_file, time_sleep, language,\n verbose_level, socks_proxy, retries, methods_args, scan_id, scan_cmd): # Main function\n if target_type(target) != 'SINGLE_IPv4' or target_type(target) != 'DOMAIN' or target_type(\n target) != 'HTTP' or target_type(target) != 'SINGLE_IPv6':\n # rand useragent\n user_agent_list = [\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.5) Gecko/20060719 Firefox/1.5.0.5\",\n \"Googlebot/2.1 ( http://www.googlebot.com/bot.html)\",\n \"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.13 (KHTML, like Gecko) Ubuntu/10.04\"\n \" Chromium/9.0.595.0 Chrome/9.0.595.0 Safari/534.13\",\n \"Mozilla/5.0 (compatible; MSIE 7.0; Windows NT 5.2; WOW64; .NET CLR 2.0.50727)\",\n \"Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51\",\n \"Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620\",\n \"Debian APT-HTTP/1.3 (0.8.10.3)\",\n \"Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)\",\n \"Googlebot/2.1 (+http://www.googlebot.com/bot.html)\",\n \"Mozilla/5.0 (compatible; Yahoo! 
Slurp; http://help.yahoo.com/help/us/ysearch/slurp)\",\n \"YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; \"\n \"http://help.yahoo.com/help/us/shop/merchant/)\",\n \"Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)\",\n \"Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)\",\n \"msnbot/1.1 (+http://search.msn.com/msnbot.htm)\"\n ]\n http_methods = [\"GET\", \"HEAD\"]\n user_agent = {'User-agent': random.choice(user_agent_list)}\n\n # requirements check\n new_extra_requirements = extra_requirements_dict()\n if methods_args is not None:\n for extra_requirement in extra_requirements_dict():\n if extra_requirement in methods_args:\n new_extra_requirements[extra_requirement] = methods_args[extra_requirement]\n extra_requirements = new_extra_requirements\n if extra_requirements[\"pma_scan_http_method\"][0] not in http_methods:\n warn(messages(language, 110))\n extra_requirements[\"pma_scan_http_method\"] = [\"GET\"]\n random_agent_flag = True\n if extra_requirements[\"pma_scan_random_agent\"][0] == \"False\":\n random_agent_flag = False\n threads = []\n total_req = len(extra_requirements[\"pma_scan_list\"])\n thread_tmp_filename = '{}/tmp/thread_tmp_'.format(load_file_path()) + ''.join(\n random.choice(string.ascii_letters + string.digits) for _ in range(20))\n __log_into_file(thread_tmp_filename, 'w', '1', language)\n trying = 0\n if target_type(target) != \"HTTP\":\n target = 'http://' + target\n if test(target, retries, timeout_sec, user_agent, extra_requirements[\"pma_scan_http_method\"][0],\n socks_proxy, verbose_level, trying, total_req, total, num, language) is 0:\n keyboard_interrupt_flag = False\n for idir in extra_requirements[\"pma_scan_list\"]:\n if random_agent_flag:\n user_agent = {'User-agent': random.choice(user_agent_list)}\n t = threading.Thread(target=check,\n args=(\n target + '/' + idir, user_agent, timeout_sec, log_in_file, language,\n time_sleep, thread_tmp_filename, retries,\n extra_requirements[\"pma_scan_http_method\"][0], socks_proxy, scan_id, scan_cmd))\n threads.append(t)\n t.start()\n trying += 1\n if verbose_level > 3:\n info(messages(language, 72).format(trying, total_req, num, total, target_to_host(target),\n \"default_port\", 'pma_scan'))\n while 1:\n try:\n if threading.activeCount() >= thread_number:\n time.sleep(0.01)\n else:\n break\n except KeyboardInterrupt:\n keyboard_interrupt_flag = True\n break\n if keyboard_interrupt_flag:\n break\n else:\n warn(messages(language, 109).format(target))\n\n # wait for threads\n kill_switch = 0\n kill_time = int(timeout_sec / 0.1) if int(timeout_sec / 0.1) is not 0 else 1\n while 1:\n time.sleep(0.1)\n kill_switch += 1\n try:\n if threading.activeCount() is 1 or kill_switch is kill_time:\n break\n except KeyboardInterrupt:\n break\n thread_write = int(open(thread_tmp_filename).read().rsplit()[0])\n if thread_write is 1:\n info(messages(language, 108).format(target, \"default_port\"))\n if verbose_level is not 0:\n __log_into_file(log_in_file, 'a', json.dumps(\n {'HOST': target_to_host(target), 'USERNAME': '', 'PASSWORD': '', 'PORT': '', 'TYPE': 'pma_scan',\n 'DESCRIPTION': messages(language, 174), 'TIME': now(), 'CATEGORY': \"scan\",\n 'SCAN_ID': scan_id, 'SCAN_CMD': scan_cmd}) + '\\n', language)\n os.remove(thread_tmp_filename)\n else:\n warn(messages(language, 69).format('pma_scan', 
target))\n","sub_path":"lib/scan/pma/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":13398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"239986640","text":"import boto3\nimport click\nfrom datetime import datetime, timedelta\nimport json\nimport logging\nimport requests\n\nfrom google.cloud import bigquery\n\nfrom pyspark.sql import functions as F\nfrom pyspark.sql import SparkSession, Row\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef load_data(spark, date_from, date_to):\n \"\"\"Load a set of aggregated metrics for the provided timeframe.\n\n Returns Spark dataframe containing preaggregated user counts per various dimensions.\n\n Args:\n date_from: Start date (inclusive)\n date_to: End date (exclusive)\n \"\"\"\n bq = bigquery.Client()\n\n query = \"\"\"\n WITH\n rank_per_client AS (\n SELECT\n *,\n ROW_NUMBER() OVER (PARTITION BY client_id ORDER BY submission_timestamp DESC) AS rn\n FROM\n `moz-fx-data-shared-prod.telemetry_stable.main_v4`\n WHERE\n DATE(submission_timestamp) >= @date_from\n AND DATE(submission_timestamp) < @date_to\n ),\n latest_per_client_all AS (\n SELECT\n *\n FROM\n rank_per_client\n WHERE\n rn=1\n ),\n latest_per_client AS (\n SELECT\n environment.build.architecture AS browser_arch,\n COALESCE(environment.system.os.name,\n 'Other') AS os_name,\n COALESCE(\n IF (environment.system.os.name IN ('Linux', 'Darwin'),\n CONCAT(REGEXP_EXTRACT(environment.system.os.version, r\"^[0-9]+\"), '.x'),\n environment.system.os.version),\n 'Other') AS os_version,\n environment.system.memory_mb,\n coalesce(environment.system.is_wow64, FALSE) AS is_wow64,\n IF (ARRAY_LENGTH(environment.system.gfx.adapters)>0,\n environment.system.gfx.adapters[OFFSET(0)].vendor_id,\n NULL) AS gfx0_vendor_id,\n IF (ARRAY_LENGTH(environment.system.gfx.adapters)>0,\n environment.system.gfx.adapters[OFFSET(0)].device_id,\n NULL) AS gfx0_device_id,\n IF (ARRAY_LENGTH(environment.system.gfx.monitors)>0,\n environment.system.gfx.monitors[OFFSET(0)].screen_width,\n 0) AS screen_width,\n IF (ARRAY_LENGTH(environment.system.gfx.monitors)>0,\n environment.system.gfx.monitors[OFFSET(0)].screen_height,\n 0) AS screen_height,\n environment.system.cpu.cores AS cpu_cores,\n environment.system.cpu.vendor AS cpu_vendor,\n environment.system.cpu.speed_m_hz AS cpu_speed,\n 'Shockwave Flash' IN (\n SELECT name FROM UNNEST(environment.addons.active_plugins)\n ) AS has_flash\n FROM\n latest_per_client_all\n ),\n transformed AS (\n SELECT\n browser_arch,\n CONCAT(os_name, '-', os_version) AS os,\n COALESCE(SAFE_CAST(ROUND(memory_mb / 1024.0) AS INT64), 0) AS memory_gb,\n is_wow64,\n gfx0_vendor_id,\n gfx0_device_id,\n CONCAT(CAST(screen_width AS STRING), 'x', CAST(screen_height AS STRING)) AS resolution,\n cpu_cores,\n cpu_vendor,\n cpu_speed,\n has_flash\n FROM\n latest_per_client\n ),\n by_dimensions AS (\n SELECT\n *,\n count(*) AS count\n FROM\n transformed\n GROUP BY\n browser_arch,\n os,\n memory_gb,\n is_wow64,\n gfx0_vendor_id,\n gfx0_device_id,\n resolution,\n cpu_cores,\n cpu_vendor,\n cpu_speed,\n has_flash\n )\n select * from by_dimensions\n \"\"\"\n\n job_config = bigquery.QueryJobConfig(\n query_parameters=[\n bigquery.ScalarQueryParameter(\"date_from\", \"DATE\", date_from),\n bigquery.ScalarQueryParameter(\"date_to\", \"DATE\", date_to),\n ]\n )\n hardware_by_dimensions_query_job = bq.query(query, job_config=job_config)\n hardware_by_dimensions_query_job.result()\n\n 
hardware_by_dimensions_df = (\n spark.read.format(\"bigquery\")\n .option(\"project\", hardware_by_dimensions_query_job.destination.project)\n .option(\"dataset\", hardware_by_dimensions_query_job.destination.dataset_id)\n .option(\"table\", hardware_by_dimensions_query_job.destination.table_id)\n .load()\n )\n\n return hardware_by_dimensions_df\n\n\ndef get_os_arch(browser_arch, os_name, is_wow64):\n \"\"\"Infer the OS arch from environment data.\n\n Args:\n browser_arch: the browser architecture string (either \"x86\" or \"x86-64\").\n os_name: the operating system name.\n is_wow64: on Windows, indicates if the browser process is running under WOW64.\n\n Returns:\n 'x86' if the underlying OS is 32bit, 'x86-64' if it's a 64bit OS.\n\n \"\"\"\n is_64bit_browser = browser_arch == \"x86-64\"\n # If it's a 64bit browser build, then we're on a 64bit system.\n if is_64bit_browser:\n return \"x86-64\"\n\n is_windows = os_name == \"Windows_NT\"\n # If we're on Windows, with a 32bit browser build, and |isWow64 = true|,\n # then we're on a 64 bit system.\n if is_windows and is_wow64:\n return \"x86-64\"\n\n # Otherwise we're probably on a 32 bit system.\n return \"x86\"\n\n\ndef get_gpu_vendor_name(gpu_vendor_id):\n \"\"\"Get the string name matching the provided vendor id.\n\n Args:\n id: A string containing the vendor id.\n\n Returns:\n A string containing the vendor name or \"Other\" if\n unknown.\n\n \"\"\"\n GPU_VENDOR_MAP = {\n \"0x1013\": \"Cirrus Logic\",\n \"0x1002\": \"AMD\",\n \"0x8086\": \"Intel\",\n \"Intel Open Source Technology Center\": \"Intel\",\n \"0x5333\": \"S3 Graphics\",\n \"0x1039\": \"SIS\",\n \"0x1106\": \"VIA\",\n \"0x10de\": \"NVIDIA\",\n \"0x102b\": \"Matrox\",\n \"0x15ad\": \"VMWare\",\n \"0x80ee\": \"Oracle VirtualBox\",\n \"0x1414\": \"Microsoft Basic\",\n }\n return GPU_VENDOR_MAP.get(gpu_vendor_id, \"Other\")\n\n\ndef get_device_family_chipset(vendor_id, device_id, device_map):\n \"\"\"Get the family and chipset strings given the vendor and device ids.\n\n Args:\n vendor_id: a string representing the vendor id (e.g. '0xabcd').\n device_id: a string representing the device id (e.g. 
'0xbcde').\n\n Returns:\n A string in the format \"Device Family Name-Chipset Name\"\n or \"Other\" if unknown.\n\n \"\"\"\n if vendor_id not in device_map:\n return \"Other\"\n\n if device_id not in device_map[vendor_id]:\n return \"Other\"\n\n return \"-\".join(device_map[vendor_id][device_id])\n\n\ndef invert_device_map(m):\n \"\"\"Inverts a GPU device map fetched from the jrmuizel's Github repo.\n\n The layout of the fetched GPU map layout is:\n Vendor ID -> Device Family -> Chipset -> [Device IDs]\n We should convert it to:\n Vendor ID -> Device ID -> [Device Family, Chipset]\n\n \"\"\"\n device_id_map = {}\n for vendor, u in m.items():\n device_id_map[\"0x\" + vendor] = {}\n for family, v in u.items():\n for chipset, ids in v.items():\n device_id_map[\"0x\" + vendor].update(\n {(\"0x\" + gfx_id): [family, chipset] for gfx_id in ids}\n )\n return device_id_map\n\n\ndef fetch_json(uri):\n \"\"\"Perform an HTTP GET on the given uri, return the results as json.\n\n If there is an error fetching the data, raise an exception.\n\n Args:\n uri: the string URI to fetch.\n\n Returns:\n A JSON object with the response.\n\n \"\"\"\n data = requests.get(uri)\n # Raise an exception if the fetch failed.\n data.raise_for_status()\n return data.json()\n\n\ndef build_device_map():\n \"\"\"Build a dictionary that will help us map vendor/device ids to device families.\"\"\"\n intel_raw = fetch_json(\"https://github.com/jrmuizel/gpu-db/raw/master/intel.json\")\n nvidia_raw = fetch_json(\"https://github.com/jrmuizel/gpu-db/raw/master/nvidia.json\")\n amd_raw = fetch_json(\"https://github.com/jrmuizel/gpu-db/raw/master/amd.json\")\n\n device_map = {}\n device_map.update(invert_device_map(intel_raw))\n device_map.update(invert_device_map(nvidia_raw))\n device_map.update(invert_device_map(amd_raw))\n\n return device_map\n\n\nDEVICE_MAP = build_device_map()\n\n\ndef to_dict(row):\n cpu_speed = \"Other\" if row[\"cpu_speed\"] is None else str(round(row[\"cpu_speed\"] / 1000.0, 1))\n return {\n \"os\": row[\"os\"],\n \"arch\": row[\"browser_arch\"],\n \"cpu_cores\": row[\"cpu_cores\"],\n \"cpu_vendor\": row[\"cpu_vendor\"],\n \"cpu_speed\": cpu_speed,\n \"resolution\": row[\"resolution\"],\n \"memory_gb\": row[\"memory_gb\"],\n \"has_flash\": row[\"has_flash\"],\n \"os_arch\": get_os_arch(row[\"browser_arch\"], row[\"os\"], row[\"is_wow64\"]),\n \"gfx0_vendor_name\": get_gpu_vendor_name(row[\"gfx0_vendor_id\"]),\n \"gfx0_model\": get_device_family_chipset(\n row[\"gfx0_vendor_id\"], row[\"gfx0_device_id\"], DEVICE_MAP\n ),\n \"count\": row[\"count\"],\n }\n\n\ndef add_counts(dict):\n count = dict[\"count\"]\n return {k: {v: count} for k, v in dict.items() if k != \"count\"}\n\n\ndef combine(acc, row):\n for metric, values in row.items():\n acc_metric = acc.get(metric, {})\n for value, count in values.items():\n acc_metric[value] = acc_metric.get(value, 0) + count\n acc[metric] = acc_metric\n return acc\n\n\ndef aggregate(hardware_by_dimensions_df):\n return hardware_by_dimensions_df.rdd.map(to_dict).map(add_counts).fold({}, combine)\n\n\ndef collapse_buckets(aggregated_data, count_threshold, sample_count):\n OTHER_KEY = \"Other\"\n collapsed_groups = {}\n for dimension, counts in aggregated_data.items():\n collapsed_counts = {}\n for k, v in counts.items():\n if dimension == \"resolution\" and k == \"0x0\":\n collapsed_counts[OTHER_KEY] = collapsed_counts.get(OTHER_KEY, 0) + v\n elif v < count_threshold:\n if dimension == \"os\":\n # create generic key per os name\n [os, ver] = k.split(\"-\", 1)\n 
generic_os_key = os + \"-\" + \"Other\"\n collapsed_counts[generic_os_key] = (\n collapsed_counts.get(generic_os_key, 0) + v\n )\n else:\n collapsed_counts[OTHER_KEY] = collapsed_counts.get(OTHER_KEY, 0) + v\n else:\n collapsed_counts[k] = collapsed_counts.get(k, 0) + v\n if dimension == \"os\":\n # The previous grouping might have created additional os groups.\n # Let's check again.\n final_collapsed = {}\n for k, v in collapsed_counts.items():\n if v < count_threshold:\n final_collapsed[OTHER_KEY] = final_collapsed.get(OTHER_KEY, 0) + v\n else:\n final_collapsed[k] = v\n collapsed_counts = final_collapsed\n collapsed_groups[dimension] = collapsed_counts\n\n ratios = {}\n for dimension, counts in collapsed_groups.items():\n ratios[dimension] = {\n str(metric): count / float(sample_count) for metric, count in counts.items()\n }\n\n return ratios\n\n\ndef flatten_aggregates(aggregates):\n keys_translation = {\n \"arch\": \"browserArch_\",\n \"cpu_cores\": \"cpuCores_\",\n # \"cpu_cores_speed\": \"cpuCoresSpeed_\",\n \"cpu_vendor\": \"cpuVendor_\",\n \"cpu_speed\": \"cpuSpeed_\",\n \"gfx0_vendor_name\": \"gpuVendor_\",\n \"gfx0_model\": \"gpuModel_\",\n \"resolution\": \"resolution_\",\n \"memory_gb\": \"ram_\",\n \"os\": \"osName_\",\n \"os_arch\": \"osArch_\",\n \"has_flash\": \"hasFlash_\",\n }\n flattened_list = []\n for aggregate in aggregates:\n flattened = {}\n for metric, values in json.loads(aggregate).items():\n if metric in keys_translation:\n for k, v in values.items():\n flattened[keys_translation[metric] + k] = v\n flattened[\"date\"] = json.loads(aggregate)[\"date\"]\n flattened_list.append(flattened)\n return flattened_list\n\n\ndef upload_data_s3(spark, bq_table_name, s3_bucket, s3_path):\n hardware_aggregates_df = (\n spark.read.format(\"bigquery\").option(\"table\", bq_table_name).load()\n )\n\n map_fields = [\n \"arch\",\n \"cpu_cores\",\n \"cpu_vendor\",\n \"cpu_speed\",\n \"gfx0_vendor_name\",\n \"gfx0_model\",\n \"resolution\",\n \"memory_gb\",\n \"os\",\n \"os_arch\",\n \"has_flash\",\n ]\n select_exprs = [\"date_from AS date\"]\n for field in map_fields:\n select_exprs.append(f\"MAP_FROM_ENTRIES({field}.key_value) AS {field}\")\n aggregates = hardware_aggregates_df.selectExpr(select_exprs).toJSON().collect()\n\n aggregates_flattened = sorted(\n flatten_aggregates(aggregates), key=lambda a: a[\"date\"], reverse=True\n )\n aggregates_flattened_json = json.dumps(aggregates_flattened, indent=4)\n\n with open(\"hwsurvey-weekly.json\", \"w\") as output_file:\n output_file.write(aggregates_flattened_json)\n\n # Store dataset to S3. Since S3 doesn't support symlinks, make\n # two copies of the file: one will always contain the latest data,\n # the other for archiving.\n archived_file_copy = f\"hwsurvey-weekly-{datetime.today().strftime('%Y-%m-%d')}.json\"\n\n logger.info(f\"Uploading data to s3 bucket: {s3_bucket}, path: {s3_path}\")\n client = boto3.client(\"s3\", \"us-west-2\")\n transfer = boto3.s3.transfer.S3Transfer(client)\n transfer.upload_file(\n \"hwsurvey-weekly.json\", s3_bucket, s3_path + archived_file_copy\n )\n transfer.upload_file(\n \"hwsurvey-weekly.json\", s3_bucket, s3_path + \"hwsurvey-weekly.json\"\n )\n\n\ndate_type = click.DateTime()\n\n\n@click.command()\n@click.option(\n \"--date_from\",\n type=date_type,\n required=True,\n help=\"Aggregation start date (e.g. 
yyyy-mm-dd)\",\n)\n@click.option(\"--bq_table\", required=True, help=\"Output BigQuery table\")\n@click.option(\"--temporary_gcs_bucket\", required=True, help=\"GCS bucket for writing to BigQuery\")\n@click.option(\"--s3_bucket\", required=True, help=\"S3 bucket for storing data\")\n@click.option(\"--s3_path\", required=True, help=\"S3 path for storing data\")\n@click.option(\n \"--past_weeks\",\n type=int,\n default=0,\n help=\"Number of past weeks to include (useful for backfills)\",\n)\ndef main(date_from, bq_table, temporary_gcs_bucket, s3_bucket, s3_path, past_weeks):\n \"\"\"Generate weekly hardware report for [date_from, date_from_7) timeframe\n\n Aggregates are incrementally inserted to provided BigQuery table,\n finally table is exported to JSON and copied to S3.\n \"\"\"\n date_from = date_from.date()\n logger.info(f\"Starting, date_from={date_from}, past_weeks={past_weeks}\")\n spark = SparkSession.builder.appName(\"hardware_report_dashboard\").getOrCreate()\n\n for batch_number in range(0, past_weeks + 1):\n # generate aggregates\n batch_date_from = date_from - timedelta(weeks=1 * batch_number)\n batch_date_to = batch_date_from + timedelta(days=7)\n logger.info(\n f\"Running batch {batch_number}/{past_weeks}, timeframe: [{batch_date_from}, {batch_date_to})\" # noqa\n )\n hardware_by_dimensions_df = load_data(spark, batch_date_from, batch_date_to)\n\n aggregated = aggregate(hardware_by_dimensions_df)\n\n # Collapse together groups that count less than 1% of our samples.\n sample_count = hardware_by_dimensions_df.agg(F.sum(\"count\")).collect()[0][0]\n threshold_to_collapse = int(sample_count * 0.01)\n\n aggregates = collapse_buckets(aggregated, threshold_to_collapse, sample_count)\n aggregates[\"date_from\"] = batch_date_from\n aggregates[\"date_to\"] = batch_date_to\n\n # save to BQ\n aggregates_df = spark.createDataFrame(Row(**x) for x in [aggregates])\n aggregates_df.write.format(\"bigquery\").option(\"table\", bq_table).option(\n \"temporaryGcsBucket\", temporary_gcs_bucket\n ).mode(\"append\").save()\n\n upload_data_s3(spark, bq_table, s3_bucket, s3_path)\n\n spark.stop()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"public_data_report/hardware_report/hardware_report.py","file_name":"hardware_report.py","file_ext":"py","file_size_in_byte":15946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"585391647","text":"\nimport pandas as pd\nimport seaborn as sns\nimport urllib3 as url\nimport json\n\n# Link to SLO County Summary: https://datausa.io/profile/geo/san-luis-obispo-county-ca\n\ndef getDataFrame(route):\n http = url.PoolManager()\n resp = http.request('GET', route)\n data = json.loads(resp.data)['data']\n return pd.DataFrame(data)\n\ndef test():\n df = getDataFrame(\n 'http://datausa.io/api/data?Geography=05000US06079:children&measure=Household Income by Race,Household Income by Race Moe&Race=0')\n print(df)\n sns.catplot(x=\"Household Income by Race\", y=\"Geography\", kind=\"swarm\", data=df.sample(15))\n df = getDataFrame('http://datausa.io/api/data?geo=05000US06079&measure=Average%20Wage,Average%20Wage%20Appx%20MOE,Total%20Population,Total%20Population%20MOE%20Appx,Record%20Count&drilldowns=Gender&Employment%20Time%20Status=1&Detailed%20Occupation=119XXX,412010,412031,252020,533030&Record%20Count>=5')\n print(df)\n sns.catplot(x=\"Gender\", y=\"Average Wage\", hue=\"Detailed Occupation\", kind=\"bar\", data=df)\n\n\nif __name__ == '__main__':\n 
test()\n","sub_path":"data_retrieval/datausa/economic.py","file_name":"economic.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"465903410","text":"#! /usr/bin/python3\nimport random\nimport time\nimport os\nimport paho.mqtt.client as mqtt\nbroker = \"127.0.0.1\"#\"192.168.99.100\" #\"192.168.1.184\"#\"test.mosquitto.org\"\nport = 2000\ntopic = \"RDS19\"#topic to be used for communication\n\n#On receiving the message print it\ndef on_message(client, userdata, message):\n print(message.payload.decode('utf-8'))\n\n#On connecting to the broker subscribe to the topic\ndef on_connect(client, userdata, flags, rc):\n print('connected')\n client.subscribe(topic)\n\ndef main():\n mqtt.Client.connected_flag=False#create flag in class\n client = mqtt.Client(\"Server\")#create new instance\n client.on_message= on_message#attach function to callback\n client.on_connect = on_connect#attach a callback on connect\n #bind call back function\n client.loop_start()#start a loop\n print(\"Connecting to broker \",broker)\n client.connect(broker)#connect to broker\n while True:\n pass\n\nif __name__==\"__main__\":\n\tmain()\n","sub_path":"03-MiddlewareAndBackend/examples/MQTT/subscriber.py","file_name":"subscriber.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"144303195","text":"import seaborn as sns\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nMBARC_genera = ['Clostridium','Ruminiclostridium','Coraliomargarita',\n 'Corynebacterium','Desulfosporosinus','Desulfotomaculum','Echinicola',\n 'Escherichia','Fervidobacterium','Frateuria','Halovivax','Hirschia',\n 'Olsenella','Pseudomonas','Salmonella','Segniliparus','Sediminispirochaeta',\n 'Meiothermus','Natronobacterium','Natronococcus','Nocardiopsis',\n 'Streptococcus','Terriglobus','Thermobacillus']\n\ndf = pd.read_csv(\"kraken/SRR3656745.report\", sep=\"\\t\", header=None)\n\ndf = df[df[3] == \"G\"]\ndf = df.sort_values(by=1, ascending=True)\ndf = df[df[1] > 10000]\n\ncolors = list(df.apply(lambda x: 'red' if any([x[5].lstrip() == t for t in MBARC_genera]) else 'black', axis=1))#\n\nfig = plt.figure()\nax = df[1].plot(kind=\"barh\", color=colors, logx=True, figsize=(7,5))\nax.set_xlabel(\"no. of assigned reads\")\nax.set_ylabel(\"Genera sorted by no. 
of assigned reads\")\nax.set_yticklabels([])\nax.set_yticks([])\nplt.plot(0, 0, color='black')\nplt.plot(0, 0, color='red')\nax.legend([\"non-MBARC genera\", 'MBARC genera'])\nfig.savefig('FigS4.pdf')\n","sub_path":"FigS4.py","file_name":"FigS4.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"76053573","text":"import os\nimport json\nimport math\nfrom datetime import datetime, timedelta\nfrom email.utils import parsedate_tz\n\nclass FileComponents:\n\n def __init__(self, filepath):\n absolute_path = os.path.abspath(filepath) if filepath is not None else None\n path,file = os.path.split(absolute_path) if filepath is not None else None\n name = os.path.splitext(file)[0] if filepath is not None else None\n extension = os.path.splitext(file)[1] if filepath is not None else None\n\n self.absolute_path = absolute_path\n self.path = path + '/'\n self.name = name\n self.extension = extension\n\nclass Tweet:\n\n #################################\n # INITIALISER #\n #################################\n def __init__(self, tweet_object):\n # store original json (for debugging/cleaning/verification)\n self.original_data = tweet_object\n\n # convert object string representation to JSON dict\n tweet_json = json.loads(tweet_object)\n\n self.is_retweet = (\"retweeted_status\" in tweet_json)\n self.is_quote_tweet = (\"quoted_status\" in tweet_json)\n self.retweeted_id = None\n self.quoted_id = None\n\n self.id = int(tweet_json[\"id_str\"]) if \"id_str\" in tweet_json else None\n self.likes = int(tweet_json[\"favorite_count\"]) if \"favorite_count\" in tweet_json else None\n self.retweets = int(tweet_json[\"retweet_count\"]) if \"retweet_count\" in tweet_json else None\n self.timestamp = self.process_datetime(tweet_json[\"created_at\"])\n\n # RETWEET\n if self.is_retweet:\n self.retweeted_id = int(tweet_json[\"retweeted_status\"][\"id_str\"]) if \"id_str\" in tweet_json[\"retweeted_status\"] else None\n retweet_likes = int(tweet_json[\"retweeted_status\"][\"favorite_count\"]) if \"favorite_count\" in tweet_json[\"retweeted_status\"] else None\n self.likes = max(self.likes, retweet_likes)\n\n # QUOTE TWEET\n if self.is_quote_tweet:\n self.quoted_id = int(tweet_json[\"quoted_status_id\"]) if \"quoted_status_id\" in tweet_json else None\n\n #################################\n # HELPER FUNCTIONS #\n #################################\n def process_datetime(self, date_string):\n time_tuple = parsedate_tz(date_string.strip())\n dt = datetime(*time_tuple[:6])\n return str(dt - timedelta(seconds=time_tuple[-1]))\n\n #################################\n # CSV LINE VALUES #\n #################################\n def values(self):\n return [str(self.id), str(self.likes), str(self.retweets), str(self.retweeted_id), str(self.quoted_id), self.timestamp]\n","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"147966421","text":"# coding=utf-8\n\n\ndef insert_sort(lst, cmp):\n if len(lst) <= 1:\n return\n for i in range(1, len(lst)):\n tmp = lst[i]\n j = i\n while cmp(tmp, lst[j -1]) == True and j > 0:\n lst[j] = lst[j -1]\n j -= 1\n lst[j] = tmp\n","sub_path":"insert_sort.py","file_name":"insert_sort.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} 
+{"seq_id":"517811807","text":"#!/usr/bin/python\nimport getopt, sys\n \ndef main():\n\n in_filename = 'fin'\n out_filename = 'fout'\n\n print('argv : %s' %(sys.argv[1:]))\n\n try:\n options, remainder = getopt.getopt(sys.argv[1:], 'i:o:', ['input=', 'output='])\n except getopt.GetoptError:\n sys.exit()\n\n print('options : %s' %(options))\n\n for opt, arg in options:\n if opt in ('-i', '--input'):\n in_filename = arg\n elif opt in ('-o', '--output'):\n out_filename = arg\n\n print('remainder : %s' %(remainder))\n\n print('input file = %s' %(in_filename))\n print('output file = %s' %(out_filename))\n\n return 0\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"getopt.py","file_name":"getopt.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"167412487","text":"import re\nfrom pychord import Chord \nimport json\n\n\ndef vectorizeChords(chord):\n newChordVector = []\n for component in Chord(chord).components():\n velement = numberfyChordComponent(component)\n newChordVector.append(velement)\n return newChordVector\n\n\ndef numberfyChordComponent(component):\n chordValue = 0\n first = True\n for letter in component:\n if first:\n first = False\n if letter == \"A\":\n chordValue = chordValue + 0\n elif letter == \"B\":\n chordValue = chordValue + 1\n elif letter == \"C\":\n chordValue = chordValue + 1.5\n elif letter == \"D\":\n chordValue = chordValue + 2.5\n elif letter == \"E\":\n chordValue = chordValue + 3.5\n elif letter == \"F\":\n chordValue = chordValue + 4\n elif letter == \"G\":\n chordValue = chordValue + 5\n else:\n if letter == \"#\":\n chordValue = chordValue + 0.5\n elif letter == \"b\":\n chordValue = chordValue - 0.5\n #passando o mod para garantir que não vai ter valores negativos ou maiores q 6 (ex: G##)\n return(chordValue % 6)\n \n\ndef processChords(scraped):\n with open(scraped, 'r') as file:\n scraped_text = file.read()\n #lines = open( \"chordsText.txt\", \"r\" ).readlines()[::2]\n #scraped_text = ''.join(lines)\n scraped_text=scraped_text.split(\" \")\n for i, word in enumerate(scraped_text):\n if (word == ''):\n scraped_text[i] = \" \"\n #o regex abaixo serve para identificar todo tipo de acordes (pode estar incompleto, precisa da ajuda de um profissional de música)\n notes = \"[CDEFGAB]\"\n accidentals = \"(?:#|##|b|bb)?\"\n chords = \"(?:maj|min|m|sus|aug|dim|add|°)?\"\n dim_aum = \"(?:\\+|\\-)?\"\n additions = \"[0-9]?[0-9]?\"\n par = \"(?:\\((?:#|##|b|bb)?(?:maj|min|m|sus|aug|dim|M|add|°)?[0-9]?[0-9]?/?[0-9]?[0-9]?-?\\+?\"+\"\\))?\"\n regex = notes + accidentals + chords + dim_aum+ additions +\"M?\"+dim_aum\n return(list(set(re.findall(r'\\b' + regex + \"/?\" + \"(?:\"+regex+\")?\" + par+r'(?!\\w)', ''.join(scraped_text)))))\n\ndef dist_between_components(comp1, comp2):\n #DISTANCIA DE A PARA G# = MIN(DIST(A,0)+DIST(G#,6), DIST(A,G#))\n return min(abs(comp1-0) + abs(6-comp2), abs(comp1-comp2))\n\n\ndef find_closest_vector(vectorized_chord, chord_vectors, chord_dictionary):\n min_distance = 10000\n index = 0\n for j, vchord in enumerate(chord_vectors):\n distance = 0\n for i, vcomponent in enumerate(vchord):\n if i < min(len(vectorized_chord), len(vchord)):\n #pega a distancia entre dois componentes (na mesma posição) e soma à distancia. 
repete para n posições onde n é a qtd de componentes do menor acorde\n distance = distance + dist_between_components(vectorized_chord[i], vcomponent)\n #soma à distancia a diferença entre as dimensões dos acordes\n distance = distance + abs(len(vectorized_chord)-len(vchord))\n if(distance < min_distance):\n min_distance = distance\n print(\" the new closest chord is:\" + chord_dictionary[j] + \" with a distance of: \" + str(distance))\n index = j\n \n #transformar estimated_chord em um vetor de notas estimated_chord_vector\n #achar vetor em chord_vectors mais próximo desse vetor\n #lembrando que os acordes estão em dimensões diferentes (existem acordes com 3, 4 ou 5 notas), então o algoritmo tem que ser adaptado\n #soluções: comparar tríades com tríades, apenas\n # comparar só a quantidades de notas presentes no menor dos dois acordes sendo analisados\n\n print(\" the final closest chord is:\" + chord_dictionary[index] + \" with a distance of: \" + str(min_distance))\n return chord_dictionary[index]\n #transformar estimated_chord em um vetor de notas estimated_chord_vector\n #achar vetor em chord_vectors mais próximo desse vetor\n #lembrando que os acordes estão em dimensões diferentes (existem acordes com 3, 4 ou 5 notas), então o algoritmo tem que ser adaptado\n #soluções: comparar tríades com tríades, apenas\n # comparar só a quantidades de notas presentes no menor dos dois acordes sendo analisados\n\n\n\n #return estimated_chord\n\n\n\nextracted_file = \"chordChanges.json\"\nchord_dictionary = processChords(\"chordsText.txt\")\nchord_vectors = []\nprint(\"chord dictionary (made with scraped chords):\")\nprint(chord_dictionary)\nfor chord in chord_dictionary:\n chord_vectors.append(vectorizeChords(chord))\nf = open(extracted_file)#chordChanges.json\nextracted = json.load(f)\nf.close()\n\nprint(\"chords extracted:\")\nprint(extracted)\nfor i, chord in enumerate(extracted):\n if(chord[\"estimated_chord\"]!= \"N\"):\n print(\"for chord: \"+chord[\"estimated_chord\"]+\" number \"+str(i)+\"-----------------------------------\")\n vectorized_chord = vectorizeChords(chord[\"estimated_chord\"])\n #print(\"vectorized_chord\")\n #print(vectorized_chord)\n #print(\"chord_vectors\")\n #print(chord_vectors)\n #print(\"closest_chord:\")\n #print(chord[\"estimated_chord\"])\n \n extracted[i][\"estimated_chord\"] = find_closest_vector(vectorized_chord, chord_vectors, chord_dictionary)\n\n # = find_closest_chord(chord, chord_vectors)\n\nprint(\"chords merged (made by merging extracted and scraped chords):\")\nwith open(\"chordsMerged.txt\", \"w\"):\n print (extracted)\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"481100966","text":"import string\nimport os\n\n__author__ = \"Samuele Dell'Angelo (Red Hat)\"\n\nclass PropertyManager:\n \n _propertyDict = dict()\n \n def __init__(self, filename = None):\n if(filename != None):\n dir = self.getRightDir()\n fpath=dir+\"\"\"/\"\"\"+filename\n fp = open(fpath,'r')\n splitLines = [string.split(line, '=') for line in fp.readlines() if \"=\" in line]\n propertyNames = [tup[0] for tup in splitLines]\n propertyList = map(lambda x: x[1].strip(), splitLines)\n self._propertyDict = dict([(propertyNames[i],propertyList[i]) for i in range(len(splitLines))])\n fp.close()\n \n \n def test(self):\n print(self._propertyDict)\n \n \n \n def getValue(self, key):\n try:\n return self._propertyDict[key]\n except:\n return None\n\n\n 
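# create an empty properties file at the given path, truncating any existing content\n 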
def create(self, filepath):\n dir = self.getRightDir()\n fpath=dir+\"\"\"/\"\"\"+filepath\n fp = open(fpath,'w')\n fp.close()\n\n\n def writeValue(self, filename, key, value):\n dir = self.getRightDir()\n fpath=dir+\"\"\"/\"\"\"+filename\n fp = open(fpath,'a')\n line = key+\"=\"+value+\"\\n\"\n fp.write(line)\n fp.close()\n\n\n def updateValue(self, filename, key, newValue):\n dir = self.getRightDir()\n fpath=dir+\"\"\"/\"\"\"+filename\n fp = open(fpath,'r')\n lines = fp.readlines()\n fp.close()\n fpw = open(fpath, 'w')\n newline = key+\"=\"+newValue+\"\\n\"\n for line in lines:\n if(line.split('=')[0] != key):\n fpw.write(line)\n\n fpw.write(newline)\n fpw.close()\n\n def getRightDir(self):\n dir=os.path.dirname(__file__)\n #remove package dir\n dir=dir[:dir.find(__name__.split('.')[0])]\n return dir\n","sub_path":"utils/PropertyManager.py","file_name":"PropertyManager.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"618039525","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Alumno',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('activo', models.BooleanField(default=True)),\n ('controlNumber', models.CharField(max_length=9, verbose_name=b'numero de control', blank=True)),\n ('headshot', models.ImageField(default=b'default.jpg', upload_to=b'%Y/%m/%d')),\n ('nombre', models.CharField(max_length=60)),\n ],\n options={\n 'ordering': ['pk', 'nombre'],\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Grupo',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('generacion', models.IntegerField()),\n ('carrera', models.CharField(max_length=5)),\n ('salon', models.CharField(max_length=5)),\n ('activo', models.BooleanField(default=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Phone',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('number', models.CharField(max_length=15)),\n ('owner', models.ForeignKey(related_name='phones', to='alumnos.Alumno')),\n ],\n options={\n 'verbose_name': 'Phone',\n 'verbose_name_plural': 'Phones',\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='alumno',\n name='fk',\n field=models.ForeignKey(to='alumnos.Grupo'),\n preserve_default=True,\n ),\n ]\n","sub_path":"alumnos/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"264378297","text":"import socket\n\nfrom game_logic import display_table\nfrom multiplayer_logic import read_socket_to_dict, write_socket_from_dict\n\nif __name__ == \"__main__\":\n sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sck.connect(('127.0.0.1', 2137))\n\n messages_buffer = b''\n\n # connecting to server and setting nickname\n data_dict = {\"token\": 1}\n while data_dict[\"token\"] == 1:\n write_socket_from_dict(sck, {\"nickname\": input(\"Type in nickname: \")})\n messages_buffer, data_dict = read_socket_to_dict(sck, messages_buffer)\n print(data_dict[\"msg\"])\n\n while data_dict[\"token\"] != 3:\n 
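# main loop: keep handling server messages until the end-of-game token (3) arrives\n 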
messages_buffer, data_dict = read_socket_to_dict(sck, messages_buffer)\n\n if data_dict[\"token\"] == 0:\n print(data_dict[\"msg\"])\n elif data_dict[\"token\"] == 1:\n print(data_dict[\"msg\"])\n display_table(data_dict[\"table\"])\n while data_dict[\"token\"] == 1:\n write_socket_from_dict(sck, {\"move\": input(\"Type in move: \")})\n messages_buffer, data_dict = read_socket_to_dict(sck, messages_buffer)\n display_table(data_dict[\"table\"])\n print(data_dict[\"msg\"])\n\n display_table(data_dict[\"table\"])\n print(data_dict[\"msg\"])\n sck.close()\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"377846776","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 3 20:38:24 2020\n\n@author: Sosei Ikeda\n\"\"\"\n\nimport glob\nfrom PIL import Image\nfrom azure.cognitiveservices.vision.face import FaceClient\nfrom msrest.authentication import CognitiveServicesCredentials\nimport cv2\nimport numpy as np\n\nKEY = 'fabbf6804296424baf139a881c3a659b'\nENDPOINT = 'https://sosei-resource.cognitiveservices.azure.com/'\nface_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY))\n\ndef face_position(faceDictionary):\n rect = faceDictionary.face_rectangle\n x = rect.left\n y = rect.top\n h = rect.height\n w = rect.width\n \n l = h if h > w else w\n\n x = int(x - (0.1 * l))\n y = int(y - (0.1 * l))\n l = int(1.2 * l)\n \n return x,y,l\n\ndef crop_face(imageUrl,x,y,l):\n img = Image.open(imageUrl)\n imgArray = np.asarray(img)\n imgArrayCropped = imgArray[y:y+l, x:x+l]\n imgArrayCropped = cv2.cvtColor(imgArrayCropped, cv2.COLOR_BGR2RGB)\n \n return imgArrayCropped\n\nimage_url_array = [file for file in glob.glob('*.jpg')]\n\nfor i in range(59,len(image_url_array)):\n detected_faces = face_client.face.detect_with_url(url=\"https://raw.githubusercontent.com/Sosei-Ikeda/cinderella_dataset/master/\"+image_url_array[i], detection_model='detection_02')\n if not detected_faces:\n print(f'No face detected from this image : {image_url_array[i]}')\n else:\n rawimg = cv2.imread(image_url_array[i])\n cv2.imwrite(f'rawimg/{i}.jpg', rawimg)\n left, top, length = face_position(detected_faces[0])\n cropped_face = crop_face(image_url_array[i],left,top,length)\n cv2.imwrite(f'img/{i}.jpg', cropped_face)\n\n#i = 141\n#detected_faces = face_client.face.detect_with_url(url=\"https://raw.githubusercontent.com/Sosei-Ikeda/cinderella_face/master/\"+image_url_array[i], detection_model='detection_02')\n#if not detected_faces:\n# print(f'No face detected from this image : {image_url_array[i]}')\n#else:\n# print(len(detected_faces))\n# left, top, length = face_position(detected_faces[0])\n# cropped_face = crop_face(image_url_array[i],left,top,length)\n# cv2.imwrite(f'img/{i}.jpg', cropped_face)","sub_path":"face.py","file_name":"face.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"277607788","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 12 15:20:22 2019\n\n@author: \n\"\"\"\n\nimport numpy as np\nfrom numpy.linalg import norm\nfrom numpy.random import randn\nfrom numpy.random import uniform\nimport scipy.stats\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport random\nfrom numpy.random import rand\nfrom scipy.stats import multivariate_normal\nfrom time import sleep\nimport time\ndef calc_angle(a):\n a = a % (2 * np.pi)\n \n 
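# vectorised wrap: fold any angle greater than pi back into the (-pi, pi] range\n 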
#if a > np.pi: # move to [-pi, pi)\n # a -= 2 * np.pi\n \n a[a>np.pi] -= 2 * np.pi\n return a\n\ndef initialize_unif_part(x_range, y_range, theta_range, N):\n particles = np.empty((N, 3))\n particles[:, 0] = uniform(x_range[0], x_range[1], size=N)\n particles[:, 1] = uniform(y_range[0], y_range[1], size=N)\n particles[:, 2] = uniform(theta_range[0], theta_range[1], size=N)\n particles[:,2] = calc_angle(particles[:,2])\n \n return particles\n \ndef create_gaussian_particles(mean, std, N):\n particles = np.empty((N, 3))\n particles[:, 0] = mean[0] + (randn(N) * std[0])\n particles[:, 1] = mean[1] + (randn(N) * std[1])\n particles[:, 2] = mean[2] + (randn(N) * std[2])\n particles[:,2] = calc_angle(particles[:,2])\n return particles\n\ndef normpdf(x, mu, std):\n\n cov = np.diag((1,1))*std\n part1 = 1 / ( ((2* np.pi)**(len(mu)/2)) * (np.linalg.det(cov)**(1/2)) )\n part2 = (-1/2) * ((x-mu).T.dot(np.linalg.inv(cov))).dot((x-mu))\n return float(part1 * np.exp(part2))\n \nclass ParticleFilter:\n \n def __init__(self,N,landmarks,Q_std,R_std,init_x=None):\n \n self.N = N\n self.landmarks = landmarks\n \n if init_x is None:\n self.particles = initialize_unif_part((-12,12),(-12,12),(-np.pi,np.pi),N)\n else:\n self.particles = create_gaussian_particles(init_x,np.array([0.1,0.1,0.1]),N)\n\n self.weights = np.ones(N)/N\n self.particles[:,2] = calc_angle(self.particles[:,2])\n self.Q_std = Q_std\n self.R_std=R_std\n \n def predict(self,u,dt):\n N=self.N\n std = self.Q_std\n\n \"\"\"self.particles[:,0] = self.particles[:,0] + np.cos(self.particles[:,2])*u[0]*dt + (randn(N)*std[0])\n self.particles[:,1] = self.particles[:,1] + np.sin(self.particles[:,2])*u[0]*dt + (randn(N)*std[1])\n self.particles[:,2] = self.particles[:,2] + u[1]*dt + (randn(N) * std[2])\n self.particles[:,2] = calc_angle(self.particles[:,2])\"\"\"\n self.particles[:,0] = self.particles[:,0] + np.cos(self.particles[:,2])*(u[0]+ (randn(N)*std[0]))*dt \n self.particles[:,1] = self.particles[:,1] + np.sin(self.particles[:,2])*(u[0]+ (randn(N)*std[1]))*dt \n self.particles[:,2] = self.particles[:,2] + dt*(u[1]+randn(N) * std[2])\n self.particles[:,2] = calc_angle(self.particles[:,2])\n \n \"\"\"self.particles[:,2] = self.particles[:,2] + dt*(u[1])\n self.particles[:,2] = calc_angle(self.particles[:,2])\n self.particles[:,0] = self.particles[:,0] + np.cos(self.particles[:,2])*(u[0])*dt \n self.particles[:,1] = self.particles[:,1] + np.sin(self.particles[:,2])*(u[0])*dt\"\"\"\n \n \n def update(self,z,z_phi):\n for i in range(0,self.landmarks.shape[0]) :\n current_lm = self.landmarks[i]\n\n distance = np.linalg.norm(self.particles[:, 0:2] - current_lm, axis=1)\n temp = calc_angle(np.arctan2(current_lm[1]-self.particles[:,1],current_lm[0]-self.particles[:,0])-self.particles[:,2])\n dist2 = np.array(temp-z_phi[i])\n dist2= calc_angle(dist2)\n full_dist = (np.array([distance,dist2])).T\n #w1 =scipy.stats.norm(distance, self.R_std[0]).pdf(z[i])\n #w2 = scipy.stats.norm(dist2, self.R_std[1]).pdf(z_phi[i]) # gaus or histogram\n w1 = [normpdf(np.array([z[i],z_phi[i]]),full_dist[ttt],self.R_std) for ttt in range(0,full_dist.shape[0])]\n self.weights *= w1\n \n self.weights = self.weights + 10**(-10)\n \n self.weights = self.weights / sum(self.weights)\n \n def resample(self):\n N = self.N\n \n positions = (rand(N) + range(N)) / N\n \n indexes = np.zeros(N, 'i')\n cum_sum = np.cumsum(self.weights)\n i, j = 0, 0\n while i < N:\n if positions[i] < cum_sum[j]:\n indexes[i] = j\n i += 1\n else:\n j += 1\n \n self.particles[:] = self.particles[indexes]\n 
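# after resampling, every surviving particle gets the same uniform weight again\n 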
self.weights[:] = self.weights[indexes]\n self.weights.fill(1.0 / len(self.weights))\n def multinomial_resample(self):\n cum_sum = np.cumsum(self.weights)\n cum_sum[-1] = 1. \n indexes= np.searchsorted(cum_sum, rand(len(self.weights)))\n self.particles[:] = self.particles[indexes]\n self.weights[:] = self.weights[indexes]\n self.weights.fill(1.0 / len(self.weights))\n \n \n \n def residual_resample(self):\n N = self.N\n indexes = np.zeros(N, 'i')\n \n \n num_copies = (N*np.asarray(self.weights)).astype(int)\n k = 0\n for i in range(N):\n for _ in range(num_copies[i]): \n indexes[k] = i\n k += 1\n \n \n residual = self.weights - num_copies \n residual /= sum(residual) \n cum_sum = np.cumsum(residual)\n cum_sum[-1] = 1. \n indexes[k:N] = np.searchsorted(cum_sum, rand(N-k))\n self.particles[:] = self.particles[indexes]\n self.weights[:] = self.weights[indexes]\n self.weights.fill(1.0 / len(self.weights))\n \n def stratified_resample(self):\n N = self.N\n positions = (rand(N) + range(N)) / N\n \n indexes = np.zeros(N, 'i')\n cum_sum = np.cumsum(self.weights)\n i, j = 0, 0\n while i < N:\n if positions[i] < cum_sum[j]:\n indexes[i] = j\n i += 1\n else:\n j += 1\n \n self.particles[:] = self.particles[indexes]\n self.weights[:] = self.weights[indexes]\n self.weights.fill(1.0 / len(self.weights))\n \n def systematic_resample(self):\n N = self.N\n\n positions = (np.arange(N) + random.random()) / N\n \n indexes = np.zeros(N, 'i')\n cumulative_sum = np.cumsum(self.weights)\n i, j = 0, 0\n while i < N:\n if positions[i] < cumulative_sum[j]:\n indexes[i] = j\n i += 1\n else:\n j += 1\n self.particles[:] = self.particles[indexes]\n self.weights[:] = self.weights[indexes]\n self.weights.fill(1.0 / len(self.weights))\n \n def estimate(self):\n robot_pos = self.particles[:,0:3]\n mean = np.average(robot_pos,weights = self.weights,axis =0)\n \n var = np.average((robot_pos - mean)**2, weights=self.weights, axis=0)\n return mean, var\n\n\ndef neff(weights):\n #return 1. / np.sum(np.square(weights))\n return 1. 
/ np.sum((weights)**2)\n\n\nif __name__ == \"__main__\":\n #np.random.seed(1234)\n control1 = pd.read_csv(\"dataset/control1.csv\", header=None)\n radar1 = pd.read_csv(\"dataset/radar1.csv\", header=None)\n\n radar1.iloc[:,3] = calc_angle(radar1.iloc[:,3])\n radar1.iloc[:,1] = calc_angle(radar1.iloc[:,1]) \n\n initial_landmarks = np.array([[ 3.21846589, 5.35298539], [-3.17810305, 2.97454244]])\n\n qstd = np.array([0.01,0.01,0.01])\n rstd = np.array([0.5,0.3])\n dt=0.1\n N=1000\n robot_position = np.array([0,0,0])\n start = time.time()\n pf = ParticleFilter(N=N,landmarks=initial_landmarks,Q_std = qstd,R_std=rstd,init_x=robot_position)\n\n x_mean = []\n plt.ion()\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n line1, = ax.plot([], [])\n dif_list=[]\n for i in range(0,100):\n u = control1.iloc[i,:]\n z = np.array([radar1.iloc[i,0],radar1.iloc[i,2]])\n #z[0]= z[0] + randn(1)*rstd[0]\n #z[1]= z[1] + randn(1)*rstd[0]\n z_phi = np.array([radar1.iloc[i,1],radar1.iloc[i,3]])\n #z_phi[0]= z_phi[0] + randn(1)*rstd[1]\n #z_phi[1]= z_phi[1] + randn(1)*rstd[1]\n\n pf.predict(u,dt)\n pf.update(z,z_phi)\n\n if neff(pf.weights) < (0.75)*N:#3/4:\n pf.resample()\n #print(\"resample iter=%d\"%(i+1))\n mu, var = pf.estimate()\n plt.scatter(mu[0], mu[1], marker='o',color='b',zorder=i)\n # ------------------enable-disable to add theta---------------------\n plt.quiver(mu[0], mu[1],np.cos(mu[2]), np.sin(mu[2]),linewidths=0.01, edgecolors='k',zorder=i)\n #di= np.sqrt((mu[0]-initial_landmarks[0][0])**2+(mu[1]-initial_landmarks[0][1])**2)\n #di= np.sqrt((mu[0]-initial_landmarks[1][0])**2+(mu[1]-initial_landmarks[1][1])**2)\n #print(\"iter=%d ---radar=%f distance=%f\"%(i,radar1.iloc[i,2],di))\n #dif = radar1.iloc[i,2] - di\n #print(dif)\n #dif_list.append(dif)\n x_mean.append(mu)\n ax.scatter(initial_landmarks[1][0],initial_landmarks[1][1],color='r',zorder=i+1)\n ax.scatter(initial_landmarks[0][0],initial_landmarks[0][1],color='g',zorder=i+1)\n ax.set_title('Particle Filter with static landmarks - iter=%d'%(i+1))\n\n plt.axis((-6,6,-2,8))\n #plt.scatter(pf.particles[:, 0], pf.particles[:, 1],color='k', marker=',', s=1)\n fig.canvas.draw()\n fig.canvas.flush_events()\n #sleep(0.5)\n x_mean=np.array(x_mean)\n end = time.time()\n print(end - start)\n \n print(mu)","sub_path":"ParticleFilter2.py","file_name":"ParticleFilter2.py","file_ext":"py","file_size_in_byte":9518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"531020893","text":"\"\"\"Hermes MQTT server for Rhasspy NLU\"\"\"\nimport logging\nimport typing\nfrom pathlib import Path\n\nimport networkx as nx\nimport rhasspynlu\nfrom rhasspyhermes.base import Message\nfrom rhasspyhermes.client import GeneratorType, HermesClient, TopicArgs\nfrom rhasspyhermes.intent import Intent, Slot, SlotRange\nfrom rhasspyhermes.nlu import (\n NluError,\n NluIntent,\n NluIntentNotRecognized,\n NluIntentParsed,\n NluQuery,\n NluTrain,\n NluTrainSuccess,\n)\nfrom rhasspynlu import Sentence, recognize\nfrom rhasspynlu.intent import Recognition\n\n_LOGGER = logging.getLogger(\"rhasspynlu_hermes\")\n\n# -----------------------------------------------------------------------------\n\n\nclass NluHermesMqtt(HermesClient):\n \"\"\"Hermes MQTT server for Rhasspy NLU.\"\"\"\n\n def __init__(\n self,\n client,\n intent_graph: typing.Optional[nx.DiGraph] = None,\n graph_path: typing.Optional[Path] = None,\n default_entities: typing.Dict[str, typing.Iterable[Sentence]] = None,\n word_transform: typing.Optional[typing.Callable[[str], 
str]] = None,\n fuzzy: bool = True,\n replace_numbers: bool = False,\n language: typing.Optional[str] = None,\n extra_converters: typing.Optional[\n typing.Dict[str, typing.Callable[..., typing.Any]]\n ] = None,\n failure_token: typing.Optional[str] = None,\n site_ids: typing.Optional[typing.List[str]] = None,\n lang: typing.Optional[str] = None,\n ):\n super().__init__(\"rhasspynlu_hermes\", client, site_ids=site_ids)\n\n self.subscribe(NluQuery, NluTrain)\n\n self.graph_path = graph_path\n self.intent_graph = intent_graph\n self.default_entities = default_entities or {}\n self.word_transform = word_transform\n self.fuzzy = fuzzy\n self.replace_numbers = replace_numbers\n self.language = language\n self.extra_converters = extra_converters\n self.failure_token = failure_token\n self.lang = lang\n\n # -------------------------------------------------------------------------\n\n async def handle_query(\n self, query: NluQuery\n ) -> typing.AsyncIterable[\n typing.Union[\n NluIntentParsed,\n typing.Tuple[NluIntent, TopicArgs],\n NluIntentNotRecognized,\n NluError,\n ]\n ]:\n \"\"\"Do intent recognition.\"\"\"\n original_input = query.input\n\n try:\n if not self.intent_graph and self.graph_path and self.graph_path.is_file():\n # Load graph from file\n _LOGGER.debug(\"Loading %s\", self.graph_path)\n with open(self.graph_path, mode=\"rb\") as graph_file:\n self.intent_graph = rhasspynlu.gzip_pickle_to_graph(graph_file)\n\n if self.intent_graph:\n\n def intent_filter(intent_name: str) -> bool:\n \"\"\"Filter out intents.\"\"\"\n if query.intent_filter:\n return intent_name in query.intent_filter\n return True\n\n # Replace digits with words\n if self.replace_numbers:\n # Have to assume whitespace tokenization\n words = rhasspynlu.replace_numbers(\n query.input.split(), self.language\n )\n query.input = \" \".join(words)\n\n input_text = query.input\n\n # Fix casing for output event\n if self.word_transform:\n input_text = self.word_transform(input_text)\n\n if self.failure_token and (self.failure_token in query.input.split()):\n # Failure token was found in input\n recognitions = []\n else:\n # Pass in raw query input so raw values will be correct\n recognitions = recognize(\n query.input,\n self.intent_graph,\n intent_filter=intent_filter,\n word_transform=self.word_transform,\n fuzzy=self.fuzzy,\n extra_converters=self.extra_converters,\n )\n else:\n _LOGGER.error(\"No intent graph loaded\")\n recognitions = []\n\n if NluHermesMqtt.is_success(recognitions):\n # Use first recognition only.\n recognition = recognitions[0]\n assert recognition is not None\n assert recognition.intent is not None\n\n intent = Intent(\n intent_name=recognition.intent.name,\n confidence_score=recognition.intent.confidence,\n )\n slots = [\n Slot(\n entity=(e.source or e.entity),\n slot_name=e.entity,\n confidence=1.0,\n value=e.value_dict,\n raw_value=e.raw_value,\n range=SlotRange(\n start=e.start,\n end=e.end,\n raw_start=e.raw_start,\n raw_end=e.raw_end,\n ),\n )\n for e in recognition.entities\n ]\n\n if query.custom_entities:\n # Copy user-defined entities\n for entity_name, entity_value in query.custom_entities.items():\n slots.append(\n Slot(\n entity=entity_name,\n confidence=1.0,\n value={\"value\": entity_value},\n )\n )\n\n # intentParsed\n yield NluIntentParsed(\n input=recognition.text,\n id=query.id,\n site_id=query.site_id,\n session_id=query.session_id,\n intent=intent,\n slots=slots,\n )\n\n # intent\n yield (\n NluIntent(\n input=recognition.text,\n id=query.id,\n site_id=query.site_id,\n 
session_id=query.session_id,\n intent=intent,\n slots=slots,\n asr_tokens=[NluIntent.make_asr_tokens(recognition.tokens)],\n asr_confidence=query.asr_confidence,\n raw_input=original_input,\n wakeword_id=query.wakeword_id,\n lang=(query.lang or self.lang),\n custom_data=query.custom_data,\n ),\n {\"intent_name\": recognition.intent.name},\n )\n else:\n # Not recognized\n yield NluIntentNotRecognized(\n input=query.input,\n id=query.id,\n site_id=query.site_id,\n session_id=query.session_id,\n custom_data=query.custom_data,\n )\n except Exception as e:\n _LOGGER.exception(\"handle_query\")\n yield NluError(\n site_id=query.site_id,\n session_id=query.session_id,\n error=str(e),\n context=original_input,\n )\n\n # -------------------------------------------------------------------------\n\n @staticmethod\n def is_success(recognitions: typing.List[Recognition]) -> bool:\n \"\"\"True if recognition succeeded\"\"\"\n if not recognitions:\n return False\n\n recognition = recognitions[0]\n\n if (recognition is None) or (recognition.intent is None):\n return False\n\n return True\n\n # -------------------------------------------------------------------------\n\n async def handle_train(\n self, train: NluTrain, site_id: str = \"default\"\n ) -> typing.AsyncIterable[\n typing.Union[typing.Tuple[NluTrainSuccess, TopicArgs], NluError]\n ]:\n \"\"\"Transform sentences to intent graph\"\"\"\n try:\n _LOGGER.debug(\"Loading %s\", train.graph_path)\n with open(train.graph_path, mode=\"rb\") as graph_file:\n self.intent_graph = rhasspynlu.gzip_pickle_to_graph(graph_file)\n\n yield (NluTrainSuccess(id=train.id), {\"site_id\": site_id})\n except Exception as e:\n _LOGGER.exception(\"handle_train\")\n yield NluError(\n site_id=site_id, session_id=train.id, error=str(e), context=train.id\n )\n\n # -------------------------------------------------------------------------\n\n async def on_message(\n self,\n message: Message,\n site_id: typing.Optional[str] = None,\n session_id: typing.Optional[str] = None,\n topic: typing.Optional[str] = None,\n ) -> GeneratorType:\n \"\"\"Received message from MQTT broker.\"\"\"\n if isinstance(message, NluQuery):\n async for query_result in self.handle_query(message):\n yield query_result\n elif isinstance(message, NluTrain):\n assert site_id, \"Missing site_id\"\n async for train_result in self.handle_train(message, site_id=site_id):\n yield train_result\n else:\n _LOGGER.warning(\"Unexpected message: %s\", message)\n","sub_path":"rhasspynlu_hermes/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"514900988","text":"import glob\nfrom PIL import Image\n\nfrom settings import TILE_BLOCK_SIZE, TILE_SIZE\n\n\nclass TileProcessor:\n\tdef __init__(self, tiles_directory):\n\t\tself.tiles_directory = tiles_directory\n\n\tdef __process_tile(self, tile_path):\n\t\ttry:\n\t\t\timg = Image.open(tile_path)\n\t\t\t# tiles must be square, so get the largest square that fits inside the image\n\t\t\tw = img.size[0]\n\t\t\th = img.size[1]\n\t\t\tmin_dimension = min(w, h)\n\t\t\tw_crop = (w - min_dimension) / 2\n\t\t\th_crop = (h - min_dimension) / 2\n\t\n\t\t\timg = img.crop((w_crop, h_crop, w - w_crop, h - h_crop))\n\t\t\t\n\t\t\tlarge_tile_img = img.resize((TILE_SIZE, TILE_SIZE), Image.ANTIALIAS)\n\t\t\tsmall_tile_img = img.resize((int(TILE_SIZE/TILE_BLOCK_SIZE), int(TILE_SIZE/TILE_BLOCK_SIZE)), Image.ANTIALIAS)\n\t\t\t\n\t\t\treturn 
(large_tile_img.convert('RGB'), small_tile_img.convert('RGB'))\n\t\texcept:\n\t\t\treturn (None, None)\n\n\tdef get_tiles(self):\n\t\tlarge_tiles = []\n\t\tsmall_tiles = []\n\t\ttile_names = glob.glob(self.tiles_directory + '/*.jpg')\n\t\tprint(\"Reading files from directory\", self.tiles_directory);\n\n\t\tfor name in tile_names:\n\t\t\tlarge_tile, small_tile = self.__process_tile(name)\n\t\t\tif large_tile:\n\t\t\t\tlarge_tiles.append(large_tile)\n\t\t\t\tsmall_tiles.append(small_tile)\n\t\t\n\t\tprint ('Processed %s tiles.' % (len(large_tiles),))\n\n\t\treturn (large_tiles, small_tiles)","sub_path":"TileProcessor.py","file_name":"TileProcessor.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"67731547","text":"#\n# @lc app=leetcode id=92 lang=python\n#\n# [92] Reverse Linked List II\n#\n# https://leetcode.com/problems/reverse-linked-list-ii/description/\n#\n# algorithms\n# Medium (34.46%)\n# Total Accepted: 186.2K\n# Total Submissions: 540.2K\n# Testcase Example: '[1,2,3,4,5]\\n2\\n4'\n#\n# Reverse a linked list from position m to n. Do it in one-pass.\n#\n# Note: 1 ≤ m ≤ n ≤ length of list.\n#\n# Example:\n#\n#\n# Input: 1->2->3->4->5->NULL, m = 2, n = 4\n# Output: 1->4->3->2->5->NULL\n#\n#\n#\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def reverseBetween(self, head, m, n):\n \"\"\"\n :type head: ListNode\n :type m: int\n :type n: int\n :rtype: ListNode\n \"\"\"\n margin = n-m\n pre = head_node = ListNode(None)\n head_node.next = head\n\n #pre = head_node\n cnt = 0\n while cnt < m-1:\n pre = pre.next\n head = head.next\n cnt += 1\n while margin > 0:\n # cut the node\n tmp_node = head.next\n head.next = tmp_node.next\n\n # insert\n tmp_node.next = pre.next\n pre.next = tmp_node\n\n margin -= 1\n\n head_node = head_node.next\n #head_node.val = margin\n return head_node\n","sub_path":"92.reverse-linked-list-ii.py","file_name":"92.reverse-linked-list-ii.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"112577415","text":"k = int(input())\ncount = 1\ni = 1\nans = 1\nwhile count < k:\n flag = True\n s = str(i)\n for n in range(len(s) - 1):\n if i >= 10:\n if abs(int(s[n + 1]) - int(s[n])) > 1:\n flag = False\n break # an adjacent digit pair differs by more than 1, so i does not qualify\n if flag:\n count += 1\n i += 1\nprint(i)\n","sub_path":"ABC/abc161/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"194798100","text":"#!/usr/bin/env python\nimport pygame\nfrom pygame.locals import *\nimport os\nimport sys\nimport codecs\n\nSCR_RECT = Rect(0, 0, 800, 640)\nGS = 32\n\nshow_grid = False # グリッドを表示するか?\n\ndef main():\n pygame.init()\n screen = pygame.display.set_mode(SCR_RECT.size)\n pygame.display.set_caption(\"PyMap 04 選択マップチップの表示\")\n\n # マップチップをロード\n load_mapchips(\"data\", \"mapchip.dat\")\n\n palette = MapchipPalette()\n map = Map(\"NEW\", 64, 64, palette)\n cursor = Cursor(0, 0)\n msg_engine = MessageEngine()\n\n clock = pygame.time.Clock()\n while True:\n clock.tick(60)\n if palette.display_flag: # パレットが表示中なら\n palette.update()\n palette.draw(screen)\n else:\n offset = calc_offset(cursor)\n # 更新\n cursor.update()\n map.update(offset)\n # 描画\n map.draw(screen, offset)\n cursor.draw(screen, offset)\n # 選択マップチップを左上に描画\n 
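# (i.e. draw the currently selected map chip in the top-left corner of the screen)\n 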
screen.blit(Map.images[palette.selected_mapchip], (10,10))\n pygame.draw.rect(screen, (0,255,0), (10,10,32,32), 3)\n # マウスの座標を描画\n px, py = pygame.mouse.get_pos()\n selectx = (px + offset[0]) / GS\n selecty = (py + offset[1]) / GS\n msg_engine.draw_string(screen, (10,56), map.name)\n msg_engine.draw_string(screen, (10,86), u\"%d %d\" % (selectx, selecty))\n pygame.display.update()\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == KEYDOWN and event.key == K_ESCAPE:\n pygame.quit()\n sys.exit()\n elif event.type == KEYDOWN and event.key == K_SPACE:\n # パレットの表示/非表示を切り替え\n palette.display_flag = not palette.display_flag\n elif event.type == KEYDOWN and event.key == K_g:\n # グリッドの表示/非表示を切り替え\n global show_grid # show_gridはグローバル変数\n show_grid = not show_grid\n\nclass Cursor:\n COLOR = (0,255,0) # 緑色\n WIDTH = 3 # 太さ\n def __init__(self, x, y):\n self.x, self.y = x, y\n self.rect = Rect(x*GS, y*GS, GS, GS)\n def update(self):\n # キー入力でカーソルを移動\n pressed_keys = pygame.key.get_pressed()\n if pressed_keys[K_DOWN]:\n self.y += 1\n elif pressed_keys[K_LEFT]:\n self.x -= 1\n elif pressed_keys[K_RIGHT]:\n self.x += 1\n elif pressed_keys[K_UP]:\n self.y -= 1\n self.rect = Rect(self.x*GS, self.y*GS, GS, GS)\n def draw(self, screen, offset):\n # オフセットを考慮してカーソルを描画\n offsetx, offsety = offset\n px = self.rect.topleft[0]\n py = self.rect.topleft[1]\n pygame.draw.rect(screen, self.COLOR, (px-offsetx,py-offsety,GS,GS), self.WIDTH)\n\nclass Map:\n images = []\n def __init__(self, name, row, col, palette):\n self.name = name\n self.row = row\n self.col = col\n self.default = 5 # デフォルトのマップチップ番号\n self.map = [[self.default for c in range(self.col)] for r in range(self.row)]\n self.palette = palette\n def __str__(self):\n return \"%s,%d,%d,%d\" % (self.name, self.row, self.col, self.default)\n def update(self, offset):\n offsetx, offsety = offset\n mouse_pressed = pygame.mouse.get_pressed()\n if mouse_pressed[0]: # 左クリック(マップチップ描画)\n # マウスが返すのはローカル座標\n px, py = pygame.mouse.get_pos()\n # 全体マップ上での座標はoffsetを足せばよい\n # GSで割るのはピクセルをマスに直すため\n selectx = int((px + offsetx) / GS)\n selecty = int((py + offsety) / GS)\n # マップ範囲外だったら無視\n if selectx < 0 or selecty < 0 or selectx > self.col-1 or selecty > self.row-1:\n return\n # パレットで選択中のマップチップでマップを更新\n self.map[selecty][selectx] = self.palette.selected_mapchip\n elif mouse_pressed[2]: # 右クリック(マップチップ抽出)\n px, py = pygame.mouse.get_pos()\n selectx = int((px + offsetx) / GS)\n selecty = int((py + offsety) / GS)\n if selectx < 0 or selecty < 0 or selectx > self.col-1 or selecty > self.row-1:\n return\n self.palette.selected_mapchip = self.map[selecty][selectx]\n def draw(self, screen, offset):\n offsetx, offsety = offset\n # マップの描画範囲を計算\n startx = int(offsetx / GS)\n endx = int(startx + SCR_RECT.width/GS + 2)\n starty = int(offsety / GS)\n endy = int(starty + SCR_RECT.height/GS + 2)\n # マップの描画\n for y in range(starty, endy):\n for x in range(startx, endx):\n # マップの範囲外はマップチップ番号0で描画\n if x < 0 or y < 0 or x > self.col-1 or y > self.row-1:\n screen.blit(self.images[0], (x*GS-offsetx,y*GS-offsety))\n else:\n screen.blit(self.images[self.map[y][x]], (x*GS-offsetx,y*GS-offsety))\n if show_grid:\n pygame.draw.rect(screen, (0,0,0), (x*GS-offsetx,y*GS-offsety,GS,GS), 1)\n\nclass MapchipPalette:\n \"\"\"マップチップパレット\"\"\"\n ROW = 20 # パレットの行数\n COL = 25 # パレットの列数\n COLOR = (0,255,0) # 緑\n WIDTH = 3 # カーソルの太さ\n def __init__(self):\n self.display_flag = False # Trueのときパレット表示\n self.selected_mapchip = 3 # 選択しているマップチップ番号\n def 
update(self):\n # マップチップパレットの選択\n mouse_pressed = pygame.mouse.get_pressed()\n if mouse_pressed[0]: # 左クリック\n # マウス座標を取得\n mouse_pos = pygame.mouse.get_pos()\n # マス座標に変換\n x = mouse_pos[0] / GS\n y = mouse_pos[1] / GS\n # マップチップ番号に変換\n n = int(y * self.COL + x)\n if n < len(Map.images) and Map.images[n] != None:\n self.selected_mapchip = n\n self.display_flag = False # パレットを消す\n # パレットが消えた直後にマップチップを描画してしまうのを防ぐ\n pygame.time.wait(500)\n def draw(self, screen):\n # パレットを描画\n for i in range(self.ROW * self.COL):\n x = (i % self.COL) * GS\n y = (i // self.COL) * GS\n image = Map.images[0]\n try:\n if Map.images[i] != None:\n image = Map.images[i]\n except IndexError: # イメージが登録されてないとき\n image = Map.images[0]\n screen.blit(image, (x,y))\n # マウスの位置にカーソルを描画\n mouse_pos = pygame.mouse.get_pos()\n x = mouse_pos[0] / GS\n y = mouse_pos[1] / GS\n pygame.draw.rect(screen, self.COLOR, (x*GS,y*GS,GS,GS), self.WIDTH)\n\ndef load_image(dir, file, colorkey=None):\n file = os.path.join(dir, file)\n try:\n image = pygame.image.load(file)\n except pygame.error as message:\n print(\"Cannot load image:\", file)\n raise SystemExit(message)\n image = image.convert()\n if colorkey is not None:\n if colorkey is -1:\n colorkey = image.get_at((0,0))\n image.set_colorkey(colorkey, RLEACCEL)\n return image\n\ndef calc_offset(cursor):\n \"\"\"cursorを中心としてオフセットを計算する\"\"\"\n offsetx = cursor.rect.topleft[0] - SCR_RECT.width/2\n offsety = cursor.rect.topleft[1] - SCR_RECT.height/2\n return offsetx, offsety\n\ndef load_mapchips(dir, file):\n \"\"\"マップチップをロードしてMap.imagesに格納\"\"\"\n file = os.path.join(dir, file)\n fp = open(file, \"r\")\n for line in fp:\n line = line.rstrip() # 改行除去\n data = line.split(\",\") # カンマで分解\n id = int(data[0]) # マップチップID\n name = data[1] # マップチップ名\n movable = int(data[2]) # 移動可能か?(エディタでは未使用)\n Map.images.append(load_image(\"mapchip\", \"%s.png\" % name))\n fp.close()\n\nclass MessageEngine:\n FONT_WIDTH = 16\n FONT_HEIGHT = 22\n WHITE, RED, GREEN, BLUE = 0, 160, 320, 480\n def __init__(self):\n self.image = load_image(\"data\", \"font.png\", -1)\n self.color = self.WHITE\n self.kana2rect = {}\n self.create_hash()\n def set_color(self, color):\n \"\"\"文字色をセット\"\"\"\n self.color = color\n # 変な値だったらWHITEにする\n if not self.color in [self.WHITE,self.RED,self.GREEN,self.BLUE]:\n self.color = self.WHITE\n def draw_character(self, screen, pos, ch):\n \"\"\"1文字だけ描画する\"\"\"\n x, y = pos\n try:\n rect = self.kana2rect[ch]\n screen.blit(self.image, (x,y), (rect.x+self.color,rect.y,rect.width,rect.height))\n except KeyError:\n print(\"描画できない文字があります:%s\" % ch)\n return\n def draw_string(self, screen, pos, str):\n \"\"\"文字列を描画\"\"\"\n x, y = pos\n for i, ch in enumerate(str):\n dx = x + self.FONT_WIDTH * i\n self.draw_character(screen, (dx,y), ch)\n def create_hash(self):\n \"\"\"文字から座標への辞書を作成\"\"\"\n filepath = os.path.join(\"data\", \"kana2rect.dat\")\n fp = codecs.open(filepath, \"r\", \"utf-8\")\n for line in fp.readlines():\n line = line.rstrip()\n d = line.split(\" \")\n kana, x, y, w, h = d[0], int(d[1]), int(d[2]), int(d[3]), int(d[4])\n self.kana2rect[kana] = Rect(x, y, w, h)\n fp.close()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"pymap/pymap04/pymap04.py","file_name":"pymap04.py","file_ext":"py","file_size_in_byte":10290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"194798100","text":"# import matplotlib as mpl\n# mpl.use('Agg')\n\nimport numpy as np\nimport matplotlib.pyplot as plt;\nimport random\nfrom scipy import stats\nimport 
tensorflow as tf\nimport tensorflow_hub as hub\n\ntf.logging.set_verbosity(tf.logging.ERROR)\nimport tensorflow_probability as tfp\n\ntfd = tfp.distributions\ntfb = tfp.bijectors\n\nprint(20*'=~')\nsess = tf.Session(config=tf.ConfigProto(log_device_placement=False))\nprint(20*'=~')\n\ndef ReadGalaxPy(path_program = '../../Data/fromGalaxev/photozs/datasets/', sim_obs_combine = True):\n import os\n import sys\n import glob\n from astropy.table import Table\n\n # path_program = '../../Data/fromGalaxev/photozs/datasets/'\n\n\n class Curated_sample():\n ''' Class to store the redshift and colors of observed galaxies,\n and the redshift, Mpeak, colors, and \"weights\" of simulated\n galaxies whose colors are compatible with those of observed\n galaxies.\n\n The observed sample include galaxies from SDSS\n (SDSS+BOSS+eBOSS), DEEP2, and VIPERS.\n\n The simulated sample was created by sampling the parameter of\n GALAXPY using a LH.\n\n The weights of simulated galaxies are related to the number\n density of observed galaxies in the same region of the color\n space.\n\n You only have to care about the method load_structure. '''\n\n def __init__(self):\n self.arr_c = []\n self.arr_z = []\n self.arr_m = []\n self.arr_w = []\n\n def append(self, c, z, m, w):\n self.arr_c.append(c)\n self.arr_z.append(z)\n self.arr_m.append(m)\n self.arr_w.append(w)\n\n def ndarray(self):\n self.arr_c = np.concatenate(self.arr_c)\n self.arr_z = np.concatenate(self.arr_z)\n self.arr_m = np.concatenate(self.arr_m)\n self.arr_w = np.concatenate(self.arr_w)\n\n def save_struct(self, name):\n np.save(name + 'c.npy', self.arr_c)\n np.save(name + 'z.npy', self.arr_z)\n np.save(name + 'm.npy', self.arr_m)\n np.save(name + 'w.npy', self.arr_w)\n\n def load_struct(self, name):\n self.arr_c = np.load(name + 'c.npy')\n self.arr_z = np.load(name + 'z.npy')\n self.arr_m = np.load(name + 'm.npy')\n self.arr_w = np.load(name + 'w.npy')\n\n def duplicate_data(self, zrange):\n aa = np.where((self.arr_w > 50)\n & (self.arr_z >= zrange[0])\n & (self.arr_z < zrange[1]))[0]\n print(aa.shape)\n cc = np.repeat(aa, self.arr_w[aa].astype(int))\n self.arr_cn = self.arr_c[cc, :]\n self.arr_zn = self.arr_z[cc]\n self.arr_mn = self.arr_m[cc]\n\n\n def read_curated_data():\n run_path = path_program + 'runs/run_z3/'\n\n sim_q = Curated_sample() # simulated colors quenched galaxies\n sim_s = Curated_sample() # simulated colors star-forming galaxies\n obs_q = Curated_sample() # observed colors quenched galaxies\n obs_s = Curated_sample() # observed colors star-forming galaxies\n\n obs_q.load_struct(run_path + 'str_obs_q')\n obs_s.load_struct(run_path + 'str_obs_s')\n sim_q.load_struct(run_path + 'str_sim_q')\n sim_s.load_struct(run_path + 'str_sim_s')\n\n print(sim_q.arr_c.shape)\n print(sim_s.arr_c.shape)\n print(obs_q.arr_c.shape)\n print(obs_s.arr_c.shape)\n\n return sim_q, sim_s, obs_q, obs_s\n\n\n sim_q, sim_s, obs_q, obs_s = read_curated_data()\n\n if sim_obs_combine:\n train_datafile = 'GalaxPy'\n\n # 2.0 ####### TRAIN USING SIMULATION, TEST OBSERVATION ####\n\n Trainfiles = np.append(sim_q.arr_c, sim_s.arr_c, axis=0)\n TrainZ = np.append(sim_q.arr_z, sim_s.arr_z, axis=0)\n\n Trainfiles = np.delete(Trainfiles, (4), axis=1) ## deleting z-Y\n\n Testfiles = np.append(obs_q.arr_c, obs_s.arr_c, axis=0)\n TestZ = np.append(obs_q.arr_z, obs_s.arr_z, axis=0)\n\n TrainshuffleOrder = np.arange(Trainfiles.shape[0])\n np.random.shuffle(TrainshuffleOrder)\n\n Trainfiles = Trainfiles[TrainshuffleOrder]\n TrainZ = TrainZ[TrainshuffleOrder]\n\n TestshuffleOrder 
= np.arange(Testfiles.shape[0])\n np.random.shuffle(TestshuffleOrder)\n\n Testfiles = Testfiles[TestshuffleOrder]\n TestZ = TestZ[TestshuffleOrder]\n\n # NOTE: both splits below are sliced from the start of Trainfiles, so train and test overlap here\n X_train = Trainfiles[:num_train] # color mag\n X_test = Trainfiles[:num_test] # color mag\n\n y_train = TrainZ[:num_train] # spec z\n y_test = TrainZ[:num_test] # spec z\n\n else:\n train_datafile = 'SDSS'\n # 1.1 ####### SIMULATED: QUENCHED ONLY ############\n # Trainfiles = sim_q.arr_c\n # TrainZ = sim_q.arr_z\n\n # 1.2 ### SIMULATED: QUENCHED + STAR FORMATION ####\n\n # Trainfiles =np.append( sim_q.arr_c, sim_s.arr_c, axis = 0)\n # TrainZ = np.append( sim_q.arr_z, sim_s.arr_z, axis = 0)\n\n # 1.3 ####### OBSERVED: QUENCHED + STAR FORMATION ####\n\n Trainfiles = np.append(obs_q.arr_c, obs_s.arr_c, axis=0)\n TrainZ = np.append(obs_q.arr_z, obs_s.arr_z, axis=0)\n\n TrainshuffleOrder = np.arange(Trainfiles.shape[0])\n np.random.shuffle(TrainshuffleOrder)\n\n Trainfiles = Trainfiles[TrainshuffleOrder]\n TrainZ = TrainZ[TrainshuffleOrder]\n\n # 1 #################################\n\n X_train = Trainfiles[:num_train] # color mag\n X_test = Trainfiles[num_train + 1: num_train + num_test] # color mag\n\n y_train = TrainZ[:num_train] # spec z\n y_test = TrainZ[num_train + 1: num_train + num_test] # spec z\n\n ############## THINGS ARE SAME AFTER THIS ###########\n\n ## rescaling xmax/xmin\n xmax = np.max([np.max(X_train, axis=0), np.max(X_test, axis=0)], axis=0)\n xmin = np.min([np.min(X_train, axis=0), np.min(X_test, axis=0)], axis=0)\n\n X_train = (X_train - xmin) / (xmax - xmin)\n X_test = (X_test - xmin) / (xmax - xmin)\n\n #### RESCALING X_train, X_test NOT done yet -- (g-i), (r-i) ... 
and i mag -->> Color/Mag issue\n\n ymax = np.max([y_train.max(), y_test.max()])\n ymin = np.min([y_train.min(), y_test.min()])\n\n y_train = (y_train - ymin) / (ymax - ymin)\n y_test = (y_test - ymin) / (ymax - ymin)\n\n return X_train, y_train, X_test, y_test, ymax, ymin, xmax, xmin\n\ndef evaluate(tensors):\n \"\"\"Evaluates Tensor or EagerTensor to Numpy `ndarray`s.\n Args:\n tensors: Object of `Tensor` or EagerTensor`s; can be `list`, `tuple`,\n `namedtuple` or combinations thereof.\n\n Returns:\n ndarrays: Object with same structure as `tensors` except with `Tensor` or\n `EagerTensor`s replaced by Numpy `ndarray`s.\n \"\"\"\n if tf.executing_eagerly():\n return tf.contrib.framework.nest.pack_sequence_as(\n tensors,\n [t.numpy() if tf.contrib.framework.is_tensor(t) else t\n for t in tf.contrib.framework.nest.flatten(tensors)])\n return sess.run(tensors)\n\ndef plot_normal_mix(pis, mus, sigmas, ax, label='', comp=True):\n \"\"\"Plots the mixture of Normal models to axis=ax comp=True plots all\n components of mixture model\n \"\"\"\n # x = np.linspace(-10.5, 10.5, 250)\n x = np.linspace(-0.1, 1.1, 250)\n final = np.zeros_like(x)\n for i, (weight_mix, mu_mix, sigma_mix) in enumerate(zip(pis, mus, sigmas)):\n temp = stats.norm.pdf(x, mu_mix, sigma_mix) * weight_mix\n final = final + temp\n if comp:\n ax.plot(x, temp, label='Normal ' + str(i))\n ax.plot(x, final, label='Mixture of Normals ' + label)\n ax.legend(fontsize=13)\n return final\n\ndef neural_network_mod():\n \"\"\"\n loc, scale, logits = NN(x; theta)\n\n Args:\n X: Input Tensor containing input data for the MDN\n Returns:\n locs: The means of the normal distributions that our data is divided into.\n scales: The scales of the normal distributions that our data is divided\n into.\n logits: The probabilities of ou categorical distribution that decides\n which normal distribution our data points most probably belong to.\n \"\"\"\n X = tf.placeholder(tf.float64,name='X',shape=(None,D))\n # 2 hidden layers with 15 hidden units\n net = tf.layers.dense(X, 32, activation=tf.nn.relu)\n net = tf.layers.dense(net, 16, activation=tf.nn.relu)\n net = tf.layers.dense(net, 8, activation=tf.nn.relu)\n locs = tf.layers.dense(net, K, activation=None)\n scales = tf.layers.dense(net, K, activation=tf.exp)\n logits = tf.layers.dense(net, K, activation=None)\n outdict= {'locs':locs, 'scales':scales, 'logits':logits}\n hub.add_signature(inputs=X,outputs=outdict)\n\n return locs, scales, logits\n\ndef mixture_model(X,Y,learning_rate=1e-3,decay_rate=.95,step=1000,train=True):\n if train:\n dict = neural_network(tf.convert_to_tensor(X),as_dict=True)\n else:\n dict = neural_network_t(tf.convert_to_tensor(X),as_dict=True)\n locs = dict['locs'] ; scales = dict['scales'] ; logits = dict['logits']\n cat = tfd.Categorical(logits=logits)\n components = [tfd.Normal(loc=loc, scale=scale) for loc, scale\n in zip(tf.unstack(tf.transpose(locs)),\n tf.unstack(tf.transpose(scales)))]\n\n y = tfd.Mixture(cat=cat, components=components)\n #define loss function\n\n with tf.name_scope(\"loss\"):\n\n log_likelihood = y.log_prob(Y)\n # log_likelihood = -tf.reduce_sum(log_likelihood/(1. + y_train)**2 )\n y_mean = np.median(Y)\n log_likelihood = -tf.reduce_sum(log_likelihood)\n #log_likelihood = -tf.reduce_sum(log_likelihood*(y_mean-y_train)**4 )\n\n tf.summary.scalar('loglike', log_likelihood)\n\n # with tf.name_scope(\"loss1\"):\n # log_likelihood1 = log_likelihood/(1. 
+ y_train)**2\n # tf.summary.scalar('loglike_rescaled', log_likelihood1)\n\n if train:\n global_step = tf.Variable(0, trainable=False)\n decayed_lr = tf.train.exponential_decay(learning_rate,\n global_step, step,\n decay_rate, staircase=True)\n optimizer = tf.train.AdamOptimizer(decayed_lr)\n\n\n with tf.name_scope(\"train\"):\n train_op = optimizer.minimize(log_likelihood)\n\n summary_op = tf.summary.merge_all()\n\n evaluate(tf.global_variables_initializer())\n return log_likelihood, train_op, logits, locs, scales, summary_op\n else:\n evaluate(tf.global_variables_initializer())\n return log_likelihood, logits, locs, scales\n\n# def train(log_likelihood,train_op,n_epoch):\n# train_loss = np.zeros(n_epoch)\n# test_loss = np.zeros(n_epoch)\n# for i in range(n_epoch):\n# _, loss_value = evaluate([train_op, log_likelihood])\n# # summary, loss_value = evaluate([train_op, log_likelihood])\n#\n# train_loss[i] = loss_value\n# # writer.add_summary(summary, i)\n# plt.plot(np.arange(n_epoch), -train_loss / len(X_train), label='Train Loss')\n# # plt.savefig('../Plots/T_loss_function.pdf')\n# return train_loss\n\ndef train(log_likelihood, train_op, summary_op,n_epoch):\n\n logs_path = \"./log_dir\"\n writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())\n\n\n train_loss = np.zeros(n_epoch)\n test_loss = np.zeros(n_epoch)\n for i in range(n_epoch):\n # _, loss_value = evaluate([train_op, log_likelihood])\n _, summary = evaluate([train_op, summary_op])\n\n writer.add_summary(summary, n_epoch)\n\n # train_loss[i] = loss_value\n # plt.plot(np.arange(n_epoch), -train_loss / len(X_train), label='Train Loss')\n # plt.savefig('../Plots/T_loss_function.pdf')\n # return train_loss\n return summary\n\n\ndef get_predictions(logits,locs,scales):\n pred_weights, pred_means, pred_std = evaluate([tf.nn.softmax(logits), locs, scales])\n return pred_weights, pred_means, pred_std\n\ndef plot_pdfs(pred_means,pred_weights,pred_std,num=6,train=True):\n if train:\n obj = [random.randint(0,num_train-1) for x in range(num)]\n else:\n obj = [random.randint(0,num_test-1) for x in range(num)]\n #obj = [93, 402, 120,789,231,4,985]\n print(obj)\n fig, axes = plt.subplots(nrows=num, ncols=1, sharex = True, figsize=(8, 7))\n allfs = []\n for i in range(len(obj)):\n fs = plot_normal_mix(pred_weights[obj][i], pred_means[obj][i],\n pred_std[obj][i], axes[i], comp=False)\n allfs.append(fs)\n axes[i].axvline(x=y_train[obj][i], color='black', alpha=0.5)\n axes[i].text(0.3, 4.0, 'ID: ' +str(obj[i]), horizontalalignment='center',\n verticalalignment='center')\n\n plt.xlabel(r' rescaled[$z_{pred}]$', fontsize = 19)\n # plt.savefig('../Plots/T_pdfs.pdf')\n plt.show()\n\ndef plot_pred_mean(pred_means,pred_weights,pred_std,ymax,ymin,y_train,select='no'):\n y_pred = np.sum(pred_means*pred_weights, axis = 1)\n y_pred_std = np.sum(pred_std*pred_weights, axis = 1)\n\n plt.figure(22, figsize=(9,8))\n\n #ymax=1\n #ymin=0\n # if select == 'yes':\n # y_pred = y_pred[obj]\n # y_train = y_train[obj]\n # y_pred_std = y_pred_std[obj]\n\n # plt.scatter(y_test, y_pred, facecolors='k', s = 1)\n\n plt.errorbar( (ymax - ymin)*(y_train)+ymin, (ymax - ymin)*(y_pred)+ymin, yerr= (ymax - ymin)*(y_pred_std), fmt='bo', ecolor='r', ms = 2, alpha = 0.1)\n\n #switched\n #plt.errorbar( (ymax - ymin)*(y_pred)+ymin, (ymax - ymin)*(y_train)+ymin, yerr= (ymax - ymin)*(y_pred_std), fmt='bo', ecolor='r', ms = 2, alpha = 0.1)\n\n #plt.text(0.2, 0.9, train_datafile + ' trained', horizontalalignment='center', verticalalignment='center')\n plt.plot((ymax - 
ymin)*(y_train)+ymin, (ymax - ymin)*( y_train)+ymin, 'k')\n\n plt.ylabel(r'$z_{pred}$', fontsize = 19)\n plt.xlabel(r'$z_{true}$', fontsize = 19)\n #plt.xlim([0,1])\n #plt.ylim([0,1])\n plt.title('weight x mean')\n plt.tight_layout()\n # plt.savefig('../Plots/T_pred_mean.pdf')\n plt.show()\n\ndef plot_pred_peak(pred_means,pred_weights,pred_std,ymax,ymin,y_train,select='no'):\n def peak(weight,sigma):\n return weight/np.sqrt(2*np.pi*sigma**2)\n\n peak_max = np.argmax(peak(pred_weights,pred_std),axis=1)\n y_pred = np.array([pred_means[i,peak_max[i]] for i in range(len(y_train))])\n y_pred_std = np.array([pred_std[i,peak_max[i]] for i in range(len(y_train))])\n plt.figure(24, figsize=(9, 8))\n # if select == 'yes':\n # y_pred = y_pred[obj]\n # y_train = y_train[obj]\n # y_pred_std = y_pred_std[obj]\n # plt.scatter(y_test, y_pred, facecolors='k', s = 1)\n plt.errorbar((ymax - ymin)*(y_train)+ymin, (ymax - ymin)*(y_pred)+ymin, yerr= (ymax - ymin)*(\n y_pred_std), fmt='bo', ecolor='r', ms = 2, alpha = 0.1)\n #plt.text(0.2, 0.9, train_datafile + ' trained', horizontalalignment='center', verticalalignment='center')\n plt.plot((ymax - ymin)*(y_test)+ymin, (ymax - ymin)*(y_test)+ymin, 'k')\n plt.ylabel(r'$z_{pred}$', fontsize = 19)\n plt.xlabel(r'$z_{true}$', fontsize = 19)\n #plt.xlim([0,1])\n #plt.ylim([0,1])\n plt.title('highest peak')\n plt.tight_layout()\n plt.show()\n\ndef plot_pred_weight(pred_means,pred_weights,pred_std,ymax,ymin,y_train,select='no'):\n weight_max = np.argmax(pred_weights, axis = 1) ## argmax or max???\n\n y_pred = np.array([pred_means[i,weight_max[i]] for i in range(len(y_train))])\n y_pred_std = np.array([pred_std[i,weight_max[i]] for i in range(len(y_train))])\n\n plt.figure(29, figsize=(9, 8))\n # if select == 'yes':\n # y_pred = y_pred[obj]\n # y_train = y_train[obj]\n # y_pred_std = y_pred_std[obj]\n\n # plt.scatter(y_test, y_pred, facecolors='k', s = 1)\n plt.errorbar((ymax - ymin)*(y_train)+ymin, (ymax - ymin)*(y_pred)+ymin, yerr= (ymax - ymin)*(\n y_pred_std), fmt='bo', ecolor='r', ms = 2, alpha = 0.1)\n\n #plt.text(0.2, 0.9, train_datafile + ' trained', horizontalalignment='center', verticalalignment='center')\n plt.plot((ymax - ymin)*(y_test)+ymin, (ymax - ymin)*(y_test)+ymin, 'k')\n plt.ylabel(r'$z_{pred}$', fontsize = 19)\n plt.xlabel(r'$z_{true}$', fontsize = 19)\n #plt.xlim([0,1])\n #plt.ylim([0,1])\n plt.title('highest weight')\n plt.tight_layout()\n plt.show()\n\ndef per_stats(pred_means,pred_weights,pred_std,ymax,ymin,y_train):\n y_pred = np.sum(pred_means*pred_weights, axis = 1)\n y_pred_std = np.sum(pred_std*pred_weights, axis = 1)\n y_pred = (ymax - ymin)*(y_pred)+ymin\n y_pred_std = (ymax - ymin)*(y_pred_std)\n y_train = (ymax - ymin)*(y_train)+ymin\n diff = y_pred-y_train\n mean_diff = np.mean(diff)\n med_diff = np.median(diff)\n std_diff = np.std(diff)\n mean_sigma = np.mean(y_pred_std)\n med_sigma = np.median(y_pred_std)\n std_sigma = np.std(y_pred_std)\n return mean_diff, med_diff, std_diff, mean_sigma, med_sigma, std_sigma\n\ndef testing(X_test,y_test):\n\n log_likelihood, logits, locs, scales = mixture_model(X_test,y_test,train=False)\n #_, loss_value = evaluate([train_op, log_likelihood])\n pred_weights, pred_means, pred_std = get_predictions(logits,locs,scales)\n return pred_weights, pred_means, pred_std\n\ndef plot_cum_sigma(pred_weights,pred_std,ymax,ymin):\n #y_pred_std = np.sum(pred_std*pred_weights, axis = 1)\n\n weight_max = np.argmax(pred_weights, axis = 1) ## argmax or max???\n y_pred_std = np.array([pred_std[i,weight_max[i]] for i in 
range(len(pred_weights))]) # one sigma per object: the component with the highest weight\n y_pred_std = (ymax - ymin)*(y_pred_std)\n plt.figure(222)\n plt.hist(y_pred_std,100, density=True, histtype='step',\n cumulative=True,color='k')\n plt.xlabel('Sigma')\n plt.show()\n\n\n\nn_epochs = 3000\nD = 5\nK = 3\nlearning_rate = 5e-3\ndecay_rate= 0.0\nstep=100\nnum_train = 12000\nnum_test = 5000\nsave_mod = 'hub_mod_lr_334'+str(learning_rate)+'_dr'+str(decay_rate)+'_step'+str(step)+'_ne'+str(n_epochs)+'_k'+str(K)+'_nt'+str(num_train)\n\n\n\n############training\n\nX_train, y_train, X_test, y_test, ymax, ymin, xmax, xmin = ReadGalaxPy(path_program = '../../Data/fromGalaxev/photozs/datasets/', sim_obs_combine = True)\n\nprint(\"Size of features in training data: {}\".format(X_train.shape))\nprint(\"Size of output in training data: {}\".format(y_train.shape))\nprint(\"Size of features in test data: {}\".format(X_test.shape))\nprint(\"Size of output in test data: {}\".format(y_test.shape))\n\n\n\nnet_spec = hub.create_module_spec(neural_network_mod)\nneural_network = hub.Module(net_spec,name='neural_network',trainable=True)\nlog_likelihood, train_op, logits, locs, scales, summary_op = mixture_model(X_train,y_train,learning_rate=learning_rate,decay_rate=decay_rate)\n# train_loss = train(log_likelihood,train_op,n_epochs)\ntrain(log_likelihood, train_op, summary_op,n_epochs)\n\nprint ('lalalala')\n\n\n#save network\nneural_network.export(save_mod,sess)\npred_weights, pred_means, pred_std = get_predictions(logits, locs, scales)\nprint(pred_means)\n\nplot_pdfs(pred_means,pred_weights,pred_std)\nplot_pred_mean(pred_means,pred_weights,pred_std,ymax,ymin,y_train)\nmean_diff, med_diff, std_diff, mean_sigma, med_sigma, std_sigma = per_stats(pred_means,pred_weights,pred_std,ymax,ymin,y_train)\nplot_cum_sigma(pred_weights,pred_std,ymax,ymin)\nplot_pred_peak(pred_means,pred_weights,pred_std,ymax,ymin,y_train)\nplot_pred_weight(pred_means,pred_weights,pred_std,ymax,ymin,y_train)\n\n#load network\nneural_network_t = hub.Module(save_mod)\n##testing\n\n\ntest_weights, test_means, test_std = testing(X_test,y_test)\nplot_pdfs(test_means,test_weights,test_std,train=False)\nplot_pred_mean(test_means,test_weights,test_std,ymax,ymin,y_test)\nplot_cum_sigma(test_weights,test_std,ymax,ymin)\ntest_mean_diff, test_med_diff, test_std_diff, test_mean_sigma, test_med_sigma, test_std_sigma = per_stats(test_means,test_weights,test_std,ymax,ymin,y_test)\nplot_pred_peak(test_means,test_weights,test_std,ymax,ymin,y_test)\nplot_pred_weight(test_means,test_weights,test_std,ymax,ymin,y_test)\n\n","sub_path":"mdn_hub_board.py","file_name":"mdn_hub_board.py","file_ext":"py","file_size_in_byte":20000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"294640402","text":"#!/usr/bin/python\n\nfrom ansible.errors import AnsibleFilterError\n\nclass FilterModule(object):\n def filters(self):\n return {\n 'ctrl_data_intf_dict': self.ctrl_data_intf_dict\n }\n\n def ctrl_data_intf_dict(self, instances, contrail_config, kolla_config):\n host_intf = {}\n kolla_globals = kolla_config.get('kolla_globals', {})\n for k,v in instances.iteritems():\n tmp_intf = contrail_config.get('PHYSICAL_INTERFACE', \\\n kolla_globals.get('network_interface', None))\n if tmp_intf != None:\n host_intf[v['ip']] = tmp_intf\n\n for i,j in v.get('roles', {}).iteritems():\n if j is not None:\n tmp_intf = j.get('PHYSICAL_INTERFACE', \\\n j.get('network_interface', None))\n if tmp_intf != None:\n host_intf[v['ip']] = tmp_intf\n\n return 
host_intf\n","sub_path":"playbooks/roles/configure_instances/filter_plugins/ctrl_data_intf_dict.py","file_name":"ctrl_data_intf_dict.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"534148526","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n__author__ = 'ghost'\n\nfrom app.libs import router\nfrom app.helper import BaseRequestHandler\n\n@router.Route('/')\nclass Index(BaseRequestHandler):\n\n def get(self, *args, **kwargs):\n self.finish('It works')","sub_path":"app/views/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"264878345","text":"#!/usr/bin/python3\n\n# get tasi with degradome\n\nimport sys\n\ndef getseq(raw):\n\tseq = raw.split(\"(\")[0]\n\treturn seq\n\nf = open(sys.argv[1], \"r\")\nf.readline()\nf.readline()\nstate = 1\ntasi = dict()\nfor i in f:\n\tif i == \"\\n\":\n\t\tif state == 1: state = 2\n\t\telse: state = 1\n\telse:\n\t\tif state == 1:\n\t\t\tidx = i.rstrip()\n\t\t\ttasi[idx] = list()\n\t\tif state == 2:\n\t\t\ttasi[idx].append(i.rstrip())\nf.close()\n\nf = open(sys.argv[2], \"r\")\nhasdegr = dict()\nfor i in f:\n\thasdegr[i.rstrip()] = 1\nf.close()\n\nprint(\"List of phased sRNAs in each of the predicted loci\\n\")\nfor i in tasi.keys():\n\tsub = i.split(\",\")[0].split(\" \")[0]\n\tif sub in hasdegr:\n\t\tprint(i)\n\t\tprint()\n\t\tfor phase in tasi[i]:\n\t\t\tprint(phase)\n\t\tprint()\n\n","sub_path":"tasidegr.py","file_name":"tasidegr.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"60364040","text":"# -*- coding:utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\nfrom multiprocessing import Pool\n\nfrom Utils.DB_config import ConfigQuant\nfrom Utils.DBOperation import readDB, writeDB, checkIfIncre, deleteObsoleteDataFromDB\nfrom Utils.Algorithms import getFinancialReportAccountYOY, getFinancialReportAccountTTM, getStockCashDividend, expandDataFromSeasonToDaily\n\n\nclass StockFundamentalTushareMapping:\n def __init__(self, **kwargs):\n self.sourceTableName = kwargs.get(\"sourceTableName\")\n self.calendarTableName = kwargs.get('calendarTableName')\n self.codeField = kwargs.get(\"codeField\")\n self.dateField = kwargs.get('dateField')\n self.rawFields = kwargs.get('rawFields')\n self.yearField = kwargs.get('yearField')\n self.seasonField = kwargs.get('seasonField')\n self.releaseDateField = kwargs.get('releaseDateField')\n self.splitDividendField = kwargs.get('splitDividendField')\n self.valueForYOYFields = kwargs.get('valueForYOYFields')\n self.valueForTTMFields = kwargs.get('valueForTTMFields')\n self.originalYOYFields = kwargs.get('originalYOYFields')\n self.ratioFields = kwargs.get('ratioFields')\n self.timeStampField = kwargs.get('timeStampField')\n # self.lagYearNum = kwargs.get('lagYearNum')\n self.targetTableName = kwargs.get('targetTableName')\n self.condition = kwargs.get('condition')\n self.chunkSize = kwargs.get('chunkSize')\n self.isMultiProcess = kwargs.get('isMultiProcess')\n self.processNum = kwargs.get('processNum')\n self.reportPeriodField = 'REPORT_PERIOD'\n self.latestTimeStamp = 'latest_time_stamp'\n self.state = ''\n self.if_exist = ''\n self.last_update_date = ''\n\n # prepare sql statement (全量 or 增量)\n def prepareData(self, startDate='2007-01-01'):\n # check if target table 
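The ctrl_data_intf_dict filter plugin above uses Python 2's dict.iteritems(). A hedged Python 3 sketch of the same interface-resolution logic (role-level PHYSICAL_INTERFACE / network_interface settings override the global ones), with hypothetical inventory data for illustration:

def ctrl_data_intf_dict(instances, contrail_config, kolla_config):
    host_intf = {}
    kolla_globals = kolla_config.get('kolla_globals', {})
    for _, inst in instances.items():        # .items() replaces Py2 .iteritems()
        intf = contrail_config.get('PHYSICAL_INTERFACE',
                                   kolla_globals.get('network_interface'))
        if intf is not None:
            host_intf[inst['ip']] = intf
        for _, role in (inst.get('roles') or {}).items():
            if role is not None:
                intf = role.get('PHYSICAL_INTERFACE',
                                role.get('network_interface'))
                if intf is not None:
                    host_intf[inst['ip']] = intf
    return host_intf

# Hypothetical inventory: the role-level setting wins for bms2.
instances = {
    'bms1': {'ip': '10.0.0.1', 'roles': {'vrouter': None}},
    'bms2': {'ip': '10.0.0.2', 'roles': {'vrouter': {'PHYSICAL_INTERFACE': 'eth1'}}},
}
print(ctrl_data_intf_dict(instances, {'PHYSICAL_INTERFACE': 'eth0'}, {}))
# {'10.0.0.1': 'eth0', '10.0.0.2': 'eth1'}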
exist\n is_full, last_record_date, start_fetch_date = checkIfIncre(ConfigQuant, self.sourceTableName,\n self.targetTableName, self.timeStampField, [252], self.condition)\n\n # sql statement\n # tmp_field = list(map(lambda x: \"`%s`\" % x, tmp_field))\n # fields = \",\".join(tmp_field)\n season_end_date = {\n 1: '03-31',\n 2: '06-30',\n 3: '09-30',\n 4: '12-31'\n }\n today_dt = datetime.today()\n today_str = datetime.strftime(today_dt, '%Y-%m-%d')\n current_year = today_dt.year\n if today_str < '%d-%s' % (current_year, season_end_date[1]):\n current_season = 4\n else:\n for tmp_season in range(1,4):\n if (today_str >= '%d-%s' % (current_year, season_end_date[tmp_season])) and (today_str < '%d-%s' % (current_year, season_end_date[tmp_season + 1])):\n current_season = tmp_season\n break\n current_period = current_year * 100 + current_season\n last_year_period = (current_year - 2) * 100 + current_season # load 2 years' old data in order to calculate ttm and yoy (last year's yoy need the data of 2 years ago), and might have nan filled by new downloads\n if is_full == 1: # 全量\n self.state = \"SELECT * FROM %s where \" % self.sourceTableName\n if self.condition != '':\n self.state = self.state + self.condition + ' and ' # add search condition\n self.if_exist = 'replace'\n # elif is_full == 0: # 增量\n else: # even if source do not have new data, still update target\n self.last_update_date = datetime.strftime(last_record_date, '%Y-%m-%d')\n self.state = \"SELECT * FROM %s where (`%s` * 100 + `%s` >= '%d') and (`%s` * 100 + `%s` <= '%d') \" % (\n self.sourceTableName, self.yearField, self.seasonField, last_year_period, self.yearField, self.seasonField, current_period)\n if self.condition != '':\n self.state = self.state + ' and ' + self.condition # add search condition\n self.if_exist = 'append'\n # else: # 不需要跑\n # self.state = ''\n\n def run(self):\n self.prepareData()\n if self.state == '': # already the latest data\n return\n elif self.last_update_date != '':\n self.runIncrm()\n else:\n self.runFull()\n\n def runFull(self):\n # get total code list\n tmp_state = 'select distinct %s from %s' % (self.codeField, self.sourceTableName)\n code_list = readDB(tmp_state, ConfigQuant).values\n code_list = code_list.T[0]\n\n # get trade date list from calendar\n tmp_state = 'select `date` from %s' % self.calendarTableName\n tradedates = readDB(tmp_state, ConfigQuant)['date']\n\n # calculate num of loop\n loop_num = int(code_list.size / self.chunkSize)\n if code_list.size > loop_num * self.chunkSize:\n loop_num = loop_num + 1\n\n if self.isMultiProcess: # use multi processing\n # register pool\n pool = Pool(processes=self.processNum)\n # fetch and process data from sql by chunk\n for i in range(loop_num):\n tmp_code = code_list[i * self.chunkSize:(i + 1) * self.chunkSize]\n tmp_code_str = list(map(lambda x: \"'%s'\" % x, tmp_code))\n tmp_range = ','.join(tmp_code_str)\n tmp_state = self.state + \"`%s` in (%s)\" % (self.codeField, tmp_range)\n dataO = readDB(tmp_state, ConfigQuant)\n dataO.loc[:, self.reportPeriodField] = dataO[self.yearField] * 100 + dataO[self.seasonField] # combine report year and season\n dataO = dataO.drop_duplicates([self.codeField, self.reportPeriodField])\n dataO = dataO.sort_values(self.reportPeriodField) # sort by report period\n\n # process chunk data\n pool_results = []\n for code in tmp_code:\n tmp_data = dataO.loc[dataO[self.codeField] == code] # dataO already sorted by date\n\n if tmp_data.empty:\n continue\n\n # multiprocessing\n tmp_procs = 
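prepareData encodes report periods as year * 100 + season, which keeps numeric order equal to chronological order and makes "N years back" a plain subtraction. A small standard-library sketch of one consistent way to compute the most recently completed period from the same season end dates (the original keys off the same table; names here are illustrative):

from datetime import date

SEASON_END = {1: (3, 31), 2: (6, 30), 3: (9, 30), 4: (12, 31)}

def latest_completed_period(today=None):
    today = today or date.today()
    candidates = [(y, s) for y in (today.year - 1, today.year) for s in (1, 2, 3, 4)]
    done = [(y, s) for (y, s) in candidates if date(y, *SEASON_END[s]) <= today]
    y, s = max(done)            # (year, season) tuples sort chronologically
    return y * 100 + s          # e.g. 2018-11-05 -> 201803

def periods_back(period, years):
    return period - 100 * years  # same season, N years earlier

print(latest_completed_period(date(2018, 11, 5)))   # 201803
print(periods_back(201803, 2))                      # 201603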
pool.apply_async(self.coreComputation, (code, tmp_data, tradedates))\n pool_results.append(tmp_procs)\n\n # get result from the process pool\n data_tot_result = pd.DataFrame([])\n for tmp_procs in pool_results:\n data_result = tmp_procs.get()\n data_tot_result = data_tot_result.append(data_result)\n\n # add timestamp\n data_tot_result['time_stamp'] = datetime.now()\n\n if data_tot_result.empty:\n continue\n\n # dump chunk data into sql\n writeDB(self.targetTableName, data_tot_result, ConfigQuant, self.if_exist)\n self.if_exist = 'append'\n\n pool.close()\n else: # not multiprocess\n # fetch and process data from sql by chunk\n for i in range(loop_num):\n tmp_code = code_list[i*self.chunkSize:(i+1)*self.chunkSize]\n tmp_code_str = list(map(lambda x:\"'%s'\"%x, tmp_code))\n tmp_range = ','.join(tmp_code_str)\n tmp_state = self.state + \"`%s` in (%s)\" % (self.codeField, tmp_range)\n dataO = readDB(tmp_state, ConfigQuant)\n dataO.loc[:, self.reportPeriodField] = dataO[self.yearField] * 100 + dataO[\n self.seasonField] # combine report year and season\n dataO = dataO.drop_duplicates([self.codeField, self.reportPeriodField])\n # dataO = dataO.sort_values(self.reportPeriodField) # sort by report period\n\n data_tot_result = pd.DataFrame([])\n for code in tmp_code:\n tmp_data = dataO.loc[dataO[self.codeField] == code] # dataO already sorted by date\n tmp_data = tmp_data.sort_values(self.reportPeriodField) # sort by report period\n\n if tmp_data.empty:\n continue\n\n data_result = self.coreComputation(code, tmp_data, tradedates)\n\n data_tot_result = data_tot_result.append(data_result)\n\n # add timestamp\n data_tot_result['time_stamp'] = datetime.now()\n\n if data_tot_result.empty:\n continue\n\n # dump chunk data into sql\n writeDB(self.targetTableName, data_tot_result, ConfigQuant, self.if_exist)\n self.if_exist = 'append'\n\n\n def runIncrm(self):\n # fetch and process all incremental data from sql\n dataO = readDB(self.state, ConfigQuant)\n dataO.loc[:, self.reportPeriodField] = dataO[self.yearField] * 100 + dataO[self.seasonField]\n dataO = dataO.drop_duplicates([self.codeField, self.reportPeriodField])\n # dataO = dataO.sort_values(self.reportPeriodField) # sort by date\n\n # get calendar\n tmp_state = \"select `date` from `%s`;\" % self.calendarTableName\n trade_calendar = readDB(tmp_state, ConfigQuant)\n trade_calendar = trade_calendar['date']\n\n # get latest time stamp for each stock in the target table\n tmp_state = \"select `%s`, max(`%s`) as %s from `%s` group by `%s`\" % (\n self.codeField, self.timeStampField, self.latestTimeStamp, self.targetTableName, self.codeField)\n target_latest_time_stamp = readDB(tmp_state, ConfigQuant)\n target_latest_time_stamp = target_latest_time_stamp.set_index(self.codeField)\n\n # get the latest trade date of the data in target table\n tmp_state = \"select max(`%s`) from `%s`\" % (self.dateField, self.targetTableName)\n target_latest_trade_date = readDB(tmp_state, ConfigQuant)\n target_latest_trade_date = target_latest_trade_date.iloc[0, 0]\n\n # process incremental data\n code_list = dataO[self.codeField].unique()\n\n data_tot_result = pd.DataFrame([])\n # use multiprocessing to improve computation hours\n if self.isMultiProcess:\n pool = Pool(processes=self.processNum)\n pool_results = []\n pool_data_first_date = []\n no_update_code_list = []\n # build pool\n for code in code_list:\n tmp_data = dataO.loc[dataO[self.codeField] == code]\n tmp_data = tmp_data.sort_values(self.reportPeriodField) # sorted by report period\n\n if tmp_data.empty:\n 
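The full-run path above follows a common chunked fan-out pattern: slice the code list, read one chunk from SQL, submit one task per code with Pool.apply_async, then collect results in order with .get(). A stripped-down, self-contained sketch of that pattern (fetch_chunk stands in for the readDB round-trip, process_one for coreComputation):

from multiprocessing import Pool

def process_one(code, rows):
    # Stand-in for the real per-code transformation.
    return code, sum(rows)

def run_chunked(codes, fetch_chunk, chunk_size=2, processes=4):
    results = []
    with Pool(processes=processes) as pool:
        for i in range(0, len(codes), chunk_size):
            chunk = codes[i:i + chunk_size]
            data = fetch_chunk(chunk)                 # one DB round-trip per chunk
            handles = [pool.apply_async(process_one, (c, data[c])) for c in chunk]
            results.extend(h.get() for h in handles)  # .get() re-raises worker errors
    return results

if __name__ == '__main__':
    fake_db = {'000001': [1, 2], '000002': [3], '600000': [4, 5, 6]}
    print(run_chunked(list(fake_db), lambda ch: {c: fake_db[c] for c in ch}))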
continue\n\n # find the latest time stamp, and compare it with the raw data, delete obsolete data if there exists\n is_new_data = False\n try:\n tmp_target_latest_time_stamp = target_latest_time_stamp.loc[code, self.latestTimeStamp]\n tmp_data_source_new = tmp_data.loc[tmp_data[self.timeStampField] >= tmp_target_latest_time_stamp]\n tmp_data_source_unexpanded = tmp_data.loc[tmp_data[self.releaseDateField] > target_latest_trade_date]\n tmp_data_source_new = tmp_data_source_new.append(tmp_data_source_unexpanded)\n tmp_data_source_new = tmp_data_source_new.drop_duplicates([self.codeField, self.yearField, self.seasonField])\n\n tmp_data_source_new = tmp_data_source_new.loc[~tmp_data_source_new[self.releaseDateField].isnull()]\n\n if not tmp_data_source_new.empty: # obsolete data\n data_new_first_data = tmp_data_source_new[self.releaseDateField].min() # find the earliest report in new update data\n\n if type(data_new_first_data).__name__ == 'str': # else data_new_first_data is nan\n is_new_data = True\n deleteObsoleteDataFromDB(code, data_new_first_data, self.dateField, self.codeField,\n self.targetTableName, ConfigQuant) # delete obsolet data not earlier than the eariliest report date\n except KeyError:\n is_new_data = True\n data_new_first_data = '2007-01-01' # this stock code is new to the target table\n\n if is_new_data: # have values updated or completely new\n tmp_result = pool.apply_async(self.coreComputation, (code, tmp_data, trade_calendar))\n pool_results.append(tmp_result)\n data_new_first_data = min(data_new_first_data, trade_calendar[trade_calendar > target_latest_trade_date].iloc[0])\n pool_data_first_date.append(data_new_first_data)\n else: # no new data from source table to update target table\n no_update_code_list.append(code)\n\n # get result from the pool\n for tmp_result, tmp_first_date in zip(pool_results, pool_data_first_date):\n data_result = tmp_result.get()\n data_result = data_result.loc[data_result[self.dateField] >= tmp_first_date] # slice data, from the earliest report release date\n print('%s regenerate %d data' % (code, data_result.shape[0]))\n data_tot_result = data_tot_result.append(data_result)\n\n # replicate the latest records for those not updated codes\n replicate_records = self.replicateLatestRecord(no_update_code_list, trade_calendar, target_latest_trade_date)\n data_tot_result = data_tot_result.append(replicate_records)\n\n else: # single process\n no_update_code_list = []\n for code in code_list:\n tmp_data = dataO.loc[dataO[self.codeField] == code]\n tmp_data = tmp_data.sort_values(self.reportPeriodField) # sorted by report period\n\n if tmp_data.empty:\n continue\n\n # find the latest time stamp, and compare it with the raw data, delete obsolete data if there exists\n has_new_data = False\n try:\n tmp_target_latest_time_stamp = target_latest_time_stamp.loc[code, self.latestTimeStamp]\n tmp_data_source_new = tmp_data[tmp_data[self.timeStampField] >= tmp_target_latest_time_stamp]\n tmp_data_source_unexpanded = tmp_data[tmp_data[self.releaseDateField] > target_latest_trade_date]\n tmp_data_source_new = tmp_data_source_new.append(tmp_data_source_unexpanded)\n tmp_data_source_new = tmp_data_source_new.drop_duplicates([self.codeField, self.yearField, self.seasonField])\n\n if not tmp_data_source_new.empty: # obsolete data\n tmp_data_new_first_data = tmp_data_source_new[\n self.releaseDateField].min() # find the earliest report in new update data\n if type(tmp_data_new_first_data).__name__ == 'str': # else tmp_data_new_first_data is nan\n has_new_data = 
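The incremental branch above boils down to one decision per stock code: among source rows newer than the target table's latest time stamp, or released after the last expanded trade date, take the earliest release date and regenerate everything from that date on; if there are none, just replicate the latest record forward. A compact sketch of that decision, assuming plain (release_date, time_stamp) tuples:

def regeneration_start(source_rows, target_latest_ts, target_latest_date):
    changed = [r for (r, ts) in source_rows
               if ts >= target_latest_ts or r > target_latest_date]
    return min(changed) if changed else None   # None -> only replicate last record

rows = [('2018-03-31', 5), ('2018-06-30', 9), ('2018-09-30', 12)]
print(regeneration_start(rows, 10, '2018-10-10'))   # '2018-09-30'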
True\n deleteObsoleteDataFromDB(code, tmp_data_new_first_data, self.dateField, self.codeField,\n self.targetTableName, ConfigQuant) # delete obsolet data later than the eariliest report date\n except KeyError:\n has_new_data = True # this stock code is new to the target table\n tmp_data_new_first_data = '2007-01-01'\n\n if has_new_data:\n data_result = self.coreComputation(code, tmp_data, trade_calendar)\n tmp_data_new_first_data = min(tmp_data_new_first_data, trade_calendar[trade_calendar > target_latest_trade_date].iloc[0])\n data_result = data_result.loc[data_result[self.dateField] >= tmp_data_new_first_data]\n print('%s regenerate %d data' % (code, data_result.shape[0]))\n data_tot_result = data_tot_result.append(data_result)\n else: # no new data from source table to update target table\n no_update_code_list.append(code)\n\n # replicate latest records for those not updated codes\n replicate_records = self.replicateLatestRecord(no_update_code_list, trade_calendar, target_latest_trade_date)\n data_tot_result = data_tot_result.append(replicate_records)\n\n if not data_tot_result.empty:\n # add timestamp\n data_tot_result['time_stamp'] = datetime.now()\n\n # dump chunk data into sql\n writeDB(self.targetTableName, data_tot_result, ConfigQuant, self.if_exist)\n self.if_exist = 'append'\n\n\n def coreComputation(self, tmp_code, tmp_data, tradedates):\n data_result = tmp_data.copy()\n\n # calculate yearly increment percent (yoy)\n # yoy_cols = ['parent_net_profits_yoy', 'eps_yoy', 'bvps_yoy', 'revenue_yoy', 'sales_per_share_yoy', 'cfo_per_share_yoy']\n yoy_cols = list(map(lambda x: x + '_yoy', self.valueForYOYFields))\n\n ts_data_cols = tmp_data.columns.tolist()\n for tmp_yoy_col, tmp_ori_col in zip(yoy_cols, self.valueForYOYFields):\n tmp_yoy = getFinancialReportAccountYOY(tmp_data, tmp_ori_col, self.reportPeriodField) # calculate derivative from original\n\n if tmp_yoy_col in ts_data_cols: # if tushare data already has a yoy column, fill nan\n tmp_yoy = pd.Series(tmp_yoy, index=tmp_data.index)\n\n tmp_idx = (~tmp_yoy.isnull()) & (tmp_data[tmp_yoy_col].isnull()) # fill nan by calculated data\n if tmp_idx.sum() > 0:\n data_result.loc[tmp_idx, tmp_yoy_col] = tmp_yoy[tmp_idx]\n else: # if tushare data do not have this yoy data, create a new column\n data_result.loc[:, tmp_yoy_col] = tmp_yoy\n\n # calculate dividend\n dvd_col_name = 'dividend'\n tmp_data.loc[:, dvd_col_name] = np.nan\n tmp_idx = ~tmp_data[self.splitDividendField].isnull()\n tmp_data.loc[tmp_idx, dvd_col_name] = tmp_data.loc[tmp_idx, self.splitDividendField].apply(lambda x: getStockCashDividend(x))\n tmp_data.loc[:, dvd_col_name] = tmp_data[dvd_col_name].astype('float')\n\n # calculate TTM values\n # ttm_cols = ['eps_ttm', 'cfo_per_share_ttm', 'parent_net_profits_ttm', 'sales_per_share_ttm', 'dividend_ttm']\n original_data_for_ttm_cols = self.valueForTTMFields.copy()\n # original_data_for_ttm_cols.append(dvd_col_name) #\n ttm_cols = list(map(lambda x: x + '_ttm', original_data_for_ttm_cols))\n for tmp_ttm_col, tmp_ori_col in zip(ttm_cols, original_data_for_ttm_cols):\n data_result.loc[:, tmp_ttm_col] = getFinancialReportAccountTTM(tmp_data, tmp_ori_col, self.reportPeriodField, self.yearField, self.seasonField) # calculate derivative from original\n\n # select columns to expand to daily data\n col_to_expand = [self.codeField, self.yearField, self.seasonField, self.releaseDateField]\n col_to_expand.extend(yoy_cols)\n col_to_expand.extend(ttm_cols)\n col_to_expand.extend(self.ratioFields)\n data_result = 
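Year-over-year growth on seasonal report data compares each period with the same season one year earlier, not with the previous row, which is what getFinancialReportAccountYOY has to respect. A pandas sketch of the idea (hypothetical column names; periods encoded as year * 100 + season as above):

import pandas as pd

df = pd.DataFrame({
    'period': [201601, 201602, 201701, 201702, 201801, 201802],
    'revenue': [10.0, 12.0, 11.0, 15.0, 13.2, 18.0],
}).sort_values('period')

# Group by season (period % 100) so pct_change steps one year at a time.
df['revenue_yoy'] = df.groupby(df['period'] % 100)['revenue'].pct_change()
print(df)
# 201701 -> (11 - 10) / 10 = 0.10, 201802 -> (18 - 15) / 15 = 0.20, first year NaN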
data_result[col_to_expand]\n\n # proliferate seasonal data into daily data\n today_str = datetime.strftime(datetime.now(), '%Y-%m-%d')\n daily_data_result = expandDataFromSeasonToDaily(tmp_code, data_result, tradedates, self.releaseDateField, today_str)\n\n # drop duplicated daily data\n daily_data_result = self.dropDailyRecordDuplicates(daily_data_result)\n\n return daily_data_result\n\n def replicateLatestRecord(self, no_update_code_list, trade_calendar, target_latest_trade_date):\n data_tot_result = pd.DataFrame([])\n\n # copy the latest records for those not updated codes\n today_str = datetime.strftime(datetime.now(), '%Y-%m-%d')\n expand_dates = trade_calendar[(trade_calendar > target_latest_trade_date) & (trade_calendar <= today_str)]\n if not expand_dates.empty: # today is not the latest record date in target table\n tmp_not_updated_codes = list(map(lambda x: \"'%s'\" % x, no_update_code_list))\n tmp_not_updated_codes = ','.join(tmp_not_updated_codes)\n\n # get the latest record from target table\n tmp_state = \"select * from `%s` where `%s` in (%s) and `%s` = '%s'\" % (self.targetTableName,\n self.codeField,\n tmp_not_updated_codes,\n self.dateField, target_latest_trade_date)\n tmp_latest_records = readDB(tmp_state, ConfigQuant)\n\n # expand the latest record up to today\n for tmp_idx in tmp_latest_records.index:\n tmp_record = tmp_latest_records.loc[tmp_idx]\n tmp_expand_records = pd.DataFrame([])\n tmp_expand_records = tmp_expand_records.append([tmp_record] * expand_dates.size)\n tmp_expand_records.loc[:, self.dateField] = expand_dates.tolist()\n data_tot_result = data_tot_result.append(tmp_expand_records)\n\n data_tot_result.loc[:, self.timeStampField] = datetime.now()\n\n return data_tot_result\n\n def dropDailyRecordDuplicates(self, daily_records):\n # seperate all duplicated data of one stock\n tmp_idx = daily_records[self.dateField].duplicated(keep=False)\n duplicated_daily_data = daily_records.loc[tmp_idx]\n distinct_daily_data = daily_records.loc[~tmp_idx]\n\n if not duplicated_daily_data.empty:\n duplicated_daily_data.loc[:, self.reportPeriodField] = duplicated_daily_data[self.yearField] * 100 + duplicated_daily_data[self.seasonField]\n\n # get the latest report period of all duplicated group\n max_report_period = duplicated_daily_data.groupby(self.dateField)[self.reportPeriodField].max()\n\n # use date and max report period to draw unique data from duplicated data\n new_distinct_daily_data = pd.DataFrame([])\n for tmp_date in max_report_period.index:\n tmp_uni_data = duplicated_daily_data.loc[(duplicated_daily_data[self.dateField] == tmp_date) &\n (duplicated_daily_data[self.reportPeriodField] == max_report_period[tmp_date])]\n\n new_distinct_daily_data = new_distinct_daily_data.append(tmp_uni_data)\n\n # combine new distinct daily with the original distinct daily data\n new_distinct_daily_data = new_distinct_daily_data.drop(self.reportPeriodField, axis=1)\n distinct_daily_data = distinct_daily_data.append(new_distinct_daily_data)\n\n return distinct_daily_data","sub_path":"Mapping/StockFundamentalTushareMapping.py","file_name":"StockFundamentalTushareMapping.py","file_ext":"py","file_size_in_byte":22918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"205362551","text":"from protozoo.configtask import ConfigAction\n\nBottlePyAction=ConfigAction()\nBottlePyAction.codename='bottlepy'\nBottlePyAction.name='Bottle web framework'\nBottlePyAction.description='Bottle is a microframework for create web 
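One compact way to express the season-to-daily expansion performed by expandDataFromSeasonToDaily (a sketch, not the actual helper, which lives in Utils.Algorithms): as-of join each trade date against the latest report released on or before it.

import pandas as pd

trade_days = pd.DataFrame({'date': pd.date_range('2018-04-20', '2018-04-27', freq='B')})
reports = pd.DataFrame({
    'release_date': pd.to_datetime(['2018-03-30', '2018-04-25']),
    'eps_ttm': [1.10, 1.25],
}).sort_values('release_date')

daily = pd.merge_asof(trade_days.sort_values('date'), reports,
                      left_on='date', right_on='release_date',
                      direction='backward')   # latest report at or before each day
print(daily[['date', 'eps_ttm']])
# days before 2018-04-25 carry 1.10, from 2018-04-25 on they carry 1.25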
services'\nBottlePyAction.script_path='webservers/python3/bottlepy.py'\nBottlePyAction.script_interpreter='python3'\nBottlePyAction.parameters=''\nBottlePyAction.extra_files=[]\n\n","sub_path":"protozoo/configtasks/bottlepy.py","file_name":"bottlepy.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"541615533","text":"# -*- coding: utf-8 -*-\n'''\nAuthor: Ahmed Ammar, ahmed.ammar@fst.utm.tn\nPurpose: - - - \nInputs: - - -\nOutputs: - - -\nDate Created: Sun Mar 11 22:50:48 2018\nDate Modified: M D, Y\nDate Released: M D, Y\nVersions:\n V0.01: ---\n \n'''\nfrom LoadData import Load_DAQ_Data\nfrom DAQ_DataPhase import FixDAQ_DataPhase\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef Plot_Data(pathnames=[], filenames=[], TitlePlot=[]):\n \"\"\"\n Plot Amplitude and Phase from AWESOME DATA\n \"\"\"\n max_nsp=len(pathnames)\n \n nsp=0 # number of subplots\n fig=plt.figure(figsize=(7.5, 5+max_nsp/4))\n plt.gcf().canvas.set_window_title('PyDAQviewer: Plot Narrowband Data')\n for pathname, filename,title in zip(pathnames,filenames,TitlePlot):\n nsp+=1\n \n FixData=FixDAQ_DataPhase(pathname, filename)\n time, Data, StationInfo =Load_DAQ_Data(FixData.path, FixData.filename)\n \n fs=StationInfo['fs']\n \n if StationInfo['data_type']==1.0:\n Data_amp= Data\n ##Averaging\n AveragingLengthAmp = 10\n data_amp_averaged = np.zeros((len(Data_amp) - AveragingLengthAmp + 1,1),float)\n for jj in range(0, (len(Data_amp)-AveragingLengthAmp+1)):\n data_amp_averaged[jj] = np.mean(Data_amp[jj:(jj+AveragingLengthAmp-1)])\n ## Figure\n fig.add_subplot(max_nsp, 1, nsp)\n plt.plot(time[:len(data_amp_averaged)], 20*np.log10(data_amp_averaged), lw=1, color='r')\n plt.plot(time, 20*np.log10(Data_amp), ls='-', lw=.5, color='b', alpha=.5)\n plt.title(title, fontsize=10, weight = 'bold')\n plt.xlabel(\"Time (UT)\", fontsize=8, weight = 'bold')\n plt.ylabel(\"Amplitude (dB)\", fontsize=8, weight = 'bold')\n plt.xlim(0,24)\n \n else:\n Data_phi= Data\n \n ##phase unwrapped\n PhaseFixLength90 = 3000\n PhaseFixLength180 =3000\n averaging_length=fs*PhaseFixLength180\n # print(averaging_length)\n data_phase_fixed180 = FixData.fix_phasedata180(Data_phi, averaging_length)\n # print(data_phase_fixed180)\n data_phase_fixed90 = FixData.fix_phasedata90(data_phase_fixed180, averaging_length)\n data_phase_unwrapped = np.zeros((len(data_phase_fixed90),1),float)\n data_phase_unwrapped[0] = data_phase_fixed90[0]\n \n offset = 0\n for jj in range(1, (len(data_phase_fixed90))):\n if data_phase_fixed90[jj]-data_phase_fixed90[jj-1] > 180:\n offset = offset + 360\n elif data_phase_fixed90[jj]-data_phase_fixed90[jj-1] < -180:\n offset = offset - 360\n data_phase_unwrapped[jj] = data_phase_fixed90[jj] - offset\n \n ##Averaging\n AveragingLengthPhase = 10 \n data_phase_averaged = np.zeros((len(data_phase_unwrapped) - AveragingLengthPhase + 1,1),float)\n for jj in range(0, (len(data_phase_unwrapped) - AveragingLengthPhase + 1)):\n data_phase_averaged[jj] = np.mean(data_phase_unwrapped[jj:(jj+AveragingLengthPhase-1)])\n \n ## Figure\n fig.add_subplot(max_nsp, 1, nsp)\n plt.plot(time[:len(data_phase_averaged)], data_phase_averaged, lw=1, color='r')\n plt.plot(time, data_phase_unwrapped, lw=.5, color='b', alpha=.5)\n plt.title(title, fontsize=10, weight = 'bold')\n plt.xlabel(\"Time (UT)\", fontsize=8, weight = 'bold')\n plt.ylabel(\"Phase (deg)\", fontsize=8, weight = 'bold')\n plt.xlim(0,24)\n \n plt.tight_layout() \n plt.show()\nif __name__ == 
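The element-wise averaging loops in PlotData.py's Plot_Data (both the amplitude and the phase branch) compute a plain moving average; a vectorized sketch of the same idea with np.convolve, assuming a 1-D signal:

import numpy as np

def moving_average(x, n=10):
    # 'valid' keeps only windows fully inside the signal, i.e. one output
    # per position in range(len(x) - n + 1), like the loops in the file.
    return np.convolve(x, np.ones(n) / n, mode='valid')

sig = np.arange(20, dtype=float)
print(moving_average(sig, 5))   # [ 2.  3. ... 17.]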
\"__main__\":\n Plot_Data(pathnames=[\"F:\\\\NarrowbandData\\\\Tunisia\\\\2017\\\\09\\\\05\\\\\",\"F:\\\\NarrowbandData\\\\Tunisia\\\\2017\\\\09\\\\06\\\\\" ], filenames=[\"*170905*NRK_001A.mat\",\"*170906*NRK_001B.mat\"], TitlePlot=[\"1\",\"2\"])","sub_path":"AWESOME/NarrowBand/plot_narrowband/PlotData.py","file_name":"PlotData.py","file_ext":"py","file_size_in_byte":3945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"176230066","text":"# -*- coding: utf-8 -*-\nimport pandas as pd\n\ndf = pd.read_csv('AustWeather_zeros.csv', dtype={'Date':str, 'Location':str, 'MinTemp':float, 'MaxTemp':float, 'Rainfall':float, 'Evaporation':float, 'Sunshine':float, 'WindGustSpeed':float, 'WindSpeed9am':float, 'WindSpeed3pm':float, 'Pressure9am':float, 'Pressure3pm':float, 'Cloud9am':float, 'Cloud3pm':float, 'Temp9am':float, 'Temp3pm':float, 'RainToday':str, \"RainTomorrow\":str})\n\nlocations = df.groupby(df.Location)\nalbury = locations.get_group(\"Albury\")\n\nalbury_train = albury.iloc[1462:2724,]\nalbury_test = albury.iloc[2724:,]\nalbury_train.to_excel(\"weather_data/albury_train.xlsx\")\nalbury_test.to_excel(\"weather_data/albury_test.xlsx\")\nprint(\"albury\",str(len(albury_train)))\nprint(\"albury\",str(len(albury_test)))\n\nbadgeryscreek = locations.get_group(\"BadgerysCreek\")\nbadgeryscreek_train = badgeryscreek.iloc[1431:2693,]\nbadgeryscreek_test = badgeryscreek.iloc[2693:,]\nbadgeryscreek_train.to_excel(\"weather_data/badgeryscreek_train.xlsx\")\nbadgeryscreek_test.to_excel(\"weather_data/badgeryscreek_test.xlsx\")\nprint(\"badgery\",len(badgeryscreek_train))\nprint(\"badgery\",len(badgeryscreek_test))\n\ncobar = locations.get_group(\"Cobar\")\ncobar_train = cobar.iloc[1431:2693,]\ncobar_test = cobar.iloc[2693:,]\ncobar_train.to_excel(\"weather_data/cobar_train.xlsx\")\ncobar_test.to_excel(\"weather_data/cobar_test.xlsx\")\nprint(\"cobar\",str(len(cobar_train)))\nprint(\"cobar\",str(len(cobar_test)))\n\ncoffsharbour = locations.get_group(\"CoffsHarbour\")\ncoffsharbour_train = coffsharbour.iloc[1431:2693,]\ncoffsharbour_test = coffsharbour.iloc[2693:,]\ncoffsharbour_train.to_excel(\"weather_data/coffsharbour_train.xlsx\")\ncoffsharbour_test.to_excel(\"weather_data/coffsharbour_test.xlsx\")\nprint(\"coffsharbour\",str(len(coffsharbour_train)))\nprint(\"coffsharbour\",str(len(coffsharbour_test)))\n\n\nmoree = locations.get_group(\"Moree\")\nmoree_train = moree.iloc[1431:2693,]\nmoree_test = moree.iloc[2693:,]\nmoree_train.to_excel(\"weather_data/moree_train.xlsx\")\nmoree_test.to_excel(\"weather_data/moree_test.xlsx\")\nprint(\"moree\",str(len(moree_train)))\nprint(\"moree\",str(len(moree_test)))\n\nnewcastle = locations.get_group(\"Newcastle\")\nnewcastle_train = newcastle.iloc[1462:2724,]\nnewcastle_test = newcastle.iloc[2724:,]\nnewcastle_train.to_excel(\"weather_data/newcastle_train.xlsx\")\nnewcastle_test.to_excel(\"weather_data/newcastle_test.xlsx\")\nprint(\"newcastle\",str(len(newcastle_train)))\nprint(\"newcastle\",str(len(newcastle_test)))\n\nnorahhead = locations.get_group(\"0rahHead\")\nnorahhead_train = norahhead.iloc[1431:2693,]\nnorahhead_test = norahhead.iloc[2693:,]\nnorahhead_train.to_excel(\"weather_data/norahhead_train.xlsx\")\nnorahhead_test.to_excel(\"weather_data/norahhead_test.xlsx\")\nprint(\"norahhead\",str(len(norahhead_train)))\nprint(\"norahhead\",str(len(norahhead_test)))\n\nnorfolkisland = locations.get_group(\"0rfolkIsland\")\nnorfolkisland_train = norfolkisland.iloc[1431:2693,]\nnorfolkisland_test = 
norfolkisland.iloc[2693:,]\nnorfolkisland_train.to_excel(\"weather_data/norfolkisland_train.xlsx\")\nnorfolkisland_test.to_excel(\"weather_data/norfolkisland_test.xlsx\")\nprint(\"norfolkisland\",str(len(norfolkisland_train)))\nprint(\"norfolkisland\",str(len(norfolkisland_test)))\n\n\npenrith = locations.get_group(\"Penrith\")\npenrith_train = penrith.iloc[1461:2723,]\npenrith_test = penrith.iloc[2723:,]\npenrith_train.to_excel(\"weather_data/penrith_train.xlsx\")\npenrith_test.to_excel(\"weather_data/penrith_test.xlsx\")\nprint(\"penrith\",str(len(penrith_train)))\nprint(\"penrith\",str(len(penrith_test)))\n\nrichmond = locations.get_group(\"Richmond\")\nrichmond_train = richmond.iloc[1431:2693,]\nrichmond_test = richmond.iloc[2693:,]\nrichmond_train.to_excel(\"weather_data/richmond_train.xlsx\")\nrichmond_test.to_excel(\"weather_data/richmond_test.xlsx\")\nprint(\"richmond\",str(len(richmond_train)))\nprint(\"richmond\",str(len(richmond_test)))\n\nsydney = locations.get_group(\"Sydney\")\nsydney_train = sydney.iloc[1766:3028,]\nsydney_test = sydney.iloc[3028:,]\nsydney_train.to_excel(\"weather_data/sydney_train.xlsx\")\nsydney_test.to_excel(\"weather_data/sydney_test.xlsx\")\nprint(\"sydney\",len(sydney_train))\nprint(\"sydney\",len(sydney_test))\n\nsydneyairport = locations.get_group(\"SydneyAirport\")\nsydneyairport_train = sydneyairport.iloc[1431:2693,]\nsydneyairport_test = sydneyairport.iloc[2693:,]\nsydneyairport_train.to_excel(\"weather_data/sydneyairport_train.xlsx\")\nsydneyairport_test.to_excel(\"weather_data/sydneyairport_test.xlsx\")\nprint(\"sydneyairport\",str(len(sydneyairport_train)))\nprint(\"sydneyairport\",str(len(sydneyairport_test)))\n\nwaggawagga = locations.get_group(\"WaggaWagga\")\nwaggawagga_train = waggawagga.iloc[1431:2693,]\nwaggawagga_test = waggawagga.iloc[2693:,]\nwaggawagga_train.to_excel(\"weather_data/waggawagga_train.xlsx\")\nwaggawagga_test.to_excel(\"weather_data/waggawagga_test.xlsx\")\nprint(\"waggawagga\",str(len(waggawagga_train)))\nprint(\"waggawagga\",str(len(waggawagga_test)))\n\n\nwilliamtown = locations.get_group(\"Williamtown\")\nwilliamtown_train = williamtown.iloc[1431:2693,]\nwilliamtown_test = williamtown.iloc[2693:,]\nwilliamtown_train.to_excel(\"weather_data/williamtown_train.xlsx\")\nwilliamtown_test.to_excel(\"weather_data/williamtown_test.xlsx\")\nprint(\"williamtown\",str(len(williamtown_train)))\nprint(\"williamtown\",str(len(williamtown_test)))\n\nwollongong = locations.get_group(\"Wollongong\")\nwollongong_train = wollongong.iloc[1462:2724,]\nwollongong_test = wollongong.iloc[2724:,]\nwollongong_train.to_excel(\"weather_data/wollongong_train.xlsx\")\nwollongong_test.to_excel(\"weather_data/wollongong_test.xlsx\")\nprint(\"wollongong\",str(len(wollongong_train)))\nprint(\"wollongong\",str(len(wollongong_test)))\n\ncanberra = locations.get_group(\"Canberra\")\ncanberra_train = canberra.iloc[1858:3120,]\ncanberra_test = canberra.iloc[3120:,]\ncanberra_train.to_excel(\"weather_data/canberra_train.xlsx\")\ncanberra_test.to_excel(\"weather_data/canberra_test.xlsx\")\nprint(\"canberra\", str(len(canberra_train)))\nprint(\"canberra\", str(len(canberra_test)))\n\ntuggeranong = locations.get_group(\"Tuggera0ng\")\ntuggeranong_train = tuggeranong.iloc[1461:2723,]\ntuggeranong_test = 
tuggeranong.iloc[2723:,]\ntuggeranong_train.to_excel(\"weather_data/tuggeranong_train.xlsx\")\ntuggeranong_test.to_excel(\"weather_data/tuggeranong_test.xlsx\")\nprint(\"tuggeranong\",str(len(tuggeranong_train)))\nprint(\"tuggeranong\",str(len(tuggeranong_test)))\n\nmountginini = locations.get_group(\"MountGinini\")\nmountginini_train = mountginini.iloc[1462:2724,]\nmountginini_test = mountginini.iloc[2724:,]\nmountginini_train.to_excel(\"weather_data/mountginini_train.xlsx\")\nmountginini_test.to_excel(\"weather_data/mountginini_test.xlsx\")\nprint(\"mountginini\",str(len(mountginini_train)))\nprint(\"mountginini\",str(len(mountginini_test)))\n\nballarat = locations.get_group(\"Ballarat\")\nballarat_train = ballarat.iloc[1462:2724,]\nballarat_test = ballarat.iloc[2724:,]\nballarat_train.to_excel(\"weather_data/ballarat_train.xlsx\")\nballarat_test.to_excel(\"weather_data/ballarat_test.xlsx\")\nprint(\"ballarat\",str(len(ballarat_train)))\nprint(\"ballarat\",str(len(ballarat_test)))\n\nbendigo = locations.get_group(\"Bendigo\")\nbendigo_train = bendigo.iloc[1462:2724,]\nbendigo_test = bendigo.iloc[2724:,]\nbendigo_train.to_excel(\"weather_data/bendigo_train.xlsx\")\nbendigo_test.to_excel(\"weather_data/bendigo_test.xlsx\")\nprint(\"bendigo\",str(len(bendigo_train)))\nprint(\"bendigo\",str(len(bendigo_test)))\n\n\nsale = locations.get_group(\"Sale\")\nsale_train = sale.iloc[1431:2693,]\nsale_test = sale.iloc[2693:,]\nsale_train.to_excel(\"weather_data/sale_train.xlsx\")\nsale_test.to_excel(\"weather_data/sale_test.xlsx\")\nprint(\"sale\",str(len(sale_train)))\nprint(\"sale\",str(len(sale_test)))\n\nmelbourne = locations.get_group(\"Melbourne\")\nmelbourne_train = melbourne.iloc[1615:2877,]\nmelbourne_test = melbourne.iloc[2877:,]\nmelbourne_train.to_excel(\"weather_data/melbourne_train.xlsx\")\nmelbourne_test.to_excel(\"weather_data/melbourne_test.xlsx\")\nprint(\"melbourne\", len(melbourne_train))\nprint(\"melbourne\", len(melbourne_test))\n\nmelbourneairport = locations.get_group(\"MelbourneAirport\")\nmelbourneairport_train = melbourneairport.iloc[1431:2693,]\nmelbourneairport_test = melbourneairport.iloc[2693:,]\nmelbourneairport_train.to_excel(\"weather_data/melbourneairport_train.xlsx\")\nmelbourneairport_test.to_excel(\"weather_data/melbourneairport_test.xlsx\")\nprint(\"melbourneairport\",str(len(melbourneairport_train)))\nprint(\"melbourneairport\",str(len(melbourneairport_test)))\n\nmildura = locations.get_group(\"Mildura\")\nmildura_train = mildura.iloc[1431:2693,]\nmildura_test = mildura.iloc[2693:,]\nmildura_train.to_excel(\"weather_data/mildura_train.xlsx\")\nmildura_test.to_excel(\"weather_data/mildura_test.xlsx\")\nprint(\"mildura\",str(len(mildura_train)))\nprint(\"mildura\",str(len(mildura_test)))\n\nnhil = locations.get_group(\"Nhil\")\nnhil_train = nhil.iloc[:1262,]\nnhil_test = nhil.iloc[1262:,]\nnhil_train.to_excel(\"weather_data/nhil_train.xlsx\")\nnhil_test.to_excel(\"weather_data/nhil_test.xlsx\")\nprint(\"nhil\",len(nhil_train))\nprint(\"nhil\",len(nhil_test))\n\nportland = locations.get_group(\"Portland\")\nportland_train = portland.iloc[1431:2693,]\nportland_test = portland.iloc[2693:,]\nportland_train.to_excel(\"weather_data/portland_train.xlsx\")\nportland_test.to_excel(\"weather_data/portland_test.xlsx\")\nprint(\"portland\",str(len(portland_train)))\nprint(\"portland\",str(len(portland_test)))\n\nwatsonia = locations.get_group(\"Watsonia\")\nwatsonia_train = watsonia.iloc[1431:2693,]\nwatsonia_test = 
watsonia.iloc[2693:,]\nwatsonia_train.to_excel(\"weather_data/watsonia_train.xlsx\")\nwatsonia_test.to_excel(\"weather_data/watsonia_test.xlsx\")\nprint(\"watsonia\",str(len(watsonia_train)))\nprint(\"watsonia\",str(len(watsonia_test)))\n\ndartmoor = locations.get_group(\"Dartmoor\")\ndartmoor_train = dartmoor.iloc[1431:2693,]\ndartmoor_test = dartmoor.iloc[2693:,]\ndartmoor_train.to_excel(\"weather_data/dartmoor_train.xlsx\")\ndartmoor_test.to_excel(\"weather_data/dartmoor_test.xlsx\")\nprint(\"dartmoor\",str(len(dartmoor_train)))\nprint(\"dartmoor\",str(len(dartmoor_test)))\n\nbrisbane = locations.get_group(\"Brisbane\")\nbrisbane_train = brisbane.iloc[1615:2877,]\nbrisbane_test = brisbane.iloc[2877:,]\nbrisbane_train.to_excel(\"weather_data/brisbane_train.xlsx\")\nbrisbane_test.to_excel(\"weather_data/brisbane_test.xlsx\")\nprint(\"brisbane\",len(brisbane_train))\nprint(\"brisbane\",len(brisbane_test))\n\ncairns = locations.get_group(\"Cairns\")\ncairns_train = cairns.iloc[1462:2724,]\ncairns_test = cairns.iloc[2724:,]\ncairns_train.to_excel(\"weather_data/cairns_train.xlsx\")\ncairns_test.to_excel(\"weather_data/cairns_test.xlsx\")\nprint(\"cairns\",str(len(cairns_train)))\nprint(\"cairns\",str(len(cairns_test)))\n\ngoldcoast = locations.get_group(\"GoldCoast\")\ngoldcoast_train = goldcoast.iloc[1462:2724,]\ngoldcoast_test = goldcoast.iloc[2724:,]\ngoldcoast_train.to_excel(\"weather_data/goldcoast_train.xlsx\")\ngoldcoast_test.to_excel(\"weather_data/goldcoast_test.xlsx\")\nprint(\"goldcoast\",str(len(goldcoast_train)))\nprint(\"goldcoast\",str(len(goldcoast_test)))\n\ntownsville = locations.get_group(\"Townsville\")\ntownsville_train = townsville.iloc[1462:2724,]\ntownsville_test = townsville.iloc[2724:,]\ntownsville_train.to_excel(\"weather_data/townsville_train.xlsx\")\ntownsville_test.to_excel(\"weather_data/townsville_test.xlsx\")\nprint(\"townsville\",str(len(townsville_train)))\nprint(\"townsville\",str(len(townsville_test)))\n\nadelaide = locations.get_group(\"Adelaide\")\nadelaide_train = adelaide.iloc[1615:2877,]\nadelaide_test = adelaide.iloc[2877:,]\nadelaide_train.to_excel(\"weather_data/adelaide_train.xlsx\")\nadelaide_test.to_excel(\"weather_data/adelaide_test.xlsx\")\nprint(\"adelaide\",str(len(adelaide_train)))\nprint(\"adelaide\",str(len(adelaide_test)))\n\nmountgambier = locations.get_group(\"MountGambier\")\nmountgambier_train = mountgambier.iloc[1462:2724,]\nmountgambier_test = mountgambier.iloc[2724:,]\nmountgambier_train.to_excel(\"weather_data/mountgambier_train.xlsx\")\nmountgambier_test.to_excel(\"weather_data/mountgambier_test.xlsx\")\nprint(\"mountgambier\",str(len(mountgambier_train)))\nprint(\"mountgambier\",str(len(mountgambier_test)))\n\nnuriootpa = locations.get_group(\"Nuriootpa\")\nnuriootpa_train = nuriootpa.iloc[1431:2693,]\nnuriootpa_test = nuriootpa.iloc[2693:,]\nnuriootpa_train.to_excel(\"weather_data/nuriootpa_train.xlsx\")\nnuriootpa_test.to_excel(\"weather_data/nuriootpa_test.xlsx\")\nprint(\"nuriootpa\",str(len(nuriootpa_train)))\nprint(\"nuriootpa\",str(len(nuriootpa_test)))\n\nwoomera = locations.get_group(\"Woomera\")\nwoomera_train = woomera.iloc[1431:2693,]\nwoomera_test = woomera.iloc[2693:,]\nwoomera_train.to_excel(\"weather_data/woomera_train.xlsx\")\nwoomera_test.to_excel(\"weather_data/woomera_test.xlsx\")\nprint(\"woomera\",str(len(woomera_train)))\nprint(\"woomera\",str(len(woomera_test)))\n\nalbany = locations.get_group(\"Albany\")\nalbany_train = albany.iloc[1462:2724,]\nalbany_test = 
albany.iloc[2724:,]\nalbany_train.to_excel(\"weather_data/albany_train.xlsx\")\nalbany_test.to_excel(\"weather_data/albany_test.xlsx\")\nprint(\"albany\",str(len(albany_train)))\nprint(\"albany\",str(len(albany_test)))\n\nwitchcliffe = locations.get_group(\"Witchcliffe\")\nwitchcliffe_train = witchcliffe.iloc[1431:2693,]\nwitchcliffe_test = witchcliffe.iloc[2693:,]\nwitchcliffe_train.to_excel(\"weather_data/witchcliffe_train.xlsx\")\nwitchcliffe_test.to_excel(\"weather_data/witchcliffe_test.xlsx\")\nprint(\"witchcliffe\",str(len(witchcliffe_train)))\nprint(\"witchcliffe\",str(len(witchcliffe_test)))\n\npearceraaf = locations.get_group(\"PearceRAAF\")\npearceraaf_train = pearceraaf.iloc[1431:2693,]\npearceraaf_test = pearceraaf.iloc[2693:,]\npearceraaf_train.to_excel(\"weather_data/pearceraaf_train.xlsx\")\npearceraaf_test.to_excel(\"weather_data/pearceraaf_test.xlsx\")\nprint(\"pearceraaf\",str(len(pearceraaf_train)))\nprint(\"pearceraaf\",str(len(pearceraaf_test)))\n\nperthairport = locations.get_group(\"PerthAirport\")\nperthairport_train = perthairport.iloc[1431:2693,]\nperthairport_test = perthairport.iloc[2693:,]\nperthairport_train.to_excel(\"weather_data/perthairport_train.xlsx\")\nperthairport_test.to_excel(\"weather_data/perthairport_test.xlsx\")\nprint(\"perthairport\",str(len(perthairport_train)))\nprint(\"perthairport\",str(len(perthairport_test)))\n\nperth = locations.get_group(\"Perth\")\nperth_train = perth.iloc[1615:2877,]\nperth_test = perth.iloc[2877:,]\nperth_train.to_excel(\"weather_data/perth_train.xlsx\")\nperth_test.to_excel(\"weather_data/perth_test.xlsx\")\nprint(\"perth\",str(len(perth_train)))\nprint(\"perth\",str(len(perth_test)))\n\nsalmongums = locations.get_group(\"SalmonGums\")\nsalmongums_train = salmongums.iloc[1423:2685,]\nsalmongums_test = salmongums.iloc[2685:,]\nsalmongums_train.to_excel(\"weather_data/salmongums_train.xlsx\")\nsalmongums_test.to_excel(\"weather_data/salmongums_test.xlsx\")\nprint(\"salmongums\",str(len(salmongums_train)))\nprint(\"salmongums\",str(len(salmongums_test)))\n\nwalpole = locations.get_group(\"Walpole\")\nwalpole_train = walpole.iloc[1428:2690,]\nwalpole_test = walpole.iloc[2690:,]\nwalpole_train.to_excel(\"weather_data/walpole_train.xlsx\")\nwalpole_test.to_excel(\"weather_data/walpole_test.xlsx\")\nprint(\"walpole\",str(len(walpole_train)))\nprint(\"walpole\",str(len(walpole_test)))\n\nhobart = locations.get_group(\"Hobart\")\nhobart_train = hobart.iloc[1615:2877,]\nhobart_test = hobart.iloc[2877:,]\nhobart_train.to_excel(\"weather_data/hobart_train.xlsx\")\nhobart_test.to_excel(\"weather_data/hobart_test.xlsx\")\nprint(\"hobart\",str(len(hobart_train)))\nprint(\"hobart\",str(len(hobart_test)))\n\nlaunceston = locations.get_group(\"Launceston\")\nlaunceston_train = launceston.iloc[1462:2724,]\nlaunceston_test = launceston.iloc[2724:,]\nlaunceston_train.to_excel(\"weather_data/launceston_train.xlsx\")\nlaunceston_test.to_excel(\"weather_data/launceston_test.xlsx\")\nprint(\"launceston\",str(len(launceston_train)))\nprint(\"launceston\",str(len(launceston_test)))\n\nalicesprings = locations.get_group(\"AliceSprings\")\nalicesprings_train = alicesprings.iloc[1462:2724,]\nalicesprings_test = alicesprings.iloc[2724:,]\nalicesprings_train.to_excel(\"weather_data/alicesprings_train.xlsx\")\nalicesprings_test.to_excel(\"weather_data/alicesprings_test.xlsx\")\nprint(\"alicesprings\",str(len(alicesprings_train)))\nprint(\"alicesprings\",str(len(alicesprings_test)))\n\ndarwin = locations.get_group(\"Darwin\")\ndarwin_train = 
darwin.iloc[1615:2877,]\ndarwin_test = darwin.iloc[2877:,]\ndarwin_train.to_excel(\"weather_data/darwin_train.xlsx\")\ndarwin_test.to_excel(\"weather_data/darwin_test.xlsx\")\nprint(\"darwin\",str(len(darwin_train)))\nprint(\"darwin\",str(len(darwin_test)))\n\nkatherine = locations.get_group(\"Katherine\")\nkatherine_train = katherine.iloc[:1262,]\nkatherine_test = katherine.iloc[1262:,]\nkatherine_train.to_excel(\"weather_data/katherine_train.xlsx\")\nkatherine_test.to_excel(\"weather_data/katherine_test.xlsx\")\nprint(\"katherine\",str(len(katherine_train)))\nprint(\"katherine\",str(len(katherine_test)))\n\nuluru = locations.get_group(\"Uluru\")\nuluru_train = uluru.iloc[:1262,]\nuluru_test = uluru.iloc[1262:,]\nuluru_train.to_excel(\"weather_data/uluru_train.xlsx\")\nuluru_test.to_excel(\"weather_data/uluru_test.xlsx\")\nprint(\"uluru\",str(len(uluru_train)))\nprint(\"uluru\",str(len(uluru_test)))","sub_path":"wGSTLNN/wGSTLNN/FileHandling.py","file_name":"FileHandling.py","file_ext":"py","file_size_in_byte":16917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"594806402","text":"#!/usr/bin/python3\nprint(\"Importing libraries\")\nimport logging, sqlite3, signal, cherrypy, sys, os\nfrom logging.handlers import RotatingFileHandler\nfrom jinja2 import Environment, FileSystemLoader\n\nfrom libs.config import Config\nfrom libs.mails import Mails\nfrom libs.board import Board\nfrom libs.i2cbus import Bus\nfrom libs.lcd import Lcd\nfrom libs.delayer import Delay\nfrom libs.database import Database\nfrom libs.thermometers import Temps\nfrom libs.arduinos import Arduinos\nfrom libs.aquariums import Aquariums\nfrom libs.room import Room\nfrom libs.web import WebRoot, WebSettings\n\nclass Core(object):\n\n def __init__(self):\n self.thread_pool = {}\n\n self._configure_logger()\n self._initialize_config()\n self._initialize_mails()\n self._initialize_board()\n self._initialize_lcd()\n self._initialize_database()\n self._initialize_thermometers()\n self._initialize_arduinos()\n self._restore_aquariums()\n self._initialize_web()\n self._initialize_room()\n\n signal.signal(signal.SIGINT, self.cleanup)\n signal.signal(signal.SIGTERM, self.cleanup)\n self.lcd.writeMsg(\"Gotowe \")\n\n logging.info(\"{} wątków działa\".format(len(self.thread_pool)))\n\n def _configure_logger(self):\n logFormatter = logging.Formatter(fmt=\"[%(asctime)-15s][%(levelname)s] %(message)s\", datefmt='%d.%m.%Y %H:%M:%S')\n log = logging.getLogger()\n log.setLevel(logging.DEBUG)\n\n if not os.path.isdir(\"logs\"):\n os.mkdir(\"logs\")\n\n fileHandler = RotatingFileHandler(\"logs/akwaria.log\", maxBytes=10*1024 ,backupCount=5)\n fileHandler.setFormatter(logFormatter)\n fileHandler.setLevel(logging.INFO)\n log.addHandler(fileHandler)\n\n consoleHandler = logging.StreamHandler(sys.stdout)\n consoleHandler.setFormatter(logFormatter)\n log.addHandler(consoleHandler)\n\n log.info(\"[STARTING] Uruchamianie...\")\n\n def _initialize_config(self):\n self.config = Config()\n self.config.load()\n\n def _initialize_mails(self):\n \tself.mails = Mails(self)\n\n def _initialize_board(self):\n self.board = Board(self)\n self.thread_pool[\"board_alive\"] = Delay(target=self.board.alive, delay=3, repeat=True)\n\n def _initialize_lcd(self):\n self.bus = Bus()\n self.lcd = Lcd(bus=self.bus)\n self.lcd.writeMsg(\"Uruchamianie\")\n\n def _initialize_database(self):\n self.db = Database('sqlite.db')\n self.thread_pool[\"sqlite_queue\"] = Delay(target=self.db.begin, daemon=False, 
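FileHandling.py repeats the same slice/export/print block once per city; only the group name and the two row indices change. A table-driven sketch that keeps the per-city row ranges but removes the duplication (city list abbreviated, dtype arguments omitted, paths as in the file):

import pandas as pd

SPLITS = {               # city -> (train_start, boundary); test runs to the end
    'Albury': (1462, 2724),
    'Cobar': (1431, 2693),
    'Sydney': (1766, 3028),
    'Uluru': (0, 1262),
}

df = pd.read_csv('AustWeather_zeros.csv')
for city, (start, mid) in SPLITS.items():
    g = df[df['Location'] == city]        # equivalent to groupby(...).get_group(city)
    train, test = g.iloc[start:mid], g.iloc[mid:]
    train.to_excel('weather_data/%s_train.xlsx' % city.lower())
    test.to_excel('weather_data/%s_test.xlsx' % city.lower())
    print(city, len(train), len(test))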
onexit=self.db.cancel)\n\n try:\n self.db.execute('''CREATE TABLE akwaria\n \t\t(id INTEGER PRIMARY KEY AUTOINCREMENT,\n \t\theater TEXT DEFAULT '',\n \t\tcooler TEXT DEFAULT '',\n \t\tthermometer TEXT DEFAULT '',\n \t\tmaxtemp REAL DEFAULT 25.0,\n \t\tmintemp REAL DEFAULT 20.0,\n \t\thisteresis REAL DEFAULT 0.1)''')\n logging.info(\"Utworzono tabele z akwariami\")\n except sqlite3.OperationalError as e:\n pass\n\n logging.info(\"Polaczono z baza danych\")\n\n def _initialize_thermometers(self):\n self.thermometers = Temps()\n self.thermometers.detect()\n logging.info(\"Wykryto {0} termometry/ow\".format(len(self.thermometers.get_all())))\n\n # Start thermometers update thread\n self.thread_pool[\"temps_update\"] = Delay(target=self.thermometers.alive, delay=1, repeat=True)\n\n def _initialize_arduinos(self):\n self.arduinos = Arduinos(self)\n self.lcd.enabled = False\n self.arduinos.detect()\n logging.info(\"Wykryto {0} arduino\".format(len(self.arduinos.arduinos)))\n self.lcd.enabled = True\n\n # Keep them alive\n self.thread_pool[\"arduinos_alive\"] = Delay(target=self.arduinos.alive, delay=3, repeat=True)\n\n def _restore_aquariums(self):\n self.aquariums = Aquariums(self)\n self.aquariums.restore()\n\n # Start aquariums update thread\n self.thread_pool[\"aquariums_update\"] = Delay(target=self.aquariums.alive, delay=1, repeat=True)\n\n def _initialize_room(self):\n self.room = Room(self)\n self.room.load()\n\n # Start aquariums update thread\n self.thread_pool[\"room_update\"] = Delay(target=self.room.alive, delay=5, repeat=True)\n\n def _initialize_web(self):\n logging.info(\"Uruchamianie serwera www\")\n\n # Disable cherrypy loggers\n logging.getLogger(\"cherrypy\").propagate = False\n logging.getLogger(\"cherrypy.error\").propagate = False\n logging.getLogger(\"cherrypy.access\").propagate = False\n\n # Initialize template engine\n env = Environment(loader=FileSystemLoader('web/templates'))\n env.filters[\"hex\"] = lambda value: hex(value)\n\n # Start web server\n logging.debug(\"CherryPy version: {0}\".format(cherrypy.__version__))\n\n cherrypy.config.update(\"web/app.conf\")\n cherrypy.tree.mount(WebRoot(self, env), \"/\", \"web/app.conf\")\n cherrypy.tree.mount(WebSettings(self, env), \"/settings\", \"web/app.conf\")\n cherrypy.engine.signals.subscribe()\n cherrypy.engine.start()\n logging.info(\"Serwer www uruchomiony\")\n\n def cleanup(self, *args):\n logging.info(\"Zamykanie...\")\n for name, thread in self.thread_pool.items():\n try: thread.cancel()\n except: pass\n self.board.cleanup()\n self.mails.cleanup()\n cherrypy.engine.exit()\n self.save_changes()\n logging.info(\"Koniec.\")\n\n def save_changes(self):\n logging.info(\"Zapisywanie zmian na dysku\")\n self.db.commit()\n self.config.save()\n\nif __name__ == \"__main__\":\n Core()","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":5704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"316379111","text":"from django.shortcuts import render\nfrom django import forms\nfrom landing.models import MailForm\nfrom django.core.mail import send_mail\n\n\nclass MailFormForm(forms.Form):\n email = forms.EmailField()\n\n\n# Create your views here.\ndef landing_page(request):\n if request.method == 'POST':\n form = MailFormForm(request.POST)\n if form.is_valid():\n try:\n send_mail('Форма с лундинга',\n f'Пользователь {form.data.get(\"email\")}\\n оставил заявку',\n 'info@belogex.ru',\n ['support@belogex.ru', 'info@belogex.ru'],\n fail_silently=False)\n 
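run.py drives everything through the project-local libs.delayer.Delay (repeating background tasks with cancel/onexit hooks). The real class is not shown in this dump; a hypothetical stand-in built on threading.Timer, matching only the surface used in run.py, might look like this:

import threading

class Delay:
    # Hypothetical reimplementation covering the usage in run.py
    # (target, delay, repeat, daemon, onexit, cancel); the real class may differ.
    def __init__(self, target, delay=0.0, repeat=False, daemon=True, onexit=None):
        self.target, self.delay, self.repeat = target, delay, repeat
        self.daemon, self.onexit = daemon, onexit
        self._schedule()

    def _schedule(self):
        self._timer = threading.Timer(self.delay, self._run)
        self._timer.daemon = self.daemon
        self._timer.start()

    def _run(self):
        self.target()
        if self.repeat:
            self._schedule()

    def cancel(self):
        self._timer.cancel()
        if self.onexit:
            self.onexit()

# Usage mirroring run.py: tick = Delay(board.alive, delay=3, repeat=True); tick.cancel()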
except Exception as e:\n pass\n MailForm(email=form.data.get('email')).save()\n\n return render(request, 'index.html')\n","sub_path":"landing/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"372294240","text":"def loadBase(bot):\r\n #for inference\r\n bot.implies = bot.makeNode('implies')\r\n #usage: X,(and) Y, (and) Z, ... implies, A, (AND) B, ...\r\n #when X and Y and Z etc. are found their probabilities\r\n #influence the probabilities on A and B etc.\r\n\r\n #for hardware interface and native code\r\n bot.activate = bot.makeNode('activate')\r\n #usage: activate, X, arg1, arg2, arg3...\r\n #will run the code held in X.function with args\r\n\r\n bot.hasNewValue = bot.makeNode('has a value')\r\n #usage: hasNewValue X\r\n #indicates that X is currently a defined variable\r\n\r\n def doneF(*args, **keyargs): pass#bot.setAwake(False)\r\n\r\n bot.done = bot.makeNode('finish', function=doneF)\r\n\r\n #HELPERS\r\n def bayes(A, B, B_A, bot=None):\r\n p = A*B_A\r\n notp = (1-A)*(1-B_A)\r\n if p == 0 and notp == 0: return 0\r\n return (p*B+notp*(1-B))/(notp+p)\r\n bot.bayes = bayes\r\n\r\n def bayesUpdate(chainFrom, self, A, B, B_A, bot=None):\r\n A.setValue(bayes(A.getValue(), B.getValue(), B_A.getValue()))\r\n\r\n bot.conditional_P = bot.makeNode('conditional probability')\r\n #usage: P(A|B), conditional_P, P(A), P(B)\r\n #indicates the P(A|B) allows updating of P(A)\r\n\r\n bot.bayesUpdateNode = bot.makeNode('bayesUpdate', function=bayesUpdate)\r\n\r\n\r\n bot.is_a = bot.makeNode('is a')\r\n\r\n A = bot.makeNode(\"A\", abstract=True)\r\n B = bot.makeNode(\"B\", abstract=True)\r\n ENT = bot.makeNode('ENT', abstract=True)\r\n\r\n bot.makeChain(\r\n bot.makeChain(ENT, bot.is_a, A, abstract=True),\r\n bot.makeChain(A, bot.is_a, B, abstract=True),\r\n bot.implies,\r\n bot.makeChain(ENT, bot.is_a, B, abstract=True),\r\n abstract=True\r\n )","sub_path":"engine/loadBase.py","file_name":"loadBase.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"444383392","text":"#coding:utf-8\n\nimport requests\nimport json\nimport math\nimport random\nimport time\n\ndef get_w():\n co_f='2'\n curt=str(int(time.time()*1000))\n for i in range(1,32-len(curt)):\n\t co_f+=hex(math.floor(random.random()*16))[-1]\n co_f+=curt\n return 'id=' + co_f + ':lv=' + curt + ':ss=' + curt\n\ndef send_code(mobile):\n url = 'http://bj.ac.10086.cn/ac/tempPwdSend'\n data = {\n 'mobile':mobile\n }\n response = requests.post(url,data=data)\n print('发送验证码:',response.status_code,response.text,response.headers)\n\n JSESSIONID = response.cookies['JSESSIONID']\n Webtrends = response.cookies['Webtrends']\n cookie = {\n 'JSESSIONID':JSESSIONID,\n 'Webtrends':Webtrends,\n }\n return cookie\n\ndef validate(mobile,cookies):\n url = 'http://bj.ac.10086.cn/ac/ValidateIp'\n data = {\n 'ceshi':'false'\n }\n response = requests.post(url,data=data,cookies=cookies)\n print('validate 请求结果:',response.status_code,response.text,response.headers)\n\ndef login(mobile, code,cookies):\n url = 'http://bj.ac.10086.cn/ac/CmSsoLogin'\n data = {\n 'user': mobile,\n 'phone': mobile,\n 'backurl': 'http://www.bj.10086.cn/my',\n 'continue':'http://www.bj.10086.cn/my',\n 'style':'BIZ_LOGINBOX',\n\t\t'service':'www.bj.10086.cn',\n 'ssoLogin':'yes',\n 'loginMode': 2,\n 'loginMethod': 1,\n 'loginName': mobile,\n 'target':'_parent',\n 'smsNum': code,\n 
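loadBase.py's bayes helper blends a prior A with a likelihood B_A, weighted by how strongly the evidence B was observed. A standalone copy of the helper as written, plus a numeric sanity check: with a flat prior and the evidence fully observed (B = 1) it returns the likelihood.

def bayes(A, B, B_A):
    p = A * B_A
    notp = (1 - A) * (1 - B_A)
    if p == 0 and notp == 0:
        return 0
    return (p * B + notp * (1 - B)) / (notp + p)

print(bayes(0.5, 1.0, 0.8))   # 0.8: flat prior, B observed -> posterior follows B_A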
'ckCookie': 'on',\n }\n headers = {\n\t\t'Content-Type':'application/x-www-form-urlencoded',\n\t\t'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',\n\t\t'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n }\n response = requests.post(url, data=data, headers=headers, cookies=cookies, allow_redirects=False)\n print('北京移动,登录:', mobile, response.status_code, response.headers)\n\n newurl = 'http' + response.headers['Location'][5:]\n response1 = requests.get(newurl, cookies=cookies)\n print('北京移动,登录第二步:', mobile, response1.status_code, response1.headers)\n\nmobile = '15011463580'\ncookies = send_code(mobile)\ncode = input('code=')\ncookies['WT_FPC'] = get_w()\ncookies['login_mobile'] = mobile\ncookies['c_mobile'] = mobile\nvalidate(mobile,cookies)\nlogin(mobile,code,cookies)\n\n","sub_path":"chinaMobile/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"606614734","text":"import math\nimport numpy as np\n\nfrom pyande.models.model import Model\nfrom pyande.data.draw import draw_correlation_matrix\n\n\nclass MultivariateGaussian(Model):\n\n # Initialise data\n def __init__(self, data_dictionary):\n\n if 'cross_validation' not in data_dictionary:\n raise ValueError(\"No cross validation data in dictionary. A cross validation data \"\n \"is necessary\")\n\n if 'cross_validation_output' not in data_dictionary:\n raise ValueError(\"No cross validation output datta in dictionary. A cross validation \"\n \"output data is necessary\")\n\n super(MultivariateGaussian, self).__init__(data_dictionary)\n\n self.mean = 0\n self.sigma = 0\n\n def fit_parameter(self):\n\n (self.mean, self.sigma) = MultivariateGaussian.fit_parameter_model(self.df_train)\n\n def get_probabilities(self, data):\n\n p_validation = MultivariateGaussian.get_probabilities_mvg(data.values, self.mean,\n self.sigma)\n return p_validation\n\n @staticmethod\n def fit_parameter_model(data):\n\n mean = np.mean(data, axis=0)\n sigma = np.cov(data, rowvar=False)\n\n return mean, sigma\n\n @staticmethod\n def compute_probability_mvg(sample, mean, sigma, det_sigma):\n\n if det_sigma == 0:\n raise NameError(\"The covariance matrix can't be singular\")\n\n const_divide = math.pow((2.0 * np.pi), (len(sample) / 2.0)) * math.pow(det_sigma, 1.0 / 2)\n const_divide = 1.0 / const_divide\n\n sample_dif = np.subtract(sample, mean)\n\n if sigma.size == 1:\n sigma_inverse = 1.0 / sigma\n else:\n sigma_inverse = np.linalg.inv(sigma)\n prod_matrix = sample_dif.dot(sigma_inverse)\n prod_matrix = prod_matrix.dot(sample_dif.T)\n\n result = math.exp(-0.5 * prod_matrix)\n\n probabilities = const_divide * result\n\n return probabilities\n\n @staticmethod\n def get_probabilities_mvg(sample, mean, sigma):\n probabilities_vector = []\n\n # Determinant calculations\n if sigma.size == 1:\n det_sigma = sigma\n else:\n\n det_sigma = np.linalg.det(sigma)\n if det_sigma < 0:\n raise NameError(\"The covariance matrix is negative. 
Try to normalise features or \"\n \"review features values.\")\n\n for row in sample:\n pb = MultivariateGaussian.compute_probability_mvg(row, mean, sigma, det_sigma)\n probabilities_vector.append(pb)\n\n return probabilities_vector\n\n def draw_correlation_matrix(self, data):\n draw_correlation_matrix(self.sigma, data)\n","sub_path":"pyande/models/statistics/mvg.py","file_name":"mvg.py","file_ext":"py","file_size_in_byte":2750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"275619901","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\supervisor\\process.py\n# Compiled at: 2015-07-18 11:09:26\nimport os, time, errno, shlex, traceback, signal\nfrom supervisor.compat import maxint\nfrom supervisor.compat import total_ordering\nfrom supervisor.compat import as_bytes\nfrom supervisor.medusa import asyncore_25 as asyncore\nfrom supervisor.states import ProcessStates\nfrom supervisor.states import SupervisorStates\nfrom supervisor.states import getProcessStateDescription\nfrom supervisor.states import STOPPED_STATES\nfrom supervisor.options import decode_wait_status\nfrom supervisor.options import signame\nfrom supervisor.options import ProcessException, BadCommand\nfrom supervisor.dispatchers import EventListenerStates\nfrom supervisor import events\nfrom supervisor.datatypes import RestartUnconditionally\nfrom supervisor.socket_manager import SocketManager\n\n@total_ordering\nclass Subprocess(object):\n \"\"\"A class to manage a subprocess.\"\"\"\n pid = 0\n config = None\n state = None\n listener_state = None\n event = None\n laststart = 0\n laststop = 0\n laststopreport = 0\n delay = 0\n administrative_stop = False\n system_stop = False\n killing = False\n backoff = 0\n dispatchers = None\n pipes = None\n exitstatus = None\n spawnerr = None\n group = None\n\n def __init__(self, config):\n \"\"\"Constructor.\n\n Argument is a ProcessConfig instance.\n \"\"\"\n self.config = config\n self.dispatchers = {}\n self.pipes = {}\n self.state = ProcessStates.STOPPED\n\n def removelogs(self):\n for dispatcher in self.dispatchers.values():\n if hasattr(dispatcher, 'removelogs'):\n dispatcher.removelogs()\n\n def reopenlogs(self):\n for dispatcher in self.dispatchers.values():\n if hasattr(dispatcher, 'reopenlogs'):\n dispatcher.reopenlogs()\n\n def drain(self):\n for dispatcher in self.dispatchers.values():\n if dispatcher.readable():\n dispatcher.handle_read_event()\n if dispatcher.writable():\n dispatcher.handle_write_event()\n\n def write(self, chars):\n if not self.pid or self.killing:\n raise OSError(errno.EPIPE, 'Process already closed')\n stdin_fd = self.pipes['stdin']\n if stdin_fd is None:\n raise OSError(errno.EPIPE, 'Process has no stdin channel')\n dispatcher = self.dispatchers[stdin_fd]\n if dispatcher.closed:\n raise OSError(errno.EPIPE, \"Process' stdin channel is closed\")\n dispatcher.input_buffer += chars\n dispatcher.flush()\n return\n\n def get_execv_args(self):\n \"\"\"Internal: turn a program name into a file name, using $PATH,\n make sure it exists / is executable, raising a ProcessException\n if not \"\"\"\n try:\n commandargs = shlex.split(self.config.command)\n except ValueError as e:\n raise BadCommand(\"can't parse command %r: %s\" % (\n self.config.command, str(e)))\n\n if commandargs:\n program = commandargs[0]\n else:\n raise BadCommand('command is empty')\n if '/' in 
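The hand-rolled density in compute_probability_mvg can be cross-checked against SciPy; a sketch, assuming scipy is available, that evaluates the same multivariate normal pdf both ways:

import numpy as np
from scipy.stats import multivariate_normal

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3))
mean, sigma = X.mean(axis=0), np.cov(X, rowvar=False)

p_scipy = multivariate_normal.pdf(X, mean=mean, cov=sigma)

# Same quantity, written out as in the class above (vectorized over rows).
d = X.shape[1]
inv, det = np.linalg.inv(sigma), np.linalg.det(sigma)
diff = X - mean
expo = -0.5 * np.einsum('ij,jk,ik->i', diff, inv, diff)   # quadratic form per row
p_manual = np.exp(expo) / np.sqrt((2 * np.pi) ** d * det)

print(np.allclose(p_scipy, p_manual))   # True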
program:\n filename = program\n try:\n st = self.config.options.stat(filename)\n except OSError:\n st = None\n\n else:\n path = self.config.options.get_path()\n found = None\n st = None\n for dir in path:\n found = os.path.join(dir, program)\n try:\n st = self.config.options.stat(found)\n except OSError:\n pass\n else:\n break\n\n if st is None:\n filename = program\n else:\n filename = found\n self.config.options.check_execv_args(filename, commandargs, st)\n return (\n filename, commandargs)\n\n event_map = {ProcessStates.BACKOFF: events.ProcessStateBackoffEvent, \n ProcessStates.FATAL: events.ProcessStateFatalEvent, \n ProcessStates.UNKNOWN: events.ProcessStateUnknownEvent, \n ProcessStates.STOPPED: events.ProcessStateStoppedEvent, \n ProcessStates.EXITED: events.ProcessStateExitedEvent, \n ProcessStates.RUNNING: events.ProcessStateRunningEvent, \n ProcessStates.STARTING: events.ProcessStateStartingEvent, \n ProcessStates.STOPPING: events.ProcessStateStoppingEvent}\n\n def change_state(self, new_state, expected=True):\n old_state = self.state\n if new_state is old_state:\n return False\n else:\n event_class = self.event_map.get(new_state)\n if event_class is not None:\n event = event_class(self, old_state, expected)\n events.notify(event)\n if new_state == ProcessStates.BACKOFF:\n now = time.time()\n self.backoff += 1\n self.delay = now + self.backoff\n self.state = new_state\n return\n\n def _assertInState(self, *states):\n if self.state not in states:\n current_state = getProcessStateDescription(self.state)\n allowable_states = (' ').join(map(getProcessStateDescription, states))\n raise AssertionError('Assertion failed for %s: %s not in %s' % (\n self.config.name, current_state, allowable_states))\n\n def record_spawnerr(self, msg):\n self.spawnerr = msg\n self.config.options.logger.info('spawnerr: %s' % msg)\n\n def spawn(self):\n \"\"\"Start the subprocess. It must not be running already.\n\n Return the process id. 
If the fork() call fails, return None.\n \"\"\"\n options = self.config.options\n if self.pid:\n msg = 'process %r already running' % self.config.name\n options.logger.warn(msg)\n return\n else:\n self.killing = False\n self.spawnerr = None\n self.exitstatus = None\n self.system_stop = False\n self.administrative_stop = False\n self.laststart = time.time()\n self._assertInState(ProcessStates.EXITED, ProcessStates.FATAL, ProcessStates.BACKOFF, ProcessStates.STOPPED)\n self.change_state(ProcessStates.STARTING)\n try:\n filename, argv = self.get_execv_args()\n except ProcessException as what:\n self.record_spawnerr(what.args[0])\n self._assertInState(ProcessStates.STARTING)\n self.change_state(ProcessStates.BACKOFF)\n return\n\n try:\n self.dispatchers, self.pipes = self.config.make_dispatchers(self)\n except (OSError, IOError) as why:\n code = why.args[0]\n if code == errno.EMFILE:\n msg = 'too many open files to spawn %r' % self.config.name\n else:\n msg = 'unknown error making dispatchers: %s' % errno.errorcode.get(code, code)\n self.record_spawnerr(msg)\n self._assertInState(ProcessStates.STARTING)\n self.change_state(ProcessStates.BACKOFF)\n return\n\n try:\n pid = options.fork()\n except OSError as why:\n code = why.args[0]\n if code == errno.EAGAIN:\n msg = 'Too many processes in process table to spawn %r' % self.config.name\n else:\n msg = 'unknown error during fork: %s' % errno.errorcode.get(code, code)\n self.record_spawnerr(msg)\n self._assertInState(ProcessStates.STARTING)\n self.change_state(ProcessStates.BACKOFF)\n options.close_parent_pipes(self.pipes)\n options.close_child_pipes(self.pipes)\n return\n\n if pid != 0:\n return self._spawn_as_parent(pid)\n return self._spawn_as_child(filename, argv)\n return\n\n def _spawn_as_parent(self, pid):\n self.pid = pid\n options = self.config.options\n options.close_child_pipes(self.pipes)\n options.logger.info('spawned: %r with pid %s' % (self.config.name, pid))\n self.spawnerr = None\n self.delay = time.time() + self.config.startsecs\n options.pidhistory[pid] = self\n return pid\n\n def _prepare_child_fds(self):\n options = self.config.options\n options.dup2(self.pipes['child_stdin'], 0)\n options.dup2(self.pipes['child_stdout'], 1)\n if self.config.redirect_stderr:\n options.dup2(self.pipes['child_stdout'], 2)\n else:\n options.dup2(self.pipes['child_stderr'], 2)\n for i in range(3, options.minfds):\n options.close_fd(i)\n\n def _spawn_as_child(self, filename, argv):\n options = self.config.options\n try:\n options.setpgrp()\n self._prepare_child_fds()\n setuid_msg = self.set_uid()\n if setuid_msg:\n uid = self.config.uid\n msg = \"couldn't setuid to %s: %s\\n\" % (uid, setuid_msg)\n options.write(2, 'supervisor: ' + msg)\n return\n env = os.environ.copy()\n env['SUPERVISOR_ENABLED'] = '1'\n serverurl = self.config.serverurl\n if serverurl is None:\n serverurl = self.config.options.serverurl\n if serverurl:\n env['SUPERVISOR_SERVER_URL'] = serverurl\n env['SUPERVISOR_PROCESS_NAME'] = self.config.name\n if self.group:\n env['SUPERVISOR_GROUP_NAME'] = self.group.config.name\n if self.config.environment is not None:\n env.update(self.config.environment)\n cwd = self.config.directory\n try:\n if cwd is not None:\n options.chdir(cwd)\n except OSError as why:\n code = errno.errorcode.get(why.args[0], why.args[0])\n msg = \"couldn't chdir to %s: %s\\n\" % (cwd, code)\n options.write(2, 'supervisor: ' + msg)\n return\n\n try:\n if self.config.umask is not None:\n options.setumask(self.config.umask)\n options.execve(filename, argv, env)\n 
except OSError as why:\n code = errno.errorcode.get(why.args[0], why.args[0])\n msg = \"couldn't exec %s: %s\\n\" % (argv[0], code)\n options.write(2, 'supervisor: ' + msg)\n except:\n (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()\n error = '%s, %s: file: %s line: %s' % (t, v, file, line)\n msg = \"couldn't exec %s: %s\\n\" % (filename, error)\n options.write(2, 'supervisor: ' + msg)\n\n finally:\n options.write(2, 'supervisor: child process was not spawned\\n')\n options._exit(127)\n\n return\n\n def stop(self):\n \"\"\" Administrative stop \"\"\"\n self.administrative_stop = True\n self.laststopreport = 0\n return self.kill(self.config.stopsignal)\n\n def stop_report(self):\n \"\"\" Log a 'waiting for x to stop' message with throttling. \"\"\"\n if self.state == ProcessStates.STOPPING:\n now = time.time()\n if now > self.laststopreport + 2:\n self.config.options.logger.info('waiting for %s to stop' % self.config.name)\n self.laststopreport = now\n\n def give_up(self):\n self.delay = 0\n self.backoff = 0\n self.system_stop = True\n self._assertInState(ProcessStates.BACKOFF)\n self.change_state(ProcessStates.FATAL)\n\n def kill(self, sig):\n \"\"\"Send a signal to the subprocess. This may or may not kill it.\n\n Return None if the signal was sent, or an error message string\n if an error occurred or if the subprocess is not running.\n \"\"\"\n now = time.time()\n options = self.config.options\n if self.state == ProcessStates.BACKOFF:\n msg = 'Attempted to kill %s, which is in BACKOFF state.' % self.config.name\n options.logger.debug(msg)\n self.change_state(ProcessStates.STOPPED)\n return\n else:\n if not self.pid:\n msg = \"attempted to kill %s with sig %s but it wasn't running\" % (\n self.config.name, signame(sig))\n options.logger.debug(msg)\n return msg\n if self.state == ProcessStates.STOPPING:\n killasgroup = self.config.killasgroup\n else:\n killasgroup = self.config.stopasgroup\n as_group = ''\n if killasgroup:\n as_group = 'process group '\n options.logger.debug('killing %s (pid %s) %swith signal %s' % (\n self.config.name,\n self.pid,\n as_group,\n signame(sig)))\n self.killing = True\n self.delay = now + self.config.stopwaitsecs\n self._assertInState(ProcessStates.RUNNING, ProcessStates.STARTING, ProcessStates.STOPPING)\n self.change_state(ProcessStates.STOPPING)\n pid = self.pid\n if killasgroup:\n pid = -self.pid\n try:\n options.kill(pid, sig)\n except:\n tb = traceback.format_exc()\n msg = 'unknown problem killing %s (%s):%s' % (self.config.name,\n self.pid, tb)\n options.logger.critical(msg)\n self.change_state(ProcessStates.UNKNOWN)\n self.pid = 0\n self.killing = False\n self.delay = 0\n return msg\n\n return\n\n def signal(self, sig):\n \"\"\"Send a signal to the subprocess, without intending to kill it.\n\n Return None if the signal was sent, or an error message string\n if an error occurred or if the subprocess is not running.\n \"\"\"\n options = self.config.options\n if not self.pid:\n msg = \"attempted to send %s sig %s but it wasn't running\" % (\n self.config.name, signame(sig))\n options.logger.debug(msg)\n return msg\n else:\n options.logger.debug('sending %s (pid %s) sig %s' % (\n self.config.name,\n self.pid,\n signame(sig)))\n self._assertInState(ProcessStates.RUNNING, ProcessStates.STARTING, ProcessStates.STOPPING)\n try:\n options.kill(self.pid, sig)\n except:\n tb = traceback.format_exc()\n msg = 'unknown problem sending sig %s (%s):%s' % (\n self.config.name, self.pid, tb)\n options.logger.critical(msg)\n 
self.change_state(ProcessStates.UNKNOWN)\n self.pid = 0\n return msg\n\n return\n\n def finish(self, pid, sts):\n \"\"\" The process was reaped and we need to report and manage its state\n \"\"\"\n self.drain()\n es, msg = decode_wait_status(sts)\n now = time.time()\n self.laststop = now\n processname = self.config.name\n if now > self.laststart:\n too_quickly = now - self.laststart < self.config.startsecs\n else:\n too_quickly = False\n self.config.options.logger.warn(\"process %r (%s) laststart time is in the future, don't know how long process was running so assuming it did not exit too quickly\" % (\n self.config.name, self.pid))\n exit_expected = es in self.config.exitcodes\n if self.killing:\n self.killing = False\n self.delay = 0\n self.exitstatus = es\n msg = 'stopped: %s (%s)' % (processname, msg)\n self._assertInState(ProcessStates.STOPPING)\n self.change_state(ProcessStates.STOPPED)\n elif too_quickly:\n self.exitstatus = None\n self.spawnerr = 'Exited too quickly (process log may have details)'\n msg = 'exited: %s (%s)' % (processname, msg + '; not expected')\n self._assertInState(ProcessStates.STARTING)\n self.change_state(ProcessStates.BACKOFF)\n else:\n self.delay = 0\n self.backoff = 0\n self.exitstatus = es\n if self.state == ProcessStates.STARTING:\n self.change_state(ProcessStates.RUNNING)\n self._assertInState(ProcessStates.RUNNING)\n if exit_expected:\n msg = 'exited: %s (%s)' % (processname, msg + '; expected')\n self.change_state(ProcessStates.EXITED, expected=True)\n else:\n self.spawnerr = 'Bad exit code %s' % es\n msg = 'exited: %s (%s)' % (processname, msg + '; not expected')\n self.change_state(ProcessStates.EXITED, expected=False)\n self.config.options.logger.info(msg)\n self.pid = 0\n self.config.options.close_parent_pipes(self.pipes)\n self.pipes = {}\n self.dispatchers = {}\n if self.event is not None:\n events.notify(events.EventRejectedEvent(self, self.event))\n self.event = None\n return\n\n def set_uid(self):\n if self.config.uid is None:\n return\n else:\n msg = self.config.options.dropPrivileges(self.config.uid)\n return msg\n\n def __lt__(self, other):\n return self.config.priority < other.config.priority\n\n def __eq__(self, other):\n return self.config.priority == other.config.priority\n\n def __repr__(self):\n return '<Subprocess at %s with name %s in state %s>' % (\n id(self),\n self.config.name,\n getProcessStateDescription(self.get_state()))\n\n def get_state(self):\n return self.state\n\n def transition(self):\n now = time.time()\n state = self.state\n logger = self.config.options.logger\n if self.config.options.mood > SupervisorStates.RESTARTING:\n if state == ProcessStates.EXITED:\n if self.config.startintervalsecs:\n if not self.laststart or now > self.laststart + self.config.startintervalsecs:\n self.spawn()\n elif self.config.autorestart:\n if self.config.autorestart is RestartUnconditionally:\n self.spawn()\n elif self.exitstatus not in self.config.exitcodes:\n self.spawn()\n elif state == ProcessStates.STOPPED and not self.laststart:\n if self.config.autostart:\n self.spawn()\n elif state == ProcessStates.BACKOFF:\n if self.backoff <= self.config.startretries:\n if now > self.delay:\n self.spawn()\n if state == ProcessStates.STARTING:\n if now - self.laststart > self.config.startsecs:\n self.delay = 0\n self.backoff = 0\n self._assertInState(ProcessStates.STARTING)\n self.change_state(ProcessStates.RUNNING)\n msg = 'entered RUNNING state, process has stayed up for > than %s seconds (startsecs)' % self.config.startsecs\n logger.info('success: %s %s' % (self.config.name, msg))\n if 
state == ProcessStates.BACKOFF:\n if self.backoff > self.config.startretries:\n self.give_up()\n msg = 'entered FATAL state, too many start retries too quickly'\n logger.info('gave up: %s %s' % (self.config.name, msg))\n elif state == ProcessStates.STOPPING:\n time_left = self.delay - now\n if time_left <= 0:\n self.config.options.logger.warn('killing %r (%s) with SIGKILL' % (self.config.name,\n self.pid))\n self.kill(signal.SIGKILL)\n\n\nclass FastCGISubprocess(Subprocess):\n \"\"\"Extends Subprocess class to handle FastCGI subprocesses\"\"\"\n\n def __init__(self, config):\n Subprocess.__init__(self, config)\n self.fcgi_sock = None\n return\n\n def before_spawn(self):\n \"\"\"\n The FastCGI socket needs to be created by the parent before we fork\n \"\"\"\n if self.group is None:\n raise NotImplementedError('No group set for FastCGISubprocess')\n if not hasattr(self.group, 'socket_manager'):\n raise NotImplementedError('No SocketManager set for %s:%s' % (\n self.group, dir(self.group)))\n self.fcgi_sock = self.group.socket_manager.get_socket()\n return\n\n def spawn(self):\n \"\"\"\n Overrides Subprocess.spawn() so we can hook in before it happens\n \"\"\"\n self.before_spawn()\n pid = Subprocess.spawn(self)\n if pid is None:\n self.fcgi_sock = None\n return pid\n\n def after_finish(self):\n \"\"\"\n Releases reference to FastCGI socket when process is reaped\n \"\"\"\n self.fcgi_sock = None\n return\n\n def finish(self, pid, sts):\n \"\"\"\n Overrides Subprocess.finish() so we can hook in after it happens\n \"\"\"\n retval = Subprocess.finish(self, pid, sts)\n self.after_finish()\n return retval\n\n def _prepare_child_fds(self):\n \"\"\"\n Overrides Subprocess._prepare_child_fds()\n The FastCGI socket needs to be set to file descriptor 0 in the child\n \"\"\"\n sock_fd = self.fcgi_sock.fileno()\n options = self.config.options\n options.dup2(sock_fd, 0)\n options.dup2(self.pipes['child_stdout'], 1)\n if self.config.redirect_stderr:\n options.dup2(self.pipes['child_stdout'], 2)\n else:\n options.dup2(self.pipes['child_stderr'], 2)\n for i in range(3, options.minfds):\n options.close_fd(i)\n\n\n@total_ordering\nclass ProcessGroupBase(object):\n\n def __init__(self, config):\n self.config = config\n self.processes = {}\n for pconfig in self.config.process_configs:\n self.processes[pconfig.name] = pconfig.make_process(self)\n\n def __lt__(self, other):\n return self.config.priority < other.config.priority\n\n def __eq__(self, other):\n return self.config.priority == other.config.priority\n\n def __repr__(self):\n return '<%s instance at %s named %s>' % (self.__class__, id(self),\n self.config.name)\n\n def removelogs(self):\n for process in self.processes.values():\n process.removelogs()\n\n def reopenlogs(self):\n for process in self.processes.values():\n process.reopenlogs()\n\n def stop_all(self):\n processes = list(self.processes.values())\n processes.sort()\n processes.reverse()\n for proc in processes:\n state = proc.get_state()\n if state == ProcessStates.RUNNING:\n proc.stop()\n elif state == ProcessStates.STARTING:\n proc.stop()\n elif state == ProcessStates.BACKOFF:\n proc.give_up()\n\n def get_unstopped_processes(self):\n \"\"\" Processes which aren't in a state that is considered 'stopped' \"\"\"\n return [ x for x in self.processes.values() if x.get_state() not in STOPPED_STATES\n ]\n\n def get_dispatchers(self):\n dispatchers = {}\n for process in self.processes.values():\n dispatchers.update(process.dispatchers)\n\n return dispatchers\n\n\nclass 
ProcessGroup(ProcessGroupBase):\n\n def transition(self):\n for proc in self.processes.values():\n proc.transition()\n\n\nclass FastCGIProcessGroup(ProcessGroup):\n\n def __init__(self, config, **kwargs):\n ProcessGroup.__init__(self, config)\n sockManagerKlass = kwargs.get('socketManager', SocketManager)\n self.socket_manager = sockManagerKlass(config.socket_config, logger=config.options.logger)\n try:\n self.socket_manager.get_socket()\n except Exception as e:\n raise ValueError('Could not create FastCGI socket %s: %s' % (\n self.socket_manager.config(), e))\n\n\nclass EventListenerPool(ProcessGroupBase):\n\n def __init__(self, config):\n ProcessGroupBase.__init__(self, config)\n self.event_buffer = []\n for event_type in self.config.pool_events:\n events.subscribe(event_type, self._acceptEvent)\n\n events.subscribe(events.EventRejectedEvent, self.handle_rejected)\n self.serial = -1\n self.last_dispatch = 0\n self.dispatch_throttle = 0\n\n def handle_rejected(self, event):\n process = event.process\n procs = self.processes.values()\n if process in procs:\n self._acceptEvent(event.event, head=True)\n\n def transition(self):\n processes = self.processes.values()\n dispatch_capable = False\n for process in processes:\n process.transition()\n if process.state == ProcessStates.RUNNING:\n if process.listener_state == EventListenerStates.READY:\n dispatch_capable = True\n\n if dispatch_capable:\n if self.dispatch_throttle:\n now = time.time()\n if now - self.last_dispatch < self.dispatch_throttle:\n return\n self.dispatch()\n\n def dispatch(self):\n while self.event_buffer:\n event = self.event_buffer.pop(0)\n ok = self._dispatchEvent(event)\n if not ok:\n self._acceptEvent(event, head=True)\n break\n\n self.last_dispatch = time.time()\n\n def _acceptEvent(self, event, head=False):\n if not hasattr(event, 'serial'):\n event.serial = new_serial(GlobalSerial)\n if not hasattr(event, 'pool_serials'):\n event.pool_serials = {}\n if self.config.name not in event.pool_serials:\n event.pool_serials[self.config.name] = new_serial(self)\n else:\n self.config.options.logger.debug('rebuffering event %s for pool %s (bufsize %s)' % (\n event.serial, self.config.name, len(self.event_buffer)))\n if len(self.event_buffer) >= self.config.buffer_size:\n if self.event_buffer:\n discarded_event = self.event_buffer.pop(0)\n self.config.options.logger.error('pool %s event buffer overflowed, discarding event %s' % (\n self.config.name, discarded_event.serial))\n if head:\n self.event_buffer.insert(0, event)\n else:\n self.event_buffer.append(event)\n\n def _dispatchEvent(self, event):\n pool_serial = event.pool_serials[self.config.name]\n for process in self.processes.values():\n if process.state != ProcessStates.RUNNING:\n continue\n if process.listener_state == EventListenerStates.READY:\n payload = str(event)\n try:\n event_type = event.__class__\n serial = event.serial\n envelope = self._eventEnvelope(event_type, serial, pool_serial, payload)\n process.write(as_bytes(envelope))\n except OSError as why:\n if why.args[0] != errno.EPIPE:\n raise\n self.config.options.logger.debug('epipe occurred while sending event %s to listener %s, listener state unchanged' % (\n event.serial, process.config.name))\n continue\n\n process.listener_state = EventListenerStates.BUSY\n process.event = event\n self.config.options.logger.debug('event %s sent to listener %s' % (\n event.serial, process.config.name))\n return True\n\n return False\n\n def _eventEnvelope(self, event_type, serial, pool_serial, payload):\n event_name = 
events.getEventNameByType(event_type)\n payload_len = len(payload)\n D = {'ver': '3.0', \n 'sid': self.config.options.identifier, \n 'serial': serial, \n 'pool_name': self.config.name, \n 'pool_serial': pool_serial, \n 'event_name': event_name, \n 'len': payload_len, \n 'payload': payload}\n return 'ver:%(ver)s server:%(sid)s serial:%(serial)s pool:%(pool_name)s poolserial:%(pool_serial)s eventname:%(event_name)s len:%(len)s\\n%(payload)s' % D\n\n\nclass GlobalSerial(object):\n\n def __init__(self):\n self.serial = -1\n\n\nGlobalSerial = GlobalSerial()\n\ndef new_serial(inst):\n if inst.serial == maxint:\n inst.serial = -1\n inst.serial += 1\n return inst.serial","sub_path":"pycfiles/supervisor_plus_cron-1.0.15201-py2.7/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":28939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"352290533","text":"#coding=utf-8\nimport caffe\nimport numpy as np\nimport cv2\nimport os\nimport pickle\nimport shutil\n\nclass ObjTypeClassifier:\n def __init__(self,prototxt,weightfile,gpu_id=0):\n caffe.set_mode_gpu()\n caffe.set_device(gpu_id)\n self.__net = caffe.Net(prototxt,weightfile,caffe.TEST)\n\n inputBlobShape = self.__net.blobs['data'].data[0].shape\n self.__input_geometry = (inputBlobShape[-1],inputBlobShape[-2])\n\n def __getInputBlob(self,im):\n im_resize = cv2.resize(im,self.__input_geometry).astype(np.float32)/256\n blob = np.zeros((1,1,self.__input_geometry[1],self.__input_geometry[0]),dtype=np.float32)\n blob[0,:,:,:] = im_resize\n blob = blob.astype(np.float32)\n\n return blob\n\n def __cleanBounding(self,im):\n ret, im_threshold = cv2.threshold(im, 150, 255, cv2.THRESH_BINARY_INV)\n contours, hierarchy = cv2.findContours(im_threshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n con = np.vstack(contours)\n con = cv2.convexHull(con)\n con = np.squeeze(np.array(con), axis=(1,))\n con = cv2.boundingRect(con)\n\n return im[con[1]:con[1]+con[3],con[0]:con[0]+con[2]]\n\n def __resize_data(self,im):\n im = self.__cleanBounding(im)\n # cv2.imshow(\"im\",im)\n # cv2.waitKey(0)\n h, w = im.shape\n\n max_side = max(w, h)\n im_resize = (np.ones((max_side, max_side)) * 255).astype(np.uint8)\n if h < w:\n up = int((w - h) / 2 + 0.7)\n im_resize[up:up + h] = im\n\n elif w < h:\n left = int((h - w) / 2 + 0.7)\n im_resize[:, left:left + w] = im\n else:\n im_resize = im\n\n return im_resize\n\n def extractFeature(self,im,featureBlobName):\n im_resize = self.__resize_data(im)\n self.__net.blobs['data'].data[...] = self.__getInputBlob(im_resize)\n self.__net.forward()\n feature = self.__net.blobs[featureBlobName].data.flatten()\n\n return feature\n\n def classify(self,im,featureBlobName):\n self.__net.blobs['data'].data[...] 
= self.__getInputBlob(im)\n self.__net.forward()\n prob = self.__net.blobs['prob'].data.flatten()\n index = prob.argsort()[-1]\n confidence = prob[index]\n feature = self.__net.blobs[featureBlobName].data.flatten()\n\n return (index,confidence,feature)\n\ndef run():\n cv2.namedWindow(\"base\",0)\n cv2.namedWindow(\"im\",0)\n\n classifier = initNet()\n\n pic_path = [line.strip() for line in open(\"./pic_path.txt\").readlines()]\n base_dir = \"/media/zqp/data/train_data/patent_retrieval/\"\n base_path = [base_dir+\"%06d/%06d.jpg\"%(index,index) for index in range(1000)]\n for path in base_path:\n im = cv2.imread(path)\n result = classifier.classify(im,'loss2/fc')\n print (result[0],\"****************\",result[1])\n cv2.imshow(\"base\",cv2.imread(base_path[result[0]]))\n cv2.imshow(\"im\", im)\n if cv2.waitKey(0)==27:\n break\n\ndef initNet():\n prototxt = r'./deploy.prototxt'\n weightfile = r'./final.caffemodel'\n classifier = ObjTypeClassifier(prototxt,weightfile,0)\n\n return classifier\n\ndef run_1():\n cv2.namedWindow(\"im\",0)\n classifier = initNet()\n pic_path = [line.strip() for line in open(\"./pic_path.txt\").readlines()]\n for path in pic_path:\n print(path)\n im = cv2.imread(path, 0)\n result = classifier.classify(im,'loss2/fc')\n print (result[0],\"****************\",result[1])\n cv2.imshow(\"im\",im)\n if cv2.waitKey(0)==27:\n break\n\ndef extraceBaseFeatures():\n features = []\n classifier = initNet()\n pic_path = [line.strip() for line in open(\"./pic_path.txt\").readlines()]\n index = 0\n for path in pic_path:\n im = cv2.imread(path, 0)\n feature = classifier.extractFeature(im,'loss2/fc')\n features.append(feature)\n index+=1\n\n if index%10==0:\n print (\"process img********\"+str(index))\n\n features = np.array(features)\n array1 = np.sqrt(np.sum(features*features,1))\n features = (features.T/array1).T\n\n file1 = open(\"./features1.pkl\",\"wb\")\n pickle.dump(features, file1)\n fs = cv2.FileStorage(\"./features1.json.gz\",cv2.FILE_STORAGE_WRITE)\n fs.write(\"mat\", features)\n fs.release()\n\ndef classify():\n features = pickle.load(open(\"./features.pkl\",\"rb\"))\n base_path = [line.strip() for line in open(\"./features.txt\").readlines()]\n\n classifier = initNet()\n pic_path = [line.strip() for line in open(\"./pic_path.txt\").readlines()]\n\n cv2.namedWindow(\"im\", 0)\n cv2.namedWindow(\"base\", 0)\n for path in pic_path:\n print(path)\n im = cv2.imread(path, 0)\n feature = classifier.extractFeature(im,'loss2/fc')\n\n feature = feature/np.sqrt(np.dot(feature,feature))\n result = np.matmul(features, feature)\n indexs = np.flip(np.argsort(result))\n\n # indexs = np.where(result>0.9)[0]\n\n if len(indexs)<1:\n continue\n\n flag = False\n cv2.imshow(\"im\", im)\n i = 1\n for index in indexs[:10]:\n cv2.imshow(\"base\", cv2.imread(base_path[index]))\n print(\"top*******%s/%s\"%(result[index], i))\n i += 1\n\n if cv2.waitKey((0))==27:\n flag = True\n break\n\n if flag:\n break\n\ndef classify_1():\n features = pickle.load(open(\"./features.pkl\",\"rb\"))\n base_path = [line.strip() for line in open(\"./features.txt\").readlines()]\n classifier = initNet()\n pic_save_root_dir = \"/media/zqp/data/train_data/patent_retrieval/\"\n pic_path = [line.strip() for line in open(\"./pic_path.txt\").readlines()]\n pic_index = 0\n available = 0\n\n count = 0\n for path in pic_path:\n im = cv2.imread(path)\n feature = classifier.extractFeature(im,'loss2/fc')\n\n feature = feature/np.sqrt(np.dot(feature,feature))\n result = np.matmul(features, feature)\n indexs = np.argsort(result)\n\n 
count += 1\n if count%10==0:\n print(\"process************%s×××××××available: %s\"%(count,available))\n if result[indexs[-1]]<0.85:\n continue\n\n label = os.path.basename(os.path.dirname(base_path[indexs[-1]]))\n shutil.move(path, pic_save_root_dir+label)\n available += 1\n\nif __name__ == '__main__':\n extraceBaseFeatures()\n\n","sub_path":"objTypeClassifier.py","file_name":"objTypeClassifier.py","file_ext":"py","file_size_in_byte":6383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"163771524","text":"from typing import Sequence, Optional\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.exceptions import NotFittedError\nfrom sklearn.utils.validation import check_is_fitted\n\n__all__ = [\"Explainer\"]\n\n\nclass Explainer:\n def __init__(\n self,\n train: pd.DataFrame,\n test: pd.DataFrame,\n target: str,\n model,\n features: Optional[Sequence[str]] = None,\n mode: str = \"binary_classification\",\n random_feature: bool = True,\n ):\n self.features = features if features else train.columns\n self.train = train[self.features].copy()\n self.test = test[self.features].copy()\n self.target = target\n self.model = model\n self.mode = mode\n\n if random_feature:\n self.train = self.insert_random_num(self.train)\n self.test = self.insert_random_num(self.test)\n self.prepare_data()\n self.maybe_fit_model()\n self.get_predictions()\n self.feature_names = self.train.columns\n\n def insert_random_num(self, df: pd.DataFrame) -> pd.DataFrame:\n df[\"RANDOM_NUM\"] = np.random.rand(df.shape[0])\n return df\n\n def prepare_data(self) -> None:\n self.X_train, self.y_train = (\n self.train.drop(columns=self.target),\n self.train[self.target],\n )\n self.X_test, self.y_test = (\n self.test.drop(columns=self.target),\n self.test[self.target],\n )\n\n def maybe_fit_model(self) -> None:\n try:\n check_is_fitted(self.model)\n except NotFittedError:\n self.model = self.model.fit(self.X_train, self.y_train)\n\n def get_predictions(self) -> None:\n if self.mode == \"binary_classification\":\n self.train_preds = self.model.predict_proba(self.X_train)[:, 1]\n self.test_preds = self.model.predict_proba(self.X_test)[:, 1]\n else:\n raise NotImplementedError(\n f\"Problem type {self.mode} has not been implemented yet.\"\n )\n","sub_path":"src/why/explainer.py","file_name":"explainer.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"303992882","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# This is to run model to predict question type by question sentence\n# Question types are defined at:\n# https://github.com/GT-Vision-Lab/VQA/tree/master/QuestionTypes\n\nimport os\nimport time\nimport json\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom main.settings import Config\nfrom main.models import QuestionTypeClassification\nfrom main.models.train import make_training_cls_model\nfrom main.utils.loader import VQA, fetch_question_types\nfrom main.utils.preprocess import text_processor\nfrom main.metrics import calculate_accuracy\n\n# ignore tensorflow debug info\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nDEBUG = False\n\n# dataset\ndata_size = 30000\nvocab_size = 20000\n\n# parameters\nembedding_dim = 256\nhidden_units = 64\n\nlearning_rate = 0.005\nbatch_size = 64\nepochs = 2\n\n# initialize labels\nclasses = fetch_question_types()\nq2id = {q: i for i, q in enumerate(classes)}\nnum_classes = len(classes)\n\n# preprocess\nprocessor = 
None\n\n\ndef data_generator(inputs, labels, batch_size=batch_size):\n steps_per_epoch = (len(inputs)-1) // batch_size + 1\n\n for step in range(steps_per_epoch):\n start = step * batch_size\n batch_inputs = inputs[start:start+batch_size]\n batch_labels = labels[start:start+batch_size]\n yield batch_inputs, batch_labels\n\n\ndef main(*, training=True, save_to=None, load_from=None, val=0.2):\n global data_size\n global num_classes\n global processor\n\n vqa = VQA()\n vqa.load_data(num_data=data_size)\n questions, question_types, _, _ = next(vqa.data_generator())\n labels = [\n q2id[q] if q in q2id else q2id['none of the above']\n for q in question_types]\n\n # build processor based on training dataset\n # if processor is not reused\n if training:\n # preprocessing dataset\n # split train and test set\n train_size = int(data_size * (1 - val))\n\n # inputs\n inputs_train = questions[:train_size]\n inputs_val = questions[train_size:]\n\n # process inputs\n # if tokenizer is not loaded, create new one\n if processor is None:\n processor = text_processor(inputs_train)\n\n # initialize model\n model = QuestionTypeClassification(\n embedding_dim=embedding_dim,\n units=hidden_units,\n vocab_size=vocab_size, # need to add 1 due to Embedding implementation\n num_classes=num_classes\n )\n\n # set initial weights to the model\n if load_from is not None:\n print('Loading weights...')\n model.load_weights(load_from)\n\n # TRAINING STEP\n if training:\n min_loss_val = 1.0\n\n print('Start training')\n\n inputs_train = processor(inputs_train)\n inputs_val = [processor(inputs_val)]\n\n # labels\n labels = np.array(labels, dtype=np.int32)\n\n labels_train = labels[:train_size]\n labels_val = labels[train_size:]\n\n loss = 0\n optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)\n\n train_cls_step = make_training_cls_model(model, optimizer,\n loss='sparse_categorical_crossentropy')\n\n # execute training\n for epoch in range(epochs):\n print('=====' * 10)\n print(' Epoch {}'.format(epoch+1))\n print('=====' * 10)\n\n dataset = data_generator(inputs_train, labels_train, batch_size)\n\n for batch, (ins, outs) in enumerate(dataset):\n st = time.time()\n ins = [ins]\n batch_loss, accuracy = train_cls_step(ins, outs)\n\n end = time.time()\n\n if batch % 100 == 0:\n out_val = model(*inputs_val)\n cost_val = tf.keras.losses.sparse_categorical_crossentropy(labels_val, out_val, from_logits=True)\n loss_val = tf.reduce_mean(cost_val)\n acc_val = calculate_accuracy(out_val, labels_val)\n\n if DEBUG:\n print('[DEBUG] Batch:', batch)\n for layer in model.layers:\n print(' Layer:', model.name + ':' + layer.name)\n print(' Weights:')\n print(' mean:', np.mean(layer.get_weights()[0]))\n print(' std:', np.std(layer.get_weights()[0]))\n print()\n\n batch_loss = batch_loss.numpy()\n print(' Batch:', batch)\n # TODO: add accuracy\n print(' Loss: {:.4f} Accuracy(Train): {:.4f} Loss(Val): {:.4f} Accuracy(Val): {:.4f} Time(batch): {:.4f}s'\n .format(batch_loss, accuracy, loss_val, acc_val, end-st))\n\n if loss_val < min_loss_val:\n min_loss_val = loss_val\n print('Saving models...')\n # save tokenizer info for reuse\n processor.to_json('./.env/tokenizer_config.json')\n model.save_weights(save_to)\n print('Saved!!')\n\n print()\n print('Training completed')\n\n else:\n # if not training mode test with all given data\n st = time.time()\n inputs = processor(questions)\n out = model(inputs)\n labels = tf.Variable(labels, dtype=tf.int32)\n accuracy = calculate_accuracy(out, labels)\n end = time.time()\n print('Evaluated 
score: Accuracy: {:.4f} Time: {:.4f}s'\n .format(accuracy, end-st))\n\n return model\n\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser(\n description='Run question type classification model')\n parser.add_argument(\n '--no-train', default=False, action='store_true',\n help=\"skip the training step (must set '-p', otherwise training runs)\"\n )\n parser.add_argument(\n '-s', '--save', type=str, default='weights',\n help='file name to save as checkpoint'\n )\n parser.add_argument(\n '-p', '--path', type=str, default=None,\n help='path to model data to load'\n )\n parser.add_argument(\n '-i', '--interactive', default=False, action='store_true',\n help='interactive mode. can pass sentence to predict from stdin.'\n )\n\n args = parser.parse_args()\n\n interactive = args.interactive\n load_from = args.path\n\n # if --no-train is set, skip the training step\n training = True ^ args.no_train\n\n file_name = args.save\n save_to = os.path.join(Config.MODELS.get('QTYPE'), file_name)\n\n st = time.time()\n\n model = main(training=training, load_from=load_from, save_to=save_to)\n\n end = time.time()\n\n print(f'Total time: {end - st:.4f}s.')\n\n if interactive:\n print()\n print('-----' * 10)\n print(' Interactive mode')\n print('-----' * 10)\n sentence = input(\" Input sentence (if quit, type 'q'): \").strip()\n\n while sentence != 'q':\n sentence = processor([sentence])\n\n # avoid breaking on inputs that are empty or contain only unseen words\n # (note: this will emit a DeprecationWarning)\n if not np.any(sentence):\n sentence = np.array([[0]])\n\n pred = model(sentence)\n pred = np.argmax(pred[0])\n print(' Predicted type => ', classes[pred])\n print()\n sentence = input(\" Input sentence (if quit, type 'q'): \").strip()\n\n print()\n print('Closing...')\n","sub_path":"run_questiontype_classification.py","file_name":"run_questiontype_classification.py","file_ext":"py","file_size_in_byte":7492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}{"seq_id":"117858489","text":"#!/usr/bin/env python3\n# https://stackoverflow.com/questions/20856518/navigate-between-widgets-using-arrows-in-qt\n#\n# author: feb 2019\n# Cassio Batista - cassio.batista.13@gmail.com\n\nfrom PyQt5 import QtWidgets, QtGui, QtCore, QtTest\nimport config\n\nclass Card(QtWidgets.QPushButton):\n\tdef __init__(self):\n\t\tsuper(Card, self).__init__()\n\t\tself.setFixedSize(config.BUTTON_SIZE,config.BUTTON_SIZE)\n\t\tself.setDefault(True)\n\t\tself.setAutoDefault(False)\n\n\t# https://stackoverflow.com/questions/20722823/qt-get-mouse-pressed-event-even-if-a-button-is-pressed\n\tdef mousePressEvent(self, ev):\n\t\tQtWidgets.QMessageBox.warning(self, u'Mouse device', config.MOUSE_ERROR_MSG)\n\n\tdef set_icon(self, icon_path):\n\t\tself.icon = QtGui.QIcon(QtGui.QPixmap(icon_path))\n\t\tself.setIcon(self.icon)\n\t\tself.setIconSize(QtCore.QSize(config.ICON_SIZE,config.ICON_SIZE))\n\n\tdef unset_icon(self):\n\t\tself.setIcon(QtGui.QIcon())\n\nclass LightArrow(Card):\n\tdef __init__(self, snd_obj):\n\t\tsuper(LightArrow, self).__init__()\n\t\tself.sound = snd_obj\n\t\tself.setFixedSize(config.BUTTON_SIZE,config.BUTTON_SIZE)\n\t\tself.onVal = False\n\t\tself.order = ['red', 'yellow', 'green']\n\n\tdef is_on(self):\n\t\treturn self.onVal\n\n\tdef set_on(self, on):\n\t\tif self.onVal == on:\n\t\t\treturn\n\t\tself.onVal = on\n\t\tself.update()\n\n\tdef restore(self):\n\t\tself.order = ['red', 'yellow', 'green']\n\n\tdef set_bg_colour(self, 
colour):\n\t\tself.setFocus()\n\t\tself.setStyleSheet(config.HOVER_FOCUS_BG_COLOUR % colour)\n\n\t@QtCore.pyqtSlot()\n\tdef turn_off(self):\n\t\tself.set_on(False)\n\n\t@QtCore.pyqtSlot()\n\tdef turn_on(self):\n\t\tif len(self.order):\n\t\t\tself.colour = self.order.pop(0)\n\t\t\tself.sound.play(self.sound.REG_BEEP, 1.5)\n\t\tself.set_on(True)\n\t\tself.set_bg_colour(self.colour)\n\n\ton = QtCore.pyqtProperty(bool, is_on, set_on)\n\nclass LightState(QtCore.QState):\n\tdef __init__(self, light):\n\t\tsuper(LightState, self).__init__()\n\t\tself.light = light\n\t\tself.timer = QtCore.QTimer(self)\n\t\tself.timer.setInterval(1000) # duration\n\t\tself.timer.setSingleShot(True)\n\n\t\tself.timing = QtCore.QState(self)\n\t\tself.timing.entered.connect(self.light.turn_on)\n\t\tself.timing.entered.connect(self.timer.start)\n\t\tself.timing.exited.connect(self.light.turn_off)\n\t\n\t\tself.done = QtCore.QFinalState(self)\n\n\t\tself.timing.addTransition(self.timer.timeout, self.done)\n\t\n\t\tself.setInitialState(self.timing)\n\t\tself.setObjectName('state')\n\t\tself.addTransition(self.finished, self)\n\n# https://stackoverflow.com/questions/9840197/subclass-arguments-from-superclass\nclass LightMachine(QtCore.QStateMachine):\n\tdef __init__(self, parent, state):\n\t\tsuper(LightMachine, self).__init__(parent) \n\t\tself.state = state\n\t\tself.addState(self.state)\n\t\tself.setInitialState(self.state)\n\t\tself.flag = True\n\n\tdef start(self, grid):\n\t\tQtTest.QTest.qWait(50)\n\t\tsuper(LightMachine, self).start()\n\t\tself.state.light.restore()\n\t\tself.flag = True\n\t\tfor i in range(1, config.BOARD_DIM+1):\n\t\t\tfor j in range(1, config.BOARD_DIM+1):\n\t\t\t\tbutton = grid.itemAtPosition(i,j).widget()\n\t\t\t\tbutton.setEnabled(False)\n\n\tdef stop(self, grid):\n\t\tsuper(LightMachine, self).stop()\n\t\tself.flag = False\n\t\tfor i in range(1, config.BOARD_DIM+1):\n\t\t\tfor j in range(1, config.BOARD_DIM+1):\n\t\t\t\tbutton = grid.itemAtPosition(i,j).widget()\n\t\t\t\tbutton.setEnabled(True)\n### EOF ###\n","sub_path":"button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"146716310","text":"from random import randint\nfrom math import sqrt\nfrom numpy import *\n\nclass Matrix:\n def __init__(self, n, N, delta, search_key=1, no_search_key=0, matrix= None):\n self.search_key = 1\n self.no_search_key = 0\n self.marker_key = -2.0\n self.star_marker = -1.0\n self.delta = delta\n self.n = n\n self.N = N\n self.matrix = []\n self.matrix_list = []\n self.matrix_copy = []\n\n if matrix == None:\n self.__init_matrix__()\n else:\n self.matrix = matrix[:] #copy editable inner matrix\n\n self.matrix_copy = self.matrix[:] #backup matrix\n\n\n def __init_matrix__(self):\n for i in range(0,self.n):\n row = []\n for j in range(0,self.n):\n if randint(0,1) == 1:\n row.append( self.search_key )\n else:\n row.append( self.no_search_key )\n self.matrix_list.append([j,i])\n self.matrix.append(row)\n\n def __set_control_vars__(self, j, i):\n #Temporal global variables\n ##\n self.xcm = i\n self.ycm = j\n ###\n self.minx = i\n self.miny= j\n ###\n self.maxx = i\n self.maxy= j\n ###\n self.rx= 0\n self.ry= 0\n self.r = 0\n\n self.points = 0\n self.union_points = []\n self.next_point = False\n\n def valid_point(self,j,i):\n return j >= 0 and j < self.n and i >= 0 and i < self.n\n\n def more_stars(self, i, j):\n if( self.matrix[j][i+1] == self.search_key or \\\n self.matrix[j+1][i] == 
self.search_key or \\\n self.matrix[j][i-1] == self.search_key or \\\n self.matrix[j-1][i] == self.search_key):\n return True\n else:\n return False\n\n def has_a_star(self, j, i):\n if self.valid_point( j, i ):\n return self.matrix[j][i] == self.search_key\n else:\n return False\n\n def can_expand(self, j = None, i = None):\n if j == None or i == None:\n return self.r < self.delta and self.points <= self.N\n else:\n return self.has_a_star( j, i) and self.valid_point( j, i) and self.r < self.delta and self.points <= self.N\n\n def connect(self):\n self.matrix = self.matrix_copy\n\n # Randomized version\n # l = self.matrix_list[:] #copy list\n # while len(l) > 0:\n # index = randint(0,len(l)-1)\n # j = l[index][0]\n # i = l[index][1]\n # self.connect_point(j,i)\n # del(l[index])\n\n # Lineal implementation version\n for j in range(0,self.n):\n for i in range(0,self.n):\n self.connect_point( j, i)\n\n if not __name__ == '__main__':\n for j in range(0,self.n):\n for i in range(0,self.n):\n self.clean_marker( j, i)\n return self.matrix\n\n def clean_marker(self,j,i):\n if self.matrix[j][i] == self.star_marker or self.matrix[j][i] == self.marker_key:\n self.matrix[j][i] = self.search_key\n\n def connect_point(self, j, i):\n if not self.has_a_star(j,i):\n return\n\n self.__set_control_vars__( j, i)\n self.connect_recursive( j, i)\n self.unify_points()\n\n def connect_recursive(self,j,i):\n if self.union( j, i):\n\n # Randomized implementation\n # dirs = []\n # d = randint(0,3)\n # dirs += [d]\n # while len(dirs) < 4:\n # d = randint(0,3)\n # if not(d in dirs):\n # dirs += [d]\n\n # Iterative implementation\n dirs = [0,1,2,3]\n\n for d in dirs:\n if d == 0:\n self.connect_recursive( j+1, i)\n elif d == 1:\n self.connect_recursive( j, i-1)\n elif d == 2:\n self.connect_recursive( j-1, i)\n elif d == 3:\n self.connect_recursive( j, i+1)\n\n def union(self, cj, ci):\n if self.can_expand( cj, ci):\n self.points += 1\n\n if ci < self.minx:\n self.minx = ci\n if ci > self.maxx:\n self.maxx = ci\n\n if cj < self.miny:\n self.miny = cj\n if cj > self.maxy:\n self.maxy = cj\n\n self.rx = (self.maxx - self.minx) + 1\n self.ry = (self.maxy - self.miny) + 1\n self.r = sqrt(self.rx**2 + self.ry**2)/2\n\n self.matrix[cj][ci] = self.marker_key # mark the united point\n self.union_points.append([cj,ci])\n return True\n else:\n return False\n\n def unify_points(self):\n if self.points >= self.N:\n xcm = 0\n ycm = 0\n\n for point in self.union_points:\n y = point[0]\n x = point[1]\n\n self.matrix[y][x] = self.no_search_key #delete the united point\n\n ycm += y\n xcm += x\n\n xcm = int(round( xcm / self.points))\n ycm = int(round( ycm / self.points))\n\n self.xcm = xcm\n self.ycm = ycm\n\n self.matrix[self.ycm][self.xcm] = self.star_marker\n # points < N\n else:\n for point in self.union_points:\n y = point[0]\n x = point[1]\n\n self.matrix[y][x] = self.marker_key #delete the united point\n self.matrix[self.ycm][self.xcm] = self.search_key\n\n def print_m(self):\n p = \"\"\n for j in range(0,self.n):\n for i in range(0,self.n):\n p += \" \" + str( self.matrix[j][i])\n p += \"\\n\"\n print(p)\n\ndef run():\n n = 40\n delta = 3\n N = 4\n\n matrix = Matrix( n = n, N = N, delta = delta)\n\n matrix.print_m()\n matrix.connect()\n matrix.print_m()\n\nif __name__ == '__main__':\n run()","sub_path":"conectivity.py","file_name":"conectivity.py","file_ext":"py","file_size_in_byte":5106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"571671259","text":"import 
asyncio\nimport logging\n\nimport discord \n\nfrom discord import AllowedMentions \nfrom discord.ext import commands #NEED TO DELETE THIS AFTER DEVELOPMENT\n\nfrom bot.constants import Logs, Rationals, Roles\nfrom bot.bot import Bot\nfrom bot.extension import load_all_extensions\n\nlog = logging.getLogger(__name__)\n \nasync def main():\n intents = discord.Intents.all()\n intents.members = True\n intents.presences = True\n intents.dm_typing = False\n intents.dm_reactions = False\n intents.invites = False\n intents.webhooks = False\n intents.integrations = False\n \n \n bot_ = Bot(\n command_prefix=(\"|\"),\n case_insensitive=False,\n allowed_mentions=AllowedMentions(everyone=False),\n activity=discord.Game(name=f\"Commands: {Rationals.prefix}help\"),\n intents=intents\n )\n\n await load_all_extensions(bot_)\n await bot_.connection_pool()\n \n for func in bot_.helper_functions:\n func()\n \n await bot_.start(Rationals.token, reconnect=True)\n\nasyncio.run(main())\n\n\n \n ","sub_path":"bot/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}{"seq_id":"431625613","text":"import time\nimport socket\nimport threading\nimport pyping\n\ndeftimeout = 10\nsocket.setdefaulttimeout(deftimeout)\n# tcp\n# prol = socket.SOCK_STREAM\n# udp\nprol = socket.SOCK_DGRAM\n\n# hostes = [\"p.55100.net\",\"p2.55100.net\",\"p3.55100.net\",\"p.51pp.info\",\"p1.51pp.info\",\"p2.51pp.info\"]\n# testPort = 443\ntestPort = 53\nhostes = []\n# China Net\nhostes.append(\"101.226.4.6\")\nhostes.append(\"218.30.118.6\")\n# China Union\nhostes.append(\"123.125.81.6\")\nhostes.append(\"140.207.198.6\")\n# China Tietong\nhostes.append(\"101.226.4.6\")\nhostes.append(\"218.30.118.6\")\n# China Mobile\nhostes.append(\"101.226.4.6\")\nhostes.append(\"218.30.118.6\")\n\nhostes.sort()\n\n\ndef icmplink(ip, port, count=1):\n t = 0\n c = 0\n for i in range(0, count):\n try:\n c = c + 1\n b = time.time()\n r = pyping.ping(ip)\n e = time.time()\n print(\"IP [%s] in port [%s] ret = %s\" % (ip, port, r.ret_code))\n t = t + r.avg_rtt\n except Exception as e:\n print(\"IP [%s] in port [%s] timeout!!!!!!!\" % (ip, port))\n t = t + 9999\n return (t / c)\n\n\ndef tcplink(ip, port, count=1):\n t = 0\n c = 0\n for i in range(0, count):\n try:\n s = socket.socket(socket.AF_INET, prol)\n c = c + 1\n b = time.time()\n s.connect((ip, port))\n s.close()\n e = time.time()\n print(\"IP [%s] in port [%s] time = %s\" % (ip, port, (e - b)))\n t = t + (e - b)\n except Exception as e:\n print(\"IP [%s] in port [%s] timeout!!!!!!!\" % (ip, port))\n t = t + deftimeout\n return (t / c)\n\n\nres = []\nts = []\nlastHost = \"\"\nfor host in hostes:\n if lastHost == host:\n continue\n lastHost = host # remember the host so sorted duplicates are actually skipped\n # bind host as a default argument so each thread pings its own host,\n # not the loop variable's final value\n t = threading.Thread(target=(lambda h=host: res.append((h, icmplink(h, testPort, 4)))))\n ts.append(t)\n t.start()\nfor temp in ts:\n temp.join()\nres.sort(key=lambda x: x[1]) # Python 3: cmp() is gone, sort by measured time\nprint(\"======================\")\nfor r in res:\n print(\"%s\\t:\\t%s\" % r)\nprint(\"end -----\")\ns = input(\"wait for you : \")\n","sub_path":"pythonExercise/python/ping.py","file_name":"ping.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}{"seq_id":"194698209","text":"\n#Imports\nimport praw\nimport discord\nimport os \nfrom discord.ext import commands\nimport random\nimport asyncio\nimport re\nimport time\nfrom discord import Member\nfrom discord.ext.commands import 
has_permissions, MissingPermissions\nfrom random import seed\nfrom random import randint\nfrom dotenv import load_dotenv\n\n#tbh idk what this does but without it the random commands don't work\nseed(1)\n\n\n#sets working dirrectory to file location\nabspath = os.path.abspath(__file__)\ndname = os.path.dirname(abspath)\nos.chdir(dname)\n\n\nload_dotenv()\ntoken = os.getenv('DISCORD_TOKEN')\nprint(\"Successfully Loaded Bot Token\")\nGUILD = os.getenv('DISCORD_GUILD')\nprint(\"Successfully connected to Guild\")\nprawID = os.getenv('PRAWID')\nprint('Successfully connected Reddit ID')\nprawTKN = os.getenv('PRAWTOKEN')\nprint(\"Approved Reddit API Token\")\n\nclient = discord.Client()\n\n\n#Sets Prefix For Bot Commands For commands extension\nbot = commands.Bot(command_prefix='m!')\n\n#Redit Instance \nreddit = praw.Reddit(client_id=prawID, client_secret=prawTKN , user_agent=':: (by /u/WinterAmoeba514)')\n\n#Subreddit Image Embedder Function\ndef subreddit_image_embedder(subreddit, filter='hot'):\n subred = reddit.subreddit(subreddit)\n newmeme = subred.hot(limit=50)\n lstmeem = list(newmeme)\n randsub = random.choice(lstmeem)\n embed = discord.Embed(title=randsub.title,\n url=randsub.url, colour=0x3498d)\n embed.set_image(url=randsub.url)\n return embed\n\n#Startup Process\n@bot.event\nasync def on_ready():\n guild = discord.utils.find(lambda g: g.name == GUILD, client.guilds)\n\n print(\"Successful startup, connecting to Discord...\")\n print(f'{bot.user} has connected to Discord!')\n\n for guild in bot.guilds:\n if guild.name == GUILD:\n break\n\n print(\n f'{bot.user} is connected to the following guild:\\n'\n f'{guild.name}(id: {guild.id})'\n )\n\n #The displayed game of the bot\n CurrentStatus = \"Bunny Girl Simulator\"\n await bot.change_presence(status=discord.Status.online,\n activity=discord.Activity(type=discord.ActivityType.playing, name=CurrentStatus))\n\n\n\nclass SFW(commands.Cog):\n @commands.command(name=\"reddit\")\n async def subreddit_spooky(self, ctx, sub): \n '''A hotpost from a specified subreddit. Usage: m!reddit '''\n reddit_subreddit = reddit.subreddit(sub)\n if not reddit_subreddit.over18:\n await ctx.send(embed=subreddit_image_embedder(sub))\n elif reddit_subreddit.over18 and (str(ctx.channel.type) == \"private\") or (not ctx.channel.is_nsfw()):\n await ctx.send(\"Sorry, that subreddit is NSFW, try running the command again in a NSFW channel\")\n elif reddit_subreddit.over18 and ctx.channel.is_nsfw():\n await ctx.send(embed=subreddit_image_embedder(sub))\n\n @commands.command()\n async def info(self, ctx):\n '''Shows the GitHub repo''' \n await ctx.send(\"Here's a link to my GitHub Repo\")\n bot_info_embed = discord.Embed(title=\"Battle Bunny Info Card\",\n url=\"https://github.com/ko-torii/Momo_Battle_Bunny\",\n description=\"Made by Kotori’s Fried Chicken#8426 because they'd rather do Python than VB.\",\n color=0xfabcbd)\n bot_info_embed.set_thumbnail(url=\"https://i.redd.it/27bp6h2z55011.jpg\")\n bot_info_embed.set_footer(text=\"(The code is like, really really bad)\")\n await ctx.send(embed= bot_info_embed)\n await ctx.send(\"For a list of commands, do m!help\")\n\n\n @commands.command()\n async def kizuna(self, ctx):\n '''Random Kizuna Ai content'''\n ksub = \"KizunaA_Irl\"\n await ctx.send(embed=subreddit_image_embedder(ksub))\n\nclass NSFW(commands.Cog):\n @commands.command()\n async def sauce(self, ctx, *ids):\n '''Generates a nHentai.net link. 
Usage: m!doujin '''\n nIDs = ids\n nlinks = []\n nurl = \"https://www.nhentai.net/g/{0}\"\n if (str(ctx.channel.type) == \"private\") or (ctx.channel.is_nsfw()):\n for nID in nIDs:\n nlinks.append(nurl.format(nID))\n await ctx.send(\"\\n\".join(nlinks))\n else:\n await ctx.send(\"Buddy, you can only use this command in NSFW channels\")\n @commands.command()\n async def randomsauce(self,ctx):\n '''Generates a random nHentai.net link. Usage: m!randdoujin'''\n randNID = randint(0, 300000)\n nurl = \"https://www.nhentai.net/g/\"\n if (str(ctx.channel.type) == \"private\") or (ctx.channel.is_nsfw()):\n await ctx.send(nurl + str(randNID))\n else:\n await ctx.send(\"Buddy, you can only use this command in NSFW channels\")\n\n\n @commands.command()\n async def order(self, ctx, *orders):\n '''Generates a Hentai Cafe Link. Usage: m!cafe '''\n cafeIDs = orders\n cafeLink = []\n cafeURL = \"https://hentai.cafe/hc.fyi/{0}\"\n if (str(ctx.channel.type) == \"private\") or (ctx.channel.is_nsfw()):\n for cafeID in cafeIDs :\n cafeLink.append(cafeURL.format(cafeID))\n await ctx.send(\"\\n\".join(cafeLink))\n else:\n await ctx.send(\"Buddy, you can only use this command in NSFW channels\")\n\n @commands.command()\n async def randomorder(self,ctx):\n '''Generates a random Hentai Cafe link. Usage: m!randomorders'''\n randCafeID = randint(0, 14800)\n cafeURL = \"https://hentai.cafe/hc.fyi/\"\n if (str(ctx.channel.type) == \"private\") or (ctx.channel.is_nsfw()):\n await ctx.send(cafeURL + str(randCafeID))\n else:\n await ctx.send(\"Buddy, you can only use this command in NSFW channels\")\n\n\n\nclass Administration(commands.Cog):\n @commands.command(name=\"purge\", pass_context=True)\n @commands.has_permissions(manage_messages=True)\n async def purge(self, ctx, amount: int):\n\n '''Clears Messages. Usage: m!purge '''\n if amount == 0:\n await ctx.send('Please specify amount.')\n else:\n await ctx.channel.purge(limit=amount+1)\n await ctx.send('I cleared ' + str(amount) + ' messages, ' + ctx.message.author.mention)\n time.sleep(3)\n await ctx.channel.purge(limit=1)\n\n @purge.error \n async def purge_error(self, error, ctx):\n if isinstance(error, MissingPermissions):\n text = \"Sorry,\" + ctx.message.author.mention + \"you do not have permissions to do that!\"\n await ctx.send(text)\n\n\n @commands.command()\n async def test(self, ctx):\n '''States The Channel Type'''\n await ctx.send(\"もしもし! バトルバニーです :carrot:\")\n\n\n\nbot.add_cog(SFW(bot))\nbot.add_cog(NSFW(bot))\nbot.add_cog(Administration(bot))\n\n\nbot.run(token)\n","sub_path":"refined_battle_bunny_code.py","file_name":"refined_battle_bunny_code.py","file_ext":"py","file_size_in_byte":6890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"157553154","text":"\"\"\"\nThis file contains code for creating data frames and preparing them\nto be analyzed in the other relevant files.\n\"\"\"\nimport numpy as np\nimport pandas as pd\n\n\n# Check whether our data file exists. 
If the file does not exist\n# then create the appropriate index and columns and save to the\n# file where it belongs\nimport os  # os was missing from the imports above; needed for os.path.isfile\nif os.path.isfile(\"../data/stepdata.csv\") is False:\n # Create the date ranges\n startdate = pd.datetime(2016, 1, 1, 0, 0)\n enddate = pd.datetime(2016, 12, 31, 23, 59)\n dates = pd.date_range(start=startdate, end=enddate, freq=\"1min\")\n\n # Create columns for everyone participating\n names = [\"Andrew\", \"Austin\", \"Chase\", \"Grey\", \"Jenna\",\n \"Jess\", \"Kevin\", \"Reid\", \"Stephen\", \"Winston\"]\n\n # Create the main data frame and save as csv file\n steps_df = pd.DataFrame(columns=names, index=dates)\n\nelse:\n # Read the data that we already have into pandas\n steps_df = pd.read_csv(\"../data/stepdata.csv\",\n index_col=0, parse_dates=True)\n\n # Find last date with NO null data\n startdate = steps_df.dropna().index[-1]\n enddate = pd.datetime(2016, 12, 31, 23, 59)\n\n# Save final data back to csv \nsteps_df.to_csv(\"../data/stepdata.csv\")\n","sub_path":"code/makedata.py","file_name":"makedata.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}{"seq_id":"541619933","text":"import numpy as np\nimport sys\nsys.path.append('/Users/miguel/Documents/Iceland-Faroe/ROMS_GRID/')\nfrom load_vars import variables, GRID \nimport figures\nimport resize\nfrom Dynamics_Vars import Bernoulli, vorticity\n\nf=1.25e-4\nU=np.mean(variables('u')[-100:-1,:,:,:], axis=0)\nV=np.mean(variables('v')[-100:-1,:,:,:], axis=0)\nT=np.mean(variables('temp')[-100:-1,:,:,:], axis=0)\nssh=np.mean(variables('zeta')[-100:-1,:,:], axis=0)\nN=np.shape(T)\nW=np.mean(variables('w')[-100:-1,:,:,:], axis=0)\n\nzeta=vorticity(U,V,'level', GRID)\nB=Bernoulli(U,V,T,ssh,GRID)\n#zeta=resize.reduce(zeta,[N[0]-1, N[1]-2, N[2]-2])\nW=resize.reduce(W, np.shape(T))\naxis=2\ncmap='seismic_r' # Perceptually uniform\n#cmap='inferno'\n#cmap='PRGn' # Divergent map\nlocation='mid_channel' \n#figures.vertical(T, V, W, location, axis, cmap)\nfigures.horizontal(zeta/f, U, V, B, 'none', cmap)\n","sub_path":"test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}{"seq_id":"368643919","text":"from collections import deque\n\nfireworks_effects = deque([int(n) for n in input().split(\", \")])\nexplosive_powers = deque([int(n) for n in input().split(\", \")])\npalm_count, willow_count, crossette_count = 0, 0, 0\nwhile fireworks_effects and explosive_powers:\n if fireworks_effects[0] <= 0 or explosive_powers[-1] <= 0:\n if fireworks_effects[0] <= 0:\n fireworks_effects.popleft()\n if explosive_powers[-1] <= 0:\n explosive_powers.pop()\n continue\n\n res = fireworks_effects[0] + explosive_powers[-1]\n if res % 3 == 0 and res % 5 == 0:\n crossette_count += 1\n elif res % 5 == 0:\n willow_count += 1\n elif res % 3 == 0:\n palm_count += 1\n else:\n fireworks_effects.append(fireworks_effects.popleft() - 1)\n continue\n\n fireworks_effects.popleft()\n explosive_powers.pop()\n if palm_count >= 3 and willow_count >= 3 and crossette_count >= 3:\n print(f\"Congrats! You made the perfect firework show!\")\n break\nelse:\n print(f\"Sorry. 
You can't make the perfect firework show.\")\n\nif fireworks_effects:\n print(f\"Firework Effects left: {', '.join([str(n) for n in fireworks_effects])}\")\nif explosive_powers:\n print(f\"Explosive Power left: {', '.join([str(n) for n in explosive_powers])}\")\n\nprint(f\"Palm Fireworks: {palm_count}\\n\"\n f\"Willow Fireworks: {willow_count}\\n\"\n f\"Crossette Fireworks: {crossette_count}\")\n","sub_path":"Python/Advanced/Advanced/Exams/14-February-2021/problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"90644473","text":"\"\"\"\nIt contains customadmin's models. It's used to customize admin's interface\n\"\"\"\nfrom upy.contrib.tree.models import _\nfrom django.db import models\nfrom upy.contrib.colors.fields import ColorField\nfrom upy.contrib.sortable.models import PositionModel\nfrom django.conf import settings\nfrom imagekit.models import ImageSpecField, ProcessedImageField\nfrom pilkit.processors import ResizeToFit\nfrom upy.fields import NullTrueField\n\n\ndef verifyApp(app):\n return app in ['django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.admin',\n 'django.contrib.sitemaps',\n 'mptt',\n 'imagekit',\n 'upy',\n 'south',\n 'upy.contrib.inspect',\n 'modeltranslation',\n 'upy.contrib.tabbed_translation',\n 'upy.contrib.cked',\n 'upy.contrib.colors',\n 'upy.contrib.rosetta']\n\n\ndef all_apps():\n \"\"\"\n it returns a list of tuples with the name of all installed apps with admin's registration.\n \"\"\"\n list_apps = []\n for app in settings.INSTALLED_APPS:\n if not verifyApp(app):\n list_apps.append([app, app.split(\".\")[-1].title()])\n return list_apps\n\n\ndef list_apps():\n \"\"\"\n it returns a list of tuples with the name of all installed apps with admin's registration.\n \"\"\"\n list_apps = []\n for app in settings.INSTALLED_APPS:\n if not verifyApp(app):\n try:\n CustomApp.objects.get(application=app.split(\".\")[-1].title())\n except:\n list_apps.append([app.split(\".\")[-1].title()] * 2)\n return list_apps\n\n\ndef list_models():\n \"\"\"\n It returns a list of tuple with the name of all models in installed apps\n \"\"\"\n list_models = []\n for app in settings.INSTALLED_APPS:\n if not verifyApp(app):\n list_models_app = []\n try:\n all_models = models.get_models(models.get_app(app.split(\".\")[-1]))\n except:\n pass#app doesn't have model.py module\n for m in all_models:\n try:\n CustomModel.objects.get(app=app, model=m.__name__)\n except:\n list_models_app.append([m._meta.verbose_name_plural] * 2)\n list_models.append((app.split(\".\")[-1].title(), list_models_app))\n return list_models\n\n\nclass CustomAdmin(models.Model):\n \"\"\"\n This object define parameters to customize admin layout. It has sense if you use only a record \n of this class. 
Infact base template use the first occurence find in the database\n \"\"\"\n branding = models.CharField(max_length=200, null=True, blank=True,\n default=u\"upyproject.com\",\n help_text=_(u\"Set branding\"),\n verbose_name=_(u\"Branding\"))\n branding_link = models.CharField(max_length=200, null=True, blank=True,\n default=u\"www.upyproject.com\",\n help_text=_(u\"Set branding's link\"),\n verbose_name=_(u\"Branding link\"))\n branding_image = models.FilePathField(path=settings.RELATIVE_STATIC_ROOT, null=True, blank=True,\n match=\"\\.jpg|\\.jpeg|.png|\\.gif\", recursive=True,\n help_text=_(u\"Set brand's image.\"),\n verbose_name=_(u\"Branding image\"))\n default = NullTrueField(_(u\"Default\"), help_text=_(u\"Select it if you want use this as default customization.\"),\n unique=True)\n default_app_image = ProcessedImageField(verbose_name=_(u\"Default app image\"),\n help_text=_(u\"Insert a default application image\"), null=True, blank=True,\n upload_to='customadmin')\n default_model_image = ProcessedImageField(verbose_name=_(u\"Default model image\"),\n help_text=_(u\"Insert a default model image\"), null=True, blank=True,\n upload_to='customadmin')\n\n app_image = ImageSpecField([ResizeToFit(128, 128)], source='default_app_image',\n options={'quality': 90}) #format='JPEG',\n model_image = ImageSpecField([ResizeToFit(50, 50)], source='default_model_image', options={'quality': 90})\n\n bg_header = ColorField(max_length=200, null=True, blank=True,\n help_text=_(u\"Set header's background color.\"),\n verbose_name=_(u\"BG Header\"))\n sitename_font = models.CharField(max_length=200, null=True, blank=True,\n help_text=_(u\"Set sitename font.\"),\n verbose_name=_(u\"Sitename font\"))\n sitename_font_size = models.CharField(max_length=200, null=True, blank=True,\n help_text=_(u\"Set sitename font size.\"),\n verbose_name=_(u\"Sitename font size\"))\n sitename_font_weight = models.CharField(max_length=200, null=True, blank=True,\n help_text=_(u\"Set sitename font weight.\"),\n verbose_name=_(u\"Sitename font weight\"))\n table_title_bg = ColorField(max_length=200, null=True, blank=True,\n help_text=_(u\"Set the background of title in tables.\"),\n verbose_name=_(u\"BG table title \"))\n table_title_color = ColorField(max_length=200, null=True, blank=True,\n help_text=_(u\"Set the color of title in tables.\"),\n verbose_name=_(u\"Table title color\"))\n h2_color = ColorField(max_length=200, null=True, blank=True,\n help_text=_(u\"Set h2 color.\"), verbose_name=_(u\"H2 color\"))\n h2_size = models.CharField(max_length=200, null=True, blank=True,\n help_text=_(u\"Set h2 size.\"), verbose_name=_(u\"H2 size\"))\n h3_color = ColorField(max_length=200, null=True, blank=True,\n help_text=_(u\"Set h3 color.\"), verbose_name=_(u\"H3 color\"))\n h3_size = models.CharField(max_length=200, null=True, blank=True,\n help_text=_(u\"Set h3 size.\"), verbose_name=_(u\"H3 size\"))\n link_color = ColorField(max_length=200, null=True, blank=True,\n help_text=_(u\"Set link's color\"), verbose_name=_(u\"Link color\"))\n link_hover_color = ColorField(max_length=200, null=True, blank=True,\n help_text=_(u\"Set link's color when hover\"),\n verbose_name=_(u\"Link hover color\"))\n html_head = models.TextField(null=True, blank=True,\n help_text=_(u\"Set other html code to put in HEAD section. \"),\n verbose_name=_(u\"Html head\"))\n css_code = models.TextField(null=True, blank=True,\n help_text=_(u\"Set the css code. 
\"),\n verbose_name=_(u\"Css code\"))\n use_css_code = models.BooleanField(help_text=_(u\"Check it if you want use css code to extends style.\"),\n verbose_name=_(u\"Use css code\"), default=False)\n use_log_sidebar = models.BooleanField(default=False,\n help_text=_(u\"Check it if you want use log sidebar in index template.\"),\n verbose_name=_(u\"Use log sidebar\"))\n view_mode = models.CharField(max_length=250, null=True, blank=True,\n choices=(('use_custom_app', _('Use custom app system')),\n ('use_app_icons', _(\"Use apps' icons system\")),\n ('use_app_and_model_icons', _(\"Use apps and models icons system\")),\n ('use_model_icons',\n _(\"Use models' icons system in index group models by app\")),\n ('use_total_model_icons',\n _(\"Use models' icons system in index ungroup models by app\"))),\n help_text=_(u\"Choose the view mode\"),\n verbose_name=_(u\"View mode\"))\n autocomplete_app_list = models.BooleanField(default=True,\n help_text=_(\n u\"Check it if you want complete the custom app list with the default app list.\"),\n verbose_name=_(u\"Autocomplete App\"))\n autocomplete_models_list = models.BooleanField(default=True,\n help_text=_(\n u\"Check it if you want complete the custom models list with the default models list.\"),\n verbose_name=_(u\"Autocomplete model\"))\n\n @property\n def customization(self):\n \"\"\"\n It returns branding if defined, else image, else only his primary key.\n \"\"\"\n if self.branding:\n return self.branding\n elif self.branding_image:\n res = self.branding_image.split(\"/\")[-1]\n return res\n else:\n return self.pk\n\n @property\n def branding_image_url(self):\n return self.branding_image.replace(settings.RELATIVE_STATIC_ROOT, settings.STATIC_URL).replace(\"//\", \"/\")\n\n def save(self, *args, **kwargs):\n appicons = CustomApp.objects.all()\n if self.view_mode == \"use_app_icons\" and not appicons:\n for app in list_apps():\n new_app = CustomApp(application=app[0], verbose_app_name=app[1])\n new_app.save()\n super(CustomAdmin, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return u\"%s\" % (self.branding)\n\n class Meta:\n verbose_name = _(u\"Custom Admin\")\n verbose_name_plural = _(u\"Custom Admin\")\n ordering = ['branding']\n\n\nclass CustomApp(PositionModel):\n \"\"\"\n This object links the installed_apps with an icon to use if CustomAdmin.use_app_icons is True\n \"\"\"\n application = models.CharField(max_length=250,\n unique=True, help_text=_(u\"Select the application\"),\n verbose_name=_(u\"Application\"))\n verbose_app_name = models.CharField(max_length=250, unique=True,\n help_text=_(u\"Write the verbose name to show\"),\n verbose_name=_(u\"Verbose app name\"))\n image = models.ImageField(_(u'Image'), null=True, blank=True, upload_to='upyimage')\n thumb = ImageSpecField([ResizeToFit(80, 80)],\n source='image',\n format='png')\n show_models = models.BooleanField(\n default=True,\n help_text=_(u\"If use_app_icons is False in Customadmin, you can choose wheter or not show the model list.\"),\n verbose_name=_(u\"Show models\")\n )\n\n def __unicode__(self):\n return self.application\n\n class Meta:\n verbose_name = _(u\"Custom App\")\n verbose_name_plural = _(u\"Custom Apps\")\n ordering = ['position']\n\n\nclass CustomLink(PositionModel):\n \"\"\"\n This object links the installed_apps with an icon to use \n if CustomAdmin.use_app_icons is True\n \"\"\"\n link_url = models.CharField(max_length=250, default=\"/admin/\",\n help_text=_(u\"Select the url you want to link\"),\n verbose_name=_(u\"Link Url\"))\n verbose_url_name = 
models.CharField(max_length=250, unique=True,\n                                        help_text=_(u\"Write the verbose name to show\"),\n                                        verbose_name=_(u\"Verbose url name\"))\n    image = models.ImageField(_(u'Image'), null=True, blank=True, upload_to='upyimage')\n    thumb = ImageSpecField([ResizeToFit(80, 80)], source='image', format='png')\n\n    def __unicode__(self):\n        return self.link_url\n\n    class Meta:\n        verbose_name = _(u\"Custom Link\")\n        verbose_name_plural = _(u\"Custom Links\")\n        ordering = ['position']\n\n\nclass CustomModel(PositionModel):\n    \"\"\"\n    This object links models in installed_apps with an icon to use\n    if CustomAdmin.view_mode == \"use_model_icons\" or CustomAdmin.view_mode == \"use_inner_model_icons\"\n    \"\"\"\n    app = models.CharField(max_length=250,\n                           help_text=_(u\"Select an application\"),\n                           verbose_name=_(u\"App\"))\n    model = models.CharField(max_length=250,\n                             help_text=_(u\"Select a model\"),\n                             verbose_name=_(u\"Model\"))\n    image = models.ImageField(_(u'Image'), null=True, blank=True, upload_to='upyimage')\n    thumb = ImageSpecField([ResizeToFit(50, 50)],\n                           source='image',\n                           format='png')\n\n    def __unicode__(self):\n        return self.model\n\n    class Meta:\n        verbose_name = _(u\"Custom Model\")\n        verbose_name_plural = _(u\"Custom Models\")\n        unique_together = ('app', 'model')\n        ordering = ['position']\n    \n","sub_path":"upy/contrib/customadmin/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":13647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} {"seq_id":"531825451","text":"import sys\nsys.setrecursionlimit(15000)\n\n\ndef dfs(x, y, area):\n    paper[y][x] = 1\n    area[-1] += 1\n\n    if y > 0 and paper[y - 1][x] == 0:\n        dfs(x, y - 1, area)\n    if y < m - 1 and paper[y + 1][x] == 0:\n        dfs(x, y + 1, area)\n    if x > 0 and paper[y][x - 1] == 0:\n        dfs(x - 1, y, area)\n    if x < n - 1 and paper[y][x + 1] == 0:\n        dfs(x + 1, y, area)\n\n\ndef main():\n    area = []\n    for y in range(m):\n        for x in range(n):\n            if paper[y][x] == 0:\n                area.append(0)\n                dfs(x, y, area)\n\n    area.sort()\n    print(len(area))\n    print(\" \".join(map(str, area)))\n\n\nif __name__ == \"__main__\":\n    input = sys.stdin.readline\n    m, n, k = map(int, input().split())\n    paper = [[0 for _ in range(n)] for _ in range(m)]\n    for _ in range(k):\n        x1, y1, x2, y2 = map(int, input().split())\n        for y in range(m - y2, m - y1):\n            for x in range(x1, x2):\n                paper[y][x] = -1\n\n    main()\n    ","sub_path":"0630/2583 영역 구하기.py","file_name":"2583 영역 구하기.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} {"seq_id":"420307815","text":"import numpy as np\r\nfrom scipy import stats\r\nfrom scipy.optimize import curve_fit\r\nimport matplotlib.pyplot as plt\r\nimport uncertainties.unumpy as unp\r\n\r\nRhoch2 = np.genfromtxt('build/1a.temp')\r\nn=np.linspace(10,60,11)\r\nR=np.sqrt(Rhoch2)\r\n\r\ndef f(x,a,b,c):\r\n\treturn a*x**b+c\r\n\t\r\n\r\n\r\nparams, covar = curve_fit(f,R,n)\r\n\r\n\r\nR2=np.linspace(np.min(R)*0.8,np.max(R)+np.min(R)*0.2,1000)\r\nplt.cla()\r\nplt.clf()\r\nplt.plot(n,R, 'gx', label='measured value pairs')\r\nplt.plot(f(R2,*params), R2, 'r-', label='fit')\r\nplt.ylim(R2[0],R2[-1])\r\nplt.xlabel(r'$N$')\r\nplt.ylabel(r'$R$')\r\nplt.legend(loc='best')\r\nplt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)\r\nplt.savefig('build/fit.pdf')\r\nparams = unp.uarray(params, np.sqrt(np.diag(covar)))\r\nprint(\"a=\",params[0])\r\nprint(\"b=\",params[1])\r\nprint(\"c=\",params[2])\r\nfile = 
open('build/1b.temp','w')\r\nfile.write(r'a=\\num{'+'{:0.2f}'.format(unp.nominal_values(params)[0])+'+-'+'{:0.2f}'.format(unp.std_devs(params)[0])+'}\\\\\\\\\\n')\r\nfile.write(r'b=\\num{'+'{:0.2f}'.format(unp.nominal_values(params)[1])+'+-'+'{:0.2f}'.format(unp.std_devs(params)[1])+'}\\\\\\\\\\n')\r\nfile.write(r'c=\\num{'+'{:0.2f}'.format(unp.nominal_values(params)[2])+'+-'+'{:0.2f}'.format(unp.std_devs(params)[2])+'}\\n')\r\n\r\n","sub_path":"Blatt01/fit_w.py","file_name":"fit_w.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"401270901","text":"# Implement a class to hold room information. This should have name and\n# description attributes.\n\n\nclass Room:\n def __init__(self, name, desc):\n self.name = name\n self.desc = desc\n self.n_to = None\n self.s_to = None\n self.e_to = None\n self.w_to = None\n self.items = []\n self.hidden_items = None\n self.hidden_rooms = None\n self.is_dark = False\n self.is_locked = False\n\n def add_item(self, item, player):\n self.items.append(item)\n player.remove_from_inventory(item)\n\n def remove_item(self, item, player):\n if self.is_dark:\n print(\"You cannot pick up items in a dark room!\")\n return\n self.items.remove(item)\n player.add_to_inventory(item)\n\n def view_items(self):\n if self.is_dark:\n print(\"You cannot see items in a dark room!\")\n return\n\n if len(self.items) > 0:\n for item in self.items:\n print(item.name)\n else:\n print(\"Room contains no visible items\")\n","sub_path":"src/room.py","file_name":"room.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"122043702","text":"from django import forms\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\n\nfrom .models import Document\nfrom .models import Journal\n\n\nclass SignUpForm(UserCreationForm):\n\tfirst_name = forms.CharField(max_length=30, required=False, help_text='Optional.')\n\tlast_name = forms.CharField(max_length=30, required=False, help_text='Optional.')\n\temail = forms.EmailField(max_length=254, help_text='Required. 
Inform a valid email address.')\n\tnoreg = forms.CharField(label='NRM')\n\tangkatan = forms.CharField(label='Angkatan')\n\tpeminatan = forms.CharField(label='Peminatan')\n\tclass Meta:\n\t\tmodel = User\n\t\tfields = ('username', 'first_name', 'last_name', 'email','password1', 'password2','noreg','angkatan','peminatan', )\n\n\nclass DocumentForm(forms.ModelForm):\n class Meta:\n model = Document\n fields = ('description', 'document', )\n\t\t\nclass JournalForm(forms.ModelForm):\n class Meta:\n model = Journal\n fields = ('description', 'journal', )\n\t\t","sub_path":"src/mahasiswa/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"636753811","text":"\nimport os\nimport errno\nfrom shutil import copyfile\n\n# os crawler\n\noutput_root = '../output'\ntarget = 'to_here'\n\nfor path, directory, files in os.walk('../scaffolder/default_folder'):\n # path = root.split(os.sep)\n print(path, directory, files)\n root_directory = path.split('/', 3)[-1]\n print('root directory:', root_directory)\n\n for file in files:\n # get the path, etc\n file_source_path = os.path.join(path, file)\n if root_directory == 'default_folder':\n file_dest_path = os.path.join(output_root, target, file)\n else:\n file_dest_path = os.path.join(output_root, target, root_directory, file)\n\n print('source:', file_source_path)\n print('dest:', file_dest_path)\n\n # process the file\n\n # make sure that directory is exist\n if not os.path.exists(os.path.dirname(file_dest_path)):\n try:\n os.makedirs(os.path.dirname(file_dest_path))\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n\n # copy\n copyfile(file_source_path, file_dest_path)\n\n print()","sub_path":"snippets/04_file traversing and copy.py","file_name":"04_file traversing and copy.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"628011469","text":"from random import randint \n\ntarget_file = open('cookies_receit.txt', 'w')\n\n\ndef create_cookies_list():\n\tcookies = [\"chocolate chip\", \"gluten free\", \"oatmeal raison\", \"thin mints\", \"tagalongs\"]\n\tfor i in range(200):\n\t\tnumber = randint(0, (len(cookies)-1))\n\t\ttarget_file.write(cookies[number]+\":\"+ str(randint(1,100)) + '\\n')\n\ttarget_file.close()\n\ncreate_cookies_list()","sub_path":"week03/lists/generatecookies.py","file_name":"generatecookies.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"168459315","text":"print(\"Triangulo de asteriscos *\")\n\nn=int(input(\"Digita el numero de filas: \"))\n\nfor i in range(n):\n\ta=\" \"\n\tfor j in range(n):\n\t\tif j>=i:\n\t\t\ta+=\"* \"\n\t\telse:\n\t\t\ta+=\" \"\n\tprint(a)\t\n\nprint(\"Final del programa\")\n\n\t\t\t\t\t","sub_path":"N_81.py","file_name":"N_81.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"452441393","text":"import random\nimport threading\n\n__author__ = 'zyan'\nfrom kivy import args\nfrom kivy.app import App\nfrom kivy.lang import Builder\nfrom kivy.uix.stacklayout import StackLayout\nimport time\nimport math\nimport cmath\nimport cairo\nimport psutil\nimport os\nfrom subprocess import Popen\n\n#------ Configuration --------\nIMAGE_SIZE = (1000, 
1000)\nNUM_SUBDIVISIONS = 8\n#-----------------------------\n\ngoldenRatio = (1 + math.sqrt(5)) / 2\n\n# RandomColoring\nColor11 = random.random()\nColor12 = random.random()\nColor13 = random.random()\nColor21 = random.random()\nColor22 = random.random()\nColor23 = random.random()\n#-----------------------------\n\nclass Penrose():\n # Gtk.init(args)\n def subdivide(triangles):\n result = []\n for color, A, B, C in triangles:\n if color == 0:\n # Subdivide red triangle\n P = A + (B - A) / goldenRatio\n result += [(0, C, P, B), (1, P, C, A)]\n else:\n # Subdivide blue triangle\n Q = B + (A - B) / goldenRatio\n R = B + (C - B) / goldenRatio\n result += [(1, R, C, A), (1, Q, R, B), (0, R, Q, A)]\n return result\n\n # Create wheel of red triangles around the origin\n triangles = []\n for i in xrange(10):\n B = cmath.rect(1, (2*i - 1) * math.pi / 10)\n C = cmath.rect(1, (2*i + 1) * math.pi / 10)\n if i % 2 == 0:\n B, C = C, B # Make sure to mirror every second triangle\n triangles.append((0, 0j, B, C))\n\n # Perform subdivisions\n for i in xrange(NUM_SUBDIVISIONS):\n triangles = subdivide(triangles)\n\n # Prepare cairo surface\n surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, IMAGE_SIZE[0], IMAGE_SIZE[1])\n cr = cairo.Context(surface)\n cr.translate(IMAGE_SIZE[0] / 2.0, IMAGE_SIZE[1] / 2.0)\n wheelRadius = 1.2 * math.sqrt((IMAGE_SIZE[0] / 2.0) ** 2 + (IMAGE_SIZE[1] / 2.0) ** 2)\n cr.scale(wheelRadius, wheelRadius)\n\n # Draw red triangles\n for color, A, B, C in triangles:\n if color == 0:\n cr.move_to(A.real, A.imag)\n cr.line_to(B.real, B.imag)\n cr.line_to(C.real, C.imag)\n cr.close_path()\n cr.set_source_rgb(Color11, Color12, Color13)\n cr.fill()\n\n # Draw blue triangles\n for color, A, B, C in triangles:\n if color == 1:\n cr.move_to(A.real, A.imag)\n cr.line_to(B.real, B.imag)\n cr.line_to(C.real, C.imag)\n cr.close_path()\n cr.set_source_rgb(Color21, Color22, Color23)\n cr.fill()\n\n # Determine line width from size of first triangle\n color, A, B, C = triangles[0]\n cr.set_line_width(abs(B - A) / 10.0)\n cr.set_line_join(cairo.LINE_JOIN_ROUND)\n\n # Draw outlines\n for color, A, B, C in triangles:\n cr.move_to(C.real, C.imag)\n cr.line_to(A.real, A.imag)\n cr.line_to(B.real, B.imag)\n cr.set_source_rgb(0.2, 0.2, 0.2)\n cr.stroke()\n\n # Save to PNG\n surface.write_to_png('penrose.png')\n\nBuilder.load_string(\"\"\"\n:\n Image:\n source: 'penrose.png'\n size_hint: None,None\n size: 1000,1000\n\"\"\")\n\n\nclass MyPaintApp(App, StackLayout):\n def build(self):\n for process in psutil.process_iter():\n if process.cmdline == ['python', 'pythonPenroseServer.py']:\n print('Process found. 
Terminating it.')\n                process.terminate()\n                break\n        else:\n            print('Process not found: starting it.')\n            Popen(['python', 'pythonPenroseServer.py'])\n        # while 1 :\n        return self\n\n# if __name__ == '__main__':\n\n\nif __name__ == '__main__':\n    Penrose.run()\n\n    # PeriodicExecutor().run()\n\n\n    # time.sleep(10)\n    # os.system(\"TASKKILL /F /IM /usr/bin/python2.7 /home/zyan/PycharmProjects/PenrosePython/pythonPenroseServer.py\")\n    # time.sleep(5)\n    # MyPaintApp().stop()\n    # threading.Timer(15.0, self.stop()).start()\n","sub_path":"pythonPenroseServer.py","file_name":"pythonPenroseServer.py","file_ext":"py","file_size_in_byte":3921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} {"seq_id":"217350701","text":"from tkinter import *\nfrom gui.gui_config import *\n\n\ndef init():\n    window = Tk()\n    window.title(\"ssr-manager\")\n    window.geometry(\"900x400\")\n    window.resizable(width=False, height=False)\n\n    win_frm = Frame(window)\n    win_frm.pack()\n\n    lst_frm = Frame(win_frm, cnf_frm, bg=\"black\", width=280) # list panel\n    det_frm = Frame(win_frm, cnf_frm, bg=\"blue\", width=340) # details panel\n    qr_frm = Frame(win_frm, cnf_frm, bg=\"green\", width=280) # QR code panel\n\n    lst_frm.pack(cnf_frm_pack)\n    det_frm.pack(cnf_frm_pack)\n    qr_frm.pack(cnf_frm_pack)\n\n    window.mainloop()\n","sub_path":"gui/gui_panel.py","file_name":"gui_panel.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} {"seq_id":"582163370","text":"from django import forms\nfrom .models import Profile\nfrom django.contrib.auth.models import User\n\nclass UserForm(forms.ModelForm):\n    password = forms.CharField(label='Password', widget=forms.PasswordInput())\n\n    class Meta:\n        model = User\n        fields = ('username', 'email')\n        labels = {\n            'username': 'Username',\n            'email': 'Email',\n        }\n        help_texts = {\n            'username': '',\n        }\n        # https://docs.djangoproject.com/en/1.9/ref/models/fields/#error-messages\n        error_messages = {\n            'username': {\n                'unique': 'That username is already registered',\n                'max_length': 'That username is too long',\n            },\n        }\n\nclass ProfileForm(forms.ModelForm):\n\n    class Meta:\n        model = Profile\n        fields = ('avatar', 'age', 'location')","sub_path":"apps/account/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} {"seq_id":"324589064","text":"from selenium import webdriver\nimport time\nimport math\n\n\ndef calc(integer_from_site: str) -> str:\n    return str(math.log(abs(12*math.sin(int(integer_from_site)))))\n\n\nlink = \"http://suninjuly.github.io/alert_accept.html\"\n\ntry:\n    browser = webdriver.Chrome()\n    browser.get(link)\n\n    button = browser.find_element_by_tag_name('button')\n    button.click()\n\n    confirm = browser.switch_to.alert\n    confirm.accept()\n\n    # find the element and scroll down to it\n    x_element = browser.find_element_by_id(\"input_value\")\n    browser.execute_script('return arguments[0].scrollIntoView(true);', x_element)\n    x = x_element.text\n\n    y = calc(x)\n    input1 = browser.find_element_by_id(\"answer\")\n    browser.execute_script('return arguments[0].scrollIntoView(true);', input1)\n    input1.send_keys(y)\n\n    button2 = browser.find_element_by_tag_name('button')\n    browser.execute_script('return arguments[0].scrollIntoView(true);', button2)\n    button2.click()\n\n    time.sleep(1)\n    answer = browser.switch_to.alert.text\n    print(answer.split()[-1])\n\nfinally:\n    # close the browser after all the steps\n    
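# Aside (a hedged sketch, not part of the original script): the fixed time.sleep(1) above\n    # is fragile; Selenium's explicit waits poll for a condition instead, e.g.\n    #     from selenium.webdriver.support.ui import WebDriverWait\n    #     from selenium.webdriver.support import expected_conditions as EC\n    #     WebDriverWait(browser, 10).until(EC.alert_is_present())\n    # (the 10-second timeout is an illustrative value, not taken from the source)\n    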
browser.quit()","sub_path":"selenium_course/part2/confirm_window.py","file_name":"confirm_window.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"10244827","text":"from Acquisition import aq_inner\nfrom plone.app.layout.viewlets.common import ViewletBase\nfrom zope.component import getMultiAdapter\n\n\nclass BootstrapDropdownMenu(ViewletBase):\n\n def navtree(self):\n context = aq_inner(self.context)\n view = getMultiAdapter((context, self.request),\n name='sitemap_builder_view')\n data = view.siteMap()\n bottomLevel = 5\n # XXX: The recursion should probably be done in python code\n return context.homepage_sections(\n children=data.get('children', []),\n level=1,\n bottomLevel=bottomLevel\n )\n","sub_path":"plonetheme/bootstrap/browser/dropdownmenu.py","file_name":"dropdownmenu.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"539098641","text":"#import\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom time import sleep as wait\n\n#init path\npath = 'D:/TUT/Learn selenium/chromedriver.exe'\n\n#init driver\ndriver = webdriver.Chrome(path)\n\n#getting the website\ndriver.get(\"https://10fastfingers.com/typing-test/english\")\n\nwait(7)\n\n#function to type \ndef Typingbotmain():\n for i in range(0,1000):\n \tword = driver.find_element_by_class_name(\"highlight\").text\t\n \tdriver.find_element_by_class_name(\"form-control\").send_keys(word)\n \tdriver.find_element_by_class_name(\"form-control\").send_keys(u'\\ue00d')\n\n#executig the commands\nTypingbotmain()\n\n\n\n\n","sub_path":"Learn selenium/HACK_DEMO.py","file_name":"HACK_DEMO.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"48764717","text":"import pandas as pd\nimport glob\nimport os\nimport numpy\nimport networkx as nx\n\nclass GraphDesc:\n\n\tdef __init__(self, ID, desc, graph_list):\n\n\t\tself.ID = ID # Pathway ID (list)\n\t\tself.desc = desc # Pathway Description (list)\n\t\tself.graph_list = graph_list # Pathway Topology (Nodes+Links) (list of dictionary)\t\t\n\t\t\n\t\t\ndef get_graphs(organism, sep = \"\\t\", graphs_path = \"/Users/hd/Git/bpa/kegg\"):\t\n\tdf = populate_pathway_info(organism, sep)\t\n\tID = list(df['ID'])\n\tdesc = list(df['Desc'])\n\n\t# ID=['hsa1','hsa2','hsa3','hsa4','hsa5']\n\t# Desc=['Glycolisation','Phosphorylation','Apoptosis','Angiogenesis','Autophagy']\n\t\n\tgraph_list = convert_graphs(graphs_path)\n\t# Top=[{'a':['b','c'],'b':['d'],'c':['d','e'],'d':[],'e':[]},\n\t# {'c':['f','h'],'f':['i'],'h':['i','j'],'i':[],'j':[]},\n\t# {'g':['i','j'],'i':['k'],'j':['k','l'],'k':[],'l':[]},\n\t# {'m':['n','a'],'n':['c'],'a':['c','d'],'c':[],'d':[]},\n\t# {'e':['f','h'],'f':['i'],'h':['i','j'],'i':[],'j':[]},]\n\t\t\n\treturn GraphDesc(ID, desc, graph_list)\n\ndef populate_pathway_info(pathways_file, sep = \"\\t\"):\n\treturn pd.read_csv(pathways_file, sep)\n\ndef convert_graphs(graph_directory):\n\tgraphs = []\t\n\tpath = graph_directory + os.path.sep + \"*.csv\"\n\tfor fname in glob.glob(path):\t\t\n\t\tnodes = numpy.genfromtxt(graph_directory + os.path.sep + \"nodes\" + \\\n\t\t os.path.sep + fname.split(os.path.sep)[-1].split(\".\")[0] + \"_nodes.txt\", dtype=int)\t\t\n\t\tadj_graph = numpy.genfromtxt(fname, delimiter=\",\")\n\t\tG = nx.DiGraph(adj_graph)\n\t\tG = 
nx.relabel_nodes(G, dict(zip(range(adj_graph.shape[0]-1), nodes)))\t\t\n\t\tgraphs.append(nx.to_dict_of_lists(G))\n\t\n\treturn graphs\n\n# To populate the kegg pathways into GraphDesc objects\ngraph_desc = get_graphs(\"/Users/hd/Git/bpa/docs/kegg_pathways.tsv\")","sub_path":"misc/GraphRead.py","file_name":"GraphRead.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"145763735","text":"__author__ = 'chhavi21'\n\nfrom load_data import *\nfrom baseline_model import *\nimport numpy as np\nfrom pyspark.mllib.recommendation import ALS, MatrixFactorizationModel, Rating\n\n################################################################################\n## CREATE MAPPING\n################################################################################\n\n# ALS needs numeric user_id and business_id. Hence create mapping for both\nbusiness_id_mapping = dict()\nbusiness_id = review.select([\"business_id\"]).map(lambda x: x.business_id).collect()\nbusiness_id = list(set(business_id))\nfor i in range(len(business_id)):\n business_id_mapping[business_id[i]] = i\n\n\nuser_id_mapping = dict()\nuser_id = review.select([\"user_id\"]).map(lambda x: x.user_id).collect()\nuser_id = list(set(user_id))\nfor i in range(len(user_id)):\n user_id_mapping[user_id[i]] = i\n\n################################################################################\n## CREATE INVERSE MAPPING\n################################################################################\n# create inverse mapping of the mapping above incase needed.\ninv_business_id_mapping = dict()\nfor i in business_id_mapping:\n inv_business_id_mapping[business_id_mapping[i]] = i\n\ninv_user_id_mapping = dict()\nfor i in user_id_mapping:\n inv_user_id_mapping[user_id_mapping[i]] = i\n\n################################################################################\n## CREATE TRAIN DATA\n################################################################################\n\nals_data = review.select(['user_id', \"business_id\", 'r_stars']).\\\n map(lambda x: Rating(user_id_mapping[x[0]], business_id_mapping[x[1]], x[2]-mu))\nals_data.first()\n\n################################################################################\n## BUILD MODEL\n################################################################################\n\nrank = 100\nnumIterations = 25\nmodel = ALS.train(als_data, rank, numIterations, lambda_ = 0.3, seed=10)\n\n################################################################################\n## PREDICT\n################################################################################\n\ndef clip(x):\n # clip the ratings if they are outside permissible limits\n if x<1: return 1.0\n elif x>5: return 5.0\n return x\n\n#make predicitions on test data\ntest_data = review.select(['user_id', \"business_id\"]).\\\n map(lambda x: (user_id_mapping[x[0]], business_id_mapping[x[1]]))\npredictions = model.predictAll(test_data).map(lambda r: ((r[0], r[1]), clip(r[2]+mu)))\npredictions = predictions.map(lambda x: (x[0], clip(x[1])))\npredictions.collect()\n\n# getting the training data in the right format for comparison\ntrain1 = review.select(['user_id', \"business_id\", 'r_stars']).\\\n map(lambda x: Row(user_id_mapping = user_id_mapping[x[0]],\n business_id_mapping = business_id_mapping[x[1]],\n rating = x[2]))\ntrain1 = sqlContext.createDataFrame(train1)\n\n# getting the predictions in the right format for comparison\ntrain2 = predictions.map(lambda x: 
Row(user_id_mapping = x[0][0],\n business_id_mapping = x[0][1],\n pred = x[1]))\ntrain2 = sqlContext.createDataFrame(train2)\n\n################################################################################\n## COMPUTE RMSE ON TRAINING DATA\n################################################################################\n\njoined = train1.join(train2, on=[\"user_id_mapping\", \"business_id_mapping\"])\nse = joined.map(lambda x: (x.rating - x.pred)**2).reduce(lambda a,b: a+b)\nn = joined.count()\nrmse = np.sqrt(se/n)\n# 0.64672825592371597\n\n################################################################################\n## GET COMMON TEST DATA\n################################################################################\n\n# Note to self: try to improve this code by not taking stuff out of rdd\n# get the set of known user and known business from train and test set\nknown_business = test_rvw.select(['business_id']).rdd.intersection(business.select(['business_id']).rdd)\nknown_business = known_business.map(lambda x: x.business_id).collect()\nknown_business = set(known_business)\n\nknown_user = test_rvw.select(['user_id']).rdd.intersection(user.select(['user_id']).rdd)\nknown_user = known_user.map(lambda x: x.user_id).collect()\nknown_user = set(known_user)\n\n#12078 observations\nknown_user_and_know_business = test_rvw.drop('type').\\\n map(lambda x: (x.user_id, x.business_id, x.review_id)).\\\n filter(lambda x: (x[0] in known_user)\n and (x[1] in known_business))\n\n# fromat: (user_id, business_id, review_id)\nknown_user_and_know_business = known_user_and_know_business.\\\n map(lambda x: (user_id_mapping[x[0]],\n business_id_mapping[x[1]], x[2]))\n\ntest_pred = model.predictAll(known_user_and_know_business.\n map(lambda x: (x[0], x[1]))).\\\n map(lambda r: ((r[0], r[1]), clip(r[2]+mu)))\n\ntest_pred = test_pred.map(lambda x: Row(user_id_mapping = x[0][0],\n business_id_mapping = x[0][1],\n pred = x[1]))\ntest_pred = sqlContext.createDataFrame(test_pred)\n\n\nschema = StructType([StructField(\"user_id_mapping\", StringType(), True),\n StructField(\"business_id_mapping\", StringType(), True),\n StructField(\"review_id\", StringType(), True)])\nknown_user_and_know_business = sqlContext.createDataFrame(known_user_and_know_business, schema)\n\ntest_pred = test_pred.join(known_user_and_know_business, on=['user_id_mapping',\n 'business_id_mapping'])\\\n .drop('business_id_mapping')\\\n .drop('user_id_mapping')\n\n#export data to pandas so that it can be written to csv. 
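\n# Aside (hedged, added): on Spark 2.x+ the pandas hop could be skipped via DataFrameWriter,\n# e.g. test_pred.write.csv('test_pred_out', header=True) -- that output path is illustrative;\n# the pandas route kept below matches the older Spark API this script targets.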
\n#Spark does not have any function to export data to csv directly\np = test_pred.toPandas()\n\n# get ratings for known-known case from ALS and rest from baseline predicitions\npreds_final = preds.merge(p, on=['review_id'], how='outer')\npreds_final.pred = preds_final.apply(lambda x: x.stars if np.isnan(x.pred) else x.pred, axis=1)\npreds_final.drop('stars', axis=1, inplace=True)\npreds_final.columns = ['review_id', 'stars']\npreds_final.to_csv('submission.csv', index=None)\n\n\n################################################################################\n## OBSERVATIONS\n################################################################################\n\n# k=10 RMSE 1.38587\n# k=8 RMSE 1.36976\n# k=20 RMSE 1.33906\n# k=25 RMSE 1.33433\n# k=70 RMSE 1.30318\n# k=100 RMSE 1.29951\n\n#k=100 and lambda=0.5 RMSE = 1.29316\n#k=100 and lambda=0.2 RMSE = 1.29096\n#k=100 and lambda=0.3 RMSE =1.29070\n#k=100 and lambda=0.1 RMSE =1.29322\n\n#lambda is not helping at all\n# number of iterations is limited by 25 beacuse of spark version. Need to try on AWS for final predicition\n","sub_path":"als.py","file_name":"als.py","file_ext":"py","file_size_in_byte":7052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"391291314","text":"import io\nfrom math import ceil\n\nfrom PIL import Image\nimport numpy as np\nimport imageio\nimport atexit\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\ndef save_img(im_array, output_path):\n imageio.imwrite(output_path, im_array)\n\n\ndef numpy_to_png(array: np.ndarray) -> str:\n \"\"\"Get a PNG string from a Numpy array.\n\n Args:\n array: A Numpy array of shape (w, h, 3) or (w, h), where the\n former is meant to become a three-channel image and the\n latter a one-channel image. The dtype of the array\n should be uint8.\n\n Returns:\n str\n\n \"\"\"\n im = Image.fromarray(array)\n output = io.BytesIO()\n im.save(output, 'png')\n return output.getvalue()\n\n\ndef png_to_numpy(png: str, dtype=np.uint8) -> np.ndarray:\n \"\"\"Get a Numpy array from a PNG string.\n\n Args:\n png: A str containing a PNG-formatted image.\n\n Returns:\n numpy.ndarray\n\n \"\"\"\n incoming = io.BytesIO(png)\n im = Image.open(incoming)\n return np.array(im)\n\n\ndef replace_nones_in_dict(target, replace_value):\n \"\"\"Recursively replaces Nones in a dictionary with the given value.\"\"\"\n for k in target:\n if target[k] is None:\n target[k] = replace_value\n elif type(target[k]) is list:\n result = []\n for e in target[k]:\n if type(e) is dict:\n result.append(replace_nones_in_dict(e, replace_value))\n else:\n if e is None:\n result.append(replace_value)\n else:\n result.append(e)\n target[k] = result\n elif type(target[k]) is dict:\n replace_nones_in_dict(target[k], replace_value)\n return target\n\n\ndef set_nested_keys(target,\n mods,\n ignore_missing_keys=False,\n set_missing_keys=False):\n \"\"\"Sets dictionary keys based on modifications.\n\n Args:\n target - Target dictionary to be modified in-place.\n mods - Dictionary of values to set into the target dict.\n This method will look for any keys matching the mod\n key, even in nested dictionaries. If the mod has a nested\n dictionary, then the leaf key value will only be set\n if that parent dictionary key is found and is a dictionary.\n ignore_missing_keys - If a key is not found, do not throw an error.\n set_missing_keys - If a key is not found, set it. 
If the key is part\n of a nested set, and parent keys are found in the target\n dictionary, then set the key at whatever level of the nested\n set of keys where the key is first not found.\n \"\"\"\n searched_keys, found_keys = [], []\n\n def f(_target, _mods, parent_key=None, mod_parent_key=None):\n for key in _target:\n if key in _mods.keys():\n found_keys.append(key)\n if type(_target[key]) is dict:\n if type(_mods[key]) is dict:\n f(_target[key],\n _mods[key],\n parent_key=key,\n mod_parent_key=key)\n else:\n raise Exception('Error: cannot modify dict with value')\n else:\n _target[key] = _mods[key]\n else:\n if type(_target[key]) is dict:\n f(_target[key],\n _mods,\n parent_key=key,\n mod_parent_key=mod_parent_key)\n searched_keys.extend(list(_mods.keys()))\n\n if set_missing_keys:\n for key in set(_mods.keys()) - set(found_keys):\n if not type(\n _mods[key]) is dict and parent_key == mod_parent_key:\n _target[key] = _mods[key]\n found_keys.append(key)\n\n f(target, mods)\n if not ignore_missing_keys:\n d = set(searched_keys) - set(found_keys)\n if d:\n raise Exception('Mod keys not found in target dict: {}'.format(d))\n\n\ndef terminate_at_exit(process):\n def terminate():\n log.debug('Terminating {}...'.format(process.pid))\n process.terminate()\n\n atexit.register(terminate)\n\n\ndef grouped(lst, size):\n \"\"\"Returns a list of lists of length 'size'.\n The last list will have size <= 'size'.\n \"\"\"\n return [lst[n:n + size] for n in range(0, len(lst), size)]\n\n\ndef split_into_groups(lst, num_groups):\n \"\"\"Attempts to split a list into a given number of groups.\n The number of groups will be at least 1 and at most\n num_groups.\n\n Args:\n lst: The list to split\n num_groups: The number of groups to create.\n Returns:\n A list of size between 1 and num_groups containing lists\n of items of l.\"\"\"\n group_size = max(int(ceil((len(lst)) / num_groups)), 1)\n\n return grouped(lst, group_size)\n","sub_path":"rastervision/utils/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":5029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"505669434","text":"#Python packages\nimport torch\nimport torch.nn as nn\nfrom time import time\nimport sys, gc\n\n#Custom classes\nfrom Models import LogHarmonicNet\nfrom Samplers import RandomWalkMetropolisHastings as RWMH\nfrom Hamiltonian import HarmonicOscillatorWithInteraction1D as HOwInt1D\nfrom VMC import ADVMC\nfrom utils import memory_usage, calc_clip\nfrom Writer import WriteToFile\n\n#Hardware Hyperparameters\ndevice = torch.device('cpu')\n\n#Gaussian Interaction Hyperparameters\nV0 = -3\nsigma0 = 0.5\n\n#Network Hyperparameters\nnfermions = 2 #number of input nodes\nnum_hidden = 16 #number of hidden nodes per layer\nnum_layers = 1 #number of layers in network\nnum_dets = 1 #number of determinants (currently only accepts 1)\nfunc = nn.Tanh() #activation function between layers\n\n#Feed-Forward Neural Network which is a R^N -> R^1 function. It returns \n#the log. abs. 
determinant of network's output (along with its sign)\nnet = LogHarmonicNet(num_input=nfermions,\n num_hidden=num_hidden,\n num_layers=num_layers,\n num_dets=num_dets,\n func=func)\nnet=net.to(device)\nnet=torch.jit.script(net)\n\n#Sampler Hyperparameters (Markov-Chain Monte Carlo)\nnblocks = 50 #number of blocks in Markov Chain\nnsteps = 50 #number of samples within a given block\nnsamples = nblocks*nsteps #total number of samples within a Markov Chain\nburn_in = 0 #number of burn in samples of Markov Chain\nthinning = 10 #thinning factor for samples (take every thinning-th sample, i.e. if thinning = 10, take every 10-th sample from chain)\nnwalkers = 100 #number of chains/walkers \nstd = 1 #width of the proposal distribution\n\nsampler = RWMH(network=net,\n dim=nfermions,\n nwalkers=nwalkers,\n std=std,\n device=device)\n\n#Analtyical Solution (lowest loss value for given value of 'nfermions')\ngroundstate = nfermions**2*(40.0*nfermions**(-1/3))/2.0\n \ncalc_local_energy = HOwInt1D(network=net, V0=V0, sigma0=sigma0) #class that calculates the local matrix elements of the Hamiltonian\n \nadvmc_estimator = ADVMC(network=net) #class that combines all local matrix elements together to return mean loss (and its error)\n\noptim = torch.optim.Adam(net.parameters(), #optimiser class\n lr=1e-3,\n betas=(0.9,0.999),\n eps=1e-8)\n\n\n#file name to save data of interest\nfname = \"A%02i_H%03i_B%03i_S%03i_V0_%04.2f_Sig0_%04.2f.csv\" % (nfermions, num_hidden, nblocks, nsteps, V0, sigma0)\nwriter = WriteToFile(load=None, filename=fname)\n\n\nnet.train()\n\nepochs=10000 #number of epochs\n\nprefix='GB' #for memory usage\n\nfor epoch in range(epochs+1):\n \n stats={} #dict to store items of interest\n \n start=time() #record time per epoch\n \n mem1=memory_usage(prefix) #record current ram usage \n \n X, acceptance = sampler(nsamples=nsamples, #sampler object which returns samples (X) and their acceptance rate \n burn_in=burn_in, #from the Markov Chain. 
X is of shape [nwalkers, nsamples, dim]\n thinning=thinning)\n \n mem2=memory_usage(prefix) #record current ram usage \n \n E_local = calc_local_energy(X) #Calculate local energy from the samplers (X) returns shape [nwalkers, nsamples]\n \n mem3=memory_usage(prefix) #record current ram usage \n \n energy_mean, energy_std, sampler_stats = advmc_estimator(X, E_local, nblocks) #Takes initial samples, and local energy and computes \n #the total energy (which the loss of our choice)\n mem4=memory_usage(prefix) #record current ram usage \n \n optim.zero_grad(set_to_none=True) #zero gradient cache \n energy_mean.backward() #calculate loss gradients \n optim.step() #update parameters\n \n mem5=memory_usage(prefix) #record current ram usage \n\n end = time() #record time per epoch\n\n #record items of interest into a dict, which is written to pandas dataframe.\n stats['epoch'] = [epoch]\n stats['energy_mean'] = energy_mean.item()\n stats['energy_std'] = energy_std.item()\n stats['groundstate'] = groundstate\n stats['envelope_width'] = net.width.item()\n stats = {**stats, **sampler_stats} #merge dicts\n stats['ram_sample'] = mem2-mem1 #memory usage before/after the calling of certain classes\n stats['ram_local'] = mem3-mem2\n stats['ram_advmc'] = mem4-mem3\n stats['ram_optim'] = mem5-mem4\n stats['ram_epoch'] = mem5-mem1\n stats['ram_total'] = mem5\n stats['walltime'] = end-start\n \n writer(stats) #write to file...\n\n #print some useful information during runtime\n print(\"Epoch: %6i | Energy: %4.2f +/- %4.2f MeV | GS: %4.2f MeV | Walltime: %4.2e%s | RAM: %4.2e%s (%+4.2e%s) | Sample: %4.2e%s Local: %4.2e%s ADVMC: %4.2e%s Optimiser: %4.2e%s\" % (epoch, energy_mean, energy_std, groundstate,end-start,\"s\", memory_usage(prefix), prefix, mem5-mem1,prefix, mem2-mem1,prefix,mem3-mem2,prefix,mem4-mem3,prefix,mem5-mem4,prefix))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Run_interaction.py","file_name":"Run_interaction.py","file_ext":"py","file_size_in_byte":5156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"240392034","text":"from random import randint\n\nboard = []\nuser_board = []\nboard_size = 8\nhit = 0\nfirst_play = 1\n\n\ndef build_board():\n global board\n for i in range(board_size):\n temp = []\n for j in range(board_size):\n temp.append(0)\n board.append(temp)\n\n\ndef create_mines(row, col):\n global board, board_size\n for i in range(board_size):\n for j in range(2):\n random = randint(0, board_size - 1)\n if board[i][random] != 1 and (row != i or col != random):\n board[i][random] = 1\n else:\n j -= 1\n\n\ndef build_user_board():\n global user_board, board_size\n for i in range(board_size):\n temp = []\n for j in range(board_size):\n temp.append('o')\n user_board.append(temp)\n\n\ndef check_mine(row, col):\n global board, board_size\n if board[row][col] == 1:\n return 1\n else:\n return 0\n\n\ndef print_board():\n global board, board_size\n for i in range(board_size):\n for j in range(board_size):\n print(board[i][j], r\" \")\n print(\"\\n\")\n\n\ndef print_user_board():\n global user_board, board_size\n for i in range(board_size):\n for j in range(board_size):\n print(user_board[i][j], r\" \")\n print(\"\\n\")\n\n\ndef nearbymines(i, j):\n global board, board_size\n num_mines = 0\n\n if i == 0:\n start_row = 0\n end_row = start_row + 1\n elif i == board_size - 1:\n end_row = board_size - 1\n start_row = end_row - 2\n else:\n start_row = i - 1\n end_row = i + 1\n\n if j == 0:\n start_col = 0\n end_col = start_col + 1\n elif j == 
board_size - 1:\n end_col = board_size - 1\n start_col = end_col - 2\n else:\n start_col = j - 1\n end_col = j + 1\n\n for i in range(start_row, end_row):\n for j in range(start_col, end_col):\n if board[i][j] == 1:\n num_mines += 1\n\n return num_mines\n\n\ndef play():\n row = 0\n col = 0\n global board_size, hit\n while row < 1 or row > board_size or col < 1 or col > board_size:\n row = int(input(\"Row--> \"))\n col = int(input(\"Col--> \"))\n\n row -= 1\n col -= 1\n\n if first_play == 1:\n create_mines(row, col)\n\n if check_mine(row, col):\n hit = 1\n else:\n surr_mines = nearbymines(row, col)\n user_board[row][col] = str(surr_mines)\n\n\ndef start():\n global hit\n hit = 0\n build_board()\n build_user_board()\n\n # getting the board out on the screen\n print(user_board)\n\n while hit != 1:\n play()\n print_board()\n print_user_board()\n\n if hit == 1:\n print(\"You are hit\")\n\n# main suite\nstart()\n\n","sub_path":"source/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"16307143","text":"import cv2 as cv\nimport numpy as np\n\ncap = cv.VideoCapture(0) #common source = 0\n\nwhile (True):\n ret, frame = cap.read()\n hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)\n blur = cv.GaussianBlur(hsv, (15,15), 0)\n low = np.array([30, 180, 180])\n high = np.array([35, 240, 255])\n mask1 = cv.inRange(hsv, low, high)\n yellow = cv.bitwise_and(frame, frame, mask = mask1)\n edge = cv.Canny(yellow, 100, 200)\n cv.imshow(\"Original\", frame)\n #cv.imshow(\"HSV\", hsv)\n #cv.imshow(\"Mask\", mask1)\n cv.imshow(\"Detect\", yellow)\n cv.imshow(\"Canny\", edge)\n\n if cv.waitKey(1) & 0xFF ==ord('q'):\n break\ncap.release()\ncv.destroyAllWindows()\n","sub_path":"OpenCV/opencv_06.py","file_name":"opencv_06.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"222642365","text":"import sys\nimport requests\nimport json\nfrom authorization import refresh\nfrom machine_learning.model import calculate_mood\n\ndef get_key():\n return refresh()\n\ndef readTracks():\n\n auth_key = get_code()\n\n fi = open(\"machine_learning/songs.data\", \"a\")\n # fi2 = open(\"nameIdRelation.txt\", \"a\")\n\n # initial setup, currently using hard-coded authorization\n payload = {\"limit\" : 50, \"offset\" : 0}\n headers = {\"Accept\" : \"application/json\", \"Authorization\" : \"Bearer \"+auth_key}\n track_request = requests.get(\"https://api.spotify.com/v1/me/tracks\", params=payload, headers=headers)\n data = track_request.json()\n total = data[\"total\"]\n\n # run the analysis until reaching the end of the list\n data_array = []\n while total > 0:\n total -= 50\n for item in data[\"items\"]:\n id = (item[\"track\"][\"id\"])\n\n query_headers = {\"Content-Type\" : \"application/json\"}\n query_request = requests.get(\"https://api.mlab.com/api/1/databases/emotion_music/collections/prediction_data?apiKey=7fjUwhTEJe2ALljJOyn706HsWtIJxvvB&q={\\\"id\\\":\\\"\"+id+\"\\\"}\", headers=query_headers)\n if len(json.loads(query_request.text)) == 0:\n name = item[\"track\"][\"name\"]\n url = \"https://api.spotify.com/v1/audio-features/\"\n url = url + id\n feature_request = requests.get(url, headers=headers)\n feature = feature_request.json()\n data = {\n \"id\" : feature[\"id\"],\n \"energy\" : feature[\"energy\"],\n \"loudness\" : feature[\"loudness\"],\n \"mode\" : feature[\"mode\"],\n \"speechiness\" : 
feature[\"speechiness\"],\n \"tempo\" : feature[\"tempo\"],\n \"valence\" : feature[\"valence\"],\n \"name\" : item[\"track\"][\"name\"]\n }\n data_array.append(data)\n # fi2.write(\"id: {} | name: {}\\n\".format( feature[\"id\"] ,name))\n # print data\n # dict_data.append(data)\n if total > 0 :\n payload[\"offset\"] += 50\n\n\n track_request = requests.get(\"https://api.spotify.com/v1/me/tracks\", params=payload, headers=headers)\n data = track_request.json()\n\n\n fi.write(str(json.dumps(data_array)))\n\n calculate_mood()\n\ndef readPlayList():\n\n auth_key = get_code()\n f2 = open(\"machine_learning/playlist_songs.data\", \"a\")\n fi2 = open(\"machine_learning/nameIdRelation.data\", \"a\")\n url = \"https://api.spotify.com/v1/users/\" + \"lockijazz\" +\"/playlists/\" + \"0KxCwQWV2dag4n81XQNu2K\" + \"/tracks\"\n headers = {\"Accept\" : \"application/json\", \"Authorization\" : \"Bearer \" + auth_key}\n payload = {\"limit\" : 50}\n playlist_request = requests.get(url, headers = headers, params=payload)\n data = playlist_request.json()\n f2.write(\"[\")\n for item in data[\"items\"]:\n url = \"https://api.spotify.com/v1/audio-features/\"\n url = url + item[\"track\"][\"id\"]\n feature_request = requests.get(url, headers=headers)\n feature = feature_request.json()\n song = {\n \"id\" : feature[\"id\"],\n \"energy\" : feature[\"energy\"],\n \"loudness\" : feature[\"loudness\"],\n \"mode\" : feature[\"mode\"],\n \"speechiness\" : feature[\"speechiness\"],\n \"tempo\" : feature[\"tempo\"],\n \"valence\" : feature[\"valence\"],\n \"name\" : item[\"track\"][\"name\"]\n }\n f2.write(str(song) + \",\")\n fi2.write(\"id: {} | name: {}\\n\".format( feature[\"id\"] , item[\"track\"][\"name\"]))\n f2.write(\"]\")\n\n\n\ndef get_playlist(emotion):\n\n auth_key = get_code()\n\n # initial setup, currently using hard-coded authorization\n payload = {\"limit\" : 50, \"offset\" : 0}\n headers = {\"Accept\" : \"application/json\", \"Authorization\" : \"Bearer \" + auth_key}\n track_request = requests.get(\"https://api.spotify.com/v1/me/tracks\", params=payload, headers=headers)\n data = track_request.json()\n total = data[\"total\"]\n dict_data = []\n\n emotions = [\"Happy\", \"Sad\", \"Hyped\", \"Calm\"]\n\n user_id = \"GOOSH\" # change to get it from amans app as well as all the authorization codes\n playlist_headers = {\"Content_Type\" : \"application/json\", \"Authorization\" : \"Bearer \" + auth_key}\n playlist_body = json.dumps({\"name\":emotions[emotion], \"description\":\"Playlist Built By: HackTX 2017 Emotion-Spotify Playlist Generator\"})\n playlist_create_request = requests.post(\"https://api.spotify.com/v1/users/\"+user_id+\"/playlists\", headers=playlist_headers, body=playlist_body)\n playlist_uri = playlist_create_request.json()[\"uri\"]\n\n data_uris = []\n\n # run the analysis until reaching the end of the list\n while total > 0:\n total -= 50\n for item in data[\"items\"]:\n id = (item[\"track\"][\"id\"])\n\n query_headers = {\"Content-Type\" : \"application/json\"}\n query_request = requests.get(\"https://api.mlab.com/api/1/databases/emotion_music/collections/prediction_data?apiKey=7fjUwhTEJe2ALljJOyn706HsWtIJxvvB&q={\\\"id\\\":\\\"\"+id+\"\\\", \\\"emotion\\\":\"+emotion+\"}\", headers=query_headers)\n if len(json.loads(query_request.text)) > 0:\n data_uris.append(item[\"track\"][\"uri\"])\n\n\n if total > 0 :\n payload[\"offset\"] += 50\n\n track_request = requests.get(\"https://api.spotify.com/v1/me/tracks\", params=payload, headers=headers)\n data = track_request.json()\n\n 
playlist_add_body = json.dumps({\"uris\": str(data_uris)})\n playlist_add_request = requests.post(\"https://api.spotify.com/v1/users/\"+user_id+\"/playlists/\"+playlist_id+\"/tracks\", headers=playlist_headers, body=playlist_add_body)\n\n return playlist_uri\n\n\n\n # input\n # energy, loudness, mode, speechiness, tempo, valence","sub_path":"data_scrape.py","file_name":"data_scrape.py","file_ext":"py","file_size_in_byte":5785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"294114706","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport importlib\nimport inspect\nimport os\n\nfrom typing import Any, Callable, Iterator, List\n\n\ndef get_tokeniser(path: str) -> Callable:\n \"\"\"\n Get tokeniser function based on the module's relative path under\n tic/create/tokenisers. The main tokenising function in the submodule must\n match the signature:\n\n def tokenise(input_path: str, workers: Optional[int] = None) -> str:\n pass\n\n Examples:\n The path to tic/create/tokenisers/discrete/example.py should be given\n as:\n\n discrete/example\n\n Args:\n path: Tokeniser module path relative to tic/create/tokenisers\n\n Returns:\n Tokenising function\n\n \"\"\"\n package = 'tic.create.tokenisers'\n components = [p.strip('/') for p in os.path.split(path) if p]\n basename = components[-1]\n basename = os.path.splitext(basename)[0]\n components[-1] = basename\n relative_name = f'.{\".\".join(components)}'\n module = importlib.import_module(relative_name, package)\n funcs = dict(inspect.getmembers(module, inspect.isfunction))\n return funcs['tokenise']\n\n\ndef expand_path(path: str) -> str:\n \"\"\"\n Expand the given path's references to user and environment variables. Also\n convert to absolute path.\n\n Args:\n path: Path to expand\n\n Returns:\n Expanded path\n\n \"\"\"\n expanded_path = os.path.abspath(\n os.path.expandvars(os.path.expanduser(path))\n )\n return expanded_path\n\n\ndef line_count(file_path: str) -> int:\n \"\"\"\n Count the number of lines in the given file.\n\n Args:\n file_path: Path to file\n\n Returns:\n Line count\n\n \"\"\"\n with open(file_path) as stream:\n for index, _ in enumerate(stream):\n pass\n return index + 1\n\n\ndef line_to_csv(line: List[Any]) -> str:\n \"\"\"\n Turn a list of Python objects into a comma separated string.\n\n Args:\n line: List to turn into CSV\n\n Returns:\n Line represented as a comma separated string\n\n \"\"\"\n items = []\n for item in line:\n if not isinstance(item, str):\n items.append(str(item))\n elif ',' in item:\n items.append(f'\"{item}\"')\n else:\n items.append(item)\n return ','.join(items) + '\\n'\n\n\ndef batch_list(to_batch: List[Any], batch_size: int) -> Iterator[List[Any]]:\n \"\"\"\n Split list into a list of batches.\n\n Args:\n to_batch: List to batch\n batch_size: Number of items per batch\n\n Returns:\n Batched list\n\n \"\"\"\n batched = (\n to_batch[i:i + batch_size] for i in range(0, len(to_batch), batch_size)\n )\n return batched\n\n\ndef readlines(file_path: str) -> Iterator[str]:\n \"\"\"\n Read a line at a time from the given file path.\n\n Args:\n file_path: Path to file\n\n Yields:\n Line contents\n\n \"\"\"\n file_path = os.path.join(os.getcwd(), file_path)\n with open(file_path, 'r') as stream:\n while True:\n line = stream.readline()\n if not line:\n break\n yield 
line\n","sub_path":"python/tic/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"260589287","text":"TRAP_STATE = -1\r\nRESULT_TRAP = \"RESULT_TRAP\"\r\nRESULT_ACCEPTED = \"ACCEPTED\"\r\nRESULT_NOT_ACCEPTED = \"NOT ACCEPTED\"\r\n\r\ndef ASSIGNATION_delta(state, character):\r\n if state == 0 and character == \":\":\r\n return 1\r\n if state == 1 and character == \"=\":\r\n return 2\r\n return TRAP_STATE\r\n\r\ndef ASSIGNATION_automata(string):\r\n finals = [2]\r\n state = 0\r\n\r\n for character in string:\r\n next_state = ASSIGNATION_delta(state, character)\r\n state = next_state\r\n \r\n if state in finals:\r\n return RESULT_ACCEPTED\r\n if state == TRAP_STATE:\r\n return RESULT_TRAP\r\n return RESULT_NOT_ACCEPTED \r\n\r\ntest_cases = [\r\n (\":=\", RESULT_ACCEPTED),\r\n (\":\", RESULT_NOT_ACCEPTED),\r\n (\"+:\", RESULT_TRAP),\r\n (\"=:\", RESULT_TRAP),\r\n (\"@$#\", RESULT_TRAP)\r\n]\r\n\r\nfor string, result in test_cases:\r\n assert ASSIGNATION_automata(string) == result\r\n","sub_path":"Terminals/ASSIGNATION.py","file_name":"ASSIGNATION.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"395702965","text":"from NIENV import *\n\n\n# API METHODS --------------\n\n# self.main_widget\n# self.update_shape()\n\n# Ports\n# self.input(index)\n# self.set_output_val(index, val)\n# self.exec_output(index)\n\n# self.create_new_input(type_, label, widget_name=None, widget_pos='under', pos=-1)\n# self.delete_input(index)\n# self.create_new_output(type_, label, pos=-1)\n# self.delete_output(index)\n\n# Logging\n# mylog = self.new_log('Example Log')\n# mylog.log('I\\'m alive!!')\n# self.log_message('hello global!', target='global')\n# self.log_message('that\\'s not good', target='error')\n\n# --------------------------\n\nimport numpy as np\nimport copy as cp\n\nclass SpikingPopulation_NodeInstance(NodeInstance):\n \n types= {'default': { \n 'tau_recovery':0.02,\n 'coupling':0.2,\n 'reset_voltage':-65.0,\n 'reset_recovery':8.0,},\n 'intrinsically_bursting': {\n 'tau_recovery':0.02,\n 'coupling':0.2,\n 'reset_voltage':-55.0,\n 'reset_recovery':4.0,},\n 'chattering': {\n 'tau_recovery':0.02,\n 'coupling':0.2,\n 'reset_voltage':-50.0,\n 'reset_recovery':2.0,},\n 'fast_spiking': {\n 'tau_recovery':0.1,\n 'coupling':0.2,\n 'reset_voltage':-65.0,\n 'reset_recovery':8.0,},\n 'low-threshold_spiking': {\n 'tau_recovery':0.02,\n 'coupling':0.25,\n 'reset_voltage':-65.0,\n 'reset_recovery':8.0,},\n 'resonator': {\n 'tau_recovery':0.1,\n 'coupling':0.26,\n 'reset_voltage':-65.0,\n 'reset_recovery':8.0,},\n \n }\n inp = {'clock':0,\n 'excitation':1,\n 'inhibition':2,\n 'direct':3,\n 'size':4,\n 'type':5,\n 'exc_top':6,\n 'inh_top':7,\n 'int_top':8\n }\n def __init__(self, params):\n super(SpikingPopulation_NodeInstance, self).__init__(params)\n\n # self.special_actions['action name'] = {'method': M(self.action_method)}\n # ...\n self.x = None\n self.substeps = 2\n self.threshold = 30\n self.size = 2\n\n # build a dictionary here\n # but start with regular spiking\n self.type = 'default'\n self.taurecovery = self.types[self.type]['tau_recovery'] #0.02\n self.coupling = self.types[self.type]['coupling'] #0.2\n self.resetvolt = self.types[self.type]['reset_voltage'] #-65.0\n self.resetrecovery = self.types[self.type]['reset_recovery'] #8.0\n \n self.vlt = np.ones(self.size) * self.resetvolt\n 
self.u = self.coupling*self.vlt\n\n # \n def timestep_Iz(self, \n a_a, # tau recovery\n a_b, # coupling\n a_c, # reset voltage\n a_d, # reset recovery\n a_i, # direct current\n a_v, # in out excitation\n a_u, # recovery\n e_syn, # excitation synapse\n i_syn, # inhibition synapse\n int_syn # internal synapse \n ):\n\n v1 = cp.deepcopy(a_v)\n u1 = cp.deepcopy(a_u)\n #self.log_message('#1', target='global')\n fired = (a_v >= self.threshold)\n #v1[fired] = self.threshold\n #print(\"v1 = \" + str(v1) + \"; ac=\" + str(a_c))\n #self.log_message('#2a', target='global')\n v1[fired] = a_c\n #self.log_message('#2b', target='global')\n u1[fired] = a_u[fired] + a_d #[fired]\n #self.log_message('#2c', target='global')\n \n # synapses, topology\n exc = e_syn*(e_syn >= self.threshold)\n inh = i_syn*(i_syn >= self.threshold)\n internal_e = int_syn*(int_syn >= self.threshold)\n internal_i = int_syn*(int_syn <= -self.threshold)\n inpvlt = np.sum(internal_e, axis=1) \\\n + np.sum(internal_i, axis=1)\\\n + np.sum(exc, axis=1) \\\n - np.sum(inh, axis=1)\n \n #print(\"inpvlt = \" + str(inpvlt))\n i1 = cp.deepcopy(a_i)\n i1 = np.ravel(i1 + inpvlt)\n stepfact = 1.0/self.substeps\n tmp = max(a_c, max(cp.deepcopy(v1)))\n #self.log_message('#3', target='global')\n for i in range(self.substeps):\n v1 += stepfact*(0.04*tmp**2 + 5*tmp + 140-a_u+i1)\n #print(\"v1 = \" + str(v1))\n u1 += a_a*(a_b*v1 - u1)\n fired = (v1>self.threshold)\n v1[fired] = self.threshold\n v1.shape = (self.size)\n #self.log_message('#4', target='global')\n #print(\"v1_2 = \" + str(v1))\n #a_v = cp.deepcopy(v1)\n #a_u = cp.deepcopy(u1)\n return (v1, u1)\n \n def reinit(self, a_size, a_type):\n # print(\"size cl: \" + str(type(a_size)))\n if (isinstance(a_size, int) and a_size != self.size):\n self.log_message('setting size: ' + str(a_size) , target='global')\n self.size = a_size\n self.vlt = np.ones(self.size) * self.resetvolt\n self.u = self.vlt * self.coupling\n if(isinstance(a_type, str) and \\\n a_type != self.type and \\\n a_type in self.types.keys()):\n self.log_message('setting type: ' + str(a_type) , target='global')\n self.type = a_type\n self.taurecovery = self.types[self.type]['tau_recovery'] #0.02\n self.coupling = self.types[self.type]['coupling'] #0.2\n self.resetvolt = self.types[self.type]['reset_voltage'] #-65.0\n self.resetrecovery = self.types[self.type]['reset_recovery'] #8.0\n self.log_message('tau='+str(self.taurecovery), target = 'global')\n\n def update_event(self, input_called=-1):\n # check if type or size has changed\n #self.log_message(\"inp 0: \" + str(self.input(0)), target='global')\n #self.log_message(\"inp 1: \" + str(self.input(1)), target='global')\n #self.log_message(\"inp 2: \" + str(self.input(2)), target='global')\n #self.log_message(\"inp 3: \" + str(self.input(3)), target='global')\n #self.log_message(\"inp 4: \" + str(self.input(4)), target='global')\n #self.log_message(\"inp 5: \" + str(self.input(5)), target='global')\n #self.log_message(\"inp 6: \" + str(self.input(6)), target='global')\n #self.log_message(\"inp 7: \" + str(self.input(7)), target='global')\n \n self.reinit(self.input(self.inp['size']),\n self.input(self.inp['type'])) # reinit if size changed\n # call timestep\n excitation = 0\n inhibition = 0\n internal_syn = 0\n if input_called == 0:\n \n dircur = np.reshape(np.array(self.input(self.inp['direct'])), (self.size)) if\\\n not isinstance(self.input(self.inp['direct']), type(None)) else\\\n np.zeros(self.size) # todo: convert to array\n #print(\"dircur: \" + str(dircur))\n excitation = 
self.input(self.inp['exc_top'])\\\n * np.tile(self.input(self.inp['excitation']), (self.size, 1)) if \\\n not isinstance(self.input(self.inp['exc_top']), type(None)) and \\\n not isinstance(self.input(self.inp['excitation']), type(None)) else \\\n np.zeros((1, self.size))\n #print(\"excitation: \" + str(excitation))\n inhibition = self.input(self.inp['inh_top'])\\\n * np.tile(self.input(self.inp['inhibition']), (self.size, 1)) if \\\n not isinstance(self.input(self.inp['inh_top']), type(None)) and \\\n not isinstance(self.input(self.inp['inhibition']), type(None)) else \\\n np.zeros((1, self.size))\n #print(\"inhibition: \" + str(inhibition))\n internal_syn = self.input(self.inp['int_top'])\\\n * np.tile(self.vlt, (self.size, 1)) if \\\n not isinstance(self.input(self.inp['int_top']), type(None)) else \\\n np.zeros((1, self.size))\n #print(\"internal: \" + str(internal_syn))\n self.vlt.shape = (self.size)\n (self.vlt, self.u) = self.timestep_Iz(\n self.taurecovery,\n self.coupling,\n self.resetvolt,\n self.resetrecovery,\n dircur,\n self.vlt,\n self.u,\n excitation,\n inhibition,\n internal_syn\n )\n self.set_output_val(0, self.vlt)\n\n\n\n def get_data(self):\n # TODO: add type and internal topology\n data = {}\n return data\n\n def set_data(self, data):\n pass\n\n def removing(self):\n pass\n","sub_path":"trond/nodes/trond___SpikingPopulation0/trond___SpikingPopulation0.py","file_name":"trond___SpikingPopulation0.py","file_ext":"py","file_size_in_byte":9141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"575542701","text":"from basic_detection import structured_output\nfrom helpers_ot import user_input, frame_input, massage_data\nfrom helpers_ot import tempDS_update, currentDS_update, pastDS_update\n\n\n# Parameters\n# (1) Dataset or New Folder\n# (2) Tree Depth/Sliding Window Size \n# (3) Maximum Inactivity\n\nif __name__ =='__main__':\n\n\t# Faster R-CNN Model chosen for Flanagan Lab Research\n\tMODEL_NAME = 'faster_rcnn_resnet50_coco_2018_01_28'\n\tFROZEN_GRAPH = 'frozen_inference_graph.pb'\n\tLABELS = 'mscoco_label_map.pbtxt'\n\t\n\t# Instance for testing \n\tfolder = 'datasets/Forward_10204_1844'\n\tfirst = 'Forward_10204_1844-f-0000001.jpg'\n\tlast = 'Forward_10204_1844-f-0012331.jpg'\n\tstart = 1\n\tend = 12331\n\tcount = 1490\n\tmethod = 'video'\n\n\n\t# parameters = user_input()\n\n\t# if parameters == None:\n\t# \treturn\n\n\n\n\t# Data Structure that acts as a sliding window on past object detections\n\ttemp_objects = {\n\t\t'window': 3,\n\t\t'classes': [],\n\t\t1: {},\n\t\t2: {},\n\t\t3: {}\n\t}\n\n\t# Data Structure that keeps track of objects deemed active in a specific context\n\tcurrent_objects = {\n\t\t'class_counts': {},\n\t\t'active': [],\n\t\t'active_classes': [],\n\t\t'active_objects': {},\n\t}\n\n\t# Data Structure that objects that are deemed inactive\n\tpast_objects = {\n\t\t'inactive': [],\n\t\t'inactive_classes': [],\n\t\t'inactive_objects': {}\n\t}\n\n\twhile (count <= end):\n\t\t\n\t\tdata = None\n\t\tif method == 'video':\n\t\t\timage_path = frame_input(count) # Dummy function specific to testing instance \n\t\t\tdata = structured_output(MODEL_NAME, FROZEN_GRAPH, LABELS, image_path)\n\t\telif method == 'dataset':\n\t\t\tdata = dataset_extraction() ###### TBD\n\n\t\tif not data:\n\t\t\tcontinue\n\n\t\tnew_objects = massage_data(data, method)\n\t\ttemp_objects = tempDS_update(temp_objects, new_objects)\n\t\t# current_objects = tree_search(temp_objects, current_objects, count)\n\t\tcurrent_objects = 
currentDS_update(current_objects, temp_objects, count)\n\t\tupdate = pastDS_update(current_objects, past_objects, count)\n\t\tcurrent_objects = update[0]\n\t\tpast_objects = update[1]\n\t\t\n\t\t\n\t\tprint(count)\n\t\tprint('_____________')\n\t\tprint('')\t\t\n\t\tprint(new_objects)\n\t\tprint('')\n\t\tprint(temp_objects)\n\t\tprint('')\n\t\tprint(current_objects)\n\t\tprint('')\n\t\tprint(past_objects)\n\t\tprint('')\n\n\t\tcount = count + 1\n\n\n\n\n\n\n\n\n","sub_path":"object_tracking.py","file_name":"object_tracking.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"287388234","text":"from bs4.element import Comment\nfrom string import punctuation\n\n\ndef clean_string(inp_str):\n st = str.maketrans('', '', punctuation)\n return inp_str.translate(st).lower().strip().split()\n\n\ndef remove_stop_words(inp_dict):\n '''clean the dictionary of words by removing the stop words'''\n stop_word_list = get_stop_words()\n for word in stop_word_list:\n if word in inp_dict.keys():\n del inp_dict[word]\n return inp_dict\n\n\ndef get_stop_words():\n '''read stop words from the file into a list'''\n stop_word_list = []\n with open('stopwords.txt','r') as file:\n for word in file:\n stop_word_list.append(word.strip('\\n'))\n return stop_word_list\n\n\ndef get_word_count(inp_str):\n st = clean_string(inp_str)\n d = {}\n total_count = len(st)\n for word in st:\n d[word] = d.get(word, 0) + 1\n unique_count = len(d.items())\n cleaned_d = remove_stop_words(d)\n sorted_d = dict(sorted(cleaned_d.items(), key=lambda kv:kv[1], reverse=True))\n return total_count, unique_count, list(sorted_d.keys())[:5]\n\n\ndef tag_visible(element):\n if element.parent.name in ['style', 'script', 'head', 'meta', '[document]','noscript','header', 'html', 'input']:\n return False\n if isinstance(element, Comment):\n return False\n return True\n\n\ndef text_from_html(soup):\n texts = soup.findAll(text=True)\n visible_texts = filter(tag_visible, texts)\n return u\" \".join(t.strip() for t in visible_texts)\n\n\n","sub_path":"analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"97068280","text":"# Read inputs from Standard Input.\n# Write outputs to Standard Output.\n\nimport sys\n\nl, h = [int(i) for i in input().split()]\nchiffres_input = list()\n\ndef fromMayaToInt(chiffres, nombre):\n res = 0\n for j, n in enumerate(nombre):\n for i, c in enumerate(chiffres):\n print(str(n) + str(c), file=sys.stderr)\n if c == n:\n print('coucou', file=sys.stderr)\n res += i*20**(len(nombre)-j-1)\n break\n return res\n\ndef fromIntToMaya(chiffres, nombre):\n if nombre == 0:\n return chiffres[0]\n \n res = list()\n # find largest exponent\n exp = 0\n while 20**exp < nombre:\n exp += 1\n exp -= 1\n residu = nombre\n while exp >= 0:\n quotient = residu//(20**exp)\n res.append(chiffres[quotient])\n residu = residu % quotient\n exp -= 1\n return res\n\n\nfor i in range(h):\n chiffres_input.append(input())\nchiffres = list()\nfor i in range(20):\n chiffres.append('\\n'.join([j[i*l:(i+1)*l] for j in chiffres_input]) )\n\ns1 = int(input())\ns1_str = list()\nfor i in range(s1 // h):\n s1_str.append('')\n for j in range(h):\n s1_str[i] += input() + '\\n'\n\ns2 = int(input())\ns2_str = list()\nfor i in range(s2 // h):\n s2_str.append('')\n for j in range(h):\n s2_str[i] += input()+'\\n'\n\n\nint1 = fromMayaToInt(chiffres, 
s1_str)\nint2 = fromMayaToInt(chiffres, s2_str)\nprint(str(int1) + ' ' + str(s2_str), file=sys.stderr)\nres = 0\nop = input()\nif op == '*':\n res = int1*int2\nelif op == '/':\n res = int1//int2\nelif op == '+':\n res = int1 + int2\nelif op == '-':\n res = int1 - int2\n\nres_str = fromIntToMaya(chiffres, res)\nfor i in res_str:\n print(i)","sub_path":"maya.py","file_name":"maya.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"527901237","text":"from keras.layers.normalization import BatchNormalization\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.convolutional import MaxPooling2D\nfrom keras.layers.core import Activation\nfrom keras.layers.core import Flatten\nfrom keras.layers.core import Dropout\nfrom keras.layers.core import Dense\nfrom keras.optimizers import SGD\nfrom keras.layers import Concatenate\nfrom keras.models import *\n\nfrom facial_expression_recognition.ModelInterface import ModelInterface\n\n\nclass SampleModel2(ModelInterface):\n\n def fit(self):\n return self._model.fit()\n\n def get_name(self):\n return self.__class__.__name__\n\n def __init__(self):\n self._img_dim = 0\n self._depth = 0\n self._dropout = 0\n self._init_lr = 0\n self._nr_classes = 0\n\n def initialize_model(self):\n self._model = self.create_model()\n\n def set_params(self, img_dim, depth, dropout, init_lr, classes_no):\n self._img_dim = img_dim\n self._depth = depth\n self._dropout = dropout\n self._init_lr = init_lr\n self._nr_classes = classes_no\n\n def create_model(self):\n\n padding = \"valid\"\n input_shape = (self._img_dim, self._img_dim, self._depth)\n input_layer = Input(input_shape)\n\n conv1 = Conv2D(64, 7, strides=2, padding=padding)(input_layer)\n conv1 = Activation(\"relu\")(conv1)\n pooling1 = MaxPooling2D(3, strides=2, padding=padding)(conv1)\n # lrn1 = LRN2D()(pooling1)\n lrn1 = BatchNormalization()(pooling1)\n\n conv2a = Conv2D(96, 1,padding=padding)(lrn1)\n conv2a = Activation(\"relu\")(conv2a)\n pool2a = MaxPooling2D(3, strides=1, padding=padding)(conv2a)\n # pool2a = Dropout(Constants.DROPOUT)(pool2a)\n conv2b = Conv2D(208, 3, padding=padding)(conv2a)\n conv2b = Activation(\"relu\")(conv2b)\n conv2c = Conv2D(64, 1, padding=padding)(pool2a)\n conv2c = Activation(\"relu\")(conv2c)\n concat2 = Concatenate(axis=-1)([conv2b, conv2c])\n\n pool2b = MaxPooling2D(pool_size=3, strides=1, padding=padding)(concat2)\n # pool2b = Dropout(Constants.DROPOUT)(pool2b)\n\n\n conv3a = Conv2D(96, 1, activation=\"relu\", padding=padding)(pool2b)\n pool3a = MaxPooling2D(pool_size=3, strides = 1, padding=padding)(pool2b)\n # pool3a = Dropout(Constants.DROPOUT)(pool3a) #was commented\n\n conv3b = Conv2D(208, (3, 3), activation=\"relu\", padding=padding)(conv3a)\n conv3c = Conv2D(64, (1, 1), activation=\"relu\", padding=padding)(pool3a)\n\n concat3 = Concatenate(axis=-1)([conv3b, conv3c])\n\n pool3b = MaxPooling2D(pool_size=(3, 3), strides=(1, 1), padding=padding)(concat3)\n\n flat = Flatten()(pool3b)\n flat = Dropout(self._dropout)(flat)\n output = Dense(self._nr_classes, activation = \"softmax\")(flat)\n\n model = Model(inputs=input_layer, outputs=output)\n model.summary()\n '''\n adam = optimizers.Adam(lr=Constants.INIT_LR, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)\n model.compile(loss=\"categorical_crossentropy\",\n optimizer=adam,\n metrics=['accuracy']\n )\n '''\n opt = SGD(lr=self._init_lr)\n model.compile(loss=\"categorical_crossentropy\", optimizer=opt,\n 
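# A standalone sketch of the branch-and-concatenate pattern create_model uses
# above (Inception-style parallel convolutions merged on the channel axis).
# Filter counts, input shape, and "same" padding here are illustrative choices
# so the two branches stay concatenable; this is not the original network:
from keras.layers import Input, Conv2D, Concatenate, Activation
from keras.models import Model

inputs = Input((32, 32, 3))
branch_a = Activation("relu")(Conv2D(16, 1, padding="same")(inputs))
branch_b = Activation("relu")(Conv2D(16, 3, padding="same")(inputs))
merged = Concatenate(axis=-1)([branch_a, branch_b])  # 32 channels out
demo_model = Model(inputs=inputs, outputs=merged)
demo_model.summary()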
metrics=[\"accuracy\"])\n return model","sub_path":"facial_expression_recognition/SampleModel2.py","file_name":"SampleModel2.py","file_ext":"py","file_size_in_byte":3386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"81882707","text":"from django.contrib import admin\n\nfrom ..tasks import product_task\n\n\nclass ProductAdmin(admin.ModelAdmin):\n \"\"\"Product Admin\"\"\"\n\n def parse(self, request, products):\n \"\"\"Run category parser\"\"\"\n products = list(products)\n\n recycle = True\n slice_count = 10\n product_ids = []\n\n while recycle:\n # collect product ids\n for i in range(slice_count):\n try:\n product_ids.append(products.pop().pk)\n except IndexError:\n recycle = False\n break\n\n if not product_ids:\n break\n\n # run task\n product_task.delay(product_ids=product_ids)\n product_ids = []\n\n parse.short_description = 'Начать парсинг'\n\n actions = (parse,)\n list_display = ('id', 'is_active', 'status', 'category', 'name', 'created_at', 'updated_at',\n 'name_url_color', 'price',)\n list_filter = ('status', 'is_active', 'category__name',)\n search_fields = ('name',)\n list_per_page = 20\n readonly_fields = ('created_at', 'updated_at',)\n","sub_path":"apps/fcmoto/admin/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"367575300","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\n\nurlpatterns = [\n # Examples:\n # url(r'^$', 'Blog_project.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n url(r'^index/$','blog.views.index'),\n \n\turl(r'^home/$','blog.views.home'),\n url(r'^home/get/(?P\\d+)/$','blog.views.page'),\n \n\turl(r'^sign_in/$','blog.views.sign_in'),\n\turl(r'^article/$','blog.views.articles'),\n\turl(r'^sign_up/$','blog.views.sign_up'),\n\n url(r'^validate/$','blog.views.validate'),\n\n # url(r'^article/get/(?P\\d+)/$','blog.views.articles'),\n url(r'^article/get/(?P\\d+)/(?P\\d+)/$','blog.views.articles'),\n url(r'^comment/(?P\\d+)/$','blog.views.comment'),\n\n\n\n #url(r'^index/(?P\\d+)/$','blog.views.index'),\n # another url here index/post_id to determine which post\n]\n","sub_path":"Blog_project/Blog_project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"530194259","text":"import cvxopt as cvx\nimport numpy as np\nimport pylab\nfrom scipy.integrate import odeint\n\n\ndef func(omega, tt, A, c):\n return np.dot(A, omega)\n\n\ndef func1(y, tt1, A, b):\n return np.dot(A, y) + b\n\n\ndef func2(U, D_new, c_new):\n return 0.5 * np.dot(np.dot(U.transpose(), D_new), U) + np.dot(c_new.transpose(), U)\n\n\ndef alg(D, A, b, c, x_zv, H, g, t0, t_zv):\n N = 30\n y = np.zeros(len(A))\n h = (t_zv - t0) / N\n tj = []\n\n for i in xrange(1, int(N) + 1):\n tj.append(t0 + i * h)\n\n tt = [t0, t_zv]\n B0 = odeint(func, x_zv, tt, args=(A, b))[1]\n tt1 = [t0, tj[0]]\n y1 = odeint(func1, y, tt1, args=(A, b))[1]\n temp = odeint(func1, y1, tj, args=(A, b))\n B = np.random.random((len(A), int(N)))\n\n for i in xrange(int(N)):\n B[:, i] = temp[int(N) - i - 1]\n\n D_new = cvx.matrix(np.dot(np.dot(B.transpose(), D), B))\n A_new = cvx.matrix(np.dot(H, B))\n c_new = cvx.matrix(np.dot(B.transpose(), c + np.dot(D, B0)))\n b_new = cvx.matrix(g - np.dot(H, B0))\n G = np.zeros((2 * 
int(N), int(N)))\n\n for i in xrange(int(N)):\n G[i][i] = 1\n for i in xrange(int(N)):\n G[int(N) + i][i] = -1\n\n G = cvx.matrix(G)\n h = [1.0 for i in xrange(2 * int(N))]\n h = cvx.matrix(h)\n\n u = cvx.solvers.qp(D_new, c_new, G, h, A_new.T, b_new)['x']\n\n for i in xrange(0, int(N)):\n tj[i] -= (t_zv - t0) / N\n\n pylab.plot(tj, u)\n pylab.show()\n","sub_path":"06cem/Optimisation_and_Manage_Methods/lab08/lab08.py","file_name":"lab08.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"468893321","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 26 15:47:36 2014\n\n@author: David\n\"\"\"\nimport os\nfrom tkinter import *\n\nclass DoXML:\n def __init__(self, name):\n self.root = Tk()\n self.S = Scrollbar(self.root)\n self.T = Text(self.root, height=40, width=120, wrap=WORD)\n self.S.pack(side=RIGHT, fill=Y)\n self.T.pack(side=LEFT, fill=Y)\n self.S.config(command=self.T.yview)\n self.T.config(yscrollcommand=self.S.set)\n self.file_name = name;\n# self.fh = open(self.file_name,\"r\")\n os.chdir(\"XML\")\n self.fh = open(self.file_name,'rb')\n self.chapNum = 0;\n\n def getChar(self):\n byte = self.fh.read(1)\n ch = byte.decode(\"utf-8\")\n while ch == '\\r' or ch == '\\n':\n ch = self.fh.read(1).decode(\"utf-8\")\n return ch\n \n def getNum(self):\n n = 0;\n ch = self.getChar()\n while ch >= '0' and ch <= '9':\n n = n * 10 + ord(ch) - ord('0')\n ch = self.getChar()\n return (n, ch)\n \n def getElement(self, ch):\n if ch != '<':\n raise RuntimeError('expected ')\n ch = self.getChar()\n e = \"\";\n while ch != \">\":\n e += ch\n ch = self.getChar()\n return e\n \n def processState(self, state):\n print('State is ' + state)\n if state == 'INIT':\n ch = self.getChar();\n e = self.getElement(ch)\n if e != 'chapter':\n raise RuntimeError('Bad Root' + e)\n while e != \"/chapter\":\n ch = self.getChar()\n e = self.getElement(ch)\n self.processState(e)\n elif state == 'number':\n self.chapNum,ch = self.getNum()\n e = self.getElement(ch)\n if e != '/number':\n raise RuntimeError('expected \"/number')\n return True\n# elif state == 'outline':\n else:\n raise RuntimeError('unknown state: ' + state);\n \n def run(self, state):\n self.processState(state)\n# str = \" \"\n# while len(str) > 0:\n# str = self.fh.readline()\n# if len(str) > 0:\n# self.T.insert(END,str)\n# self.fh.close() \n \n# main program\nprint (chr(12))\nprint('Started basic_e_book')\nxml = DoXML('Chapter_02.xhtml')\nxml.run('INIT') \n\nfh.close()\nmainloop()\n","sub_path":"popeye/src/text/Python_Book/basic_e_book.py","file_name":"basic_e_book.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"340135802","text":"# -*- mode: python -*-\n\nimport os\n\nLOGIN_URL = \"/neru/login/\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.environ[\"NERU_BASE_DIR\"]\n\nROOT_URLCONF = \"proj.urls\"\n\nWSGI_APPLICATION = \"proj.wsgi.application\"\n\nINSTALLED_APPS = [\n \"app.apps.AppConfig\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.10/topics/i18n/\n\nLANGUAGE_CODE = \"ja-jp\"\n\nTIME_ZONE = \"Asia/Tokyo\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, 
Images)\n# https://docs.djangoproject.com/en/1.10/howto/static-files/\n\nSTATIC_URL = \"/neru/static/\"\n","sub_path":"python/django/django-model-try/proj/_settings_common.py","file_name":"_settings_common.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"167464852","text":"import aug_sfutils as sfu\n\n\ndef to_str(str_or_byt):\n \"\"\"\n Converts a plain string to a byte string (python3 compatibility)\n \"\"\"\n\n str_out = str_or_byt.decode('utf8') if isinstance(str_or_byt, bytes) else str_or_byt\n return str_out.strip()\n\n\ndef extract_main(gasv_str):\n\n main_spec = 'D'\n if '(1)' in gasv_str:\n main_spec = gasv_str.split('(1)')[0].split(':')[-2].split(',')[-1].strip()\n elif gasv_str.count(':') == 1:\n main_spec = gasv_str.split(':')[0].strip()\n return main_spec\n\ndef spec_jou_sf(nshot):\n\n jou = sfu.SFREAD(nshot, 'JOU')\n print(jou.sfile)\n if jou.status:\n fill = jou.getparset('FILLING')\n tmp = fill['GasVent']\n gasv_str = ''\n for y in tmp:\n gasv_str += to_str(y)\n\n return extract_main(gasv_str)\n else:\n return None\n\n\nif __name__ == '__main__':\n\n print(spec_jou_sf(28053))\n","sub_path":"jou_main_spec.py","file_name":"jou_main_spec.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"226321562","text":"\"\"\"Utility methods for EvalML pipelines.\"\"\"\nimport logging\n\nfrom woodwork import logical_types\n\nfrom . import (\n TimeSeriesBinaryClassificationPipeline,\n TimeSeriesMulticlassClassificationPipeline,\n TimeSeriesRegressionPipeline,\n)\nfrom .binary_classification_pipeline import BinaryClassificationPipeline\nfrom .multiclass_classification_pipeline import (\n MulticlassClassificationPipeline,\n)\nfrom .pipeline_base import PipelineBase\nfrom .regression_pipeline import RegressionPipeline\n\nfrom evalml.data_checks import DataCheckActionCode\nfrom evalml.model_family import ModelFamily\nfrom evalml.pipelines.components import ( # noqa: F401\n CatBoostClassifier,\n CatBoostRegressor,\n ComponentBase,\n DateTimeFeaturizer,\n DelayedFeatureTransformer,\n DropColumns,\n DropNullColumns,\n DropRowsTransformer,\n EmailFeaturizer,\n Estimator,\n Imputer,\n LogTransformer,\n NaturalLanguageFeaturizer,\n OneHotEncoder,\n Oversampler,\n RandomForestClassifier,\n StackedEnsembleClassifier,\n StackedEnsembleRegressor,\n StandardScaler,\n TargetImputer,\n Undersampler,\n URLFeaturizer,\n)\nfrom evalml.pipelines.components.transformers.encoders.label_encoder import (\n LabelEncoder,\n)\nfrom evalml.pipelines.components.utils import (\n get_estimators,\n handle_component_class,\n)\nfrom evalml.problem_types import (\n ProblemTypes,\n handle_problem_types,\n is_classification,\n is_time_series,\n)\nfrom evalml.utils import import_or_raise, infer_feature_types\n\nlogger = logging.getLogger(__name__)\n\n\ndef _get_preprocessing_components(\n X, y, problem_type, estimator_class, sampler_name=None\n):\n \"\"\"Given input data, target data and an estimator class, construct a recommended preprocessing chain to be combined with the estimator and trained on the provided data.\n\n Args:\n X (pd.DataFrame): The input data of shape [n_samples, n_features].\n y (pd.Series): The target data of length [n_samples].\n problem_type (ProblemTypes or str): Problem type.\n estimator_class (class): A class which subclasses Estimator estimator for pipeline.\n sampler_name (str): The name of the sampler 
component to add to the pipeline. Defaults to None.\n\n Returns:\n list[Transformer]: A list of applicable preprocessing components to use with the estimator.\n \"\"\"\n pp_components = []\n\n if is_classification(problem_type):\n pp_components.append(LabelEncoder)\n\n all_null_cols = X.columns[X.isnull().all()]\n if len(all_null_cols) > 0:\n pp_components.append(DropNullColumns)\n\n index_and_unknown_columns = list(\n X.ww.select([\"index\", \"unknown\"], return_schema=True).columns\n )\n if len(index_and_unknown_columns) > 0:\n pp_components.append(DropColumns)\n\n email_columns = list(X.ww.select(\"EmailAddress\", return_schema=True).columns)\n if len(email_columns) > 0:\n pp_components.append(EmailFeaturizer)\n\n url_columns = list(X.ww.select(\"URL\", return_schema=True).columns)\n if len(url_columns) > 0:\n pp_components.append(URLFeaturizer)\n\n input_logical_types = {type(lt) for lt in X.ww.logical_types.values()}\n types_imputer_handles = {\n logical_types.Boolean,\n logical_types.Categorical,\n logical_types.Double,\n logical_types.Integer,\n logical_types.URL,\n logical_types.EmailAddress,\n logical_types.Datetime,\n }\n\n datetime_cols = list(X.ww.select([\"Datetime\"], return_schema=True).columns)\n\n add_datetime_featurizer = len(datetime_cols) > 0\n if add_datetime_featurizer and estimator_class.model_family not in [\n ModelFamily.ARIMA,\n ModelFamily.PROPHET,\n ]:\n pp_components.append(DateTimeFeaturizer)\n\n text_columns = list(X.ww.select(\"NaturalLanguage\", return_schema=True).columns)\n if len(text_columns) > 0:\n pp_components.append(NaturalLanguageFeaturizer)\n\n if len(input_logical_types.intersection(types_imputer_handles)) or len(\n text_columns\n ):\n pp_components.append(Imputer)\n\n if (\n is_time_series(problem_type)\n and estimator_class.model_family != ModelFamily.ARIMA\n ):\n pp_components.append(DelayedFeatureTransformer)\n\n # The URL and EmailAddress Featurizers will create categorical columns\n categorical_cols = list(\n X.ww.select([\"category\", \"URL\", \"EmailAddress\"], return_schema=True).columns\n )\n if len(categorical_cols) > 0 and estimator_class not in {\n CatBoostClassifier,\n CatBoostRegressor,\n }:\n pp_components.append(OneHotEncoder)\n\n sampler_components = {\n \"Undersampler\": Undersampler,\n \"Oversampler\": Oversampler,\n }\n if sampler_name is not None:\n try:\n import_or_raise(\n \"imblearn.over_sampling\", error_msg=\"imbalanced-learn is not installed\"\n )\n pp_components.append(sampler_components[sampler_name])\n except ImportError:\n logger.warning(\n \"Could not import imblearn.over_sampling, so defaulting to use Undersampler\"\n )\n pp_components.append(Undersampler)\n\n if estimator_class and estimator_class.model_family == ModelFamily.LINEAR_MODEL:\n pp_components.append(StandardScaler)\n\n return pp_components\n\n\ndef _get_pipeline_base_class(problem_type):\n \"\"\"Returns pipeline base class for problem_type.\"\"\"\n problem_type = handle_problem_types(problem_type)\n if problem_type == ProblemTypes.BINARY:\n return BinaryClassificationPipeline\n elif problem_type == ProblemTypes.MULTICLASS:\n return MulticlassClassificationPipeline\n elif problem_type == ProblemTypes.REGRESSION:\n return RegressionPipeline\n elif problem_type == ProblemTypes.TIME_SERIES_REGRESSION:\n return TimeSeriesRegressionPipeline\n elif problem_type == ProblemTypes.TIME_SERIES_BINARY:\n return TimeSeriesBinaryClassificationPipeline\n else:\n return TimeSeriesMulticlassClassificationPipeline\n\n\ndef make_pipeline(\n X,\n y,\n estimator,\n 
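# The selection logic in _get_preprocessing_components above keys off column
# types. A framework-free sketch of the same idea using plain pandas dtypes;
# the step names are placeholders, not evalml component classes:
import pandas as pd

def suggest_steps(df: pd.DataFrame) -> list:
    """Return preprocessing step names implied by the frame's dtypes."""
    steps = []
    if df.isnull().all().any():
        steps.append("drop_all_null_columns")
    if df.select_dtypes(include="datetime").shape[1]:
        steps.append("datetime_featurizer")
    if df.isnull().any().any():
        steps.append("imputer")
    if df.select_dtypes(include=["object", "category"]).shape[1]:
        steps.append("one_hot_encoder")
    return steps

# suggest_steps(pd.DataFrame({"a": [1, None], "b": ["x", "y"]}))
# -> ['imputer', 'one_hot_encoder']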
problem_type,\n parameters=None,\n sampler_name=None,\n extra_components=None,\n extra_components_position=\"before_preprocessing\",\n use_estimator=True,\n):\n \"\"\"Given input data, target data, an estimator class and the problem type, generates a pipeline class with a preprocessing chain which was recommended based on the inputs. The pipeline will be a subclass of the appropriate pipeline base class for the specified problem_type.\n\n Args:\n X (pd.DataFrame): The input data of shape [n_samples, n_features].\n y (pd.Series): The target data of length [n_samples].\n estimator (Estimator): Estimator for pipeline.\n problem_type (ProblemTypes or str): Problem type for pipeline to generate.\n parameters (dict): Dictionary with component names as keys and dictionary of that component's parameters as values.\n An empty dictionary or None implies using all default values for component parameters.\n sampler_name (str): The name of the sampler component to add to the pipeline. Only used in classification problems.\n Defaults to None\n extra_components (list[ComponentBase]): List of extra components to be added after preprocessing components. Defaults to None.\n extra_components_position (str): Where to put extra components. Defaults to \"before_preprocessing\" and any other value will put components after preprocessing components.\n use_estimator (bool): Whether to add the provided estimator to the pipeline or not. Defaults to True.\n\n Returns:\n PipelineBase object: PipelineBase instance with dynamically generated preprocessing components and specified estimator.\n\n Raises:\n ValueError: If estimator is not valid for the given problem type, or sampling is not supported for the given problem type.\n \"\"\"\n X = infer_feature_types(X)\n y = infer_feature_types(y)\n\n if estimator:\n problem_type = handle_problem_types(problem_type)\n if estimator not in get_estimators(problem_type):\n raise ValueError(\n f\"{estimator.name} is not a valid estimator for problem type\"\n )\n if not is_classification(problem_type) and sampler_name is not None:\n raise ValueError(\n f\"Sampling is unsupported for problem_type {str(problem_type)}\"\n )\n\n preprocessing_components = _get_preprocessing_components(\n X, y, problem_type, estimator, sampler_name\n )\n extra_components = extra_components or []\n estimator = [estimator] if use_estimator else []\n\n if extra_components_position == \"before_preprocessing\":\n complete_component_list = (\n extra_components + preprocessing_components + estimator\n )\n else:\n complete_component_list = (\n preprocessing_components + extra_components + estimator\n )\n\n component_graph = PipelineBase._make_component_dict_from_component_list(\n complete_component_list\n )\n base_class = _get_pipeline_base_class(problem_type)\n return base_class(\n component_graph,\n parameters=parameters,\n )\n\n\ndef generate_pipeline_code(element):\n \"\"\"Creates and returns a string that contains the Python imports and code required for running the EvalML pipeline.\n\n Args:\n element (pipeline instance): The instance of the pipeline to generate string Python code.\n\n Returns:\n str: String representation of Python code that can be run separately in order to recreate the pipeline instance.\n Does not include code for custom component implementation.\n\n Raises:\n ValueError: If element is not a pipeline, or if the pipeline is nonlinear.\n \"\"\"\n # hold the imports needed and add code to end\n code_strings = []\n if not isinstance(element, PipelineBase):\n raise ValueError(\n \"Element 
must be a pipeline instance, received {}\".format(type(element))\n )\n if isinstance(element.component_graph, dict):\n raise ValueError(\"Code generation for nonlinear pipelines is not supported yet\")\n code_strings.append(\n \"from {} import {}\".format(\n element.__class__.__module__, element.__class__.__name__\n )\n )\n code_strings.append(repr(element))\n return \"\\n\".join(code_strings)\n\n\ndef _make_stacked_ensemble_pipeline(\n input_pipelines, problem_type, final_estimator=None, n_jobs=-1, random_seed=0\n):\n \"\"\"Creates a pipeline with a stacked ensemble estimator.\n\n Args:\n input_pipelines (list(PipelineBase or subclass obj)): List of pipeline instances to use as the base estimators for the stacked ensemble.\n This must not be None or an empty list or else EnsembleMissingPipelinesError will be raised.\n problem_type (ProblemType): Problem type of pipeline\n final_estimator (Estimator): Metalearner to use for the ensembler. Defaults to None.\n n_jobs (int or None): Integer describing level of parallelism used for pipelines.\n None and 1 are equivalent. If set to -1, all CPUs are used. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.\n Defaults to -1.\n\n Returns:\n Pipeline with appropriate stacked ensemble estimator.\n \"\"\"\n\n def _make_new_component_name(model_type, component_name, idx=None):\n idx = \" \" + str(idx) if idx is not None else \"\"\n return f\"{str(model_type)} Pipeline{idx} - {component_name}\"\n\n component_graph = (\n {\"Label Encoder\": [\"Label Encoder\", \"X\", \"y\"]}\n if is_classification(problem_type)\n else {}\n )\n final_components = []\n used_model_families = []\n parameters = {}\n\n if is_classification(problem_type):\n parameters = {\n \"Stacked Ensemble Classifier\": {\n \"n_jobs\": n_jobs,\n }\n }\n estimator = StackedEnsembleClassifier\n pipeline_name = \"Stacked Ensemble Classification Pipeline\"\n else:\n parameters = {\n \"Stacked Ensemble Regressor\": {\n \"n_jobs\": n_jobs,\n }\n }\n estimator = StackedEnsembleRegressor\n pipeline_name = \"Stacked Ensemble Regression Pipeline\"\n\n pipeline_class = {\n ProblemTypes.BINARY: BinaryClassificationPipeline,\n ProblemTypes.MULTICLASS: MulticlassClassificationPipeline,\n ProblemTypes.REGRESSION: RegressionPipeline,\n }[problem_type]\n\n for pipeline in input_pipelines:\n model_family = pipeline.component_graph[-1].model_family\n model_family_idx = (\n used_model_families.count(model_family) + 1\n if used_model_families.count(model_family) > 0\n else None\n )\n used_model_families.append(model_family)\n final_component = None\n ensemble_y = \"y\"\n for name, component_list in pipeline.component_graph.component_dict.items():\n new_component_list = []\n new_component_name = _make_new_component_name(\n model_family, name, model_family_idx\n )\n for i, item in enumerate(component_list):\n if i == 0:\n fitted_comp = handle_component_class(item)\n new_component_list.append(fitted_comp)\n parameters[new_component_name] = pipeline.parameters.get(name, {})\n elif isinstance(item, str) and item not in [\"X\", \"y\"]:\n new_component_list.append(\n _make_new_component_name(model_family, item, model_family_idx)\n )\n elif isinstance(item, str) and item == \"y\":\n if is_classification(problem_type):\n new_component_list.append(\"Label Encoder.y\")\n else:\n new_component_list.append(\"y\")\n else:\n new_component_list.append(item)\n if i != 0 and item.endswith(\".y\"):\n ensemble_y = _make_new_component_name(\n model_family, item, model_family_idx\n )\n component_graph[new_component_name] = 
new_component_list\n final_component = new_component_name\n final_components.append(final_component)\n\n component_graph[estimator.name] = (\n [estimator] + [comp + \".x\" for comp in final_components] + [ensemble_y]\n )\n\n return pipeline_class(\n component_graph,\n parameters=parameters,\n custom_name=pipeline_name,\n random_seed=random_seed,\n )\n\n\ndef _make_pipeline_from_multiple_graphs(\n input_pipelines,\n estimator,\n problem_type,\n parameters=None,\n pipeline_name=None,\n sub_pipeline_names=None,\n random_seed=0,\n):\n \"\"\"Creates a pipeline from multiple preprocessing pipelines and a final estimator. Final y input to the estimator will be chosen from the last of the input pipelines.\n\n Args:\n input_pipelines (list(PipelineBase or subclass obj)): List of pipeline instances to use for preprocessing.\n estimator (Estimator): Final estimator for the pipelines.\n problem_type (ProblemType): Problem type of pipeline.\n parameters (Dict): Parameters to initialize pipeline with. Defaults to an empty dictionary.\n pipeline_name (str): Custom name for the final pipeline.\n sub_pipeline_names (Dict): Dictionary mapping original input pipeline names to new names. This will be used to rename components. Defaults to None.\n random_seed (int): Random seed for the pipeline. Defaults to 0.\n\n Returns:\n pipeline (PipelineBase): Pipeline created with the input pipelines.\n \"\"\"\n\n def _make_new_component_name(name, component_name, idx=None, pipeline_name=None):\n idx = \" \" + str(idx) if idx is not None else \"\"\n if pipeline_name:\n return f\"{pipeline_name} Pipeline{idx} - {component_name}\"\n return f\"{str(name)} Pipeline{idx} - {component_name}\"\n\n parameters = parameters if parameters else {}\n final_components = []\n used_names = []\n component_graph = (\n {\"Label Encoder\": [\"Label Encoder\", \"X\", \"y\"]}\n if is_classification(problem_type)\n else {}\n )\n for pipeline in input_pipelines:\n component_pipeline_name = pipeline.name\n name_idx = (\n used_names.count(component_pipeline_name) + 1\n if used_names.count(component_pipeline_name) > 0\n else None\n )\n used_names.append(component_pipeline_name)\n sub_pipeline_name = (\n sub_pipeline_names[pipeline.name] if sub_pipeline_names else None\n )\n final_component = None\n final_y = \"y\"\n\n final_y_candidate = (\n None\n if not handle_component_class(\n pipeline.component_graph.compute_order[-1]\n ).modifies_target\n else _make_new_component_name(\n component_pipeline_name,\n pipeline.component_graph.compute_order[-1],\n name_idx,\n sub_pipeline_name,\n )\n + \".y\"\n )\n for name, component_list in pipeline.component_graph.component_dict.items():\n new_component_list = []\n new_component_name = _make_new_component_name(\n component_pipeline_name, name, name_idx, sub_pipeline_name\n )\n for i, item in enumerate(component_list):\n if i == 0:\n fitted_comp = handle_component_class(item)\n new_component_list.append(fitted_comp)\n parameters[new_component_name] = pipeline.parameters.get(name, {})\n elif isinstance(item, str) and item not in [\"X\", \"y\"]:\n new_component_list.append(\n _make_new_component_name(\n component_pipeline_name, item, name_idx, sub_pipeline_name\n )\n )\n if i != 0 and item.endswith(\".y\"):\n final_y = _make_new_component_name(\n component_pipeline_name, item, name_idx, sub_pipeline_name\n )\n elif isinstance(item, str) and item == \"y\":\n if is_classification(problem_type):\n new_component_list.append(\"Label Encoder.y\")\n else:\n new_component_list.append(\"y\")\n else:\n 
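# Both graph-merging helpers here rename sub-pipeline nodes by prefixing them
# with a pipeline name so the merged component graphs stay collision-free. A
# stripped-down sketch of that renaming over a dict-of-lists graph; the graph
# shape and names are simplified stand-ins, not evalml's exact structure:
def prefix_graph(graph: dict, prefix: str) -> dict:
    """Prefix every node name and every non-X/y edge reference."""
    def rename(ref):
        return ref if ref in ("X", "y") else f"{prefix} - {ref}"
    return {rename(node): [deps[0]] + [rename(r) for r in deps[1:]]
            for node, deps in graph.items()}

# prefix_graph({"Imputer": ["Imputer", "X", "y"]}, "RF Pipeline")
# -> {'RF Pipeline - Imputer': ['Imputer', 'X', 'y']}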
new_component_list.append(item)\n component_graph[new_component_name] = new_component_list\n final_component = new_component_name\n final_components.append(final_component)\n\n final_y = final_y_candidate if final_y_candidate else final_y\n component_graph[estimator.name] = (\n [estimator] + [comp + \".x\" for comp in final_components] + [final_y]\n )\n pipeline_class = {\n ProblemTypes.BINARY: BinaryClassificationPipeline,\n ProblemTypes.MULTICLASS: MulticlassClassificationPipeline,\n ProblemTypes.REGRESSION: RegressionPipeline,\n ProblemTypes.TIME_SERIES_BINARY: TimeSeriesBinaryClassificationPipeline,\n ProblemTypes.TIME_SERIES_MULTICLASS: TimeSeriesMulticlassClassificationPipeline,\n ProblemTypes.TIME_SERIES_REGRESSION: TimeSeriesRegressionPipeline,\n }[problem_type]\n return pipeline_class(\n component_graph,\n parameters=parameters,\n custom_name=pipeline_name,\n random_seed=random_seed,\n )\n\n\ndef make_pipeline_from_actions(problem_type, actions):\n \"\"\"Creates a pipeline of components to address the input DataCheckAction list.\n\n Args:\n problem_type (str or ProblemType): The problem type that the pipeline should address.\n actions (list[DataCheckAction]): List of DataCheckAction objects used to create list of components\n\n Returns:\n PipelineBase: Pipeline which can be used to address data check actions.\n \"\"\"\n component_list = _make_component_list_from_actions(actions)\n parameters = {}\n for component in component_list:\n parameters[component.name] = component.parameters\n component_dict = PipelineBase._make_component_dict_from_component_list(\n [component.name for component in component_list]\n )\n base_class = _get_pipeline_base_class(problem_type)\n return base_class(component_dict, parameters=parameters)\n\n\ndef _make_component_list_from_actions(actions):\n \"\"\"Creates a list of components from the input DataCheckAction list.\n\n Args:\n actions (list(DataCheckAction)): List of DataCheckAction objects used to create list of components\n\n Returns:\n list(ComponentBase): List of components used to address the input actions\n \"\"\"\n components = []\n cols_to_drop = []\n indices_to_drop = []\n for action in actions:\n if action.action_code == DataCheckActionCode.DROP_COL:\n cols_to_drop.extend(action.metadata[\"columns\"])\n elif action.action_code == DataCheckActionCode.IMPUTE_COL:\n metadata = action.metadata\n if metadata[\"is_target\"]:\n components.append(\n TargetImputer(impute_strategy=metadata[\"impute_strategy\"])\n )\n elif action.action_code == DataCheckActionCode.DROP_ROWS:\n indices_to_drop.extend(action.metadata[\"rows\"])\n if cols_to_drop:\n cols_to_drop = sorted(set(cols_to_drop))\n components.append(DropColumns(columns=cols_to_drop))\n if indices_to_drop:\n indices_to_drop = sorted(set(indices_to_drop))\n components.append(DropRowsTransformer(indices_to_drop=indices_to_drop))\n\n return components\n\n\ndef make_timeseries_baseline_pipeline(problem_type, gap, forecast_horizon):\n \"\"\"Make a baseline pipeline for time series regression problems.\n\n Args:\n problem_type: One of TIME_SERIES_REGRESSION, TIME_SERIES_MULTICLASS, TIME_SERIES_BINARY\n gap (int): Non-negative gap parameter.\n forecast_horizon (int): Positive forecast_horizon parameter.\n\n Returns:\n TimeSeriesPipelineBase, a time series pipeline corresponding to the problem type.\n\n \"\"\"\n pipeline_class, pipeline_name = {\n ProblemTypes.TIME_SERIES_REGRESSION: (\n TimeSeriesRegressionPipeline,\n \"Time Series Baseline Regression Pipeline\",\n ),\n 
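# _make_component_list_from_actions above folds repeated DROP_COL / DROP_ROWS
# actions into a single deduplicated component each. A dependency-free sketch
# of that aggregation step; the action tuples and codes are simplified
# stand-ins for the DataCheckAction objects:
def fold_actions(actions):
    """Collect columns/rows across actions, deduplicate, emit one op each."""
    cols, rows = set(), set()
    for code, metadata in actions:
        if code == "drop_col":
            cols.update(metadata["columns"])
        elif code == "drop_rows":
            rows.update(metadata["rows"])
    ops = []
    if cols:
        ops.append(("DropColumns", sorted(cols)))
    if rows:
        ops.append(("DropRowsTransformer", sorted(rows)))
    return ops

# fold_actions([("drop_col", {"columns": ["b", "a"]}),
#               ("drop_col", {"columns": ["a"]})])
# -> [('DropColumns', ['a', 'b'])]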
ProblemTypes.TIME_SERIES_MULTICLASS: (\n TimeSeriesMulticlassClassificationPipeline,\n \"Time Series Baseline Multiclass Pipeline\",\n ),\n ProblemTypes.TIME_SERIES_BINARY: (\n TimeSeriesBinaryClassificationPipeline,\n \"Time Series Baseline Binary Pipeline\",\n ),\n }[problem_type]\n baseline = pipeline_class(\n component_graph=[\n \"Delayed Feature Transformer\",\n \"Time Series Baseline Estimator\",\n ],\n custom_name=pipeline_name,\n parameters={\n \"pipeline\": {\n \"date_index\": None,\n \"gap\": gap,\n \"max_delay\": 0,\n \"forecast_horizon\": forecast_horizon,\n },\n \"Delayed Feature Transformer\": {\n \"max_delay\": 0,\n \"gap\": gap,\n \"forecast_horizon\": forecast_horizon,\n \"delay_target\": True,\n \"delay_features\": False,\n },\n \"Time Series Baseline Estimator\": {\n \"gap\": gap,\n \"forecast_horizon\": forecast_horizon,\n },\n },\n )\n return baseline\n\n\ndef rows_of_interest(\n pipeline, X, y=None, threshold=None, epsilon=0.1, sort_values=True, types=\"all\"\n):\n \"\"\"Get the row indices of the data that are closest to the threshold. Works only for binary classification problems and pipelines.\n\n Args:\n pipeline (PipelineBase): The fitted binary pipeline.\n X (ww.DataTable, pd.DataFrame): The input features to predict on.\n y (ww.DataColumn, pd.Series, None): The input target data, if available. Defaults to None.\n threshold (float): The threshold value of interest to separate positive and negative predictions. If None, uses the pipeline threshold if set, else 0.5. Defaults to None.\n epsilon (epsilon): The difference between the probability and the threshold that would make the row interesting for us. For instance, epsilon=0.1 and threhsold=0.5 would mean\n we consider all rows in [0.4, 0.6] to be of interest. Defaults to 0.1.\n sort_values (bool): Whether to return the indices sorted by the distance from the threshold, such that the first values are closer to the threshold and the later values are further. Defaults to True.\n types (str): The type of rows to keep and return. Can be one of ['incorrect', 'correct', 'true_positive', 'true_negative', 'all']. Defaults to 'all'.\n\n 'incorrect' - return only the rows where the predictions are incorrect. This means that, given the threshold and target y, keep only the rows which are labeled wrong.\n 'correct' - return only the rows where the predictions are correct. This means that, given the threshold and target y, keep only the rows which are correctly labeled.\n 'true_positive' - return only the rows which are positive, as given by the targets.\n 'true_negative' - return only the rows which are negative, as given by the targets.\n 'all' - return all rows. This is the only option available when there is no target data provided.\n\n Returns:\n The indices corresponding to the rows of interest.\n\n Raises:\n ValueError: If pipeline is not a fitted Binary Classification pipeline.\n ValueError: If types is invalid or y is not provided when types is not 'all'.\n ValueError: If the threshold is provided and is exclusive of [0, 1].\n \"\"\"\n valid_types = [\"incorrect\", \"correct\", \"true_positive\", \"true_negative\", \"all\"]\n if types not in valid_types:\n raise ValueError(\n \"Invalid arg for 'types'! 
Must be one of {}\".format(valid_types)\n )\n\n if types != \"all\" and y is None:\n raise ValueError(\"Need an input y in order to use types {}\".format(types))\n\n if (\n not isinstance(pipeline, BinaryClassificationPipeline)\n or not pipeline._is_fitted\n ):\n raise ValueError(\n \"Pipeline provided must be a fitted Binary Classification pipeline!\"\n )\n\n if threshold is not None and (threshold < 0 or threshold > 1):\n raise ValueError(\n \"Provided threshold {} must be between [0, 1]\".format(threshold)\n )\n\n if threshold is None:\n threshold = pipeline.threshold or 0.5\n\n # get predicted proba\n pred_proba = pipeline.predict_proba(X)\n pos_value_proba = pred_proba.iloc[:, -1]\n preds = pos_value_proba >= threshold\n preds_value_proba = abs(pos_value_proba - threshold)\n\n # placeholder for y if it isn't supplied\n y_current = y if y is not None else preds\n\n # logic for breaking apart the different categories\n mask = y_current\n if types in [\"correct\", \"incorrect\"]:\n mask = preds == y\n mask = mask.astype(bool)\n\n if types in [\"correct\", \"true_positive\"]:\n preds_value_proba = preds_value_proba[mask.values]\n elif types in [\"incorrect\", \"true_negative\"]:\n preds_value_proba = preds_value_proba[~mask.values]\n\n if sort_values:\n preds_value_proba = preds_value_proba.sort_values(kind=\"stable\")\n\n preds_value_proba = preds_value_proba[preds_value_proba <= epsilon]\n return preds_value_proba.index.tolist()\n","sub_path":"evalml/pipelines/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":27652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"124554053","text":"import os\nfrom collections import namedtuple\n\nimport requests\n\nimport pdb\n\n\nLatLon = namedtuple('LatLon', ['latitude', 'longitude'])\nGOOGLE_GEOCODING = 'https://maps.googleapis.com/maps/api/geocode/json'\n\n\ndef get_geocoding_api_key():\n return os.environ['GOOGLE_GEOCODING_API_KEY']\n\n\ndef get_lat_lon(address=None):\n parameters = {\n 'address': address,\n 'key': get_geocoding_api_key()\n }\n\n response = requests.get(GOOGLE_GEOCODING, params=parameters)\n\n latitude = response.json()['results'][0]['geometry']['location']['lat']\n longitude = response.json()['results'][0]['geometry']['location']['lng']\n\n return LatLon(latitude=str(latitude), longitude=str(longitude))\n\n\n\nif __name__ == '__main__':\n print(get_lat_lon(address='316 W 14th St new york, ny 10014'))\n","sub_path":"google_geocoding.py","file_name":"google_geocoding.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"281960451","text":"\"\"\"Command line utility for extracting files from PAK files\n\nSupported Games:\n - QUAKE\n\"\"\"\n\n__version__ = '1.0.1'\n\nimport argparse\nimport os\nimport sys\nfrom tabulate import tabulate\n\nfrom quake import pak\nfrom common import Parser, ResolvePathAction\n\nif __name__ == '__main__':\n parser = Parser(prog='unpak',\n description='Default action is to extract files to xdir.',\n epilog='example: unpak PAK0.PAK -d {0} => extract all files to {0}'.format(os.path.expanduser('./extracted')))\n\n parser.add_argument('file',\n metavar='file.pak',\n action=ResolvePathAction)\n\n parser.add_argument('-l', '--list',\n action='store_true',\n help='list files')\n\n parser.add_argument('-d',\n metavar='xdir',\n dest='dest',\n default=os.getcwd(),\n action=ResolvePathAction,\n help='extract files into xdir')\n\n 
parser.add_argument('-q',\n dest='quiet',\n action='store_true',\n help='quiet mode')\n\n parser.add_argument('-v', '--version',\n dest='version',\n action='version',\n help=argparse.SUPPRESS,\n version='{} version {}'.format(parser.prog, __version__))\n\n args = parser.parse_args()\n\n if not pak.is_pakfile(args.file):\n print('{0}: cannot find or open {1}'.format(parser.prog, args.file), file=sys.stderr)\n sys.exit(1)\n\n if args.list:\n with pak.PakFile(args.file) as pak_file:\n info_list = sorted(pak_file.infolist(), key=lambda i: i.filename)\n\n headers = ['Length', 'Name']\n table = [[i.file_size, i.filename] for i in info_list]\n length = sum([i.file_size for i in info_list])\n count = len(info_list)\n table.append([length, '%d file%s' % (count, 's' if count == 1 else '')])\n\n separator = []\n for i in range(len(headers)):\n t = max(len(str(length)), len(headers[i]) + 2)\n separator.append('-' * t)\n\n table.insert(-1, separator)\n\n print('Archive: %s' % os.path.basename(args.file))\n print(tabulate(table, headers=headers))\n\n sys.exit(0)\n\n with pak.PakFile(args.file) as pak_file:\n info_list = pak_file.infolist()\n for item in sorted(info_list, key=lambda i: i.filename):\n filename = item.filename\n fullpath = os.path.join(args.dest, filename)\n\n if not args.quiet:\n print(' extracting: %s' % fullpath)\n\n try:\n pak_file.extract(filename, args.dest)\n except:\n print('{0}: error: {1}'.format(parser.prog, sys.exc_info()[0]), file=sys.stderr)\n\n sys.exit(0)\n","sub_path":"quake/utils/unpak.py","file_name":"unpak.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"414069652","text":"import numpy as np\nfrom utils.util import resize, check_random_state\n\nkRtEps = 1e-6\n\n\nclass ColumnSampler:\n def __init__(self, random_state=0):\n self.rng_ = check_random_state(random_state)\n self.feature_weights_ = []\n self.feature_set_tree_ = []\n self.colsample_bylevel_ = 1.0\n self.colsample_bytree_ = 1.0\n self.colsample_bynode_ = 1.0\n self.feature_set_level_ = {}\n\n def init(self, num_col, feature_weights, colsample_bynode,\n colsample_bylevel, colsample_bytree, skip_index_0=False):\n self.feature_weights_ = feature_weights\n self.colsample_bylevel_ = colsample_bylevel\n self.colsample_bytree_ = colsample_bytree\n self.colsample_bynode_ = colsample_bynode\n\n self.feature_set_tree_ = []\n self.feature_set_level_ = {}\n\n feature_set_tree_ = list(range(int(skip_index_0), num_col))\n\n self.feature_set_tree_ = self.col_sample(feature_set_tree_,\n self.colsample_bytree_)\n\n def col_sample(self, p_features, colsample):\n if colsample == 1.0:\n return p_features\n features = p_features\n assert len(features) > 0\n n = np.maximum(1, np.floor(colsample * len(features)))\n fweights = self.feature_weights_\n if len(fweights) != 0:\n new_features = weighted_sampling_without_replacement(self.rng_,\n p_features,\n fweights,\n n)\n else:\n new_features = features.copy()\n new_features = shuffle_std(new_features, self.rng_)\n resize(new_features, n)\n return sorted(new_features)\n\n def get_feature_set(self, depth):\n if self.colsample_bylevel_ == 1.0 and self.colsample_bynode_ == 1.0:\n return self.feature_set_tree_\n if depth not in self.feature_set_level_.keys():\n self.feature_set_level_[depth] = self.col_sample(\n self.feature_set_tree_, self.colsample_bylevel_)\n if self.colsample_bynode_ == 1.0:\n return self.feature_set_level_[depth]\n return 
self.col_sample(self.feature_set_level_[depth],\n self.colsample_bynode_)\n\n\ndef weighted_sampling_without_replacement(rng, feat, weights, n):\n assert len(feat) == len(weights)\n keys = [None] * len(weights)\n for i in range(len(feat)):\n w = np.maximum(weights[i], kRtEps)\n u = uniform_real_distribution(rng)\n k = np.log(u) / w\n keys[i] = k\n ind = list(np.argsort(keys))\n ind.reverse()\n resize(ind, n)\n return [feat[i] for i in ind]\n\n\ndef uniform_real_distribution(rng, param=(0, 2147483647), num_bit=24):\n b = np.minimum(24, num_bit)\n r = 2 ** 32\n log2r = int(np.log(r) / np.log(2))\n k = np.maximum(1, int((b + log2r - 1) / log2r))\n summ = 0\n tmpp = 1\n while k > 0:\n summ += (rng.randint(2 ** 32)) * tmpp\n tmpp *= r\n k -= 1\n return (summ / tmpp) * (param[1] - param[0]) + param[0]\n\n\ndef uniform_int_distribution(rng, min_value=0, max_value=2147483647):\n \"\"\"\n https://gcc.gnu.org/onlinedocs/libstdc++/libstdc++-api-4.5/a00987_source.html\n :param max_value:\n :param min_value:\n :param_ rng:\n :return:\n \"\"\"\n rng = check_random_state(rng)\n urange = max_value - min_value\n urnrange = 2 ** 32 - 1\n tmp = 0\n ret = 2 ** 34\n if urnrange > urange:\n uerange = urange + 1\n scaling = int(urnrange / uerange)\n past = uerange * scaling\n while ret >= past:\n ret = rng.randint(2 ** 32)\n ret = int(ret / scaling)\n elif urnrange < urange:\n while ret > urange or ret < tmp:\n uerngrange = urnrange + 1\n new_max = urange / uerngrange\n tmp = uerngrange * uniform_int_distribution(rng, 0, new_max)\n ret - tmp + rng.randint(2 ** 32)\n else:\n ret = rng.randint(2 ** 32)\n return ret + min_value\n\n\ndef shuffle_std(data, rng, crange=(0, 0)):\n first = crange[0]\n if crange[1] == 0:\n last = len(data) - 1\n else:\n last = crange[1]\n urnrange = 2 ** 32 - 1\n urange = last - first + 1\n if (urnrange / urange) >= urange:\n i = first + 1\n if urange % 2 == 0:\n i_rnd = first + uniform_int_distribution(rng, 0, 1)\n swap(data, i, i_rnd)\n i += 1\n while i <= last:\n swap_range = i - first + 1\n a1, a2 = gen_two_uniform_ints(rng, swap_range, swap_range + 1)\n swap(data, i, first + a1)\n i += 1\n swap(data, i, first + a2)\n i += 1\n else:\n for i in range(first + 1, last):\n i_rnd = uniform_int_distribution(rng, 0, i - first)\n swap(data, i, first + i_rnd)\n return data\n\n\ndef swap(data, i1, i2):\n tmp = data[i1]\n data[i1] = data[i2]\n data[i2] = tmp\n\n\ndef gen_two_uniform_ints(rng, a, b):\n x = uniform_int_distribution(rng, 0, int(a * b - 1))\n return x // b, x % b\n\n\nif __name__ == '__main__':\n nn1 = np.random.RandomState(0)\n ppp = []\n for _ in range(5):\n ppp.append(uniform_real_distribution(nn1, (0, 5)))\n print(ppp)\n\n print('numpy')\n nn1 = np.random.RandomState(0)\n dd = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]\n nn1.shuffle(dd)\n print(dd)\n\n # same as std::shuffle\n nn1 = np.random.RandomState(0)\n print(shuffle_std([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], nn1))\n","sub_path":"utils/random_sampler.py","file_name":"random_sampler.py","file_ext":"py","file_size_in_byte":5609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"77040713","text":"#Max Flow and Shortest Path Algorithms\n#Homework No.3 @gla-gzz\n\nfrom grafo import Grafo, get_stats, plot_stats\nG = Grafo()\nsizes = [20, 40, 60, 80, 100, 120, 140, 160, 180, 200, 220, 240, 260, 280, 300]\niterations = 10\ntypes = ['S', 'SP', 'DS', 'DP']\ntext = \"tiempos.dat\"\nG.loppy_loop(text, iterations, sizes) \nshort, maxflow = get_stats(text)\n#plot_stats(sizes, short, 
\"S_script\")\nplot_stats(sizes, maxflow, \"M_script\")\n\n","sub_path":"Homework3/NF_3.py","file_name":"NF_3.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"180399776","text":"'''\nAutor: Martin Schinnerl\nDatum: 25.09.2016\n\nModulbeschreibung:\nDas Modul seitenansicht_roboter enthält die Klasse SeitenansichtRoboter. \nEine Animation ermöglicht die Verschiebung von Punkt4 in zwei Richtungen\nsowie die Rotation von Punkt5 um Punkt4. Die Klasse wird von QObject \nabgeleitet. Das Signal xy_neu dient dem Übermitteln der veränderten \nMittelpunktkoordinaten. Weiter wird das Klassenattribut lock definiert. \nIn der Methode __init__ werden die Attribute und Methoden der Klasse QObject \nvererbt, die Zeichenfläche als Parent-Objekt zugewiesen, die Zeichenwinkel \nder Circle-Objekte berechnet, die Grenzen der Bewegung festgelegt und die \nCircle-Objekte sowie Line2D-Objekte instanziiert. Weitere Methoden der \nKlasse sind connect, on_press, on_motion, on_release, ansicht_aktualisieren, \ngeisterstunde, modellrechnung und punkte_faerben.\n'''\n\n'Module importieren'\nfrom berechnung.winkelnormierung import winkel_normieren\nfrom berechnung.winkelberechnung import winkel_berechnen\nfrom math import acos, cos, pi, sin\nfrom matplotlib.lines import Line2D\nfrom matplotlib.patches import Circle\nfrom PyQt4.QtCore import QObject, Signal\n\n'Klasse SeitenansichtRoboter'\nclass SeitenansichtRoboter(QObject):\n \n 'Signal definieren'\n xy_neu = Signal(float, float, float, float, float, float, float, float)\n \n 'Klassenattribut lock definieren'\n lock = None\n \n 'Methode __init__'\n def __init__(self, parent, xP1, yP1, xP2, yP2, xP3, yP3, \\\n xP4, yP4, xP5, yP5):\n \n 'Vererbung aller Attribute und Methoden von QObject'\n super(SeitenansichtRoboter, self).__init__(parent)\n \n 'Parentobjekt - QPlotWidget'\n self.parent = parent\n \n 'Kartesische Koordinaten'\n self.xP1 = xP1\n self.yP1 = yP1\n \n 'Längen berechnen'\n self.a2 = ((xP2 - xP1)**2 + (yP2 - yP1)**2)**(1/2)\n self.a3 = ((xP4 - xP2)**2 + (yP4 - yP2)**2)**(1/2)\n \n 'Zeichenwinkel berechnen'\n self.modellrechnung(xP2, yP2, xP3, yP3, xP4, yP4, xP5, yP5)\n \n 'Kleinsten x-Wert und y-Wert festlegen'\n self.x_min = 0.001\n self.y_min = 45\n \n 'Grenzwinkel festlegen'\n self.theta2_min = 0\n self.theta2_max = pi\n \n self.theta3_min = -(pi - 8*pi/180)\n self.theta3_max = 0\n \n self.theta4_min = -8*pi/180\n self.theta4_max = pi - 8*pi/180\n \n 'Farben der Punkte und Linien festlegen'\n self.farbe_allgemein = 'black'\n self.farbe_punkt = '#5c061c'\n self.alpha_allgemein = 0.5\n self.alpha_geist = 0.25\n\n 'Punktradius, Linienstärke und Linienende festlegen'\n self.radius_punkt = 55\n self.staerke_linie = 20\n self.ende_linie = 'round'\n \n 'Circle-Objekte instanziieren'\n \n 'Punkt1 instanziieren'\n #Fixpunkt im Ursprung des Koordinatensystems\n self.point1 = Circle((self.xP1, self.yP1))\n self.point1.set_radius(self.radius_punkt)\n self.point1.set_facecolor(self.farbe_allgemein)\n self.point1.set_alpha(self.alpha_allgemein)\n self.parent.axes.add_patch(self.point1)\n \n 'Punkt2 instanziieren'\n self.point2 = Circle((self.xP2, self.yP2))\n self.point2.set_radius(self.radius_punkt)\n self.point2.set_facecolor(self.farbe_allgemein)\n self.point2.set_alpha(self.alpha_allgemein)\n self.parent.axes.add_patch(self.point2)\n \n 'Punkt3 instanziieren'\n self.point3 = Circle((self.xP3, self.yP3))\n self.point3.set_radius(self.staerke_linie)\n 
self.point3.set_facecolor(self.farbe_allgemein)\n self.point3.set_alpha(self.alpha_allgemein)\n self.parent.axes.add_patch(self.point3)\n self.point3.set_visible(False)\n \n 'Punkt4 instanziieren'\n self.point4 = Circle((self.xP4, self.yP4))\n self.point4.set_radius(self.radius_punkt)\n self.point4.set_facecolor(self.farbe_allgemein)\n self.point4.set_alpha(self.alpha_allgemein)\n self.parent.axes.add_patch(self.point4)\n \n 'Punkt5 instanziieren'\n self.point5 = Circle((self.xP5, self.yP5))\n self.point5.set_radius(self.radius_punkt)\n self.point5.set_facecolor(self.farbe_allgemein)\n self.point5.set_alpha(self.alpha_allgemein)\n self.parent.axes.add_patch(self.point5)\n \n self.point = self.point4\n \n 'Line2D-Objekte instanziieren'\n \n 'Endpunktkoordinaten von Linie1'\n #Linie1 - verbindet Punkt1 mit Punkt2\n self.xL1 = (self.point1.center[0], self.point2.center[0])\n self.yL1 = (self.point1.center[1], self.point2.center[1])\n #Länge der Linie berechnen\n self.l1 = ((self.xL1[1] - self.xL1[0])**2 + (self.yL1[1] - \\\n self.yL1[0])**2)**(1/2)\n \n 'Linie1 instanziieren'\n self.line1 = Line2D(self.xL1, self.yL1)\n self.line1.set_linewidth(self.staerke_linie)\n self.line1.set_color(self.farbe_allgemein)\n self.line1.set_alpha(self.alpha_allgemein)\n self.line1.set_solid_capstyle(self.ende_linie)\n self.parent.axes.add_line(self.line1)\n \n 'Endpunktkoordinaten von Linie2'\n #Linie2 - verbindet Punkt2 mit Punkt3\n self.xL2 = (self.point2.center[0], self.point3.center[0])\n self.yL2 = (self.point2.center[1], self.point3.center[1])\n #Länge der Linie berechnen\n self.l2 = ((self.xL2[1] - self.xL2[0])**2 + (self.yL2[1] - \\\n self.yL2[0])**2)**(1/2)\n \n 'Linie2 instanziieren'\n self.line2 = Line2D(self.xL2, self.yL2)\n self.line2.set_linewidth(self.staerke_linie)\n self.line2.set_color(self.farbe_allgemein)\n self.line2.set_alpha(self.alpha_allgemein)\n self.line2.set_solid_capstyle(self.ende_linie)\n self.parent.axes.add_line(self.line2)\n \n 'Endpunktkoordinaten von Linie3'\n #Linie3 - verbindet Punkt3 mit Punkt4\n self.xL3 = (self.point3.center[0], self.point4.center[0])\n self.yL3 = (self.point3.center[1], self.point4.center[1])\n #Länge der Linie berechnen\n self.l3 = ((self.xL3[1] - self.xL3[0])**2 + (self.yL3[1] - \\\n self.yL3[0])**2)**(1/2)\n \n 'Linie3 instanziieren'\n self.line3 = Line2D(self.xL3, self.yL3)\n self.line3.set_linewidth(self.staerke_linie)\n self.line3.set_color(self.farbe_allgemein)\n self.line3.set_alpha(self.alpha_allgemein)\n self.line3.set_solid_capstyle(self.ende_linie)\n self.parent.axes.add_line(self.line3)\n \n 'Endpunktkoordinaten von Linie4'\n #Linie3 - verbindet Punkt4 mit Punkt5\n self.xL4 = (self.point4.center[0], self.point5.center[0])\n self.yL4 = (self.point4.center[1], self.point5.center[1])\n #Länge der Linie berechnen\n self.l4 = ((self.xL4[1] - self.xL4[0])**2 + (self.yL4[1] - \\\n self.yL4[0])**2)**(1/2)\n \n 'Linie4 instanziieren'\n self.line4 = Line2D(self.xL4, self.yL4)\n self.line4.set_linewidth(self.staerke_linie)\n self.line4.set_color(self.farbe_allgemein)\n self.line4.set_alpha(self.alpha_allgemein)\n self.line4.set_solid_capstyle(self.ende_linie)\n self.parent.axes.add_line(self.line4)\n \n 'Geisterpunktobjekte instanziieren'\n \n 'Geisterpunkt2 instanziieren'\n self.pointG2 = Circle((self.xP2, self.yP2))\n self.pointG2.set_radius(self.radius_punkt)\n self.pointG2.set_facecolor(self.farbe_allgemein)\n self.pointG2.set_alpha(self.alpha_geist)\n self.parent.axes.add_patch(self.pointG2)\n self.pointG2.set_visible(False)\n \n 'Geisterpunkt3 
instanziieren'\n self.pointG3 = Circle((self.xP3, self.yP3))\n self.pointG3.set_radius(self.staerke_linie)\n self.pointG3.set_facecolor(self.farbe_allgemein)\n self.pointG3.set_alpha(self.alpha_geist)\n self.parent.axes.add_patch(self.pointG3)\n self.pointG3.set_visible(False)\n \n 'Geisterpunkt4 instanziieren'\n self.pointG4 = Circle((self.xP4, self.yP4))\n self.pointG4.set_radius(self.radius_punkt)\n self.pointG4.set_facecolor(self.farbe_punkt)\n self.pointG4.set_alpha(self.alpha_geist)\n self.parent.axes.add_patch(self.pointG4)\n self.pointG4.set_visible(False)\n \n 'Geisterpunkt5 instanziieren'\n self.pointG5 = Circle((self.xP5, self.yP5))\n self.pointG5.set_radius(self.radius_punkt)\n self.pointG5.set_facecolor(self.farbe_punkt)\n self.pointG5.set_alpha(self.alpha_geist)\n self.parent.axes.add_patch(self.pointG5)\n self.pointG5.set_visible(False)\n \n 'Geisterlinienobjekte instanziieren'\n \n 'Geisterlinie1 instanziieren'\n self.lineG1 = Line2D(self.xL1, self.yL1)\n self.lineG1.set_linewidth(self.staerke_linie)\n self.lineG1.set_color(self.farbe_allgemein)\n self.lineG1.set_alpha(self.alpha_geist)\n self.lineG1.set_solid_capstyle(self.ende_linie)\n self.parent.axes.add_line(self.lineG1)\n self.lineG1.set_visible(False)\n \n 'Geisterlinie2 instanziieren'\n self.lineG2 = Line2D(self.xL2, self.yL2)\n self.lineG2.set_linewidth(self.staerke_linie)\n self.lineG2.set_color(self.farbe_allgemein)\n self.lineG2.set_alpha(self.alpha_geist)\n self.lineG2.set_solid_capstyle(self.ende_linie)\n self.parent.axes.add_line(self.lineG2)\n self.lineG2.set_visible(False)\n \n 'Geisterlinie3 instanziieren'\n self.lineG3 = Line2D(self.xL3, self.yL3)\n self.lineG3.set_linewidth(self.staerke_linie)\n self.lineG3.set_color(self.farbe_allgemein)\n self.lineG3.set_alpha(self.alpha_geist)\n self.lineG3.set_solid_capstyle(self.ende_linie)\n self.parent.axes.add_line(self.lineG3)\n self.lineG3.set_visible(False)\n \n 'Geisterlinie4 instanziieren'\n self.lineG4 = Line2D(self.xL4, self.yL4)\n self.lineG4.set_linewidth(self.staerke_linie)\n self.lineG4.set_color(self.farbe_allgemein)\n self.lineG4.set_alpha(self.alpha_geist)\n self.lineG4.set_solid_capstyle(self.ende_linie)\n self.parent.axes.add_line(self.lineG4)\n self.lineG4.set_visible(False)\n \n 'Attribut self.press'\n self.press = None\n \n 'Attribut self.background'\n self.background = None\n \n 'Aufruf der Methode connect'\n self.connect()\n\n '''Methode connect - Die Methode dient dem Verbinden der Events mit den\n entsprechenden Methoden. Beim Drücken der linken Maustaste, dem button_\n press_event, wird die Methode on_press ausgeführt. Weiter führt das \n Loslassen der linken Maustaste, dem button_release_event, zum Ausführen \n der Methode on_release. Zuletzt wird bei der Bewegung der Maus, dem \n motion_notify_event, die Methode on_motion aufgerufen. 
Vorraussetzung \n für ein Event ist das Drücken oder Loslassen der linken Maustaste auf \n oder das Bewegen der Maus über die Zeichenfläche.'''\n def connect(self):\n \n 'Verbindet das button_press_event mit der Methode on_press'\n self.cidpress = self.point.figure.canvas.mpl_connect(\n 'button_press_event', self.on_press)\n \n 'Verbindet das button_release_event mit der Methode on_release'\n self.cidrelease = self.point.figure.canvas.mpl_connect(\n 'button_release_event', self.on_release)\n \n 'Verbindet das motion_notify_event mit der Methode on_motion'\n self.cidmotion = self.point.figure.canvas.mpl_connect(\n 'motion_notify_event', self.on_motion)\n \n '''Methode on_press - Die Methode on_press wird beim Drücken der linken\n Maustaste auf der Zeichenfläche ausgeführt.'''\n def on_press(self, event):\n \n '''Die Methode wird weiter ausgeführt wenn der Mauszeiger \n zum Zeitpunkt des Drückens der linken Maustaste innerhalb des \n Koordinatensystems liegt.'''\n if event.inaxes != self.point.axes: return\n \n '''Die Methode wird weiter ausgeführt wenn vor dem Drücken der \n linken Maustaste kein Circle-Objekt ausgewählt ist.'''\n if SeitenansichtRoboter.lock is not None: return \n \n 'Fallunterscheidung - Punkt4 oder Punkt5'\n if self.point4.contains(event)[0] == True:\n self.point = self.point4\n \n elif self.point5.contains(event)[0] == True:\n self.point = self.point5\n \n contains, attrd = self.point.contains(event)\n \n '''Die Methode wird weiter ausgeführt wenn der Mauszeiger zum\n Zeitpunkt des Drückens der linken Maustaste auf dem bewegbaren\n Circle-Objekt liegt.'''\n if not contains: return\n \n '''Den Mittelpunkt des Circle-Objektes und die x- und y-Koordinaten\n des Mauszeigers zum Zeitpunkt des Drückens der linken Maustaste\n speichern.'''\n self.press = self.point.center, event.xdata, event.ydata\n \n 'Das Klassenattribut mit dem Wert self belegen.'\n SeitenansichtRoboter.lock = self\n \n 'Sichtbarkeit des Geistes ändern'\n self.pointG2.set_visible(True)\n self.pointG4.set_visible(True)\n self.pointG5.set_visible(True)\n self.lineG1.set_visible(True)\n self.lineG2.set_visible(True)\n self.lineG3.set_visible(True)\n self.lineG4.set_visible(True)\n \n 'Methoden der Animation-Blit-Technik ausführen'\n canvas = self.point.figure.canvas\n axes = self.point.axes\n self.point2.set_animated(True)\n self.point3.set_animated(True)\n self.point4.set_animated(True)\n self.point5.set_animated(True)\n self.line1.set_animated(True)\n self.line2.set_animated(True)\n self.line3.set_animated(True)\n self.line4.set_animated(True)\n canvas.draw()\n self.background = canvas.copy_from_bbox(self.point.axes.bbox)\n axes.draw_artist(self.point2)\n axes.draw_artist(self.point3)\n axes.draw_artist(self.point4)\n axes.draw_artist(self.point5)\n axes.draw_artist(self.line1)\n axes.draw_artist(self.line2)\n axes.draw_artist(self.line3)\n axes.draw_artist(self.line4)\n canvas.blit(axes.bbox)\n \n '''Methode on_motion - Die Methode on_motion wird beim Bewegen des\n Mauszeigers über die Zeichenfläche ausgeführt.'''\n def on_motion(self, event):\n \n '''Die Methode wird weiter ausgeführt wenn das Klassenattribut \n mit dem Wert self belegt ist. 
Das Circle-Objekt darf ohne angeklickt \n zu sein nicht bewegt werden.'''\n if SeitenansichtRoboter.lock is not self: return\n \n '''Die Methode wird weiter ausgeführt wenn der Mauszeiger \n innerhalb des Koordinatensystems bewegt wird.'''\n if event.inaxes != self.point.axes: return\n \n '''Den Mittelpunkt des Circle-Objektes und die x- und y-Koordinaten\n des Mauszeigers zum Zeitpunkt des Drückens der linken Maustaste\n zuweisen.'''\n self.point.center, xpress, ypress = self.press \n \n 'Fallunterscheidung - Punkt4 oder Punkt5'\n if self.point == self.point4:\n \n 'Verschiebung von Punkt4 in x-, und y-Richtung'\n dx = event.xdata - xpress\n dy = event.ydata - ypress\n \n 'neue Koordinaten berechnen'\n x_neu = self.point.center[0] + dx\n y_neu = self.point.center[1] + dy\n \n 'Begrenzung auf [x_min,...[ und [y_min...['\n if x_neu < self.x_min and dx < 0:\n x_neu = self.x_min\n if y_neu < self.y_min and dy < 0:\n y_neu = self.y_min\n \n 'Länge des Ortsvektors zu Punkt4 berechnen'\n self.r4 = (x_neu**2 + y_neu**2)**(1/2)\n \n 'Hilfswinkel berechnen - Cosinussatz'\n b = (self.a3**2 - self.l1**2 - self.r4**2)/(-2*self.l1*self.r4)\n if b >= 1: b = 1\n elif b <= -1: b = -1\n beta1 = acos(b)\n beta2 = acos(x_neu/self.r4)\n \n 'Winkel von Punkt2 berechnen'\n self.phi2 = beta1 + beta2\n \n 'Winkel auf [0, 2*pi] normieren um Rundungsfehler auszugleichen'\n self.phi2 = winkel_normieren(self.phi2)\n \n 'Winkelgrenzen zuweisen'\n phi2_min = self.theta2_min\n phi2_max = self.theta2_max\n \n 'Winkelbegrenzung [phi_min, phi_max]'\n if self.phi2 < phi2_min:\n self.phi2 = phi2_min\n elif self.phi2 > phi2_max:\n self.phi2 = phi2_max\n \n 'Theta2 (Denavit-Hartenberg-Parameter) aktualisieren'\n self.theta2 = self.phi2\n \n 'Hilfswinkel berechnen - Cosinussatz'\n b = (self.r4**2 - self.l1**2 - self.a3**2)/(-2*self.l1*self.a3)\n if b >= 1: b = 1\n elif b <= -1: b = -1\n beta3 = acos(b)\n \n 'Winkelgrenzen zuweisen'\n beta3_min = self.theta3_min + pi\n beta3_max = self.theta3_max + pi\n \n 'Winkelbegrenzung [phi_min, phi_max]'\n if beta3 < beta3_min:\n beta3 = beta3_min\n elif beta3 > beta3_max:\n beta3 = beta3_max\n \n 'Winkel von Punkt4 berechnen'\n self.phi4 = beta3 - (pi - self.phi2)\n \n 'Theta3 (Denavit-Hartenberg-Parameter) berechnen'\n self.theta3 = -(pi - beta3)\n \n 'Winkel von Punkt3 berechnen'\n self.phi3 = self.phi4 + pi/4 - 8*pi/180\n \n 'Winkel auf [0, 2*pi] normieren um Rundungsfehler auszugleichen'\n self.phi3 = winkel_normieren(self.phi3)\n \n 'Mittelpunktkoordinaten der Circle-Objekte aktualisieren'\n \n 'Koordinaten von Punkt2'\n self.xP2 = self.l1*cos(self.phi2)\n self.yP2 = self.l1*sin(self.phi2)\n 'Koordinaten akualisieren'\n self.point2.center = ((self.xP2, self.yP2))\n \n 'Koordinaten von Punkt3'\n self.xP3 = self.point2.center[0] + self.l2*cos(self.phi3)\n self.yP3 = self.point2.center[1] + self.l2*sin(self.phi3)\n 'Koordinaten akualisieren'\n self.point3.center = ((self.xP3, self.yP3))\n \n 'Koordinaten von Punkt4'\n self.xP4 = self.point2.center[0] + self.a3*cos(self.phi4)\n self.yP4 = self.point2.center[1] + self.a3*sin(self.phi4)\n 'Koordinaten akualisieren'\n self.point4.center = ((self.xP4, self.yP4))\n \n elif self.point == self.point5:\n \n 'relative Verschiebung'\n dx = self.point5.center[0] - self.point4.center[0]\n dy = self.point5.center[1] - self.point4.center[1]\n \n 'Winkel berechnen - Kreismittelpunkt'\n phi = winkel_berechnen(dx, dy)\n \n 'relative Verschiebung'\n dx_p = xpress - self.point4.center[0]\n dy_p = ypress - self.point4.center[1]\n \n 'Winkel berechnen - 
Mauszeiger beim Anklicken des Punktes'\n phi_p = winkel_berechnen(dx_p, dy_p)\n \n 'relative Verschiebung'\n dx_e = event.xdata - self.point4.center[0]\n dy_e = event.ydata - self.point4.center[1] \n \n 'Winkel berechnen - Mauszeiger bei der Bewegung'\n phi_e = winkel_berechnen(dx_e, dy_e)\n \n 'Winkeländerung berechnen'\n dphi = phi_e - phi_p\n \n 'neuen Positionswinkel berechnen'\n phi_neu = phi + dphi \n \n 'Winkel auf [0, 2*pi] normieren um Rundungsfehler auszugleichen'\n phi_neu = winkel_normieren(phi_neu)\n \n 'Winkel von Punkt5 berechnen'\n self.phi5 = phi_neu\n \n 'Winkelgrenzen - senkrechter Anschlag'\n if self.phi5 > pi/2 and self.phi5 < pi:\n self.phi5 = pi/2\n elif self.phi5 >= pi and self.phi5 < 3*pi/2:\n self.phi5 = 3*pi/2\n \n 'Winkelgrenzen - rechtwinkeliger Anschlag'\n phi5_min = self.phi4 - 8*pi/180 + 3*pi/2\n phi5_max = self.phi4 - 8*pi/180 + pi/2\n \n 'Winkelbegrenzung [phi_min, phi_max]'\n if self.phi5 >= pi and self.phi5 < phi5_min:\n self.phi5 = phi5_min\n elif self.phi5 > phi5_max and self.phi5 < pi:\n self.phi5 = phi5_max\n \n 'Theta4 (Denavit-Hartenberg-Parameter) aktualisieren'\n if self.phi5 >= 0 and self.phi5 <= pi/2:\n self.theta4 = pi/2 - self.phi4 + self.phi5\n elif self.phi5 >= 3*pi/2 and self.phi5 < 2*pi:\n self.theta4 = pi/2 - self.phi4 + self.phi5 - 2*pi\n \n 'Koordinaten von Punkt5'\n self.xP5 = self.point4.center[0] + self.l4*cos(self.phi5)\n self.yP5 = self.point4.center[1] + self.l4*sin(self.phi5)\n 'Koordinaten akualisieren'\n self.point5.center = ((self.xP5, self.yP5))\n \n 'Endpunktkoordinaten der Line2D-Objekte aktualisieren'\n \n 'Koordinaten von Linie1'\n self.xL1 = (self.point1.center[0], self.point2.center[0])\n self.yL1 = (self.point1.center[1], self.point2.center[1])\n 'Koordinaten akualisieren'\n self.line1.set_data(self.xL1, self.yL1)\n \n 'Koordinaten von Linie2'\n self.xL2 = (self.point2.center[0], self.point3.center[0])\n self.yL2 = (self.point2.center[1], self.point3.center[1])\n 'Koordinaten akualisieren'\n self.line2.set_data(self.xL2, self.yL2)\n \n 'Koordinaten von Linie3'\n self.xL3 = (self.point3.center[0], self.point4.center[0])\n self.yL3 = (self.point3.center[1], self.point4.center[1])\n 'Koordinaten akualisieren'\n self.line3.set_data(self.xL3, self.yL3)\n \n 'Koordinaten von Linie4'\n self.xL4 = (self.point4.center[0], self.point5.center[0])\n self.yL4 = (self.point4.center[1], self.point5.center[1])\n 'Koordinaten akualisieren'\n self.line4.set_data(self.xL4, self.yL4)\n \n 'Methoden der Animation-Blit-Technik ausführen'\n canvas = self.point.figure.canvas\n axes = self.point.axes\n canvas.restore_region(self.background)\n axes.draw_artist(self.point2)\n axes.draw_artist(self.point3)\n axes.draw_artist(self.point4)\n axes.draw_artist(self.point5)\n axes.draw_artist(self.line1)\n axes.draw_artist(self.line2)\n axes.draw_artist(self.line3)\n axes.draw_artist(self.line4)\n canvas.blit(axes.bbox)\n \n 'Signal mit den neuen Koordinaten senden'\n self.xy_neu.emit(self.xP2, self.yP2, self.xP3, self.yP3, \\\n self.xP4, self.yP4, self.xP5, self.yP5)\n \n '''Methode on_release - Die Methode on_release wird beim Loslassen\n der linken Maustaste auf der Zeichenfläche ausgeführt.'''\n def on_release(self, event):\n \n '''Die Methode wird weiter ausgeführt wenn das Klassenattribut \n mit dem Wert self belegt ist.'''\n if SeitenansichtRoboter.lock is not self: return\n \n 'Werte der Attribute zurücksetzen'\n self.press = None\n SeitenansichtRoboter.lock = None\n \n 'Attribute auf False setzen'\n self.point2.set_animated(False)\n 
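# Aside (illustrative sketch added alongside this file, not original code):\n # the repeated set_animated(False) calls here could be collapsed into a\n # loop over the animated artists, e.g.\n # for artist in (self.point2, self.point3, self.point4, self.point5,\n # self.line1, self.line2, self.line3, self.line4):\n # artist.set_animated(False)\n 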
self.point3.set_animated(False)\n self.point4.set_animated(False)\n self.point5.set_animated(False)\n self.line1.set_animated(False)\n self.line2.set_animated(False)\n self.line3.set_animated(False)\n self.line4.set_animated(False)\n \n 'Hintergrund zurücksetzen'\n self.background = None\n\n 'Das gesamte Bild neu zeichnen'\n self.point.figure.canvas.draw()\n \n '''Methode ansicht_aktualisieren - Die Methode ermöglicht das\n Aktualisieren der Ansicht.'''\n def ansicht_aktualisieren(self, xP2, yP2, xP3, yP3, xP4, yP4, xP5, yP5):\n \n 'Zeichenwinkel berechnen'\n self.modellrechnung(xP2, yP2, xP3, yP3, xP4, yP4, xP5, yP5)\n \n 'Mittelpunktkoordinaten der Circle-Objekte aktualisieren'\n \n 'Koordinaten von Punkt2 aktualisieren'\n self.point2.center = (self.xP2, self.yP2)\n \n 'Koordinaten von Punkt3 aktualisieren'\n self.point3.center = (self.xP3, self.yP3)\n \n 'Koordinaten von Punkt4 aktualisieren'\n self.point4.center = (self.xP4, self.yP4)\n \n 'Koordinaten von Punkt5 aktualisieren'\n self.point5.center = (self.xP5, self.yP5)\n \n 'Endpunktkoordinaten der Line2D-Objekte aktualisieren'\n \n 'Koordinaten von Linie1'\n self.xL1 = (self.point1.center[0], self.point2.center[0])\n self.yL1 = (self.point1.center[1], self.point2.center[1])\n 'Koordinaten aktualisieren'\n self.line1.set_data(self.xL1, self.yL1)\n \n 'Koordinaten von Linie2'\n self.xL2 = (self.point2.center[0], self.point3.center[0])\n self.yL2 = (self.point2.center[1], self.point3.center[1])\n 'Koordinaten aktualisieren'\n self.line2.set_data(self.xL2, self.yL2)\n \n 'Koordinaten von Linie3'\n self.xL3 = (self.point3.center[0], self.point4.center[0])\n self.yL3 = (self.point3.center[1], self.point4.center[1])\n 'Koordinaten aktualisieren'\n self.line3.set_data(self.xL3, self.yL3)\n \n 'Koordinaten von Linie4'\n self.xL4 = (self.point4.center[0], self.point5.center[0])\n self.yL4 = (self.point4.center[1], self.point5.center[1])\n 'Koordinaten aktualisieren'\n self.line4.set_data(self.xL4, self.yL4)\n \n 'Das gesamte Bild neu zeichnen'\n self.point.figure.canvas.draw()\n \n '''Methode geisterstunde - Die Methode dient dem Ein- oder Ausblenden\n des Geistes. 
Weiter werden die Koordinaten aktualisiert.'''\n def geisterstunde(self, b):\n \n 'Mittelpunktkoordinaten der Circle-Objekte aktualisieren'\n \n 'Koordinaten von Geisterpunkt2'\n xGp2 = self.point2.center[0]\n yGp2 = self.point2.center[1]\n 'Koordinaten aktualisieren'\n self.pointG2.center = (xGp2, yGp2)\n \n 'Koordinaten von Geisterpunkt3'\n xGp3 = self.point3.center[0]\n yGp3 = self.point3.center[1]\n 'Koordinaten aktualisieren'\n self.pointG3.center = (xGp3, yGp3)\n \n 'Koordinaten von Geisterpunkt4'\n xGp4 = self.point4.center[0]\n yGp4 = self.point4.center[1]\n 'Koordinaten aktualisieren'\n self.pointG4.center = (xGp4, yGp4)\n \n 'Koordinaten von Geisterpunkt5'\n xGp5 = self.point5.center[0]\n yGp5 = self.point5.center[1]\n self.pointG5.center = (xGp5, yGp5)\n \n 'Endpunktkoordinaten der Line2D-Objekte aktualisieren'\n \n 'Koordinaten von Geisterlinie1'\n xGl1 = (self.point1.center[0], self.point2.center[0])\n yGl1 = (self.point1.center[1], self.point2.center[1])\n 'Koordinaten akualisieren'\n self.lineG1.set_data(xGl1, yGl1)\n \n 'Koordinaten von Geisterlinie2'\n xGl2 = (self.point2.center[0], self.point3.center[0])\n yGl2 = (self.point2.center[1], self.point3.center[1])\n 'Koordinaten akualisieren'\n self.lineG2.set_data(xGl2, yGl2)\n \n 'Koordinaten von Geisterlinie3'\n xGl3 = (self.point3.center[0], self.point4.center[0])\n yGl3 = (self.point3.center[1], self.point4.center[1])\n 'Koordinaten akualisieren'\n self.lineG3.set_data(xGl3, yGl3)\n \n 'Koordinaten von Geisterlinie4'\n xGl4 = (self.point4.center[0], self.point5.center[0])\n yGl4 = (self.point4.center[1], self.point5.center[1])\n 'Koordinaten akualisieren'\n self.lineG4.set_data(xGl4, yGl4)\n \n 'Sichtbarkeit des Geistes ändern'\n #Sichtbarkeit ändern - True oder False\n self.pointG2.set_visible(b)\n self.pointG4.set_visible(b)\n self.pointG5.set_visible(b)\n self.lineG1.set_visible(b)\n self.lineG2.set_visible(b)\n self.lineG3.set_visible(b)\n self.lineG4.set_visible(b)\n \n 'Das gesamte Bild neu zeichnen'\n self.point.figure.canvas.draw()\n \n '''Methode modellrechnung - Die Punkte werden als Vektorzug \n gezeichnet. 
Die Methode berechnet die zum Zeichnen notwendigen \n Winkel der vier Punkte.'''\n def modellrechnung(self, xP2, yP2, xP3, yP3, xP4, yP4, xP5, yP5):\n \n 'Kartesische Koordinaten'\n \n 'Punkt2, Punkt3, Punkt4 und Punkt5'\n self.xP2 = xP2\n self.yP2 = yP2\n self.xP3 = xP3\n self.yP3 = yP3\n self.xP4 = xP4\n self.yP4 = yP4\n self.xP5 = xP5\n self.yP5 = yP5\n\n 'Länge des Ortsvektors zu Punkt4 berechnen'\n self.r4 = (self.xP4**2 + self.yP4**2)**(1/2)\n \n 'Hilfswinkel berechnen - Cosinussatz'\n b = (self.a3**2 - self.a2**2 - self.r4**2)/(-2*self.a2*self.r4) \n if b >= 1: b = 1\n elif b <= -1: b = -1\n beta1 = acos(b)\n beta2 = acos(self.xP4/self.r4)\n \n 'Winkel von Punkt2 berechnen'\n self.phi2 = beta1 + beta2\n \n 'Winkel auf [0, 2*pi] normieren um Rundungsfehler auszugleichen'\n self.phi2 = winkel_normieren(self.phi2)\n\n 'Theta2 (Denavit-Hartenberg-Parameter) berechnen'\n self.theta2 = self.phi2\n \n 'Hilfswinkel berechnen - Cosinussatz'\n b = (self.r4**2 - self.a2**2 - self.a3**2)/(-2*self.a2*self.a3)\n if b >= 1: b = 1\n elif b <= -1: b = -1\n beta3 = acos(b)\n \n 'Winkel von Punkt4 berechnen'\n self.phi4 = beta3 - (pi - self.phi2)\n \n 'Theta3 (Denavit-Hartenberg-Parameter) berechnen'\n self.theta3 = -(pi - beta3)\n \n 'Winkel von Punkt3 berechnen'\n self.phi3 = self.phi4 + pi/4 - 8*pi/180\n \n 'Winkel auf [0, 2*pi] normieren um Rundungsfehler auszugleichen'\n self.phi3 = winkel_normieren(self.phi3)\n \n 'Verschiebung von Punkt5 relativ zu Punkt4'\n dx = self.xP5 - self.xP4\n dy = self.yP5 - self.yP4\n \n 'Winkel von Punkt5 berechnen'\n self.phi5 = winkel_berechnen(dx, dy)\n \n 'Winkel normieren'\n self.phi5 = winkel_normieren(self.phi5)\n \n 'Theta4 (Denavit-Hartenberg-Parameter) berechnen'\n if self.phi5 >= 0 and self.phi5 <= pi/2:\n self.theta4 = self.phi5 + pi/2 - 8*pi/180\n elif self.phi5 >= 3*pi/2 and self.phi5 < 2*pi:\n self.theta4 = pi/2 - 8*pi/180 - 2*pi + self.phi5\n \n '''Methode punkte_faerben - Die Methode färbt oder entfärbt die\n bewegbaren Circle-Objekte.'''\n def punkte_faerben(self, b):\n \n 'Fallunterscheidung'\n if b == True: #Farbe\n farbe = self.farbe_punkt\n elif b == False: #Schwarz\n farbe = self.farbe_allgemein\n \n 'Farbe von Punkt4 und Punkt 5 festlegen'\n self.point4.set_facecolor(farbe)\n self.point5.set_facecolor(farbe)\n \n 'Das gesamte Bild neu zeichnen'\n self.point.figure.canvas.draw()","sub_path":"RoboterSteuerung/Robotersteuerung_v1_inc_AS/Verwaltungsschale/ROBOTERSTEUERUNG/Robotersteuerung/digitaler_roboter/ansicht/seitenansicht_roboter.py","file_name":"seitenansicht_roboter.py","file_ext":"py","file_size_in_byte":31734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"502093453","text":"''''\nConsumption Tax: 10% of the total value of goods (TVG)\n Not needed if TVG is less than 10,000 yen\nImport Duty: Recorded in another file as for the relationship\n between different products and their corresponding\n taxation\nSale Tax: 10% of the TVG\n'''\nTAX = {\n \"CONSUMPTION_TAX\": 0.1,\n \"IMPORT_TAX\": 0.1,\n \"SALE_TAX\": 0.1\n}\n\nIMPORT_TAX = {\n \"Coffee\": 0.15,\n \"Furniture\": 0.03,\n \"Games\": 0.03,\n \"Paper\": 0,\n \"Rubber\": 0,\n \"Tableware\": 0.03,\n \"Tea\": 0.15,\n \"Toys\": 0.03,\n \"Others\": 0.05\n}\n\ndef get_total_tax(unit_price, unit, category):\n import_tax = 0.1\n for k in IMPORT_TAX:\n if k == category:\n import_tax = IMPORT_TAX.get(k)\n\n if unit_price * unit < 10000:\n return unit_price * (import_tax)\n return unit_price * (TAX.get(\"CONSUMPTION_TAX\") + 
import_tax)","sub_path":"price_calculator/taxation.py","file_name":"taxation.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"500685333","text":"from __future__ import print_function\n\nimport argparse\nimport sys\n\ndefault_message = 'You failed to provide a message to the `print_message` pre-commit hook via the -m or --message arg'\n\n\ndef main(argv=[]):\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-m',\n '--message',\n dest='message',\n default=default_message,\n help='the message to display when this pre-commit hook is triggered',\n )\n parser.add_argument(\n '-f',\n '--fail',\n dest='outcome',\n action='store_const',\n const=1,\n default=0,\n help='use this flag to make the pre-commit hook fail if it is triggered',\n )\n args = parser.parse_args(argv)\n print(args.message)\n return args.outcome\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"pre_commit_hooks/print_message.py","file_name":"print_message.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"166091033","text":"# -*- coding:utf-8 -*-\n\"\"\"\n@contact: adonis_wu@outlook.com\n@time: 2019-08-05\n\"\"\"\n__author__ = 'adowu'\nfrom typing import List\n\n\ndef merge_sort(nums: List[int], length: int, reverse: bool):\n \"\"\"\n O(N*logN)\n :param nums: unsorted array\n :param length: array length\n :param reverse: increase or decrease\n :return: sorted array\n \"\"\"\n split(nums, 0, length - 1, reverse)\n print(nums)\n return nums\n\n\ndef split(nums, start, end, reverse):\n if start < end:\n mid = (start + end) // 2\n split(nums, start, mid, reverse)\n split(nums, mid + 1, end, reverse)\n merge(nums, start, mid, end, reverse)\n\n\ndef merge(nums, start, mid, end, reverse):\n i, j = start, mid + 1\n temp = []\n while i <= mid and j <= end:\n if reverse:\n if nums[i] <= nums[j]:\n temp.append(nums[j])\n j += 1\n\n else:\n temp.append(nums[i])\n i += 1\n\n else:\n if nums[i] <= nums[j]:\n temp.append(nums[i])\n i += 1\n\n else:\n temp.append(nums[j])\n j += 1\n\n while i <= mid:\n temp.append(nums[i])\n i += 1\n\n while j <= end:\n temp.append(nums[j])\n j += 1\n\n nums[start:end + 1] = temp\n\n\nmerge_sort([1, 3, 2, 5, 6, 4, 1, 0, 3, 5, 1, 9, 8, 6, 5], 15, False)\n","sub_path":"algo-python/ado_sort/n_log_n/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"214116713","text":"# -*- coding: utf-8 -*-\n# @Author: SmartKeyerror\n# @Date : 18-1-22 上午10:00\n\nimport jwt\nfrom six import text_type\n\nfrom .utils import get_appuser_model, jwt_get_username_from_payload_handler, jwt_encode_handler, \\\n jwt_get_user_id_from_payload_handler, jwt_decode_handler\nfrom django.utils.encoding import smart_text\nfrom django.utils.translation import ugettext as _\nfrom rest_framework import exceptions\nfrom rest_framework.authentication import (\n BaseAuthentication, get_authorization_header\n)\nfrom .api_settings import JWT_PREFIX\n\n\nimport api_settings\n# from rest_framework_jwt.settings import api_settings\n\n\nclass BaseJSONWebTokenAuthentication(BaseAuthentication):\n \"\"\"\n Token based authentication using the JSON Web Token standard.\n \"\"\"\n def _get_token(self, jwt_val):\n \"\"\"get token from jwt_val\n \"\"\"\n try:\n prefix, token = jwt_val.split()\n except 
ValueError:\n return None\n default_prefix = JWT_PREFIX\n if prefix != default_prefix:\n return None\n token = token.decode('utf8')\n return token\n\n def authenticate(self, request):\n \"\"\"\n Returns a two-tuple of `User` and token if a valid signature has been\n supplied using JWT-based authentication. Otherwise returns `None`.\n \"\"\"\n jwt_value = get_authorization_header(request)\n if jwt_value is None:\n return None\n\n try:\n # 通过token拿到payload, 而不是jwt_value\n token = self._get_token(jwt_value)\n payload = jwt_decode_handler(token)\n except jwt.ExpiredSignature:\n msg = _('Signature has expired.')\n raise exceptions.AuthenticationFailed(msg)\n except jwt.DecodeError:\n msg = _('Error decoding signature.')\n raise exceptions.AuthenticationFailed(msg)\n except jwt.InvalidTokenError:\n raise exceptions.AuthenticationFailed()\n\n user = self.authenticate_credentials(payload)\n\n return (user, jwt_value)\n\n def authenticate_credentials(self, payload):\n \"\"\"\n Returns an active user that matches the payload's user id and email.\n \"\"\"\n AppUser = get_appuser_model()\n appuser_id = jwt_get_user_id_from_payload_handler(payload)\n\n if not appuser_id:\n msg = _('Invalid payload.')\n raise exceptions.AuthenticationFailed(msg)\n\n try:\n app_user = AppUser.objects.get(id=appuser_id)\n except AppUser.DoesNotExist:\n msg = _('Invalid signature.')\n raise exceptions.AuthenticationFailed(msg)\n\n # if not user.is_active:\n # msg = _('User account is disabled.')\n # raise exceptions.AuthenticationFailed(msg)\n\n return app_user\n\n\nclass JSONWebTokenAuthentication(BaseJSONWebTokenAuthentication):\n \"\"\"\n Clients should authenticate by passing the token key in the \"Authorization\"\n HTTP header, prepended with the string specified in the setting\n `JWT_AUTH_HEADER_PREFIX`. For example:\n\n Authorization: JWT eyJhbGciOiAiSFMyNTYiLCAidHlwIj\n \"\"\"\n www_authenticate_realm = 'api'\n\n def get_jwt_value(self, request):\n auth = get_authorization_header(request).split()\n auth_header_prefix = api_settings.JWT_PREFIX.lower()\n\n if not auth:\n if api_settings.JWT_AUTH_COOKIE:\n return request.COOKIES.get(api_settings.JWT_AUTH_COOKIE)\n return None\n\n if smart_text(auth[0].lower()) != auth_header_prefix:\n return None\n\n if len(auth) == 1:\n msg = _('Invalid Authorization header. No credentials provided.')\n raise exceptions.AuthenticationFailed(msg)\n elif len(auth) > 2:\n msg = _('Invalid Authorization header. 
Credentials string '\n 'should not contain spaces.')\n raise exceptions.AuthenticationFailed(msg)\n\n return auth[1]\n\n def authenticate_header(self, request):\n \"\"\"\n Return a string to be used as the value of the `WWW-Authenticate`\n header in a `401 Unauthenticated` response, or `None` if the\n authentication scheme should return `403 Permission Denied` responses.\n \"\"\"\n return '{0} realm=\"{1}\"'.format(api_settings.JWT_PREFIX, self.www_authenticate_realm)\n","sub_path":"apps/weixin/wx_middleware/authentication.py","file_name":"authentication.py","file_ext":"py","file_size_in_byte":4357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"5902813","text":"import datetime\nclass ShopApp:\n\tuserDetails = []\n\tmessageDetails = []\n\tdefmessage = 'Thank you {} for purchasing things from XYZ store. Your total bill is {} purchased at {}'\n\tdef adduser(self,name,cost):\n\t\tname = name[0].upper() + name[1:].lower()\n\t\tcost = 'Rs ' + str(cost)\n\t\tdict1 = {'name' : name,'cost':cost}\n\t\tdt = datetime.datetime.now()\n\t\tdtformat = dt.strftime('%d-%m-%y %H:%M:%S')\n\t\tdict1['date'] = dtformat\n\t\tShopApp.userDetails.append(dict1)\n\tdef getdetails(self):\n\t\treturn self.userDetails\n\tdef messageuser(self):\n\t\tfor msg in self.getdetails():\n\t\t\tnm = msg['name']\n\t\t\tcs = msg['cost']\n\t\t\ttm = msg['date']\n\t\t\tnew_message = self.defmessage.format(nm,cs,tm)\n\t\t\tself.messageDetails.append(new_message)\n\t\treturn self.messageDetails\nuser1 = ShopApp()\nuser1.adduser('viswa', 1700)\nuser1.adduser('shiva', 2500)\nprint(user1.getdetails())\nprint(user1.userDetails)\nprint(user1.messageuser())\n","sub_path":"Scripts-Sublime/ShoppingApp.py","file_name":"ShoppingApp.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"228331829","text":"#!/usr/bin/python\r\n# -*-coding:utf-8-*-\r\n# @author galaxy\r\n\r\nimport os\r\nimport pandas as pd\r\n\r\n#####\r\nresult_dir = \"/data5/galaxy/project/promoter_TF_enrich/data/total_gene/gene_bed\"\r\nif not os.path.exists(result_dir):\r\n os.makedirs(result_dir)\r\n##########\r\ngenome_region_bed = \"/data/database/GRCh38/GENCODE/Genes_ensembl.bed\"\r\ndf_bed = pd.read_table(genome_region_bed, sep=\"\\t\", header=None, names=[\"chr\", \"a\", \"b\", \"gene\", \"c\", \"d\"])\r\n#####################\r\nexpre_file = \"/data3/xs/tissue_m6a/2018.1/fig2/fig2_1_21/Total-nofilter.txt\"\r\ndf = pd.read_table(expre_file, sep=\"\\t\", index_col=0)\r\ndf_exp = df[[col for col in df.columns if \"exp\" in col]]\r\ndel df_exp[\"expression_cv\"]\r\nfor col in df_exp.columns:\r\n tissue = col.split(\"_exp\")[0].lower()\r\n print(tissue)\r\n result_file = os.path.join(result_dir, \"%s.bed\" % tissue)\r\n df_col = df_exp[col]\r\n df_pos = pd.DataFrame(df_col[df_col > 0]).reset_index()\r\n df_overlap = pd.merge(df_pos, df_bed, how=\"left\")\r\n # keep a distinct name so the reference bed (df_bed) is not overwritten between tissues\r\n df_out = df_overlap[[\"chr\", \"a\", \"b\", \"gene\", \"c\", \"d\"]].dropna(how=\"any\").drop_duplicates()\r\n df_out[\"a\"], df_out[\"b\"] = df_out[\"a\"].astype(int), df_out[\"b\"].astype(int)\r\n df_sort = df_out.sort_values([\"chr\", \"a\"])\r\n df_sort.to_csv(result_file, sep=\"\\t\", header=None, 
index=False)\r\n\r\n\r\n","sub_path":"chip_seq_pipeline/overlap_enrich/promoter_enrich/02_0_filter_by_expre.py","file_name":"02_0_filter_by_expre.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"82155362","text":"import tensorflow as tf\nimport math\nfrom . import utils\nfrom utils import constants\n\nHEIGHT = 100\nWIDTH = 100\n# number of channels for an image - jpeg image has RGB channels\nCHANNELS = 3\n# number of channels for the input layer of the network: HSV + gray scale\nNETWORK_DEPTH = 4\n\nbatch_size = 60\ninput_size = HEIGHT * WIDTH * NETWORK_DEPTH\n# number of max pool operations used in the network structure;\n# used when calculating the input size for the first fully connected layer\n# MUST BE UPDATED if the number of max pool operations changes or if the stride of max pool changes\nnumber_of_max_pools = 4\n# this works because each max pool layer has a 2 x 2 filter and stride 2\n# in case this changes, the formula will no longer be accurate\nnew_width = math.ceil(WIDTH/(1 << number_of_max_pools))\nnew_height = math.ceil(HEIGHT/(1 << number_of_max_pools))\n# probability to keep the values after a training iteration\ndropout = 0.8\n\n# placeholder for input layer\nX = tf.placeholder(tf.float32, [None, input_size], name=\"X\")\n# placeholder for actual labels\nY = tf.placeholder(tf.int64, [batch_size], name=\"Y\")\n\n# number of activation maps for each convolutional layer\nnumber_of_act_maps_conv1 = 16\nnumber_of_act_maps_conv2 = 32\nnumber_of_act_maps_conv3 = 64\nnumber_of_act_maps_conv4 = 128\n\n# number of outputs for each fully connected layer\nnumber_of_fcl_outputs1 = 1024\nnumber_of_fcl_outputs2 = 256\n\ninitial_learning_rate = 0.001\nfinal_learning_rate = 0.00001\nlearning_rate = initial_learning_rate\n\n\ndef conv_net(X, weights, biases, dropout):\n X = tf.reshape(X, shape=[-1, HEIGHT, WIDTH, NETWORK_DEPTH])\n\n conv1 = utils.conv2d('conv1', X, weights['conv_weight1'], biases['conv_bias1'])\n conv1 = utils.maxpool2d('max_pool1', conv1, k=2)\n\n conv2 = utils.conv2d('conv2', conv1, weights['conv_weight2'], biases['conv_bias2'])\n conv2 = utils.maxpool2d('max_pool2', conv2, k=2)\n\n conv3 = utils.conv2d('conv3', conv2, weights['conv_weight3'], biases['conv_bias3'])\n conv3 = utils.maxpool2d('max_pool3', conv3, k=2)\n\n conv4 = utils.conv2d('conv4', conv3, weights['conv_weight4'], biases['conv_bias4'])\n conv4 = utils.maxpool2d('max_pool4', conv4, k=2)\n\n fc1 = tf.reshape(conv4, shape=[-1, weights['fcl_weight1'].get_shape().as_list()[0]])\n fc1 = tf.nn.relu(tf.add(tf.matmul(fc1, weights['fcl_weight1']), biases['fcl_bias1']))\n fc1 = tf.nn.dropout(fc1, dropout)\n\n fc2 = tf.nn.relu(tf.add(tf.matmul(fc1, weights['fcl_weight2']), biases['fcl_bias2']))\n fc2 = tf.nn.dropout(fc2, dropout)\n\n out = tf.add(tf.matmul(fc2, weights['out_weight']), biases['out_bias'], name='softmax')\n return out\n\n\ndef update_learning_rate(acc, learn_rate):\n return max(learn_rate - acc * learn_rate * 0.9, final_learning_rate)\n\n\nweights = {\n 'conv_weight1': utils.get_variable('conv_weight1', [5, 5, NETWORK_DEPTH, number_of_act_maps_conv1],\n tf.truncated_normal_initializer(stddev=5e-2, dtype=tf.float32)),\n 'conv_weight2': utils.get_variable('conv_weight2', [5, 5, number_of_act_maps_conv1, number_of_act_maps_conv2],\n tf.truncated_normal_initializer(stddev=5e-2, dtype=tf.float32)),\n 'conv_weight3': utils.get_variable('conv_weight3', [5, 5, number_of_act_maps_conv2, 
number_of_act_maps_conv3],\n tf.truncated_normal_initializer(stddev=5e-2, dtype=tf.float32)),\n 'conv_weight4': utils.get_variable('conv_weight4', [5, 5, number_of_act_maps_conv3, number_of_act_maps_conv4],\n tf.truncated_normal_initializer(stddev=5e-2, dtype=tf.float32)),\n 'fcl_weight1': utils.get_variable('fcl_weight1', [new_width * new_height * number_of_act_maps_conv4, number_of_fcl_outputs1],\n tf.truncated_normal_initializer(stddev=5e-2, dtype=tf.float32)),\n 'fcl_weight2': utils.get_variable('fcl_weight2', [number_of_fcl_outputs1, number_of_fcl_outputs2],\n tf.truncated_normal_initializer(stddev=5e-2, dtype=tf.float32)),\n 'out_weight': utils.get_variable('out_weight', [number_of_fcl_outputs2, constants.num_classes],\n tf.truncated_normal_initializer(stddev=5e-2, dtype=tf.float32)),\n}\nbiases = {\n 'conv_bias1': tf.Variable(tf.zeros([number_of_act_maps_conv1])),\n 'conv_bias2': tf.Variable(tf.zeros([number_of_act_maps_conv2])),\n 'conv_bias3': tf.Variable(tf.zeros([number_of_act_maps_conv3])),\n 'conv_bias4': tf.Variable(tf.zeros([number_of_act_maps_conv4])),\n 'fcl_bias1': tf.Variable(tf.zeros([number_of_fcl_outputs1])),\n 'fcl_bias2': tf.Variable(tf.zeros([number_of_fcl_outputs2])),\n 'out_bias': tf.Variable(tf.zeros([constants.num_classes]))\n}\n","sub_path":"src/image_classification/network_structure/fruit_network.py","file_name":"fruit_network.py","file_ext":"py","file_size_in_byte":4744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"545262888","text":"import unittest\nimport bzd.utils.worker\nimport time\n\n\nclass TestWorker(unittest.TestCase):\n\n\t@staticmethod\n\tdef foo(x, stdout):\n\t\tstdout.write(\"Hello\")\n\t\treturn x * x\n\n\t@staticmethod\n\tdef throwWorkload(x, stdout):\n\t\traise Exception(\"dummy\")\n\n\t@staticmethod\n\tdef blockingWorkload(x, stdout):\n\t\ttime.sleep(10000)\n\n\tdef Empty(self) -> None:\n\t\tworker = bzd.utils.worker.Worker(TestWorker.foo)\n\t\tworker.start()\n\t\tresult = list(worker.data())\n\t\tworker.stop()\n\t\tself.assertEqual(len(result), 0)\n\n\tdef testSingleWorkload(self) -> None:\n\t\tworker = bzd.utils.worker.Worker(TestWorker.foo)\n\t\tworker.start()\n\t\tworker.add(12)\n\t\tresult = list(worker.data())\n\t\tworker.stop()\n\t\tself.assertEqual(len(result), 1)\n\t\tself.assertEqual(result[0].isSuccess(), True)\n\t\tself.assertEqual(result[0].getResult(), 12 * 12)\n\t\tself.assertEqual(result[0].getOutput(), \"Hello\")\n\n\tdef testMultiWorkload(self) -> None:\n\t\tworker = bzd.utils.worker.Worker(TestWorker.foo)\n\t\tfor i in range(100):\n\t\t\tworker.add(i)\n\t\tself.assertEqual(worker.context.count.value, 100)\n\t\tworker.start()\n\t\tresult = list(worker.data())\n\t\tworker.stop()\n\t\tself.assertEqual(len(result), 100)\n\n\tdef testThrowingWorkload(self) -> None:\n\t\tworker = bzd.utils.worker.Worker(TestWorker.throwWorkload)\n\t\tworker.add(42)\n\t\tworker.start()\n\t\tresult = list(worker.data())\n\t\tworker.stop()\n\t\tself.assertEqual(len(result), 1)\n\t\tself.assertEqual(result[0].isSuccess(), False)\n\n\tdef testTimeoutWorkload(self) -> None:\n\t\tworker = bzd.utils.worker.Worker(TestWorker.blockingWorkload)\n\t\tworker.add(42, timeoutS=1)\n\t\tworker.start()\n\t\tresult = list(worker.data())\n\t\tworker.stop()\n\t\tself.assertEqual(len(result), 1)\n\t\tself.assertEqual(result[0].isSuccess(), False)\n\n\nif __name__ == 
'__main__':\n\tunittest.main()\n","sub_path":"python/bzd/utils/tests/worker_test.py","file_name":"worker_test.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"641454149","text":"# -*- coding: utf-8 -*-\n\"\"\"\nME 6761 Acoustics II, Homework 5, Problem 2c\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Set distance variable\nr = np.logspace( -2, 2.5, 1000 )\n\n# Define source parameters\nf = 1E3; # [Hz]\nxs = 0.3; # [m]\n\nomega = 2*np.pi*f;\nc = 343; # Assume air [m/s]\nk = omega/c;\n\n# Define Fraunhoeffer and Fresnel Parameters\nfresnelParameter = xs/r;\nfraunhoeferParametrer = (k*xs**2)/r;\n\n# Plot each\nplt.loglog( r, fresnelParameter, 'k', label=r\"$r'/r$\" );\nplt.loglog( r, fraunhoeferParametrer, '--k', label=r\"$kr'\\,^{2}/r$\" );\n\nplt.xlabel(r'$r$ [m]', fontsize=18, family='serif');\nxTickValues = [0.1, 1, 10, 100];\nxTickLabels = [r'$0.1$', r'$1$', r'$10$', r'$100$'];\nplt.xticks( xTickValues, xTickLabels, fontsize=15, family='serif' );\n\nplt.ylabel('Parameter Value',fontsize=14, family='serif');\nyTickValues = [0.1, 1, 10];\nyTickLabels = [r'$0.1$', r'$1$', r'$10$'];\nplt.yticks( yTickValues, yTickLabels, fontsize=15, family='serif' );\n\nplt.ylim([0.01, 10]);\nplt.xlim([0.1, 100]);\n\nplt.legend(loc='best', frameon=False, fontsize=18, \\\n prop={'family':'serif'});","sub_path":"me6761/hw5/problem2c.py","file_name":"problem2c.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"480490525","text":"\"\"\"\n处理process数据的目的:\n1. 处理只包含正常数据的文件\n\n主要行为是按照核心数进行分开\n\"\"\"\nimport os\nfrom typing import Tuple, Union, List\n\nimport pandas as pd\n\nfrom utils.DefineData import FAULT_FLAG, CPU_FEATURE\n\nprefixtime = \"single\"\nfaultprefix = \"fault_\"\n\n# 是否剔除0数据\nisexcludeNormal = True\nsavedatapath = \"tmp/Data\"\ndatapath = [\n \"D:\\\\HuaweiMachine\\\\测试数据\\\\wrfrst_normal_e5\\\\result\\\\normal_single\\\\wrfrst_e5-43_process-2.csv\",\n]\n\n\"\"\"\n函数功能: 得到指定路径下以prefix为前缀的下一个目录名\nsavepath是包含错误码的\n\"\"\"\n\n\ndef getTimeFileName(prefix: str, savepath: str) -> Union[Tuple[None, bool], Tuple[str, bool]]:\n if not os.path.exists(savepath):\n return None, True\n listdirs = os.listdir(savepath)\n lenprefix = len([idir for idir in listdirs if idir.startswith(prefix)])\n return prefix + str(lenprefix + 1), False\n\n\n# 获得与当前位置beginpos相同的内容且连续的最后一个位置的下一个位置\n# 如果为返回值为-1,则代表这个初始位置不可用\ndef getListNextNotSame(l1: list, beginpos: int) -> int:\n if beginpos >= len(l1):\n return -1\n i = beginpos + 1\n for i in range(beginpos + 1, len(l1) + 1):\n if i == len(l1):\n return len(l1)\n if l1[i] != l1[beginpos]:\n return i\n\n\n\"\"\"\ndf在保存前会调用这个函数,用来对数据进行处理\n\"\"\"\n\nfeatureSub = [\"user\", \"system\"]\n\n\ndef DataProcess(df: pd.DataFrame) -> pd.DataFrame:\n df, err = subtractLastLineFromDataFrame(df, featureSub)\n return df\n\n\n\"\"\"\n- 按照错误码进行划分, 得到一系列的list\n\"\"\"\n\n\ndef SplitFaultFlag(df: pd.DataFrame) -> List[Tuple[int, pd.DataFrame]]:\n respdList = []\n nowpos = 0\n while True:\n nextpos = getListNextNotSame(df[FAULT_FLAG], nowpos)\n if nextpos == -1:\n break\n nowflag = df.iloc[nowpos][FAULT_FLAG]\n respdList.append((nowflag, df.iloc[nowpos:nextpos]))\n nowpos = nextpos\n return respdList\n\n\n\"\"\"\n- 按照核心数进行划分, 得到一系列的list\n\"\"\"\n\n\ndef SplitCores(df: pd.DataFrame) -> List[Tuple[int, pd.DataFrame]]:\n if CPU_FEATURE not in df.columns.array:\n 
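# Aside (illustrative sketch added alongside this script, not original code):\n # the manual per-core split performed below is roughly equivalent to a\n # pandas groupby, e.g. [(icore, DataProcess(tpd)) for icore, tpd in df.groupby(CPU_FEATURE)]\n 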
print(\"函数SplitCores错误\")\n print(\"{} 这一列在表格中不存在\".format(CPU_FEATURE))\n exit(1)\n corelist = list(set(df[CPU_FEATURE]))\n coreList = []\n for icore in corelist:\n tpd = df.loc[df[CPU_FEATURE] == icore]\n tpd.reset_index(drop=True)\n # 将CPU_FEATURE去掉\n # coreDict[icore] = tpd.drop(CPU_FEATURE, axis=1)\n tpd = DataProcess(tpd)\n coreList.append((icore, tpd))\n return coreList\n\n\n\"\"\"\n- 将错误码和DataFrame进行文件的保存\n 此时假设所有的时间都是连续\n 该函数会自动处理user和system等累计值数据\n\"\"\"\n\n\ndef SaveDataFrame(faulty: int, faultyDataFrame: pd.DataFrame):\n faultypath = os.path.join(savedatapath, faultprefix + str(faulty))\n if not os.path.exists(faultypath):\n os.makedirs(faultypath)\n timefilename, err = getTimeFileName(prefix=prefixtime, savepath=faultypath)\n corefilepath = os.path.join(faultypath, timefilename)\n if not os.path.exists(corefilepath):\n os.makedirs(corefilepath)\n if err:\n print(\"SaveDataFrame函数错误\")\n print(\"{} 路径不存在\".format(faultypath))\n exit(1)\n corepds = SplitCores(faultyDataFrame)\n for corenum, corepd in corepds:\n corefilename = os.path.join(corefilepath, str(corenum) + \".csv\")\n corepd.to_csv(corefilename, index=False)\n\n\n\"\"\"\n- 只用来分割和保存一个文件中的数据\n\"\"\"\n\n\ndef DealOneFile(df: pd.DataFrame) -> bool:\n # 保证有标签这个选项\n if FAULT_FLAG not in df:\n return False\n # 得到一个错误码 加 DataFrame的结构\n faultList = SplitFaultFlag(df)\n for ifault, faultDataFrame in faultList:\n if isexcludeNormal and ifault == 0:\n continue\n\n print(\"进行错误码{}的保存\".format(ifault).center(40, \"*\"))\n SaveDataFrame(ifault, faultDataFrame)\n\n\n\"\"\"\n- 将DataFrame中的columns都减去上一行,第一行等于0\n\"\"\"\n\n\ndef subtractLastLineFromDataFrame(df: pd.DataFrame, columns: List) -> Union[\n Tuple[None, bool], Tuple[pd.DataFrame, bool]]:\n df = df.copy()\n if len(df) <= 1:\n return None, True\n # 先将整个表格往上一隔\n dfcolumns_1 = df.loc[:, columns].shift(periods=-1, axis=0, fill_value=0)\n # 然后相减\n dfcolumns_2 = dfcolumns_1 - df.loc[:, columns]\n # 然后下一一位\n df.loc[:, columns] = dfcolumns_2.shift(periods=1, axis=0, fill_value=0)\n return df, False\n\n\nif __name__ == \"__main__\":\n for ipath in datapath:\n df = pd.read_csv(ipath)\n # 数据预处理部分\n # 1. 
将对应的每个文件的都减去第一行和前一行\n # 数据分割部分\n SaveDataFrame(0, df)\n","sub_path":"datascript/2.单机-测试数据-正常数据处理.py","file_name":"2.单机-测试数据-正常数据处理.py","file_ext":"py","file_size_in_byte":4947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"460630538","text":"import pytest\nimport time\nimport yaml\nimport random\nimport sentry_sdk\n\n# If you print 'driver', it's not an object, it's \"\"\n@pytest.mark.usefixtures(\"driver\")\ndef test_add_to_cart_2(driver):\n\n sentry_sdk.set_tag(\"py_test\", \"test_add_to_cart_2\")\n with open('endpoints2.yaml', 'r') as stream:\n data_loaded = yaml.safe_load(stream)\n endpoints = data_loaded['react_endpoints']\n\n for endpoint in endpoints:\n sentry_sdk.set_tag(\"endpoint\", endpoint)\n reported = False\n clickedButtons = 0\n missedButtons = 0\n\n for i in range(random.randrange(20)):\n # Loads the homepage\n driver.get(endpoint)\n\n # Buttons not be available if tools did not load in time\n try:\n buy_button = driver.find_element_by_css_selector('.item button')\n for i in range(random.randrange(3) + 3):\n buy_button.click()\n driver.find_element_by_css_selector('.sidebar button').click()\n clickedButtons = clickedButtons + 1\n except Exception as err:\n missedButtons = missedButtons + 1\n if reported == False:\n sentry_sdk.capture_exception(err)\n reported = True\n time.sleep(random.randrange(3) + 3)\n\n with sentry_sdk.configure_scope() as scope:\n scope.set_tag(\"clickedButtons\", clickedButtons)\n scope.set_tag(\"missedButtons\", missedButtons)\n msg = \"\"\n if 'platform' in scope._tags:\n msg = msg + scope._tags['platform']\n if 'browserName' in scope._tags:\n msg = msg + \" - \" + scope._tags['browserName']\n if msg == \"\":\n msg = \"Finished Endpoint\"\n else:\n msg = \"Finished Endpoint: %s\" % (msg)\n sentry_sdk.capture_message(msg)\n \n if missedButtons > 0:\n raise 'unable to click button somewhere in this test'\n\n \n","sub_path":"frontend_tests/test_add_to_cart_2.py","file_name":"test_add_to_cart_2.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"119282994","text":"import time\nimport glob\nimport pandas as pd\nimport datetime as dt\nimport numpy as np\nimport dateutil\nimport logging\nimport os\nimport csv\nimport sys\nimport win32com.client\nfrom win32com.client import constants as comConst\n\nndrv = r'\\\\P7FS0001\\ed'\ndirWork = ndrv + r'\\Sancho\\prog\\Database'\nfileLog = dirWork + r'\\log\\GenEDD_DailyData_Xls_' + dt.datetime.now().strftime('%Y%m%d_%H%M%S') + '.log'\ndirBBG = ndrv + r'/Sancho/data/BBG'\ndirWrntBbg = dirBBG + r'/WarrantsDayClose'\ndirCbbcBbg = dirBBG + r'/CBBCsDayClose'\ndirHKEx = ndrv + r'\\Sancho\\data\\HKExData\\Derivatives\\Warrants'\ndirWrntHKEx = ndrv + r'/Sancho/data/HKExData/Derivatives/Warrants/ListedWarrants'\ndirCbbcHKEx = ndrv + r'/Sancho/data/HKExData/Derivatives/CBBCs/ListedCBBCs'\ndirEDD = ndrv + r'\\Trading (EDD)\\Warrants\\Analysis\\Daily'\ndirNewDW = ndrv + r'\\Sancho\\data\\HKExData\\Derivatives\\Warrants\\NewLaunchWarrants'\ndirNewCBBC = ndrv + r'\\Sancho\\data\\HKExData\\Derivatives\\CBBCs\\NewLaunchCBBCs'\ndirImg = ndrv + r'\\Sancho\\data\\Imagine\\FairValue'\nfpTmplt = ndrv + r'\\Sancho\\prog\\Database\\EDD_Daily_tmplt.xlsm'\n\n# === Setup Logger ===\nhandler = logging.FileHandler(fileLog)\nformatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')\nlogger = 
logging.getLogger(__name__)\n\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\nlogger.addHandler(logging.StreamHandler())\nlogger.setLevel(logging.DEBUG) \n\n\ndef getLastTradeDate(dirHKEx, dtNow):\n filePttn = dirHKEx + r'/' + dtNow.strftime('%Y%m') + '*.csv'\n fpList = sorted(glob.glob(filePttn), reverse=True)\n if len(fpList) == 0:\n dtPrevMth = dtNow + dateutil.relativedelta.relativedelta(months=-1)\n filePttn = dirHKEx + r'/' + dtPrevMth.strftime('%Y%m') + '*.csv'\n fpList = sorted(glob.glob(filePttn), reverse=True)\n\n fpHKEx = fpList[0]\n logger.info('Using Trade Data from HKEx: %s' % fpHKEx)\n df = pd.read_csv(fpHKEx, sep='\\t', error_bad_lines=False, quoting=csv.QUOTE_ALL, na_values=['-'], encoding='UTF-16LE', parse_dates=['Trade Date'])\n\n dtLastTrade = yyyymmdd = df.iloc[0]['Trade Date']\n logger.info('Last DateTrade: %s' % yyyymmdd)\n return dtLastTrade\n\nif __name__ == \"__main__\":\n try:\n dtNow = dt.datetime.now()\n dtLastTrade= getLastTradeDate(dirHKEx, dtNow)\n #dtLastTrade= dt.datetime.strptime(dt.datetime.now().strftime('%Y%m%d'), '%Y%m%d')\n #dtLastTrade= dt.datetime.strptime('20180508', '%Y%m%d')\n logger.info('Last trading day is %s' % dtLastTrade.strftime('%Y-%m-%d'))\n\n wsName = 'Data'\n cellRow = 2\n cellCol = 1\n\n # Read csv data\n yyyymmdd = dtLastTrade.strftime('%Y%m%d')\n fpIn = dirEDD + r'/' + yyyymmdd + '.csv'\n fpOut = dirEDD + r'/' + yyyymmdd + '.xlsx'\n df = pd.read_csv(fpIn)\n\n # Read Fair Price from Imagine\n fpImg = dirImg + r'/FairValue_' + yyyymmdd + '.csv'\n dfImg = pd.read_csv(fpImg, skiprows=3)\n # Unified Warrant Code format\n dfImg['BBG Code'] = dfImg['BBG Code'].map(lambda x: x.replace(' AVPO', ''))\n dfImg['BBG Code'] = dfImg['BBG Code'].map(lambda x: x.replace(' CBBC', ''))\n dfImg.rename(columns=lambda x: x.replace(\"BBG Code\", \"BBG_Code\"), inplace=True)\n dfImg.rename(columns=lambda x: x.replace(\"LAST_PRICE\", \"FairPx\"), inplace=True)\n df = pd.merge(df, dfImg.loc[:,['BBG_Code','FairPx']], how='left', left_on=['HT_TICKER'], right_on=['BBG_Code'])\n del df['BBG_Code']\n\n # Add previous OS data\n filePttn = dirEDD + r'/[0-9]*.csv'\n fpLists = sorted(glob.glob(filePttn), reverse=True)[1:6]\n i = 1\n for fp in fpLists:\n logger.info('Reading previous OS from %s' % fp)\n dfTmp = pd.read_csv(fp)\n dfTmp = dfTmp.loc[:,['HT_TICKER','WRT_OUTSTANDING']]\n newCol = 'WRT_OS_DIFF_' + str(i)\n dfTmp.rename(columns=lambda x: x.replace('WRT_OUTSTANDING',newCol),inplace=True)\n df = pd.merge(df, dfTmp, how='left', on='HT_TICKER')\n df[newCol] = df['WRT_OUTSTANDING']- df[newCol]\n i += 1\n\n df['Spread'] = df['PX_LAST'] - df['FairPx']\n hdrNew = ['HT_TICKER','HT_UNDERLYING','ID_BB_GLOBAL','UNDERLYING_ID_BB_GLOBAL','ISSUER','ISSUER_SHORT','LIQUIDITY_PROVIDER','WRT_PUT_OR_CALL','WRT_ISSUE_DT','LISTING_DATE','WRT_EXPIRE_DT','LAST_TRADEABLE_DT','BARRIER_STATUS','RESIDUAL_VALUE','BARRIER_HIT_DATE','','PX_LAST','Spread','FairPx','CHG_NET_1D','CHG_PCT_1D','PX_VOLUME','EQY_TURNOVER','VWAP_NUM_TRADES','WRT_ISSUE_AMT','BOARD_LOT','WRT_EXER_PX','OPT_BARRIER_PX_1','WRT_SH_PER','','WRT_WARRANT_DELTA_BST','WRT_NORM_GAMMA_BST','WRT_VEGA_BST','WRT_THETA_LAST','WRT_IMPLIED_VOLATILITY_BST','','WRT_UNDL_PX','WRT_UNDL_CHG_PCT_1D','WRT_UNDL_CHG_NET_1D','','OUTSTANDING_AS_OF_DATE','WRT_OUTSTANDING','WRT_OS_DIFF_1','WRT_OS_DIFF_2','WRT_OS_DIFF_3','WRT_OS_DIFF_4','WRT_OS_DIFF_5','AVG_PRICE_PER_WARRANTS_BOUGHT','AVG_PRICE_PER_WARRANTS_SOLD','NUMBER_OF_WARRANTS_BOUGHT','NUMBER_OF_WARRANTS_SOLD']\n\n # === Setup Excel com ===\n logger.info('Killing running 
Excel instance if any... ')\n os.system(\"TASKKILL /F /IM \\\"EXCEL.EXE\\\"\")\n time.sleep(2)\n Excel = win32com.client.gencache.EnsureDispatch('Excel.Application')\n #Excel = win32com.client.DispatchEx('Excel.Application')\n #Excel = win32com.client.Dispatch('Excel.Application')\n Excel.DisplayAlerts = False\n Excel.Visible = True\n #Excel.Interactive = False\n win32c = win32com.client.constants\n wb = Excel.Workbooks.Open(os.path.abspath(fpTmplt))\n ws = wb.Worksheets(wsName)\n ws.Range( ws.Cells(cellRow, cellCol), ws.Cells(cellRow + len(df.index)-1, cellCol+len(df.columns)-1)).Value = df.loc[:,hdrNew].values\n Excel.Application.Run(\"Highlight\")\n time.sleep(2)\n logger.info('Saving to %s' % fpOut)\n wb.SaveAs(fpOut, FileFormat = 51)\n\n except:\n logger.error(\"Exception in user code:\", exc_info=True)\n logger.error('-'*60)\n #traceback.print_exc(file=sys.stdout)\n finally:\n # ==== Excel Cleanup ====\n if 'wb' in locals():\n wb.Close(False)\n #Excel.Application.Quit()\n if 'Excel' in locals():\n Excel.Quit()\n Excel = None\n time.sleep(2)\n\n\n # engine = create_engine('oracle+cx_oracle://' + user + ':' + passwd + '@' + tns)\n #\n # # ==== Listed Warrants ====\n # hdrDW = ['Code','Issuer','UL','CP','Type','DateList','DateMat','CurK','K','EntRatio','TotIssueSz','OSPct','Delta','IV','CurTrd','DayHigh','DayLow','PxClose','TO_000','CurUL','PxUL','Dummy']\n # dfWrntHKExListed = pd.read_csv(fpWrntHKEx, skiprows=2, skipfooter=3, sep=\"\\t\", na_values=['-'], parse_dates=['DateList','DateMat'], header=None, names=hdrDW, encoding='UTF-16LE', engine='python')\n # dfWrntHKExListed = dfWrntHKExListed[['Code','Issuer']]\n #\n # # ==== New Launched Warrants ====\n # logger.info('Reading newly launched warrants from %s' % fpWrntNL)\n # hdrNL_CW = ['No','Code','WrntName','Issuer','UL','CP','LotSz','CurK','K','EntRatio','CurTrd','PxClose','TotIssueSz','DateLaunch','DateComm','DateList','DateMat']\n # dfNL_CW = pd.read_html(fpWrntNL, skiprows=5, header=None)[0] #, names=hdrDW)\n # dfNL_CW.columns = hdrNL_CW\n # dfNL_CW = tidyUpDF(dfNL_CW, skipfooterrow=4)\n # dfNL_CW = dfNL_CW[['Code','Issuer']]\n #\n # dfWrntHKEx = dfWrntHKExListed.append(dfNL_CW)\n # #dfWrntHKEx = dfWrntHKExListed\n # dfWrntHKEx.drop_duplicates(['Code','Issuer'], inplace=True)\n #\n # # ==== Listed CBBCs ====\n # hdrCBBC = ['Code','Issuer','UL','CP','Type','DateList','DateMat','CurK','K','EntRatio','TotIssueSz','OSPct','Delta','IV','CurTrd','DayHigh','DayLow','PxClose','TO_000','CurUL','PxUL','Dummy']\n # dfCbbcHKExListed = pd.read_csv(fpCbbcHKEx, skiprows=2, skipfooter=2, sep=\"\\t\", na_values=['-'], parse_dates=['DateList','DateMat'], header=None, names=hdrCBBC, encoding='UTF-16LE', engine='python')\n # dfCbbcHKExListed = dfCbbcHKExListed[['Code','Issuer']]\n #\n # # ==== New Launched CBBCs ====\n # logger.info('Reading newly launched CBBCs from %s' % fpCbbcNL)\n # hdrNL_CBBC = ['No','Code','CBBCName','Issuer','UL','BullBear','LotSz','CurK','K','KCall','EntRatio','CurIssue','PxIssue', 'TotIssueSz', 'DateLaunch', 'DateComm', 'DateList', 'DateMat']\n # dfNL_CBBC = pd.read_html(fpCbbcNL, skiprows=5, header=None)[0] #, names=hdrDW)\n # dfNL_CBBC.columns = hdrNL_CBBC\n # dfNL_CBBC = tidyUpDF(dfNL_CBBC, skipfooterrow=2)\n # dfNL_CBBC = dfNL_CBBC[['Code','Issuer']]\n #\n # dfCbbcHKEx = dfCbbcHKExListed.append(dfNL_CBBC)\n # #dfCbbcHKEx = dfCbbcHKExListed\n # dfCbbcHKEx.drop_duplicates(['Code','Issuer'], inplace=True)\n #\n # # ==== Dataframe for WARRANTS table ====\n # df_CBBCS = GetCBBCs( fpCbbcBBG, dfCbbcHKEx, dtTrade)\n # 
df_WARRANTS = GetWarrants(fpWrntBBG, dfWrntHKEx, dtTrade)\n # df_WARRANTS = df_WARRANTS.append(df_CBBCS)\n # res = Merge_WARRANTS(df_WARRANTS, engine)\n #\n # # ==== Dataframe for WARRANTS_UPDATES table ====\n # df_CBBCS_UPDATES = GetCBBCsUPDATES( fpCbbcBBG, dfCbbcHKEx, dtTrade)\n # df_WARRANT_UPDATES = GetWarrantsUPDATES(fpWrntBBG, dfWrntHKEx, dtTrade)\n # df_WARRANT_UPDATES = df_WARRANT_UPDATES.append(df_CBBCS_UPDATES)\n # res = Merge_WARRANT_UPDATES(df_WARRANT_UPDATES, engine)\n #\n # # ==== Dataframe for UNDERLYING_UPDATES table ====\n # df_UNDERLYING_UPDATES = GetUnderlyingUPDATES(fpCbbcBBG, fpWrntBBG, dtTrade)\n # res = Merge_UNDERLYING_UPDATES(df_UNDERLYING_UPDATES, engine)\n #\n # GenEddDailyFile(fpCbbcBBG, fpWrntBBG, dfCbbcHKEx, dfWrntHKEx, fpOut)\n","sub_path":"Database/GenEDD_DailyData_Xls.py","file_name":"GenEDD_DailyData_Xls.py","file_ext":"py","file_size_in_byte":9506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"2860649","text":"import numpy as np\r\nimport random\r\nimport tensorflow as tf\r\nimport random\r\nfrom bert_crf import Generator_BiLSTM_CRF\r\nimport numpy as np\r\nimport os, time, sys, argparse\r\nimport tensorflow as tf\r\nfrom tensorflow.contrib.rnn import LSTMCell\r\nfrom tensorflow.contrib.crf import crf_log_likelihood\r\nfrom tensorflow.contrib.crf import viterbi_decode\r\nfrom data import batch_yield, read_corpus, pad_sequences, batch_yield_for_unla_da, read_corpus_unlabel, \\\r\n batch_yield_for_discri, batch_yield_for_discri_unlabeled\r\n# from discriminator_for_gram import Discriminator\r\nfrom bert_base.bert import modeling\r\n\r\nbert_path = '/home/ywd/tf_model/pre_training_model/chinese_L-12_H-768_A-12/'\r\ninit_checkpoint = os.path.join(bert_path, 'bert_model.ckpt')\r\n#################################\r\n\r\n# chinese data ccks\r\ndata_path = 'ccks_data_path'\r\n# # english data 2010ib\r\n# data_path = 'data_path'\r\n#### Generator Hyper-parameters\r\nbatch_size = 20\r\nepoch_num = 10\r\n# filter_sizes = [1, 2, 3, 4, 5, 6]\r\nfilter_sizes = [1, 2, 3, 4]\r\nnum_filters = [100, 200, 200, 200]\r\ndis_dropout_keep_prob = 0.75\r\ndis_l2_reg_lambda = 0.2\r\nparser = argparse.ArgumentParser(description='BiLSTM-CRF for Chinese NER task')\r\nparser.add_argument('--train_data', type=str, default=data_path, help='train data source')\r\nparser.add_argument('--train_data_unlabel', type=str, default=data_path, help='train data source')\r\nparser.add_argument('--mode', type=str, default='train', help='train/test')\r\n# parser.add_argument('--demo_model', type=str, default='1521112368', help='model for test and demo')\r\nparser.add_argument('--test_data', type=str, default=data_path, help='test data source')\r\nparser.add_argument('--sub_test_data', type=str, default=data_path, help='test data source')\r\nargs = parser.parse_args()\r\n# train_data =\r\nparams = {\r\n 'dim': 768,\r\n 'dropout': 0.5,\r\n 'num_oov_buckets': 1,\r\n # 'batch_size': 20,\r\n 'buffer': 15000,\r\n 'lstm_size': 100,\r\n 'words': '../../../china_medical_char_data_cleaned/vocab.words.txt',\r\n 'chars': '../../../china_medical_char_data_cleaned/vocab.chars.txt',\r\n 'tags': '../../../china_medical_char_data_cleaned/vocab.tags.txt',\r\n 'glove': '../../../medical_char_data_cleaned/glove.npz',\r\n 'vector': 'bert_vec.npz'\r\n}\r\nmodel_path = './model/'\r\n\r\n\r\n# parser = argparse.ArgumentParser(description='BiLSTM-CRF for Chinese NER task')\r\n# parser.add_argument('--mode', type=str, default='train', help='train/test')\r\n# args = 
parser.parse_args()\r\n\r\ndef train(sess, train, dev, epoch, gen, num_batches, batch, label):\r\n \"\"\"\r\n :param train:\r\n :param dev:\r\n :return:\r\n \"\"\"\r\n saver = tf.train.Saver(tf.global_variables())\r\n\r\n run_one_epoch(sess, train, dev, epoch, saver, gen, num_batches, batch, label)\r\n\r\n\r\ndef run_one_epoch(sess, words, labels, tags, dev, epoch, gen, num_batches, batch, label, it, iteration, saver):\r\n \"\"\"\r\n :param sess:\r\n :param train:\r\n :param dev:\r\n :param tag2label:\r\n :param epoch:\r\n :param saver:\r\n :return:\r\n \"\"\"\r\n # num_batches = (len(train) + batch_size - 1) // batch_size\r\n\r\n start_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\r\n if label == 0:\r\n seqs, labels = words, labels\r\n # batches = batch_yield(train, batch_size, shuffle=True)\r\n # for step, (seqs, labels) in batch:\r\n sys.stdout.write(' processing: epoch {} : {} batch / {} batches.'.format(epoch + 1, batch + 1, num_batches) + '\\r')\r\n step_num = epoch * num_batches + batch + 1\r\n seqs, seqs_len, labels, max_len = get_feed_dict(seqs, labels)\r\n loss_train = gen.train(sess, seqs, seqs_len, labels, max_len)\r\n print(loss_train)\r\n print('11111111111111, training_phase_1 finished!')\r\n # elif label == 1:\r\n # # batches = batch_yield_for_unla_da(train, batch_size, shuffle=True)\r\n # # for step, (seqs, labels,tags) in enumerate(batches):\r\n # seqs, labels, tags = words, labels, tags\r\n # sys.stdout.write(' processing: {} batch / {} batches.'.format(batch + 1, num_batches) + '\\r')\r\n # step_num = epoch * num_batches + batch + 1\r\n # seqs, seqs_len, labels, max_len = get_feed_dict_for_unlabel(seqs, labels)\r\n # loss_train = gen.train_for_unlabel(sess, epoch, seqs, seqs_len, labels, tags, max_len, it, iteration, saver)\r\n # print(loss_train)\r\n # print('222222222222, training_ohase_II finished!')\r\n # elif label == 2:\r\n #\r\n # seqs, labels, tags = words, labels, tags\r\n #\r\n # sys.stdout.write(' processing: {} batch / {} batches.'.format(batch + 1, num_batches) + '\\r')\r\n # step_num = epoch * num_batches + batch + 1\r\n # seqs, seqs_len, labels, max_len = get_feed_dict(seqs, labels)\r\n # loss_train = gen.train_for_discri_labeled(sess, seqs, seqs_len, labels, tags, max_len)\r\n # print(loss_train)\r\n # print('333333333333333333333,labeled training of discriminator finised!')\r\n # else:\r\n # seqs, labels, tags = words, labels, tags\r\n # sys.stdout.write(' processing: {} batch / {} batches.'.format(batch + 1, num_batches) + '\\r')\r\n # step_num = epoch * num_batches + batch + 1\r\n # seqs, seqs_len, labels, max_len = get_feed_dict(seqs, labels)\r\n # loss_train = gen.train_for_discri_unlabeled(sess, epoch, seqs, seqs_len, labels, tags, max_len)\r\n # print(loss_train)\r\n # print('44444444444444444, unlabeled training of discriminator finised!')\r\n\r\n\r\ndef get_feed_dict(seqs, labels):\r\n seqs, seqs_len, max_len = pad_sequences(seqs, pad_mark='.')\r\n\r\n labels, _, _ = pad_sequences(labels, pad_mark='O')\r\n return seqs, seqs_len, labels, max_len\r\n\r\n\r\ndef get_feed_dict_for_unlabel(seqs, labels):\r\n seqs, seqs_len, max_len = pad_sequences(seqs, pad_mark='.')\r\n labels, _, _ = pad_sequences(labels, pad_mark='O')\r\n return seqs, seqs_len, labels, max_len\r\n\r\n\r\ndef get_metrics(sess, generator, dev, test_size, batch_size, flag=0):\r\n value_lis = []\r\n medi_lis = []\r\n metric_lis = generator.evaluate_ori(sess, dev, test_size, batch_size, flag=0)\r\n for ele in metric_lis:\r\n value_lis.append(ele.values())\r\n\r\n 
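# Average each metric across batches: the nested zip(*...) calls transpose the collected per-batch values.\r\n    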
value_lis_transform = zip(*value_lis)\r\n    for ele in value_lis_transform:\r\n        transfor_ele = zip(*ele)\r\n        for vals in transfor_ele:\r\n            medi_lis.append(np.mean(vals))\r\n\r\n    return medi_lis\r\n\r\n\r\ndef main():\r\n    # if args.mode == 'train'\r\n    ap = []\r\n    with open('../../../china_medical_char_data_cleaned/vocab.tags.txt', 'r') as fin:\r\n        for line in fin:\r\n            ap.append(line.strip())\r\n    fin.close()\r\n    length = len(ap)\r\n    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.625)\r\n    # config = tf.ConfigProto()\r\n    # config.gpu_options.allow_growth = True\r\n    sess = tf.Session(config=tf.ConfigProto(\r\n        # device_count={ \"CPU\": 48 },\r\n        # inter_op_parallelism_threads=10,\r\n        allow_soft_placement=True,\r\n        # intra_op_parallelism_threads=20,\r\n        gpu_options=gpu_options))\r\n\r\n    generator = Generator_BiLSTM_CRF(0.5, 1, batch_size, params, filter_sizes, num_filters, 0.75, length)\r\n    generator.build_graph()\r\n\r\n    tvars = tf.trainable_variables()\r\n    (assignment_map,\r\n     initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(\r\n        tvars, init_checkpoint)\r\n    tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\r\n    # Finally, initialize the variables\r\n    # sess.run(tf.global_variables_initializer())\r\n\r\n    sess.run(generator.init_op)\r\n    sess.run(generator.table_op)\r\n    sess.run(generator.init_op_1)\r\n    saver = tf.train.Saver(tf.global_variables())\r\n\r\n    tf.logging.info(\"**** Trainable Variables ****\")\r\n    for var in tvars:\r\n        init_string = \"\"\r\n        if var.name in initialized_variable_names:\r\n            init_string = \", *INIT_FROM_CKPT*\"\r\n        print(\" name = %s, shape = %s%s\" % (var.name, var.shape,\r\n                                            init_string))\r\n\r\n    train_path = os.path.join('.', args.train_data, 'train_data1')\r\n    train_unlabel_path = os.path.join('.', args.train_data_unlabel, 'train_unlabel')\r\n    train_unlabel_path_1 = os.path.join('.', args.train_data_unlabel, 'train_unlabel1')\r\n    test_path = os.path.join('.', args.test_data, 'test_data1')\r\n    sub_test_path = os.path.join('.', args.sub_test_data, 'sub_test_data')\r\n    train_data = read_corpus(train_path)\r\n    train_data_unlabel = read_corpus_unlabel(train_unlabel_path)\r\n    train_data_unlabel_1 = read_corpus_unlabel(train_unlabel_path_1)\r\n    test_data = read_corpus(test_path)\r\n    test_size = len(test_data)\r\n    sub_test_data = read_corpus(sub_test_path)\r\n\r\n    batches_labeled = batch_yield(train_data, batch_size, shuffle=True)\r\n    batches_labeled = list(batches_labeled)\r\n    # print(len(batches_labeled))\r\n    num_batches = (len(train_data) + batch_size - 1) // batch_size\r\n    batches_unlabeled = batch_yield_for_unla_da(train_data_unlabel, batch_size, shuffle=True)\r\n    batches_unlabeled = list(batches_unlabeled)\r\n    # print(len(batches_unlabeled))\r\n    batches_labeled_for_dis = batch_yield_for_discri(train_data, batch_size, shuffle=True)\r\n    batches_labeled_for_dis = list(batches_labeled_for_dis)\r\n    batches_unlabeled_for_dis = batch_yield_for_discri_unlabeled(train_data_unlabel, batch_size, shuffle=True)\r\n    batches_unlabeled_for_dis = list(batches_unlabeled_for_dis)\r\n    dev = batch_yield(test_data, batch_size, shuffle=True)\r\n    # num_batches = min(len(batches_labeled),len(batches_unlabeled))\r\n    num_batches_unlabel = (len(train_data_unlabel) + batch_size - 1) // batch_size\r\n    num_batches_1 = min(len(batches_labeled_for_dis), len(batches_unlabeled_for_dis))\r\n    index = 0\r\n    if args.mode == 'train':\r\n        for epoch_total in range(30):\r\n\r\n            print('epoch_total and index are {} and {}'.format(epoch_total+1, index))\r\n            medi_lis = get_metrics(sess, 
generator, dev, test_size, batch_size, flag=0)\r\n\r\n            for ele in medi_lis:\r\n                print('entity recognition:', ele)\r\n            print('the whole epoch training accuracy finished!!!!!!!!!!!!')\r\n\r\n            for i, (words, labels) in enumerate(batches_labeled):\r\n                run_one_epoch(sess, words, labels, tags=[], dev=test_data, epoch=epoch_total, gen=generator,\r\n                              num_batches=num_batches, batch=i, label=0, it=0, iteration=0, saver=saver)\r\n\r\n            dev1 = batch_yield(test_data, batch_size, shuffle=True)\r\n\r\n            medi_lis_from_cross_entropy_training = get_metrics(sess, generator, dev1, test_size, batch_size, flag=0)\r\n\r\n            for ele in medi_lis_from_cross_entropy_training:\r\n                print('first pass:', ele)\r\n\r\n            print('the accuracy after cross entropy training finished!!!!!!!!!!!!!!!!!!')\r\n\r\n            # if epoch_total > 3:\r\n            #     # batches_labeled_for_dis = batches_labeled_for_dis[0: len(batches_labeled_for_dis)-5]\r\n            #     batch_dis_for_label = len(batches_labeled_for_dis)\r\n            #     batch_dis_for_unlabel = len(batches_unlabeled_for_dis)\r\n            #     for (ele, ele2) in zip(enumerate(batches_labeled_for_dis), enumerate(batches_unlabeled_for_dis)):\r\n            #         index += 1\r\n            #         # if index > 70:\r\n            #         #     break\r\n            #         run_one_epoch(sess, ele[1][0], ele[1][1], ele[1][2], dev=test_data, epoch=epoch_total,\r\n            #                       gen=generator,\r\n            #                       num_batches=batch_dis_for_label, batch=index, label=2, it=0, iteration=0, saver=saver)\r\n            #         run_one_epoch(sess, ele2[1][0], ele2[1][1], ele2[1][2], dev=test_data, epoch=epoch_total,\r\n            #                       gen=generator,\r\n            #                       num_batches=batch_dis_for_unlabel, batch=index, label=3, it=0, iteration=0,\r\n            #                       saver=saver)\r\n            #     index = 0\r\n            #\r\n            #     print('the whole dis phase I finished')\r\n            #     # index += 1\r\n            #     for it in range(5):\r\n            #         for i, (words, labels, tags) in enumerate(batches_unlabeled):\r\n            #             # print(i)\r\n            #             run_one_epoch(sess, words, labels, tags=tags, dev=test_data, epoch=epoch_total, gen=generator,\r\n            #                           num_batches=num_batches_unlabel, batch=i, label=1, it=it, iteration=i,\r\n            #                           saver=saver)\r\n            #\r\n            #         dev2 = batch_yield(test_data, batch_size, shuffle=True)\r\n            #\r\n            #         medi_lis_from_adversarial_training = get_metrics(sess, generator, dev2, test_size, batch_size, flag=0)\r\n            #\r\n            #         for ele in medi_lis_from_adversarial_training:\r\n            #             print('second pass:', ele)\r\n            #\r\n            #         print('the accuracy after adversarial training of generator finished!!!!!!!!!!!!!!')\r\n            #\r\n            #         print('epoch {} finished!'.format(epoch_total))\r\n\r\n    if args.mode == 'test':\r\n        sub_dev = batch_yield_for_discri_unlabeled(sub_test_data, batch_size, shuffle=True)\r\n        # print(list(sub_dev))\r\n        ckpt_file = tf.train.latest_checkpoint(model_path)\r\n\r\n        generator = Generator_BiLSTM_CRF(0.5, batch_size, params, filter_sizes, num_filters, 0.75, length,\r\n                                         is_training=False)\r\n        generator.build_graph()\r\n        generator.test(sess, sub_dev, test_size, 20)\r\n\r\n\r\nif __name__ == '__main__':\r\n    # if args.mode == 'train':\r\n    main()\r\n\r\n","sub_path":"model/bi_crf_gan/word2vec_300_clean/china_main_base.py","file_name":"china_main_base.py","file_ext":"py","file_size_in_byte":13672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"354170583","text":"\"\"\"\nUsage: strlength.py --choice=N --list=LIST TEXT...\n\nArguments:\n    TEXT        The string for calculating length.\n    LIST        The list for calculating length.\n\nOptions:\n    -h, --help      Show this message.\n\n    --choice=N      Choice to use built-in functions or not.\n\n    --list=LIST...  
The list for calculating length.\n\"\"\"\n\n\nimport docopt\nimport logging\nimport re\nimport sys\n\n\n\"\"\"\nTo find the length of a string and a list, with and without\nusing built-in functions.\n\"\"\"\n\n\nCHOICE_FOR_WITH_BUILT_IN = 1\nCHOICE_FOR_WITHOUT_BUILT_IN = 2\nVALID_CHOICES = [CHOICE_FOR_WITH_BUILT_IN, CHOICE_FOR_WITHOUT_BUILT_IN]\n\n\n# Setting up logger\nlogging.basicConfig(filename='strlength.log', level=logging.INFO,\n                    format='%(levelname)s: %(name)s : '\n                           '%(asctime)s:\\t %(message)s',\n                    datefmt='%Y-%m-%d %H:%M:%S')\nlogger = logging.getLogger(__name__)\n\nch = logging.StreamHandler()\nformatter = logging.Formatter('%(levelname)s: %(name)s : '\n                              '%(asctime)s:\\t %(message)s',\n                              datefmt='%Y-%m-%d %H:%M:%S')\nch.setFormatter(formatter)\n\n\nclass Length(object):\n    \"\"\"\n    Class to calculate length of string and list.\n    \"\"\"\n\n    def __init__(self):\n        self.result = []\n\n    def built_in(self, input):\n        \"\"\"\n        Method to calculate length of string and list using built-in methods.\n        \"\"\"\n        try:\n            return len(input)\n        except TypeError:\n            raise Exception('Input not in Expected Format.')\n\n    def without_built_in(self, input):\n        \"\"\"\n        Method to calculate length of string, list using user-defined methods.\n        \"\"\"\n        input_length = 0\n        try:\n            for character in input:\n                input_length = input_length + 1\n            return input_length\n        except TypeError:\n            logger.error('Wrong input...value is not iterable...\\n')\n            raise Exception('Input not in Expected Format.')\n\n\ndef is_valid_choice(choice):\n    \"\"\"\n    Check if choice provided is valid or not.\n    \"\"\"\n    return (choice in VALID_CHOICES)\n\n\nif __name__ == '__main__':\n\n    logger.info('Started Program Execution...')\n    logger.addHandler(ch)\n\n    try:\n        arguments = docopt.docopt(__doc__)\n\n        # Take choice to use Built-In Functions or not.\n        try:\n            choice = int(arguments['--choice'])\n        except ValueError:\n            logger.error(\"Choice not a valid number.\\n\")\n            sys.exit(0)\n\n        # To take string & list.\n        string = ' '.join(arguments['TEXT'])\n        input_string = str(arguments['--list'])\n        input_list = filter(None, re.split(\"[\\[\\], ;']+\", input_string))\n\n        length_obj = Length()\n\n        if is_valid_choice(choice):\n\n            if choice == CHOICE_FOR_WITH_BUILT_IN:\n                list_length = length_obj.built_in(input_list)\n                string_length = length_obj.built_in(string)\n\n            elif choice == CHOICE_FOR_WITHOUT_BUILT_IN:\n                list_length = length_obj.without_built_in(input_list)\n                string_length = length_obj.without_built_in(string)\n\n        else:\n            logger.error('Invalid choice provided...\\n')\n            sys.exit(0)\n\n        logger.info(\"Length of string is: {}\".format(string_length))\n        logger.info(\"Length of list is: {}\\n\".format(list_length))\n\n    except docopt.DocoptExit as e:\n        logger.exception(e.message)\n","sub_path":"strlength.py","file_name":"strlength.py","file_ext":"py","file_size_in_byte":3487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"247430682","text":"#!/usr/bin/env python3\n# Copyright (c) 2021, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nsys.path.insert(0, os.getcwd())\n\nimport argparse\nimport json\n\n__doc__ = \"\"\"\nGiven a path to a config.json, this script will auto-format the JSON file and save it to the original path specified.\n\"\"\"\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\n \"config_fpath\",\n help=\"Path to config.json\"\n )\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n\n with open(args.config_fpath) as f:\n d = json.load(f)\n\n with open(args.config_fpath, 'w') as f:\n json.dump(d, f, indent=4, sort_keys=True)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"closed/LTechKorea/scripts/config_validation/autoformat_configs.py","file_name":"autoformat_configs.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"224927560","text":"import boto3\nimport pandas as pd\nimport numpy as np\nimport io\nimport os\nfrom tqdm import tqdm\n\n\ndef get_datelist():\n s3 = boto3.resource('s3')\n contents = s3.meta.client.list_objects(Bucket='schperics.stock')['Contents']\n date_list = []\n for c in contents:\n if 'csv_dp_tick' in c['Key'] and 'csv' in c['Key']:\n date = os.path.basename(c['Key']).split('.')[0]\n date_list.append(date)\n date_list = np.unique(date_list)\n return date_list\n\n\ndef load_data(codes, date='2019-05-10'):\n s3 = boto3.resource('s3')\n key_name = 'csv_dp_tick/{}.csv'.format(date)\n obj = s3.meta.client.get_object(Bucket='schperics.stock', Key=key_name)\n try:\n df = pd.read_csv(io.BytesIO(obj['Body'].read()))\n except Exception as e:\n print(e)\n return pd.DataFrame([], columns=['shcode', 'chetime', 'price', 'sign', 'volume'])\n \n df['date_time'] = df['chetime'].apply(lambda x: '{}{:06d}'.format(date, x))\n df['date_time'] = pd.to_datetime(df['date_time'], format='%Y-%m-%d%H%M%S')\n df = df.set_index('date_time')\n \n df = df[(90000<=df['chetime']) & (df['chetime']<=153000)]\n df = df[['shcode', 'chetime', 'price', 'sign', 'cvolume']]\n df.rename(index = {'cvolume':'volume'})\n if type(codes) != type([]):\n return df[df['shcode']==codes]\n res_df = None\n for code in codes:\n if res_df is None:\n res_df = df[df['shcode']==code]\n else:\n res_df = res_df.append(df[df['shcode']==code])\n return res_df\n\n\ndef load_all_data(codes):\n date_list = get_datelist()\n res_df = None\n for cur_date in tqdm(date_list, ncols=80):\n df = load_data(codes, cur_date)\n if res_df is None:\n res_df = df\n else:\n res_df = res_df.append(df)\n return res_df.reset_index(drop=True)\n\n","sub_path":"mlfinlab/mlfinlab/datasets/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"649999647","text":"from .models.Form import FormularioPessoa, FormularioEditarDepartamento, FormularioDeletarPessoa\nfrom django.http 
import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.http import HttpResponse, JsonResponse\nfrom .models.Pessoa import Pessoa, Departamento\nfrom django.template import loader\n\n\ndef pessoa(request, idpessoa):\n p = Pessoa.objects.get(pk=idpessoa)\n dados = {\"pessoa\": p}\n return render(request, \"pessoa/detalhar.html\", dados)\n\n\ndef lista_pessoas(request):\n # Aqui é o modelo\n lista_p = Pessoa.objects.obter_pessoas_adultas()\n dados = {\"listapessoas\": lista_p}\n\n # Aqui é o template\n template = loader.get_template(\"pessoa/listar.html\")\n return HttpResponse(template.render(dados, request))\n\n\ndef create(request, nome, sobrenome, idade, escolaridade, dpto_descricao):\n novoDepartamento = Departamento(\n sigla=dpto_descricao[0].upper(), descricao=dpto_descricao.upper()\n )\n novoDepartamento.save()\n novaPessoa = Pessoa(\n nome=nome,\n sobrenome=sobrenome,\n idade=idade,\n escolaridade=escolaridade,\n depto_chefia_id=novoDepartamento.id,\n )\n novaPessoa.depto_atual_id = novoDepartamento.id\n novaPessoa.save()\n\n\ndef update(request, nome, dpto_descricao):\n novoDepartamento = Departamento(\n sigla=dpto_descricao[0].upper(), descricao=dpto_descricao.upper()\n )\n novoDepartamento.save()\n pessoa = Pessoa.objects.get(nome=nome)\n pessoa.depto_atual_id = novoDepartamento.id\n pessoa.save()\n\n\ndef delete(request, nome):\n Pessoa.objects.get(nome=nome).delete()\n\n\ndef createPessoa(request):\n # if this is a POST request we need to process the form data\n if request.method == 'POST':\n # create a form instance and populate it with data from the request:\n form = FormularioPessoa(request.POST)\n # check whether it's valid:\n if form.is_valid():\n\n nome = form.cleaned_data['nome']\n sobrenome = form.cleaned_data['sobrenome']\n idade = form.cleaned_data['idade']\n escolaridade = form.cleaned_data['escolaridade']\n depto = form.cleaned_data['depto']\n\n create(request, nome, sobrenome, idade, escolaridade, depto)\n\n #return JsonResponse({\"pessoa_criada\": form.cleaned_data})\n return render(request, \"pessoa/sucesso.html\", {'form': form})\n\n # if a GET (or any other method) we'll create a blank form\n else:\n form = FormularioPessoa()\n\n return render(request, \"pessoa/home.html\", {'form': form})\n\n\ndef editPessoa(request):\n if request.method == 'POST':\n form = FormularioEditarDepartamento(request.POST)\n\n if form.is_valid():\n\n nome = form.cleaned_data['nome']\n novo_departamento = form.cleaned_data['novo_departamento']\n\n update(request, nome, novo_departamento)\n\n #return JsonResponse({\"pessoa_editada\": form.cleaned_data})\n return render(request, \"pessoa/sucesso.html\", {'form': form})\n\n else:\n form = FormularioEditarDepartamento()\n\n return render(request, \"pessoa/editarDepartamento.html\", {'form': form})\n\n\ndef deletePessoa(request):\n if request.method == 'POST':\n form = FormularioDeletarPessoa(request.POST)\n\n if form.is_valid():\n\n nome = form.cleaned_data['nome']\n\n delete(request, nome)\n\n #return JsonResponse({\"pessoa_excluída\": form.cleaned_data})\n return render(request, \"pessoa/sucesso.html\", {'form': form})\n\n else:\n form = FormularioDeletarPessoa()\n\n return render(request, \"pessoa/deletarPessoa.html\", {'form': form})\n","sub_path":"aplicacao/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"265193154","text":"\"\"\"\n@Project: Energy-Consumption\n@Description: Constants 
for Sprit monitor data\n@Time:2020/9/8 17:15\n\n\"\"\"\n\nX_COLUMN_NAMES = ['power(kW)', 'quantity(kWh)', 'tire_type', 'city',\n 'motor_way', 'country_roads', 'driving_style',\n 'consumption(kWh/100km)', 'A/C', 'park_heating', 'avg_speed(km/h)']\nY_COLUMN_NAME = ['trip_distance(km)']\nREQUIRE_ENCODED_COLUMNS = ['tire_type', 'driving_style']\n\nDEEP_MLP = 'DeepMLP'\nRF = 'RandomForest'\nMLP = 'MLP'\nADA_BOOST = 'AdaBoost'\n\nMODEL_SAVED_PATH = '../ml_models'\n","sub_path":"utils/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"411478853","text":"from unit_page import *\r\nfrom base_action import SubjectMixiner\r\nfrom resource_query import ResourceQuery\r\n\r\nclass unit_resource(UnitBasePage, SubjectMixiner):\r\n def __init__(self):\r\n UnitBasePage.__init__(self)\r\n \r\n def execute(self):\r\n \r\n self.unit = self.getUnit()\r\n if self.unit == None:\r\n self.addActionError(u\"您所访问的机构不存在!\")\r\n return self.ERROR \r\n if self.unit.delState == True:\r\n self.addActionError(u\"您所访问的机构已经被删除!\")\r\n return self.ERROR\r\n \r\n self.get_resource_list()\r\n \r\n #res_cate = __jitar__.categoryService.getCategoryTree(\"resource\")\r\n #request.setAttribute(\"res_cate\", res_cate)\r\n self.get_cate_tree_without_cache()\r\n request.setAttribute(\"head_nav\", \"unit_resource\")\r\n request.setAttribute(\"unit\", self.unit) \r\n self.putGradeList()\r\n self.putSubjectList()\r\n self.putResouceCateList()\r\n templateName = \"template1\"\r\n if self.unit.templateName != None:\r\n templateName = self.unit.templateName\r\n return \"/WEB-INF/unitspage/\" + templateName + \"/unit_resource.ftl\"\r\n \r\n def get_resource_list(self):\r\n qry = ResourceQuery(\"\"\" r.resourceId, r.href, r.title, r.fsize, r.createDate, r.recommendState, \r\n u.loginName, u.nickName, r.subjectId as subjectId, grad.gradeName, sc.name as scName \"\"\")\r\n #qry.unitId = self.unit.unitId\r\n type = self.params.getStringParam(\"type\")\r\n if type == None or type == \"\": type = \"new\"\r\n list_type = \"\"\r\n if type == \"hot\":\r\n qry.orderType = ResourceQuery.ORDER_TYPE_VIEWCOUNT_DESC\r\n qry.custormAndWhereClause = \" r.approvedPathInfo Like '%/\" + str(self.unit.unitId) + \"/%'\"\r\n list_type = u\"最高人气\"\r\n elif type == \"rcmd\":\r\n #qry.recommendState = True\r\n #qry.rcmdState = True\r\n qry.custormAndWhereClause = \" r.approvedPathInfo Like '%/\" + str(self.unit.unitId) + \"/%' And r.rcmdPathInfo Like '%/\" + str(self.unit.unitId) + \"/%'\"\r\n list_type = u\"编辑推荐\"\r\n elif type == \"cmt\":\r\n qry.orderType = ResourceQuery.ORDER_TYPE_COMMENTCOUNT_DESC\r\n qry.custormAndWhereClause = \" r.approvedPathInfo Like '%/\" + str(self.unit.unitId) + \"/%'\"\r\n list_type = u\"评论最多\"\r\n else:\r\n type = \"new\"\r\n qry.custormAndWhereClause = \" r.approvedPathInfo Like '%/\" + str(self.unit.unitId) + \"/%'\"\r\n list_type = u\"最新资源\" \r\n request.setAttribute(\"type\", type)\r\n request.setAttribute(\"list_type\", list_type)\r\n \r\n qry.gradelevel = self.params.getIntParamZeroAsNull(\"level\")\r\n qry.subjectId = self.params.getIntParamZeroAsNull(\"subjectId\")\r\n qry.sysCateId = self.params.getIntParamZeroAsNull(\"categoryId\")\r\n qry.gradeId = self.params.getIntParamZeroAsNull(\"gradeId\")\r\n qry.k = self.params.getStringParam(\"k\")\r\n \r\n pager = self.createPager()\r\n \r\n pager.totalRows = qry.count()\r\n resource_list = qry.query_map(pager)\r\n \r\n 
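# Hand the paged result list and the active filter values back to the request for the template.\r\n        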
request.setAttribute(\"resource_list\", resource_list)\r\n request.setAttribute(\"pager\", pager)\r\n request.setAttribute(\"subjectId\", qry.subjectId)\r\n request.setAttribute(\"categoryId\", qry.sysCateId)\r\n \r\n def get_cate_tree_without_cache(self): \r\n self.sbj_svc = __jitar__.subjectService\r\n type = self.params.getStringParam(\"type\")\r\n if type == None or type == \"\": type = \"new\"\r\n outHtml = \"\"\r\n subject_list = self.sbj_svc.getMetaSubjectList()\r\n for s in subject_list:\r\n msid = s.getMsubjId()\r\n outHtml = outHtml + \"d.add(\" + str(msid) + \",0,'\" + s.getMsubjName() + \"','unit_resource.py?type=\" + type + \"&subjectId=\" + str(msid) + \"&unitId=\" + str(self.unit.unitId) + \"');\"\r\n gradeIdList = self.sbj_svc.getMetaGradeListByMetaSubjectId(msid)\r\n if gradeIdList != None:\r\n for gid in gradeIdList:\r\n outHtml = outHtml + \"d.add(\" + str(msid) + str(gid.getGradeId()) + \",\" + str(msid) + \",'\" + gid.getGradeName() + \"','unit_resource.py?type=\" + type + \"&subjectId=\" + str(msid) + \"&gradeId=\" + str(gid.getGradeId()) + \"&target=child&unitId=\" + str(self.unit.unitId) + \"');\"\r\n gradeLevelList = self.sbj_svc.getGradeLevelListByGradeId(gid.getGradeId())\r\n for glevel in gradeLevelList:\r\n outHtml = outHtml + \"d.add(\" + str(msid) + str(gid.getGradeId()) + str(glevel.getGradeId()) + \",\" + str(msid) + str(gid.getGradeId()) + \",'\" + glevel.getGradeName() + \"','unit_resource.py?type=\" + type + \"&subjectId=\" + str(msid) + \"&gradeId=\" + str(glevel.getGradeId()) + \"&level=1&unitId=\" + str(self.unit.unitId) + \"');\" \r\n \r\n request.setAttribute(\"outHtml\", outHtml)\r\n \r\n def get_cate_tree(self):\r\n #下面的带缓存的版本有bug,没有过滤机构\r\n cache = __jitar__.cacheProvider.getCache('category')\r\n self.sbj_svc = __jitar__.subjectService\r\n type = self.params.getStringParam(\"type\")\r\n if type == None or type == \"\": type = \"new\"\r\n outHtml = cache.get(type + \"_outHtml_resource\")\r\n if outHtml == None or outHtml == \"\": \r\n cache_key = \"_subject_list_resource\"\r\n subject_list = cache.get(cache_key)\r\n if subject_list == None:\r\n subject_list = self.sbj_svc.getMetaSubjectList()\r\n cache.put(cache_key, subject_list)\r\n outHtml = \"\"\r\n for s in subject_list:\r\n msid = s.getMsubjId()\r\n outHtml = outHtml + \"d.add(\" + str(msid) + \",0,'\" + s.getMsubjName() + \"','unit_resource.py?type=\" + type + \"&subjectId=\" + str(msid) + \"&unitId=\" + str(self.unit.unitId) + \"');\"\r\n cache_key = \"_gradeIdList_resource\" + str(msid)\r\n gradeIdList = cache.get(cache_key)\r\n if gradeIdList == None:\r\n gradeIdList = self.sbj_svc.getMetaGradeListByMetaSubjectId(msid)\r\n cache.put(cache_key, gradeIdList) \r\n \r\n if gradeIdList != None:\r\n for gid in gradeIdList: \r\n outHtml = outHtml + \"d.add(\" + str(msid) + str(gid.getGradeId()) + \",\" + str(msid) + \",'\" + gid.getGradeName() + \"','unit_resource.py?type=\" + type + \"&subjectId=\" + str(msid) + \"&gradeId=\" + str(gid.getGradeId()) + \"&target=child&unitId=\" + str(self.unit.unitId) + \"');\"\r\n cache_key = \"_gradeLevelList_resource\" + str(gid.getGradeId())\r\n gradeLevelList = cache.get(cache_key)\r\n if gradeLevelList == None:\r\n gradeLevelList = self.sbj_svc.getGradeLevelListByGradeId(gid.getGradeId())\r\n cache.put(cache_key, gradeLevelList) \r\n for glevel in gradeLevelList:\r\n outHtml = outHtml + \"d.add(\" + str(msid) + str(gid.getGradeId()) + str(glevel.getGradeId()) + \",\" + str(msid) + str(gid.getGradeId()) + \",'\" + glevel.getGradeName() + 
\"','unit_resource.py?type=\" + type + \"&subjectId=\" + str(msid) + \"&gradeId=\" + str(glevel.getGradeId()) + \"&level=1&unitId=\" + str(self.unit.unitId) + \"');\"\r\n cache.put(type + \"_outHtml_resource\", outHtml)\r\n \r\n \r\n request.setAttribute(\"outHtml\", outHtml) \r\n \r\n def createPager(self):\r\n pager = self.params.createPager()\r\n pager.itemName = u\"资源\"\r\n pager.itemUnit = u\"个\"\r\n pager.pageSize = 20\r\n return pager\r\n","sub_path":"WebContent/WEB-INF/program/unit/unit_resource.py","file_name":"unit_resource.py","file_ext":"py","file_size_in_byte":7737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"429376248","text":"\"\"\"\nCopyright 2017-2018 Fizyr (https://fizyr.com)\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom __future__ import division\nimport keras\nimport numpy as np\nimport cv2\nfrom PIL import Image\nimport imageio\nfrom scipy.signal import convolve2d\nfrom skimage.filters.rank import entropy\nfrom skimage.morphology import square\n\nfrom .transform import change_transform_origin\n\ndef gaussian_pyramid(image, kernel, levels):\n \"\"\"\n A function to create a Gaussian pyramid of a defined number of levels and from a chosen kernel.\n\n :param image: The image we want to use of dimension (N,M,3) or (M,N)\n :param kernel: The Gaussian kernel of dimention (k,k)\n :param levels: The desired number of levels in the Gaussian pyramid, an integer\n :return: The Gaussian pyramid, a list of numpy arrays\n \"\"\"\n\n if len(np.shape(image)) == 3:\n gauss_l_r = image[:, :, 0]\n gauss_l_g = image[:, :, 1]\n gauss_l_b = image[:, :, 2]\n gauss_l = image\n pyramid = [gauss_l]\n for l in range(levels):\n if len(np.shape(image)) == 3:\n # channels last format\n gauss_l_r = downsample(gauss_l_r, kernel)\n gauss_l_g = downsample(gauss_l_g, kernel)\n gauss_l_b = downsample(gauss_l_b, kernel)\n gauss_l = np.zeros((gauss_l_b.shape[0], gauss_l_b.shape[1], 3))\n gauss_l[:, :, 0] = gauss_l_r\n gauss_l[:, :, 1] = gauss_l_g\n gauss_l[:, :, 2] = gauss_l_b\n else:\n gauss_l = downsample(gauss_l, kernel)\n pyramid.append(gauss_l)\n return pyramid\n\ndef laplacian_pyramid(image, kernel, levels):\n \"\"\"\n A function to create a Laplacian pyramid of a defined number of levels and from a chosen kernel.\n\n :param image: The image we want to use of dimension (N,M,3) or (M,N)\n :param kernel: The Gaussian kernel of dimention (k,k)\n :param levels: The desired number of levels in the Laplacian pyramid, an integer\n :return: The Laplacian pyramid, a list of numpy arrays\n \"\"\"\n\n gauss = gaussian_pyramid(image, kernel, levels)\n pyramid = []\n for l in range(len(gauss) - 2, -1, -1):\n if len(np.shape(image)) == 3:\n # channels last format\n gauss_l1r = upsample(gauss[l+1][:, :, 0])\n gauss_l1g = upsample(gauss[l+1][:, :, 1])\n gauss_l1b = upsample(gauss[l+1][:, :, 2])\n if gauss_l1r.shape[0] > gauss[l][:, :, 0].shape[0]:\n gauss_l1r = np.delete(gauss_l1r, -1, axis=0)\n gauss_l1g = np.delete(gauss_l1g, -1, axis=0)\n gauss_l1b = 
np.delete(gauss_l1b, -1, axis=0)\n if gauss_l1r.shape[1] > gauss[l][:, :, 0].shape[1]:\n gauss_l1r = np.delete(gauss_l1r, -1, axis=1)\n gauss_l1g = np.delete(gauss_l1g, -1, axis=1)\n gauss_l1b = np.delete(gauss_l1b, -1, axis=1)\n lap_l_r = gauss[l][:, :, 0] - gauss_l1r\n lap_l_g = gauss[l][:, :, 1] - gauss_l1g\n lap_l_b = gauss[l][:, :, 2] - gauss_l1b\n lap_l = np.zeros((lap_l_r.shape[0], lap_l_r.shape[1], 3))\n lap_l[:, :, 0] = lap_l_r\n lap_l[:, :, 1] = lap_l_g\n lap_l[:, :, 2] = lap_l_b\n else:\n gauss_l1 = upsample(gauss[l+1])\n if gauss_l1.shape[0] > gauss[l].shape[0]:\n gauss_l1 = np.delete(gauss_l1, -1, axis=0)\n if gauss_l1.shape[1] > gauss[l].shape[1]:\n gauss_l1 = np.delete(gauss_l1, -1, axis=1)\n lap_l = gauss[l] - gauss_l1\n pyramid.append(lap_l)\n return pyramid\n\ndef fused_laplacian_pyramid(gauss_pyramid_mod1, gauss_pyramid_mod2, lap_pyramid_mod1, lap_pyramid_mod2):\n \"\"\"\n A funtion that builds a fused Laplacian pyramid of two modalities of the same image\n\n :param gauss_pyramid_mod1: The Gaussian pyramid of modality 1, a list of grayscale images, the first one in highest resolution\n :param gauss_pyramid_mod2: The Gaussian pyramid of modality 2, a list of grayscale images, the first one in highest resolution\n :param lap_pyramid_mod1: The Laplacian pyramid of modality 1, a list of grayscale images, the last one in highest resolution\n :param lap_pyramid_mod2: The Laplacian pyramid of modality 2, a list of grayscale images, the last one in highest resolution\n :return: The fused Laplacian pyramid of two modalities, a list of grayscale images, the last one in highest resolution,\n \"\"\"\n\n fused_laplacian = []\n len_lap = len(lap_pyramid_mod1)\n for l in range(len_lap):\n fused_laplacian_temp = gauss_pyramid_mod1[len_lap-l-1]*lap_pyramid_mod1[l] + gauss_pyramid_mod2[len_lap-l-1]*lap_pyramid_mod2[l]\n fused_laplacian.append(fused_laplacian_temp)\n return fused_laplacian\n\ndef collapse_pyramid(lap_pyramid, gauss_pyramid):\n \"\"\"\n A function to collapse a Laplacian pyramid in order to recover the enhanced image\n\n :param lap_pyramid: A Laplacian pyramid, a list of grayscale images, the last one in highest resolution\n :param gauss_pyramid: A Gaussian pyramid, a list of grayscale images, the last one in lowest resolution\n :return: A grayscale image\n \"\"\"\n\n image = lap_pyramid[0]\n if len(np.shape(image)) == 3:\n im_r = upsample(gauss_pyramid[-1][:, :, 0])\n im_g = upsample(gauss_pyramid[-1][:, :, 1])\n im_b = upsample(gauss_pyramid[-1][:, :, 2])\n if im_r.shape[0] > image.shape[0]:\n im_r = np.delete(im_r, -1, axis=0)\n im_g = np.delete(im_g, -1, axis=0)\n im_b = np.delete(im_b, -1, axis=0)\n if im_r.shape[1] > image.shape[1]:\n im_r = np.delete(im_r, -1, axis=1)\n im_g = np.delete(im_g, -1, axis=1)\n im_b = np.delete(im_b, -1, axis=1)\n gauss = np.zeros((im_r.shape[0], im_r.shape[1], 3))\n gauss[:, :, 0] = im_r\n gauss[:, :, 1] = im_g\n gauss[:, :, 2] = im_b\n else:\n gauss = upsample(gauss_pyramid[-1])\n if gauss.shape[0] > image.shape[0]:\n gauss = np.delete(gauss, -1, axis=0)\n if gauss.shape[1] > image.shape[1]:\n gauss = np.delete(gauss, -1, axis=1)\n image = image + gauss\n for l in range(1,len(lap_pyramid),1):\n if len(np.shape(image)) == 3:\n im_r = upsample(image[:, :, 0])\n im_g = upsample(image[:, :, 1])\n im_b = upsample(image[:, :, 2])\n if im_r.shape[0] > lap_pyramid[l].shape[0]:\n im_r = np.delete(im_r, -1, axis=0)\n im_g = np.delete(im_g, -1, axis=0)\n im_b = np.delete(im_b, -1, axis=0)\n if im_r.shape[1] > lap_pyramid[l].shape[1]:\n im_r = 
np.delete(im_r, -1, axis=1)\n            im_g = np.delete(im_g, -1, axis=1)\n            im_b = np.delete(im_b, -1, axis=1)\n            pyr_upsampled = np.zeros((im_r.shape[0], im_r.shape[1], 3))\n            pyr_upsampled[:, :, 0] = im_r\n            pyr_upsampled[:, :, 1] = im_g\n            pyr_upsampled[:, :, 2] = im_b\n        else:\n            pyr_upsampled = upsample(image)\n            if pyr_upsampled.shape[0] > lap_pyramid[l].shape[0]:\n                pyr_upsampled = np.delete(pyr_upsampled, -1, axis=0)\n            if pyr_upsampled.shape[1] > lap_pyramid[l].shape[1]:\n                pyr_upsampled = np.delete(pyr_upsampled, -1, axis=1)\n        image = lap_pyramid[l] + pyr_upsampled\n    return image\n\ndef convolve(image, kernel):\n    \"\"\"\n    A function to perform a 2D convolution operation over an image using a chosen kernel.\n\n    :param image: The grayscale image we want to use of dimension (N,M)\n    :param kernel: The convolution kernel of dimension (k,k)\n    :return: The convolved image of dimension (N,M)\n    \"\"\"\n    im_out = convolve2d(image, kernel, mode='same', boundary='symm')\n    return im_out\n\ndef downsample(image, kernel):\n    \"\"\"\n    A function to downsample an image by a factor of 2.\n\n    :param image: The grayscale image we want to use of dimension (N,M)\n    :param kernel: The Gaussian blurring kernel of dimension (k,k)\n    :return: The downsampled image of dimension (N/2,M/2)\n    \"\"\"\n    blur_image = convolve(image, kernel)\n    img_downsampled = blur_image[::2, ::2]\n    return img_downsampled\n\ndef upsample(image):\n    \"\"\"\n    A function to upsample an image by a factor of 2.\n\n    :param image: The grayscale image we want to use of dimension (N,M)\n    :return: The upsampled image of dimension (N*2,M*2)\n    \"\"\"\n\n    #kernel = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]])/12\n    kernel = smooth_gaussian_kernel(0.4)\n\n    img_upsampled = np.zeros((image.shape[0]*2, image.shape[1]*2), dtype=np.float64)\n    img_upsampled[::2, ::2] = image[:, :]\n    img_upsampled = 4 * convolve(img_upsampled, kernel)\n    return img_upsampled\n\ndef classical_gaussian_kernel(k, sigma):\n    \"\"\"\n    A function to generate a classical Gaussian kernel\n\n    :param k: The size of the kernel, an integer\n    :param sigma: standard deviation of the Gaussian distribution\n    :return: A Gaussian kernel, a numpy array of shape (k,k)\n    \"\"\"\n    w = np.linspace(-(k - 1) / 2, (k - 1) / 2, k)\n    x, y = np.meshgrid(w, w)\n    kernel = 0.5*np.exp(-0.5*(x**2 + y**2)/(sigma**2))/(np.pi*sigma**2)\n    return kernel\n\ndef smooth_gaussian_kernel(a):\n    \"\"\"\n    A 5*5 Gaussian kernel to perform smooth filtering.\n\n    :param a: the coefficient of the smooth filter. 
A float usually within [0.3, 0.6]\n    :return: A smoothing Gaussian kernel, a numpy array of shape (5,5)\n    \"\"\"\n    w = np.array([0.25 - a/2.0, 0.25, a, 0.25, 0.25 - a/2.0])\n    kernel = np.outer(w, w)\n    return kernel\n\ndef normalized_local_entropy(image, window_size):\n    \"\"\"\n    A function that computes the local entropy given an image and a window size\n\n    :param image: The grayscale image\n    :param window_size: The size of the window that determines the neighbourhood of a pixel, an integer\n    :return: The local entropy of the image, a grayscale image\n    \"\"\"\n\n    local_entropy = entropy(image, square(window_size))\n    return local_entropy\n\ndef local_contrast(image, window_size):\n    \"\"\"\n    A function that computes the local contrast given an image and a window size\n\n    :param image: The grayscale image\n    :param window_size: The size of the window that determines the neighbourhood of a pixel, an integer\n    :return: The local contrast of the image, a grayscale image\n    \"\"\"\n\n    conv_filter = np.ones((window_size,window_size), dtype=int)\n    local_mean = convolve(image, conv_filter)/(window_size**2)\n    contrast = np.zeros((image.shape[0], image.shape[1]))\n    for x in range(image.shape[0]):\n        for y in range(image.shape[1]):\n            patch = image[max(0, x-int(window_size/2)):min(image.shape[0], x+int(window_size/2)), max(0, y-int(window_size/2)):min(image.shape[1], y+int(window_size/2))]\n            patch = np.square(patch - local_mean[x,y])\n            contrast[x,y] = np.sqrt(np.sum(patch)/(window_size**2))\n    return contrast\n\ndef exposedness(image, sigma=0.2):\n    \"\"\"\n    A function that computes the exposedness\n\n    :param image: The grayscale image\n    :param sigma: A float, it is recommended to set this value to 0.2\n    :return: The exposedness of the image, a grayscale image.\n    \"\"\"\n\n    exposedness = np.exp(-np.square(image - 0.5)/(2*sigma**2))\n    return exposedness\n\ndef visibility(image, kernel1, kernel2):\n    \"\"\"\n    A function that computes the visibility of an image given an image and two Gaussian kernels\n\n    :param image: The grayscale image\n    :param kernel1: The Gaussian kernel to compute the blurred image\n    :param kernel2: The Gaussian kernel to perform the final step of the visibility\n    :return: The visibility, a grayscale image\n    \"\"\"\n\n    img_blur = convolve(image, kernel1)\n    visibility = np.sqrt(convolve(np.square(image - img_blur), kernel2))\n    return visibility\n\ndef weight_combination(entropy, contrast, visibility, alpha1, alpha2, alpha3):\n    \"\"\"\n    Combining the entropy, the contrast and the visibility to build a weight layer\n\n    :param entropy: The local entropy of the image, a grayscale image\n    :param contrast: The local contrast of the image, a grayscale image\n    :param visibility: The visibility of the image, a grayscale image\n    :param alpha1: The weight of the local entropy, a float within [0, 1]\n    :param alpha2: The weight of the local contrast, a float within [0, 1]\n    :param alpha3: The weight of the visibility, a float within [0, 1]\n    :return: Weight map of the image, a grayscale image\n    \"\"\"\n\n    weight = entropy**alpha1 * contrast**alpha2 * visibility**alpha3\n    return weight\n\ndef weight_normalization(weight1, weight2):\n    \"\"\"\n    A function to normalize the weights of each modality so the weights' sum is 1 for each pixel of the image\n\n    :param weight1: The weight of modality 1, a grayscale image\n    :param weight2: The weight of modality 2, a grayscale image\n    :return: Two weights, weight1_normalized and weight2_normalized, respectively the normalized versions of weight1 and weight2, two grayscale 
images.\n \"\"\"\n\n weight1_normalized = weight1 / (weight1 + weight2)\n weight2_normalized = weight2 / (weight1 + weight2)\n return weight1_normalized, weight2_normalized\n\ndef convert_image_to_floats(image):\n \"\"\"\n A function to convert an image to a numpy array of floats within [0, 1]\n\n :param image: The image to be converted\n :return: The converted image\n \"\"\"\n\n if np.max(image) <= 1.0:\n return image\n else:\n return image / 255.0\n\ndef pyramid_fusion(im_intensities, im_dop):\n im_intensities = cv2.imread(im_intensities)\n im_dop = cv2.imread(im_dop)\n kernel = smooth_gaussian_kernel(0.4)\n levels = 4\n window_size = 5\n\n im_mod1 = convert_image_to_floats(im_intensities[:, :, 2])\n im_mod2 = convert_image_to_floats(im_dop[:, :, 1])\n\n # kernels to compute visibility\n kernel1 = classical_gaussian_kernel(5, 2)\n kernel2 = classical_gaussian_kernel(5, 2)\n\n # Computation of local entropy, local contrast and visibility for value channel\n local_entropy_mod1 = normalized_local_entropy(im_mod1, window_size)\n #local_contrast_mod1 = local_contrast(im_mod1, window_size)\n visibility_mod1 = visibility(im_mod1, kernel1, kernel2)\n exposedness_mod1 = exposedness(im_mod1)\n # Combination of local entropy, local contrast and visibility for value channel\n weight_mod1 = weight_combination(local_entropy_mod1, exposedness_mod1, visibility_mod1, 1, 1, 1)\n\n # Computation of local entropy, local contrast and visibility for value channel\n local_entropy_mod2 = normalized_local_entropy(im_mod2, window_size)\n #local_contrast_mod2 = local_contrast(im_mod2, window_size)\n exposedness_mod2 = exposedness(im_mod2)\n visibility_mod2 = visibility(im_mod2, kernel1, kernel2)\n # Combination of local entropy, local contrast and visibility for value channel\n weight_mod2 = weight_combination(local_entropy_mod2, exposedness_mod2, visibility_mod2, 1, 1, 1)\n\n # Normalising weights of value channel and IR image\n weightN_mod1, weightN_mod2 = weight_normalization(weight_mod1, weight_mod2)\n\n # Creating Gaussian pyramids of the weights maps of respectively the value channel and IR image\n gauss_pyr_mod1_weights = gaussian_pyramid(weightN_mod1, kernel, levels)\n gauss_pyr_mod2_weights = gaussian_pyramid(weightN_mod2, kernel, levels)\n\n # Creating Laplacian pyramids of respectively the value channel and IR image\n lap_pyr_mod1 = laplacian_pyramid(im_mod1, kernel, levels)\n lap_pyr_mod2 = laplacian_pyramid(im_mod2, kernel, levels)\n\n # Creating the fused Laplacian of the two modalities\n lap_pyr_fusion = fused_laplacian_pyramid(gauss_pyr_mod1_weights, gauss_pyr_mod2_weights, lap_pyr_mod1, lap_pyr_mod2)\n\n # Creating the Gaussian pyramid of value channel in order to collapse the fused Laplacian pyramid\n gauss_pyr_mod1 = gaussian_pyramid(im_mod1, kernel, levels)\n collapsed_image = collapse_pyramid(lap_pyr_fusion, gauss_pyr_mod1)\n\n im_intensities[:, :, 2] = collapsed_image\n\n return im_intensities[:, :, ::-1].copy()\n\ndef read_image_entropy(path):\n image = cv2.imread(path)\n window_size = 5\n ent_ch1 = normalized_local_entropy(image[:, :, 0], window_size)\n ent_ch2 = normalized_local_entropy(image[:, :, 1], window_size)\n ent_ch3 = normalized_local_entropy(image[:, :, 2], window_size)\n\n entropy_image = image.copy()\n entropy_image[:, :, 0] = ent_ch1\n entropy_image[:, :, 1] = ent_ch2\n entropy_image[:, :, 2] = ent_ch3\n\n return entropy_image[:, :, ::-1].copy()\n\ndef read_image_bgr(path):\n \"\"\" Read an image in BGR format.\n\n Args\n path: Path to the image.\n \"\"\"\n image = 
np.asarray(Image.open(path).convert('RGB'))\n return image[:, :, ::-1].copy()\n\ndef read_image_rgba(path):\n \"\"\" Read an image in RGBA format.\n\n Args\n path: Path to the image.\n \"\"\"\n image = cv2.imread(path)\n return image\n\ndef read_image_fusion(path):\n \"\"\"Read every channel of a fusion image.\n\n Args\n path: Path to the image.\n \"\"\"\n image = imageio.imread(path)\n return image[:, :, ::-1].copy()\n\ndef read_matrix_as_image(path):\n \"\"\"Read every channel of a fusion npy matrix.\n\n Args\n path: Path to the image.\n \"\"\"\n image = np.load(path)\n #img = np.zeros((image.shape[0], image.shape[1],6))\n #img[:,:,:5] = image\n #return img[:, :, ::-1].copy()\n return image[:, :, ::-1].copy()\n\ndef read_rgb_and_polar_images(path_rgb, path_polar):\n \"\"\"Read an RGB image and its polarimetric equivalent.\n\n Args\n path_rgb: Path to the RGB image.\n path_polar: Path to the polarimetric image.\n \"\"\"\n image_rgb = cv2.imread(path_rgb)\n image_rgb_rs = cv2.resize(image_rgb, dsize=(500, 500), interpolation=cv2.INTER_LANCZOS4)\n image_polar = cv2.imread(path_polar)\n image = np.zeros((500, 500, 6), dtype=int)\n image[:, :, :3] = image_rgb_rs\n image[:, :, 3:] = image_polar\n return image[:, :, ::-1].copy()\n\ndef read_rgb_and_polar_images_for_fusion(path_polar, path_rgb):\n \"\"\"Read an RGB image and its polarimetric equivalent.\n\n Args\n path_rgb: Path to the RGB image.\n path_polar: Path to the polarimetric image.\n \"\"\"\n \"\"\"image_rgb = cv2.imread(path_rgb)\n image_rgb_rs = cv2.resize(image_rgb, dsize=(500, 500), interpolation=cv2.INTER_LANCZOS4)\n image_polar = cv2.imread(path_polar)\n if np.shape(image_polar)[2] == 3:\n image = np.zeros((500, 500, 7), dtype=int)\n image[:, :, :3] = image_rgb_rs\n image[:, :, 3:6] = image_polar\n image[:, :, 6] = image_polar[:, :, 2]\n elif np.shape(image_polar)[2] == 4:\n image = np.zeros((500, 500, 7), dtype=int)\n image[:, :, :3] = image_rgb_rs\n image[:, :, 3:] = image_polar\n return image[:, :, ::-1].copy()\"\"\"\n image_rgb = cv2.imread(path_rgb)\n image_polar = cv2.imread(path_polar, cv2.IMREAD_UNCHANGED)\n if image_rgb.shape[0] != image_polar.shape[0] and image_rgb.shape[1] != image_polar.shape[1]:\n image_rgb_rs = cv2.resize(image_rgb, dsize=(500, 500), interpolation=cv2.INTER_LANCZOS4)\n else:\n image_rgb_rs = image_rgb\n if np.shape(image_polar)[2] == 3:\n image = np.zeros((image_polar.shape[0], image_polar.shape[1], 7), dtype=int)\n image[:, :, :3] = image_rgb_rs\n image[:, :, 3:6] = image_polar\n image[:, :, 6] = image_polar[:, :, 2]\n elif np.shape(image_polar)[2] == 4:\n image = np.zeros((image_polar.shape[0], image_polar.shape[1], 7), dtype=int)\n image[:, :, :4] = image_polar\n image[:, :, 4:] = image_rgb_rs\n #image[:, :, 5] = np.zeros((image_polar.shape[0], image_polar.shape[1]), dtype=int)\n return image[:, :, ::-1].copy()\n\ndef preprocess_image(x, mode='caffe'):\n \"\"\" Preprocess an image by subtracting the ImageNet mean.\n\n Args\n x: np.array of shape (None, None, 3) or (3, None, None).\n mode: One of \"caffe\" or \"tf\".\n - caffe: will zero-center each color channel with\n respect to the ImageNet dataset, without scaling.\n - tf: will scale pixels between -1 and 1, sample-wise.\n\n Returns\n The input with the ImageNet mean subtracted.\n \"\"\"\n # mostly identical to \"https://github.com/keras-team/keras-applications/blob/master/keras_applications/imagenet_utils.py\"\n # except for converting RGB -> BGR since we assume BGR already\n x = x.astype(keras.backend.floatx())\n if mode == 'tf':\n x /= 
127.5\n x -= 1.\n elif mode == 'caffe':\n x[..., 0] -= 103.939\n x[..., 1] -= 116.779\n x[..., 2] -= 123.68\n\n return x\n\ndef preprocess_images(x, mode='caffe'):\n \"\"\" Preprocess an image by subtracting the ImageNet mean.\n\n Args\n x: np.array of shape (None, None, 3) or (3, None, None).\n mode: One of \"caffe\" or \"tf\".\n - caffe: will zero-center each color channel with\n respect to the ImageNet dataset, without scaling.\n - tf: will scale pixels between -1 and 1, sample-wise.\n\n Returns\n The input with the ImageNet mean subtracted.\n \"\"\"\n # mostly identical to \"https://github.com/keras-team/keras-applications/blob/master/keras_applications/imagenet_utils.py\"\n # except for converting RGB -> BGR since we assume BGR already\n x[0] = x[0].astype(keras.backend.floatx())\n x[1] = x[1].astype(keras.backend.floatx())\n if mode == 'tf':\n x[0] /= 127.5\n x[0] -= 1.\n x[1] /= 127.5\n x[1] -= 1.\n elif mode == 'caffe':\n x[0][..., 0] -= 103.939\n x[0][..., 1] -= 116.779\n x[0][..., 2] -= 123.68\n x[1][..., 0] -= 103.939\n x[1][..., 1] -= 116.779\n x[1][..., 2] -= 123.68\n\n return x\n\ndef adjust_transform_for_image(transform, image, relative_translation):\n \"\"\" Adjust a transformation for a specific image.\n\n The translation of the matrix will be scaled with the size of the image.\n The linear part of the transformation will adjusted so that the origin of the transformation will be at the center of the image.\n \"\"\"\n\n height, width, channels = image.shape\n\n result = transform\n\n # Scale the translation with the image size if specified.\n if relative_translation:\n result[0:2, 2] *= [width, height]\n\n # Move the origin of transformation.\n result = change_transform_origin(transform, (0.5 * width, 0.5 * height))\n\n return result\n\n\nclass TransformParameters:\n \"\"\" Struct holding parameters determining how to apply a transformation to an image.\n\n Args\n fill_mode: One of: 'constant', 'nearest', 'reflect', 'wrap'\n interpolation: One of: 'nearest', 'linear', 'cubic', 'area', 'lanczos4'\n cval: Fill value to use with fill_mode='constant'\n relative_translation: If true (the default), interpret translation as a factor of the image size.\n If false, interpret it as absolute pixels.\n \"\"\"\n def __init__(\n self,\n fill_mode = 'nearest',\n interpolation = 'linear',\n cval = 0,\n relative_translation = True,\n ):\n self.fill_mode = fill_mode\n self.cval = cval\n self.interpolation = interpolation\n self.relative_translation = relative_translation\n\n def cvBorderMode(self):\n if self.fill_mode == 'constant':\n return cv2.BORDER_CONSTANT\n if self.fill_mode == 'nearest':\n return cv2.BORDER_REPLICATE\n if self.fill_mode == 'reflect':\n return cv2.BORDER_REFLECT_101\n if self.fill_mode == 'wrap':\n return cv2.BORDER_WRAP\n\n def cvInterpolation(self):\n if self.interpolation == 'nearest':\n return cv2.INTER_NEAREST\n if self.interpolation == 'linear':\n return cv2.INTER_LINEAR\n if self.interpolation == 'cubic':\n return cv2.INTER_CUBIC\n if self.interpolation == 'area':\n return cv2.INTER_AREA\n if self.interpolation == 'lanczos4':\n return cv2.INTER_LANCZOS4\n\n\ndef apply_transform(matrix, image, params):\n \"\"\"\n Apply a transformation to an image.\n\n The origin of transformation is at the top left corner of the image.\n\n The matrix is interpreted such that a point (x, y) on the original image is moved to transform * (x, y) in the generated image.\n Mathematically speaking, that means that the matrix is a transformation from the transformed image space to the 
original image space.\n\n Args\n matrix: A homogeneous 3 by 3 matrix holding representing the transformation to apply.\n image: The image to transform.\n params: The transform parameters (see TransformParameters)\n \"\"\"\n output = cv2.warpAffine(\n image,\n matrix[:2, :],\n dsize = (image.shape[1], image.shape[0]),\n flags = params.cvInterpolation(),\n borderMode = params.cvBorderMode(),\n borderValue = params.cval,\n )\n return output\n\n\ndef resize_image(img, min_side=800, max_side=1333):\n \"\"\" Resize an image such that the size is constrained to min_side and max_side.\n\n Args\n min_side: The image's min side will be equal to min_side after resizing.\n max_side: If after resizing the image's max side is above max_side, resize until the max side is equal to max_side.\n\n Returns\n A resized image.\n \"\"\"\n (rows, cols, _) = img.shape\n\n smallest_side = min(rows, cols)\n\n # rescale the image so the smallest side is min_side\n scale = min_side / smallest_side\n\n # check if the largest side is now greater than max_side, which can happen\n # when images have a large aspect ratio\n largest_side = max(rows, cols)\n if largest_side * scale > max_side:\n scale = max_side / largest_side\n\n # resize the image with the computed scale\n img = cv2.resize(img, None, fx=scale, fy=scale)\n\n return img, scale\n\ndef resize_images(img, min_side=800, max_side=1333):\n #min_side = 800, max_side=1333\n \"\"\" Resize an image such that the size is constrained to min_side and max_side.\n\n Args\n min_side: The image's min side will be equal to min_side after resizing.\n max_side: If after resizing the image's max side is above max_side, resize until the max side is equal to max_side.\n\n Returns\n A resized image.\n \"\"\"\n (rows1, cols1, _) = img[0].shape\n (rows2, cols2, _) = img[1].shape\n\n smallest_side1 = min(rows1, cols1)\n smallest_side2 = min(rows2, cols2)\n\n # rescale the image so the smallest side is min_side\n scale1 = min_side / smallest_side1\n scale2 = min_side / smallest_side2\n\n # check if the largest side is now greater than max_side, which can happen\n # when images have a large aspect ratio\n largest_side1 = max(rows1, cols1)\n largest_side2 = max(rows2, cols2)\n if largest_side1 * scale1 > max_side:\n scale1 = max_side / largest_side1\n if largest_side2 * scale2 > max_side:\n scale2 = max_side / largest_side2\n\n # resize the image with the computed scale\n img[0] = cv2.resize(img[0], None, fx=scale1, fy=scale1)\n img[1] = cv2.resize(img[1], None, fx=scale2, fy=scale2)\n\n return img, [scale1, scale2]\n","sub_path":"keras_retinanet/utils/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":27753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"474720762","text":"# coding=utf-8\n# ------------------------------------\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n# ------------------------------------\n\n\"\"\"\nFILE: sample_import_train_deploy_project_async.py\n\nDESCRIPTION:\n This sample demonstrates how to import a project.\n\nUSAGE:\n python sample_import_train_deploy_project_async.py\n\n Set the environment variables with your own values before running the sample:\n 1) AZURE_CONVERSATIONS_ENDPOINT - endpoint for your CLU resource.\n 2) AZURE_CONVERSATIONS_KEY - API key for your CLU resource.\n 3) AZURE_CONVERSATIONS_PROJECT_NAME - project name for your CLU conversations project.\n\"\"\"\n\nimport asyncio\n\nasync def sample_import_project():\n import os\n from 
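The scale logic in resize_image first targets min_side and then clamps on max_side; tracing it by hand for a 1000x3000 image shows the clamp firing:

rows, cols = 1000, 3000
min_side, max_side = 800, 1333
scale = min_side / min(rows, cols)        # 0.8 -> would give 800 x 2400
if max(rows, cols) * scale > max_side:    # 2400 > 1333, so the clamp applies
    scale = max_side / max(rows, cols)    # ~0.4443
print(round(scale, 4), rows * scale, cols * scale)  # smallest side ends up below min_side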
azure.core.credentials import AzureKeyCredential\n from azure.ai.language.conversations.authoring.aio import ConversationAuthoringClient\n\n clu_endpoint = os.environ[\"AZURE_CONVERSATIONS_ENDPOINT\"]\n clu_key = os.environ[\"AZURE_CONVERSATIONS_KEY\"]\n\n project_name = \"test_project\"\n\n exported_project_assets = {\n \"projectKind\": \"Conversation\",\n \"intents\": [{\"category\": \"Read\"}, {\"category\": \"Delete\"}],\n \"entities\": [{\"category\": \"Sender\"}],\n \"utterances\": [\n {\n \"text\": \"Open Blake's email\",\n \"dataset\": \"Train\",\n \"intent\": \"Read\",\n \"entities\": [{\"category\": \"Sender\", \"offset\": 5, \"length\": 5}],\n },\n {\n \"text\": \"Delete last email\",\n \"language\": \"en-gb\",\n \"dataset\": \"Test\",\n \"intent\": \"Delete\",\n \"entities\": [],\n },\n ],\n }\n\n client = ConversationAuthoringClient(\n clu_endpoint, AzureKeyCredential(clu_key)\n )\n poller = await client.begin_import_project(\n project_name=project_name,\n project={\n \"assets\": exported_project_assets,\n \"metadata\": {\n \"projectKind\": \"Conversation\",\n \"settings\": {\"confidenceThreshold\": 0.7},\n \"projectName\": \"EmailApp\",\n \"multilingual\": True,\n \"description\": \"Trying out CLU\",\n \"language\": \"en-us\",\n },\n \"projectFileVersion\": \"2022-05-01\",\n },\n )\n response = await poller.result()\n print(response)\n\n\nasync def sample_train_model():\n import os\n from azure.core.credentials import AzureKeyCredential\n from azure.ai.language.conversations.authoring.aio import ConversationAuthoringClient\n\n clu_endpoint = os.environ[\"AZURE_CONVERSATIONS_ENDPOINT\"]\n clu_key = os.environ[\"AZURE_CONVERSATIONS_KEY\"]\n\n project_name = \"test_project\"\n\n client = ConversationAuthoringClient(\n clu_endpoint, AzureKeyCredential(clu_key)\n )\n\n poller = await client.begin_train(\n project_name=project_name,\n configuration={\"modelLabel\": \"sample\", \"trainingMode\": \"standard\"},\n )\n\n response = await poller.result()\n print(response)\n\n\nasync def sample_deploy_model():\n import os\n from azure.core.credentials import AzureKeyCredential\n from azure.ai.language.conversations.authoring.aio import ConversationAuthoringClient\n\n clu_endpoint = os.environ[\"AZURE_CONVERSATIONS_ENDPOINT\"]\n clu_key = os.environ[\"AZURE_CONVERSATIONS_KEY\"]\n\n project_name = \"test_project\"\n deployment_name = \"production\"\n\n client = ConversationAuthoringClient(\n clu_endpoint, AzureKeyCredential(clu_key)\n )\n\n poller = await client.begin_deploy_project(\n project_name=project_name,\n deployment_name=deployment_name,\n deployment={\"trainedModelLabel\": \"sample\"},\n )\n response = await poller.result()\n print(response)\n\n\nasync def main():\n await sample_import_project()\n await sample_train_model()\n await sample_deploy_model()\n\nif __name__ == '__main__':\n asyncio.run(main())\n","sub_path":"sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/authoring/sample_import_train_deploy_project_async.py","file_name":"sample_import_train_deploy_project_async.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"96264030","text":"import tkinter\nokno = tkinter.Tk()\nokno.title('Názov okna')\nokno.config(width=500, height=100)\n\nCanvas.create_*()\n\npopisVstupu = \\\ntkinter.Label(okno, text='zadaj meno: ')\n\n\nvstupnaHodnota = tkinter.StringVar()\nvystupnaHodnota = tkinter.StringVar()\n\nvstup = \\\ntkinter.Entry(okno, 
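Each of the three sample steps above re-reads the same two environment variables; a small helper (name and wording are mine, the variable names come from the sample's own docstring) that fails fast when one is unset:

import os

def clu_settings():
    # Fail fast with a clear message instead of a bare KeyError mid-sample.
    try:
        return (os.environ["AZURE_CONVERSATIONS_ENDPOINT"],
                os.environ["AZURE_CONVERSATIONS_KEY"])
    except KeyError as exc:
        raise RuntimeError("set the %s environment variable first" % exc.args[0]) from exc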
textvariable=vstupnaHodnota)\nvystup = \\\ntkinter.Entry(okno, textvariable=vystupnaHodnota)\n\ntlacidlo = tkinter.Button\\\n(okno, text='ohviezdickuj', command=lambda: premenuj(vstupnaHodnota.get()))\n\nkoniec = tkinter.Button\\\n(okno, text='koniec', command=okno.destroy)\n\ndef premenuj(meno):\n ret = ''\n ret = ret + '*'\n for i in (meno):\n ret = ret + i\n ret = ret + '*'\n vystupnaHodnota.set(ret)\n\n\ncommand = \\\nlambda: premenuj(vstupnaHodnota.get())\n\npopisVstupu.grid(row=0, column=0)\nvstup.grid(row=0, column=1)\ntlacidlo.grid(row=1, column=0, columnspan=12)\nvystup.grid(row=2, column=1)\nkoniec.grid(row=3, column=0, columnspan=2)\n\nokno.mainloop()","sub_path":"okna.py","file_name":"okna.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"601111244","text":"from django.shortcuts import render\nimport random\n\nfrom django.http import HttpResponse\nfrom . import forms\nfrom.models import Document\nfrom django.template import loader\n\n\ndef importLogs(request):\n random.seed(10)\n if request.method == 'POST':\n form = forms.DocumentForm(request.POST,request.FILES)\n\n if form.is_valid():\n name = form.cleaned_data['document']\n print(str(name))\n\n save = form.save()\n id = save.pk\n print(id)\n context = {'form' : form}\n else:\n form = forms.DocumentForm()\n text = \"This fileformat is not supported. Please use a CSV or XES file.\"\n context = {'form' : form, 'text' : text}\n\n\n\n else:\n form = forms.DocumentForm()\n context = {'form' : form}\n\n template = loader.get_template('main.html')\n return HttpResponse(template.render(context, request))\n","sub_path":"queuemining/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"303284959","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@version: 3.5\n@author: morgana\n@license: Apache Licence \n@contact: vipmorgana@gmail.com\n@site: \n@software: PyCharm\n@file: FunctionCheckSpace.py\n@time: 2017/6/25 下午4:19\n写函数,检查用户传入的对象(字符串、列表、元组)的每一个元素是否含有空内容。\n\"\"\"\ndef CheckSpace(obj):\n for i in obj:\n if i.isspace():\n return(\"%s %s包含有空内容 \" % (i, obj))\n\n\n\n\nprint(CheckSpace(\"Joseph Morgana is good-looking \"))","sub_path":"Morgana/D20170624review/FunctionCheckSpace.py","file_name":"FunctionCheckSpace.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"420732658","text":"# Michael Sanchez\n#I pledge my honor that I have abided by the Stevens Honor System\nimport sys\nimport importlib\n\nFib = \"\"\"\n00 read r1\n01 copy r10 r1\n02 setn r1 0\n03 copy r9 r1\n04 setn r2 1\n05 jeqzn r10 15\n06 setn r8 1\n07 add r9 r9 r8\n08 nop\n09 write r1\n10 add r1 r1 r2\n11 sub r7 r10 r9\n12 jeqzn r7 18\n13 write r2\n14 add r9 r9 r8\n15 add r2 r1 r2\n16 sub r7 r10 r9\n17 jgtzn r7 07\n18 halt\n\"\"\"\n\n# Set this variable to whichever program you want to execute\n# when this file is loaded.\nRunThis = Fib\n\n# Choose whether to use debug mode; uncomment one of the following lines.\nMode = ['-n'] # not debug mode, \n#Mode = ['-d'] # debug mode\n#Mode = [] # prompt for whether to enter debug mode\n\n\n# When you press F5 in IDLE, the following code will\n# load the assembler and simulator, then run them.\n# You can interrupt with Ctrl-C; then re-start Python.\n\nif __name__ == \"__main__\" : \n import hmmmAssembler ; 
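The CheckSpace helper above returns on the first whitespace-only element it meets; a variant that reports every such position instead (a sketch, the helper name is mine):

def check_space_all(obj):
    # Indices of elements in obj (string, list, or tuple) that are pure whitespace.
    return [i for i, item in enumerate(obj) if str(item).isspace()]

print(check_space_all("Joseph Morgana"))  # [6] -- the space between the names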
importlib.reload(hmmmAssembler)\n import hmmmSimulator ; importlib.reload(hmmmSimulator)\n hmmmAssembler.main(RunThis) # assemble input into machine code file out.b\n hmmmSimulator.main(Mode) # run the machine code in out.b\n\n\n","sub_path":"lab8.py","file_name":"lab8.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"358867032","text":"#!/usr/bin/env python\n\nimport rospy, math, random\nfrom std_msgs.msg import String\n\ndef dummyTalker():\n pub = rospy.Publisher('correlationTables', String, queue_size=10)\n corr = [[\"10\", \"24\" , 0], [\"11\", \"23\" , 1],[\"11\", \"22\" , 0], [\"10\", \"23\" , 0],\n [\"10\", \"22\" , 1],[\"11\", \"24\" , 1]]\n rospy.init_node('dummyTalker', anonymous=True)\n rate = rospy.Rate(10) # 10hz\n corr_msg1 = \"\"\n while not rospy.is_shutdown():\n for i in range (len(corr)):\n corr[i][2] = abs(corr[i][2] - random.uniform(0,0.4))\n for i in range (len(corr)):\n for j in range(len(corr[i])):\n corr_msg1 += \" \" + str(corr[i][j])\n hello_msg1 = corr_msg1\n rospy.loginfo(hello_msg1 + \"\\n\")\n pub.publish(hello_msg1)\n rate.sleep()\n\nif __name__ == '__main__':\n try:\n dummyTalker()\n except rospy.ROSInterruptException:\n pass","sub_path":"scripts/talkerTableMatcher.py","file_name":"talkerTableMatcher.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"649734262","text":"#例1:发送data表单数据,通过POST发送\n\nimport urllib\nimport urllib2\n\nurl=\"http://www.someserver.com/register.cgi\"\n\nvalues={\n 'name':'NNH',\n 'location':'XJ',\n 'language':'Python'}\n\ndata=urllib.urlencode(values) #编码工作\nreq=urllib2.Request(url,data) #发送请求同时传data表单\nresponse=urllib2.urlopen(req) #接收反馈的消息\nthe_page=response.read() #读取反馈的消息\n","sub_path":"urllib2_test/urllib2_test03.py","file_name":"urllib2_test03.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"533631575","text":"import firebase_admin\nfrom firebase_admin import credentials, firestore\nfrom tweepy import Stream\nfrom tweepy import OAuthHandler\nfrom tweepy.streaming import StreamListener\nfrom collections import Counter\nimport tweepy\nimport argparse\nimport io\nimport json\nimport os\nfrom google.cloud import language\nimport numpy\nimport six\nimport random\nimport json\n#from classify-nlp import classify\n\nimport dotenv \nload_dotenv()\n#consumer key, consumer secret, access token, access secret.\nckey=os.getenv(\"API_KEY\")\ncsecret=os.getenv(\"API_SECRET_KEY\")\natoken=os.getenv(\"ACCESS_TOKEN\")\nasecret=os.getenv(\"ACCESS_SECRET_TOKEN\")\n\ngcloud_json_address = os.getenv(\"gc_json\")\n\ncred = credentials.Certificate(gcloud_json_address)\ndefault_app = firebase_admin.initialize_app(cred)\n\ndb = firestore.client()\n\n\ndef classify(text, verbose=True):\n \"\"\"Classify the input text into categories. 
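The urllib2 form-POST above is Python 2 only; under Python 3 the same request uses urllib.request and urllib.parse, and the body must be bytes:

from urllib import parse, request

url = "http://www.someserver.com/register.cgi"   # placeholder host from the sample
values = {'name': 'NNH', 'location': 'XJ', 'language': 'Python'}

data = parse.urlencode(values).encode('ascii')   # POST bodies must be bytes in Python 3
req = request.Request(url, data)                 # a data argument makes this a POST
with request.urlopen(req) as response:           # only succeeds against a real host
    the_page = response.read()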
\"\"\"\n\n language_client = language.LanguageServiceClient()\n\n document = language.types.Document(\n content=text,\n type=language.enums.Document.Type.PLAIN_TEXT)\n response = language_client.classify_text(document)\n categories = response.categories\n\n result = {}\n\n for category in categories:\n # Turn the categories into a dictionary of the form:\n # {category.name: category.confidence}, so that they can\n # be treated as a sparse vector.\n result[category.name] = category.confidence\n\n if verbose:\n print(text)\n for category in categories:\n print(u'=' * 20)\n print(u'{:<16}: {}'.format('category', category.name))\n print(u'{:<16}: {}'.format('confidence', category.confidence))\n\n return result\n\n\n\ndef authenticate(ckey, csecret, atoken, asecret):\n auth = tweepy.OAuthHandler(ckey, csecret)\n auth.set_access_token(atoken, asecret)\n api = tweepy.API(auth)\n\n return api\n\ndef get_users_most_recent_tweets(api):\n\ttimeline = api.home_timeline()\n\tarr = []\n\n\tfor t in timeline:\n\t\tstatus = api.get_status(t.id, tweet_mode=\"extended\")\n\t\t#arr.append(status.full_text)\n\t\ttry:\n\t\t\tarr.append(status.retweeted_status.full_text)\n\t\texcept AttributeError: # Not a Retweet\n\t\t\tif len(list(str(status.full_text))) >= 100:\n\t\t\t\tarr.append(status.full_text.strip(\"'\"))\n\n\tprint(arr)\n\n\tclassy = [classify(tweet) for tweet in arr]\n\tprint(classy)\n\n\tmost_common_topic = Counter(classy).most_common(1)[0][0]\n\treturn most_common_topic\n\ndef report_user_on_firebase(db, screen_name=\"weitingyp\"):\n\tapi = authenticate(ckey, csecret, atoken, asecret)\n\tname = api.get_user(screen_name).name\n\n\t#screen_name = \"weitingyp\"\n\t#name = \"Wei-Ting\"\n\n\tapi = authenticate(ckey, csecret, atoken, asecret)\n\tmct = get_users_most_recent_tweets(api)\n\t\n\t######## Yet to fix #####\n\tdoc_ref = db.collection(u'Users').document(screen_name)\n\tdoc_ref.set({\n \tu'screen_name': screen_name,\n \tu'name': name,\n \tu'mct': mct\n\t})\n\n\tprint(\"User \"+scree_name+\" Added to Firestore\")\n\ndef make_search_on_topic(db, screen_name=\"weitingyp\"):\n\tusers_ref = db.collection(u'Users').document(screen_name) # where(u'mct', u'==', u'/Finance/Investing/Commodities & Futures Trading').stream() #.document(scree_name)\n\tdoc = users_ref.get()\n\t\n\t#print(doc.get('screen_name'))\n\n\treturn doc\n\n\ndef ethnicity(n):\n\traces = {'White': 0, 'Latinx': 0, 'Black':0, 'Asian':0}\n\tarr = ['White', 'White', 'White', 'White', 'Black', 'Black', 'Latinx']\n\n\tfor i in range(n):\n\t\traces[arr[random.randint(0, 6)]] += 1\n\n\t# Gets the least common of the races\n\tleast_common_race = Counter(races).most_common()[:-2:-1][0][0]\n\tprint(Counter(races))\n\tprint(least_common_race)\n\n\treturn least_common_race\n\n\n\n\ndef make_search(doc, db):\n\tapi = authenticate(ckey, csecret, atoken, asecret)\n\ttweets = api.search(doc.get('mct'))\n\tlcr = ethnicity(len(tweets))\n\n\t#where(u'mct', u'==', u'/Finance/Investing/Commodities & Futures Trading').stream()\n\n\t# See whether there's a tweet that matches the lack of ethnicity in the conversation\n\tif doc.get('race') == lcr:\n\t\t### Push tweets[0] to notification.\n\t\tprint(tweets[0].user.screen_name, tweets[0].id) # These two elements are enough to produce a twitter link to the tweet\n\n\tref_link = 'https://www.twitter.com/{}/status/{}'.format(tweets[0].user.screen_name, tweets[0].id)\n\t\n\tprint(ref_link)\n\n\treturn ref_link\n\n\n#report_user_on_firebase(db)\nmake_search(make_search_on_topic(db), db)\n\n\n# Need to add credentials 
to gitignore\n\n\n\n# # Adds notification to the notifications stack\n# @app.route('/notifications', methods = ['POST'])\n# def tweet(username, tweet_id):\n# \"\"\"\n# Render tweet and thread, e.g. https://twitter.com/chrissyfarr/status/1309907362086612993\n# \"\"\"\n# #TODO: replace with thread details/parameters\n# return render_template('index.html')\n\n","sub_path":"react-flask-app/api/push_notification.py","file_name":"push_notification.py","file_ext":"py","file_size_in_byte":4703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"214948383","text":"import os\nimport tempfile\nimport argparse\n\n\nclass FileReader:\n\n def __init__(self, file_path):\n self.file_path = file_path\n\n\n def read (self, key):\n if not os.path.exists(self.file_path):\n return (\"\")\n with open (self.file_path, \"r\") as f:\n for line in f:\n key_value = line.split(\";\")\n if len(key_value) >=2 and key_value[0] == key:\n yield key_value[1]\n\n\n def write (self, key, value):\n with open (self.file_path, \"a\") as f:\n f.write('{0};{1}'.format(key, value))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--key\", nargs=1)\n parser.add_argument(\"--value\", nargs='?')\n return parser.parse_args()\n\n\nstorage_path = os.path.join(tempfile.gettempdir(), 'storage.data')\n\nfile = FileReader(storage_path)\n\nargs = parse_args()\nkey = args.key[0]\nvalue = args.value\n\nmust_read = value is None\n\nif must_read:\n print(f\"reading from {storage_path}, key {key}\")\n for value in file.read(key):\n print(f\"found value {value}\")\n print(\"done\")\nelse:\n print(f\"writing to {storage_path}, key {key} value {value}\")\n file.write(key, value)\n print(\"done\")\n\n","sub_path":"src_course1/task7/solution2.py","file_name":"solution2.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"454208769","text":"import numpy as np\nimport math as m\nfrom pyIbex import *\nfrom vibes import vibes\nimport time\nfrom SaveLoadData import *\n\n\nif __name__ == '__main__':\n\tA=[1,1]\n\tB=[0,-1]\n\tC=[-1,0]\n\n\tvibes.beginDrawing()\n\tvibes.newFigure('TestCase1')\n\tvibes.setFigureProperties({'x': 0, 'y': 0,'width': 500, 'height': 500})\n\tvibes.axisLimits(-25, 25, -25, 25)\n\tt,dt,Field,inc,dif,x,y,xmp,ymp,xm,ym = loadData(\"rot1.pckl\")\n\tt,dt,Field,inc,dif,x2,y2,xmp,ymp,xm,ym = loadData(\"rot2.pckl\")\n\ttime.sleep(15)\n\tfor i in range(len(t)):\n\n\t\tvibes.drawCircle(x[i], y[i], 1, 'black[green]')\n\t\tvibes.drawCircle(x2[i], y2[i], 1, 'black[red]')\n\t\tvibes.drawCircle(xm, ym, 0.4, 'black[blue]')\n\t\tvibes.drawArrow([20, 15], [15, 15], 1, 'black[black]')\n\t\tvibes.drawArrow([20, 10], [15, 10], 1, 'black[black]')\n\t\tvibes.drawArrow([20, 5], [15, 5], 1, 'black[black]')\n\t\tvibes.drawArrow([20, 0], [15, 0], 1, 'black[black]')\n\t\tvibes.drawArrow([20, -5], [15, -5], 1, 'black[black]')\n\t\ttime.sleep(0.001)\n\t\tvibes.clearFigure()\n\tvibes.endDrawing()\n","sub_path":"Code/Other/AnimationCylindre.py","file_name":"AnimationCylindre.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"143062714","text":"import itertools\nfrom utils.key_utils import *\nfrom openers.value_data import *\n\n\ndef get_score(heroes_subscore, matchup_subscore, combo_subscore, faction_subscore, heroes_coefficient,\n matchup_coefficient, combo_coefficient, 
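get_users_most_recent_tweets above feeds classify's dict results straight into Counter, which requires hashable items and would raise TypeError on dicts; summing the per-category confidences is one working alternative (pure-Python sketch, no API calls):

from collections import Counter

def most_common_topic(per_tweet_results):
    # per_tweet_results: one {category_name: confidence} dict per tweet,
    # i.e. the shape classify() returns above.
    totals = Counter()
    for result in per_tweet_results:
        totals.update(result)            # update() with a mapping sums the values
    return totals.most_common(1)[0][0] if totals else None

fake = [{'/News': 0.9}, {'/News': 0.6, '/Sports': 0.8}]
print(most_common_topic(fake))           # /News (0.9 + 0.6 beats 0.8)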
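A minimal round trip through the FileReader key-value store above, assuming the class is in scope; note that write() appends verbatim, so the caller supplies the line terminator:

import os, tempfile

path = os.path.join(tempfile.gettempdir(), 'storage_demo.data')
store = FileReader(path)                 # the class defined above
store.write('lang', 'python\n')
store.write('lang', 'go\n')
print(list(store.read('lang')))          # ['python\n', 'go\n'] -- every value ever written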
faction_coefficient):\n return heroes_subscore * heroes_coefficient + matchup_subscore * matchup_coefficient + \\\n combo_subscore * combo_coefficient + faction_subscore * faction_coefficient\n\n\ndef predict(radiant, dire, heroes_coefficient=PREDICTION_HEROES_COEFFICIENT,\n matchup_coefficient=PREDICTION_MATCHUP_COEFFICIENT, combo_coefficient=PREDICTION_COMBO_COEFFICIENT,\n faction_coefficient=PREDICTION_FACTION_COEFFICIENT):\n radiant_heroes_subscore = 0\n dire_heroes_subscore = 0\n radiant_matchup_subscore = 0\n dire_matchup_subscore = 0\n radiant_combo_subscore = 0\n dire_combo_subscore = 0\n radiant_faction_subscore = faction_values[RADIANT_KEY]\n dire_faction_subscore = faction_values[DIRE_KEY]\n\n for hero in radiant:\n radiant_heroes_subscore += hero_values[hero]\n\n for hero in dire:\n dire_heroes_subscore += hero_values[hero]\n\n for radiant_hero in radiant:\n for dire_hero in dire:\n matchup_key = make_matchup_key(radiant_hero, dire_hero)\n radiant_hero_value_key = make_value_key(radiant_hero)\n dire_hero_value_key = make_value_key(dire_hero)\n radiant_matchup_subscore += matchup_values[matchup_key][radiant_hero_value_key]\n dire_matchup_subscore += matchup_values[matchup_key][dire_hero_value_key]\n\n for combo in itertools.combinations(radiant, 2):\n hero1 = combo[0]\n hero2 = combo[1]\n combo_key = make_combo_key(hero1, hero2)\n radiant_combo_subscore += combo_values[combo_key]\n\n for combo in itertools.combinations(dire, 2):\n hero1 = combo[0]\n hero2 = combo[1]\n combo_key = make_combo_key(hero1, hero2)\n dire_combo_subscore += combo_values[combo_key]\n\n radiant_score = get_score(radiant_heroes_subscore, radiant_matchup_subscore, radiant_combo_subscore,\n radiant_faction_subscore, heroes_coefficient, matchup_coefficient,\n combo_coefficient, faction_coefficient)\n dire_score = get_score(dire_heroes_subscore, dire_matchup_subscore, dire_combo_subscore, dire_faction_subscore, heroes_coefficient, matchup_coefficient, combo_coefficient,\n faction_coefficient)\n return radiant_score, dire_score\n","sub_path":"src/utils/prediction_utils.py","file_name":"prediction_utils.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"167649975","text":"'''\nUtilities for working with Braille.\n\nThis module can represent Braille in two different ways:\neither as Unicode characters, or in binary form.\n\nThe eight dots of extended Braille are numbered as follows:\n``14``\n``25``\n``36``\n``78``\nTraditional Braille uses only the first six dots.\nThe letter D, for example, is represented by the\nBraille pattern\n``**``\n``.*``\n``..``\nwhere stars represent raised dots. 
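get_score above is a plain weighted sum of the four subscores; a worked example with illustrative numbers (not the module's real PREDICTION_* coefficients):

# Illustrative numbers only -- the real coefficients live in openers.value_data.
heroes, matchup, combo, faction = 10.0, 4.0, 2.0, 1.0
c_heroes, c_matchup, c_combo, c_faction = 1.0, 0.5, 0.25, 0.1
score = (heroes * c_heroes + matchup * c_matchup
         + combo * c_combo + faction * c_faction)
print(score)  # 12.6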
Since dots\n1,4,5 are raised, the binary representation is\n011001, or 25 in base 10.\n\n >>> braille_converter('binary','english')(int('011001',2))\n 'D'\n >>> braille_converter('unicode','binary')('\\u2819')\n 25\n >>> braille_converter('english','binary')('D')\n 25\n >>> grid = [[1,0,0,1],[0,0,1,0],[0,0,1,0]]\n >>> l2e = braille_converter('list','english')\n >>> [l2e(l) for l in braille_divide_grid(grid)]\n ['A', 'S']\n >>> grid[0][2] = None\n >>> [braille_possible_letters(*braille_list_to_binary_mask(l)) for l in braille_divide_grid(grid)]\n [['A'], ['P', 'S']]\n'''\n\nbraille_alphabet = '\\u2801\\u2803\\u2809\\u2819\\u2811\\u280b\\u281b\\u2813\\u280a\\u281a\\u2805\\u2807\\u280d\\u281d\\u2815\\u280f\\u281f\\u2817\\u280e\\u281e\\u2825\\u2827\\u283a\\u282d\\u283d\\u2835'\nbraille_binary = [ord(ch)-0x2800 for ch in braille_alphabet]\n\nbraille_representations = ['binary','english','list','unicode']\n\ndef braille_converter(from_rep,to_rep):\n '''\n Returns a function that converts from the Braille representation\n ``from_rep`` to the Braille representation ``to_rep``\n '''\n if from_rep==to_rep:\n return lambda x : x\n if from_rep=='binary':\n if to_rep=='english':\n return braille_binary_to_english\n elif to_rep=='list':\n return braille_binary_to_list\n elif to_rep=='unicode':\n return braille_binary_to_unicode\n else:\n raise ValueError('Invalid braille representation %s'%to_rep)\n elif to_rep=='binary':\n if from_rep=='english':\n return braille_english_to_binary\n elif from_rep=='list':\n return braille_list_to_binary\n elif from_rep=='unicode':\n return braille_unicode_to_binary\n else:\n raise ValueError('Invalid braille representation %s'%from_rep)\n else:\n return lambda x, f1=braille_converter(from_rep,'binary'), f2=braille_converter('binary',to_rep): f2(f1(x))\n\ndef _braille_validate_binary(s):\n if s<0 or s>=0x100:\n raise ValueError('Invalid binary Braille sequence')\n\ndef braille_binary_to_unicode(code):\n _braille_validate_binary(code)\n return chr(0x2800+code)\n\ndef braille_binary_to_english(code):\n _braille_validate_binary(code)\n return chr(0x41+braille_binary.index(code))\n\ndef braille_binary_to_list(code):\n _braille_validate_binary(code)\n return [(code>>i)&1 for i in range(6)]\n\ndef braille_english_to_binary(ch):\n i = ord(ch)-0x41\n if i<0 or i>=26:\n raise ValueError(\"Invalid English letter\")\n return braille_binary[i]\n\ndef braille_unicode_to_binary(br):\n i = ord(br)-0x2800\n if i<0 or i>=0x100:\n raise ValueError(\"Invalid Braille unicode character\")\n return i\n\ndef braille_list_to_binary(l):\n if len(l)!=6 and len(l)!=8:\n raise ValueError(\"Invalid Braille list length\")\n return sum(bool(l[i])< Check hashtag selecting\")\n self.errorMessage.exec()\n return \"ERROR\"\n finally:\n return dicts\n\n def filterComment(self, comment):\n start = 0\n end = 0\n\n list_comment = []\n\n for m in range(len(comment)):\n if (comment[m] == \"[\"):\n start = m + 1\n\n elif (comment[m] == \"]\"):\n end = m\n list_comment.append(comment[start:end])\n start = 0\n end = 0\n\n return list_comment\n\n def launcherHandler(self):\n followValue = self.checkBoxFollow.isChecked()\n likeValue = self.checkBoxLike.isChecked()\n commentValue = self.checkBoxComment.isChecked()\n\n boysValue = self.checkBoxBoys.isChecked()\n girlsValue = self.checkBoxBoys.isChecked()\n\n langList = []\n\n for s in self.lang_holder.values():\n s = s.currentText()\n\n if(s == \"All\"):\n langList = list(tuplesWithCountries)\n langList.remove(\"None\")\n langList.remove(\"All\")\n break\n\n elif(s 
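filterComment above slices out every substring between square brackets; the same logic as a standalone function (the method itself needs a widget instance, so this re-implementation is easier to test):

def filter_comment(comment):
    # Same bracket-slicing walk as settingWindow.filterComment above.
    out, start = [], 0
    for i, ch in enumerate(comment):
        if ch == '[':
            start = i + 1
        elif ch == ']':
            out.append(comment[start:i])
    return out

print(filter_comment("[nice shot!][love it]"))  # ['nice shot!', 'love it']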
== \"None\" or s == None):\n continue\n\n elif(s not in langList):\n langList.append(s)\n\n hashtagValueList = self.hashtagField.toPlainText()\n commentValueList = self.commentField.toPlainText()\n\n hashtagDict = self.filterHastag(hashtagValueList)\n\n commentValueList = self.filterComment(commentValueList)\n\n self.workerT = workerLaunch(hashtagDict,followValue,likeValue,langList,girlsValue,boysValue,commentValue,commentValueList)\n self.workerT.start()\n \n def paintEvent(self, Event):\n bg_rect_title = QPainter(self)\n bg_rect_title.setPen(QPen(QColor.fromRgb(91, 170, 234), 2, Qt.SolidLine))\n bg_rect_title.setBrush(QBrush(QColor.fromRgb(255, 255, 255), Qt.SolidPattern))\n bg_rect_title.drawRoundedRect(8, 10, 835, 76, 7, 7)\n\n bg_rect_info = QPainter(self)\n bg_rect_info.setPen(QPen(QColor.fromRgb(91, 170, 234), 2, Qt.SolidLine))\n bg_rect_info.setBrush(QBrush(QColor.fromRgb(255, 255, 255), Qt.SolidPattern))\n bg_rect_info.drawRoundedRect(8, 95, 835, 110, 7, 7)\n\n bg_rect_setting = QPainter(self)\n bg_rect_setting.setPen(QPen(QColor.fromRgb(91, 170, 234), 2, Qt.SolidLine))\n bg_rect_setting.setBrush(QBrush(QColor.fromRgb(255, 255, 255), Qt.SolidPattern))\n bg_rect_setting.drawRoundedRect(8, 215, 835, 450, 7, 7)\n\n def UISetting(self):\n self.setFixedSize(850, 700)\n\n self.setWindowTitle(\"Settings\")\n\n usernameText = QLabel(\"Username:\", self)\n fontUS = usernameText.font()\n fontUS.setLetterSpacing(QFont.AbsoluteSpacing, 2)\n usernameText.setFont(fontUS)\n usernameText.resize(550, 100)\n usernameText.setObjectName(\"usernameText\")\n usernameText.move(15, 1)\n\n usernameTitle = QLabel(username, self)\n fontUS = usernameTitle.font()\n fontUS.setLetterSpacing(QFont.AbsoluteSpacing, 2)\n usernameTitle.setFont(fontUS)\n usernameTitle.resize(550, 100)\n usernameTitle.setObjectName(\"usernameTitle\")\n usernameTitle.move(250, 1)\n\n textinfo = QLabel(\"Account data\", self)\n textinfo.resize(200, 40)\n fontIn = textinfo.font()\n fontIn.setLetterSpacing(QFont.AbsoluteSpacing, 2)\n textinfo.setFont(fontIn)\n textinfo.setObjectName(\"textInfo\")\n textinfo.move(15, 100)\n\n followersNrString = \"Followers: {}\".format(followers)\n followersNr = QLabel(followersNrString, self)\n followersNr.resize(165, 45)\n followersNr.setObjectName(\"followNr\")\n followersNr.move(15, 150)\n\n followingNrString = \"Following: {}\".format(following)\n followingNr = QLabel(followingNrString, self)\n followingNr.setAlignment(Qt.AlignCenter)\n followingNr.resize(165, 45)\n followingNr.setObjectName(\"followNr\")\n followingNr.move(200, 150)\n\n followersCSVText = QLabel(\"Open 'Followers' CSV file\", self)\n followersCSVText.resize(180, 100)\n followersCSVText.setObjectName(\"followersCSVText\")\n followersCSVText.move(570, 130)\n\n openFollowersCSV = QPushButton(\"Open\", self)\n openFollowersCSV.setFixedSize(70, 30)\n openFollowersCSV.setObjectName(\"openFollowersCSV\")\n openFollowersCSV.move(750, 165)\n\n followingCSVText = QLabel(\"Open 'Following' CSV file\", self)\n followingCSVText.resize(180, 100)\n followingCSVText.setObjectName(\"followingCSVText\")\n followingCSVText.move(570, 95)\n\n openFollowingCSV = QPushButton(\"Open\", self)\n openFollowingCSV.setFixedSize(70, 30)\n openFollowingCSV.setObjectName(\"openFollowingCSV\")\n openFollowingCSV.move(750, 130)\n\n settingTitle = QLabel(\"Settings\", self)\n settingTitle.resize(120, 100)\n settingTitle.setObjectName(\"settingTitle\")\n settingTitle.move(15, 190)\n\n settingFunctions = QLabel(\"Functions\", self)\n settingFunctions.resize(120, 70)\n 
settingFunctions.setObjectName(\"settingFunctions\")\n settingFunctions.move(15, 240)\n\n self.checkBoxFollow = QCheckBox(\"Follow\", self)\n self.checkBoxFollow.move(50, 310)\n\n self.checkBoxLike = QCheckBox(\"Like\", self)\n self.checkBoxLike.move(160, 310)\n\n self.checkBoxComment = QCheckBox(\"Comment\", self)\n self.checkBoxComment.move(250, 310)\n\n genderPick = QLabel(\"Gender\", self)\n genderPick.resize(120, 65)\n genderPick.setObjectName(\"genderPick\")\n genderPick.move(15, 330)\n\n self.checkBoxBoys = QCheckBox(\"Boys\", self)\n self.checkBoxBoys .move(50, 390)\n\n self.checkBoxGirls = QCheckBox(\"Girls\", self)\n self.checkBoxGirls .move(160, 390)\n\n languagePick = QLabel(\"Language\", self)\n languagePick.resize(120, 60)\n languagePick.setObjectName(\"languagePick\")\n languagePick.move(15, 410)\n\n self.lang_holder = {}\n\n for x in range(4):\n self.lang_holder[\"lang_\" + str(x)] = QComboBox(self)\n self.lang_holder[\"lang_\" + str(x)].resize(100, 30)\n self.lang_holder[\"lang_\" + str(x)].setPlaceholderText(\"None\")\n self.lang_holder[\"lang_\" + str(x)].addItems(tuplesWithCountries)\n self.lang_holder[\"lang_\" + str(x)].move(50 + x * 130, 470)\n\n hashtagPick = QLabel(\"Pick hashtag -> [hashtag:weight]\", self)\n hashtagPick.resize(250, 65)\n hashtagPick.setObjectName(\"hashtagPick\")\n hashtagPick.move(15, 495)\n\n self.hashtagField = QTextEdit(self)\n self.hashtagField.resize(350, 100)\n self.hashtagField.move(15, 560)\n\n commentPick = QLabel(\"Pick comment\", self)\n commentPick.resize(190, 65)\n commentPick.setObjectName(\"commentPick\")\n commentPick.move(380, 495)\n\n self.commentField = QTextEdit(self)\n self.commentField.resize(350, 100)\n self.commentField.move(380, 560)\n\n self.launchButton = QPushButton(\"Launch\", self)\n self.launchButton.setFixedSize(100, 50)\n self.launchButton.setObjectName(\"launchButton\")\n self.launchButton.move(735, 610)\n\n self.show()\n\n class workerLaunch(QThread):\n\n def __init__(self,dict_has,follow,like,lang,girl,boys,comment,commentList):\n super(workerLaunch, self).__init__()\n\n self.mutex = QMutex()\n\n self.hashtag_dict = dict_has\n self.follow = follow\n self.like = like\n self.langList = lang\n self.girlValue = girl\n self.boyValue = boys\n self.comment = comment\n self.commentList = commentList\n\n @pyqtSlot()\n def run(self):\n self.mutex.lock()\n botObject.tagFinder(self.hashtag_dict, self.follow, self.like, self.langList, self.girlValue, self.boyValue, self.comment, self.commentList)\n self.mutex.unlock()\n\n app = QApplication(sys.argv)\n windowSetting = settingWindow()\n app.exec_()\n\n","sub_path":"pyqt5_gui2.py","file_name":"pyqt5_gui2.py","file_ext":"py","file_size_in_byte":23841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"236207609","text":"from flask import Flask, request, render_template, jsonify\r\nimport os\r\nimport db\r\n\r\napp= Flask(__name__)\r\napp.secret_key= os.urandom(32)\r\n\r\n@app.route('/id')\r\ndef consultaId():\r\n return render_template('consultaproducto.html')\r\n\r\n@app.route('/listProduct', methods=['GET'])\r\ndef listProductById():\r\n idp = request.args.get('idproducto')\r\n #output= db.getProductoSecure(idp)\r\n output= db.getProducto(idp)\r\n return render_template(\"productos.html\", productos=output)\r\n\r\n\r\nif __name__==\"__main__\":\r\n 
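listProductById below forwards the raw idproducto request argument to db.getProducto; the commented-out getProductoSecure call suggests a parameterized variant, which (assuming a DB-API style connection -- the real db module and table name are not shown) would look roughly like:

def get_producto_secure(conn, idproducto):
    # Placeholder-style query: the driver escapes idproducto, not string formatting.
    cur = conn.cursor()
    cur.execute("SELECT * FROM productos WHERE idproducto = %s", (idproducto,))
    return cur.fetchall()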
app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"509780849","text":"from django.urls import path\r\nfrom CreditCards.views.auth import *\r\nfrom CreditCards.views.credit_cards import *\r\n\r\napp_name = 'CreditCards'\r\n\r\nurlpatterns = [\r\n path('view/', CreditCardsListView.as_view(), name='all_cards'),\r\n path('/', CreditCardsDetailView.as_view(), name='card_details'),\r\n path('signup/', SignUpController.as_view(), name='signup'),\r\n path('login/', LoginController.as_view(), name='login'),\r\n path('logout/', logout_user, name='logout'),\r\n path('add/', CreateCreditCardView.as_view(), name='add_card'),\r\n path('/edit/', UpdateCreditCardView.as_view(), name='edit_card'),\r\n path('/delete/', DeleteCreditCardView.as_view(), name='delete_card'),\r\n]\r\n","sub_path":"CreditCards/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"345531119","text":"from os.path import join\nimport os\n\nimport cv2\nimport numpy as np\nimport pandas as pd\n\nfrom .imdb import Imdb\n\n\nclass Fashion(Imdb):\n def __init__(self, label_path, datatype='train'):\n super().__init__('fashion')\n assert datatype in ['train', 'test', 'val']\n self.label_path = label_path\n with open(join(label_path, 'Eval/list_eval_partition.txt'), 'r') as fp:\n with open(join(label_path, 'Anno/list_bbox.txt'), 'r') as fb:\n with open(join(label_path, 'Anno/list_landmarks.txt'), 'r') as fl:\n _ = int(fp.readline().strip())\n _ = int(fb.readline().strip())\n _ = int(fl.readline().strip())\n partition = pd.read_csv(fp, delimiter=' *')\n bbox = pd.read_csv(fb, delimiter=' *')\n landmarks = pd.read_csv(fl, delimiter=' *')\n bbox = pd.concat((bbox, landmarks['clothes_type'], partition['evaluation_status']), axis=1)\n self.bbox = bbox.groupby('evaluation_status').get_group(datatype)\n self.num_images = len(self.bbox)\n\n def label_from_index(self, index):\n img_info = self.bbox.iloc[index]\n image_file = self.image_path_from_index(index)\n assert os.path.isfile(image_file), 'Path does not exist: {}'.format(image_file)\n y,x = cv2.imread(image_file).shape[:2]\n xmin = float(img_info.x_1) / x\n ymin = float(img_info.y_1) / y\n xmax = float(img_info.x_2) / x\n ymax = float(img_info.y_2) / y\n return np.array([[img_info.clothes_type, xmin, ymin, xmax, ymax],])\n\n def image_path_from_index(self, index):\n return join(self.label_path, 'Img',self.bbox.iloc[index].image_name)\n\n\nif __name__ == '__main__':\n imdb = Fashion('../data/FashionLandmarkDetectionBenchmark')\n imdb.label_from_index(1)\n","sub_path":"ssd/dataset/fashion.py","file_name":"fashion.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"352612233","text":"import pytest\nimport os\nfrom SCNIC.general import simulate_correls\nfrom SCNIC.between_correls import between_correls\nfrom biom.util import biom_open\n\n\n@pytest.fixture()\ndef args():\n class Arguments(object):\n def __init__(self):\n self.table1 = \"table1.biom\"\n self.table2 = \"table2.biom\"\n self.output = \"out_dir\"\n self.correl_method = \"spearman\"\n self.p_adjust = \"bh\"\n self.min_sample = None\n self.min_p = None\n self.min_r = None\n self.sparcc_filter = True\n self.force = False\n self.procs = 1\n\n return Arguments()\n\n\ndef 
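Fashion.label_from_index above normalizes the annotation box by the image size read back with cv2; the same normalization on a toy box:

import numpy as np

y, x = 300, 200                        # image height, width
x1, y1, x2, y2 = 50, 60, 150, 240      # bbox corners in pixels
label = np.array([[1, x1 / x, y1 / y, x2 / x, y2 / y]])
print(label)                           # [[1.   0.25 0.2  0.75 0.8 ]]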
test_between_correls(args, tmpdir):\n table1 = simulate_correls()\n table2 = simulate_correls()\n loc = tmpdir.mkdir(\"with_correls_test\")\n with biom_open(str(loc.join(\"table1.biom\")), 'w') as f:\n table1.to_hdf5(f, 'madebyme')\n with biom_open(str(loc.join(\"table2.biom\")), 'w') as f:\n table2.to_hdf5(f, 'madebyme')\n os.chdir(str(loc))\n between_correls(args)\n files = os.listdir(str(loc)+'/out_dir')\n assert \"correls.txt\" in files\n assert \"crossnet.gml\" in files\n","sub_path":"tests/test_between_correls.py","file_name":"test_between_correls.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"483303611","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 18 21:18:55 2018\n\n@author: yumi.zhang\n\"\"\"\n\nimport cherrypy, os \nfrom flask import Blueprint, render_template\nimport json\nfrom movie_engine import RecommendationEngine\nfrom sqlalchemy.orm import sessionmaker\nfrom tabledef import *\nfrom sqlalchemy import create_engine\nimport pandas as pd\nfrom requests import get\nfrom bs4 import BeautifulSoup\n\nmain = Blueprint('main', __name__)\n\nimport logging\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\n \nfrom flask import Flask, request, session\n\n@main.route(\"/\")\ndef home():\n if not session.get('logged_in'):\n return render_template('login.html')\n else:\n user_id = session['username']\n logger.debug(\"User %s TOP ratings requested\", user_id)\n #select top 10 movies to show\n top_ratings = recommendation_engine.get_top_ratings(user_id, 10)\n \n #count how many movies did the user rate\n #select the top 10 movies that user gave high / low ratings\n rated_number, user_rated_movies_high, user_rated_movies_low, my_ratings_RDD = recommendation_engine.get_rated_movies(user_id)\n \n movie_file = pd.read_csv('file:///Users/yumi.zhang/Desktop/recommendation/datasets/ml-latest-small/movies.csv')\n tag_file = pd.read_csv('file:///Users/yumi.zhang/Desktop/recommendation/datasets/ml-latest-small/tags.csv')\n \n def get_poster_online(movidId):\n links = pd.read_csv('file:///Users/yumi.zhang/Desktop/recommendation/datasets/ml-latest-small/links.csv')\n \n number = \"tt00\"\n imdbId = list(links.loc[links['movieId'] == movidId, 'imdbId'])\n number += str(imdbId[0]) \n url = 'https://www.imdb.com/title/%s' % number\n \n response = get(url)\n html_soup = BeautifulSoup(response.text, 'html.parser')\n movie_containers = html_soup.find('div', class_ = 'poster')\n poster_url = movie_containers.a.img['src']\n \n return poster_url\n \n \n movie_year_name = []\n \n for i in top_ratings:\n movie_year_name.append(i[1])\n \n high_movie_year_name = []\n low_movie_year_name = []\n \n def get_movie_name(movieId):\n name = list(movie_file.loc[movie_file['movieId'] == movieId, 'title'])\n return name\n \n for i in user_rated_movies_high:\n tmp_name_high = get_movie_name(i[1])\n high_movie_year_name.append(tmp_name_high[0])\n \n for i in user_rated_movies_low:\n tmp_name_low = get_movie_name(i[1])\n low_movie_year_name.append(tmp_name_low[0])\n \n url_list = []\n genres_list = []\n tag_list = []\n \n for i in range(len(top_ratings)):\n movieId = top_ratings[i][0]\n poster_url = get_poster_online(movieId)\n url_list.append(poster_url)\n \n genres = list(movie_file.loc[movie_file['movieId'] == movieId, 'genres'])\n genres_list.append(genres)\n \n tag = list(tag_file.loc[tag_file['movieId'] == movieId, 'tag'])\n tag_list.append(tag)\n\n 
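get_poster_online below builds the IMDb id as the literal prefix 'tt00' plus str(imdbId), which only yields a valid id when the number happens to have five digits; IMDb title ids zero-pad the number to at least seven digits:

def imdb_tt(imdb_id):
    # IMDb title ids are 'tt' plus the number zero-padded to at least 7 digits.
    return 'tt{:07d}'.format(int(imdb_id))

print(imdb_tt(114709))    # tt0114709 -- 'tt00' + str(114709) would give tt00114709
print(imdb_tt(4779682))   # tt4779682 -- the id hard-coded later in this file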
tmp_genre = []\n movie_genres = []\n for genre in genres_list:\n for i in genre:\n tmp_genre.append(i)\n movie_genres.append(tmp_genre)\n tmp_genre = []\n \n new_movie_genres = []\n \n for i in movie_genres:\n tmp = i[0].split('|')\n new_movie_genres.append(tmp)\n \n \n high_rated_movie_list = []\n for i in range(len(user_rated_movies_high)):\n movieId = user_rated_movies_high[i][1]\n high_rated_movie_url = get_poster_online(movieId)\n high_rated_movie_list.append(high_rated_movie_url)\n \n low_rated_movie_list = []\n for i in range(len(user_rated_movies_low)):\n movieId = user_rated_movies_low[i][1]\n low_rated_movie_url = get_poster_online(movieId)\n low_rated_movie_list.append(low_rated_movie_url)\n \n \n #get the newest movie information\n newmovie_src, newmovie_title, cinemas_name = get_newest_movies_info()\n \n return render_template('home.html', user_id = user_id, url_list = url_list, \n movie_year_name = movie_year_name,\n new_movie_genres = new_movie_genres, tag_list = tag_list, \n rated_number = rated_number, high_rated_movie_list = high_rated_movie_list,\n low_rated_movie_list = low_rated_movie_list, \n low_movie_year_name = low_movie_year_name,\n high_movie_year_name = high_movie_year_name,\n newmovie_src = newmovie_src, newmovie_title = newmovie_title,\n cinemas_name = cinemas_name)\n \ndef get_newest_movies_info():\n url = \"https://www.imdb.com/movies-in-theaters/\" \n response = get(url)\n html_soup = BeautifulSoup(response.text, 'html.parser')\n\n movie_containers = html_soup.find_all('div', class_ = 'image')\n newmovie_src = []\n title = []\n number = []\n for i in range(len(movie_containers)):\n newmovie_src.append(movie_containers[i].a.div.img['src'])\n title.append(movie_containers[i].a.div.img['title'])\n number.append(movie_containers[i].a['href'].split('/')[2])\n \n url = []\n for i in number:\n url.append('https://www.imdb.com/showtimes/title/%s/?ref_=inth_ov_sh' % i)\n \n #showtime_url = \"https://www.imdb.com/showtimes/title/tt4779682/?ref_=inth_ov_sh\"\n cinemas_name = []\n for i in url:\n span_name = []\n showtime_response = get(i)\n showtime_html_soup = BeautifulSoup(showtime_response.text, 'html.parser')\n new_movie_containers = showtime_html_soup.find_all('div', class_=\"fav_box\")\n \n for i in range(len(new_movie_containers)):\n span_name.append(new_movie_containers[i].h3.a.span.text)\n \n cinemas_name.append(span_name)\n \n return newmovie_src, title, cinemas_name\n\n@main.route('/login', methods=['POST'])\ndef login():\n POST_USERNAME = str(request.form['username'])\n POST_PASSWORD = str(request.form['password'])\n \n session['username'] = request.form['username']\n \n Session = sessionmaker(bind=engine)\n s = Session()\n query = s.query(User).filter(User.username.in_([POST_USERNAME]), User.password.in_([POST_PASSWORD]))\n result = query.first()\n \n if result:\n session['logged_in'] = True\n else:\n flash('wrong password!')\n return home()\n\n@main.route('/logout')\ndef logout():\n session['logged_in'] = False\n return home()\n\n@main.route(\"/shutdown\")\n@cherrypy.expose\ndef shutdown(self): \n cherrypy.engine.exit()\n \n@main.route(\"//ratings/top/\", methods=[\"GET\"])\ndef top_ratings(user_id, count):\n logger.debug(\"User %s TOP ratings requested\", user_id)\n top_ratings = recommendation_engine.get_top_ratings(user_id,count)\n \n return json.dumps(top_ratings)\n \n \n@main.route(\"//ratings/\", methods=[\"GET\"])\ndef movie_ratings(user_id, movie_id):\n logger.debug(\"User %s rating requested for movie %s\", user_id, movie_id)\n ratings = 
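The tmp_genre bookkeeping above first flattens the one-element genre lists and then splits each string on '|'; the same result in one comprehension:

genres_list = [['Adventure|Animation|Children'], ['Comedy|Romance']]
new_movie_genres = [g[0].split('|') for g in genres_list]
print(new_movie_genres)   # [['Adventure', 'Animation', 'Children'], ['Comedy', 'Romance']]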
recommendation_engine.get_ratings_for_movie_ids(user_id, [movie_id])\n return json.dumps(ratings)\n\n@main.route(\"//ratings\", methods = [\"POST\"])\ndef add_ratings(user_id):\n # get the ratings from the Flask POST request object\n ratings_list = request.form.keys()[0].strip().split(\"\\n\")\n ratings_list = map(lambda x: x.split(\",\"), ratings_list)\n # create a list with the format required by the negine (user_id, movie_id, rating)\n ratings = map(lambda x: (user_id, int(x[0]), float(x[1])), ratings_list)\n # add them to the model using then engine API\n recommendation_engine.add_ratings(ratings)\n \n return json.dumps(ratings)\n \ndef create_app(spark_context, dataset_path):\n global recommendation_engine \n recommendation_engine = RecommendationEngine(spark_context, dataset_path) \n \n app = Flask(__name__)\n app.register_blueprint(main)\n app.secret_key = os.urandom(12)\n \n return app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"485908114","text":"from django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.views.generic import View, TemplateView\nfrom django.shortcuts import render, redirect\nfrom django.template import RequestContext, loader, Context\nfrom django.contrib import messages\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\nfrom django.db import IntegrityError\nfrom django.http import JsonResponse, QueryDict\nfrom django.shortcuts import redirect, render\nfrom django.template import RequestContext\nfrom django.views.generic import TemplateView, View\n\nfrom account.emails import SendGrid\nfrom pairprogram.forms import SessionForm\nfrom pairprogram.models import Participant, Session\nfrom pyfirebase import Firebase\nfrom resources.views import LoginRequiredMixin\n\n\nclass StartPairView(LoginRequiredMixin, TemplateView):\n template_name = 'pairprogram/sessions.html'\n form_class = SessionForm\n\n def post(self, request, **kwargs):\n form = self.form_class(\n request.POST, instance=request.user.profile)\n\n if form.is_valid():\n new_session = Session.objects.create(\n initiator=request.user,\n session_name=form.cleaned_data['session_name'])\n new_session.save()\n Participant.objects.create(\n participant=request.user, session_id=new_session.id)\n messages.add_message(\n request, messages.SUCCESS, 'Session started successfully')\n return redirect('/pair/' + str(new_session.id),\n context_instance=RequestContext(request))\n\n\nclass ListSessionView(LoginRequiredMixin, TemplateView):\n form_class = SessionForm\n template_name = 'pairprogram/sessions.html'\n\n def get_context_data(self, *args, **kwargs):\n context = super(ListSessionView, self).get_context_data(**kwargs)\n participants = Participant.objects.filter(\n participant=self.request.user).all()\n\n sessions = []\n for participant in participants:\n sessions.append(participant.session)\n\n context['sessions'] = sessions\n context['sessionform'] = self.form_class()\n return context\n\n\nclass PairSessionView(LoginRequiredMixin, View):\n form_class = SessionForm\n template_name = 'pairprogram/editor.html'\n\n def get(self, request, *args, **kwargs):\n context = {}\n context['session_id'] = kwargs['session_id']\n participants = Participant.objects.filter(\n session_id=context['session_id']).all()\n\n result = any(self.request.user == row.participant\n for row in participants)\n context['profile'] = 
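add_ratings below subscripts request.form.keys(), which works in Python 2 but fails in Python 3 (dict views are not indexable); the same 'movie_id,rating per line' parsing, written portably:

def parse_ratings(raw, user_id):
    # raw: one 'movie_id,rating' pair per line, the format the POST body carries.
    rows = (line.split(',') for line in raw.strip().split('\n'))
    return [(user_id, int(movie), float(rating)) for movie, rating in rows]

print(parse_ratings("1,4.0\n32,3.5", user_id=7))   # [(7, 1, 4.0), (7, 32, 3.5)]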
self.request.user.profile\n context['session'] = Session.objects.get(\n id=context['session_id'])\n context['sessionform'] = self.form_class()\n context['themes'] = settings.EDITOR_THEME.iteritems()\n context['languages'] = settings.EDITOR_LANGUAGE.iteritems()\n\n if not result:\n messages.add_message(self.request, messages.ERROR,\n 'No Access to this page')\n return redirect('/home',\n context_instance=RequestContext(self.request))\n\n return render(request, self.template_name, context)\n\n def send_invites(self, email, session, request):\n user = User.objects.filter(email=email).first()\n if user is not None:\n try:\n Participant.objects.create(\n participant=user, session=session)\n\n except IntegrityError:\n pass\n url = 'http://{}{}'.format(\n request.get_host(), reverse(\n 'pair_program', kwargs={'session_id': session.id}))\n else:\n url = 'http://{}{}?session_id={}'.format(\n request.get_host(), reverse('index'), session.id)\n\n email_context = {\n 'subject': 'Let\\'s Start Pairing now!',\n 'url': url,\n 'session_name': session.session_name\n }\n\n message = SendGrid.compose(\n sender='Codango-user {} <{}>'.format(\n request.user.username.upper(), request.user.email),\n recipient=email,\n subject=\"Join session in Codango\",\n text=loader.get_template(\n 'emails/session-invite.txt'\n ).render(Context(email_context)),\n html=loader.get_template(\n 'emails/session-invite.html'\n ).render(Context(email_context))\n )\n # send email\n response = SendGrid.send(message)\n return response\n\n def post(self, request, *args, **kwargs):\n user_list = request.POST.getlist('userList[]')\n session = Session.objects.get(id=kwargs['session_id'])\n result = []\n for email in user_list:\n response_dict = {}\n response_dict['email'] = email\n response_dict['status'] = \"error\"\n if request.user.email != email:\n participants = Participant.objects.filter(session=session)\n if len(participants) < 5:\n response = self.send_invites(email, session, request)\n response_dict['message'] = \"Successfully sent\" \\\n if response == 200 else \"There was an error\"\n response_dict['status'] = \"success\" \\\n if response == 200 else \"error\"\n else:\n response_dict[\n 'message'] = \"A session cannot hold more than 5 users\"\n else:\n response_dict[\n 'message'] = \"You can't send an invite to yourself\"\n result.append(response_dict)\n return JsonResponse(\n {'response': result})\n\n def put(self, request, *args, **kwargs):\n data = QueryDict(request.body)\n language = data.get('language', 'python')\n session = Session.objects.get(id=kwargs['session_id'])\n response_dict = {}\n if session:\n session.language = language\n session.save()\n response_dict[\n 'message'] = \"Session updated successfully\"\n else:\n response_dict[\n 'message'] = \"Session unable to updated\"\n\n return JsonResponse(\n {'response': response_dict})\n\n\nclass DeleteSessionView(LoginRequiredMixin, View):\n\n def post(self, request, *args, **kwargs):\n session_id = request.POST['session_id']\n firebase = Firebase('https://project-8667655276128018284.fir'\n 'ebaseio.com/')\n pair_users_ref = \"session/{}/users\".format(session_id)\n pair_users_session = firebase.ref(pair_users_ref).child(session_id)\n\n try:\n session = Session.objects.get(id=session_id)\n if session.initiator == self.request.user:\n pair_users_session.delete()\n session.delete()\n else:\n participant = Participant.objects.filter(\n session_id=session_id, participant_id=self.request.user)\n participant_id = self.request.user.id\n 
pair_users_session.child(str(participant_id)).delete()\n participant.delete()\n except Session.DoesNotExist:\n pass\n return JsonResponse({\n 'status': 'success',\n }, status=200)\n","sub_path":"codango/pairprogram/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"597293361","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n'''\r\n@Time : 2019/7/10 20:47\r\n@Author : Careslten\r\n@Site : \r\n@File : userList.py\r\n@Software: PyCharm\r\n'''\r\n\r\n\r\nfrom django.http import HttpResponse\r\nfrom oneGo import models\r\nimport json,time\r\nfrom Public.JsonData import DateEncoder\r\n\r\nclass user_list():\r\n\r\n def userList(self,request):\r\n query = []\r\n query_list = models.UserInfo.objects.filter(useing=1).values()\r\n print('aaaaaaaaaa',query_list)\r\n for i in query_list:\r\n a = {}\r\n a['id'] = i['id']\r\n a['status'] = i['status']\r\n a['username'] = i['username']\r\n a['sex'] = i['sex']\r\n a['old_login_time'] = i['old_login_time'].strftime('%Y-%m-%d %H:%M:%S')\r\n query.append(a)\r\n print('查询出的所有用户:', query)\r\n return HttpResponse(json.dumps({'status': 1, 'msg': '操作成功', 'data': query}))\r\n\r\n def session_test(self,request):\r\n username = request.session.get('username', None) # 取这个key的值,如果不存在就为None\r\n userid = request.session.get('user_id', None)\r\n times = time.strftime('%y-%m-%d-%H-%M-%S', time.localtime(time.time()))\r\n return HttpResponse(\r\n json.dumps({'status': 1, 'msg': '操作成功', 'data': {'username': username, 'userid': userid, 'time': times}}))\r\n\r\n def getuser(self,request):\r\n username = request.session.get('username', None)\r\n return HttpResponse(json.dumps({'status': 1, 'msg': '操作成功', 'data': {'username': username}}))\r\n\r\n def userHistory(self,request):\r\n username = request.session.get('username', 1)\r\n search = request.POST.get('search',None)\r\n user_host_history = ''\r\n if username == 1:\r\n return HttpResponse(json.dumps({'status': 1, 'msg': '登录过期'}))\r\n user_id = models.UserInfo.objects.get(username=username).id\r\n print(search=='')\r\n print(search)\r\n if search == '' or search == None:\r\n user_host_history = models.user_host.objects.filter(userid=user_id, status=1).values()\r\n\r\n else:\r\n user_host_history = models.user_host.objects.filter(userid=user_id, status=1,\r\n casename__icontains=search).values()\r\n user_history = []\r\n for i in user_host_history:\r\n everyhost = {}\r\n body = {}\r\n header = {}\r\n host_id = i['id']\r\n body_init = models.user_body.objects.filter(host_id_id=host_id, type=1, status=1).values()\r\n header_init = models.user_body.objects.filter(host_id_id=host_id, type=2, status=1).values()\r\n for everybody in body_init:\r\n body[everybody['key']] = everybody['value']\r\n for everheader in header_init:\r\n header[everheader['key']] = everheader['value']\r\n everyhost['id'] = i['id']\r\n everyhost['host'] = i['host']\r\n everyhost['body'] = body\r\n everyhost['header'] = header\r\n everyhost['create_date'] = i['create_date']\r\n everyhost['response_body'] = i['response_body']\r\n everyhost['type'] = i['method']\r\n everyhost['CaseName'] = i['casename']\r\n everyhost['json_body'] = i['json_body']\r\n everyhost['json_header'] = i['json_header']\r\n user_history.append(everyhost)\r\n return HttpResponse(json.dumps({'status': 1, 'msg': '操作成功', 'data': user_history}, cls=DateEncoder))\r\n\r\n #增加用户\r\n def add_User(self,request):\r\n username = 
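userHistory below serializes datetime fields through cls=DateEncoder imported from Public.JsonData; that module is not shown, but such an encoder is typically shaped like this (a sketch, not the project's actual class):

import json
from datetime import datetime

class DateEncoder(json.JSONEncoder):
    # Typical datetime-aware encoder; the project's real one may differ.
    def default(self, obj):
        if isinstance(obj, datetime):
            return obj.strftime('%Y-%m-%d %H:%M:%S')
        return super().default(obj)

print(json.dumps({'t': datetime(2019, 7, 10, 20, 47)}, cls=DateEncoder))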
request.POST.get('username',None)\r\n password = request.POST.get('password',None)\r\n if username == None or password == None:\r\n return HttpResponse(json.dumps({'status': 500, 'msg': '参数错误'}))\r\n query = models.UserInfo.objects.filter(username=str(username))\r\n if len(query) >= 1:\r\n return HttpResponse(json.dumps({'status': 500,'msg': '用户已存在'}))\r\n try:\r\n dic = {'username':username, 'password':password}\r\n models.UserInfo.objects.create(**dic)\r\n return HttpResponse(json.dumps({'status': 1, 'msg':'操作成功'}))\r\n except Exception as e:\r\n return HttpResponse(json.dumps({'status': 500, 'msg': str(e)}))\r\n\r\n def userDelList(self, request):\r\n query = []\r\n try:\r\n query_list = models.UserInfo.objects.filter(useing=0).values()\r\n print('ddddddddd', query_list)\r\n for i in query_list:\r\n a = {}\r\n a['id'] = i['id']\r\n a['status'] = i['status']\r\n a['username'] = i['username']\r\n a['sex'] = i['sex']\r\n a['create_time'] = i['create_time'].strftime('%Y-%m-%d %H:%M:%S')\r\n query.append(a)\r\n print('查询出的所有已删除用户:', query)\r\n except Exception as e:\r\n print(e)\r\n return HttpResponse(json.dumps({'status': 500, 'msg': str(e)}))\r\n return HttpResponse(json.dumps({'status': 1, 'msg': '操作成功', 'data': query}))\r\n\r\n def recoverCustomer(self,request):\r\n user_id = request.POST.get('user_id',None)\r\n if user_id == None:\r\n return HttpResponse(json.dumps({'status': 500, 'msg': '参数错误'}))\r\n try:\r\n models.UserInfo.objects.filter(id=user_id).update(useing=1)\r\n except Exception as e:\r\n return HttpResponse(json.dumps({'status': 500, 'msg': str(e)}))\r\n return HttpResponse(json.dumps({'status': 1, 'msg': '操作成功'}))","sub_path":"oneGo/api/userList.py","file_name":"userList.py","file_ext":"py","file_size_in_byte":5669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"349799632","text":"\"\"\"\r\nAquí están las funciones para el programa principal del trabajo práctico\r\n\"\"\"\r\n\r\n\r\nimport os.path\r\nimport pickle\r\nimport random\r\n\r\nimport registro_confederacion\r\nimport registro_paises\r\nfrom registro_confederacion import Confederacion\r\n\r\n\r\n# ------------------------------------------------------------------------------\r\n# Funciones de interfaz\r\n\r\n# Stop\r\ndef enter():\r\n Enter = input(\"\\x1b[1;36m\"+\"Presione enter para continuar...\"+\"\\x1b[0;m\")\r\n\r\n\r\n\r\n\r\n# ------------------------------------------------------------------------------\r\n# Sector de validaciones y conversiones\r\n\r\n\r\n# Función para validar las confederaciones y opciones\r\ndef validar_rango(inf, sup, mensaje):\r\n \"\"\"\r\n Valida si un número está en cierto rango\r\n :param inf: Un número que marca el inicio del rango\r\n :param sup: Otro número que indica el final del rango\r\n :param mensaje: Un mensaje cualquiera\r\n :return: El número que puede estar dentro del rango y si no, marca error\r\n \"\"\"\r\n n = inf - 1\r\n while n < inf or n > sup:\r\n n = int(input(mensaje))\r\n if n < inf or n > sup:\r\n print(\"\\n\\033[1;31m\"+\"Valor no válido. Ingrese un valor entre\",str(inf),\"y\",\r\n str(sup) + \"\\033[0;m\\n\")\r\n return n\r\n\r\n\r\n# Funcion para validar que el anfitrión está en la lista\r\ndef validar_anfitrion(vec_paises):\r\n \"\"\"\r\n Hace una validación de si el anfitrión está en el vector países\r\n :param vec_paises: Vector original de países\r\n :return: el anfitrión ya validado\r\n \"\"\"\r\n print(\"\\033[1;30m\"+\"Le daremos la oportunidad de elegir el anfitrion del mundial.\\n\"\r\n \"Usted debe ingresarlo (como todo nombre propio) con la primer letra en mayuscula.\\n\"+\"\\033[0;m\")\r\n anfi = input(\"\\033[1;30m\"+\"Ingrese el país anfitrión: \"+\"\\033[0;m\")\r\n paises = [pais.nombre for pais in vec_paises]\r\n while anfi not in paises:\r\n print(\"\\n\\t\\t\\t\\033[1;31m\"+\"No se encuentra en la lista.\\n\"\r\n \"Por favor, revise la forma en la que escribio el pais.\\n\"+ \"\\033[0;m\")\r\n anfi = input(\"\\033[1;30m\"+\"Ingrese el país anfitrión: \"+\"\\033[0;m\")\r\n return anfi\r\n\r\n\r\ndef convertir_num_letra_grupo(j):\r\n \"\"\"\r\n Convierte un número en letra de un grupo\r\n :param j: el número del 0 al 7\r\n :return: la cadena ya convertida\r\n \"\"\"\r\n if j < 0 or j > 7:\r\n return \"Valor inválido.\"\r\n grupos = (\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\")\r\n return grupos[j]\r\n\r\n\r\ndef convertir_num_conf(i):\r\n \"\"\"\r\n Convierte un número de codificación de una confederación en cadena\r\n :param i: el código de confederación\r\n :return: la cadena ya transformada\r\n \"\"\"\r\n if i < 0 or i > 5:\r\n return \"\\033[1;31m\"+\"Valor inválido.\" +\"\\033[0;m\\n\"\r\n confederacion = (\"UEFA\", \"CONMEBOL\", \"CONCACAF\", \"CAF\", \"AFC\", \"OFC\")\r\n return confederacion[i]\r\n\r\n\r\n# ------------------------------------------------------------------------------\r\n# Orden de mayor a menor con add_in_order\r\n\r\n\r\ndef add_in_order(vec_paises, paises):\r\n \"\"\"\r\n Usa el algoritmo de búsqueda binaria para ordenar de mayor a menor y luego\r\n los carga en el vector\r\n :param vec_paises: vector de países que todavía no se generó\r\n :param paises: una variable que contiene las líneas previamente\r\n transformadas en cadenas del registro Países\r\n :return: None\r\n \"\"\"\r\n n = len(vec_paises)\r\n pos = n\r\n izq, der = 0, n-1\r\n while izq <= der:\r\n c = (izq + der) // 2\r\n if vec_paises[c].puntos == paises.puntos:\r\n pos = c\r\n break\r\n if paises.puntos > vec_paises[c].puntos:\r\n der = c - 1\r\n else:\r\n izq = c + 1\r\n if izq > der:\r\n pos = izq\r\n vec_paises[pos:pos] = [paises]\r\n\r\n\r\ndef cargar_vector(fd, vec_paises):\r\n \"\"\"\r\n Se hace la carga del vector, abriendo el archivo de texto,\r\n transformándolo en cadena de registros y finalmente, se carga al vector\r\n :param fd: el archivo de texto\r\n :param vec_paises: vector de países que recibirá el contenido del\r\n archivo previamente convertido en cadena\r\n :return: vector países\r\n \"\"\"\r\n if not os.path.exists(fd):\r\n print(\"El archivo no existe.\")\r\n return None\r\n\r\n m = open(fd, \"rt\", encoding=\"utf-8\")\r\n linea = m.readline()\r\n while linea != \"\":\r\n if linea[-1] == \"\\n\":\r\n linea = linea[:-1]\r\n if linea != \"\" and linea[0] != \"#\":\r\n paises = registro_paises.csv_to_linea(linea)\r\n add_in_order(vec_paises, paises)\r\n linea = m.readline()\r\n m.close()\r\n\r\n\r\ndef mostrar_vector_paises(vec_paises):\r\n for i in range(len(vec_paises)):\r\n print(registro_paises.to_string_paises(vec_paises[i]))\r\n\r\n\r\n# Opcion 2: busca el mayor\r\n\r\ndef 
mayor(vec_paises):\r\n \"\"\"\r\n Hace una búsqueda del mayor. Si hay más mayores, los carga en el vector\r\n mayores\r\n :param vec_paises: vector original de países\r\n :return: El vector de mayores\r\n \"\"\"\r\n mayor = None\r\n for paises in vec_paises:\r\n if mayor is None or paises.campeonatos > mayor:\r\n mayor = paises.campeonatos\r\n mayores = []\r\n for paises in vec_paises:\r\n if paises.campeonatos == mayor:\r\n mayores.append(paises)\r\n return mayores\r\n\r\n\r\n# Opcion 3: vector de conteo\r\n\r\ndef contador_paises_campeonatos(vec):\r\n \"\"\"\r\n Carga un vector de conteo para saber la cantidad de países que consiguieron\r\n campeonatos por cada confederación\r\n :param vec: el vector original de países\r\n :return: vector de conteo\r\n \"\"\"\r\n cont_camp = [0] * 6\r\n for i in range(len(vec)):\r\n c = int(vec[i].confederacion)\r\n if vec[i].campeonatos > 0:\r\n cont_camp[c] += 1\r\n for i in range(6):\r\n print(str(i) + \".\" + convertir_num_conf(i) + \" ====> \" + \"Cantidad \" \r\n \"de países con campeonatos: \" + str(cont_camp[i]))\r\n\r\n\r\n# Opcion 4\r\n\r\ndef vector_confederacion(vec_paises, x):\r\n \"\"\"\r\n Aquí se arma el vector de una confederación X que se ingresa por teclado\r\n :param vec_paises: vector original de países\r\n :param x: el código de una confederación\r\n :return: vector de una confederación X\r\n \"\"\"\r\n vec_conf = []\r\n for i in range(len(vec_paises)):\r\n if vec_paises[i].confederacion == x:\r\n nombre = vec_paises[i].nombre\r\n puntos = vec_paises[i].puntos\r\n campeonatos = vec_paises[i].campeonatos\r\n confederacion = Confederacion(nombre, puntos, campeonatos)\r\n vec_conf.append(confederacion)\r\n return vec_conf\r\n\r\n\r\ndef orden_sort(vec_conf):\r\n \"\"\"\r\n Ordena de mayor a menor el vector confederación mediante el algoritmo de\r\n selección directa\r\n :param vec_conf: vector de confederación X\r\n :return: None\r\n \"\"\"\r\n n = len(vec_conf)\r\n for i in range(n - 1):\r\n for j in range(i + 1, n):\r\n if vec_conf[i].puntos < vec_conf[j].puntos:\r\n vec_conf[i], vec_conf[j] = vec_conf[j], vec_conf[i]\r\n\r\n\r\ndef grabar_archivo_confederacion(vec_confederacion, archivo):\r\n \"\"\"\r\n Graba un archivo a partir de un vector de confederación previamente armado\r\n :param vec_confederacion: vector confederación\r\n :param archivo: el archivo con una confederación X.\r\n :return: None\r\n \"\"\"\r\n m = open(archivo, \"wb\")\r\n c = 0\r\n for i in range(len(vec_confederacion)):\r\n pickle.dump(vec_confederacion[i], m)\r\n c += 1\r\n print(\"\\033[1;30m\"+\"El archivo\", archivo, \"está cargado\",\"y se grabaron\",c,\"registros.\"+\"\\033[0;m\")\r\n m.close()\r\n\r\n\r\n# Opcion 5\r\n\r\ndef mostrar_archivo(archivo):\r\n \"\"\"\r\n Muestra el archivo de una confederación\r\n :param archivo: archivo confederación X\r\n :return: None\r\n \"\"\"\r\n m = open(archivo, \"rb\")\r\n t = os.path.getsize(archivo)\r\n while m.tell() < t:\r\n confederacion = pickle.load(m)\r\n print(registro_confederacion.to_string_confederacion(confederacion))\r\n m.close()\r\n\r\n\r\ndef buscar_archivo(archivo, vec_conf):\r\n \"\"\"\r\n Hace la búsqueda del archivo de una confederación. Si existe, directamente\r\n lo muestra. Si no, carga el archivo y luego lo muestra.\r\n :param archivo: el archivo confederación\r\n :param vec_conf: vector de confederaciones\r\n :return: None\r\n \"\"\"\r\n escribir = True\r\n if not os.path.exists(archivo):\r\n nuevo = input(\"¿Desea escribir un nuevo archivo?(S/N): \")\r\n if nuevo == \"S\" or nuevo == \"s\":\r\n grabar_archivo_confederacion(vec_conf, archivo)\r\n mostrar_archivo(archivo)\r\n escribir = False\r\n if escribir:\r\n mostrar_archivo(archivo)\r\n\r\n\r\n# Opcion 6: la matriz y ingresa por teclado\r\n\r\ndef vec_resto(vec_paises, y):\r\n \"\"\"\r\n Vector auxiliar que toma al resto de los 24 países\r\n :param vec_paises: vector original de países\r\n :param y: país anfitrión cargado por teclado\r\n :return: vector con el resto de los 24 países\r\n \"\"\"\r\n resto = []\r\n for i in range(len(vec_paises)):\r\n if i > 8 and i <= 33 and vec_paises[i].nombre != y:\r\n resto.append(vec_paises[i])\r\n return resto\r\n\r\n\r\ndef primeros(vec_paises, y):\r\n \"\"\"\r\n Vector auxiliar que toma al resto de las cabezas de serie\r\n :param vec_paises: vector original de países\r\n :param y: país anfitrión cargado por teclado\r\n :return: vector con el resto de las cabezas de serie\r\n \"\"\"\r\n cab = []\r\n c = 0\r\n for i in range(len(vec_paises)):\r\n c += 1\r\n if c > 0 and c <= 9 and vec_paises[i].nombre != y:\r\n cab.append(vec_paises[i])\r\n return cab\r\n\r\n\r\ndef matriz(vec_paises, y):\r\n \"\"\"\r\n Se genera la matriz para el próximo mundial\r\n :param vec_paises: El vector original antes cargado\r\n :param y: El país anfitrión que se carga por teclado\r\n :return: La matriz\r\n \"\"\"\r\n # Cantidad de columnas y filas\r\n columnas = 8\r\n filas = 4\r\n\r\n # Vectores auxiliares que contienen a los 28 restantes (resto)\r\n # y a las cabezas de serie (head)\r\n resto = vec_resto(vec_paises, y)\r\n head = primeros(vec_paises, y)\r\n\r\n mat = [[None] * columnas for i in range(filas)]\r\n\r\n # Primera pasada sobre el vector original para encontrar el país\r\n # anfitrión y grabarlo en el primer casillero de la matriz\r\n for i in range(len(vec_paises)):\r\n if vec_paises[i].nombre == y:\r\n mat[0][0] = vec_paises[i]\r\n # Recorrida de la columna desde el lugar 1 para cargar el resto de las\r\n # cabezas de serie\r\n for c in range(1, columnas):\r\n mat[0][c] = head[c]\r\n # Recorrida por el resto de las filas y columnas para cargar el resto de\r\n # los países\r\n for f in range(1,4):\r\n for c in range(columnas):\r\n mat[f][c] = random.choice(resto)\r\n resto.remove(mat[f][c]) # Aquí se remueve desde el vector\r\n # auxiliar resto para que no se repitan en cada vuelta\r\n return mat\r\n\r\n\r\n#-----------------------------------------------------------------------------#\r\n\r\ndef mostrar_matriz(mat):\r\n \"\"\"\r\n Muestra la matriz en forma de cadena\r\n :param mat: La matriz antes generada\r\n :return: None\r\n \"\"\"\r\n for j in range(len(mat[0])):\r\n print(\"Grupo:\",convertir_num_letra_grupo(j))\r\n for i in range(len(mat)):\r\n print(registro_paises.to_string_paises(mat[i][j]))\r\n\r\n\r\n# Opcion 7\r\n\r\ndef buscar_pais_matriz(mat, pais):\r\n \"\"\"\r\n Busca si un país está dentro de la matriz\r\n :param mat: La matriz antes generada\r\n :param pais: El país que ingresa por teclado\r\n :return: El grupo donde se encuentra ese país si está\r\n \"\"\"\r\n grupo = None\r\n res = False\r\n for j in range(len(mat[0])):\r\n for i in range(len(mat)):\r\n if mat[i][j].nombre == pais:\r\n res = True\r\n grupo = j\r\n break\r\n if res:\r\n 
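# país encontrado: se corta también el bucle externo\r\n 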
break\r\n return grupo\r\n","sub_path":"tp/2020_AED_TP4_Magnano_85978[1K10]_Lorello_85625[1K10]_Bruera_59149_[1K10]/funciones_paises.py","file_name":"funciones_paises.py","file_ext":"py","file_size_in_byte":11949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"156624758","text":"import requests\nfrom lxml import etree\nfrom urllib import request\nimport os\nimport re\nimport threading\nfrom queue import Queue\n\n\nclass Producer(threading.Thread):\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36'\n }\n\n def __init__(self, page_que, img_que, *args, **kwargs):\n super(Producer, self).__init__(*args, **kwargs)\n self.page_que = page_que\n self.img_que = img_que\n\n def run(self):\n while True:\n if self.page_que.empty():\n break\n url = self.page_que.get()\n self.parse_page(url)\n\n def parse_page(self, url):\n response = requests.get(url, headers=self.headers)\n text = response.text\n html = etree.HTML(text)\n imgs = html.xpath(\"//div[@class='page-content text-center']//img[@class!='gif']\")\n for img in imgs:\n img_url = img.get('data-original')\n alt = img.get('alt')\n alt = re.sub(r'[\\??\\.,。!!\\*]', '', alt)\n suffix = os.path.splitext(img_url)[1]\n filename = alt + suffix\n self.img_que.put((img_url, filename))\n\n\nclass Consumer(threading.Thread):\n def __init__(self, page_que, img_que, *args, **kwargs):\n super(Consumer, self).__init__(*args, **kwargs)\n self.page_que = page_que\n self.img_que = img_que\n\n def run(self):\n while True:\n if self.img_que.empty() and self.page_que.empty():\n break\n img_url, filename = self.img_que.get()\n request.urlretrieve(img_url, 'images/' + filename)\n print(filename + ' 下载完成!')\n\n\ndef main():\n page_que = Queue(100)\n img_que = Queue(1000)\n for x in range(1, 101):\n url = 'https://www.doutula.com/article/list/?page=%d' % x\n page_que.put(url)\n\n for x in range(5):\n t = Producer(page_que, img_que)\n t.start()\n\n for x in range(5):\n t = Consumer(page_que, img_que)\n t.start()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"多线程/demo7.py","file_name":"demo7.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"218995201","text":"import tensorflow as tf\nimport json\nimport codecs\nimport numpy\nimport copy\nimport nltk\n\n\nclass Data_holder:\n def find_all(self, a_str, sub):\n start = 0\n while True:\n start = a_str.find(sub, start)\n if start == -1: return\n yield start\n start += len(sub) # use start += 1 to find overlapping matches\n\n def __init__(self):\n self.whole_batch_index = 0\n self.Batch_Index = 0\n\n self.Batch_Size = 100\n self.Total_Batch_Size = 0\n\n self.P_Length = 70\n self.Q_Length = 85\n self.Word_Embedding_Dimension = 100\n\n self.argsort_length = []\n self.Paragraphs = []\n self.Questions = []\n self.Paragraphs_Length = []\n self.Questions_Length = []\n self.IDs = []\n self.Sentence_Index = []\n self.Sentence_s_e_Index = []\n\n self.in_path = \"C:\\\\Users\\\\Administrator\\\\Desktop\\\\qadataset\\\\train-v1.1.json\"\n self.data = json.load(open(self.in_path, 'r'))\n\n in_path_glove = \"C:\\\\Users\\\\Administrator\\\\Desktop\\\\qadataset\\\\glove6B100d.txt\"\n glove_f = codecs.open(in_path_glove, 'r', 'utf-8')\n\n self.words = []\n self.vectors = []\n\n for line in glove_f:\n tokens = line.split(' ')\n self.words.append(tokens.pop(0))\n 
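# pop(0) removed the word itself, so tokens now holds only the embedding\n # components (still strings; they are cast to float just below)\n 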
self.vectors.append(tokens)\n\n self.vectors = numpy.array((self.vectors), 'f').reshape((-1, self.Word_Embedding_Dimension))\n\n self.dictionary = numpy.array(self.words)\n self.glove_arg_index = self.dictionary.argsort()\n self.dictionary.sort()\n\n def get_glove(self, word):\n word = \"\".join(word).lower()\n index = self.dictionary.searchsorted(word)\n # print(\"index,\", index)\n\n if index == 400000:\n index = 0\n\n if word == self.dictionary[index]:\n # print(\"Success: \", word)\n # input()\n return self.vectors[self.dictionary.searchsorted(word)]\n else:\n # if str != '':\n # print(\"fail: \", word)\n none_result = numpy.zeros((self.Word_Embedding_Dimension), dtype='f')\n for i in range(self.Word_Embedding_Dimension):\n none_result[i] = 10.0 / self.Word_Embedding_Dimension\n return none_result\n\n def get_glove_sequence(self, length, tokens):\n result = numpy.zeros((length, self.Word_Embedding_Dimension), dtype='f')\n padding = 0\n mylength = length\n if len(tokens) < length:\n mylength = len(tokens)\n padding = length - mylength\n for i in range(mylength):\n result[padding + i] = self.get_glove(tokens[i])\n\n return result\n\n def get_json(self):\n return self.data\n\n def set_batch(self):\n myindex = 0\n numberOfQuestions = 0\n fileIndex = 0\n\n max_plength = -99\n max_qlength = -99\n\n sentence_index = 0\n\n for article in self.data['data']:\n for para in article['paragraphs']:\n for qa in para['qas']:\n for answer in qa['answers']:\n start_index = int(answer['answer_start'])\n answer_length = len(answer['text'])\n\n original_str = \"\".join(para['context'])\n\n para_str = list(para['context'])\n para_str[start_index] = '#'\n para_str[start_index + answer_length - 1] = '#'\n\n temp_str = \"\".join(para_str)\n temp_str = temp_str.replace('.', ' .')\n temp_str = temp_str.replace(',', ' ,')\n temp_str = temp_str.replace('?', ' ?')\n temp_str = temp_str.replace('!', ' !')\n temp_str = temp_str.replace('(', ' ')\n temp_str = temp_str.replace(')', ' ')\n temp_str = temp_str.replace(u'\\u2013', ' - ')\n temp_str = temp_str.replace(u'\\u2014', ' - ')\n temp_str = temp_str.replace('-', ' - ')\n temp_str = temp_str.replace('\\'', ' \\' ')\n temp_str = temp_str.replace('\\\"', '')\n # parapraph pre-processing\n\n original_str = original_str.replace('.', ' .')\n original_str = original_str.replace(',', ' ,')\n original_str = original_str.replace('?', ' ?')\n original_str = original_str.replace('!', ' !')\n original_str = original_str.replace('(', ' ')\n original_str = original_str.replace(')', ' ')\n original_str = original_str.replace(u'\\u2013', ' - ')\n original_str = original_str.replace(u'\\u2014', ' - ')\n original_str = original_str.replace('-', ' - ')\n original_str = original_str.replace('\\'', ' \\' ')\n original_str = original_str.replace('\\\"', '')\n # string for index setting\n\n question_ = \"\".join(qa['question']).strip()\n question_ = question_.replace('?', ' ?')\n question_ = question_.split(' ')\n # question pre-processing\n\n Start_Index = 0\n Stop_Index = 0\n\n split1 = original_str.split(' ')\n\n para_str = \"\".join(temp_str)\n para_str = para_str.split(' ')\n\n dot_count = 0\n sentence_count = -1\n\n for i in range(len(para_str)):\n if para_str[i] == '.':\n dot_count += 1\n\n temp_list = list(para_str[i])\n\n if len(temp_list) > 0:\n if temp_list[0] == '#':\n sentence_count = dot_count\n # start, stop index processing\n\n self.IDs.append(qa['id'])\n sentences = original_str.split('.').copy()\n\n s_e_array = numpy.zeros(shape=[2], dtype=numpy.int32)\n s_e_array[0] = 
sentence_index\n index_array = numpy.zeros(shape=[len(original_str.split('.').copy())])\n index_array[sentence_count] = 1\n\n for i, sentence in enumerate(sentences):\n self.Sentence_Index.append(index_array[i])\n self.Paragraphs.append(sentence.split(' '))\n self.Questions.append(question_.copy())\n\n self.Paragraphs_Length.append(len(sentence.split(' ')))\n self.Questions_Length.append(len(question_))\n sentence_index += 1\n\n s_e_array[1] = sentence_index\n self.Sentence_s_e_Index.append((s_e_array.copy()))\n\n if max_plength < len(split1):\n max_plength = len(split1)\n if max_qlength < len(question_):\n max_qlength = len(question_)\n\n print(\"max p, q :\", max_plength, max_qlength, \" , \", numberOfQuestions)\n # self.P_Length = max_plength + 200\n # self.Q_Length = max_qlength + 50\n\n self.Total_Batch_Size = len(self.IDs)\n self.argsort_length = numpy.argsort(numpy.array(self.Paragraphs_Length))\n\n return max_plength, max_qlength\n\n def get_next_batch(self):\n cur_batch = 80\n cur_length = 50\n q_max = 25\n\n total_batch = 0\n\n np_range = numpy.arange(0, len(self.Sentence_s_e_Index), dtype='i')\n numpy.random.shuffle(np_range)\n\n for i in range(cur_batch):\n idx = np_range[i]\n total_batch += (self.Sentence_s_e_Index[idx][1] - self.Sentence_s_e_Index[idx][0])\n\n batch_paragraph = numpy.zeros((total_batch, cur_length, self.Word_Embedding_Dimension), dtype='f')\n batch_question = numpy.zeros((total_batch, q_max, self.Word_Embedding_Dimension), dtype='f')\n batch_label = numpy.zeros((total_batch, 2), dtype='f')\n\n index = 0\n\n for i in range(cur_batch):\n idx = np_range[i]\n\n for j in range(self.Sentence_s_e_Index[idx][0], self.Sentence_s_e_Index[idx][1]):\n batch_label[index, int(self.Sentence_Index[j])] = 1\n batch_paragraph[index] = self.get_glove_sequence(length=cur_length, tokens=self.Paragraphs[j])\n batch_question[index] = self.get_glove_sequence(length=q_max, tokens=self.Questions[j])\n index += 1\n\n return batch_paragraph, batch_question, batch_label, np_range, 0\n\n def get_test_batch(self):\n cur_batch = 1\n cur_length = 70\n q_max = 50\n\n total_batch = 0\n\n np_range = numpy.arange(0, len(self.Sentence_s_e_Index), dtype='i')\n numpy.random.shuffle(np_range)\n\n idx = np_range[0]\n total_batch += (self.Sentence_s_e_Index[idx][1] - self.Sentence_s_e_Index[idx][0])\n\n batch_paragraph = numpy.zeros((total_batch, cur_length, self.Word_Embedding_Dimension), dtype='f')\n batch_question = numpy.zeros((total_batch, q_max, self.Word_Embedding_Dimension), dtype='f')\n batch_label = numpy.zeros((total_batch, 2), dtype='f')\n\n index = 0\n\n idx = np_range[0]\n\n for j in range(self.Sentence_s_e_Index[idx][0], self.Sentence_s_e_Index[idx][1]):\n batch_label[index, int(self.Sentence_Index[j])] = 1\n batch_paragraph[index] = self.get_glove_sequence(length=cur_length, tokens=self.Paragraphs[j])\n batch_question[index] = self.get_glove_sequence(length=q_max, tokens=self.Questions[j])\n index += 1\n\n return batch_paragraph, batch_question, batch_label, idx, idx\n\n def get_sequence_batch(self):\n cont = True\n\n cur_batch = 80\n if self.Batch_Index + cur_batch > len(self.Sentence_s_e_Index):\n cur_batch = len(self.Sentence_s_e_Index) - self.Batch_Index\n cont = False\n cur_length = 70\n q_max = 50\n\n total_batch = 0\n\n np_range = numpy.arange(0, len(self.Sentence_s_e_Index), dtype='i')\n numpy.random.shuffle(np_range)\n\n for i in range(self.Batch_Index, self.Batch_Index + cur_batch):\n idx = i\n total_batch += (self.Sentence_s_e_Index[idx][1] - 
self.Sentence_s_e_Index[idx][0])\n\n batch_paragraph = numpy.zeros((total_batch, cur_length, self.Word_Embedding_Dimension), dtype='f')\n batch_question = numpy.zeros((total_batch, q_max, self.Word_Embedding_Dimension), dtype='f')\n batch_label = numpy.zeros((total_batch, 2), dtype='f')\n\n index = 0\n\n for i in range(self.Batch_Index, self.Batch_Index + cur_batch):\n idx = i\n\n for j in range(self.Sentence_s_e_Index[idx][0], self.Sentence_s_e_Index[idx][1]):\n batch_label[index, int(self.Sentence_Index[j])] = 1\n batch_paragraph[index] = self.get_glove_sequence(length=cur_length, tokens=self.Paragraphs[j])\n batch_question[index] = self.get_glove_sequence(length=q_max, tokens=self.Questions[j])\n index += 1\n\n self.Batch_Index += cur_batch\n\n return batch_paragraph, batch_question, batch_label, np_range, cur_batch, self.Batch_Index\n","sub_path":"QA/Sentence_Data_Processor.py","file_name":"Sentence_Data_Processor.py","file_ext":"py","file_size_in_byte":11643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"223016187","text":"# Prevodnik jednotek\r\n \r\nvolba = input (\"Co chcete prevest? Vzdalenost, teplotu, rychlost? \")\r\n\r\nwhile volba == \"vzdalenost\":\r\n distance = input (\"Chcete prevest km na mile nebo mile na km? \")\r\n if distance == \"km na mile\":\r\n dk = float(input(\"Jakou vzdalenost v km chcete prevest na mile: \"))\r\n Km_Mil = round(dk * 0.62137, 2) # prevod km na mile\r\n print (\"Vzdalenost\", dk, \"km je\", Km_Mil , \"mil.\", end=\"\")\r\n else:\r\n dm = float(input(\"Jakou vzdalenost v mil chcete prevest na km: \"))\r\n Mil_Km = round(dm * 1.609, 2) # prevod mile na km\r\n print (\"Vzdalenost\", dm, \"mil je\", Mil_Km , \"km.\", end=\"\")\r\n break \r\n \r\nwhile volba == \"teplota\":\r\n stupnice = input (\"Chcete prevest °C na F nebo F na °C? \")\r\n if stupnice == \"°C na F\":\r\n CF = float(input(\"Jakou teplotu v °C chcete prevest na F: \"))\r\n prevodCF = round(9/5 * CF + 32, 2) # prevod C na F\r\n print (\"Teplota\", CF, \"°C je\", prevodCF , \"F.\", end=\"\")\r\n else:\r\n FC = float(input(\"Jakou teplotu v F chcete prevest na °C: \"))\r\n prevodFC = round(5/9 * (FC - 32), 2) # prevod F na C\r\n print (\"Teplota\", FC, \"F je\", prevodFC , \"°C.\", end=\"\")\r\n break\r\n \r\nwhile volba == \"rychlost\":\r\n speed = input(\"Chcete prevod km/h na mph nebo mph na km/h? \")\r\n if speed == \"km/h na mph\":\r\n kmh = float(input (\"Jakou rychlost v km/h chcete prevest? \"))\r\n kmhMPH = round(0.621 * kmh, 2) # prevod kmh na mph (miles per hour)\r\n print (\"Rychlost\", kmh, \"km/h je\", kmhMPH, \"mph.\")\r\n else:\r\n mph = float(input (\"Jakou rychlost v mph chcete prevest? \"))\r\n mphKMH = round(1.609 * mph, 2) # prevod mph (miles per hour) na km/h\r\n print (\"Rychlost\", mph, \"mph je\", mphKMH, \"km/h.\")\r\n break","sub_path":"Convertor.py","file_name":"Convertor.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"391961623","text":"import os\nimport sys\nimport time\nimport argparse\nimport ast\nimport logging\nimport numpy as np\nimport paddle.fluid as fluid\n\nfrom model import resnet_3d\nfrom reader import Ucf101\nfrom config import parse_config, merge_configs, print_configs\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nlogging.root.handlers = []\nFORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s'\nlogging.basicConfig(filename='logger.log', level=logging.INFO, format=FORMAT)\nlogger = logging.getLogger(__name__)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\"Paddle Video train script\")\n parser.add_argument(\n '--model_name',\n type=str,\n default='resnet_3d',\n help='name of model to train.')\n parser.add_argument(\n '--config',\n type=str,\n default='configs/ucf101.txt',\n help='path to config file of model')\n parser.add_argument(\n '--batch_size',\n type=int,\n default=None,\n help='training batch size. None to use config file setting.')\n parser.add_argument(\n '--learning_rate',\n type=float,\n default=None,\n help='learning rate used for training. None to use config file setting.')\n parser.add_argument(\n '--pretrain',\n type=str,\n default=None,\n help='path to pretrain weights. None to use default weights path in ~/.paddle/weights.'\n )\n parser.add_argument(\n '--use_gpu',\n type=ast.literal_eval,\n default=True,\n help='default use gpu.')\n parser.add_argument(\n '--epoch',\n type=int,\n default=100,\n help='epoch number; 0 to read from config file')\n parser.add_argument(\n '--save_dir',\n type=str,\n default='checkpoints_models',\n help='directory name to save train snapshot')\n args = parser.parse_args()\n return args\n\n\ndef train(args):\n # parse config\n place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()\n with fluid.dygraph.guard(place):\n config = parse_config(args.config)\n train_config = merge_configs(config, 'train', vars(args))\n print_configs(train_config, 'Train')\n # train_model = TSN1.TSNResNet('TSN',train_config['MODEL']['num_layers'],\n # train_config['MODEL']['num_classes'],\n # train_config['MODEL']['seg_num'],0.00002)\n train_model = resnet_3d.generate_model(train_config['MODEL']['num_layers'])\n # 根据自己定义的网络,声明train_model\n # opt = fluid.optimizer.Momentum(learning_rate=train_config['MODEL']['learning_rate'],momentum = 0.9, parameter_list=train_model.parameters())\n # opt = fluid.optimizer.Momentum(0.001, 0.9, parameter_list=train_model.parameters())\n # opt=fluid.optimizer.SGDOptimizer(learning_rate=train_config['MODEL']['learning_rate'], parameter_list=train_model.parameters())\n opt = fluid.optimizer.AdamOptimizer(learning_rate=train_config['MODEL']['learning_rate'],\n parameter_list=train_model.parameters())\n if args.pretrain:\n # 加载上一次训练的模型,继续训练\n train_model = resnet_3d.generate_model(train_config['MODEL']['num_layers'], n_classes=1039)\n # model, _ = fluid.dygraph.load_dygraph(args.save_dir + '/tsn_model')\n model, _ = fluid.dygraph.load_dygraph('data/data51645/paddle_dy')\n\n train_model.load_dict(model)\n train_model.fc = fluid.dygraph.Linear(512 * 4, 101, act='softmax')\n print('pretrain is ok')\n\n # build model\n if not 
os.path.exists(args.save_dir):\n os.makedirs(args.save_dir)\n\n # get reader\n train_config.TRAIN.batch_size = train_config.TRAIN.batch_size\n # train_reader = KineticsReader(args.model_name.upper(), 'train', train_config).create_reader()\n train_reader = Ucf101(args.model_name.upper(), 'train', train_config).create_reader()\n\n epochs = args.epoch or train_model.epoch_num()\n\n # test\n test_config = merge_configs(config, 'test', vars(args))\n label_dic = np.load('label_dir.npy', allow_pickle=True).item()\n label_dic = {v: k for k, v in label_dic.items()}\n\n # get infer reader\n # test_reader = Ucf101(args.model_name.upper(), 'test', test_config).create_reader()\n t_acc = []\n v_acc = []\n t_loss = []\n for i in range(epochs):\n train_acc_list = []\n train_loss_list = []\n for batch_id, data in enumerate(train_reader()):\n dy_x_data = np.array([x[0] for x in data]).astype('float32')\n dy_x_data = np.transpose(dy_x_data, (0, 2, 1, 3, 4))\n y_data = np.array([[x[1]] for x in data]).astype('int64')\n # if batch_id ==0:\n # print(dy_x_data.shape)\n # print(y_data.shape)\n\n img = fluid.dygraph.to_variable(dy_x_data)\n label = fluid.dygraph.to_variable(y_data)\n label.stop_gradient = True\n\n # out, acc = train_model.forward(img, label)\n out, acc = train_model(img, label)\n train_acc_list.append(acc.numpy()[0])\n # print('shape',out.shape,label.shape)\n # print(out)\n # print(label)\n\n loss = fluid.layers.cross_entropy(out, label)\n avg_loss = fluid.layers.mean(loss)\n train_loss_list.append(avg_loss.numpy())\n\n avg_loss.backward()\n\n opt.minimize(avg_loss)\n train_model.clear_gradients()\n\n if batch_id % 10 == 0:\n logger.info(\n \"Loss at epoch {} step {}: {}, acc: {}\".format(i, batch_id, avg_loss.numpy(), acc.numpy()))\n print(\"Loss at epoch {} step {}: {}, acc: {}\".format(i, batch_id, avg_loss.numpy(), acc.numpy()))\n t_loss.append(np.mean(train_loss_list))\n t_acc.append(np.mean(train_acc_list))\n # val_acc_list = []\n # for batch_id, data in enumerate(test_reader()):\n # dy_x_data = np.array([x[0] for x in data]).astype('float32')\n # dy_x_data = np.transpose(dy_x_data,(0,2,1,3,4))\n # y_data = np.array([[x[1]] for x in data]).astype('int64')\n\n # img = fluid.dygraph.to_variable(dy_x_data)\n # label = fluid.dygraph.to_variable(y_data)\n # label.stop_gradient = True\n # out, acc = train_model.forward(img, label)\n # val_acc_list.append(acc.numpy()[0])\n # v_acc.append(np.mean(val_acc_list))\n # print(\"测试集准确率为:{}\".format(np.mean(val_acc_list)))\n fluid.dygraph.save_dygraph(train_model.state_dict(), args.save_dir + '/res3d_model_' + str(i + 1))\n\n print('t_acc', t_acc)\n print('t_loss', t_loss)\n # print('v_acc',v_acc)\n # get infer reader\n # val_reader = KineticsReader(args.model_name.upper(), 'valid', val_config).create_reader()\n # logger.info(\"Final loss: {}\".format(avg_loss.numpy()))\n # print(\"Final loss: {}\".format(avg_loss.numpy()))\n result_list = []\n result_list.append(t_acc)\n result_list.append(t_loss)\n np_list = np.array(result_list).T\n name = ['train_acc', 'train_loss']\n test = pd.DataFrame(columns=name, data=np_list)\n now = int(time.time())\n timeArray = time.localtime(now)\n today_time = time.strftime(\"%Y-%m-%d-%H-%M-%S\", timeArray)\n test.to_csv('train_result_' + today_time + '_.csv')\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n # check whether the installed paddle is compiled with GPU\n logger.info(args)\n\n 
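# The comment above promises a GPU check that the original never performs.\n    # A minimal sketch of it, assuming fluid.is_compiled_with_cuda() is\n    # available in this PaddlePaddle version:\n    if args.use_gpu and not fluid.is_compiled_with_cuda():\n        logger.warning('paddle is not compiled with CUDA, falling back to CPU')\n        args.use_gpu = False\n\n    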
train(args)\n","sub_path":"util_scripts/generate_video_jpgs.py","file_name":"generate_video_jpgs.py","file_ext":"py","file_size_in_byte":7909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"587642247","text":"# -*- coding: utf-8 -*-\ndef InsertSort(lst):\n n=len(lst)\n if n<=1:\n return lst\n for i in range(1,n):\n j=i\n target=lst[i] #每次循环的一个待插入的数\n while j>0 and target<lst[j-1]:\n lst[j]=lst[j-1]\n j-=1\n lst[j]=target\n return lst\n\nif batch_reward > biggest:\n biggest = batch_reward\n pi.save_model('model/ppo_for_g/ranvsppo{0}.ckpt'.format(biggest))\n\n value_memory = []\n state_memory = []\n reward_memory = []\n action1_memory = []\n action2_memory = []\n value_next_memory = []\n\n t = 0","sub_path":"script/ppo_training_process.py","file_name":"ppo_training_process.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"645428105","text":"# coding=utf-8\nimport random\n\nfrom yowsup.layers.protocol_messages.protocolentities import TextMessageProtocolEntity\n\n\ndef random_roll(message):\n \"\"\"\n Rolls a number between 1 and 10.\n :param message: A message of parent class MessageProtocol\n :return: A TextMessageProtocol with the response\n \"\"\"\n value = random.randint(1, 10)\n if value == 10:\n return TextMessageProtocolEntity(u'༼ つ ಠ益ಠ༽つ [%d]' % value, to=message.getFrom())\n else:\n return TextMessageProtocolEntity(u'༼ つ ◕_◕ ༽つ [%d]' % value, to=message.getFrom())\n\n\ndef random_raza(message):\n raza_list = [\"Baumann\", \"Cantu\", \"Echeverry\", \"Garcia\", \"Jorge\", \"Guerra\", \"Lamadrid\", \"Fred\", \"Marchand\", \"Ricky\",\n \"David\",\n \"Esteban\", \"Ortiz\", \"Olaf\", \"Peña\", \"Memo\", \"Eduardo\", \"Victor\", \"Pato\", \"Vela\"]\n return TextMessageProtocolEntity(random.choice(raza_list), to=message.getFrom())\n\n\ndef generate_insult_string():\n beban_list = [\"Estufa \", \"Estonia \", \"Estebana \", \"Esteban \", \"Estufutria Lentes \", \"BebAnus \", \"EstebAnus\"]\n es_list = [\"es \", \"es un \", \"sera \", \"siempre lo fue \", \"sueña con ser \", \"aspira ser \", \"es una \", \"prefiere ser \",\n \"ama ser \", \" \", \" \", \" \"]\n adj_list = [\"popo\", \"pipi\", \"caca\", \"exremento\", \"rata cochina\", \"traidor\", \"nutria\", \"gata\", \"baguette\",\n \"faguette\", \"nini\", \"huele a vaca\", \"el peor abogado de el mundo\", \"el mas inutil de todos\", \"chino\",\n \"zorra\", \"gordo \", \"gorda \", \"cuatrojos \", \"inutil \", \"tonto \", \"tonta \", \"basofia\", \"basura\", \"puto\",\n \"puta\", \"Pato\", \"Patricio\", \"Pato con Lentes\", \"vagina\", \"pitos\", \"traga pitos\", \"perro\",\n \"perra de Baumann\", \"perra de Cantu\", \"perra de Echeverry\", \"perra de mau Garcia\", \"perra de Jorge\",\n \"perra de Guerra\", \"perra de Rana\", \"perra de AlFredrick\", \"perra de Marchand\", \"perra de Ricky\",\n \"perra de David\", \"perra de Ortiz\", \"perra de Olaf\", \"perra de Peña\", \"perra de Memo\",\n \"perra de Eduardo\", \"perra de Victor\", \"perra de Pato\", \"perra de Vela\"]\n insult_sentence = \" つ ಠ益ಠ༽つ \" + random.choice(beban_list) + random.choice(es_list) + random.choice(adj_list)\n return insult_sentence\n\n\ndef random_estaban(message):\n return TextMessageProtocolEntity(generate_insult_string(), to=message.getFrom())\n\n\ndef generate_eightball(message):\n response_list = [\"It is decidedly so\", \"Without a doubt\", \"Yes definitely\", \"You may rely on it\",\n \"As I see it, yes\", \"Most likely\", \"Outlook good\", \"Yes\", \"Signs point to yes\",\n \"Don't count on 
it\", \"My reply is no\", \"My sources say no\", \"Maybe\", \"Not a chance\",\n \"Outlook not so good\", \"Very doubtful\", \"Ni a vergasos\", \"Nope\", \"Nel\"]\n return TextMessageProtocolEntity(random.choice(response_list), to=message.getFrom())\n\n\ndef generate_jorgita_message(message):\n response_list = [\"tiene herpes en el ano\", \"es un gusano\", \"es la perrita de rana\", \"es el pollo de pato\",\n \"pato es puto\", \"ama mamar palito\", \"ama a patito\", \"no es fino\", \"es un puñetas\",\n \"es peor de nini que peña\", \"no le sabe a los lasers\", \"va a ser la major doctorita del mundito\",\n \"no sabe hablar\", \"no sabe caminar\", \"es putita\", \"es prostitute\", \"es lentita\", \"es caquita\",\n \"es una cerdita\", \"es una lagarta\", \"es Filipina\", \"es super gordita\", \"tiene caries\",\n \"tiene cancer felino\", \"es una nalga en comparación con olaf\", \"todos le ganan\", \"es un pájaro \",\n \"no puede ver\", \"esta ciego \", \"es todo tonto\", \"le gusta comer popó\", \"es un amor\",\n \"esta retrasado mental\", \"da asco\", \"me da ganas de vomitar\"]\n return TextMessageProtocolEntity(random.choice(response_list), to=message.getFrom())\n\n\ndef generate_pato_message(message):\n pato_response_list = [\"esta ciego\", \"es un tonto\", \"es la perra de jorgita\"]\n return TextMessageProtocolEntity(random.choice(pato_response_list), to=message.getFrom())\n","sub_path":"BasicTextProtocols.py","file_name":"BasicTextProtocols.py","file_ext":"py","file_size_in_byte":4136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"111341436","text":"#problem found at https://leetcode.com/problems/coin-change/\n\n#description: You are given coins of different denominations and a total amount of money amount. Write a function\n# to compute the fewest number of coins that you need to make up that amount. 
If that amount of money cannot be \n#made up by any combination of the coins, return -1.\n\ndef coinChange(coins, amount):\n\tif(amount == 0):\n\t\treturn 0\n\n\tsort_coins = sorted(coins)\n\t\n\tans = attempt(sort_coins, amount)\n\twhile(ans == -1 and len(sort_coins) > 0):\n\t\tsort_coins.remove(max(sort_coins))\n\t\tans = attempt(sort_coins, amount)\n\treturn ans\n\n\ndef attempt(coins, amount):\n\t#if largest element in coins is larget than amount, it cant be used, so return -1 such that \n\t#current largest coin is removed from consideration\n\tif(len(coins)<1):\n\t\tif(len(coins) == 0):\n\t\t\treturn -1\n\t\tif(amount%coins[0] == 0):\n\t\t\treturn amount/coins[0]\n\t\telse:\n\t\t\treturn -1\n\n\tif(coins[len(coins)-1] > amount):\n\t\treturn -1\n\telif(coins[len(coins)-1] == amount):\n\t\treturn 1\n\telse:\n\t\tcount = amount/coins[len(coins)-1]\n\t\tremainder = amount%coins[len(coins)-1]\n\t\tif remainder == 0:\n\t\t\treturn count\n\t\telif remainder in coins:\n\t\t\treturn count + 1\n\n\t\ttempcoins = coins[:]\n\t\ttempcoins.remove(max(tempcoins))\n\t\tr = attempt(tempcoins, remainder)\n\t\twhile(r == -1 and len(tempcoins) > 0):\n\t\t\ttempcoins.remove(max(tempcoins))\n\t\t\tr = attempt(tempcoins, remainder)\n\t\tif(r == -1):\n\t\t\treturn r\n\t\telse:\n\t\t\treturn count + r\n\n\n\n\n\n\nl = [1,2147483647]\nprint(coinChange(l, 2))\n\n","sub_path":"python/coin_change.py","file_name":"coin_change.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"253121470","text":"from __future__ import unicode_literals\nimport scrapy\nimport json\nimport os\nfrom scrapy.spiders import Spider\nfrom scrapy.http import FormRequest\nfrom scrapy.http import Request\nfrom chainxy.items import ChainItem\nfrom lxml import etree\nfrom selenium import webdriver\nfrom lxml import html\nimport usaddress\n\nclass candlewoodsuites(scrapy.Spider):\n\tname = 'candlewoodsuites'\n\tdomain = ''\n\thistory = []\n\n\tdef start_requests(self):\n\t\tinit_url = 'https://www.allstays.com/hotels-by-chain/candlewood-suites.htm'\n\t\tyield scrapy.Request(url=init_url, callback=self.parse_state)\n\n\tdef parse_state(self, response):\n\t\tstate_list = response.xpath('//a[@class=\"mapside button\"]/@href').extract()\n\t\tfor state in state_list : \n\t\t\tstate_link = 'https:' + state\n\t\t\tyield scrapy.Request(url=state_link, callback=self.parse_store)\n\n\tdef parse_store(self, response):\n\t\tstore_list = response.xpath('//a[@class=\"full-width button\"]/@href').extract()\n\t\tif store_list:\n\t\t\tfor store in store_list:\n\t\t\t\tstore_link = 'https:' + store\n\t\t\t\tyield scrapy.Request(url=store_link, callback=self.parse_page)\n\n\tdef parse_page(self, response):\n\t\ttry:\n\t\t\titem = ChainItem()\n\t\t\titem['store_name'] = self.validate(response.xpath('//span[@itemprop=\"name\"]/text()').extract_first())\n\t\t\titem['address'] = self.validate(response.xpath('//span[@itemprop=\"streetAddress\"]//text()').extract_first())\n\t\t\titem['city'] = self.validate(response.xpath('//span[@itemprop=\"addressLocality\"]/text()').extract_first())\n\t\t\titem['state'] = self.validate(response.xpath('//span[@itemprop=\"addressRegion\"]/text()').extract_first())\n\t\t\titem['zip_code'] = self.validate(response.xpath('//span[@itemprop=\"postalCode\"]/text()').extract_first())\n\t\t\titem['country'] = 'United States'\n\t\t\titem['phone_number'] = 
self.validate(response.xpath('//span[@itemprop=\"telephone\"]/text()').extract_first())\n\t\t\tif item['store_name'] != '':\n\t\t\t\tyield item\t\t\t\n\t\texcept:\n\t\t\tpass\n\n\tdef validate(self, item):\n\t\ttry:\n\t\t\treturn item.strip()\n\t\texcept:\n\t\t\treturn ''\n\n\tdef eliminate_space(self, items):\n\t\ttmp = []\n\t\tfor item in items:\n\t\t\tif self.validate(item) != '':\n\t\t\t\ttmp.append(self.validate(item))\n\t\treturn tmp","sub_path":"step4/chainxy/spiders/candlewoodsuites.py","file_name":"candlewoodsuites.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"53659132","text":"from os import chdir\nfrom os.path import isdir, basename, dirname, sep\nfrom commands import getstatusoutput\n\n\ndef has_captcha(page_source):\n keywords = ['recaptcha_submit',\n 'manual_recaptcha_challenge_field']\n return any(keyword in page_source for keyword in keywords)\n\n\ndef pack_crawl_data(crawl_dir):\n if not isdir(crawl_dir):\n print(\"Cannot find the crawl dir: %s\" % crawl_dir)\n return False\n\n crawl_dir = crawl_dir[:-1] if crawl_dir.endswith(sep) else crawl_dir\n crawl_name = basename(crawl_dir)\n containing_dir = dirname(crawl_dir)\n chdir(containing_dir)\n arc_path = \"%s.tar.gz\" % crawl_name\n tar_cmd = \"tar czvf %s %s\" % (arc_path, crawl_name)\n print(\"Packing the crawl dir with cmd: %s\" % tar_cmd)\n status, txt = getstatusoutput(tar_cmd)\n if status:\n print(\"Tar command failed: %s \\nSt: %s txt: %s\"\n % (tar_cmd, status, txt))\n else:\n # http://stackoverflow.com/a/2001749/3104416\n tar_gz_check_cmd = \"gunzip -c %s | tar t > /dev/null\" % arc_path\n tar_status, tar_txt = getstatusoutput(tar_gz_check_cmd)\n if tar_status:\n print(\"Tar check failed: %s tar_status: %s tar_txt: %s\"\n % (tar_gz_check_cmd, tar_status, tar_txt))\n return False\n else:\n return True\n\n\ndef run_cmd(cmd):\n return getstatusoutput('%s ' % (cmd))\n","sub_path":"variance/utils/gen_utils.py","file_name":"gen_utils.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"169464565","text":"# -*- coding: utf-8 -*-\n\"\"\"\nIn this implementation there is an ANN with GRU units, which are a slightly simpler form of LSTM units.\nAs input this network gets the approximated suspension state of the car, the speed, and the road profile.\nIt approximates the suspension state with the predicted i values for the previous n_samples timesteps and iterates them\nn_approx times.\n\nThe loss function is the acceleration of the body Zh_dtdt.\n\nSadly the network doesn't reduce the loss function over multiple episodes.\n\nThe road profile and road profile gradient are normalized. The suspension values are not.\n\"\"\"\n\n# Commented out IPython magic to ensure Python compatibility.\nimport numpy as np\n# %tensorflow_version 2.x\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom Hackathon_2020.io import prepare_data_matrix_2, create_stacked_batch\nfrom Hackathon_2020.loss import tf_tweaked_loss, tf_activeSuspension\nfrom Hackathon_2020.utils.predict_models import predict_RNN_05\n\n# Set a seed to get comparable results\ntf.random.set_seed(313)\n\n# Parameters for the dataset for easy changing:\nbatchsize = 16\nn_epochs = 1\ndelta_time = 0.005 # in s\nvel = 15 # in m/s\nk = 20 # factor for car and street\n\n# define the number of samples we look back into the past\nn_samples = 5\n\n# define the amount of approximating the suspension state for each batch\nn_approx = 4\n\nPROFILE = \"ts1_2_k_3.0.csv\"\n\n# Output indices: prof, prof_dt, Zb, Zb_dt, Zb_dtdt, Zt, Zt_dt, Zt_dtdt, vel\nraw_train_data = prepare_data_matrix_2(PROFILE, vel, k, delta_time)\ntrain_data = raw_train_data\ntrain_profile = train_data[:, 0]\n\n# Calculate gradient of road profile; no data for first point -> 0\nfor n in range(len(train_data) - 1):\n train_data[n + 1, 1] = (train_data[n + 1, 0] - train_data[n, 0]) / delta_time\n\n# Normalization of input data\ntrain_mean = np.mean(raw_train_data, axis=0)\ntrain_stddev = np.std(raw_train_data, axis=0)\ntrain_data[:, 0] = (raw_train_data[:, 0] - train_mean[0]) / train_stddev[0]\ntrain_data[:, 1] = (raw_train_data[:, 1] - train_mean[1]) / train_stddev[1]\n\n# get the indices to form batches\nbatch_indices = [i for i in range(0, len(train_data), batchsize)]\n\nfrom tensorflow.keras.layers import Layer, Conv2D, MaxPool2D, Flatten, Dense, BatchNormalization, GlobalAveragePooling2D\n\n\nclass Model(Layer):\n\n def __init__(self):\n super(Model, self).__init__()\n self.GRU = tf.keras.layers.GRU(16,\n return_sequences=True,\n return_state=False,\n recurrent_initializer='glorot_uniform')\n self.RNN = tf.keras.layers.SimpleRNN(8,\n return_sequences=True,\n return_state=False,\n recurrent_initializer='glorot_uniform')\n self.dense_2 = Dense(16, activation=None, kernel_initializer=tf.keras.initializers.GlorotNormal,\n bias_initializer=tf.keras.initializers.Zeros)\n self.bn2 = tf.keras.layers.BatchNormalization()\n self.dense_3 = Dense(1, activation=tf.keras.activations.sigmoid,\n kernel_initializer=tf.keras.initializers.GlorotNormal,\n bias_initializer=tf.keras.initializers.Zeros)\n\n def call(self, x, is_training=False):\n # Calculate forward step through all layers\n # x = self.GRU(x) # , initial_state=hidden)\n x = self.RNN(x) # , initial_state=hidden)\n x = self.dense_2(x)\n x = tf.nn.relu(self.bn2(x, training=is_training))\n x = self.dense_3(x)\n return x\n\n\n\"\"\"### Training of the model\"\"\"\n\n#tf.keras.backend.clear_session()\n\n# initial learning rate for Adam\ninitial_learning_rate = 0.0001\n\n# every epoch, the learning rate will decay to learning_rate * 0.5\nlr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(\n initial_learning_rate,\n decay_steps=int(len(train_data) / batchsize),\n decay_rate=0.5,\n staircase=True)\n\n# Training parameters\noptimizer = tf.keras.optimizers.Adam(learning_rate=initial_learning_rate) # lr_schedule)\n\n# Initialize model\nmodel = Model()\n\n# Initialize lists for later visualization\ntrain_steps = []\ntrain_losses = []\ni_ls = []\nZh_ls = []\nZt_ls = []\nZb_ls = []\nZt_dtdt_ls = []\nZb_dtdt_ls = []\n# test_steps = []\n# test_losses = []\nstep = 0\nerr_ct = 0\nt = []\n\n# Define 
progressbar\nprogress = tf.keras.utils.Progbar(target=len(train_data) * n_epochs / batchsize)\n\nfor epoch in range(n_epochs):\n\n # for idx in np.random.permutation(batch_indices):\n for idx in np.random.permutation(batch_indices):\n if idx + batchsize > len(train_data):\n batch_x = create_stacked_batch(train_data, start=idx, stop=len(train_data), n_samples=n_samples)\n batch_profile = raw_train_data[idx:, 0]\n\n elif idx < n_samples:\n batch_x = create_stacked_batch(train_data, start=n_samples, stop=idx + batchsize, n_samples=n_samples)\n batch_profile = raw_train_data[n_samples:idx + batchsize, 0]\n\n else:\n batch_x = create_stacked_batch(train_data, start=idx, stop=idx + batchsize, n_samples=n_samples)\n batch_profile = raw_train_data[idx:idx + batchsize, 0]\n\n # Perform a training step:\n # Compute the output, loss and the gradients.\n with tf.GradientTape() as tape:\n # Approximate the suspension state and do a forward step afterwards\n for idx_approx in range(n_approx):\n for idx_sample in range(n_samples):\n C = model(batch_x, False)\n for n in range(len(batch_x)):\n ActiveSuspension = tf_activeSuspension(batch_x[n, idx_sample, 2], batch_x[n, idx_sample, 3],\n batch_x[n, idx_sample, 4], batch_x[n, idx_sample, 5],\n batch_x[n, idx_sample, 0], batch_x[n, idx_sample, 1],\n batch_x[n, idx_sample, 8], delta_time)\n batch_x[:, idx_sample, 2:8] = tf.convert_to_tensor(np.squeeze(np.array(ActiveSuspension)))\n C = model(batch_x, True)\n # print(C)\n # Compute loss\n loss = tf.math.reduce_mean(\n tf_tweaked_loss(tf.squeeze(C[:, n_samples - 1]), len(C), tf.squeeze(batch_profile)))\n # Compute gradient\n gradients = tape.gradient(loss, model.trainable_variables)\n\n # Apply gradients\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n\n train_losses.append(loss)\n train_steps.append(step)\n\n #for j in range(len(batch_x)):\n # i_ls.append(2 * float(C[j, n_samples - 1, 0]))\n # Zh_ls.append(batch_x[j, n_samples - 1, 0])\n # Zt_ls.append(batch_x[j, n_samples - 1, 3])\n # Zb_ls.append(batch_x[j, n_samples - 1, 2])\n # Zt_dtdt_ls.append(batch_x[j, n_samples - 1, 7])\n # Zb_dtdt_ls.append(batch_x[j, n_samples - 1, 6])\n # if len(t) == 0:\n # t.append(0)\n # else:\n # t.append(t[len(t)-1]+delta_time)\n\n progress.update(step)\n step += 1\n if step > 30:\n break\n\n#model.save(\"model_RNN_04.h5\")\n\n\"\"\"### Quick check of results\"\"\"\n\nprint(\"Errors for Gradient: \", err_ct)\n\n# Get minimum loss and accuracy for training and validation\n# loss_train = min(train_losses)\n\n# Print results\n# print('Minimum training loss: {}'.format(loss_train))\n# print('Minimum test loss: {}'.format(loss_val))\n\n\"\"\"### Plot results\"\"\"\n\n# Create two plotting areas\nfig = plt.figure(figsize=(16, 6))\nloss = fig.add_subplot(2, 2, 1)\nvals = fig.add_subplot(2, 2, 2)\nvals2 = fig.add_subplot(2, 2, 3)\nvals3 = fig.add_subplot(2, 2, 4)\n\n# Loss plot on left area\nloss.plot(train_steps, train_losses)\nvals.plot(t, i_ls)\nvals2.plot(t, Zh_ls)\nvals2.plot(t, Zb_ls)\nvals2.plot(t, Zt_ls)\nvals3.plot(t, Zb_dtdt_ls)\nvals3.plot(t, Zt_dtdt_ls)\nvals.legend(\"i\")\nvals2.legend((\"Zh\", \"Zb\", \"Zt\"))\nvals3.legend((\"Zb_dtdt\", \"Zt_dtdt\"))\n# loss.plot(test_steps, test_losses)\n# loss.legend((\"training\", \"validation\"))\nloss.set_title(\n \"RNN_04 with vel: {0} n_samples: {1} n_approx: {2} batchsize: {3} profile: {4}\".format(vel, n_samples, n_approx, batchsize, PROFILE))\n# Add vertical lines to denote epochs\ntrain_steps_per_epoch = len(train_steps) // n_epochs\nfor epoch in 
range(1, n_epochs):\n loss.axvline(train_steps_per_epoch * epoch, c='gray')\n# Add labels to axis\nloss.set_xlabel(\"steps\")\nloss.set_ylabel(\"loss\")\n# Set x limits\nloss.set_xlim(left=0, right=len(train_steps))\nvals.set_ylim(bottom=0, top=2)\n\nfig.show()\n","sub_path":"models/RNN_05.py","file_name":"RNN_05.py","file_ext":"py","file_size_in_byte":8775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"451791997","text":"import random\n\n\nSUITS = ('D', 'C', 'S', 'H')\nVALUES = ('2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K', 'A')\n\n\nclass Deck(object):\n def __init__(self):\n self.deck_cards = []\n for v in VALUES:\n for s in SUITS:\n self.deck_cards.append(v + s)\n\n def shuffle_deck(self):\n random.shuffle(self.deck_cards)\n return self.deck_cards\n\n def give_out_cards(self, hand, max_size):\n while len(hand) != max_size:\n card = random.choice(self.deck_cards)\n hand.append(card)\n self.deck_cards.remove(card)\n return hand\n\n\nclass Hand(object):\n def __init__(self, cards=None):\n if not cards:\n cards = []\n self.cards_on_hand = cards\n\n def print_cards(self):\n for card in self.cards_on_hand:\n print(card, end=' ')\n print()\n\n\nclass Diller(object):\n def __init__(self, deck, max_size):\n self.deck = deck\n self.hand = []\n self.max_size = max_size\n\n def give_out_cards(self):\n while len(self.hand) != self.max_size:\n card = random.choice(self.deck)\n self.hand.append(card)\n self.deck.remove(card)\n return self.hand\n\n\nclass Win(object):\n def __init__(self, table, hand):\n self.table = table\n self.hand = hand\n self.hand_and_table = hand + table\n self.list_win_combs = []\n self.list_win_cards = []\n\n def check_combination(self):\n n = 0\n while n != 7:\n f_card = self.hand_and_table[n]\n self.list_win_cards.append(f_card)\n for card in self.hand_and_table[n+1:]:\n if f_card[:-1] == card[:-1]:\n self.list_win_cards.append(card)\n if len(self.list_win_cards) > 1:\n self.list_win_combs.append(self.list_win_cards.copy())\n self.list_win_cards.clear()\n n += 1\n\n if not self.list_win_combs:\n self.list_win_combs.append(self.hand)\n\n return self.list_win_combs\n\n\ndef main():\n play_deck = Deck().shuffle_deck()\n cards_player1 = Diller(play_deck, 2).give_out_cards()\n cards_player2 = Diller(play_deck, 2).give_out_cards()\n player1_hand = Hand(cards_player1)\n player1_hand.print_cards()\n player2_hand = Hand(cards_player2)\n player2_hand.print_cards()\n table_cards = Diller(play_deck, 5).give_out_cards()\n table = Hand(table_cards)\n table.print_cards()\n win_combs_player1 = Win(table_cards, cards_player1).check_combination()\n win_combs_player2 = Win(table_cards, cards_player2).check_combination()\n\n print('Best combinations player1:')\n for combs in win_combs_player1:\n print(combs, end='')\n print()\n\n print('Best combinations player2:')\n for combs in win_combs_player2:\n print(combs, end='')\n print()\n\nif __name__ == '__main__':\n main()\n\n\n","sub_path":"homework6/poker.py","file_name":"poker.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"197212054","text":"def get_input():\n with open('input.txt', 'r') as in_file:\n for line in in_file:\n memory = [int(x.strip()) for x in line.split('\\t')]\n return memory\n\n\ndef get_max_value_and_index(numbers):\n max_value = numbers[0]\n max_value_index = 0\n\n for i in range(len(numbers)):\n if numbers[i] > max_value:\n max_value = 
numbers[i]\n max_value_index = i\n return max_value, max_value_index\n\n\ndef distribute(numbers, value, start):\n i = start\n while value > 0:\n if i == len(numbers):\n i = 0\n numbers[i] += 1\n value -= 1\n i += 1\n\n\ndef already_seen(history, memory):\n for m in history:\n if m == memory:\n return True\n return False\n\n\nif __name__ == '__main__':\n # memory = get_input()\n memory = [14, 13, 12, 11, 9, 8, 8, 6, 6, 4, 4, 3, 1, 1, 0, 12]\n history = [memory.copy()]\n loops = 0\n print(memory)\n print(history)\n\n while True:\n max_value, max_value_index = get_max_value_and_index(memory)\n memory[max_value_index] = 0\n distribute(memory, max_value, max_value_index + 1)\n print(memory)\n print(history)\n loops += 1\n if already_seen(history, memory):\n break\n history.append(memory.copy())\n\n print('loops: ', loops)\n","sub_path":"day6/second_part.py","file_name":"second_part.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"112189937","text":"import pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import StandardScaler\n\n\ndef add_missing_dummy_columns(d, columns):\n missing_cols = set(columns) - set(d.columns)\n for c in missing_cols:\n d[c] = 0\n\n\ndef fix_columns(d, columns):\n add_missing_dummy_columns(d, columns)\n\n # make sure we have all the columns we need\n assert (set(columns) - set(d.columns) == set())\n\n d = d[columns]\n return d\n\n\nclass FeatureExtractor():\n def __init__(self):\n pass\n \n #def __init__(self,attribute_names):\n # self.attribute_names = attribute_names\n \n \n \n def fit(self, X_df, y=None):\n global column_dummies\n if y is not None:\n column_dummies = pd.concat(\n [X_df.get(['yearOfRegistration', 'gearbox', 'powerPS', 'model', 'kilometer', 'monthOfRegistration']),\n pd.get_dummies(X_df.seller, prefix = 'seller', drop_first=True),\n #pd.get_dummies(X_df.offerType, prefix='Auction', drop_first=True),\n pd.get_dummies(X_df.vehicleType, prefix='vehicleType', drop_first=True),\n pd.get_dummies(X_df.fuelType, prefix='fuelType', drop_first=True),\n pd.get_dummies(X_df.brand, prefix='brand', drop_first=True),\n pd.get_dummies(X_df.notRepairedDamage, prefix='notRepairedDamage', drop_first=True),\n \n ],\n axis=1).columns\n return self\n return self\n \n \n def transform(self, X_df):\n #print(column_dummies)\n X_df_new = pd.concat(\n [X_df.get(['yearOfRegistration', 'gearbox', 'powerPS', 'model', 'kilometer', 'monthOfRegistration']),\n pd.get_dummies(X_df.seller, prefix = 'seller', drop_first=True),\n #pd.get_dummies(X_df.offerType, prefix='Auction', drop_first=True),\n pd.get_dummies(X_df.vehicleType, prefix='vehicleType', drop_first=True),\n pd.get_dummies(X_df.fuelType, prefix='fuelType', drop_first=True),\n pd.get_dummies(X_df.brand, prefix='brand', drop_first=True),\n pd.get_dummies(X_df.notRepairedDamage, prefix='notRepairedDamage', drop_first=True),\n \n ],\n axis=1)\n #X_df_new = X_df_new.fillna(-1)\n \n X_df_new = fix_columns(X_df_new, column_dummies)\n \n scaler = StandardScaler()\n\n X_df_new[['yearOfRegistration', 'gearbox', 'powerPS', 'model', 'kilometer', 'monthOfRegistration']] = scaler.fit_transform(X_df_new [['yearOfRegistration', 'gearbox', 'powerPS', 'model', 'kilometer', 'monthOfRegistration']])\n \n X_df_new= X_df_new.values\n return 
X_df_new","sub_path":"submissions/starting_kit/feature_extractor.py","file_name":"feature_extractor.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"130505350","text":"class Solution:\n def fourSum(self, nums, target):\n self.res = []\n if len(nums) < 4: return self.res\n \n nums.sort()\n if nums[0] * 4 > target or nums[-1] * 4 < target: return self.res\n \n for i in range(len(nums)-3):\n if i > 0 and nums[i] == nums[i-1]:\n continue\n for j in range(i+1, len(nums)-2):\n if j > i + 1 and nums[j] == nums[j-1]:\n continue\n self.findPair(nums, target, i, j)\n \n return self.res\n \n def findPair(self, nums, target, a_idx, b_idx):\n l = b_idx + 1\n r = len(nums) - 1\n \n while l < r:\n a, b, c, d = nums[a_idx], nums[b_idx], nums[l], nums[r]\n total = a + b + c + d\n if total == target:\n self.res.append([a, b, c, d])\n l += 1\n r -= 1\n while l < r and nums[l] == nums[l-1]:\n l += 1\n while l < r and nums[r] == nums[r+1]:\n r -= 1\n elif total < target:\n l += 1\n else:\n r -= 1","sub_path":"leetcode/patterns/02-two-pointers/09_4sum.py","file_name":"09_4sum.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"390334870","text":"'''\nYou are given two jugs with capacities x and y litres. There is an infinite amount of water supply \navailable. You need to determine whether it is possible to measure exactly z litres using these two jugs.\n\nIf z liters of water is measurable, you must have z liters of water contained within one or both buckets \nby the end.\n\nOperations allowed:\n\nFill any of the jugs completely with water.\nEmpty any of the jugs.\nPour water from one jug into another till the other jug is completely full or the first jug itself is empty.\nExample 1: (From the famous \"Die Hard\" example)\n\nInput: x = 3, y = 5, z = 4\nOutput: True\nExample 2:\n\nInput: x = 2, y = 6, z = 5\nOutput: False\n\n'''\nfrom collections import deque\n\ndef can_measure_water(x, y, z):\n que = deque([(0, 0)])\n visited = set()\n if x + y < z: return False\n while que:\n i, j = que.popleft()\n visited.add((i, j))\n states = set()\n if i + j == z: return True\n\n states.add((x, j))\n states.add((i, y))\n states.add((0, j))\n states.add((i, 0))\n states.add((min(i + j, x), (i + j) - min(i + j, x)))\n states.add(((i + j) - min(i + j, y), min(i + j, y)))\n\n que.extend(states - visited)\n\n return False \n","sub_path":"algorithms/graphs/water_and_jug.py","file_name":"water_and_jug.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"188650845","text":"import copy\n\nhoukou = [\"hidari\",\"migi\"]\ndotti = [\"tera\", \"jinja\"]\n\nfor i in houkou:\n for j in dotti:\n dotti_tmp = copy.deepcopy(dotti)\n dotti_tmp.remove(j)\n houkou_tmp = copy.deepcopy(houkou)\n for k in houkou_tmp:\n for l in dotti_tmp:\n print(i,j,\"->\",k,l) ","sub_path":"abc/119/hoge.py","file_name":"hoge.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"302146378","text":"from flask import Flask, request\nfrom flask_restful import Resource, Api\nimport sys\nimport os\nimport uuid\nimport requests\nimport json\nimport hazelcast\n\napp = Flask(__name__)\napi = Api(app)\nport = 5507\n\nclient = hazelcast.HazelcastClient(\n 
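# assumes a Hazelcast member reachable at this address; adjust for your cluster\n        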
cluster_members=[\"192.168.1.102:5702\"]\n )\nmy_map = client.get_map(\"my-distributed-map\").blocking()\n\n@app.route('/', methods = ['POST','GET'])\ndef user():\n if request.method == 'POST':\n msg = request.args[\"message\"]\n id = request.args[\"uuid\"]\n print(id,msg)\n \n my_map.put(id,msg)\n\n return \"done\"\n if request.method == 'GET':\n\n msg = msg = ' '.join(map(str, my_map.values()))\n \n return {\n \"msg\":msg\n }\n\nif __name__ == '__main__':\n app.run(host=\"127.0.0.1\", port=port)\n\n","sub_path":"lab4/log_serv_2.py","file_name":"log_serv_2.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"626343897","text":"# Copyright (c) 2019 Red Hat, Inc.\n#\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\nfrom __future__ import absolute_import\n\nimport testtools\n\nimport tobiko\nfrom tobiko.openstack import stacks\nfrom tobiko.shell import ip\nfrom tobiko.tests.functional.openstack.stacks import test_cirros\n\n\nclass VlanProxyServerStackTest(test_cirros.CirrosServerStackTest):\n\n #: Stack of resources with a server attached to a floating IP\n stack = tobiko.required_fixture(stacks.VlanProxyServerStackFixture)\n\n\nclass UbuntuVlanServerTest(testtools.TestCase):\n\n #: Stack of resources with a server attached to a floating IP\n stack = tobiko.required_fixture(stacks.UbuntuServerStackFixture)\n\n def test_vlan_ipv4_fixed_ip(self):\n self._test_vlan_fixed_ip(ip_version=4)\n\n def test_vlan_ipv6_fixed_ip(self):\n self._test_vlan_fixed_ip(ip_version=6)\n\n def _test_vlan_fixed_ip(self, ip_version: int):\n expected_ip = self.get_vlan_fixed_ip(ip_version=ip_version)\n for attempt in tobiko.retry(timeout=600.,\n interval=10.):\n try:\n actual_ip = ip.find_ip_address(\n device=self.stack.vlan_device,\n ip_version=ip_version,\n ssh_client=self.stack.ssh_client,\n scope='global',\n unique=True)\n except tobiko.ObjectNotFound:\n attempt.check_limits()\n else:\n break\n else:\n raise RuntimeError('Broken retry loop')\n self.assertEqual(expected_ip, actual_ip)\n self.stack.assert_vlan_is_reachable(ip_version=ip_version)\n\n def get_vlan_fixed_ip(self, ip_version: int):\n try:\n return self.stack.find_vlan_fixed_ip(ip_version=ip_version)\n except tobiko.ObjectNotFound:\n self.skipTest(f\"Server {self.stack.server_id} has any \"\n f\"IPv{ip_version} address on VLAN device.\")\n","sub_path":"tobiko/tests/functional/openstack/stacks/test_vlan.py","file_name":"test_vlan.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"388504349","text":"from collections import OrderedDict\nfrom collections import Counter\n\ndef read_file_to_list(file):\n data = []\n with open(file) as input:\n for line in input:\n temp_arr = []\n temp_arr.append(extract_name(line))\n temp_arr.append(extract_id(line))\n temp_arr.append(extract_checksum(line))\n data.append(temp_arr)\n return data\n\ndef 
extract_checksum(line):\n return line.strip().split('[')[1][:-1]\n\ndef extract_id(line):\n return line.split('-')[-1][:3]\n\ndef extract_name(line):\n return ''.join(list(filter(lambda x: x.isalpha(), line.split('-'))))\n\ndef cesar_cipher(room_list):\n temp_room_list = room_list[:]\n room_list.clear()\n for room in temp_room_list:\n sid = int(room[1])\n shift = sid%26\n name = room[0]\n decrypted_name = ''\n for letter in name:\n if (ord(letter) + shift) > 122:\n decrypted_name += chr(ord(letter) + shift - 26)\n else:\n decrypted_name += chr(ord(letter) + shift)\n room_list.append([decrypted_name, sid])\n\ndef main():\n data = read_file_to_list('input.txt')\n cesar_cipher(data)\n print(*(i[1] for i in data if 'north' in i[0]))\n\nif __name__ == '__main__':\n main()\n","sub_path":"advent of code 4/4_2.py","file_name":"4_2.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"538563883","text":"\"\"\" \nGiven a linked list, remove the n-th node from the end of list and return its head.\n\nExample:\n\nGiven linked list: 1->2->3->4->5, and n = 2.\n\nAfter removing the second node from the end, the linked list becomes 1->2->3->5.\nNote:\n\nGiven n will always be valid.\n\nFollow up:\n\nCould you do this in one pass?\n\n\"\"\"\n# Definition for singly-linked list.\nfrom typing import *\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n # one pass: save all nodes in a list and remove the desired node. \n ll = []\n c = head\n while c is not None:\n ll.append(c)\n c = c.next\n if n == len(ll):\n return [] if len(ll) == 1 else ll[1]\n else:\n ll[-(n+1)].next = ll[-n].next\n return head\na = Solution()\nn = 2\nll = []\nfor i in range(0,8):\n ll.append(ListNode(i))\n if i != 0:\n ll[i-1].next = ll[i]\nhead = a.removeNthFromEnd(ll[0],n)\nwhile head is not None:\n print(head.val)\n head = head.next","sub_path":"leetcode1-115/19. Remove Nth Node From End of List.py","file_name":"19. 
Remove Nth Node From End of List.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"385320673","text":"from openerp import tools, api\nfrom openerp.osv import fields, osv\nfrom openerp.tools.translate import _\nfrom openerp import SUPERUSER_ID\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\nimport pytz\nimport logging\n\nclass spa_appt(osv.osv):\n\t_inherit = \"customer.appt\"\t\n\n\t_columns = {\n\t\t'package_left': fields.char('Package Left'),\n\t\t'specialcss': fields.char(\"Special CSS\"),\t\t\n\t\t'cust_type': fields.many2one(\"spadate.custtype\", \"Customer type\")\n\t}\n\n\t@api.onchange('customer_id','service_id','staff_id','date_start', 'shop_id')\n\tdef _appt_change(self):\n\t\tsuper(spa_appt, self)._appt_change()\n\t\tif self.customer_id:\n\t\t\tself.name = self.customer_id.name\n\t\t\tself.cust_type = self.customer_id.cust_type\n\t\t\tleft_msg = \"\"\n\t\t\tif self.customer_id.packages:\n\t\t\t\tcount = 0\n\t\t\t\tfor pack in self.customer_id.packages:\n\t\t\t\t\tcount += pack.package_count\n\t\t\t\tif count == 1:\n\t\t\t\t\tleft_msg = \"** 1 LEFT **\"\n\t\t\t\telif pack.package_count == 5:\n\t\t\t\t\tleft_msg = \"** 5 LEFT **\"\t\t\t\n\t\t\t\n\t\t\tif self.customer_id.special_request_ids:\n\t\t\t\tself.specialcss =\"special1\"\n\t\t\t\tleft_msg += \" (\"\n\t\t\t\tfor special in self.customer_id.special_request_ids:\n\t\t\t\t\tleft_msg += special.name+\",\"\n\t\t\t\tleft_msg += \")\"\n\t\t\telif self.customer_id.cust_type.name == \"VIP\":\n\t\t\t\tself.specialcss = \"special2\"\n\t\t\telif self.customer_id.cust_type.name == \"Blacklist\":\n\t\t\t\tself.specialcss = \"special3\"\n\n\t\t\tself.package_left = left_msg\n\n\n\n\n\t@api.one\n\tdef reject_appt(self):\n\t\tir_model_data = self.pool.get('ir.model.data')\n\t\tcron_id = ir_model_data.get_object_reference(self._cr, self._uid, 'customer_appt', 'custappt_status_cancelled')[1]\n\t\tmessage = \"\"\n\t\tif self.appt_status.name == \"Confirmed\" or self.appt_status.name == \"Pending\":\n\t\t\tmessage = \"Sorry, we are fully booked for your requested timing. Our staff will be calling you shortly.\"\t\t\t\n\t\telif self.appt_status.name == \"Request new time\" and self.request_start:\n\t\t\t#request = datetime.strptime(self.request_start, \"%Y-%m-%d %H:%M:%S\")\n\t\t\t#from_zone = pytz.utc\n\t\t\t#to_zone = pytz.timezone('Singapore') or pytz.utc\n\t\t\t#date_from = request.replace(tzinfo=from_zone)\n\t\t\t#date_local = date_from.astimezone(to_zone)\n\t\t\tmessage = \"Sorry, we are fully booked for your requested timing. 
Our staff will be calling you shortly.\"\n\t\t\n\t\tif message != \"\":\n\t\t\tself.env['user.notification'].sudo().create({'partner_id': self.customer_id.id, 'notification_type': 'appointment', 'count': 1})\n\t\t\tself.env['res.partner'].send_notification(self.customer_id.id, message, category=\"APPT_CATEGORY\")\t\t\t\n\t\tself.write({'appt_status':cron_id})\n\n\n\tdef write(self, cr, uid, ids, vals, context=None):\n\t\tif vals.get('appt_status', False):\n\t\t\tapptstatus = self.pool.get('custappt.status').browse(cr, uid, vals.get('appt_status'))\n\t\t\tif apptstatus.name == \"Confirmed\":\n\t\t\t\tappt = self.browse(cr, uid, ids[0])\n\t\t\t\tfrom_zone = pytz.utc\n\t\t\t\tto_zone = pytz.timezone('Singapore') or pytz.utc\n\t\t\t\tif vals.get('date_start', False):\n\t\t\t\t\tdate_obj = datetime.strptime(vals.get('date_start'), \"%Y-%m-%d %H:%M:%S\")\n\t\t\t\telse:\n\t\t\t\t\tdate_obj = datetime.strptime(appt.date_start, \"%Y-%m-%d %H:%M:%S\")\n\t\t\t\tdate_from = date_obj.replace(tzinfo=from_zone)\n\t\t\t\tdate_local = date_from.astimezone(to_zone)\n\t\t\t\tif appt.appt_status.name == \"Request new time\":\n\t\t\t\t\tmessage = \"Your appointment time has been changed to \"+date_local.strftime(\"%d %b, %I:%M%p\")+\".\"\n\t\t\t\telse:\n\t\t\t\t\tmessage = \"Your appointment on \"+date_local.strftime(\"%d %b, %I:%M%p\")+\" is confirmed.\"\n\t\t\t\tself.pool.get('user.notification').create(cr, SUPERUSER_ID, {'partner_id': appt.customer_id.id, 'notification_type': 'appointment', 'count': 1})\n\t\t\t\tself.pool.get('res.partner').send_notification(cr, uid, appt.customer_id.id, message, category=\"APPT_CATEGORY\")\n\t\t\t\t\n\t\treturn super(spa_appt, self).write(cr, uid, ids, vals, context=context)\n","sub_path":"abs_cust/spa_appt.py","file_name":"spa_appt.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"30005131","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, JsonResponse\nimport bs4 as bs\nimport urllib.request\nimport ngram as ngram\nfrom django.utils.html import strip_tags\n\n# Create your views here.\nsauce = urllib.request.urlopen('http://archivo.elcomercio.pe/noticias/marina-guerra-peru-51667').read()\nsoup = bs.BeautifulSoup(sauce,'lxml')\n\ntextohtml= ''\nstrip = ''\nvariable = []\n\ntextohtml = soup.prettify()\n\n\ndef getcomercio(request):\n data={\n 'Titulo' : soup.title.string,\n #'HTML' : strip_tags(listap)\n #'HTML' : variable\n 'HTML' : strip_tags(listap),\n 'Resultado' : (var,'Coincidencias')\n\n \n }\n return JsonResponse(data)\n\nstrip = strip_tags(textohtml) \nlistap = strip.split()\n\nfrecuenciaPalab = []\nfor w in listap:\n frecuenciaPalab.append(listap.count(w))\nvariable = ([i for i in zip(listap, frecuenciaPalab)])\n\nvar = listap.count('almirante')\n\nif var > 0:\n print (var,'Coincidencias')\n\nelse:\n print (\"Ninguna Coincidencias\")\n\n\n\n\n\n","sub_path":"comercio/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"554035533","text":"import os.path\nimport tempfile\nfrom itertools import zip_longest\nfrom unittest import TestCase\n\nimport importlib_resources\nimport numpy as np\nfrom pandas import DataFrame\nfrom scipy import misc\n\nimport tests.saved_test_data\nfrom aspire.image import Image\nfrom aspire.source import ArrayImageSource\nfrom aspire.storage import StarFile, StarFileBlock\n\nDATA_DIR = 
os.path.join(os.path.dirname(__file__), \"saved_test_data\")\n\n\n# From itertools standard recipes\ndef grouper(iterable, n, fillvalue=None):\n \"\"\"\n Collect data into fixed-length chunks or blocks.\n\n # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx\n\n :param iterable: Iterable object to split into chunks\n :param n: Size of each chunk\n :param fillvalue: Value to tail fill if iterable not exact multiple of n\n :return: iterator over chunks of length n\n \"\"\"\n\n args = [iter(iterable)] * n\n return zip_longest(*args, fillvalue=fillvalue)\n\n\nclass StarFileTestCase(TestCase):\n def setUp(self):\n with importlib_resources.path(tests.saved_test_data, \"sample.star\") as path:\n self.starfile = StarFile(path)\n\n # Independent Image object for testing Image source methods\n L = 768\n self.im = Image(misc.face(gray=True).astype(\"float64\")[:L, :L])\n self.img_src = ArrayImageSource(self.im)\n\n # We also want to flex the stack logic.\n self.n = 21\n im_stack = np.broadcast_to(self.im.data, (self.n, L, L))\n # make each image methodically different\n im_stack = np.multiply(im_stack, np.arange(self.n)[:, None, None])\n self.im_stack = Image(im_stack)\n self.img_src_stack = ArrayImageSource(self.im_stack)\n\n # Create a tmpdir object for this test instance\n self._tmpdir = tempfile.TemporaryDirectory()\n # Get the directory from the name attribute of the instance\n self.tmpdir = self._tmpdir.name\n\n def tearDown(self):\n # Destroy the tmpdir instance and contents\n self._tmpdir.cleanup()\n\n def testLength(self):\n # StarFile is an iterable that gives us blocks.\n # We have 2 blocks in our sample starfile.\n self.assertEqual(2, len(self.starfile))\n\n def testIteration(self):\n # A StarFile can be iterated over, yielding StarFileBlocks\n for block in self.starfile:\n self.assertTrue(isinstance(block, StarFileBlock))\n\n def testBlockByIndex(self):\n # Indexing a StarFile with a 0-based index gives us a 'block',\n block0 = self.starfile[0]\n self.assertTrue(isinstance(block0, StarFileBlock))\n # Our first block has no 'loop's.\n self.assertEqual(0, len(block0))\n\n def testBlockByName(self):\n # Indexing a StarFile with a string gives us a block with that name\n # (\"data_\" in starfile).\n # In our case the block at index 1 has name 'planetary'\n block1 = self.starfile[\"planetary\"]\n # This block has a two 'loops'.\n self.assertEqual(2, len(block1))\n\n def testBlockProperties(self):\n # A StarFileBlock may have attributes that were read from the\n # starfile key=>value pairs.\n block0 = self.starfile[\"general\"]\n # Note that no typecasting is performed\n self.assertEqual(block0._three, \"3\")\n\n def testLoop(self):\n loop = self.starfile[1][0]\n self.assertIsInstance(loop, DataFrame)\n\n def testData1(self):\n df = self.starfile[\"planetary\"][0]\n self.assertEqual(8, len(df))\n self.assertEqual(4, len(df.columns))\n # Note that no typecasting of values is performed at io.StarFile level\n self.assertEqual(\"1\", df[df[\"_name\"] == \"Earth\"].iloc[0][\"_gravity\"])\n\n def testData2(self):\n df = self.starfile[\"planetary\"][1]\n self.assertEqual(3, len(df))\n self.assertEqual(2, len(df.columns))\n # Missing values in a loop default to ''\n self.assertEqual(\"\", df[df[\"_name\"] == \"Earth\"].iloc[0][\"_discovered_year\"])\n\n def testSave(self):\n # Save the StarFile object to disk,\n # read it back, and check for equality.\n # Note that __eq__ is supported for StarFile/StarFileBlock classes\n\n with open(\"sample_saved.star\", \"w\") as f:\n self.starfile.save(f)\n self.starfile2 
= StarFile(\"sample_saved.star\")\n self.assertEqual(self.starfile, self.starfile2)\n\n os.remove(\"sample_saved.star\")\n","sub_path":"tests/test_starfileio.py","file_name":"test_starfileio.py","file_ext":"py","file_size_in_byte":4398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"41536004","text":"from random import randrange\r\nfrom math import sin, sqrt\r\nfrom copy import deepcopy\r\nfrom MathFunctions import stdDev, sigma\r\n\r\nSTEP = 50 # percent\r\ndirection = 1\r\n\r\n# max = 231.7211106994124\r\n# max_a = 20.47\r\n# max_b = 20.47\r\n\r\n\r\ndef fitness(a, b):\r\n return ((a / 2) * sin(a) + 5) * ((b / 2) * sin(b) + 5)\r\n\r\n\r\ndef diversity(chromosome, avg_chromosome):\r\n d_sum = 0\r\n for index in range(len(chromosome)):\r\n d_sum += abs(avg_chromosome[index]-chromosome[index])\r\n return (sigma(d_sum/(len(chromosome)*10)))*232\r\n\r\n\r\ndef fdValues(population):\r\n avg_chrm = [sum([population[i][index] for i in range(len(population))]) / len(population) for index in range(len(population[0]))]\r\n\r\n f_values = [fitness(population[i][0], population[i][1]) for i in range(len(population))]\r\n d_values = [diversity(population[i], avg_chrm) for i in range(len(population))]\r\n\r\n return list(zip(population, f_values, d_values))\r\n\r\n\r\ndef retrieveKeptChromosomes(population):\r\n fd_values = fdValues(population)\r\n\r\n return_chromosomes = []\r\n rs = []\r\n for tup in fd_values:\r\n rs.append(sqrt(((232-tup[1])**2) + ((232-tup[2])**2)))\r\n r_values = list(sorted(zip(rs, [fd_values[i][0] for i in range(len(fd_values))])))\r\n for tup in r_values[:250]:\r\n return_chromosomes.append(tup[1])\r\n\r\n return return_chromosomes\r\n\r\n\r\ndef mutateChromosome(chromosome, mutations_per_chromosome):\r\n new_chromosome = chromosome\r\n global direction\r\n\r\n for _ in range(mutations_per_chromosome):\r\n direction = direction - 2*direction\r\n percent = (randrange(STEP // 2, STEP) / 100) * direction\r\n new_chromosome[randrange(0,len(new_chromosome))] *= percent\r\n\r\n return new_chromosome\r\n\r\n\r\ndef printStats(population):\r\n avg_chrm = [sum([population[i][0] for i in range(len(population))]) / len(population),\r\n sum([population[i][1] for i in range(len(population))]) / len(population)\r\n ]\r\n print('Mean Chromosome: ', avg_chrm)\r\n\r\n max, max_i = 0, 0\r\n for i, c in enumerate(population):\r\n c_f = fitness(c[0], c[1])\r\n if max < c_f: max, max_i = c_f, i\r\n print('Highest Fitness: ', max, '--', population[max_i])\r\n\r\n max, max_i = 0, 0\r\n for i, c in enumerate(population):\r\n c_f = diversity(c, avg_chrm)\r\n if max < c_f: max, max_i = c_f, i\r\n print('Highest Diversity: ', max, '--', population[max_i])\r\n\r\n if __name__ == '__main__':\r\n total_f = 0\r\n for c in population:\r\n total_f += fitness(c[0], c[1])\r\n print('Mean Fitness: ', total_f/len(population))\r\n\r\n total_d = 0\r\n for c in population:\r\n total_d += diversity(c, avg_chrm)\r\n print('Mean Diversity: ', total_d / len(population))\r\n\r\n stddev_sum = 0\r\n pop_size = len(population[0])\r\n for index in range(pop_size):\r\n stddev_sum += stdDev([population[i][index] for i in range(len(population))])\r\n avg_stddev = stddev_sum/pop_size\r\n print('Standard Deviation: ', avg_stddev)\r\n\r\nif __name__ == '__main__':\r\n chromosomes = [[3, 3] for _ in range(500)]\r\n close = False\r\n generation = 1\r\n\r\n while not close:\r\n command = input(\"Enter command or how many generations to simulate. 
\\n\\n > \")\r\n try:\r\n generations = int(command)\r\n\r\n for g in range(generations):\r\n print('Generation:', generation+g)\r\n\r\n chromosomes = retrieveKeptChromosomes(chromosomes)\r\n\r\n for index in range(250):\r\n chrsm = mutateChromosome(deepcopy(chromosomes[index]), 5)\r\n for i, x in enumerate(chrsm):\r\n if x > 25: chrsm[i] = 25.0\r\n if x < 0: chrsm[i] = 0.0\r\n\r\n chromosomes.append(chrsm)\r\n\r\n for _ in range(randrange(50, 75)):\r\n chr_1_i, chr_2_i = 0, 0\r\n while chr_1_i == chr_2_i:\r\n chr_1_i = randrange(0, 500)\r\n chr_2_i = randrange(0, 500)\r\n chr_1 = chromosomes[chr_1_i]\r\n chr_2 = chromosomes[chr_2_i]\r\n chromosomes[chr_1_i] = chr_1[:1] + chr_2[1:]\r\n chromosomes[chr_2_i] = chr_2[:1] + chr_1[1:]\r\n\r\n generation += generations\r\n\r\n except ValueError:\r\n if command == 'stop': close = True\r\n elif command == 'stats': printStats(chromosomes)\r\n elif command == 'step':\r\n try: STEP = int(input(\" New STEP Value -- \"))\r\n except:\r\n print('ERROR: STEP Not Int')\r\n print(' STEP Reverted to Previous STEP:', STEP)\r\n elif command == 'population':\r\n for index, chromosome in enumerate(chromosomes):\r\n if (index % 50) == 0: input()\r\n print('Chromosome', index+1, '--', chromosome)\r\n elif command == 'help':\r\n print('Commands:\\n stop\\n stats\\n step\\n population\\n help')\r\n else: print('Incorrect Command')\r\n","sub_path":"GeneticAlgorithm.py","file_name":"GeneticAlgorithm.py","file_ext":"py","file_size_in_byte":5103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"30681912","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\nPE_0231\n\nThe prime factorisation of binomial coefficients\n\nThe binomial coefficient 10C3 = 120.\n120 = 23 × 3 × 5 = 2 × 2 × 2 × 3 × 5, and 2 + 2 + 2 + 3 + 5 = 14.\nSo the sum of the terms in the prime factorisation of 10C3 is 14. 
\n\nFind the sum of the terms in the prime factorisation of 20000000C15000000\n\n frac(m/p^j)>frac(n/p^j) in n_C_m\n\nCreated on Sun Nov 20 05:12:08 2016\n@author: mbh\n\"\"\"\n\nimport time\nimport numpy as np\nimport numba as nb\n\n# revisit Jan 17 2020\n\ndef main(n=20000000,k=15000000):\n t0=time.perf_counter()\n print(p231(n,k),time.perf_counter()-t0) # 1.8 s\n t1=time.perf_counter()\n print(p231vec(n,k),time.perf_counter()-t1) # 0.2 s\n \n\n# @nb.njit\ndef p231(n=20000000,k=15000000):\n \n f = power_in_factorial\n return( sum(p * (f(p, n) - f(p, k) - f(p, n - k)) for p in primeSieve(n + 1)))\n\ndef p231vec(n=20000000,k=15000000):\n \n p=primeSieve(n)\n f = sum_factorial_factors\n return( f(p, n) - f(p, k) - f(p, n - k))\n\n\n# Legendre theorem\n@nb.njit\ndef power_in_factorial(p, n):\n \"\"\"Return the exponent of the prime p in the factorization of n!\"\"\"\n result = 0\n while True:\n n //= p\n if not n:\n break\n result += n\n return result \n\n#to vectorise it\ndef sum_factorial_factors(primes, n):\n \"\"\"Return the sum of terms in the prime factorization of n!\"\"\"\n p = primes\n n = np.full_like(p, n)\n result = r = np.zeros_like(p)\n while True:\n n //= p\n if n[-1] == 0: # some primes not contributing any more?\n l = n.argmin() # number of primes still contributing\n if l == 0:\n break\n n, r, p = n[:l], r[:l], p[:l]\n r += n\n result *= primes\n return result.sum()\n\n# @nb.njit \ndef primeSieve(n):\n \"\"\"return array of primes 2<=p<=n\"\"\"\n sieve=np.ones(n+1,dtype=np.int8)\n for i in range(2, int((n+1)**0.5+1)):\n if sieve[i]>0:\n sieve[2*i::i]=0#False\n return np.nonzero(sieve)[0][2:] \n \n# work from 2016\ndef p231v1(n=20000000,m=15000000):\n t=time.perf_counter()\n factors=[]\n primes=primeSieve(n)\n for p in primes:\n count=0\n j=0\n while p**j(n/p**j)%1:\n count+=1\n j+=1\n if count>0:\n factors.append((p,count)) \n print(sum([x[0]*x[1] for x in factors]),time.perf_counter()-t) \n\n\n\n\n#Legendre's theorem\n# @nb.njit\ndef facpfac(n):\n \"\"\"\"returns prime factors of n!\"\"\"\n ps=primeSieve(n)\n factors={}\n for prime in ps:\n exp=0\n power=1\n delta=10\n while delta>0:\n delta=n//prime**power\n exp+=delta\n power+=1\n factors[prime]=exp\n return(factors) \n\n@nb.njit \ndef primeSieve(n):\n \"\"\"return array of primes 2<=p<=n\"\"\"\n sieve=np.ones(n+1,dtype=np.int8)\n for i in range(2, int((n+1)**0.5+1)):\n if sieve[i]>0:\n sieve[2*i::i]=0#False\n return np.nonzero(sieve)[0][2:]\n\ndef nCk(n,k,memo={}):\n if n 1:\n factors.append(n)\n return factors","sub_path":"PE_0231/PE_0231.py","file_name":"PE_0231.py","file_ext":"py","file_size_in_byte":3594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"369617121","text":"import numpy as np\nimport cv2\nimport time\nimport logging\n# Make sure that caffe is on the python path:\ncaffe_root = '../caffe-ssd/python' # this file is expected to be in {sfd_root}/sfd_test_code/AFW\n\nimport os\nimport sys\nsys.path.insert(0, caffe_root)\nimport caffe\nfRegistered = 1\nlogging.basicConfig(filename='detectionMisses.log',level=logging.DEBUG)\n\nif __name__ == '__main__':\n caffe.set_device(0)\n caffe.set_mode_gpu()\n model_def = 'deploy.prototxt'\n model_weights = 'SFD.caffemodel'\n net = caffe.Net(model_def, model_weights, caffe.TEST)\n \n path = '/home/xdepartment/ly/data/jdface/registered90k'\n resultPath = '/home/xdepartment/ly/data/jdface/sfd90kResult/'\n files = os.listdir(path)\n print(len(files))\n count = 0\n debug = 0\n for file in files: \n count = count + 1\n 
print(count)\n print(file)\n #if file != '1958.jpg':\n # continue\n frame = cv2.imread(path+'/'+file)\n #print(file)\n #print(frame.shape)\n #frame = cv2.resize(frame, (200,200))\n start = time.time()\n image = frame\n height = image.shape[0]\n width = image.shape[1]\n #print('width,height:'+str(width)+','+str(height))\n if max(image.shape[0], image.shape[1]) < 320 or fRegistered == 1:\n im_shrink = 80.0 / max(image.shape[0], image.shape[1])\n else:\n im_shrink = 320.0 / max(image.shape[0], image.shape[1])\n image = cv2.resize(image, None, None, fx=im_shrink, fy=im_shrink, interpolation=cv2.INTER_LINEAR)\n\n net.blobs['data'].reshape(1, 3, image.shape[0], image.shape[1])\n transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})\n transformer.set_transpose('data', (2, 0, 1))\n transformer.set_mean('data', np.array([104, 117, 123]))\n transformer.set_raw_scale('data', 255)\n transformer.set_channel_swap('data', (2, 1, 0))\n transformed_image = transformer.preprocess('data', image)\n net.blobs['data'].data[...] = transformed_image\n\n detections = net.forward()['detection_out']\n det_conf = detections[0, 0, :, 2]\n det_xmin = detections[0, 0, :, 3]\n det_ymin = detections[0, 0, :, 4]\n det_xmax = detections[0, 0, :, 5]\n det_ymax = detections[0, 0, :, 6]\n \n flag = 0\n bbox = []\n for i in range(det_conf.shape[0]):\n xmin = max(0, int(round(det_xmin[i] * width)))\n ymin = max(0, int(round(det_ymin[i] * height)))\n xmax = min(width-1, int(round(det_xmax[i] * width)))\n ymax = min(height-1, int(round(det_ymax[i] * height)))\n # simple fitting to AFW, because the gt box of training data (i.e., WIDER FACE) is longer than the gt box of AFW\n # ymin += 0.2 * (ymax - ymin + 1) \n score = det_conf[i]\n \n if debug == 0:\n if score <= 0.2 or xmin >= xmax or ymin >= ymax: \n continue\n area = (ymax-ymin)*(xmax-xmin)\n if area < 300:\n continue\n else:\n print('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\\n'.\n format('person', score, xmin, ymin, xmax, ymax))\n \n bbox.append([xmin, ymin, xmax, ymax, score])\n flag = 1\n end = time.time()\n print(end-start)\n \n if debug == 0:\n if flag == 0:\n #cv2.imshow('result', frame)\n #cv2.waitKey(0)\n logging.debug('Missed:'+file)\n else:\n #print(bbox)\n tBbox = bbox[0]\n cv2.rectangle(frame, (tBbox[0], tBbox[1]), (tBbox[2], tBbox[3]), (255,0,0), 2)\n cv2.imwrite(resultPath+file, frame)\n #cv2.imshow('result',frame)\n #cv2.waitKey(0)\n else: \n if count == 290:\n cv2.imshow('result', frame)\n cv2.waitKey(0)\n \n #cv2.imwrite('processed/frame{:d}.jpg'.format(count),frame)\n\n\n\n\n","sub_path":"sfd/jd_AFW_folder.py","file_name":"jd_AFW_folder.py","file_ext":"py","file_size_in_byte":4433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"615166941","text":"import os\nimport time\nfrom typing import Tuple\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom sklearn.tree import DecisionTreeClassifier\nfrom torch.utils.data import Dataset\n\nfrom .base import BaseClassifier, ClassifierNotTrainedError, BaseXModel\nfrom ..utils.base import tree_to_formula, NotAvailableError\nfrom ..utils.metrics import Metric, Accuracy\n\n\nclass XDecisionTreeClassifier(BaseClassifier, BaseXModel):\n \"\"\"\n Decision Tree class module. 
It does provides for explanations.\n\n :param n_classes: int\n number of classes to classify - dimension of the output layer of the network\n :param n_features: int\n number of features - dimension of the input space\n :param max_depth: int\n maximum depth for the classifier. The deeper is the tree, the more complex are the explanations provided.\n \"\"\"\n\n def __init__(self, n_classes: int, n_features: int, max_depth: int = None,\n device: torch.device = torch.device('cpu'), name: str = \"tree.pth\"):\n\n super().__init__(name=name, device=device)\n assert device == torch.device('cpu'), \"Only cpu training is provided with decision tree models.\"\n\n self.n_classes = n_classes\n self.n_features = n_features\n\n self.model = DecisionTreeClassifier(max_depth=max_depth)\n\n def forward(self, x, **kwargs) -> torch.Tensor:\n \"\"\"\n forward method extended from Classifier. Here input data goes through the layer of the ReLU network.\n A probability value is returned in output after sigmoid activation\n\n :param x: input tensor\n :return: output classification\n \"\"\"\n x = x.detach().cpu().numpy()\n output = self.model.predict_proba(x)\n return output\n\n def get_loss(self, output: torch.Tensor, target: torch.Tensor, **kwargs) -> None:\n \"\"\"\n Loss is not used in the decision tree as it is not a gradient based algorithm. Therefore, if this function\n is called an error is thrown.\n :param output: output tensor from the forward function\n :param target: label tensor\n :param kwargs:\n :raise: NotAvailableError\n \"\"\"\n raise NotAvailableError()\n\n def get_device(self) -> torch.device:\n \"\"\"\n Return the device on which the classifier is actually loaded. For DecisionTree is always cpu\n\n :return: device in use\n \"\"\"\n return torch.device(\"cpu\")\n\n def fit(self, train_set: Dataset, val_set: Dataset, metric: Metric = Accuracy(),\n verbose: bool = True, save=True, **kwargs) -> pd.DataFrame:\n \"\"\"\n fit function that execute many of the common operation generally performed by many method during training.\n Adam optimizer is always employed\n\n :param train_set: training set on which to train\n :param val_set: validation set used for early stopping\n :param metric: metric to evaluate the predictions of the network\n :param verbose: whether to output or not epoch metrics\n :param save: whether to save the model or not\n :return: pandas dataframe collecting the metrics from each epoch\n \"\"\"\n\n # Loading dataset\n train_loader = torch.utils.data.DataLoader(train_set, 1024)\n train_data, train_labels = [], []\n for data in train_loader:\n train_data.append(data[0]), train_labels.append(data[1])\n train_data, train_labels = torch.cat(train_data).numpy(), torch.cat(train_labels).numpy()\n\n # Fitting decision tree\n if len(train_labels.squeeze().shape) > 1:\n train_labels = np.argmax(train_labels, axis=1)\n self.model = self.model.fit(X=train_data, y=train_labels)\n\n # Compute accuracy, f1 and constraint_loss on the whole train, validation dataset\n train_acc = self.evaluate(train_set, metric=metric)\n val_acc = self.evaluate(val_set, metric=metric)\n\n if verbose:\n print(f\"Train_acc: {train_acc:.1f}, Val_acc: {val_acc:.1f}\")\n\n if save:\n self.save()\n\n # Performance dictionary\n performance_dict = {\n \"tot_loss\": [0],\n \"train_accs\": [train_acc],\n \"val_accs\": [val_acc],\n \"best_epoch\": [0],\n }\n performance_df = pd.DataFrame(performance_dict)\n return performance_df\n\n def predict(self, dataset, *args, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n 
Predict function to compute the prediction of the decision tree on a certain dataset\n\n :param dataset: dataset on which to test\n :return: a tuple containing the outputs computed on the dataset and the labels\n \"\"\"\n outputs, labels = [], []\n loader = torch.utils.data.DataLoader(dataset, 1024)\n for data in loader:\n batch_data = data[0]\n batch_output = self.forward(batch_data)\n outputs.append(batch_output)\n labels.append(data[1].numpy())\n labels = np.concatenate(labels)\n outputs = np.vstack(outputs)\n return torch.FloatTensor(outputs), torch.FloatTensor(labels)\n\n def save(self, name=None, **kwargs) -> None:\n \"\"\"\n Save model on a file named with the name of the model if parameter name is not set.\n\n :param name: Save the model with a name different from the one assigned in the __init__\n \"\"\"\n from joblib import dump\n if name is None:\n name = self.name\n dump(self.model, name)\n\n def load(self, device=torch.device(\"cpu\"), name=None, **kwargs) -> None:\n from joblib import load\n \"\"\"\n Load decision tree model.\n\n :param name: Load a model with a name different from the one assigned in the __init__\n \"\"\"\n if name is None:\n name = self.name\n try:\n self.model = load(name)\n except FileNotFoundError:\n raise ClassifierNotTrainedError() from None\n\n def prune(self):\n raise NotAvailableError()\n\n def get_local_explanation(self, **kwargs):\n raise NotAvailableError()\n\n def get_global_explanation(self, class_to_explain: int, concept_names: list = None, *args,\n return_time: bool = False, **kwargs):\n if concept_names is None:\n concept_names = [f\"f_{i}\" for i in range(self.n_features)]\n start_time = time.time()\n formula = tree_to_formula(self.model, concept_names, class_to_explain)\n if return_time:\n return formula, time.time() - start_time\n return formula\n\n\nif __name__ == \"__main__\":\n pass\n","sub_path":"deep_logic/models/tree.py","file_name":"tree.py","file_ext":"py","file_size_in_byte":6636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"137513939","text":"\n\nimport hashlib\nimport io\nimport logging\nimport pickle\nimport re\nimport threading\n\nfrom site_utils import retrieve_url, save_file, list_files, read_file\nfrom host import get_host\n\n# regex to extract an icon link from the of a 404 error page\nicon_regex = re.compile(r''']*rel=['\"](shortcut )?icon['\"] [^>]*href=['\"]?\\.?(?P[^'\" ]*)[^>]*>''', re.IGNORECASE)\n\n\nclass FileCache(object):\n def __init__(self, data_cls, filename):\n self.new_count = 0\n self.filename = filename\n self.data = data_cls()\n\n self._load()\n\n def _load(self):\n logging.info('loading from {}'.format(self.filename))\n raw_data = read_file(self.filename)\n if raw_data is None:\n logging.info('Nothing loaded')\n return\n\n logging.info('Loaded from file')\n self.data = pickle.load(io.BytesIO(raw_data))\n\n def _save(self):\n logging.info('Saving to file {}'.format(self.filename))\n data = pickle.dumps(self.data)\n try:\n save_file(data, self.filename)\n except:\n pass\n else:\n self.new_count = 0\n\nclass HostToHashDict(FileCache):\n def __init__(self):\n super(type(self), self).__init__(dict, 'hash_dict')\n\n\n def __contains__(self, item):\n return item in self.data\n\n def __setitem__(self, key, value):\n if key not in self.data:\n self.data[key] = value\n self.new_count += 1\n\n if self.new_count > 5:\n self._save()\n\n def __getitem__(self, item):\n return self.data[item]\n\n\nclass HashSet(FileCache):\n def __init__(self):\n 
super(type(self), self).__init__(set, 'hash_set')\n\n def __contains__(self, item):\n return item in self.data\n\n def add(self, item):\n if item not in self.data:\n self.data.add(item)\n self.new_count += 1\n\n if self.new_count > 5:\n self._save()\n\n\nclass Favicon:\n \"\"\"\n This is a Singleton class which implements a favicon cache.\n \"\"\"\n host_to_hash = HostToHashDict()\n hash_set = HashSet()\n lock = threading.Lock()\n BASE = 'https://gammacrawler.appspot.com/favicons/'\n #BASE = 'http://localhost:8080/favicons/'\n\n cache_hits = 0\n total_requests = 0\n\n @classmethod\n def get_favicon(cls, url, page=None):\n \"\"\"\n Retrieves and stores the site's favicon. Returns a local (on this server) URL to the stored favicon\n :param url: site for which we want a favicon\n :return: if a favicon is found, returns a URL to our locally served favicon.\n If no favicon is found, returns None\n \"\"\"\n with cls.lock:\n cls.total_requests += 1.0\n host, host_key = cls._get_host_key(url)\n\n if host_key in cls.host_to_hash:\n cls.cache_hits += 1.0\n logging.info('Favicon cache hit! Hit percetage {:.2%}'.format(cls.cache_hits / cls.total_requests))\n icon_hash = cls.host_to_hash[host_key]\n if icon_hash:\n filename = icon_hash + '.ico'\n return cls.BASE + filename\n else:\n return None\n\n logging.info('Favicon cache miss. Hit percetage {:.2%}'.format(cls.cache_hits / cls.total_requests))\n\n if page:\n # attempt to extract from the page first\n icon = cls._extract_favicon_from_page(page, url)\n\n # if either we didn't get passed the page, or no icon could be extracted, attempt the default route\n if not page or not icon:\n icon = cls._download_favicon(host + '/favicon.ico')\n\n if not icon:\n cls.host_to_hash[host_key] = None\n return None\n\n icon_hash = hashlib.md5(icon).hexdigest()\n cls.host_to_hash[host_key] = icon_hash\n\n if icon_hash not in cls.hash_set:\n save_file(icon, icon_hash + '.ico')\n cls.hash_set.add(icon_hash)\n\n return cls.BASE + icon_hash + '.ico'\n\n\n @classmethod\n def _get_host_key(cls, url):\n host = get_host(url)\n host_key = host[host.find('//')+2:]\n\n return host, host_key\n\n @classmethod\n def _download_favicon(cls, favicon_url, level=1):\n \"\"\"\n Attempts to download a favicon. If successful, returns the favicon.\n If a 404 is returned, attempts to find a favicon link within the returned page. 
If one is found,\n attempts to retrieve and return that icon\n :param favicon_url: the URL of the icon to retrieve\n :return:\n \"\"\"\n # prevents a form of infinite recursion\n if level > 3:\n return None\n\n res = retrieve_url(favicon_url)\n\n if res is None:\n return None\n elif res.status_code == 200:\n return res.content\n elif res.status_code == 404:\n return cls._extract_favicon_from_page(res.content, favicon_url, level+1)\n\n return None\n\n @classmethod\n def _extract_favicon_from_page(cls, page, url, level=1):\n\n # prevent a form of infinite recursion\n if level > 3:\n return None\n\n #first check the cache\n host, host_key = cls._get_host_key(url)\n\n match = icon_regex.search(page)\n if match:\n icon_url = match.group('icon')\n if icon_url.startswith('//'):\n icon_url = 'http:' + icon_url\n elif icon_url.startswith('/'):\n icon_url = get_host(url) + icon_url\n return cls._download_favicon(icon_url, level+1)\n else:\n return None\n","sub_path":"crawler/favicon.py","file_name":"favicon.py","file_ext":"py","file_size_in_byte":5685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"323760235","text":"from typing import Any\n\nfrom aiohttp import BasicAuth\nfrom fake_useragent import UserAgent\nfrom pydantic import BaseModel\nfrom yarl import URL\n\nfrom sneakpeek.plugins.utils import parse_config_from_obj\nfrom sneakpeek.scraper_context import BeforeRequestPlugin, Request\n\n\nclass ProxyPluginConfig(BaseModel):\n \"\"\"Proxy plugin config\"\"\"\n\n proxy: str | URL | None = None #: Proxy URL\n proxy_auth: BasicAuth | None = None #: Proxy authentication info to use\n\n class Config:\n arbitrary_types_allowed = True\n\n\nclass ProxyPlugin(BeforeRequestPlugin):\n \"\"\"Proxy plugin automatically sets proxy arguments for all HTTP requests.\"\"\"\n\n def __init__(self, default_config: ProxyPluginConfig | None = None) -> None:\n self._default_config = default_config or ProxyPluginConfig()\n self._user_agents = UserAgent(\n use_external_data=self._default_config.use_external_data,\n browsers=self._default_config.browsers,\n )\n\n @property\n def name(self) -> str:\n return \"proxy\"\n\n async def before_request(\n self,\n request: Request,\n config: Any | None,\n ) -> Request:\n config = parse_config_from_obj(\n config,\n self.name,\n ProxyPluginConfig,\n self._default_config,\n )\n if not request.kwargs:\n request.kwargs = {}\n if config.proxy:\n request.kwargs[\"proxy\"] = config.proxy\n if config.proxy_auth:\n request.kwargs[\"proxy_auth\"] = config.proxy_auth\n return request\n","sub_path":"sneakpeek/plugins/proxy_plugin.py","file_name":"proxy_plugin.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"} +{"seq_id":"417071222","text":"# -*- coding: utf-8 -*-\n\n# Write an algorithm to determine if a number is happy.\n#\n# A happy number is a number defined by the following process: Starting with any positive integer, replace the number\n# by the sum of the squares of its digits, and repeat the process until the number equals 1 (where it will stay), or it\n# loops endlessly in a cycle which does not include 1. 
Those numbers for which this process ends in 1 are happy numbers.\n#\n# Example\n# 19 is a happy number\n#\n# 1^2 + 9^2 = 82\n# 8^2 + 2^2 = 68\n# 6^2 + 8^2 = 100\n# 1^2 + 0^2 + 0^2 = 1\n\n\nclass Solution:\n # @param {int} n an integer\n # @return {boolean} true if this is a happy number or false\n def isHappy(self, n):\n # Write your code here\n sums = []\n cur = n \n while True: \n str_digits = str(cur)\n digits = [int(s) for s in list(str_digits)]\n sq_sum = sum([i ** 2 for i in digits])\n if sq_sum == 1:\n print(sums)\n return True\n if sq_sum in sums:\n print(sums)\n return False\n sums.append(sq_sum)\n cur = sq_sum\n\n\n# TEST\na = Solution()\nprint(a.isHappy(19))\n \n \n ","sub_path":"Happy Number.py","file_name":"Happy Number.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"19"}