diff --git "a/2897.jsonl" "b/2897.jsonl" new file mode 100644--- /dev/null +++ "b/2897.jsonl" @@ -0,0 +1,689 @@ +{"seq_id":"129477021","text":"\"\"\"Module with tortoise configuration options\"\"\"\n\nfrom .settings import settings\n\nTORTOISE_CFG = {\n \"connections\": {\n \"default\": {\n \"engine\": \"tortoise.backends.asyncpg\",\n \"credentials\": settings.db.dict(by_alias=True),\n },\n },\n \"apps\": {\n \"billing\": {\n \"models\": [\"src.db.models\"],\n }\n },\n \"use_tz\": True,\n \"timezone\": \"W-SU\",\n}\n","sub_path":"billing_api/src/core/tortoise.py","file_name":"tortoise.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"620940471","text":"from tkinter import Tk\nfrom kayttoliittyma import Kayttoliittyma\nfrom sovelluslogiikka import Sovelluslogiikka\nfrom Komentotehdas import Komentotehdas\n\n\ndef main():\n sovellus = Sovelluslogiikka()\n\n window = Tk()\n window.title(\"Laskin\")\n komentotehdas = Komentotehdas()\n\n kayttoliittyma = Kayttoliittyma(sovellus, window, komentotehdas)\n kayttoliittyma.kaynnista()\n\n window.mainloop()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"viikko5/laskin/src/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"418945251","text":"import os\nimport logging\n\nfrom telegram import ReplyKeyboardRemove\nfrom telegram.ext import MessageHandler, Filters\n\nfrom yandex_translate import YandexTranslate\n\nfrom bot.user import StateId\nfrom bot.modes.talk.command import query\n\nfrom bot.handlers.matches import matches\nfrom bot.handlers.talk import talk\nfrom bot.handlers.xo3 import xo_3\nfrom bot.handlers.xo5 import xo_5\n\n\nlogger = logging.getLogger(__name__)\n\ntranslate = YandexTranslate(os.environ['TR_TOKEN'])\n\ndef text(bot, update):\n logger.info (\"Text command, id: \" + str (update.message.chat_id) + \" text: \" + update.message.text)\n\n user_state = bot.state[update.message.chat_id]\n text = update.message.text.lower()\n\n switch_mode_words = [\n 'switch',\n 'select'\n 'change',\n 'exchange',\n 'play',\n 'show',\n 'start',\n 'open'\n\n ]\n tic_tac_words = ['tic-tac', 'tic tac', 'in a row', 'tictac']\n matches_words = ['matches']\n talk_words = ['talk', 'math', 'dialog']\n\n for x in switch_mode_words:\n if x in text:\n for y in tic_tac_words:\n if y in text:\n if '5' in text or 'five' in text:\n xo_5(bot, update)\n return\n else:\n xo_3(bot, update)\n return\n\n for y in matches_words:\n if y in text:\n matches(bot, update)\n return\n\n for y in talk_words:\n if y in text:\n talk(bot, update)\n return\n\n if (user_state.state_id == StateId.Talk):\n talk_handler(bot, update)\n\n if (user_state.state_id == StateId.XO_5):\n xo5_game_handler(bot, update)\n\n if (user_state.state_id == StateId.XO_3):\n xo3_game_handler(bot, update)\n\n if (user_state.state_id == StateId.Matches):\n matches_game_handler(bot, update)\n\n if (user_state.state_id == StateId.Translate):\n translate_handler(bot, update)\n\n\nText_handler = MessageHandler (Filters.text, text)\n\n\ndef xo5_game_handler(bot, update):\n reply_markup = ReplyKeyboardRemove ()\n\n user_state = bot.state[update.message.chat_id]\n\n if (user_state.xo5_game is None):\n bot.send_message (\n chat_id=update.message.chat_id,\n text=\"No game found, use /xo5 to start new game\",\n reply_markup=reply_markup\n )\n\n return\n\n ok = 
user_state.xo5_game.sendUserMove(update.message.text)\n\n if (not ok):\n bot.send_message (\n chat_id=update.message.chat_id,\n text=\"Invalid Move\",\n reply_markup=reply_markup,\n )\n\n return\n\n\n bot.send_message (\n chat_id=update.message.chat_id,\n text=user_state.xo5_game.getGameState(),\n reply_markup=reply_markup,\n parse_mode = \"MARKDOWN\"\n )\n\n winner = user_state.xo5_game.getWin()\n\n if (winner is not None):\n user_state.xo5_game = None\n user_state.state_id = StateId.Start\n\n bot.send_message (\n chat_id=update.message.chat_id,\n text=\"Game Over, Winner: \" + winner,\n reply_markup=reply_markup\n )\n\ndef xo3_game_handler(bot, update):\n reply_markup = ReplyKeyboardRemove ()\n\n user_state = bot.state[update.message.chat_id]\n\n if (user_state.xo3_game is None):\n bot.send_message (\n chat_id=update.message.chat_id,\n text=\"No game found, use /xo3 to start new game\",\n reply_markup=reply_markup\n )\n\n return\n\n ok = user_state.xo3_game.xo_bot(update.message.text)\n\n if (not ok):\n bot.send_message (\n chat_id=update.message.chat_id,\n text=\"Invalid Move\",\n reply_markup=reply_markup\n )\n\n return\n\n bot.send_message (\n chat_id=update.message.chat_id,\n text=user_state.xo3_game.getGameState(),\n reply_markup=reply_markup\n )\n\n winner = user_state.xo3_game.getWin()\n\n if (winner is not None):\n user_state.xo3_game = None\n user_state.state_id = StateId.Start\n\n bot.send_message (\n chat_id=update.message.chat_id,\n text=\"Game Over, Winner: \" + winner,\n reply_markup=reply_markup\n )\n\ndef matches_game_handler(bot, update):\n reply_markup = ReplyKeyboardRemove ()\n\n user_state = bot.state[update.message.chat_id]\n\n if (user_state.matches_game is None):\n bot.send_message (\n chat_id=update.message.chat_id,\n text=\"No game found, use /matches to start new game\",\n reply_markup=reply_markup\n )\n\n return\n\n ok = user_state.matches_game.sendUserMove(update.message.text)\n\n if (not ok):\n bot.send_message (\n chat_id=update.message.chat_id,\n text=\"Invalid Move\",\n reply_markup=reply_markup\n )\n\n return\n\n bot.send_message (\n chat_id=update.message.chat_id,\n text=user_state.matches_game.getGameState(),\n reply_markup=reply_markup\n )\n\n winner = user_state.matches_game.getWin()\n\n if (winner is not None):\n user_state.matches_game = None\n user_state.state_id = StateId.Start\n\n bot.send_message (\n chat_id=update.message.chat_id,\n text=\"Game Over, Winner: \" + winner,\n reply_markup=reply_markup\n )\n\ndef talk_handler(bot, update):\n reply_markup = ReplyKeyboardRemove ()\n\n bot.send_message (\n chat_id=update.message.chat_id,\n text=\"Thinking...\",\n reply_markup=reply_markup\n )\n\n result = query(update.message.text)\n print(result)\n\n if (result is None or result == \"\"):\n bot.send_message (\n chat_id=update.message.chat_id,\n text=\"Woops, sorry I am not smart enough\",\n reply_markup=reply_markup\n )\n\n return\n\n bot.send_message (\n chat_id=update.message.chat_id,\n text=result,\n reply_markup=reply_markup\n )\n\ndef translate_handler(bot, update):\n reply_markup = ReplyKeyboardRemove ()\n\n text = translate.translate(update.message.text, 'ru').get('text')\n\n if len(text) < 1:\n bot.send_message (\n chat_id=update.message.chat_id,\n text=\"Translation error\",\n reply_markup=reply_markup\n )\n\n return\n\n bot.send_message (\n chat_id=update.message.chat_id,\n text=\"Translation: \" + text[0],\n reply_markup=reply_markup\n 
)","sub_path":"bot/handlers/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":6526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"16865813","text":"import random\nfrom pygame.sprite import Sprite\nfrom nlc_dino_runner.utils.constants import CLOUD, SCREEN_WIDTH\n\n\nclass Cloud(Sprite):\n def __init__(self):\n self.image = CLOUD\n self.pos_x = SCREEN_WIDTH + random.randint(200, 500)\n self.pos_y = random.randint(100, 150)\n self.rect = self.image.get_rect()\n\n def update(self, game_speed):\n self.pos_x -= game_speed\n if self.pos_x < - self.rect.width:\n self.pos_x = SCREEN_WIDTH + random.randint(500, 1000)\n self.pos_y = random.randint(100, 150)\n\n def draw(self, screen):\n screen.blit(self.image, (self.pos_x, self.pos_y))\n","sub_path":"nlc_dino_runner/components/clouds/cloud.py","file_name":"cloud.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"511119830","text":"import glob\nimport csv\nimport os\nimport pandas as pd\nimport numpy as np\n\nfilter='processed/CSVData*.csv'\n\nfiles = glob.glob(filter)\n#\"2016-02-20 11:07 AM\"\nif files:\n\tfor file in files:\n\t\tin_csv=file\n\t\tout_csv=\"grouped/\"+os.path.basename(file)\n\t\tnewnames = [\"date\",\"credit\",\"description\",\"balance\",\"category\"]\n\t\tdf = pd.read_csv(in_csv, names=newnames, header=0)\n\t\t#print(df)\n\t\t\n\t\tdf['date'] = pd.to_datetime(df['date'], format='%d/%m/%Y')\n\t\tdf['sdate'] = df['date'].apply(lambda x: x.strftime('%b-%Y'))\n\t\tdf = df.set_index('date')\n\t\t#print(df)\n\t\t#g1 = df.groupby([lambda x: x.year, lambda x: x.month])\n\t\tg1=df.groupby('sdate')\n\t\tg2=g1['balance'].max()\n\t\tg2.reset_index().to_csv(out_csv, index=False)","sub_path":"grouped_netbank.py","file_name":"grouped_netbank.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"257779542","text":"employees=[\n[101,\"anu\",\"developer\",2500,1989,1999],\n[101,\"ammu\",\"testing\",24000,1990,2005],\n[103,\"achu\",\"ba\",21000,1975,1988],\n[104,\"meera\",\"ba\",20000,1990,1999]\n]\nfor employee in employees:\n print(employee[1])\n\nfor employee in employees:\n print(employee[2]==\"developer\")\n print(employee)\n\ntotal=0\nfor salary in employees:\n total+=employee[3]\nprint(\"total Salary=\",total)\n\nsalary_list=[]\nfor high_salary in employees:\n salary_list.append(employee[3])\nprint(\"high_salary=\",max(salary_list))\n\nhighest_experience=[]\nfor employee in employees:\n highest_experience.append(employee[5]-employee[4])\nprint(\"employee=\",max(highest_experience))#highest experience\nprint(highest_experience) #sorting the experience\n","sub_path":"python_collections/pythonprograms/lists/employee_list.py","file_name":"employee_list.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"292142056","text":"def findPaths(cur, seen, paths):\n if cur == 'end':\n return [[cur]]\n if cur != cur.upper():\n seen = seen + [cur]\n\n ans = []\n for next in paths[cur]:\n if next in seen:\n continue\n for p in findPaths(next, seen, paths):\n ans.append([cur] + p)\n return ans\n\n\npaths = {}\n\nwith open(\"in.txt\", \"r\") as file:\n for line in file:\n start, end = line.strip().split(\"-\")\n ends = paths.get(start, set())\n ends.add(end)\n paths[start] = ends\n starts = 
paths.get(end, set())\n starts.add(start)\n paths[end] = starts\n\nprint(len(findPaths('start', [], paths)))\n","sub_path":"advent2021/a12/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"241462423","text":"\nimport numpy as np\nfrom collections import Counter\ndata=np.loadtxt(open('D:\\sta.csv'),delimiter=',',skiprows = 1, encoding='utf8',dtype=str) #读取数据\nn=int(input(\"输入伏邪代码,“风\\寒\\湿\\热\\瘀\\毒”分别为“0/1/3/5/6/7” :\"))\na=0#记录行数\ncount=0#记录满足条件的数量\nfangji=[] #储存满足条件的数据\nfor tof in data[:,n]: #判断是否满足某种伏邪\n if tof=='1':\n fangji.append(data[a,8])\n a=a+1\n count=count+1\n else:\n a=a+1\nprint(\"符合条件的数据有:%d\"%count)\nResult=Counter(fangji)\nprint(\"结果为\",Result)","sub_path":"question1.py","file_name":"question1.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"539122885","text":"'''\nk-近邻算法的一般流程:\n收集数据:可以使用爬虫进行数据的收集,也可以使用第三方提供的免费或收费的数据。一般来讲,数据放在txt文本文件中,按照一定的格式进行存储,便于解析及处理。\n准备数据:使用Python解析、预处理数据。\n分析数据:可以使用很多方法对数据进行分析,例如使用Matplotlib将数据可视化。\n测试算法:计算错误率。\n使用算法:错误率在可接受范围内,就可以运行k-近邻算法进行分类。\n'''\nimport numpy as np\nfrom matplotlib.font_manager import FontProperties\nimport matplotlib.lines as mlines\nimport matplotlib.pyplot as plt\n\nfrom kNearestNeighbor import classify0\n\n'''\n函数说明:打开并解析文件,对数据进行分类:1代表不喜欢,2代表魅力一般,3代表极具魅力\nParameters:\n filename - 文件名\nReturns:\n returnMat - 特征举证\n classLabelVector - 分类Label向量\n'''\ndef file2matrix(filename):\n #打开文件\n fr = open(filename)\n #读取文件所有内容\n arrayOfLines = fr.readlines()\n #得到文件行数\n numberOfLines = len(arrayOfLines)\n #返回的NumPy矩阵,解析完成的数据:numberOfLines行,3列\n returnMat = np.zeros((numberOfLines,3))\n # 返回的分类标签向量\n classLabelVector = []\n #行的索引值\n index = 0\n for line in arrayOfLines:\n # s.strip(rm),当rm空时,默认删除空白符(包括'\\n','\\r','\\t',' ')\n line = line.strip()\n # 使用s.split(str=\"\",num=string,cout(str))将字符串根据'\\t'分隔符进行切片。\n listFromLine = line.split('\\t')\n # 将数据前三列提取出来,存放到returnMat的NumPy矩阵中,也就是特征矩阵\n returnMat[index,:] = listFromLine[0:3]\n ##根据文本中标记的喜欢的程度进行分类,1代表不喜欢,2代表魅力一般,3代表极具魅力\n if listFromLine[-1] == 'didntLike':\n classLabelVector.append(1)\n elif listFromLine[-1]=='smallDoses':\n classLabelVector.append(2)\n elif listFromLine[-1]=='largeDoses':\n classLabelVector.append(3)\n index += 1\n return returnMat,classLabelVector\n'''\n函数说明:对数据进行归一化\nParameters:\n dataSet - 特征矩阵\nReturns:\n normDataSet - 归一化后的特征矩阵\n ranges - 数据范围\n minVals -数据最小值\n'''\ndef autoNorm(dataSet):\n #获得数据的最小值\n minVals = dataSet.min(0)\n maxVals = dataSet.max(0)\n #最大值和最小值的范围\n ranges = maxVals - minVals\n #shape(dataSet)返回dataSet的矩阵行列数\n normDataSet = np.zeros(np.shape(dataSet))\n #返回dataSet的行数\n m= dataSet.shape[0]\n #原始值减去最小值\n normDataSet = dataSet - np.tile(ranges,(m,1))\n #除以最大和最小值的差,得到归一化数据\n normDataSet = normDataSet / np.tile(ranges,(m,1))\n #返回归一化数据结果,数据范围,最小值\n return normDataSet,ranges,minVals\n'''\n函数说明:分类器测试函数\nParameters:\n None\nReturns:\n normDataSet - 归一化后的特征矩阵\n ranges - 数据范围\n minVals - 数据最小值\n'''\ndef datingClassTest():\n #打开的文件名\n filename = \"datingTestSet.txt\"\n #将返回的特征矩阵和分类向量分别存储到datingDataMat和datingLabels中\n datingDataMat,datingLabels = file2matrix(filename)\n #取所有数据的百分之十\n hoRatio = 0.10\n #数据归一化,返回归一化后的矩阵,数据范围,数据最小值\n normMat,ranges,minVals = autoNorm(datingDataMat)\n #获得normMat的行数\n m = normMat.shape[0]\n #百分之十的测试数据的个数\n numTestVecs = int(m * hoRatio)\n #分类错误计数\n errorCount = 0.0\n\n for i in range(numTestVecs):\n 
#前numTestVecs个数据作为测试集,后m - numTestVecs个数据作为训练集\n classifierResult = classify0(normMat[i,:],normMat[numTestVecs:m,:],\n datingLabels[numTestVecs:m],4)\n print(\"分类结果:%d\\t真实类别:%d\" % (classifierResult,datingLabels[i]))\n if classifierResult != datingLabels[i]:\n errorCount += 1.0\n print(\"错误率:%f%%\" % (errorCount/float(numTestVecs)*100))\n'''\n函数说明:通过输入一个人的三维特征,进行分类输出 \nParameters:\n 无\nReturns:\n 无\n'''\ndef classifyPerson():\n #输出结果\n resultList = ['讨厌','有些喜欢','非常喜欢']\n #三维特征用户输入\n precentTats = float(input(\"玩视频游戏所耗时间百分比:\"))\n ffMiles = float(input(\"每年获得的飞行常客里程数:\"))\n iceCream = float(input(\"每周消费的冰淇淋公升数:\"))\n #打开的文件名\n filename= \"datingTestSet.txt\"\n #打开并处理数据\n datingDataMat,datingLabels = file2matrix(filename)\n #训练集归一化\n normMat, ranges, minVals = autoNorm(datingDataMat)\n #生成numpy数组,测试集\n inArr = np.array([ffMiles,precentTats,iceCream])\n #测试集归一化\n norminArr = (inArr-minVals)/ranges\n #返回分类结果\n classifierResult = classify0(norminArr,normMat,datingLabels,3)\n #打印结果\n print(\"你可能%s这个人\" % (resultList[classifierResult-1]))\n\n'''\n函数说明:可视化数据\nParameters:\n datingDataMat - 特征矩阵\n datingLabels - 分类Label\nReturns:\n None\n'''\ndef showdatas(datingDataMat, datingLabels):\n #设置汉字格式\n font = FontProperties(fname = r\"c:\\windows\\fonts\\simsun.ttc\", size=14)\n # 将fig画布分隔成1行1列,不共享x轴和y轴,fig画布的大小为(13,8)\n # 当nrow=2,nclos=2时,代表fig画布被分为四个区域,axs[0][0]表示第一行第一个区域\n fig, axs = plt.subplots(nrows=2,ncols=2,sharex=False,sharey=False,figsize = (13,8))\n\n numberOfLabels = len(datingLabels)\n LabelsColors = []\n for i in datingLabels:\n if i ==1:\n LabelsColors.append('black')\n if i==2:\n LabelsColors.append('orange')\n if i==3:\n LabelsColors.append('red')\n #画出散点图,以datingDataMat矩阵的第一(飞行常客例程)、第二列(玩游戏)数据画散点数据,散点大小为15,透明度为0.5\n axs[0][0].scatter(x=datingDataMat[:,0], y=datingDataMat[:,1], color = LabelsColors, s =15, alpha = .5)\n # 设置标题,x轴label,y轴label\n axs0_title_text = axs[0][0].set_title(u'每年获得的飞行常客里程数与玩视频游戏所消耗时间占比',FontProperties=font)\n axs0_xlabel_text = axs[0][0].set_xlabel(u'每年获得的飞行常客里程数',FontProperties=font)\n axs0_ylabel_text = axs[0][0].set_ylabel(u'玩视频游戏所消耗时间占',FontProperties=font)\n plt.setp(axs0_title_text,size=9,weight='bold',color='red')\n plt.setp(axs0_xlabel_text, size=7, weight='bold', color='black')\n plt.setp(axs0_ylabel_text, size=7, weight='bold', color='black')\n\n # 画出散点图,以datingDataMat矩阵的第一(飞行常客例���)、第三列(冰激凌)数据画散点数据,散点大小为15,透明度为0.5\n axs[0][1].scatter(x=datingDataMat[:, 0], y=datingDataMat[:, 2], color=LabelsColors, s=15, alpha=.5)\n # 设置标题,x轴label,y轴label\n axs1_title_text = axs[0][1].set_title(u'每年获得的飞行常客里程数与每周消费的冰激淋公升数', FontProperties=font)\n axs1_xlabel_text = axs[0][1].set_xlabel(u'每年获得的飞行常客里程数', FontProperties=font)\n axs1_ylabel_text = axs[0][1].set_ylabel(u'每周消费的冰激淋公升数', FontProperties=font)\n plt.setp(axs1_title_text, size=9, weight='bold', color='red')\n plt.setp(axs1_xlabel_text, size=7, weight='bold', color='black')\n plt.setp(axs1_ylabel_text, size=7, weight='bold', color='black')\n\n # 画出散点图,以datingDataMat矩阵的第二(玩游戏)、第三列(冰激凌)数据画散点数据,散点大小为15,透明度为0.5\n axs[1][0].scatter(x=datingDataMat[:, 1], y=datingDataMat[:, 2], color=LabelsColors, s=15, alpha=.5)\n # 设置标题,x轴label,y轴label\n axs2_title_text = axs[1][0].set_title(u'玩视频游戏所消耗时间占比与每周消费的冰激淋公升数', FontProperties=font)\n axs2_xlabel_text = axs[1][0].set_xlabel(u'玩视频游戏所消耗时间占比', FontProperties=font)\n axs2_ylabel_text = axs[1][0].set_ylabel(u'每周消费的冰激淋公升数', FontProperties=font)\n plt.setp(axs2_title_text, size=9, weight='bold', color='red')\n plt.setp(axs2_xlabel_text, size=7, weight='bold', color='black')\n 
plt.setp(axs2_ylabel_text, size=7, weight='bold', color='black')\n # 设置图例\n didntLike = mlines.Line2D([],[],color='black',marker='.',\n markersize=6, label = 'didntLike')\n smallDoses = mlines.Line2D([],[],color='orange',marker='.',\n markersize=6, label = 'smallDoses')\n largeDoses = mlines.Line2D([],[],color='red',marker='.',\n markersize=6, label = 'largeDoses')\n #添加图例\n axs[0][0].legend(handles = [didntLike,smallDoses,largeDoses])\n axs[0][1].legend(handles=[didntLike, smallDoses, largeDoses])\n axs[1][0].legend(handles=[didntLike, smallDoses, largeDoses])\n # 显示图片\n plt.show()\nif __name__ == '__main__':\n #打开的文件名\n filename = \"datingTestSet.txt\"\n #打开并处理数据\n datingDataMat, datingLabels = file2matrix(filename)\n print(\"特征矩阵是:\",datingDataMat)\n print(\"标签向量是:\",datingLabels)\n normDataSet,ranges,minVals = autoNorm(datingDataMat)\n print(\"归一化后的数据集:\",normDataSet)\n print(\"数据的取值范围:\",ranges)\n print(\"数据的最小值:\",minVals)\n datingClassTest()#我们可以改变函数datingClassTest内变量hoRatio和分类器k的值,检测错误率是否随着变量值的变化而增加。依赖于分类算法、数据集和程序设置,分类器的输出结果可能有很大的不同。\n classifyPerson()\n #数据可视化\n # showdatas(datingDataMat,datingLabels)\n","sub_path":"kNN_DataWebsite.py","file_name":"kNN_DataWebsite.py","file_ext":"py","file_size_in_byte":10141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"349163936","text":"from django.conf.urls import url\n\nfrom . import views\n\napp_name = 'cookbook'\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^(?P[0-9]+)/$', views.page, name='page'),\n]\n","sub_path":"cookbook/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"173506605","text":"# Program 1 : Print the following pattern using while loop \n\nnum=int(input(\"Enter the no of rows : \"))\n\ni=num\nwhile i>=1:\n j=i\n while j>= 1:\n print(j,end=\" \")\n j=j-1\n print()\n i=i-1","sub_path":"Personel/Yash/Assessment/1march/prog1.py","file_name":"prog1.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"23057076","text":"#导入聊天记录\nimport pandas as pd\ndf = pd.read_excel(\"C:\\\\Users\\\\kfzx-bocw\\\\Desktop\\\\Label2.xlsx\" ,sep=',',header=0,encoding='gb18030',dtype='str')\n#将不同人说的换成0 1 类别\ndef func(a):\n if a == 'Pluto':\n return 1;\n else:\n return 0\ndf['label'] = df.exception.map(func)\n\n\nimport jieba.finalseg\nfrom numpy import *\n#准备数据集\ndataSet = []\nfor i in df.index:\n dataSet.append(\" \".join(jieba.cut(df.content[i])).split())\nclassVec = df.label.tolist()\n#创建一个包含在所有文档中出现的不重复的词的列表\ndef createVocabList(dataSet):\n vocabSet = set()\n for document in dataSet:\n vocabSet = vocabSet | set(document)\n return list(vocabSet)\nvocabList = createVocabList(dataSet)\n#将一个文档转换为词向量 存在改词即为1 否则为0\ndef setOfwords2Vec(vocabList, inputSet): #词汇表 inputSet表示某个文档\n returnVec = [0]*len(vocabList)\n for word in inputSet:\n if word in vocabList:\n returnVec[vocabList.index(word)] = 1\n else:\n print('the world: %s is not in my vocabulary' % word)\n return returnVec\n#训练矩阵\ntrainMat = []\nfor postinDoc in dataSet:\n trainMat.append(setOfwords2Vec(vocabList,postinDoc))\nprint(shape(trainMat))\n#p0Vect是类别为0的条件下每个特征词向量的出现的概率;\n#p1Vect是类别为1的条件下每个特征词向量的出现的概率;\n#pAbusive是类别为1的概率 1-pAbusive是类别为0的概率\ndef trainNB1(trainMatrix,trainCategory):\n numTrainDocs = len(trainMatrix)\n numWords = len(trainMatrix[0])\n pAbusive = sum(trainCategory)/float(numTrainDocs) 
#p(辱骂的=1)的概率\n\n\n p0Num = ones(numWords);p1Num = ones(numWords) #n列\n p0Denom = 2.0; p1Denom = 2.0 #分母\n for i in range(numTrainDocs):\n if trainCategory[i] == 1:\n p1Num += trainMatrix[i] #n列同时计数\n p1Denom += sum(trainMatrix[i]) ##标量 分母是该类的总词条数目\n else:\n p0Num += trainMatrix[i]\n p0Denom += sum(trainMatrix[i])\n p1Vect = log(p1Num/p1Denom)\n p0Vect = log(p0Num/p0Denom)\n return p0Vect,p1Vect,pAbusive\n\n\n#训练\np0V,p1V,pAb = trainNB1(trainMat,classVec)\np0V\n\n\n#预测\ndef classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):\n p1 =sum(vec2Classify*p1Vec) + log(pClass1) #转为log后*全部为+\n p0 =sum(vec2Classify*p0Vec) + log(1 - pClass1)\n if p1>p0:\n return 1\n else:\n return 0\n\n\ndef prefunc(string):\n testWords = string\n testEntry = \" \".join(jieba.cut(testWords)).split()\n thisDoc = array(setOfwords2Vec(vocabList,testEntry))\n predict = classifyNB(thisDoc,p0V,p1V,pAb)\n if predict==0:\n print (\"这句话是答应不如常在说的\")\n else:\n print(\"这句话是Pluto说的\")\n#最后在这里预测\nstring = \"你大爷\"\nprefunc(string)\n\n\n\n\n","sub_path":"com/icbc/classify/naviebayes2.py","file_name":"naviebayes2.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"296472920","text":"\n#IWA::Disabled||* * * * *||nothing useful for automation\n\nimport os\nfrom Engine.classes.config import variables\nfrom labels.labels import get_label_meucy17\n\nsettings = variables.workspaces['REPORTS'].settings\nsettings_default = variables.workspaces['DEFAULT'].settings\ndaily_revision = settings_default.get('daily_revision')\n\n\npath_to_folder = os.path.join(os.environ.get(\"XDG_RUNTIME_DIR\"), 'gvfs', 'smb-share:server=172.30.136.211,share=toyota_cy17_meu', 'Daily')\npath_to_latest_reports = os.path.join(settings.get('path_to_artifacts'), daily_revision)\n\nlabel = get_label_meucy17(os.listdir(path_to_folder))\n\nos.system('cp -r ' + path_to_latest_reports + ' ' + os.path.join(path_to_folder, '', label))\n","sub_path":"scripts/SCRIPTS/DAILY_copy_to_rs.py","file_name":"DAILY_copy_to_rs.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"171430519","text":"import logging\nimport sys\nimport time\n\nimport numpy as np\nfrom PyQt5 import QtCore, QtWidgets\n\nfrom database import Database\nfrom ui import Ui_MainWindow\nfrom utils import logger\nfrom visualizer import Visualizer\nfrom volumeProfile import VolumeProfile\n\n\nclass ApplicationWindow(QtWidgets.QMainWindow):\n def __init__(self):\n super().__init__()\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n\n self.db = Database(0, \"5T\")\n\n self.visualizer = Visualizer(self)\n self.ui.verticalLayout.addWidget(self.visualizer)\n\n self.volumeProfile = VolumeProfile(self)\n\n self.previousIndex = 7\n self.ui.cbInterval.setCurrentIndex(7)\n\n self.timer = QtCore.QTimer(self)\n self.timer.timeout.connect(self.updatePlot)\n self.timer.start(2000)\n\n self.ui.actionVolumeProfile.triggered.connect(self.actionVolumeProfile)\n self.ui.actionConsole.triggered.connect(self.actionConsole)\n\n self.ui.cbSymbol.currentIndexChanged.connect(self.cbSymbolSelect)\n self.ui.cbInterval.activated.connect(self.cbIntervalSelect)\n\n @QtCore.pyqtSlot()\n def actionVolumeProfile(self):\n self.volumeProfile.updateDate()\n self.volumeProfile.show()\n\n @QtCore.pyqtSlot()\n def actionConsole(self):\n self.db.console.show()\n\n @QtCore.pyqtSlot(int)\n def cbSymbolSelect(self, i):\n self.visualizer.setIndex(i)\n 
self.volumeProfile.deleteAll()\n\n @QtCore.pyqtSlot(int)\n def cbIntervalSelect(self, i):\n text = self.ui.cbInterval.currentText()\n\n if text == \"-----\":\n self.ui.cbInterval.setCurrentIndex(self.previousIndex)\n elif i != self.previousIndex:\n self.previousIndex = i\n if text[-1] == \"s\":\n interval = text.replace(\"s\", \"S\")\n elif text[-1] == \"m\":\n interval = text.replace(\"m\", \"T\")\n elif text[-1] == \"h\":\n interval = text.replace(\"h\", \"H\")\n self.visualizer.setInterval(interval)\n\n self.ui.centralwidget.setFocus()\n\n @QtCore.pyqtSlot()\n def updatePlot(self):\n self.visualizer.refresh()\n\n\nif __name__ == \"__main__\":\n qapp = QtWidgets.QApplication.instance()\n if not qapp:\n qapp = QtWidgets.QApplication(sys.argv)\n\n app = ApplicationWindow()\n # app.showMaximized()\n app.show()\n app.activateWindow()\n app.raise_()\n qapp.exec_()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"300877994","text":"\nfrom django.db import models\n\n# Create your models here.\nfrom dao.mst_research import MstResearch\nfrom dao.mst_lab_members import MstLabMembers\nfrom MySQLdb.cursors import DictCursor\nimport sys\nsys.path.append(\"../../dao\")\n\nclass ResearchModel():\n\n\n def __init__(self):\n\n self.mst_research = MstResearch()\n self.mst_lab_members = MstLabMembers()\n\n\n \"\"\"\n ユーザの研究一覧を取得する\n \"\"\"\n def get_own_research_list(self, id):\n\n research_list = []\n result = self.mst_research.get_list_by_member_id(id)\n\n for research in result:\n \n data = {\n \"uuid\" : research[\"research_uuid\"],\n \"id\" : research[\"research_id\"],\n \"title\" : research[\"research_title\"],\n \"purpose\": research[\"research_purpose\"],\n \"member\" : research[\"member_uuid\"],\n }\n\n research_list.append(data)\n\n return research_list\n \n\n \"\"\"\n 研究室の研究一覧を取得する\n \"\"\"\n def get_lab_research_list(self, member_id, lab_id):\n\n research_list = []\n result = self.mst_research.get_list_by_lab_id(lab_id)\n\n for research in result:\n\n # if research[\"member_uuid\"] == id:\n # continue\n\n uuid = research[\"member_uuid\"]\n \n member = self.mst_lab_members.get_row_by_uuid(uuid)\n\n try:\n name = member[\"member_name\"]\n\n except:\n continue\n\n research_list.append({\n \"uuid\": research[\"research_uuid\"],\n \"id\" : research[\"research_id\"],\n \"title\": research[\"research_title\"],\n \"purpose\": research[\"research_purpose\"],\n \"member\": uuid,\n \"name\": name,\n \"isOwn\": member_id == uuid\n })\n\n # print(research_list)\n return research_list\n\n ","sub_path":"content/django/research_management_api/research/index_models.py","file_name":"index_models.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"45688510","text":"from collections import namedtuple\nfrom datetime import datetime, timedelta\nimport logging\nimport os\n\nfrom celery.schedules import crontab\nfrom celery.task import periodic_task, task\nfrom dateutil.relativedelta import relativedelta\nfrom django.conf import settings\nfrom django.db import Error, connections\n\nfrom corehq.apps.userreports.models import get_datasource_config\nfrom corehq.apps.userreports.util import get_indicator_adapter\nfrom corehq.form_processor.interfaces.dbaccessors import CaseAccessors\nfrom corehq.form_processor.change_publishers import publish_case_saved\nfrom corehq.util.decorators 
import serial_task\nfrom corehq.util.soft_assert import soft_assert\nfrom dimagi.utils.chunked import chunked\nfrom dimagi.utils.logging import notify_exception\n\ncelery_task_logger = logging.getLogger('celery.task')\n\nUCRAggregationTask = namedtuple(\"UCRAggregationTask\", ['type', 'date'])\n\n\n@periodic_task(run_every=crontab(minute=0, hour=21), acks_late=True, queue='background_queue')\ndef run_move_ucr_data_into_aggregation_tables_task(date=None):\n move_ucr_data_into_aggregation_tables.delay(date)\n\n\n@serial_task('move-ucr-data-into-aggregate-tables', timeout=30 * 60, queue='background_queue')\ndef move_ucr_data_into_aggregation_tables(date=None, intervals=3):\n date = date or datetime.utcnow().date()\n monthly_date = date.replace(day=1)\n if hasattr(settings, \"ICDS_UCR_DATABASE_ALIAS\") and settings.ICDS_UCR_DATABASE_ALIAS:\n with connections[settings.ICDS_UCR_DATABASE_ALIAS].cursor() as cursor:\n\n path = os.path.join(os.path.dirname(__file__), 'migrations', 'sql_templates', 'create_functions.sql')\n celery_task_logger.info(\"Starting icds reports create_functions\")\n with open(path, \"r\") as sql_file:\n sql_to_execute = sql_file.read()\n cursor.execute(sql_to_execute)\n celery_task_logger.info(\"Ended icds reports create_functions\")\n\n path = os.path.join(os.path.dirname(__file__), 'sql_templates', 'update_locations_table.sql')\n celery_task_logger.info(\"Starting icds reports update_location_tables\")\n with open(path, \"r\") as sql_file:\n sql_to_execute = sql_file.read()\n cursor.execute(sql_to_execute)\n celery_task_logger.info(\"Ended icds reports update_location_tables_sql\")\n\n aggregation_tasks = []\n\n for interval in range(intervals - 1, -1, -1):\n calculation_date = (monthly_date - relativedelta(months=interval)).strftime('%Y-%m-%d')\n aggregation_tasks.append(UCRAggregationTask('monthly', calculation_date))\n\n aggregation_tasks.append(UCRAggregationTask('daily', date.strftime('%Y-%m-%d')))\n aggregate_tables.delay(aggregation_tasks[0], aggregation_tasks[1:])\n\n\n@task(queue='background_queue', bind=True, default_retry_delay=15 * 60, acks_late=True)\ndef aggregate_tables(self, current_task, future_tasks):\n aggregation_type = current_task.type\n aggregation_date = current_task.date\n\n if aggregation_type == 'monthly':\n path = os.path.join(os.path.dirname(__file__), 'sql_templates', 'update_monthly_aggregate_tables.sql')\n elif aggregation_type == 'daily':\n path = os.path.join(os.path.dirname(__file__), 'sql_templates', 'update_daily_aggregate_table.sql')\n else:\n raise ValueError(\"Invalid aggregation type {}\".format(aggregation_type))\n\n if hasattr(settings, \"ICDS_UCR_DATABASE_ALIAS\") and settings.ICDS_UCR_DATABASE_ALIAS:\n with connections[settings.ICDS_UCR_DATABASE_ALIAS].cursor() as cursor:\n with open(path, \"r\") as sql_file:\n sql_to_execute = sql_file.read()\n celery_task_logger.info(\n \"Starting icds reports {} update_{}_aggregate_tables\".format(\n aggregation_date, aggregation_type\n )\n )\n\n try:\n cursor.execute(sql_to_execute, {\"date\": aggregation_date})\n except Error as exc:\n notify_exception(\n None,\n message=\"Error occurred during ICDS aggregation\",\n details={\n 'type': aggregation_type,\n 'date': aggregation_date,\n 'error': exc,\n }\n )\n self.retry(exc=exc)\n\n celery_task_logger.info(\n \"Ended icds reports {} update_{}_aggregate_tables\".format(\n aggregation_date, aggregation_type\n )\n )\n\n if future_tasks:\n aggregate_tables.delay(future_tasks[0], future_tasks[1:])\n else:\n # temporary soft assert to verify it's 
completing\n _soft_assert = soft_assert(to='{}@{}'.format('jemord', 'dimagi.com'))\n _soft_assert(False, \"Aggregation completed on {}\".format(settings.SERVER_ENVIRONMENT))\n celery_task_logger.info(\"Aggregation has completed\")\n\n\n@periodic_task(\n queue='background_queue',\n run_every=crontab(day_of_week='sunday', minute=0, hour=21),\n acks_late=True\n)\ndef recalculate_stagnant_cases():\n domain = 'icds-cas'\n config_ids = [\n 'static-icds-cas-static-ccs_record_cases_monthly',\n 'static-icds-cas-static-ccs_record_cases_monthly_v2',\n 'static-icds-cas-static-ccs_record_cases_monthly_tableau_v2',\n 'static-icds-cas-static-child_cases_monthly',\n 'static-icds-cas-static-child_cases_monthly_v2',\n 'static-icds-cas-static-child_cases_monthly_tableau_v2',\n ]\n\n stagnant_cases = set()\n\n for config_id in config_ids:\n config, is_static = get_datasource_config(config_id, domain)\n adapter = get_indicator_adapter(config)\n case_ids = _find_stagnant_cases(adapter)\n celery_task_logger.info(\n \"Found {} stagnant cases in config {}\".format(len(case_ids), config_id)\n )\n stagnant_cases = stagnant_cases.union(set(case_ids))\n celery_task_logger.info(\n \"Total number of stagant cases is now {}\".format(len(stagnant_cases))\n )\n\n case_accessor = CaseAccessors(domain)\n num_stagnant_cases = len(stagnant_cases)\n current_case_num = 0\n for case_ids in chunked(stagnant_cases, 1000):\n current_case_num += len(case_ids)\n cases = case_accessor.get_cases(list(case_ids))\n for case in cases:\n publish_case_saved(case, send_post_save_signal=False)\n celery_task_logger.info(\n \"Resaved {} / {} cases\".format(current_case_num, num_stagnant_cases)\n )\n\n\ndef _find_stagnant_cases(adapter):\n stagnant_date = datetime.utcnow() - timedelta(days=45)\n table = adapter.get_table()\n query = adapter.get_query_object()\n query = query.with_entities(table.columns.doc_id).filter(\n table.columns.inserted_at <= stagnant_date\n ).distinct()\n return query.all()\n","sub_path":"custom/icds_reports/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":6975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"508521162","text":"def image(activity):\n if activity == \"Shopping\":\n return \"https://cdn.vox-cdn.com/thumbor/pkA5HyV81YeDEWwLxbgreubS8W8=/0x0:6048x4032/1200x800/filters:focal(3747x1737:4713x2703)/cdn.vox-cdn.com/uploads/chorus_image/image/58384919/GettyImages_463173435.0.jpg\"\n elif activity == \"Dining\":\n return \"https://manofmany.com/wp-content/uploads/2016/09/Fine-Dining.jpg\"\n elif activity == \"Sightseeing\":\n return \"https://www.holidayrepresentations.com/blog/wp-content/uploads/2018/03/multiple-places-around-the-world.jpg\"\n else:\n return \"\"\n \n\ndef nyc_activity(activity, walking, crowds):\n if activity == \"Shopping\":\n if walking == \"True\" and crowds == \"False\":\n return \"Shopping along Madison Avenue in Manhattan\"\n elif walking == \"False\" and crowds == \"True\":\n return \"Shopping in Bloomingdales or Barneys\"\n elif walking == \"True\" and crowds == \"True\":\n return \"Shopping in Soho\"\n else:\n return \"Shopping in Soho (make sure to go on a weekday!)\"\n elif activity == \"Dining\":\n if walking == \"True\" and crowds == \"False\":\n return \"Buy a picnic from Gourmet Garage and head to Central Park\"\n elif walking == \"False\" and crowds == \"True\":\n return \"Grab some hotdogs and eat on the steps of the Met\"\n elif walking == \"True\" and crowds == \"True\":\n return \"Head to Columbus Circle 
and have a sit down dinner\"\n else:\n return \n elif activity == \"Sightseeing\":\n if walking == \"True\" and crowds == \"False\":\n return \"Go to the Brooklyn Promenade and take in Manhattan's skyline\"\n elif walking == \"False\" and crowds == \"True\":\n return \"Head to the top floor of the Empire State building and take in the view\"\n elif walking == \"True\" and crowds == \"True\":\n return \"Go for a walk on the Highline\"\n else:\n return \"Schedule a bus tour on New York City\"\n else:\n return \"Sorry there was an error please try again\"\n\n\ndef rome_activity(activity, walking, crowds):\n if activity == \"Shopping\":\n if walking == \"True\" and crowds == \"False\":\n return \"Walk along Via Urbana and Via del Boschetto\"\n elif walking == \"False\" and crowds == \"True\":\n return \"Go shopping at Centro Commerciale Porta di Roma\"\n elif walking == \"True\" and crowds == \"True\":\n return \"Walk along Via del Corso\"\n else:\n return \"Go shopping at Centro Commerciale Porta di Roma\"\n elif activity == \"Dining\":\n if walking == \"True\" and crowds == \"False\":\n return \"Walk through Trastevere and check out all the hip restaurants and bars\"\n elif walking == \"False\" and crowds == \"True\":\n return \"Have lunch at La Moretta and then check out the nearby Tiber River\"\n elif walking == \"True\" and crowds == \"True\":\n return \"Explore the Spanish Steps and then head to Ginger Sapori e Salute for dinner\"\n else:\n return \"Walk through the smaller streets around the Trevi Fountain and try a local panino shop\"\n elif activity == \"Sightseeing\":\n if walking == \"True\" and crowds == \"False\":\n return \"Go for a bike ride or walk along the Via Appia\"\n elif walking == \"False\" and crowds == \"True\":\n return \"Take a guided tour of the Pantheon\"\n elif walking == \"True\" and crowds == \"True\":\n return \"Take a walking tour of the most beautiful fountains in Rome (be sure to see the Trevi Fountain and the Fontana dei Quattro Fiumi!)\"\n else:\n return \"Rent a golf cart and go for a ride through the Villa Borghese gardens\"\n else:\n return \"Sorry there was an error please try again\"\n\ndef brussels_activity(activity, walking, crowds):\n if activity == \"Shopping\":\n if walking == \"True\" and crowds == \"False\":\n return \"Walk along Avenue Louise.\"\n elif walking == \"False\" and crowds == \"True\":\n return \"Gos shop at Anspach\"\n elif walking == \"True\" and crowds == \"True\":\n return \"Visit the Galerie du Roi\"\n else:\n return \"Shop online before going to stores.\"\n elif activity == \"Dining\":\n if walking == \"True\" and crowds == \"False\":\n return \"Go have dinner at Rouge Tomate\"\n elif walking == \"False\" and crowds == \"True\":\n return \"Go to Pei and Mei\"\n elif walking == \"True\" and crowds == \"True\":\n return \"Go have some middle eastern food at Al Barmaki\"\n else:\n return \"Order room service\"\n elif activity == \"Sightseeing\":\n if walking == \"True\" and crowds == \"False\":\n return \"Go to la Place du Grand Sablon and la Place du Petit Sablon\"\n elif walking == \"False\" and crowds == \"True\":\n return \"Go to la Grand Place to visit the City Hall and the Maison du Roi\"\n elif walking == \"True\" and crowds == \"True\":\n return \"Visit Notre Dame du Sablon\"\n else:\n return \"Get on a tram and take a ride around Brussels\"\n else:\n return \"Sorry there was an error please try 
again\"","sub_path":"app/models/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"440645325","text":"# -*- coding: utf-8 -*-\n# python 2.7\nimport lxml.html\nimport datetime\nimport json\nimport requests\n\n\ndef getCA():\n url = 'http://minkabu.jp/top/stock_news'\n tree = lxml.html.parse(url)\n contents = map(lambda html: html.text, tree.xpath('//*[@id=\"ajax_update_stock_news\"]//td'))\n contents2=map(lambda html: html.text, tree.xpath('//*[@id=\"ajax_update_stock_news\"]//td/a'))\n for i in range(0,len(contents)-1,1):\n if contents[i]==None:\n contents[i]=0\n else:\n contents[i]=contents[i].encode('utf-8')\n j=0\n res=[]\n for i in range(2,len(contents)-1,5):\n if contents[i+4]==0:\n j=j+1\n else:\n update_date=contents[i]\n corp_date=contents[i+2]\n hold=contents[i+4].split(':')\n corp_rate=float(hold[0])/float(hold[1])\n corp_name=contents2[j].encode('utf-8')\n j=j+1\n jsondata={'update_date':update_date, 'corp_date':corp_date, 'corp_rate':corp_rate, 'corp_name':corp_name}\n res.append(jsondata)\n\n print(res)\n return res\n\n\ndef postDB(post_data):\n response = requests.post('http://54.199.174.85:3000/api/ca', post_data)\n\n \nif __name__ == '__main__':\n res=getCA()\n for i in range(0,len(rows),1):\n tmp=res[i]\n postDB(tmp)\n\n","sub_path":"NAVcorp_old.py","file_name":"NAVcorp_old.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"322564455","text":"import os\nimport numpy as np\nimport scipy.io as spio\nimport configparser\nfrom keras.models import model_from_json\nfrom pyflow import reader as rd\nfrom pyflow import export as fex\nimport warnings\nimport matplotlib.pyplot as plt\nimport time\nimport datetime\nimport itertools\nfrom commons import fix_labels, debinning, data_preprocessing\nfrom matplotlib import colors\n\n# free params list\nconfig = configparser.ConfigParser()\nconfig.read('settings.ini')\ntraining_file = config.get('NN', 'training_db')\ntest_file = config.get('NN', 'test_db')\nmodel_file = config.get('NN', 'model_file')\nweights_file = config.get('NN', 'weights_file')\nroot_dir = config.get('NN', 'root_dir')\noutput_dir = config.get('NN', 'output_dir')\ngate_to_filter = config.get('Training', 'gate_to_filter')\nfeature_order = config.get('Training', 'feature_order')\nfeature_order = feature_order.split(',')\n\n# loading the model\nprint('loading existing model')\nmodel = model_from_json(open(model_file).read())\nmodel.load_weights(weights_file)\n\n\n''' TEST PART '''\ndict_test = spio.loadmat(test_file)\ntest_x = dict_test['test_x']\ntest_y = dict_test['test_y']\ntest_y = fix_labels(test_x, test_y)\n# test_y = np.reshape(test_y, (test_y.shape[1],))\n\nn_test_samples, n_features = test_x.shape\nerror = 0\npatient_list = dict_test['test_patients']\nresult_list = []\nfor i in range(0, n_test_samples):\n this_test = test_x[i, :]\n this_test = np.reshape(this_test, (1, this_test.shape[0]))\n prediction = model.predict(this_test)\n ground_truth = test_y[i]\n error += np.abs(prediction - ground_truth)\n # print('{}/{} ---> pred: {} true: {}'.format(i + 1, n_test_samples, prediction, ground_truth))\n # assign all the samples to the bins\n test_file = os.path.join(root_dir, patient_list[i], '{}.xml'.format(patient_list[i]))\n xml = rd.fcmReader(test_file)\n exp = xml.loadSpecific()\n print('{}: {}'.format(patient_list[i], exp.gateOrder))\n\n 
mydata, mylabels = data_preprocessing(exp=exp, feature_order=feature_order, gate_to_filter=gate_to_filter)\n\n xlab = 'CD45'\n ylab = 'CD10'\n blab = 'BLASTS'\n fxlab = feature_order.index(xlab)\n fylab = feature_order.index(ylab)\n\n gateNames = [x.upper() for x in exp.gateOrder]\n gateNames = [x.replace(' ', '') for x in gateNames]\n try:\n bIdx = gateNames.index(blab)\n except ValueError:\n bIdx = None\n mylabels = np.zeros((mydata.shape[0],))\n warnings.warn('No blasts in this sample')\n if bIdx is not None:\n mylabels = mylabels[:, bIdx]\n\n blasts = mydata[mylabels == 1, :]\n non_blasts = mydata[mylabels == 0, :]\n gt_pos = blasts.shape[0]\n gt_neg = non_blasts.shape[0]\n bx = blasts[:, fxlab]\n by = blasts[:, fylab]\n nx = non_blasts[:, fxlab]\n ny = non_blasts[:, fylab]\n h1 = plt.subplot(1, 2, 1)\n plt.plot(nx, ny, '.b', alpha=0.1)\n plt.plot(bx, by, '.r', alpha=0.3)\n plt.xlabel(xlab)\n plt.ylabel(ylab)\n plt.ylim([0, 1])\n plt.xlim([0, 1])\n plt.title('Ground Truth ({0}/{1:1.3})'.format(gt_pos, gt_pos / (gt_neg + gt_pos)))\n\n lab_pred, lab_analog = debinning(mydata, prediction)\n lab_pred = lab_pred.reshape((mydata.shape[0]))\n lab_analog = lab_analog.reshape((mydata.shape[0]))\n pred_blasts = mydata[lab_pred == 1, :]\n pred_non_blasts = mydata[lab_pred == 0, :]\n pr_pos = pred_blasts.shape[0]\n pr_neg = pred_non_blasts.shape[0]\n pbx = pred_blasts[:, fxlab]\n pby = pred_blasts[:, fylab]\n pnx = pred_non_blasts[:, fxlab]\n pny = pred_non_blasts[:, fylab]\n h2 = plt.subplot(1, 2, 2)\n # mycolor = (lab_analog[i], .3, 1 - lab_analog[i])\n # mycolor1 = (1 - lab_analog[i], .3, lab_analog[i])\n plt.plot(pnx, pny, '.b', alpha=0.1)\n plt.plot(pbx, pby, '.r', alpha=0.3)\n plt.xlabel(xlab)\n plt.ylabel(ylab)\n plt.ylim([0, 1])\n plt.xlim([0, 1])\n plt.title('Predictions ({0}/{1:1.3})'.format(pr_pos, pr_pos / (pr_pos + pr_neg)))\n plt.suptitle('Binning results comparison {} - {} {} cells'.format(patient_list[i], gt_pos + gt_neg, gate_to_filter))\n outfile = os.path.join(output_dir, '{}_{}.png'.format(patient_list[i], i))\n plt.savefig(outfile)\n plt.clf()\n # plt.show()\n myres = fex.prepareReportExperiment(lab_pred, mylabels, patient_list[i])\n result_list.append(myres)\n\nt = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")\nfex.printResults(result_list, os.path.join(output_dir, 'results_t{0}_{1}.txt'.format(1, t)))\nprint('Overall absolute error on test: {}'.format(error))\n","sub_path":"NnBinningRegressionTest.py","file_name":"NnBinningRegressionTest.py","file_ext":"py","file_size_in_byte":4468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"274781373","text":"# -*- coding: utf-8 -*-\n'''Configuration-related objects'''\n\nimport datetime\n\n__maintainer__ = 'Cezary Bartoszuk '\n__credits__ = ['Cezary Bartoszuk']\n\n\n_DEFAULT_PORT = 1138\n\n_TIMEOUT_SECONDS = 10\n\n_TIMESTAMP_YEAR = 1989\n_TIMESTAMP_MONTH = 7 # July\n_TIMESTAMP_DAY = 12\n_TIMESTAMP_HOUR = 12\n_TIMESTAMP_MINUTE = 5\n\n\nclass Config:\n '''Application configuration object.'''\n\n self_ip = '127.0.0.1'\n\n default_port = 1138\n\n timeout = 10\n\n timestamp = datetime.datetime(\n year=_TIMESTAMP_YEAR,\n month=_TIMESTAMP_MONTH,\n day=_TIMESTAMP_DAY,\n hour=_TIMESTAMP_HOUR,\n minute=_TIMESTAMP_MINUTE)\n","sub_path":"sklab/core/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"409336415","text":"from django.contrib.auth import 
authenticate,logout,login\nfrom rest_framework.decorators import api_view, permission_classes, authentication_classes\nfrom django.contrib.auth.models import User\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom . import models,serializers\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.authentication import SessionAuthentication, BasicAuthentication\nfrom rest_framework.permissions import IsAuthenticated\n\n\n@api_view(['POST'])\ndef userRegister(request):\n userSerial=serializers.UserSerializer(data=request.data)\n if userSerial.is_valid():\n userSerial.save()\n return Response({'ok':'True'},status=status.HTTP_201_CREATED)\n return Response({'ok': 'false'},status=status.HTTP_409_CONFLICT)\n\n\n\n@api_view(['POST'])\ndef userLogin(request):\n user=authenticate(\n username=request.POST['username'],\n password=request.POST['password']\n )\n if user is not None:\n login(request,user)\n return Response({'ok':'True'},status=status.HTTP_200_OK)\n else:\n return Response({'ok':'False'},status=status.HTTP_401_UNAUTHORIZED)\n\n\n@api_view(['GET'])\ndef userLogout(request):\n if request.user.is_authenticated():\n logout(request)\n return Response({'ok': 'True'},status=status.HTTP_200_OK)\n else:\n return Response({'ok': 'False'},status=status.HTTP_201_CREATED)\n\n@api_view(['GET'])\ndef projectList(request,offset):\n if request.user.is_authenticated():\n user=request.user\n catagorylist=user.catagory_set.all()\n flag=0#\n postlist=[]\n for catagory in catagorylist:\n if flag==0:\n postlist=catagory.post_set.all()\n flag=1\n else:\n postlist=postlist|catagory.post_set.all()\n if (flag == 1):\n postlist = postlist.order_by('-created')[int(offset):int(offset) + 10]\n projectlist = models.Project.objects.filter(post__in=postlist)\n eventlist = models.Event.objects.filter(post__in=postlist)\n teamlist = models.Team.objects.filter(post__in=postlist)\n projectlistserial = serializers.ProjectSerializer(projectlist, many=True)\n eventlistserial = serializers.EventSerializer(eventlist, many=True)\n teamlistserial = serializers.TeamSerializer(teamlist, many=True)\n walllistserial = {}\n walllistserial['project'] = projectlistserial.data\n walllistserial['event'] = eventlistserial.data\n walllistserial['team'] = teamlistserial.data\n return Response(walllistserial, status=status.HTTP_200_OK)\n return Response({'project': [], 'event': [], 'team': []}, status=status.HTTP_204_NO_CONTENT)\n return Response({'ok': 'False'},status=status.HTTP_204_NO_CONTENT)\n\ndef index(request):\n pass\n\n\n@api_view(['POST','GET'])\ndef example(request):\n if request.user.is_authenticated():\n return Response({'ok': 'True'},status=status.HTTP_200_OK)\n return Response({'ok': 'False'},status=status.HTTP_401_UNAUTHORIZED)\n","sub_path":"baseApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"604717393","text":"class Cars:\n def myColor(self,color):\n self.color=color\n print(self.color)\n\nclass BMW(Cars):\n def topSpeed(self,speed):\n self.speed=speed\n print(self.speed)\n\nobjcars=Cars()\nobjBMW=BMW()\n# objcars.myColor(\"Red\")\n# objcars.topSpeed(100)\nobjBMW.myColor(\"white\")\nobjBMW.topSpeed(150)\n\n ","sub_path":"day6/inheritx.py","file_name":"inheritx.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"84297026","text":"\"\"\"\nsegment 
between two points.\n\"\"\"\nimport struct\nfrom math import atan, pi\nfrom geo.point import Point\nfrom geo.quadrant import Quadrant\nfrom geo.coordinates_hash import CoordinatesHash\n\nclass Segment:\n \"\"\"\n oriented segment between two points.\n\n for example:\n\n - create a new segment between two points:\n\n segment = Segment([point1, point2])\n\n - create a new segment from coordinates:\n\n segment = Segment([Point([1.0, 2.0]), Point([3.0, 4.0])])\n\n - compute intersection point with other segment:\n\n intersection = segment1.intersection_with(segment2)\n\n \"\"\"\n # static attribute: common to every Segment\n scanLine = None\n scanPoint = None\n # this is a default adjuster\n # do not forget to update it with the correct one\n adjuster = CoordinatesHash()\n\n def __init__(self, points, _id=None):\n \"\"\"\n create a segment from an array of two points.\n \"\"\"\n self.endpoints = points\n self._angle = None\n self.index = _id\n self.key_cache = 0\n self.key_y = float('-inf')\n\n def copy(self):\n \"\"\"\n return duplicate of given segment (no shared points with original,\n they are also copied).\n \"\"\"\n return Segment([p.copy() for p in self.endpoints])\n\n def length(self):\n \"\"\"\n return length of segment.\n example:\n segment = Segment([Point([1, 1]), Point([5, 1])])\n distance = segment.length() # distance is 4\n \"\"\"\n return self.endpoints[0].distance_to(self.endpoints[1])\n\n def bounding_quadrant(self):\n \"\"\"\n return min quadrant containing self.\n \"\"\"\n quadrant = Quadrant.empty_quadrant(2)\n for point in self.endpoints:\n quadrant.add_point(point)\n return quadrant\n\n def svg_content(self):\n \"\"\"\n svg for tycat.\n \"\"\"\n return '\\n'.format(\n *self.endpoints[0].coordinates,\n *self.endpoints[1].coordinates)\n\n def intersection_with(self, other):\n \"\"\"\n intersect two 2d segments.\n only return point if included on the two segments.\n \"\"\"\n i = self.line_intersection_with(other)\n if i is None:\n return # parallel lines\n\n if self.contains(i) and other.contains(i):\n return i\n\n def line_intersection_with(self, other):\n \"\"\"\n return point intersecting with the two lines passing through\n the segments.\n none if lines are almost parallel.\n \"\"\"\n # solve following system :\n # intersection = start of self + alpha * direction of self\n # intersection = start of other + beta * direction of other\n directions = [s.endpoints[1] - s.endpoints[0] for s in (self, other)]\n denominator = directions[0].cross_product(directions[1])\n if abs(denominator) < 0.000001:\n # almost parallel lines\n return\n start_diff = other.endpoints[0] - self.endpoints[0]\n alpha = start_diff.cross_product(directions[1]) / denominator\n return self.endpoints[0] + directions[0] * alpha\n\n def angle(self):\n \"\"\"\n Return the angle between the segment and the abscise\n | -> pi/2\n _ -> 0\n / -> 3*pi/4\n \\\\ -> pi/4\n \"\"\"\n if self._angle is not None:\n return self._angle\n [denominator, numerator] = (self.endpoints[1] - self.endpoints[0]).coordinates\n if abs(denominator) < 0.000001:\n # almost vertical line\n self._angle = pi/2\n else:\n angle = atan(numerator/denominator)\n if angle < 0:\n angle = pi + angle\n self._angle = angle\n return self._angle\n\n def contains(self, possible_point):\n \"\"\"\n is given point inside us ?\n be careful, determining if a point is inside a segment is a difficult problem\n (it is in fact a meaningless question in most cases).\n you might get wrong results for points extremely near endpoints.\n \"\"\"\n distance = 
sum(possible_point.distance_to(p) for p in self.endpoints)\n return abs(distance - self.length()) < 0.000001\n\n def key(self):\n \"\"\"\n Return the key of the segment\n \"\"\"\n #pylint: disable=C0103\n # x, y, _x, _y are absiss and ordonates\n\n [x, y] = self.scanPoint.coordinates\n\n if y != self.key_y:\n point = self.line_intersection_with(Segment.scanLine)\n point = self.adjuster.hash_point(point)\n [_x, _] = point.coordinates\n self.key_cache = _x\n self.key_y = y\n\n xSegment = self.key_cache\n angle = self.angle()\n if xSegment >= x:\n return (xSegment, angle)\n else:\n return (xSegment, -angle)\n\n @staticmethod\n def changeScanPoint(y, x):\n \"\"\"\n Update the static atttributes of Segment class\n \"\"\"\n #pylint: disable=C0103\n # y is an ordonate\n Segment.scanLine = Segment([Point([0, y]), Point([1, y])])\n Segment.scanPoint = Point([x, y])\n\n def __lt__(self, other):\n return self.key() < other.key()\n\n def __le__(self, other):\n return self.key() <= other.key()\n\n\n def __eq__(self, other):\n return self.key() == other.key()\n\n def __str__(self):\n return \"Segment([\" + str(self.endpoints[0]) + \", \" + \\\n str(self.endpoints[1]) + \"])\"\n\n def __repr__(self):\n return \"[\" + repr(self.endpoints[0]) + \", \" + \\\n repr(self.endpoints[1]) + \"])\"\n\n\ndef load_segments(filename):\n \"\"\"\n loads given .bo file.\n returns a vector of segments.\n \"\"\"\n coordinates_struct = struct.Struct('4d')\n segments = []\n adjuster = CoordinatesHash()\n\n with open(filename, \"rb\") as bo_file:\n packed_segment = bo_file.read(32)\n index_segment = 0\n while packed_segment:\n coordinates = coordinates_struct.unpack(packed_segment)\n raw_points = [Point(coordinates[0:2]), Point(coordinates[2:])]\n adjusted_points = [adjuster.hash_point(p) for p in raw_points]\n segments.append(Segment(adjusted_points, index_segment))\n packed_segment = bo_file.read(32)\n index_segment += 1\n\n return adjuster, segments\n","sub_path":"geo/segment.py","file_name":"segment.py","file_ext":"py","file_size_in_byte":6389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"330974190","text":"import json\nimport os \nfrom sys import argv\nfrom datetime import datetime\n\npathBaseJson= \"/home/\" + str(os.getlogin()) + \"/.local/bin/nylinuxUtil/base.json\"\npathBuildBash= \"/home/\" + str(os.getlogin()) + \"/.local/bin/nylinuxUtil/ny/build.sh\"\nname = argv[1]\nfil = open(pathBaseJson, \"r\")\npat= 1\n#print(fil.read())\njs = json.loads(fil.read())\nfor i in js[\"project\"] :\n if i[\"name\"] == name:\n pat = i[\"path\"]\n print(pat)\n\nos.system(\"cd \"+ str(pat) + \" && \" + str(pathBuildBash)) \n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"ny/nyb.py","file_name":"nyb.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"635985401","text":"import random\ndef play():\n print(\"You are walkingin an ice storm back to camp.\")\n print(\"You see 3 ice bridges ahead. They look dangerous.\")\n alive = True\n score = 0\n while alive:\n number = random.randint(1,3)\n print(\"Choose bridge 1, 2, or 3\")\n guess = int(input())\n if guess == number:\n print(\"Crack -- Crash -- Bye, byeeeeeeeeeee!\")\n alive = False\n elif guess != 1 and guess != 2 and guess != 3:\n score -= 1\n print(\"You stray too far from the bridges, and nearly slip off the edge. You lose a point.\")\n else:\n print(\"Nice job! 
You are safe for now...\")\n print(\"There are more bridges ahead.\")\n score += 1\n print(\"Game Over! You scored\",str(score) + \".\")\nplayGame = True\nwhile playGame:\n play()\n print(\"Would you like to play again?\")\n again = input()\n if again != \"yes\":\n playGame = False\n quit()\n","sub_path":"Python - Beginner/lesson 5 - youwen.py","file_name":"lesson 5 - youwen.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"93282866","text":"#!/usr/bin/env python3\n\nimport click\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n@click.command()\n@click.argument('csv-path', nargs=-1, type=click.Path())\n@click.option('-c', '--col', type=int)\n@click.option('-o', '--output-path', type=click.Path(), default=\"comparison_graph.png\")\n@click.option('--ylabel')\n@click.option('--ymax', type=float)\ndef bin_graph(csv_path, col, output_path, ylabel=\"\", ymax=None):\n all_data = []\n max_mats = 0\n\n print(\"loading data...\")\n for path in csv_path:\n r = np.loadtxt(path, delimiter=',', skiprows=1, usecols=col)\n rmax_by_mat = np.maximum.accumulate(r)\n all_data.append(rmax_by_mat)\n max_mats = max(max_mats, rmax_by_mat.size)\n\n np_data = np.empty((max_mats, len(all_data)))\n for i, dataset in enumerate(all_data):\n np_data[:,i] = dataset\n\n print(\"plotting...\")\n fig = plt.figure(figsize=(3.75,3.75), tight_layout=True)\n ax = fig.add_subplot(1, 1, 1)\n\n ax.set_xlabel(\"# materials\")\n ax.set_ylabel(ylabel)\n\n ax.grid(linestyle='-', color='0.8', zorder=0)\n ax.plot(range(max_mats), np_data)\n if ymax:\n ax.axhline(ymax, linestyle=\"--\", lw=3, color=\"black\", label=\"Max\")\n\n ax.legend(csv_path)\n\n fig.savefig(output_path, dpi=300)\n plt.close(fig)\n\n\n\nif __name__ == '__main__':\n bin_graph()\n","sub_path":"htsohm/bin/graph_max_prop_by_num_materials.py","file_name":"graph_max_prop_by_num_materials.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"591148794","text":"\r\n# (C1)(w)V(G|C2)+T\r\n\r\n#symbol \" ' \" for undefine symbol and sign for english\r\n\r\n'''\r\nC1 = initial consonant onset\r\nw = labiovelar on-glide /w/\r\nV = vowel nucleus\r\nG = off-glide coda (/j/ or /w/)\r\nC2 = final consonant coda\r\nT = tone.\r\n'''\r\nCus_onsets = { u'b' : u'b', u't' : u't', u'th' : u'tʰ', u'đ' : u'd', u'ch' : u'c', \r\n\t\t\t\tu'kh' : u'x', u'g' : u'ɣ', u'l' : u'l', u'm' : u'm', u'n': u'n', \r\n\t\t\t\tu'ngh': u'ŋ', u'nh' : u'ɲ', u'ng' : u'ŋ', u'ph' : u'f', u'v' : u'v', \r\n\t\t\t\tu'x' : u's', u'd' : u'z', u'h' : u'h', u'p' : u'p', u'qu' : u'kw',\r\n\t\t\t\tu'gi' : u'j', u'tr' : u'ʈ', u'k' : u'k', u'c' : u'k', u'gh' : u'ɣ', \r\n\t\t\t\tu'r' : u'ʐ', u's' : u'ʂ', u'gi': u'j'}\r\n\t\t\t\t\r\n\t\t\t \r\nCus_nuclei = { u'a' : u'a', u'á' : u'a', u'à' : u'a', u'ả' : u'a', u'ã' : u'a', u'ạ' : u'a', \r\n\t\t\t\tu'â' : u'ɤ̆', u'ấ' : u'ɤ̆', u'ầ' : u'ɤ̆', u'ẩ' : u'ɤ̆', u'ẫ' : u'ɤ̆', u'ậ' : u'ɤ̆',\r\n\t\t\t\tu'ă' : u'ă', u'ắ' : u'ă', u'ằ' : u'ă', u'ẳ' : u'ă', u'ẵ' : u'ă', u'ặ' : u'ă',\r\n\t\t\t\tu'e' : u'ɛ', u'é' : u'ɛ', u'è' : u'ɛ', u'ẻ' : u'ɛ', u'ẽ' : u'ɛ', u'ẹ' : u'ɛ',\r\n\t\t\t\tu'ê' : u'e', u'ế' : u'e', u'ề' : u'e', u'ể' : u'e', u'ễ' : u'e', u'ệ' : u'e',\r\n\t\t\t\tu'i' : u'i', u'í' : u'i', u'ì' : u'i', u'ỉ' : u'i', u'ĩ' : u'i', u'ị' : u'i',\r\n\t\t\t\tu'o' : u'ɔ', u'ó' : u'ɔ', u'ò' : u'ɔ', u'ỏ' : u'ɔ', u'õ' : u'ɔ', u'ọ' : u'ɔ',\r\n\t\t\t\tu'ô' : u'o', u'ố' : u'o', u'ồ' : u'o', u'ổ' : u'o', 
u'ỗ' : u'o', u'ộ' : u'o',\r\n\t\t\t\tu'ơ' : u'ɤ', u'ớ' : u'ɤ', u'ờ' : u'ɤ', u'ở' : u'ɤ', u'ỡ' : u'ɤ', u'ợ' : u'ɤ',\r\n\t\t\t\tu'u' : u'u', u'ú' : u'u', u'ù' : u'u', u'ủ' : u'u', u'ũ' : u'u', u'ụ' : u'u',\r\n\t\t\t\tu'ư' : u'ɯ', u'ứ' : u'ɯ', u'ừ' : u'ɯ', u'ử' : u'ɯ', u'ữ' : u'ɯ', u'ự' : u'ɯ',\r\n\t\t\t\tu'y' : u'i', u'ý' : u'i', u'ỳ' : u'i', u'ỷ' : u'i', u'ỹ' : u'i', u'ỵ' : u'i',\r\n\t\t\t\t\r\n\t\t\t\tu'eo' : u'eo', u'éo' : u'eo', u'èo' : u'eo', u'ẻo' : u'eo', u'ẽo': u'eo', u'ẹo' : u'eo',\r\n\t\t\t\tu'êu' : u'ɛu', u'ếu' : u'ɛu', u'ều' : u'ɛu', u'ểu' : u'ɛu', u'ễu': u'ɛu', u'ệu' : u'ɛu',\r\n\t\t\t\tu'ia' : u'iə', u'ía' : u'iə', u'ìa' : u'iə', u'ỉa' : u'iə', u'ĩa' : u'iə', u'ịa' : u'iə',\r\n\t\t\t\tu'ia' : u'iə', u'iá' : u'iə', u'ià' : u'iə', u'iả' : u'iə', u'iã' : u'iə', u'iạ' : u'iə',\r\n\t\t\t\tu'iê' : u'iə', u'iế' : u'iə', u'iề' : u'iə', u'iể' : u'iə', u'iễ' : u'iə', u'iệ' : u'iə',\r\n\t\t\t\tu'oo' : u'ɔ', u'óo' : u'ɔ', u'òo' : u'ɔ', u'ỏo' : u'ɔ', u'õo' : u'ɔ', u'ọo' : u'ɔ',\r\n\t\t\t\tu'oo' : u'ɔ', u'oó' : u'ɔ', u'oò' : u'ɔ', u'oỏ' : u'ɔ', u'oõ' : u'ɔ', u'oọ' : u'ɔ',\r\n\t\t\t\tu'ôô' : u'o', u'ốô' : u'o', u'ồô' : u'o', u'ổô' : u'o', u'ỗô' : u'o', u'ộô' : u'o',\t\t\t\t \r\n u'ôô' : u'o', u'ôố' : u'o', u'ôồ' : u'o', u'ôổ' : u'o', u'ôỗ' : u'o', u'ôộ' : u'o',\t\t\t\t \r\n u'ua' : u'uə', u'úa' : u'uə', u'ùa' : u'uə', u'ủa' : u'uə', u'ũa' : u'uə', u'ụa' : u'uə',\r\n\t\t\t\tu'uô' : u'uə', u'uố' : u'uə', u'uồ' : u'uə', u'uổ' : u'uə', u'uỗ' : u'uə', u'uộ' : u'uə',\r\n\t\t\t\tu'ưa' : u'ɯə', u'ứa' : u'ɯə', u'ừa' : u'ɯə', u'ửa' : u'ɯə', u'ữa' : u'ɯə', u'ựa' : u'ɯə',\r\n\t\t\t\tu'ươ' : u'ɯə', u'ướ' : u'ɯə', u'ườ' : u'ɯə', u'ưở' : u'ɯə', u'ưỡ' : u'ɯə', u'ượ' : u'ɯə',\r\n\t\t\t\tu'yê' : u'iɛ', u'yế' : u'iɛ', u'yề' : u'iɛ', u'yể' : u'iɛ', u'yễ' : u'iɛ', u'yệ' : u'iɛ', \r\n u'uơ' : u'uə', u'uở' : u'uə', u'uờ': u'uə', u'uở' : u'uə', u'uỡ' : u'uə', u'uợ' : u'uə',\r\n\t\t\t\t}\r\n\t\t\t\t \r\n\t \r\nCus_offglides = { u'ai' : u'aj', u'ái' : u'aj', u'ài' : u'aj', u'ải' : u'aj', u'ãi' : u'aj', u'ại' : u'aj',\r\n\t\t\t\t u'ay' : u'ăj', u'áy' : u'ăj', u'ày' : u'ăj', u'ảy' : u'ăj', u'ãy' : u'ăj', u'ạy' : u'ăj',\r\n\t\t\t\t u'ao' : u'aw', u'áo' : u'aw', u'ào' : u'aw', u'ảo' : u'aw', u'ão' : u'aw', u'ạo' : u'aw',\r\n\t\t\t\t u'au' : u'ăw', u'áu' : u'ăw', u'àu' : u'ăw', u'ảu' : u'ăw', u'ãu' : u'ăw', u'ạu' : u'ăw',\r\n\t\t\t\t u'ây' : u'ɤ̆j', u'ấy' : u'ɤ̆j', u'ầy' : u'ɤ̆j', u'ẩy' : u'ɤ̆j', u'ẫy' : u'ɤ̆j', u'ậy' : u'ɤ̆j', \r\n\t\t\t\t u'âu' : u'ɤ̆w', u'ấu' : u'ɤ̆w', u'ầu': u'ɤ̆w', u'ẩu' : u'ɤ̆w', u'ẫu' : u'ɤ̆w', u'ậu' : u'ɤ̆w',\r\n\t\t\t\t u'eo' : u'ew', u'éo' : u'ew', u'èo' : u'ew', u'ẻo' : u'ew', u'ẽo' : u'ew', u'ẹo' : u'ew',\r\n\t\t\t\t u'iu' : u'iw', u'íu' : u'iw', u'ìu' : u'iw', u'ỉu' : u'iw', u'ĩu' : u'iw', u'ịu' : u'iw',\r\n\t\t\t\t u'oi' : u'ɔj', u'ói' : u'ɔj', u'òi' : u'ɔj', u'ỏi' : u'ɔj', u'õi' : u'ɔj', u'ọi' : u'ɔj',\r\n\t\t\t\t u'ôi' : u'oj', u'ối' : u'oj', u'ồi' : u'oj', u'ổi' : u'oj', u'ỗi' : u'oj', u'ội' : u'oj',\r\n\t\t\t\t u'ui' : u'uj', u'úi' : u'uj', u'ùi' : u'uj', u'ủi' : u'uj', u'ũi' : u'uj', u'ụi' : u'uj', \r\n\t\t\t\t \r\n #u'uy' : u'uj', u'úy' : u'uj', u'ùy' : u'uj', u'ủy' : u'uj', u'ũy' : u'uj', u'ụy' : u'uj', \r\n u'uy' : u'ʷi', u'úy' : u'uj', u'ùy' : u'uj', u'ủy' : u'uj', u'ũy' : u'uj', u'ụy' : u'uj',\r\n #thay để hạn chế trùng âm\r\n u'uy' : u'ʷi', u'uý' : u'ʷi', u'uỳ' : u'ʷi', u'uỷ' : u'ʷi', u'uỹ' : u'ʷi', u'uỵ' : u'ʷi',\r\n\t\t\t\t \r\n u'ơi' : u'ɤj', u'ới' : u'ɤj', u'ời' : u'ɤj', u'ởi' : u'ɤj', u'ỡi' : u'ɤj', u'ợi' : u'ɤj', \r\n\t\t\t\t u'ưi' : u'ɯj', u'ứi' : u'ɯj', u'ừi' : u'ɯj', u'ửi' 
: u'ɯj', u'ữi' : u'ɯj', u'ựi' : u'ɯj', \r\n\t\t\t\t u'ưu' : u'ɯw', u'ứu' : u'ɯw', u'ừu' : u'ɯw', u'ửu' : u'ɯw', u'ữu' : u'ɯw', u'ựu' : u'ɯw',\r\n\r\n\t\t\t\t u'iêu' : u'iəw', u'iếu' : u'iəw', u'iều' : u'iəw', u'iểu' : u'iəw', u'iễu' : u'iəw', u'iệu' : u'iəw',\r\n\t\t\t\t u'yêu' : u'iəw', u'yếu' : u'iəw', u'yều' : u'iəw', u'yểu' : u'iəw', u'yễu' : u'iəw', u'yệu' : u'iəw', \r\n\t\t\t\t u'uôi' : u'uəj', u'uối' : u'uəj', u'uồi' : u'uəj', u'uổi' : u'uəj', u'uỗi' : u'uəj', u'uội' : u'uəj', \r\n\t\t\t\t u'ươi' : u'ɯəj', u'ưới' : u'ɯəj', u'ười' : u'ɯəj', u'ưởi' : u'ɯəj', u'ưỡi' : u'ɯəj', u'ượi' : u'ɯəj', \r\n\t\t\t\t u'ươu' : u'ɯəw', u'ướu' : u'ɯəw', u'ườu' : u'ɯəw', u'ưởu' : u'ɯəw', 'ưỡu' : u'ɯəw', u'ượu' : u'ɯəw'\t \r\n\t\t\t }\r\n#Các âm vòng ở đây i chang không vòm: không có w ở trước\t\t=> Try to add ʷ\t\r\nCus_onglides = { u'oa' : u'ʷa', u'oá' : u'ʷa', u'oà' : u'ʷa', u'oả' : u'ʷa', u'oã' : u'ʷa', u'oạ' : u'ʷa', \r\n\t\t u'óa' : u'ʷa', u'òa' : u'ʷa', u'ỏa' : u'ʷa', u'õa' : u'ʷa', u'ọa' : u'ʷa', \r\n\t\t\t u'oă' : u'ʷă', u'oắ' : u'ʷă', u'oằ' : u'ʷă', u'oẳ' : u'ʷă', u'oẵ' : u'ʷă', u'oặ' : u'ʷă', \t\r\n\t\t\t u'oe' : u'ʷɛ', u'oé' : u'ʷɛ', u'oè' : u'ʷɛ', u'oẻ' : u'ʷɛ', u'oẽ' : u'ʷɛ', u'oẹ' : u'ʷɛ', \t\r\n\t\t\t u'oe' : u'ʷɛ', u'óe' : u'ʷɛ', u'òe' : u'ʷɛ', u'ỏe' : u'ʷɛ', u'õe' : u'ʷɛ', u'ọe' : u'ʷɛ', \t\r\n\t\t\t u'ua' : u'ʷa', u'uá' : u'ʷa', u'uà' : u'ʷa', u'uả' : u'ʷa', u'uã' : u'ʷa', u'uạ' : u'ʷa', \r\n\t\t\t u'uă' : u'ʷă', u'uắ' : u'ʷă', u'uằ' : u'ʷă', u'uẳ' : u'ʷă', u'uẵ' : u'ʷă', u'uặ' : u'ʷă', \t\r\n\t\t\t u'uâ' : u'ʷɤ̆', u'uấ' : u'ʷɤ̆', u'uầ' : u'ʷɤ̆', u'uẩ' : u'ʷɤ̆', u'uẫ' : u'ʷɤ̆', u'uậ' : u'ʷɤ̆', \r\n\t\t\t u'ue' : u'ʷɛ', u'ué' : u'ʷɛ', u'uè' : u'ʷɛ', u'uẻ' : u'ʷɛ', u'uẽ' : u'ʷɛ', u'uẹ' : u'ʷɛ', \r\n\t\t\t u'uê' : u'ʷe', u'uế' : u'ʷe', u'uề' : u'ʷe', u'uể' : u'ʷe', u'uễ' : u'ʷe', u'uệ' : u'ʷe', \r\n\t\t\t u'uơ' : u'ʷɤ', u'uớ' : u'ʷɤ', u'uờ' : u'ʷɤ', u'uở' : u'ʷɤ', u'uỡ' : u'ʷɤ', u'uợ' : u'ʷɤ', \r\n\t\t\t u'uy' : u'ʷi', u'uý' : u'ʷi', u'uỳ' : u'ʷi', u'uỷ' : u'ʷi', u'uỹ' : u'ʷi', u'uỵ' : u'ʷi',\r\n\t\t u'uya' : u'ʷiə', u'uyá' : u'ʷiə', u'uyà' : u'ʷiə', u'uyả' : u'ʷiə', u'uyã' : u'ʷiə', u'uyạ' : u'ʷiə', \r\n\t\t\t\t u'uyê' : u'ʷiə', u'uyế' : u'ʷiə', u'uyề' : u'ʷiə', u'uyể' : u'ʷiə', u'uyễ' : u'ʷiə', u'uyệ' : u'ʷiə', \r\n\t\t\t\t u'uyu' : u'ʷiu', u'uyú' : u'ʷiu', u'uyù' : u'ʷiu', u'uyủ' : u'ʷiu', u'uyũ' : u'ʷiu', u'uyụ' : u'ʷiu', \r\n\t\t\t\t u'uyu' : u'ʷiu', u'uýu' : u'ʷiu', u'uỳu' : u'ʷiu', u'uỷu' : u'ʷiu', u'uỹu' : u'ʷiu', u'uỵu' : u'ʷiu',\r\n u'oen' : u'ʷen', u'oén' : u'ʷen', u'oèn' : u'ʷen', u'oẻn' : u'ʷen', u'oẽn' : u'ʷen', u'oẹn' : u'ʷen', \t\r\n u'oet' : u'ʷet', u'oét' : u'ʷet', u'oèt' : u'ʷet', u'oẻt' : u'ʷet', u'oẽt' : u'ʷet', u'oẹt' : u'ʷet' \t\r\n\t\t\t\t}\r\n\r\nCus_onoffglides = { u'oe' : u'ɛj', u'oé' : u'ɛj', u'oè' : u'ɛj', u'oẻ' : u'ɛj', u'oẽ' : u'ɛj', u'oẹ' : u'ɛj', \r\n\t\t\t\t u'oai' : u'aj', u'oái' : u'aj', u'oài' : u'aj', u'oải' : u'aj', u'oãi' : u'aj', u'oại' : u'aj',\r\n\t\t\t\t u'oay' : u'ăj', u'oáy' : u'ăj', u'oày' : u'ăj', u'oảy' : u'ăj', u'oãy' : u'ăj', u'oạy' : u'ăj',\r\n\t\t\t\t u'oao' : u'aw', u'oáo' : u'aw', u'oào' : u'aw', u'oảo' : u'aw', u'oão' : u'aw', u'oạo' : u'aw',\r\n\t\t\t\t u'oeo' : u'ew', u'oéo' : u'ew', u'oèo' : u'ew', u'oẻo' : u'ew', u'oẽo' : u'ew', u'oẹo' : u'ew',\r\n\t\t\t\t u'oeo' : u'ew', u'óeo' : u'ew', u'òeo' : u'ew', u'ỏeo' : u'ew', u'õeo' : u'ew', u'ọeo' : u'ew',\r\n\t\t\t\t u'ueo' : u'ew', u'uéo' : u'ew', u'uèo' : u'ew', u'uẻo' : u'ew', u'uẽo' : u'ew', u'uẹo' : u'ew',\r\n\t\t\t\t u'uai' : u'aj', u'uái' : u'aj', u'uài' : u'aj', u'uải' : 
u'aj', u'uãi' : u'aj', u'uại' : u'aj',\r\n\t\t\t\t u'uay' : u'ăj', u'uáy' : u'ăj', u'uày' : u'ăj', u'uảy' : u'ăj', u'uãy' : u'ăj', u'uạy' : u'ăj',\r\n\t\t\t\t u'uây' : u'ɤ̆j', u'uấy' : u'ɤ̆j', u'uầy' : u'ɤ̆j', u'uẩy' : u'ɤ̆j', u'uẫy' : u'ɤ̆j', u'uậy' : u'ɤ̆j'\r\n\t\t\t\t }\r\n\r\nCus_codas = { u'p' : u'p', u't' : u't', u'c' : u'k', u'm' : u'm', u'n' : u'n', u'ng' : u'ŋ', u'nh' : u'ɲ', u'ch' : u'tʃ' }\r\n\r\nCus_tones_p = { u'á' : 5, u'à' : 2, u'ả' : 4, u'ã' : 3, u'ạ' : 6, \r\n\t\t\t\tu'ấ' : 5, u'ầ' : 2, u'ẩ' : 4, u'ẫ' : 3, u'ậ' : 6,\r\n\t\t\t\tu'ắ' : 5, u'ằ' : 2, u'ẳ' : 4, u'ẵ' : 3, u'ặ' : 6,\r\n\t\t\t\tu'é' : 5, u'è' : 2, u'ẻ' : 4, u'ẽ' : 3, u'ẹ' : 6,\r\n\t\t\t\tu'ế' : 5, u'ề' : 2, u'ể' : 4, u'ễ' : 3, u'ệ' : 6,\r\n\t\t\t\tu'í' : 5, u'ì' : 2, u'ỉ' : 4, u'ĩ' : 3, u'ị' : 6,\r\n\t\t\t\tu'ó' : 5, u'ò' : 2, u'ỏ' : 4, u'õ' : 3, u'ọ' : 6,\r\n\t\t\t\tu'ố' : 5, u'ồ' : 2, u'ổ' : 4, u'ỗ' : 3, u'ộ' : 6,\r\n\t\t\t\tu'ớ' : 5, u'ờ' : 2, u'ở' : 4, u'ỡ' : 3, u'ợ' : 6,\r\n\t\t\t\tu'ú' : 5, u'ù' : 2, u'ủ' : 4, u'ũ' : 3, u'ụ' : 6,\r\n\t\t\t\tu'ứ' : 5, u'ừ' : 2, u'ử' : 4, u'ữ' : 3, u'ự' : 6,\r\n\t\t\t\tu'ý' : 5, u'ỳ' : 2, u'ỷ' : 4, u'ỹ' : 3, u'ỵ' : 6,\r\n\t\t\t }\r\n\t\t\t \r\nCus_gi = { u'gi' : u'zi', u'gí': u'zi', u'gì' : u'zi', u'gì' : u'zi', u'gĩ' : u'zi', u'gị' : u'zi'}\r\n\r\nCus_qu = {u'quy' : u'kwi', u'qúy' : u'kwi', u'qùy' : u'kwi', u'qủy' : u'kwi', u'qũy' : u'kwi', u'qụy' : u'kwi'}\r\n\r\n\r\n################################################3\r\nimport sys, codecs, re\r\nfrom io import StringIO\r\nfrom optparse import OptionParser\r\nfrom string import punctuation\r\n#import prosodic as p\r\n\r\ndef trans(word, dialect, glottal, pham, cao, palatals):\r\n\r\n \r\n #Custom\r\n onsets, nuclei, codas, onglides, offglides, onoffglides, qu, gi = Cus_onsets, Cus_nuclei, Cus_codas, Cus_onglides, Cus_offglides, Cus_onoffglides, Cus_qu, Cus_gi\r\n\r\n\r\n\r\n if pham or cao:\r\n\r\n #Custom\r\n tones_p = Cus_tones_p\r\n\r\n\r\n tones = tones_p\r\n\r\n ons = ''\r\n nuc = ''\r\n cod = ''\r\n ton = 0\r\n oOffset = 0\r\n cOffset = 0 \r\n l = len(word)\r\n\r\n if l > 0:\r\n if word[0:3] in onsets: # if onset is 'ngh'\r\n ons = onsets[word[0:3]]\r\n oOffset = 3\r\n elif word[0:2] in onsets: # if onset is 'nh', 'gh', 'kʷ' etc\r\n ons = onsets[word[0:2]]\r\n oOffset = 2\r\n elif word[0] in onsets: # if single onset\r\n ons = onsets[word[0]]\r\n oOffset = 1\r\n\r\n if word[l-2:l] in codas: # if two-character coda\r\n cod = codas[word[l-2:l]]\r\n cOffset = 2\r\n elif word[l-1] in codas: # if one-character coda\r\n cod = codas[word[l-1]]\r\n cOffset = 1\r\n \r\n\r\n #if word[0:2] == u'gi' and cod and len(word) == 3: # if you just have 'gi' and a coda...\r\n if word[0:2] in gi and cod and len(word) == 3: # if you just have 'gi' and a coda...\r\n nucl = u'i'\r\n ons = u'z'\r\n else:\r\n nucl = word[oOffset:l-cOffset]\r\n\r\n if nucl in nuclei:\r\n if oOffset == 0:\r\n if glottal == 1:\r\n if word[0] not in onsets: # if there isn't an onset.... 
\r\n ons = u'ʔ'+nuclei[nucl] # add a glottal stop\r\n else: # otherwise...\r\n nuc = nuclei[nucl] # there's your nucleus \r\n else: \r\n nuc = nuclei[nucl] # there's your nucleus \r\n else: # otherwise...\r\n nuc = nuclei[nucl] # there's your nucleus\r\n \r\n elif nucl in onglides and ons != u'kw': # if there is an onglide...\r\n nuc = onglides[nucl] # modify the nuc accordingly\r\n if ons: # if there is an onset...\r\n ons = ons+u'w' # labialize it, but...\r\n else: # if there is no onset...\r\n ons = u'w' # add a labiovelar onset \r\n\r\n elif nucl in onglides and ons == u'kw': \r\n nuc = onglides[nucl]\r\n \r\n elif nucl in onoffglides:\r\n cod = onoffglides[nucl][-1]\r\n nuc = onoffglides[nucl][0:-1]\r\n if ons != u'kw':\r\n if ons:\r\n ons = ons+u'w'\r\n else:\r\n ons = u'w'\r\n elif nucl in offglides:\r\n cod = offglides[nucl][-1]\r\n nuc = offglides[nucl][:-1]\r\n \r\n elif word in gi: # if word == 'gi', 'gì',...\r\n ons = gi[word][0]\r\n nuc = gi[word][1]\r\n\r\n elif word in qu: # if word == 'quy', 'qúy',...\r\n ons = qu[word][:-1]\r\n nuc = qu[word][-1]\r\n \r\n else: \r\n # Something is non-Viet\r\n return (None, None, None, None)\r\n\r\n\r\n # Velar Fronting (Northern dialect)\r\n if dialect == 'n':\r\n if nuc == u'a':\r\n if cod == u'k' and cOffset == 2: nuc = u'ɛ'\r\n if cod == u'ɲ' and nuc == u'a': nuc = u'ɛ'\r\n\r\n # Final palatals (Northern dialect)\r\n if nuc not in [u'i', u'e', u'ɛ']:\r\n if cod == u'ɲ': \r\n cod = u'ɲ' # u'ŋ'\r\n elif palatals != 1 and nuc in [u'i', u'e', u'ɛ']:\r\n if cod == u'ɲ': \r\n cod = u'ɲ'#u'ŋ'\r\n if palatals == 1:\r\n if cod == u'k' and nuc in [u'i', u'e', u'ɛ']: \r\n cod = u'c'\r\n\r\n # Velar Fronting (Southern and Central dialects)\r\n else:\r\n if nuc in [u'i', u'e']:\r\n if cod == u'k': cod = u't'\r\n if cod == u'ŋ': cod = u'n'\r\n\r\n # There is also this reverse fronting, see Thompson 1965:94 ff.\r\n elif nuc in [u'iə', u'ɯə', u'uə', u'u', u'ɯ', u'ɤ', u'o', u'ɔ', u'ă', u'ɤ̆']:\r\n if cod == u't': \r\n cod = u'k'\r\n if cod == u'n': cod = u'ŋ'\r\n\r\n # Monophthongization (Southern dialects: Thompson 1965: 86; Hoàng 1985: 181)\r\n if dialect == 's':\r\n if cod in [u'm', u'p']:\r\n if nuc == u'iə': nuc = u'i'\r\n if nuc == u'uə': nuc = u'u'\r\n if nuc == u'ɯə': nuc = u'ɯ'\r\n\r\n # Tones \r\n # Modified 20 Sep 2008 to fix aberrant 33 error\r\n tonelist = [tones[word[i]] for i in range(0,l) if word[i] in tones]\r\n if tonelist:\r\n ton = str(tonelist[len(tonelist)-1])\r\n else:\r\n if not (pham or cao):\r\n if dialect == 'c':\r\n ton = str('35')\r\n else:\r\n ton = str('33')\r\n else:\r\n ton = str('1')\r\n \r\n # Modifications for closed syllables\r\n if cOffset !=0:\r\n\r\n # Obstruent-final nang tones are modal voice\r\n if (dialect == 'n' or dialect == 's') and ton == u'21g' and cod in ['p', 't', 'k']:\r\n #if ton == u'21\\u02C0' and cod in ['p', 't', 'k']: # fixed 8 Nov 2016\r\n ton = u'21'\r\n\r\n # Modification for sắc in closed syllables (Northern and Central only)\r\n if ((dialect == 'n' and ton == u'24') or (dialect == 'c' and ton == u'13')) and cod in ['p', 't', 'k']:\r\n ton = u'45'\r\n\r\n # Modification for 8-tone system\r\n if cao == 1:\r\n if ton == u'5' and cod in ['p', 't', 'k']:\r\n ton = u'5b'\r\n if ton == u'6' and cod in ['p', 't', 'k']:\r\n ton = u'6b'\r\n\r\n # labialized allophony (added 17.09.08)\r\n if nuc in [u'u', u'o', u'ɔ']:\r\n if cod == u'ŋ':\r\n cod = u'ŋ͡m' \r\n if cod == u'k':\r\n cod = u'k͡p'\r\n\r\n return (ons, nuc, cod, ton)\r\n \r\ndef convert(word, dialect, glottal, pham, cao, palatals, 
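trans() in the viphoneme record resolves onsets by longest orthographic prefix (three characters for 'ngh', then two, then one) against the Cus_onsets table, and codas symmetrically by longest suffix. The prefix side of that longest-match rule, isolated with a small subset of the table:

onsets = {u'ngh': u'ŋ', u'ng': u'ŋ', u'nh': u'ɲ', u'n': u'n', u'th': u'tʰ', u't': u't'}

def match_onset(word):
    # Try the longest prefix first so 'ngh' is not split into 'ng' + 'h'.
    for size in (3, 2, 1):
        if word[:size] in onsets:
            return onsets[word[:size]], size  # IPA onset and consumed length
    return '', 0

print(match_onset(u'nghe'))  # ('ŋ', 3)
print(match_onset(u'tha'))   # ('tʰ', 2)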
delimit):\r\n \"\"\"Convert a single orthographic string to IPA.\"\"\"\r\n\r\n ons = ''\r\n nuc = ''\r\n cod = ''\r\n ton = 0\r\n seq = ''\r\n\r\n try:\r\n (ons, nuc, cod, ton) = trans(word, dialect, glottal, pham, cao, palatals)\r\n if None in (ons, nuc, cod, ton):\r\n seq = u'['+word+u']'\r\n else:\r\n seq = delimit+delimit.join(filter(None, (ons, nuc, cod, ton)))+delimit\r\n except (TypeError):\r\n pass\r\n\r\n return seq\r\n \r\n\r\n\r\n########################333\r\nfrom vinorm import *\r\nfrom underthesea import word_tokenize\r\nimport eng_to_ipa\r\n\r\nsyms=['ɯəj', 'ɤ̆j', 'ʷiə', 'ɤ̆w', 'ɯəw', 'ʷet', 'iəw', 'uəj', 'ʷen', 'tʰw', 'ʷɤ̆', 'ʷiu', 'kwi', 'ŋ͡m', 'k͡p', 'cw', 'jw', 'uə', 'eə', 'bw', 'oj', 'ʷi', 'vw', 'ăw', 'ʈw', 'ʂw', 'aʊ', 'fw', 'ɛu', 'tʰ', 'tʃ', 'ɔɪ', 'xw', 'ʷɤ', 'ɤ̆', 'ŋw', 'ʊə', 'zi', 'ʷă', 'dw', 'eɪ', 'aɪ', 'ew', 'iə', 'ɣw', 'zw', 'ɯj', 'ʷɛ', 'ɯw', 'ɤj', 'ɔ:', 'əʊ', 'ʷa', 'mw', 'ɑ:', 'hw', 'ɔj', 'uj', 'lw', 'ɪə', 'ăj', 'u:', 'aw', 'ɛj', 'iw', 'aj', 'ɜ:', 'kw', 'nw', 't∫', 'ɲw', 'eo', 'sw', 'tw', 'ʐw', 'iɛ', 'ʷe', 'i:', 'ɯə', 'dʒ', 'ɲ', 'θ', 'ʌ', 'l', 'w', '1', 'ɪ', 'ɯ', 'd', '∫', 'p', 'ə', 'u', 'o', '3', 'ɣ', '!', 'ð', 'ʧ', '6', 'ʒ', 'ʐ', 'z', 'v', 'g', 'ă', '_', 'æ', 'ɤ', '2', 'ʤ', 'i', '.', 'ɒ', 'b', 'h', 'n', 'ʂ', 'ɔ', 'ɛ', 'k', 'm', '5', ' ', 'c', 'j', 'x', 'ʈ', ',', '4', 'ʊ', 's', 'ŋ', 'a', 'ʃ', '?', 'r', ':', 'η', 'f', ';', 'e', 't', \"'\"]\r\n\r\ndef normEng (eng,delemit):\r\n return \"\"\r\n'''\r\n x= p.Text(eng)\r\n x.parse()\r\n PAR = str(x.bestParses()[0]).split(\"|\")\r\n SYL = x.syllables()\r\n if len(PAR) != len(SYL):\r\n print(\"check dif len: \", eng)\r\n result=\"/\"+\"/\".join(list(eng))\r\n return result\r\n result = \"\"\r\n for i,syl in enumerate(SYL):\r\n syllable = str(syl).replace(\"'\",\"\").replace(\"ː\",\"\").replace(\"ɑ\",\"a\")\r\n if PAR[i].lower().upper() == PAR[i]:\r\n result+=syllable+\"'5\"+\" \"\r\n else:\r\n result+=syllable+\"'1\"+\" \"\r\n result=result.rstrip(\" \")\r\n if delemit !=\"\":\r\n takemore=\"\"\r\n for r in result:\r\n if r in syms:\r\n takemore+=delemit+r\r\n result=takemore\r\n return result\r\n'''\r\ndef Parsing(listParse, text, delimit):\r\n undefine_symbol = \"'\"\r\n if listParse == \"default\":\r\n listParse=['ɯəj', 'ɤ̆j', 'ʷiə', 'ɤ̆w', 'ɯəw', 'ʷet', 'iəw', 'uəj', 'ʷen', 'tʰw', 'ʷɤ̆', 'ʷiu', 'kwi', 'ŋ͡m', 'k͡p', 'cw', 'jw', 'uə', 'eə', 'bw', 'oj', 'ʷi', 'vw', 'ăw', 'ʈw', 'ʂw', 'aʊ', 'fw', 'ɛu', 'tʰ', 'tʃ', 'ɔɪ', 'xw', 'ʷɤ', 'ɤ̆', 'ŋw', 'ʊə', 'zi', 'ʷă', 'dw', 'eɪ', 'aɪ', 'ew', 'iə', 'ɣw', 'zw', 'ɯj', 'ʷɛ', 'ɯw', 'ɤj', 'ɔ:', 'əʊ', 'ʷa', 'mw', 'ɑ:', 'hw', 'ɔj', 'uj', 'lw', 'ɪə', 'ăj', 'u:', 'aw', 'ɛj', 'iw', 'aj', 'ɜ:', 'kw', 'nw', 't∫', 'ɲw', 'eo', 'sw', 'tw', 'ʐw', 'iɛ', 'ʷe', 'i:', 'ɯə', 'dʒ', 'ɲ', 'θ', 'ʌ', 'l', 'w', '1', 'ɪ', 'ɯ', 'd', '∫', 'p', 'ə', 'u', 'o', '3', 'ɣ', '!', 'ð', 'ʧ', '6', 'ʒ', 'ʐ', 'z', 'v', 'g', 'ă', '_', 'æ', 'ɤ', '2', 'ʤ', 'i', '.', 'ɒ', 'b', 'h', 'n', 'ʂ', 'ɔ', 'ɛ', 'k', 'm', '5', ' ', 'c', 'j', 'x', 'ʈ', ',', '4', 'ʊ', 's', 'ŋ', 'a', 'ʃ', '?', 'r', ':', 'η', 'f', ';', 'e', 't', \"'\"]\r\n listParse.sort(reverse = True,key=len)\r\n output=\"\"\r\n skip=0\r\n for ic,char in enumerate(text):\r\n ##print(char,skip)\r\n check = 0\r\n if skip>0:\r\n skip=skip-1\r\n continue\r\n for l in listParse:\r\n \r\n if len(l) <= len(text[ic:]) and l == text[ic:ic+len(l)]:\r\n output+=delimit+l\r\n check =1\r\n skip=len(l)-1\r\n break\r\n if check == 0:\r\n #Case symbol not in list\r\n if str(char) in [\"ˈ\",\"ˌ\",\"*\"]:\r\n continue\r\n #print(\"this is not in symbol :\"+ char+\":\")\r\n 
output+=delimit+undefine_symbol\r\n return output.rstrip()+delimit\r\n\r\ndef T2IPA_split(text,delimit):\r\n sys.path.append('./Rules') # make sure we can find the Rules files\r\n #Setup option\r\n glottal = 0\r\n pham = 0 \r\n cao = 0\r\n palatals = 0\r\n tokenize = 0\r\n dialect='n' #\"c\"\"s\"\r\n tone_type=0\r\n if tone_type==0:\r\n pham=1\r\n else:\r\n cao=1\r\n #Input text\r\n line = text\r\n if line =='\\n':\r\n return \"\"\r\n else:\r\n compound = u''\r\n ortho = u'' \r\n words = line.split()\r\n ## toss len==0 junk\r\n words = [word for word in words if len(word)>0]\r\n ## hack to get rid of single hyphens or underscores\r\n words = [word for word in words if word!=u'-']\r\n words = [word for word in words if word!=u'_']\r\n for i in range(0,len(words)):\r\n word = words[i].strip()\r\n ortho += word\r\n word = word.strip(punctuation).lower()\r\n ## 29.03.16: check if tokenize is true\r\n ## if true, call this routine for each substring\r\n ## and re-concatenate \r\n if (tokenize and '-' in word) or (tokenize and '_' in word):\r\n substrings = re.split(r'(_|-)', word)\r\n values = substrings[::2]\r\n delimiters = substrings[1::2] + ['']\r\n ipa = [convert(x, dialect, glottal, pham, cao, palatals, delimit).strip() for x in values]\r\n seq = ''.join(v+d for v,d in zip(ipa, delimiters))\r\n else:\r\n seq = convert(word, dialect, glottal, pham, cao, palatals, delimit).strip()\r\n # concatenate\r\n if len(words) >= 2:\r\n ortho += ' '\r\n if i < len(words)-1:\r\n seq = seq+u' '\r\n compound = compound + seq\r\n return compound\r\ndef T2IPA(text):\r\n sys.path.append('./Rules') # make sure we can find the Rules files\r\n #Setup option\r\n glottal = 0\r\n pham = 0 \r\n cao = 0\r\n palatals = 0\r\n tokenize = 0\r\n delimit = ''\r\n dialect='n' #\"c\"\"s\"\r\n tone_type=0\r\n if tone_type==0:\r\n pham=1\r\n else:\r\n cao=1\r\n #Input text\r\n line = text\r\n if line =='\\n':\r\n return \"\"\r\n else:\r\n compound = u''\r\n ortho = u'' \r\n words = line.split()\r\n ## toss len==0 junk\r\n words = [word for word in words if len(word)>0]\r\n ## hack to get rid of single hyphens or underscores\r\n words = [word for word in words if word!=u'-']\r\n words = [word for word in words if word!=u'_']\r\n for i in range(0,len(words)):\r\n word = words[i].strip()\r\n ortho += word\r\n word = word.strip(punctuation).lower()\r\n ## 29.03.16: check if tokenize is true\r\n ## if true, call this routine for each substring\r\n ## and re-concatenate \r\n if (tokenize and '-' in word) or (tokenize and '_' in word):\r\n substrings = re.split(r'(_|-)', word)\r\n values = substrings[::2]\r\n delimiters = substrings[1::2] + ['']\r\n ipa = [convert(x, dialect, glottal, pham, cao, palatals, delimit).strip() for x in values]\r\n seq = ''.join(v+d for v,d in zip(ipa, delimiters))\r\n else:\r\n seq = convert(word, dialect, glottal, pham, cao, palatals, delimit).strip()\r\n # concatenate\r\n if len(words) >= 2:\r\n ortho += ' '\r\n if i < len(words)-1:\r\n seq = seq+u' '\r\n compound = compound + seq\r\n return compound\r\n\r\nEN={\"a\":\"ây\",\"ă\":\"á\",\"â\":\"ớ\",\"b\":\"bi\",\"c\":\"si\",\"d\":\"đi\",\"đ\":\"đê\",\"e\":\"i\",\"ê\":\"ê\",\"f\":\"ép\",\"g\":\"giy\",\"h\":\"ếch\",\"i\":\"ai\",\"j\":\"giây\",\"k\":\"cây\",\"l\":\"eo\",\"m\":\"em\",\"n\":\"en\",\"o\":\"âu\",\"ô\":\"ô\",\"ơ\":\"ơ\",\"p\":\"pi\",\"q\":\"kiu\",\"r\":\"a\",\"s\":\"ét\",\"t\":\"ti\",\"u\":\"diu\",\"ư\":\"ư\",\"v\":\"vi\",\"w\":\"đắp liu\",\"x\":\"ít\",\"y\":\"quai\",\"z\":\"giét\"}\r\nimport re\r\ndef vi2IPA_split(texts,delimit):\r\n 
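Parsing() performs a greedy maximal-munch segmentation: the symbol inventory is sorted longest-first and a skip counter advances the cursor past multi-character matches, so 'tʰ' beats its one-character prefix 't'. An equivalent sketch that replaces the skip-counter bookkeeping with direct index arithmetic:

def parse(symbols, text, delimit='/'):
    # Longest symbols first, so multi-character phonemes win over their prefixes.
    symbols = sorted(symbols, key=len, reverse=True)
    out, i = [], 0
    while i < len(text):
        for s in symbols:
            if text.startswith(s, i):
                out.append(s)
                i += len(s)
                break
        else:
            out.append("'")  # undefined-symbol placeholder, as in the record
            i += 1
    return delimit + delimit.join(out) + delimit

print(parse(['tʰ', 't', 'a', 'ŋ'], 'tʰaŋ'))  # /tʰ/a/ŋ/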
content=[]\r\n with open(imp.find_module('viphoneme')[1]+\"/Popular.txt\",encoding=\"utf-8\") as f:\r\n content=f.read().splitlines()\r\n tess = texts.split(\".\")\r\n Results =\"\"\r\n for text in tess:\r\n #print(\"------------------------------------------------------\")\r\n TN= TTSnorm(text)\r\n #TN=text\r\n #print(\"------------------------------------------------------\")\r\n #print(\"Text normalize: \",TN)\r\n TK= word_tokenize(TN)\r\n #print(\"Vietnamese Tokenize: \",TK)\r\n\r\n \r\n for iuv,under_valid in enumerate(TK):\r\n token_under=under_valid.split(\" \")\r\n checkinvalid=0\r\n ##print(token_under)\r\n if len(token_under) >1:\r\n for tok in token_under:\r\n if tok not in content or \"[\" in T2IPA(tok):\r\n checkinvalid=1\r\n if checkinvalid==1:\r\n TK = TK[:iuv] + TK[iuv+1 :]\r\n for tok in reversed(token_under):\r\n TK.insert(iuv, tok)\r\n\r\n IPA=\"\"\r\n\r\n for tk in TK:\r\n ipa = T2IPA_split(tk,delimit).replace(\" \",\"_\")\r\n if ipa ==\"\":\r\n IPA+=delimit+tk+delimit+\" \"\r\n elif ipa[0]==\"[\" and ipa[-1]==\"]\":\r\n eng = eng_to_ipa.convert(tk)\r\n if eng[-1] == \"*\":\r\n if tk.lower().upper() == tk:\r\n ##print(\"ENGLISH\",tk)\r\n #Đọc tiếng anh từng chữ\r\n letter2sound=\"\"\r\n for char in tk:\r\n CHAR = str(char).lower()\r\n if CHAR in list(EN.keys()):\r\n letter2sound+=EN[CHAR]+\" \"\r\n else:\r\n letter2sound+=char+\" \"\r\n IPA+=T2IPA_split(letter2sound,delimit)+\" \"\r\n else:\r\n #Giữ nguyên\r\n #Future: test experiment\" Nếu từ unknow có thể dùng eng_norm để chuyển qua thay thế chứ không cần giữ nguyên như này\r\n IPA+=Parsing(\"default\",tk.lower(),delimit)+\" \"\r\n else:\r\n #This use for version english not splited by syllable\r\n #IPA+=Parsing(\"default\",eng,delimit)+\" \"\r\n #This version will split english to each syllable\r\n IPA+=normEng(tk,delimit)+ delimit+\" \"\r\n\r\n\r\n #Check tu dien tieng anh Etrain bưc\r\n #Neu co Mapping\r\n #Neu khong, check co nguyen am\r\n #Neu co de nguyen\r\n #Neu khong danh van\r\n #print(\" ..................Out of domain word: \" ,ipa)\r\n else:\r\n IPA+=ipa+\" \"\r\n IPA=re.sub(delimit+'+', delimit, IPA)\r\n IPA=re.sub(' +', ' ', IPA)\r\n #print(\"IPA Vietnamese: \",IPA)\r\n #print(\"------------------------------------------------------\")\r\n Results+= IPA.rstrip()+\" \"+delimit+\".\"+delimit+\" \"\r\n\r\n \r\n return Results.rstrip()\r\ndef vi2IPA(text):\r\n #print(\"------------------------------------------------------\")\r\n TN= TTSnorm(text)\r\n #print(\"------------------------------------------------------\")\r\n #print(\"Text normalize: \",TN)\r\n TK= word_tokenize(TN)\r\n #print(\"Vietnamese Tokenize: \",TK)\r\n IPA=\"\"\r\n for tk in TK:\r\n ipa = T2IPA(tk).replace(\" \",\"_\")\r\n if ipa ==\"\":\r\n IPA+=tk+\" \"\r\n elif ipa[0]==\"[\" and ipa[-1]==\"]\":\r\n eng = eng_to_ipa.convert(tk)\r\n if eng[-1] == \"*\":\r\n if tk.lower().upper() == tk:\r\n #Đọc tiếng anh từng chữ\r\n letter2sound=\"\"\r\n for char in tk:\r\n CHAR = str(char).lower()\r\n if CHAR in list(EN.keys()):\r\n letter2sound+=EN[CHAR]+\" \"\r\n else:\r\n letter2sound+=char+\" \"\r\n IPA+=T2IPA_split(letter2sound,\"\")+\" \"\r\n else:\r\n #Giữ nguyên\r\n IPA+=Parsing(\"default\",tk,\"\")+\" \"\r\n else:\r\n IPA+=eng+\" \"\r\n #Check tu dien tieng anh Etrain bưc\r\n #Neu co Mapping\r\n #Neu khong, check co nguyen am\r\n #Neu co de nguyen\r\n #Neu khong danh van\r\n #print(\" ..................Out of domain word: \" ,ipa)\r\n else:\r\n IPA+=ipa+\" \"\r\n IPA=re.sub(' +', ' ', IPA)\r\n #print(\"IPA Vietnamese: \",IPA)\r\n 
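vi2IPA_split's fallback chain for out-of-vocabulary tokens is: try eng_to_ipa; if that fails and the token has no lowercase letters (the tk.lower().upper() == tk test), spell it letter by letter with the EN map of Vietnamese letter names. (Note the record also calls imp.find_module without ever importing imp.) The spelling step on its own, with a three-letter subset of the map:

# Tiny subset of the record's EN letter-name map (Vietnamese readings).
EN = {'u': 'diu', 's': 'ét', 'b': 'bi'}

def spell_out(token):
    # Mirrors the all-caps branch: read each letter by its Vietnamese name.
    return ' '.join(EN.get(char, char) for char in token.lower())

if 'USB'.lower().upper() == 'USB':  # true only for tokens with no lowercase letters
    print(spell_out('USB'))  # diu ét bi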
#print(\"------------------------------------------------------\")\r\n return IPA\r\n","sub_path":"viphoneme/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":30519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"301219340","text":"import tensorflow as tf\r\nimport numpy as np\r\nimport pickle\r\n\r\n\r\ndef count_parameters(trained_vars):\r\n total_parameters = 0\r\n print('=' * 100)\r\n for variable in trained_vars:\r\n variable_parameters = 1\r\n for dim in variable.get_shape():\r\n variable_parameters *= dim.value\r\n print('{:70} {:20} params'.format(variable.name, variable_parameters))\r\n print('-' * 100)\r\n total_parameters += variable_parameters\r\n print('=' * 100)\r\n print(\"total trainable parameters: %d\" % total_parameters)\r\n print('=' * 100)\r\n\r\n\r\ndef load_vocab(vocab_file):\r\n print('loading vocabulary ...')\r\n with open(vocab_file, 'rb') as f:\r\n word_dict = pickle.load(f)\r\n print('vocab size = %d' % len(word_dict))\r\n return word_dict\r\n\r\n\r\ndef vectorize(docs):\r\n sequence_lengths = [len(doc) for doc in docs]\r\n max_sequence_length = np.max(sequence_lengths)\r\n padded_docs = np.zeros(shape=[len(docs), max_sequence_length], dtype=np.int32)\r\n padded_docs_mask = np.zeros(shape=[len(docs), max_sequence_length], dtype=np.float32)\r\n for i, doc in enumerate(docs):\r\n padded_docs[i, :sequence_lengths[i]] = doc\r\n padded_docs_mask[i, :sequence_lengths[i]] = 1.0\r\n\r\n return padded_docs, padded_docs_mask, sequence_lengths, max_sequence_length\r\n\r\n\r\ndef load_glove(glove_file, embedding_size, vocab):\r\n print('loading glove pre-trained word embeddings ...')\r\n embedding_weights = {}\r\n f = open(glove_file, encoding='utf-8')\r\n for line in f:\r\n values = line.split()\r\n word = values[0]\r\n vector = np.asarray(values[1:], dtype='float32')\r\n embedding_weights[word] = vector\r\n f.close()\r\n print('total {} word vectors in {}'.format(len(embedding_weights), glove_file))\r\n\r\n embedding_matrix = np.random.uniform(-0.5, 0.5, (len(vocab), embedding_size)) / embedding_size\r\n\r\n oov_count = 0\r\n for word, i in vocab.items():\r\n embedding_vector = embedding_weights.get(word)\r\n if embedding_vector is not None:\r\n embedding_matrix[i] = embedding_vector\r\n else:\r\n oov_count += 1\r\n print('number of OOV words = %d' % oov_count)\r\n\r\n return embedding_matrix\r\n","sub_path":"fasttext/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"186235222","text":"import os\nimport sys\nimport json\nimport shutil\n\n\nclass Build:\n def __init__(\n self,\n build_type='develop', rebuild=False,\n download_model=True, model_path='../model_weights',\n develop=False,\n gitee=False\n ):\n \"\"\"\n build_type: what goes after python setup.py\n download_model: weather you wantt o downloadt he model\n model_path: where you want to store the downloaded models\n develop: weather you want to install packages for jobs other than inferencing\n \"\"\"\n self.python_executable = sys.executable\n self.cwd = os.path.split(__file__)[0]\n os.chdir(self.cwd)\n self.opt = {\n 'build_type': build_type,\n 'develop': develop,\n 'download_model': download_model,\n 'model_path': os.path.abspath(model_path),\n 'gitee': gitee,\n 'rebuild': rebuild\n }\n # Make sure building tools are there\n self.pip_install(['pip', 'setuptools', 'wheel'])\n # Packages that all algorithms 
will use\n self.pip_install(['numpy', 'opencv-python', 'Pillow', 'torch', 'torchvision'])\n # Create folder for model\n if download_model:\n os.makedirs(self.opt['model_path'], exist_ok=True)\n\n @staticmethod # Internal\n def terms_to_delete(path: str):\n return ['dist', 'build', [fil for fil in os.listdir(path) if fil[-9:] == '.egg-info'][0]]\n\n # Internal\n def pip_install(self, libs: list):\n if isinstance(libs, str):\n libs = [libs]\n if libs:\n os.system(f\"{self.python_executable} -m pip install -U {' '.join(libs)}\")\n\n def download_from_google_drive(self, link, save_dir):\n try:\n from utils.download import download_file_from_google_drive\n except ModuleNotFoundError:\n self.pip_install(['requests', 'tqdm'])\n from utils.download import download_file_from_google_drive\n download_file_from_google_drive(link, save_dir)\n\n # Algorithms\n def BasicSR(self, cuda_extensions=None, **kwargs):\n # Resolve options\n opt = self.opt\n opt.update(kwargs)\n if cuda_extensions is None:\n import torch\n cuda_extensions = True if torch.cuda.is_available() else False\n # Install packages\n self.pip_install(['addict', 'future', 'lmdb', 'pyyaml', 'requests', 'scikit-image', 'scipy', 'tb-nightly', 'tqdm', 'yapf'])\n if opt['develop']:\n pass\n os.chdir('../third_party')\n if os.path.exists('BasicSR') and opt['rebuild']:\n shutil.rmtree('BasicSR')\n os.system(f\"git clone https://{'gitee' if opt['gitee'] else 'github'}.com/xinntao/BasicSR.git\")\n os.chdir('BasicSR')\n os.system(f\"{self.python_executable} setup.py {opt['build_type']}{'' if cuda_extensions else ' --no_cuda_ext'}\")\n # Download models\n links = {\n 'EDVR': [\n ('1LGhWdzAIu818_IDptIUBGCBJoE11jQLk', 'official_L_deblur_REDS.pth'),\n ('1eEWNZCCL17cf-G4yKF65rjV8Yy4eXnwM', 'official_L_deblurcomp_REDS.pth'),\n ('1C6tFY8CjjLaGqpPddWRrgqRThNVd9DZD', 'official_L_x4_SR_REDS.pth'),\n ('1ehwhFsVG8WCJ5tTfJRCpYzexPrB-ru5e', 'official_L_x4_SR_Vimeo90K.pth'),\n ('1WUwcPvp6rHrgxgfUtByfoosVUZ7w0i-N', 'official_L_x4_SRblur_REDS.pth'),\n ('1ddnMOCu87T_WbUFNvY0yihs44cViHvoY', 'official_M_woTSA_x4_SR_REDS.pth'),\n ('1scZpjI0iMRXdNSklR5j5Ei3mXbzIES9r', 'official_M_x4_SR_REDS.pth')\n ],\n 'esrgan': [\n ('1ZZUHpIHdK2WijNiiV_QyFooJV9SuEgk1', 'official_ESRGAN_x4_old_arch.pth'),\n ('1AIyRcdAHj4l-pwTfUHaSoOgy2L2uuphN', 'official_ESRGAN_x4.pth'),\n ('1r9CEwpWaBQvFjuEJk7J8rDP9cnuOwhgK', 'official_PSNR_SRx4_DF2K.pth'),\n ('1l48p8GCErCrg_p3zFBNCjJ7Jc21eP-vb', 'official_PSNR_x4_old_arch.pth'),\n ('1SWZDffT4iZJ3ufsPBSbIRcPCcTGbM3vw', 'official_PSNR_x4.pth'),\n ('1qSSyzbxnnRgH11DGEXpcrSma2fCLRXfK', 'official_SR_x4_DF2KOST.pth')\n ]\n }\n # Resolve download_model\n opt['download_model'] = links.keys() if opt['download_model'] == True else []\n for a in opt['download_model']:\n if a in links.keys():\n os.makedirs(f'{opt[\"model_path\"]}/{a}', exist_ok=True)\n for link in links[a]:\n self.download_from_google_drive(link[0], f'{opt[\"model_path\"]}/{a}/{link[1]}')\n os.chdir(self.cwd)\n\n def SSM(self, **kwargs):\n # Resolve options\n opt = self.opt\n opt.update(kwargs)\n if opt['develop']:\n self.pip_install(['click', 'tensorboardX'])\n if opt['download_model']:\n os.makedirs(f'{opt[\"model_path\"]}/SSM', exist_ok=True)\n self.download_from_google_drive('10cOGtYTheDg2rF3geLtOUYvYyMjfCUct', f'{opt[\"model_path\"]}/SSM/official.pth')\n\n def DAIN_all_in_one(self, cc=None, **kwargs):\n # Resolve options\n opt = self.opt\n opt.update(kwargs)\n if cc is None:\n import torch\n cc = ['%d%d' % torch.cuda.get_device_capability()]\n if opt['develop']:\n 
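Build.pip_install shells out through sys.executable -m pip so packages land in the exact environment running the builder. The same pattern with subprocess.check_call, which fails loudly where the record's os.system return code is silently ignored; this is a sketch of an alternative, not the record's own code:

import subprocess
import sys

def pip_install(libs):
    if isinstance(libs, str):
        libs = [libs]
    if libs:
        # -m pip ties the install to this interpreter's site-packages.
        subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-U', *libs])

# pip_install(['numpy'])  # example call, commented out to keep the sketch side-effect free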
self.pip_install(['bisect'])\n os.chdir('vfin/dain')\n # Write compiler args\n nvcc_args = []\n for cc_ in cc:\n nvcc_args.append('-gencode')\n nvcc_args.append(f'arch=compute_{cc_},code=sm_{cc_}')\n nvcc_args.append('-w')\n with open('compiler_args.json', 'w') as f:\n json.dump({\n 'develop': opt['develop'],\n 'extra_compile_args': {'nvcc': nvcc_args, 'cxx': ['-std=c++14', '-w']}\n }, f)\n print(f'Compiling for compute compatibility {cc}')\n # Compile\n # DAIN's package\n os.system(f'{self.python_executable} setup.py {opt[\"build_type\"]}')\n \"\"\"\n os.chdir('my_package')\n packages = ['DepthFlowProjection', 'FilterInterpolation', 'FlowProjection']\n if opt['develop']:\n packages.extend(['InterpolationCh', 'SeparableConv', 'SeparableConvFlow', 'MinDepthFlowProjection', 'Interpolation'])\n for folder in packages:\n os.chdir(f\"{'' if folder == packages[0] else '../'}{folder}\")\n os.system(f'{self.python_executable} setup.py {opt[\"build_type\"]}')\n if opt['build_type'] == 'install':\n for file_to_delete in self.terms_to_delete('.'):\n shutil.rmtree(file_to_delete)\n # PWCNet\n os.chdir('../../PWCNet/correlation_package_pytorch1_0')\n os.system(f'{self.python_executable} setup.py {opt[\"build_type\"]}')\n if opt['build_type'] == 'install':\n for file_to_delete in self.terms_to_delete('.'):\n shutil.rmtree(file_to_delete)\n os.chdir('../..')\n \"\"\"\n if opt['build_type'] == 'install':\n for file_to_delete in self.terms_to_delete('.'):\n shutil.rmtree(file_to_delete)\n os.remove('compiler_args.json')\n os.chdir(self.cwd)\n # Download model\n if opt['download_model']:\n os.makedirs(f'{opt[\"model_path\"]}/DAIN', exist_ok=True)\n self.download_from_google_drive('1r-gVVu6oxCSZyBij4d4tPtssifGZlG5X', f'{opt[\"model_path\"]}/DAIN/dain_app_experimental.pth')\n self.download_from_google_drive('1vxRb52qyJt3J_AJzzA1LiEdfPEyf9bXf', f'{opt[\"model_path\"]}/DAIN/official.pth')\n\n def DAIN(self, cc=None, **kwargs):\n # Resolve options\n opt = self.opt\n opt.update(kwargs)\n if cc is None:\n import torch\n cc = ['%d%d' % torch.cuda.get_device_capability()]\n if opt['develop']:\n self.pip_install(['bisect'])\n os.chdir('vfin/dain')\n # Write compiler args\n nvcc_args = []\n for cc_ in cc:\n nvcc_args.append('-gencode')\n nvcc_args.append(f'arch=compute_{cc_},code=sm_{cc_}')\n nvcc_args.append('-w')\n with open('compiler_args.json', 'w') as f:\n json.dump({'nvcc': nvcc_args, 'cxx': ['-std=c++14', '-w']}, f)\n print(f'Compiling for compute compatibility {cc}')\n # Compile\n # DAIN's package\n os.chdir('my_package')\n packages = ['DepthFlowProjection', 'FilterInterpolation', 'FlowProjection']\n if opt['develop']:\n packages.extend(['InterpolationCh', 'SeparableConv', 'SeparableConvFlow', 'MinDepthFlowProjection', 'Interpolation'])\n for folder in packages:\n os.chdir(f\"{'' if folder == packages[0] else '../'}{folder}\")\n os.system(f'{self.python_executable} setup.py {opt[\"build_type\"]}')\n if opt['build_type'] == 'install':\n for file_to_delete in self.terms_to_delete('.'):\n shutil.rmtree(file_to_delete)\n # PWCNet\n os.chdir('../../PWCNet/correlation_package_pytorch1_0')\n os.system(f'{self.python_executable} setup.py {opt[\"build_type\"]}')\n if opt['build_type'] == 'install':\n for file_to_delete in self.terms_to_delete('.'):\n shutil.rmtree(file_to_delete)\n os.chdir('../..')\n os.remove('compiler_args.json')\n os.chdir(self.cwd)\n # Download model\n if opt['download_model']:\n os.makedirs(f'{opt[\"model_path\"]}/DAIN', exist_ok=True)\n 
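Both DAIN builders compile their CUDA extensions for the local GPU by expanding each compute capability (from torch.cuda.get_device_capability()) into an nvcc '-gencode arch=compute_XX,code=sm_XX' pair. The flag construction in isolation; the capability value used here is an assumption that depends on the GPU:

cc = ['75']  # e.g. '%d%d' % torch.cuda.get_device_capability() on a Turing card

nvcc_args = []
for cc_ in cc:
    nvcc_args += ['-gencode', f'arch=compute_{cc_},code=sm_{cc_}']
nvcc_args.append('-w')  # suppress warnings, as the record does

print(nvcc_args)  # ['-gencode', 'arch=compute_75,code=sm_75', '-w']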
self.download_from_google_drive('1r-gVVu6oxCSZyBij4d4tPtssifGZlG5X', f'{opt[\"model_path\"]}/DAIN/dain_app_experimental.pth')\n self.download_from_google_drive('1vxRb52qyJt3J_AJzzA1LiEdfPEyf9bXf', f'{opt[\"model_path\"]}/DAIN/official.pth')\n\n def DeOldify(self, download_model=True):\n os.chdir('plugins')\n os.system(f'{self.python_executable} -m pip install ')\n if download_model:\n pass\n","sub_path":"vrt/builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":9882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"361138313","text":"# training/utilities/nlpAPI.py - utility calls to the NLP back-end API\n#\n# Copyright © Mirinae Corp., John Wainwright 2020\n#\nimport json\nimport http.client, urllib.parse\n\ndef extractVerbPhrase(sourceText, options=None): # 'endingForm'):\n \"call the NLP API to extract sentence-final verb-phrase and attendant form data from given sourceText\"\n\n # set up the call\n if options is None:\n options = dict(phraseForm='verbPhrase')\n body = json.dumps(dict(sourceText=sourceText, options=options))\n\n headers = {\"Content-Type\": \"application/json; charset=utf-8\",\n \"Accept\": \"application/json; charset=utf-8\",\n \"Cache-Control\": \"no-cache\",\n \"Content-Length\": str(len(body))\n }\n try:\n conn = http.client.HTTPSConnection(\"alpha.mirinae.io\")\n # local test: conn = http.client.HTTPConnection(\"localhost:2000\")\n conn.request(\"POST\", \"/api/nlp/extractverbphrase\", body, headers)\n response = conn.getresponse()\n except:\n # server down?\n return dict(success=False, error=\"Server not responding\")\n #\n if response.status != 200:\n failReason = response.reason\n return dict(success=False, status=response.status, error=response.reason)\n else:\n try:\n data = response.read()\n return json.loads(data.decode('utf-8'))\n except:\n return dict(success=False, error=\"Illegal JSON response\")\n\nif __name__ == \"__main__\":\n\n # test it\n result = extractVerbPhrase(\"나는 자전거를 탈 수 있을 것이야. 나는 배가 고파.\")\n print(result)","sub_path":"pipeline/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"545648627","text":"#from pylab import *\nimport numpy as np\nfrom vgg_rq import vgg_rq\n\n#VGG_KR_FROM_P Extract K, R from camera matrix.\n#\n# [K,R,t] = VGG_KR_FROM_P(P [,noscale]) finds K, R, t such that P = K*R*[eye(3) -t].\n# It is det(R)==1.\n# K is scaled so that K[2,2] == 1 and K[0,0 ] > 0. 
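extractVerbPhrase in the nlpAPI record POSTs JSON over raw http.client with a hand-set Content-Length header. A pared-down sketch of the same call shape against the record's endpoint (its availability is not assumed); encoding the body to UTF-8 bytes up front keeps Content-Length byte-accurate even if ensure_ascii were disabled:

import http.client
import json

payload = {'sourceText': '나는 배가 고파.', 'options': {'phraseForm': 'verbPhrase'}}
body = json.dumps(payload).encode('utf-8')  # Content-Length counts bytes, not chars
headers = {
    'Content-Type': 'application/json; charset=utf-8',
    'Accept': 'application/json; charset=utf-8',
    'Content-Length': str(len(body)),
}
conn = http.client.HTTPSConnection('alpha.mirinae.io')
conn.request('POST', '/api/nlp/extractverbphrase', body, headers)
response = conn.getresponse()
if response.status == 200:
    print(json.loads(response.read().decode('utf-8')))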
Optional parameter noscale prevents this.\n#\n# Works also generally for any P of size N-by-(N+1).\n# Works also for P of size N-by-N, then t is not computed.\n\n\n# Author: Andrew Fitzgibbon \n# Modified by werner.\n# Date: 15 May 98\n\ndef vgg_KR_from_P(P, noscale=False):\n \n N = np.shape(P)[0]\n H = P[:, :N]\n \n K, R = vgg_rq(H)\n \n if not noscale:\n K = K / K[N-1, N-1]\n if K[0,0] < 0:\n D = np.diag(np.hstack((np.array([-1,-1]), np.ones(N-2))))\n K = np.dot(K, D)\n R = np.dot(D, R)\n \n t = np.linalg.lstsq(-P[:,0:N], P[:, -1], rcond=None)[0]\n \n return K, R, t","sub_path":"exercise6/exercise6/Python/vgg_KR_from_P.py","file_name":"vgg_KR_from_P.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"98316503","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 12 20:04:17 2021\n\n@author: amandaseger\n\"\"\"\nfrom AdvertisingEnvironment.BiddingEnvironment import *\nfrom AdvertisingEnvironment.Learner import *\nfrom sklearn.gaussian_process import GaussianProcessRegressor\nfrom sklearn.gaussian_process.kernels import RBF, ConstantKernel as C\n\nclass GPTS_Learner(Learner):\n def __init__(self, n_arms, arms):\n super().__init__(n_arms)\n self.arms = arms\n self.means = np.zeros(n_arms)\n self.sigmas = np.ones(n_arms) * 10\n self.pulled_arms = []\n alpha = 10.0\n kernel = C(1.0, (1e-3, 1e3)) * RBF(1.0, (1e-3, 1e3))\n self.gp = GaussianProcessRegressor(kernel=kernel, alpha = alpha**2, normalize_y=True, n_restarts_optimizer=10)\n\n \n def update_observations(self, pulled_arm, reward):\n super().update_observations(pulled_arm, reward)\n self.pulled_arms.append(self.arms[pulled_arm])\n \n def update_model(self):\n x = np.atleast_2d(self.pulled_arms).T\n y = self.collected_rewards\n self.gp.fit(x,y)\n \n x_pred = np.atleast_2d(self.arms).T\n self.means, self.sigmas = self.gp.predict(x_pred, return_std=True)\n self.sigmas = np.maximum(self.sigmas, 1e-2)\n \n def update(self, pulled_arm, reward):\n self.t += 1\n self.update_observations(pulled_arm, reward)\n self.update_model()\n \n \n def pull_arm(self):\n idx = np.argmax(np.random.normal(self.means, self.sigmas))\n return idx ","sub_path":"PricingAdvertising/AdvertisingEnvironment/GPTS_Learner.py","file_name":"GPTS_Learner.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"528071037","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 24 01:49:00 2019\r\n\r\n@author: User\r\n\"\"\"\r\n\r\nfrom neuralStellar import *\r\nfrom datetime import datetime\r\nimport os\r\n\r\ntime_now=datetime.now().strftime(\"%Y-%m-%d_%H%M%S\")\r\nfolder_name='Hin_gridNN_outputs_'+time_now\r\nos.mkdir(folder_name)\r\n\r\nfile='grid_0_0.csv'\r\nsmall_grid=stellarGrid(file)\r\nsmall_grid.buildIndex()\r\nsmall_grid.popIndex(['','star_mass','star_age','star_feh','star_MLT','effective_T','luminosity','delta_nu'],\r\n proper=['step','mass','age','feh','MLT','Teff','L','delnu'])\r\nsmall_grid.initialData()\r\n\r\nin_dex=['mass','age','feh','MLT']\r\nout_dex=['L','Teff','delnu']\r\nx_in=small_grid.fetchData('evo',in_dex)\r\ny_out=small_grid.fetchData('evo',out_dex)\r\nx_in, y_out=shuffleInputs(x_in,y_out)\r\nm1=NNmodel('evo',in_dex, out_dex)\r\nm1.buildModel([len(x_in),len(y_out)], 8, 128, reg=['l2',0.0001])\r\nm1.compileModel(0.001,'MAE',metrics=['MAE','MSE'], beta_1=0.9999, beta_2=0.999)\r\nm1.fitModel(x_in, y_out, 500000, 
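vgg_KR_from_P recovers K, R, t with P = K·R·[I | −t]: the left 3×3 block is RQ-decomposed and t solves −P[:,0:3]·t = P[:,3]. A numerical sanity check of that convention, assuming scipy.linalg.rq in place of the record's own vgg_rq helper, and forcing the whole diagonal of K positive (slightly stronger than the record's K[0,0] sign check):

import numpy as np
from scipy.linalg import rq

# Build a camera from known K, R, t, then decompose its left 3x3 block H = K @ R.
K = np.array([[800., 0., 320.],
              [0., 800., 240.],
              [0., 0., 1.]])
c, s = np.cos(0.1), np.sin(0.1)
R = np.array([[c, -s, 0.], [s, c, 0.], [0., 0., 1.]])
t = np.array([1., 2., 3.])
P = K @ R @ np.hstack([np.eye(3), -t[:, None]])

K2, R2 = rq(P[:, :3])               # H = K2 @ R2, K2 upper triangular
D = np.diag(np.sign(np.diag(K2)))   # RQ signs are free; force a positive diagonal
K2, R2 = K2 @ D, D @ R2             # D @ D == I, so the product is unchanged
K2 = K2 / K2[2, 2]                  # same scaling the record applies

t2 = np.linalg.lstsq(-P[:, :3], P[:, 3], rcond=None)[0]
print(np.allclose(K2, K), np.allclose(R2, R), np.allclose(t2, t))  # True True True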
len(x_in[0]),folder_name+'/small_grid_model.h5', keep_log=False)\r\nm1.saveHist(folder_name+'/trainHistoryDict')","sub_path":"Hin's_files/GPU_runs/small_grid_28.py","file_name":"small_grid_28.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"164742458","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom data.data_loader import data_loader, DataLoader, Utterance\nfrom satisfaction import configuration\nfrom satisfaction.feature_extracter import Feature_Extracter\nfrom satisfaction.experimenter import Experimenter\nimport numpy as np\nimport os\n\n__author__ = \"Rui Meng\"\n__email__ = \"rui.meng@pitt.edu\"\n\nif __name__ == '__main__':\n # initialize\n config = configuration.load_config()\n extractor = Feature_Extracter(config)\n exp = Experimenter(config)\n\n best_results = []\n # iterate each dataset\n for data_name in config['data_names']:\n config.param['data_name'] = data_name\n\n config.logger.info('*' * 50)\n config.logger.info('-' * 20 + data_name + '-' * 20)\n config.logger.info('*' * 50)\n # initialize data_loader\n loader = data_loader(data_name, {'config': config})\n\n # load raw and annotated data\n all_sessions = loader()\n session_ids, annotated_sessions = loader.load_annotated_data()\n loader.stats()\n\n # train and test\n X_raw, Y = extractor.split_to_instances(annotated_sessions)\n X = extractor.extract()\n result = exp.run_cross_validation(X, Y)\n\n # find the best classifier (with best F1-score)\n result = result[np.asarray(result).T[4].argmax()]\n result[0] = data_name + ' - ' + result[0]\n best_results.append(result)\n\n exp.export_summary(best_results, os.path.join(config.param['experiment_path'], 'summary_of_each_dataset.csv'))","sub_path":"dialogue/deprecated/satisfaction/entry.py","file_name":"entry.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"406010156","text":"import re\nfrom sortedcontainers import SortedSet, SortedDict\n\n\n\ndef main():\n\n\tsetup()\n\ndef setup():\n\n\t# Dictionary to store word as the key and value as the id.\n\tdict = {}\n\n\t# Array to store filename\n\tfileArr = [v for v in range(44)]\n\n\t\n\t\n\t# Iterate through every file in the file array and populate the dictionary\n\tfor file in fileArr:\n\n\t\tpopulateMainDictionary(str(file), dict)\n\n\t# Dictionary to store ids as the key and a set as value which contains the occurences of that id(word). \n\t# A sorted dictionary stored the keys in the sorted format, so there is no need to explicitly sort the dictionary.\n\tdict1=SortedDict()\n\n\t\n\t# Iterate through every file in the file array and populate the second dictionary\n\tfor fileName in range(len(fileArr)):\n\t\tpopulateSecondDictionary(dict, fileName, dict1)\n\t\t\n\t\n\t# Function to get the inverted index for a given word. 
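The satisfaction entry record selects its best classifier with np.asarray(result).T[4].argmax(), i.e. an argmax down column 4 (the F1 score) of the result rows. Because each row mixes a name string with floats, the array comes out as strings, so a float cast makes the comparison numeric rather than lexicographic; the row values below are hypothetical:

import numpy as np

# Hypothetical rows: [name, accuracy, precision, recall, f1]; column 4 is F1.
results = [['svm', 0.8, 0.7, 0.6, 0.65],
           ['lr',  0.7, 0.8, 0.8, 0.80],
           ['rf',  0.9, 0.6, 0.5, 0.55]]

# Mixed str/float rows become a string array, so cast the F1 column back to
# float before argmax; a bare T[4].argmax() would compare strings.
best = results[np.asarray(results).T[4].astype(float).argmax()]
print(best[0])  # lr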
It returns a list of files in which the given word exists.\n\tgetInvertedIndex(\"all\",dict, dict1)\n\tgetInvertedIndex(\"Shakespeare\", dict, dict1)\n\n\t\n\t\n\n# Function to populate the second dictionary\ndef populateSecondDictionary(dict, fileName, dict1):\n\n\t\n\t# Open the file to process the contents of the file\n\twith open(str(fileName), 'r') as f:\n\n\t\tfor line in f:\n\n\t\t\t# Process every word in the line\n\t\t\tfor word in line.split():\n\n\t\t\t\t# Regex to remove any punctuations\n\t\t\t\tcleanString = re.sub('\\W+','', word)\n\n\t\t\t\t# If the word is present in the main dictionary, then get its value and check if the value is present in the second dictionary\n\t\t\t\tif cleanString in dict:\n \t\t\t\t\tvalue = dict[cleanString]\n \t\t\t\t\t# If the value is not present in the second dictionary, then create a sorted set and add the filename to it. \n \t\t\t\t\t# A sorted set makes sure that there are no duplicates and the items in the set are already ordered, so there is no need to explicitly sort it.\n \t\t\t\t\tif value not in dict1:\n \t\t\t\t\t\ts = SortedSet()\n \t\t\t\t\t\ts.add(fileName)\n \t\t\t\t\t\tdict1[value]=s\n \t\t\t\t\telse:\n \t\t\t\t\t\tdict1[value].add(fileName)\n\t\n \t\t\t\t\t\n# Function which populates the first dictionary \ndef populateMainDictionary(file, dict):\n\n\t# Initialize the id to -1\n\tindex = -1\n\n\t\n\t# Open the file to process the contents of the file\n\twith open(file,'r') as f: \n\t\tfor line in f:\n\t\t\t# Process every word in the line\n \t\t\tfor word in line.split():\n \t\t\t\t# Regex to remove any punctuations\n \t\t\t\tcleanString = re.sub('\\W+','', word)\n \t\t\t\tif cleanString not in dict:\n \t\t\t\t\tindex=index+1\n \t\t\t\t\tdict[cleanString]=index\n\n\n# Function which prints the files in which the given word exists\ndef getInvertedIndex(word, dict, dict1):\n\n\t# If the word is present in the main dictionary, then get the value for it and use the value to get the occurences of that word from the second dictionary.\n\tif word in dict:\n\t\tval = dict[word]\n\n\t\tresult = dict1[val]\n\t\tprint(result)\t\n\telse:\n\t\tprint('Given word does not exist!')\n\n\n# Run the program\nmain()\n","sub_path":"assignment2.py","file_name":"assignment2.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"493258192","text":"import requests\nimport time\nimport requests\n\npage = requests.get(\"http://www.nhm.ac.uk/discover/the-cannibals-of-goughs-cave.html\")\nprint(page.status_code)\n\nsoup = bs(page.content, 'html.parser')\n\nfor i in soup.find_all(\"div\", class_=\"article--container\"):\n i_descendants = i.descendants\n for d in i_descendants:\n if d.name in ['h1', 'h2', 'p']:\n print(d.text)\n\ndef get_tfidf(vect):\n tf = TfidfVectorizer(input='content', analyzer='word', ngram_range=(1, 1),\n min_df=0, stop_words='english', sublinear_tf=True)\n tfidf_matrix = tf.fit_transform(vect)\n print(tfidf_matrix)\n feature_array = tf.get_feature_names()\n\n doc = 2\n feature_index = tfidf_matrix[doc, :].nonzero()[1]\n print(feature_index)\n tfidf_scores = zip(feature_index, [tfidf_matrix[doc, x] for x in feature_index])\n\n for w, s in [(feature_array[i], s) for (i, s) in tfidf_scores]:\n print(w, s)\n","sub_path":"spike/exploratorysoup.py","file_name":"exploratorysoup.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"207820507","text":"# 
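The assignment2 record builds its inverted index in two passes (word to id, then id to a sorted set of file ids). The same structure can be sketched in one pass with a plain dict of sets: the set collapses duplicate postings, and sorting can wait until a query is answered.

import re
from collections import defaultdict

files = {0: "to be or not to be", 1: "be quick", 2: "or else"}

index = defaultdict(set)  # word -> set of file ids containing it
for file_id, text in files.items():
    for word in text.split():
        index[re.sub(r'\W+', '', word)].add(file_id)

print(sorted(index["be"]))  # [0, 1]
print(sorted(index["or"]))  # [0, 2]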
%%\nimport pickle\nfrom pybtex.database import parse_file\nfrom utils import parse_entry\n\n# %%\nBIB_FILE = \"references.bib\"\nbib = parse_file(BIB_FILE, \"bibtex\")\n\nQUEUE_FILE = \"queue.txt\"\ntry:\n with open(QUEUE_FILE, \"rb\") as fp:\n queue = pickle.load(fp)\nexcept FileNotFoundError:\n queue = []\n\n# %%\nwith open(\"README.md\", \"a\") as f:\n for entry in bib.entries.values():\n\n if entry.key not in queue:\n queue.append(entry.key)\n md_str = parse_entry(entry)\n f.write(md_str + \"\\n\" + \"\\n\")\n\n# %%\nwith open(QUEUE_FILE, \"wb\") as fp:\n pickle.dump(queue, fp)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"160938705","text":"import bs4\r\nimport requests\r\nimport re\r\nimport urllib.request, urllib.error\r\nimport os\r\nimport argparse\r\nimport sys\r\nimport json\r\n\r\n\r\ndef get_soup(url,header):\r\n\treturn bs4.BeautifulSoup(urllib.request.urlopen(urllib.request.Request(url,headers=header)),'html.parser')\r\n\r\ndef main(args):\r\n\tparser = argparse.ArgumentParser(description = 'Options for scraping Google images')\r\n\tparser.add_argument('-s', '--search', type = str, help = 'search term')\r\n\targs = parser.parse_args()\r\n\r\n\tquery = args.search.split()\r\n\tquery = '+'.join(query)\r\n\tmax_images = 100\r\n\r\n\tsave_directory = \"PenguinImages\" + '/' + query\r\n\tsave_directory = urllib.parse.unquote(save_directory)\t#URLのクエリをパース\r\n\tif not os.path.exists(save_directory):\r\n\t\tos.makedirs(save_directory)\r\n\r\n\t# スクレーピング\r\n\turl = \"https://www.google.co.jp/search?q=\" + query + \"&source=lnms&tbm=isch\"\r\n\theader = {'User-Agent':\"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36\"}\r\n\tsoup = get_soup(url,header)\r\n\tActualImages = []\r\n\r\n\tfor a in soup.find_all(\"div\", {\"class\":\"rg_meta\"}):\r\n\t\tlink, Type = json.loads(a.text)[\"ou\"] ,json.loads(a.text)[\"ity\"]\r\n\t\tActualImages.append((link,Type))\r\n\tfor i, (img, Type) in enumerate(ActualImages[0:max_images]):\r\n\t\ttry:\r\n\t\t\tType = Type if len(Type) > 0 else 'jpg'\r\n\t\t\tprint(\"Downloading image {} ({}), type is {}\".format(i, img, Type))\r\n\t\t\traw_img = urllib.request.urlopen(img).read()\r\n\t\t\tf = open(os.path.join(save_directory, \"img_\" + str(i) + \".\" + Type), 'wb')\r\n\t\t\tf.write(raw_img)\r\n\t\t\tf.close()\r\n\t\texcept Exception as e:\r\n\t\t\tprint (\"could not load : \" + img)\r\n\t\t\tprint (e)\r\n\r\nif __name__ == '__main__':\r\n\tfrom sys import argv\r\n\ttry:\r\n\t\tmain(argv)\r\n\texcept KeyboardInterrupt:\r\n\t\tpass\r\n\tsys.exit()","sub_path":"image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"281428037","text":"import datetime,time\nimport argparse\ninitstr=input(\"input initial text:\")\nparser =argparse.ArgumentParser()\nparser.add_argument(\"first\",help=\"Dispay string which have to changed\",type=str)\nparser.add_argument(\"second\",help=\"Dispay string which have to changed\",type=str)\n\nargument=parser.parse_args()\nfirst_text=argument.first\nsecond_text=argument.second\nprint(\"The given text:\",initstr)\nprint(\"First word:\",first_text)\nprint(\"Second 
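The image-scraper record fetches Google Images with a desktop User-Agent, since the default urllib agent tends to be served a stripped page. The fetch pattern alone, with a placeholder URL and no assumption that the target permits scraping:

import urllib.request

url = "https://example.com/"  # placeholder; the record targets a Google Images search URL
header = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64)"}

req = urllib.request.Request(url, headers=header)
with urllib.request.urlopen(req) as resp:
    html = resp.read()
print(len(html))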
word:\",second_text)\n\nprint(\"output:\",initstr.replace(first_text,second_text))","sub_path":"Lecture2/week2/homework/problem3.py","file_name":"problem3.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"532641234","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/buildutils/test/test_compat.py\n# Compiled at: 2007-08-08 19:57:13\n\"\"\"Unit tests for the buildutils.compat package.\"\"\"\n\ndef test_string_template():\n from buildutils.compat.string_template import Template\n actual = Template('hello ${who}').substitute({'who': 'world'})\n expected = 'hello world'\n assert actual == expected","sub_path":"pycfiles/buildutils-0.3-py2.4/test_compat.py","file_name":"test_compat.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"51909350","text":"import os\nimport cv2\nimport glob\nimport torch\nimport imageio\nimport numpy as np\nimport pandas as pd\nimport seaborn as sn\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix\nfrom scipy.io import wavfile\nfrom PIL import Image\n\ndef set_lr(optimizer, lrs):\n\tif(len(lrs) == 1):\n\t\tfor param in optimizer.param_groups:\n\t\t\tparam['lr'] = lrs[0]\n\telse:\n\t\tfor i, param in enumerate(optimizer.param_groups):\n\t\t\tparam['lr'] = lrs[i]\n\ndef get_lr(optimizer):\n\toptim_param_groups = optimizer.param_groups\n\tif(len(optim_param_groups) == 1):\n\t\treturn optim_param_groups[0]['lr']\n\telse:\n\t\tlrs = []\n\t\tfor param in optim_param_groups:\n\t\t\tlrs.append(param['lr'])\n\t\treturn lrs\n\ndef histogram_sizes(img_dir, h_lim = None, w_lim = None):\n\ths, ws = [], []\n\tfor file in glob.iglob(os.path.join(img_dir, '**/*.*')):\n\t\ttry:\n\t\t\twith Image.open(file) as im:\n\t\t\t\th, w = im.size\n\t\t\t\ths.append(h)\n\t\t\t\tws.append(w)\n\t\texcept:\n\t\t\tprint('Not an Image file')\n\n\tif(h_lim is not None and w_lim is not None):\n\t\ths = [h for h in hs if h5)\nnum=14\nprint(Solution().isUgly(num))\n","sub_path":"UglyNumber.py","file_name":"UglyNumber.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"455663416","text":"import pygame\nfrom settings import *\n\n\n# Основной класс блока\nclass Block(pygame.sprite.Sprite):\n def __init__(self, x = 0, y = 0):\n pygame.sprite.Sprite.__init__(self)\n \n self.image = pygame.Surface([BLOCK_WIDTH, BLOCK_HEIGHT])\n \n # Заполнение блока\n self.image.fill(BLACK)\n \n # Координаты блока\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y \n \n # Тип блока\n self.blockType = \"MainBlock\" # Название блока\n self.pervious = False # Проницаемость блока\n\n\n\nclass Bonus(Block):\n def __init__(self, x = 0, y = 0):\n Block.__init__(self, x, y)\n \n self.image.fill(ORANGE)\n \n self.blockType = \"BonusSpeed\" # Название блока\n self.pervious = True # Проницаемость блока \n \n \nclass BulletSpeed(Block):\n def __init__(self, x = 0, y = 0):\n Block.__init__(self, x, y)\n \n self.image.fill(GREEN)\n \n self.blockType = \"BulletsSpeed\" # Название блока\n self.pervious = True # Проницаемость блока \n \nclass BulletFreq(Block):\n def 
__init__(self, x = 0, y = 0):\n Block.__init__(self, x, y)\n \n self.image.fill(PURPLE)\n \n self.blockType = \"BulletsFreq\" # Название блока\n self.pervious = True # Проницаемость блока ","sub_path":"block.py","file_name":"block.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"437923956","text":"from JumpScale import j\n\nOsisBaseObject=j.core.osis.getOsisBaseObjectClass()\n\nclass Grid(OsisBaseObject):\n\n \"\"\"\n identifies the grid\n \"\"\"\n\n def __init__(self, ddict={}, name=\"\", id=0, useavahi=1):\n if ddict != {}:\n self.load(ddict)\n else:\n self.name = name\n self.useavahi = useavahi\n self.nid=0\n self.id=id\n self.guid=id \n\n def initFromLocalNodeInfo(self):\n \"\"\"\n get ipaddr info & gid & nid from local config\n \"\"\"\n self.ipaddr=[item for item in j.system.net.getIpAddresses() if item !=\"127.0.0.1\"]\n self.id= j.application.config.getInt(\"gridmaster.grid.id\")\n\n if not j.application.config.exists(\"grid.node.id\"):\n #register the own masternode to the grid\n ays = j.atyourservice.get(\"jumpscale\", \"grid_node\")\n ays.configure()\n if j.application.config.getInt(\"grid.node.id\")==0:\n raise RuntimeError(\"grid nid cannot be 0\")\n\n self.nid=j.application.config.getInt(\"grid.node.id\")\n\n def getUniqueKey(self):\n \"\"\"\n return unique key for object, is used to define unique id\n \"\"\"\n return self.id\n\n def getSetGuid(self):\n \"\"\"\n use osis to define & set unique guid (sometimes also id)\n \"\"\"\n self.guid = int(self.id)\n self.id = int(self.id)\n return self.guid\n\n","sub_path":"apps/osis/logic/system/grid/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"213601188","text":"tablica = [\"x\"] * 100\nodpowiedzi = []\nfor i in range(1,101):\n for j in range(i,101,i):\n if tablica[j-1] == \"x\":\n tablica[j-1] = \"o\"\n else:\n tablica[j-1] = \"x\"\n\nfor index, wartosc in enumerate(tablica):\n if wartosc == \"o\":\n odpowiedzi.append(index+1)\nprint (\"Following doors are open: \"+str(odpowiedzi).strip('[]'))\n","sub_path":"doors.py","file_name":"doors.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"643390562","text":"'''\nGiven a string S and a string T, find the minimum window in S which will contain all the characters in T in complexity O(n).\n\nFor example,\nS = \"ADOBECODEBANC\"\nT = \"ABC\"\nMinimum window is \"BANC\".\n\nNote:\nIf there is no such window in S that covers all characters in T, return the empty string \"\".\n\nIf there are multiple such windows, you are guaranteed that there will always be only one unique minimum window in S.\n'''\n\nclass Solution(object):\n def minWindow(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: str\n \"\"\"\n M = len(s)\n N = len(t)\n if M < N:\n return \"\"\n res = '0' * (M + 1)\n tdic = {}\n for x in t:\n if x in tdic:\n tdic[x] += 1\n else:\n tdic[x] = 1\n for i in range(N):\n if s[i] in tdic:\n tdic[s[i]] -= 1\n idx1 = 0\n idx2 = N\n while idx2 - idx1 >= N and idx2 <= M:\n if self.satisfied(tdic):\n res = res if len(res) <= idx2 - idx1 else s[idx1:idx2]\n if s[idx1] in tdic:\n tdic[s[idx1]] += 1\n idx1 += 1\n continue\n if idx2 < M:\n if s[idx2] in tdic:\n tdic[s[idx2]] -= 1\n idx2 += 1\n if len(res) > M:\n return \"\"\n return res\n\n def satisfied(self, 
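The doors record toggles door j once per divisor i of j, so a door ends open exactly when j has an odd number of divisors, that is, when j is a perfect square. A one-line check of that reasoning against the simulation's output:

# Door j is toggled once per divisor; only perfect squares have an odd count.
open_doors = [j for j in range(1, 101) if int(j ** 0.5) ** 2 == j]
print(open_doors)  # [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]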
tdic):\n for k, v in tdic.items():\n if v > 0:\n return False\n return True\n\ns = Solution()\nres = s.minWindow(\"ADOBECODEBANC\", \"ABC\")\nprint(res)\n\n'''\nSliding window approach\n'''","sub_path":"array_list/minimum_window_substring.py","file_name":"minimum_window_substring.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"373235201","text":"\"\"\"\nAR_SelectSameColor\n\nAuthor: Arttu Rautio (aturtur)\nWebsite: http://aturtur.com/\nName-US: AR_SelectSameColor\nVersion: 1.0.1\nDescription-US: Selects object(s) with same object color that active object has\n\nWritten for Maxon Cinema 4D R25.010\nPython version 3.9.1\n\nChange log:\n1.0.1 (18.08.2022) - R25 support\n\"\"\"\n# Libraries\nimport c4d\n\n# Functions\ndef GetNextObject(op):\n if op==None:\n return None\n if op.GetDown():\n return op.GetDown()\n while not op.GetNext() and op.GetUp():\n op = op.GetUp()\n return op.GetNext()\n \ndef IterateHierarchy(op, color):\n doc = c4d.documents.GetActiveDocument() # Get active Cinema 4D document\n if op is None:\n return\n while op:\n if op[c4d.ID_BASEOBJECT_COLOR] == color: # If object color is same as reference color\n op.SetBit(c4d.BIT_ACTIVE) # Select object\n doc.AddUndo(c4d.UNDOTYPE_CHANGE, op) # Add undo command for selecting object\n op = GetNextObject(op) # Get next object\n return True\n\ndef main():\n doc = c4d.documents.GetActiveDocument() # Get active Cinema 4D document\n doc.StartUndo() # Start recording undos\n try: # Try to execute following script\n active_object = doc.GetActiveObject() # Get active object\n reference_color = active_object[c4d.ID_BASEOBJECT_COLOR] # Object color\n start_object = doc.GetFirstObject() # Get first object\n IterateHierarchy(start_object, reference_color) # Do the thing\n except: # If something went wrong\n pass # Do nothing\n doc.EndUndo() # Stop recording undos\n c4d.EventAdd() # Refresh Cinema 4D\n \n# Execute main()\nif __name__=='__main__':\n main()","sub_path":"AR_Scripts_1.74/Object Manager/AR_SelectSameColor.py","file_name":"AR_SelectSameColor.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"130845642","text":"# Python Exercise 096: Write a program with a function called area() that receives the dimensions \n# of a rectangular plot of land (width and length) and displays the plot's area.\n# https://youtu.be/oV1s53YGtvE\n\n\nprint(f'{\"PARÂMETROS TERRENOS\":^30}')\nprint('-'*30)\n\ndef area(larg, comp):\n a = larg * comp\n print(f'A ÁREA DO TERRENO É DE {larg} x {comp}: {a:.1f} M²')\n\n\narea(float(input('LARGURA: ')),\n float(input('COMPRIMENTO: '))\n )\n","sub_path":"EXERCICIOS/#096 - Função que calcula área.py","file_name":"#096 - Função que calcula área.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"319205542","text":"n = int(input())\nc0 = 0\nc1 = 0\nc2 = 0\nc3 = 0\nfor i in range(n):\n s = input()\n if s == \"AC\":\n c0 += 1\n elif s == \"WA\":\n c1 += 1\n elif s == \"TLE\":\n c2 += 1\n elif s == \"RE\":\n c3 += 1\n else:\n exit()\n\nprint(f\"AC x {c0}\")\nprint(f\"WA x {c1}\")\nprint(f\"TLE x {c2}\")\nprint(f\"RE x {c3}\")\n","sub_path":"Python_codes/p02613/s065802632.py","file_name":"s065802632.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"378199155","text":"#!/usr/bin/env python\n\nimport cx_Oracle\n\ntnsname = cx_Oracle.makedsn('10.65.10.247', '61521', 'test1')\nconn = cx_Oracle.connect('chbase', 'Lnyd*132', tnsname)\nc = conn.cursor()\n\n\nresult = c.executemany('select * from chbase.bs_static_data')\n\nprint(result)\n\n# 关闭游标\nc.close()\n# 关闭DB连接\nconn.close()\n","sub_path":"src/com/dao/DaoBsStaticData.py","file_name":"DaoBsStaticData.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"545041319","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n__author__ = 'Sasha'\n\nfrom lxml import etree\n\nclass Container:\n\n def make_data(self,list,stype):\n\n Mainroot = etree.Element('datalist')\n\n for element in list:\n\n root = etree.Element('data')\n type = etree.SubElement(root,'type')\n type.text = stype\n plannumber = etree.SubElement(root,'plannumber')\n #Номер плана граффика\n\n plannumber.text = element.get('PlanNumber')\n\n listElement = etree.Element('list')\n #Перебор данных заявки\n for i in element.get('Data'):\n if i.get('Lable')=='' or i.get('Value')=='':\n continue\n line = etree.Element('line')\n etree.SubElement(line,'Lable').text = i.get('Lable')\n datatype = i.get('Type')\n etree.SubElement(line,'Type').text = datatype\n\n if datatype == 'table':\n self.makeTableData(line,i.get('Value'))\n else:\n etree.SubElement(line,'Value').text = i.get('Value')\n\n\n listElement.append(line)\n\n root.append(listElement)\n Mainroot.append(root)\n\n handle = etree.tostring(Mainroot, pretty_print=True, encoding='utf-8', xml_declaration=True)\n\n f = open('data.txt', 'w')\n f.write(handle)\n return handle\n\n def makeTableData(self,line,data):\n\n Value = etree.SubElement(line,'Value')\n for dataline in data:\n newline = etree.Element('line')\n etree.SubElement(newline,'Lable').text = dataline.get('Lable')\n if type(dataline.get('Value')) == type([]):\n self.makeTableData(newline,dataline.get('Value'))\n else:\n etree.SubElement(newline,'Value').text = dataline.get('Value')\n Value.append(newline)\n","sub_path":"container.py","file_name":"container.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"44473601","text":"from Internet import GoodsList\nfrom Internet import EntpList\nfrom Internet import checkDoc\nhost = \"smtp.gmail.com\" # Gmail STMP 서버 주소.\nport = \"587\"\n\ndef MakeHtmlDoc():\n print(\"리스트를 받아옵니다\")\n from xml.dom.minidom import getDOMImplementation\n # DOM 개체를 생성합니다.\n impl = getDOMImplementation()\n \n newdoc = impl.createDocument(None, \"html\", None) # HTML 최상위 엘리먼트를 생성합니다.\n top_element = newdoc.documentElement\n header = newdoc.createElement('header')\n top_element.appendChild(header)\n\n # Body 엘리먼트 생성\n body = newdoc.createElement('body')\n\n for item in GoodsList:\n # Bold 엘리먼트를 생성합니다.\n b = newdoc.createElement('b')\n # 텍스트 노드를 만듭니다.\n ibsnText = newdoc.createTextNode(\"goodName:\" + item)\n b.appendChild(ibsnText)\n\n body.appendChild(b)\n \n #
\n br = newdoc.createElement('br')\n\n body.appendChild(br)\n\n # Create the title part.\n #p = newdoc.createElement('p')\n # Create a text node.\n #titleText= newdoc.createTextNode(\"Good:\" + item[1])\n #p.appendChild(titleText)\n\n #body.appendChild(p)\n #body.appendChild(br) # Append the <br> element to the parent element.
\n \n # Append the body element to the top-level element.\n top_element.appendChild(body)\n \n return newdoc.toxml()\n \ndef MakeHtmlDoc2():\n print(\"리스트를 받아옵니다\")\n from xml.dom.minidom import getDOMImplementation\n # Create a DOM implementation object.\n impl = getDOMImplementation()\n \n newdoc = impl.createDocument(None, \"html\", None) # Create the top-level HTML element.\n top_element = newdoc.documentElement\n header = newdoc.createElement('header')\n top_element.appendChild(header)\n\n # Create the body element\n body = newdoc.createElement('body')\n\n for item in EntpList:\n # Create a bold element.\n b = newdoc.createElement('b')\n # Create a text node.\n ibsnText = newdoc.createTextNode(\"entpName:\" + item)\n b.appendChild(ibsnText)\n\n body.appendChild(b)\n \n # Create a <br> element.
\n br = newdoc.createElement('br')\n\n body.appendChild(br)\n\n # Create the title part.\n #p = newdoc.createElement('p')\n # Create a text node.\n #titleText= newdoc.createTextNode(\"Good:\" + item[1])\n #p.appendChild(titleText)\n\n #body.appendChild(p)\n #body.appendChild(br) # Append the <br> element to the parent element.
\n \n # Append the body element to the top-level element.\n top_element.appendChild(body)\n \n return newdoc.toxml()\n\ndef sendMail():\n if not checkDoc():\n return None\n \n global host, port\n html = \"\"\n html2 = \"\"\n title = \"생필품 가격 조회 서비스\"\n \n senderAddr = \"kjw955486@gmail.com\" \n recipientAddr = \"kjw8576@naver.com\" #str(input ('recipient email address :'))\n #msgtext = str(input ('write message :'))\n # passwd = \"wlsdn9450\"\n cmd = input(\"상품명을 전송하려면 a를, 업체명을 전송하려면 b를 누르세요 : \")\n \n if cmd == 'a':\n html = MakeHtmlDoc()\n else:\n html2 = MakeHtmlDoc2()\n \n import mysmtplib\n # Create a MIME object via MIMEMultipart.\n from email.mime.multipart import MIMEMultipart\n from email.mime.text import MIMEText\n \n # Create the message container.\n msg = MIMEMultipart('alternative')\n\n #set message\n msg['Subject'] = title\n msg['From'] = senderAddr\n msg['To'] = recipientAddr\n \n \n \n #msgPart = MIMEText(msgtext, 'plain')\n if cmd == 'a':\n bookPart = MIMEText(html, 'html', _charset = 'UTF-8')\n else:\n bookPart2 = MIMEText(html2, 'html', _charset = 'UTF-8')\n \n # Attach the generated MIME document to the message.\n \n #msg.attach(msgPart)\n if cmd == 'a':\n msg.attach(bookPart)\n else:\n msg.attach(bookPart2)\n \n print (\"connect smtp server ... \")\n s = mysmtplib.MySMTP(host,port)\n #s.set_debuglevel(1)\n s.ehlo()\n s.starttls()\n s.ehlo()\n s.login(\"kjw955486@gmail.com\", \"wlsdn9450\") # Log in. \n s.sendmail(senderAddr , [recipientAddr], msg.as_string())\n s.close()\n \n print (\"Mail sending complete!!!\")","sub_path":"code/dist/PriceInfo-1.0/Gmail.py","file_name":"Gmail.py","file_ext":"py","file_size_in_byte":4565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"275184014","text":"def binario_decimal(binario):\n b=list(binario)\n b.reverse()\n resultado=0\n n=0\n for i in b:\n resultado+=int(i)*(2**n)\n n+=1\n return resultado\n\n\n\ndef decodificar(mensaje):\n b=mensaje.split(\",\")\n final=''\n for i in b:\n final+=chr(binario_decimal(i))\n return final\n\nprint(decodificar('01101000,01101111,01101100,01100001'))\n","sub_path":"tema9_ej3/tema9_ej3_15638162.py","file_name":"tema9_ej3_15638162.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"175236889","text":"\"\"\" launch_experiments.py.\n\n Code to train other models from config files\n\n Inspired by Onconet with original source:\n https://github.com/yala/OncoNet_Public/blob/master/scripts/dispatcher.py\n\n Usage:\n python scripts/launch_experiments.py --python-file-name training/train_model.py --experiment-config-file static/train_config.py --log-dir results/test/\n python scripts/launch_experiments.py --python-file-name training/train_model.py --experiment-config-file static/train_config.py --log-dir results/test/ --sbatch-script scripts/slurm_scripts/slurm_train_fastas.sh\n\"\"\"\nimport sys\nimport os\nimport argparse\nimport json\nimport multiprocessing\nimport subprocess\nfrom typing import Tuple\nfrom enzpred.utils import launcher_utils\n\n# Constants\nRESULTS_PATH_APPEAR_ERR = \"save_dir should not appear in config. 
It will be determined automatically per job\"\n\n\ndef get_args():\n \"\"\" Get arguments \"\"\"\n options = argparse.ArgumentParser()\n # Name of python file to run\n options.add_argument('--python-file-name',\n action=\"store\",\n help=\"Name of python script to run!\")\n options.add_argument('--experiment-config-file',\n action=\"store\",\n help=\"Json config file\")\n options.add_argument('--log-dir',\n action=\"store\",\n help=\"Where to store results\")\n options.add_argument(\"--sbatch-script\",\n action=\"store\",\n default=None,\n help=\"Path to the sbatch script to launch programs\")\n options.add_argument(\"--use-gpu\",\n action=\"store\",\n default=None,\n help=\"Use this flag if launching on gpu\")\n\n return options.parse_args()\n\n\n## Multiprocessing\n\n\ndef launch_experiment(log_dir: str, python_file_name: str, gpu: int,\n flag_string: str) -> Tuple[str, str]:\n \"\"\"launch_experiments.\n\n Launch an experiment and direct logs and results to a unique filepath.\n Alert if something goes wrong.\n\n Args:\n log_dir (str): Directory for the log of the results\n python_file_name (str): Name of the python file passed\n gpu (int): gpu to run this job on.\n flag_string (str): flags to use for this model run. Will be fed into\n scripts/main.py\n\n Returns:\n Tuple[str,str]: results_path, log_path\n\n \"\"\"\n if not os.path.isdir(log_dir):\n os.makedirs(log_dir)\n\n log_name = launcher_utils.md5(flag_string)\n log_stem = log_name\n\n results_path = os.path.join(log_dir, \"{}\".format(log_stem))\n log_path = os.path.join(log_dir, \"{}.txt\".format(log_stem))\n\n if gpu:\n experiment_string = \"CUDA_VISIBLE_DEVICES={} python -u {} {} --out {}\".format(\n gpu, python_file_name, flag_string,\n os.path.join(results_path, \"out\"))\n else:\n experiment_string = \"python -u {} {} --out {}\".format(\n python_file_name, flag_string, os.path.join(results_path, \"out\"))\n\n # Redirect both stdout and stderr to a file for now\n shell_cmd = \"{} > {} 2>&1\".format(experiment_string, log_path)\n\n if not os.path.exists(results_path):\n print(\"Launched exp: {}\".format(shell_cmd))\n os.makedirs(results_path)\n subprocess.call(shell_cmd, shell=True)\n else:\n print(\"ERROR LAUNCHING; dir {} exists\".format(results_path))\n\n return results_path, log_path\n\n\ndef worker(log_dir: str, python_file_name: str, gpu: int,\n job_queue: multiprocessing.Queue, done_queue: multiprocessing.Queue):\n \"\"\"worker.\n\n Worker thread for each gpu. 
Consumes all jobs and pushes results to done_queue.\n\n Args:\n log_dir (str): Directory for the log of the results\n python_file_name (str): Name of the python file passed\n gpu (int): Gpu this worker can access\n job_queue (multiprocessing.Queue): Queue of available jobs\n done_queue (multiprocessing.Queue): Queue where to push results\n \"\"\"\n while not job_queue.empty():\n params = job_queue.get()\n if params is None:\n return\n done_queue.put(launch_experiment(log_dir, python_file_name, gpu, params))\n\n\ndef multiprocessing_launch(log_dir: str, python_file_name: str,\n experiment_config_file: str, use_gpu: bool,\n **kwargs):\n \"\"\"multiprocessing_launch.\n\n Use this to launch jobs on multiprocessing\n\n Args:\n log_dir (str): log_dir\n python_file_name (str): python_file_name\n experiment_config_file (str): experiment_config_file\n use_gpu (bool): use_gpu\n kwargs: Absorb slurm script arg\n \"\"\"\n\n experiment_config = json.load(open(experiment_config_file, 'r'))\n\n if 'save_dir' in experiment_config['search_space']:\n print(RESULTS_PATH_APPEAR_ERR)\n sys.exit(1)\n\n job_list, experiment_axes = launcher_utils.parse_dispatcher_config(\n experiment_config)\n\n # For multiprocessing:\n job_queue = multiprocessing.Queue()\n done_queue = multiprocessing.Queue()\n\n # Add jobs to the queue\n for job in job_list:\n job_queue.put(job)\n\n if use_gpu:\n print(\"Launching Dispatcher with {} jobs!\".format(len(job_list)))\n for gpu in experiment_config['available_gpus']:\n print(\"Start gpu worker {}\".format(gpu))\n multiprocessing.Process(target=worker,\n args=(log_dir, python_file_name, gpu, job_queue,\n done_queue)).start()\n\n else:\n print(\"Launching Dispatcher with {} jobs!\".format(len(job_list)))\n multiprocessing.Process(target=worker,\n args=(log_dir, python_file_name, None, job_queue,\n done_queue)).start()\n\n\n## Slurm\ndef slurm_launch(log_dir: str, python_file_name: str, experiment_config_file: str,\n use_gpu: bool, sbatch_script: str, **kwargs):\n \"\"\"slurm_launch.\n\n Use this to launch on slurm\n\n Args:\n log_dir (str): log_dir\n python_file_name (str): python_file_name\n experiment_config_file (str): experiment_config_file\n use_gpu (bool): use_gpu\n sbatch_script (str): sbatch_script\n kwargs: kwargs\n \"\"\"\n\n experiment_config = json.load(open(experiment_config_file, 'r'))\n if 'save_dir' in experiment_config['search_space']:\n print(RESULTS_PATH_APPEAR_ERR)\n sys.exit(1)\n\n job_list, experiment_axes = launcher_utils.parse_dispatcher_config(\n experiment_config)\n\n for flag_string in job_list:\n if not os.path.isdir(log_dir):\n os.makedirs(log_dir)\n\n log_stem = launcher_utils.md5(flag_string)\n results_path = os.path.join(log_dir, \"{}\".format(log_stem))\n\n # Useful for slurm?\n log_path = os.path.join(log_dir, \"{}.txt\".format(log_stem))\n shell_cmd = \"python -u {} {} --out {}\".format(\n python_file_name, flag_string, os.path.join(results_path, \"out\"))\n\n # Run in sbatch\n if sbatch_script is not None:\n shell_cmd = (\"sbatch --export=CMD=\\\"{}\\\" {}\".format(\n shell_cmd, sbatch_script))\n\n if not os.path.exists(results_path):\n os.makedirs(results_path)\n\n subprocess.call(shell_cmd, shell=True)\n else:\n raise Exception(\n \"Path to this results file {} already exists\".format(\n results_path))\n\n print(\"Launched exp: {}\\n\".format(shell_cmd))\n\n\nif __name__ == \"__main__\":\n args = vars(get_args())\n\n if args['sbatch_script'] is not None:\n slurm_launch(**args)\n else:\n multiprocessing_launch(**args)\n","sub_path":"{{ 
cookiecutter.repo_name }}/{{ cookiecutter.repo_name }}/scripts/launch_experiments.py","file_name":"launch_experiments.py","file_ext":"py","file_size_in_byte":7800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"142337437","text":"\"\"\"\nFunctional test\n\nFast Job Epic\n\nStoryboard is defined within the comments of the program itself\n\"\"\"\n\nimport unittest\nfrom flask import url_for\nfrom biblib.tests.stubdata.stub_data import UserShop, LibraryShop\nfrom biblib.tests.base import MockSolrQueryService, TestCaseDatabase, MockSolrBigqueryService, MockEndPoint\nimport json\n\nclass TestJobFastEpic(TestCaseDatabase):\n \"\"\"\n Base class used to test the Job Fast Epic\n \"\"\"\n\n def test_job_fast_epic(self):\n \"\"\"\n Carries out the epic 'Fast Job', where a user wants to add their articles to\n their private libraries so that they can send it on to a prospective\n employer\n\n :return: no return\n \"\"\"\n\n # Mary creates a private library and\n # 1. Gives it a name.\n # 2. Gives it a description.\n # 3. Makes it public to view.\n\n # Stub data\n user_mary = UserShop()\n user_random = UserShop()\n stub_library = LibraryShop(want_bibcode=True, public=True)\n\n self.assertIs(list, type(stub_library.get_bibcodes()))\n self.assertIs(list, type(stub_library.user_view_post_data['bibcode']))\n\n # Make the library and make it public to be viewed by employers\n url = url_for('userview')\n response = self.client.post(\n url,\n data=stub_library.user_view_post_data_json,\n headers=user_mary.headers\n )\n library_id = response.json['id']\n self.assertEqual(response.status_code, 200, response)\n self.assertTrue('bibcode' in response.json)\n self.assertTrue(response.json['name'] == stub_library.name)\n\n # She then asks a friend to check the link, and it works fine.\n url = url_for('libraryview', library=library_id)\n with MockSolrBigqueryService(\n canonical_bibcode=stub_library.bibcode) as BQ, \\\n MockEndPoint([user_mary]) as EP:\n response = self.client.get(\n url,\n headers=user_random.headers\n )\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json['documents']),\n len(stub_library.bibcode))\n\n # Accidentally tries to add the same bibcodes, but it does not work as\n # expected\n url = url_for('documentview', library=library_id)\n with MockSolrQueryService(canonical_bibcode = json.loads(stub_library.document_view_post_data_json('add')).get('bibcode')) as SQ:\n response = self.client.post(\n url,\n data=stub_library.document_view_post_data_json('add'),\n headers=user_mary.headers\n )\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json['number_added'], 0)\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n","sub_path":"biblib/tests/functional_tests/test_job_fast_epic.py","file_name":"test_job_fast_epic.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"224059876","text":"from django.shortcuts import render, redirect\nfrom django.core.urlresolvers import reverse\nimport random\n\ndef index(request):\n if 'word' not in request.session:\n return redirect(reverse('random_word_gen:create'))\n else:\n return render(request, 'random_word_gen/index.html')\n\ndef create(request):\n # initializes the number of random words generated\n if 'count' not in request.session:\n request.session['count'] = 1\n else:\n request.session['count'] += 1\n char = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 
'H', 'I', 'J', 'K', 'L', 'M',\n 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\n word = ''\n # grabs an index referencing a specific letter in the char list and\n # concatenates that letter with the rest of the word\n for num in range(14):\n index = random.randrange(26)\n word += char[index]\n request.session['word'] = word\n return redirect(reverse('random_word_gen:index'))\n","sub_path":"Django/multiple_apps_main/apps/random_word_gen/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"16842918","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport spintax\nimport time\nfrom termcolor import colored\n\nfrom telethon.sync import TelegramClient\nfrom telethon.tl.functions.channels import JoinChannelRequest\nfrom telethon.tl.functions.photos import UploadProfilePhotoRequest\nfrom telethon.tl.functions.photos import DeletePhotosRequest\nfrom telethon.tl.functions.account import UpdateProfileRequest\nfrom telethon.tl.functions.account import UpdateUsernameRequest\n\nos.system('cls')\n\n\nacc = 1\napi_id = 1\napi_hash = '1'\n\ngroup = '@some_group'\n\n\ndef names():\n try:\n file = open(file='names.txt', mode='r', encoding='utf-8')\n names_list = file.read().splitlines()\n global names\n names = names_list.pop()\n except Exception as e:\n print(colored(f'Read file names.txt error: , {e}, file will close in 5 seconds!', 'red'))\n time.sleep(5)\n sys.exit()\n\n\ndef log():\n account = f'account{acc}'\n client = TelegramClient(account, api_id, api_hash)\n client.connect()\n if not client.is_user_authorized():\n print(colored(f'{account} is not authorized', 'red'))\n else:\n name = spintax.spin(names)\n print(name)\n print(account)\n try:\n client(UpdateProfileRequest(first_name=name))\n print(colored('Username changed successfully!', 'green'))\n\n except Exception as e:\n print(colored('Name substitution error', 'red'), e)\n try:\n client(UpdateProfileRequest(last_name=''))\n print(colored('Surname changed successfully!', 'green'))\n except Exception as e:\n print(colored('Surname substitution error:', 'red'), e)\n try:\n client(UpdateProfileRequest(about=''))\n print(colored('Bio deleted successfully!', 'green'))\n except Exception as e:\n print(colored('Bio deleted error:', 'red'), e)\n try:\n client(DeletePhotosRequest(client.get_profile_photos('me')))\n print(colored('Photos deleted successfully!', 'red'))\n except Exception as e:\n print(colored('Photos deleted error:', 'red'), e)\n pass\n try:\n client(UploadProfilePhotoRequest(\n client.upload_file(f'.\\\\photos\\\\photo{acc}.jpg'))\n )\n print(colored('Photo changed successfully!', 'green'))\n except Exception as e:\n print(colored('Photo substitution error:', 'red'), e)\n try:\n client(UpdateUsernameRequest(''))\n print(colored('Username deleted successfully!', 'green'))\n except Exception as e:\n print(colored('Username substitution error:', 'red'), e)\n try:\n client(JoinChannelRequest(group))\n print(colored('Join to channel successfully!'))\n except Exception as e:\n print(colored('Join to channel error:', 'red'), e)\n\n client.disconnect()\n print(colored('Go to next account', 'yellow'))\n\n\nnames()\nwhile acc <= 20:\n log()\n acc += 1\nelse:\n print(colored('Work done!', 'yellow'))\n 
sys.exit()\n","sub_path":"telethon/first_log.py","file_name":"first_log.py","file_ext":"py","file_size_in_byte":3094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"355381645","text":"import sqlite3\r\nimport tkinter as tk\r\nfrom tkinter import Button\r\nfrom tkinter import Listbox\r\nfrom tkinter import Scrollbar\r\nfrom tkinter import filedialog\r\nfrom tkinter import PhotoImage\r\nimport data_access as data\r\nfrom functools import partial\r\nimport os\r\nimport webbrowser\r\nimport time\r\n\r\n\r\nclass ViewGui(tk.Tk):\r\n\r\n def __init__(self):\r\n tk.Tk.__init__(self)\r\n self.database = data.DatabaseAccess(\"macro_database.db\")\r\n\r\n self.play_img = PhotoImage(file='assets\img\play.png')\r\n self.play_img = self.play_img.subsample(1)\r\n\r\n self.edit_img = PhotoImage(file='assets\img\edit.png')\r\n self.edit_img = self.edit_img.subsample(2)\r\n\r\n self.delete_img = PhotoImage(file='assets\img\del.png')\r\n self.delete_img = self.delete_img.subsample(1)\r\n\r\n self.button_width = 25\r\n self.button_height = 25\r\n\r\n if self.database.check_db() is True:\r\n pass\r\n else:\r\n exit()\r\n\r\n # Title\r\n self.title(\"Macro App\")\r\n self.iconbitmap('assets\img\icon.ico')\r\n hub = tk.Frame(self)\r\n hub.grid_columnconfigure(0,weight=1)\r\n hub.grid_rowconfigure(0, weight=1)\r\n hub.pack(side=tk.TOP,\r\n fill=tk.BOTH,\r\n expand=True)\r\n\r\n # Left Side container\r\n self.left_container = tk.Frame(hub, bg=\"gray\")\r\n self.left_container.pack(side=tk.LEFT,\r\n fill=\"both\")\r\n\r\n macro_group_label = tk.Label(self.left_container, text=\"Macro Groups\")\r\n macro_group_label.pack(side=tk.TOP,\r\n ipady=5,\r\n fill=\"x\")\r\n\r\n scroll_bar = Scrollbar(self.left_container)\r\n scroll_bar.pack(side=tk.RIGHT,\r\n fill=\"y\")\r\n\r\n self.marco_group_listbox = Listbox(self.left_container, yscrollcommand=scroll_bar.set)\r\n\r\n self.marco_group_listbox.pack(side=tk.TOP,\r\n fill=\"both\",\r\n expand=True)\r\n\r\n # Pass the bound yview method itself, not the result of calling it\r\n scroll_bar.config(command=self.marco_group_listbox.yview)\r\n macro_group_add_button = Button(self.left_container,\r\n text=\"(Create New Group)\",\r\n command=self.new_group)\r\n macro_group_add_button.pack(side=tk.TOP,\r\n fill=\"x\")\r\n\r\n self.populate_groups(self.marco_group_listbox)\r\n\r\n # Right Side\r\n self.macro_label = tk.Label(hub, text=\" \")\r\n self.macro_label.pack(side=tk.TOP,\r\n fill=\"x\",\r\n ipady=5)\r\n\r\n self.right_container = tk.Frame(hub)\r\n self.right_container.pack(side=tk.RIGHT,\r\n fill=tk.BOTH,\r\n expand=True)\r\n\r\n def populate_groups(self,frame):\r\n for groups in self.database.get_groups():\r\n groups_id = groups[0]\r\n group_name = groups[1]\r\n bound_display_records = partial(self.populate_records, groups_id)\r\n new_button = tk.Button(frame,\r\n text=f\"{group_name}\",\r\n command=bound_display_records)\r\n new_button.pack(fill=\"x\")\r\n\r\n def populate_records(self, id):\r\n for items in self.right_container.winfo_children():\r\n items.destroy()\r\n\r\n self.macro_label.config(text=f\"{self.database.find_group_name(id)[0]}\")\r\n\r\n # Apps\r\n app_frame = tk.Frame(self.right_container)\r\n app_frame.pack(side=tk.TOP,\r\n fill=\"x\",\r\n padx=10,\r\n pady=1)\r\n\r\n app_label = tk.Label(app_frame, text=\"Apps\")\r\n app_label.pack(side=tk.LEFT)\r\n\r\n bound_add_apps = partial(self.add_new_app, id)\r\n app_add_button = tk.Button(app_frame,\r\n text=\"+\",\r\n command=bound_add_apps)\r\n app_add_button.pack(side=tk.RIGHT)\r\n\r\n app_box = 
tk.Listbox(self.right_container)\r\n app_box.pack(side=tk.TOP,\r\n fill=\"x\",\r\n padx=10)\r\n\r\n db_results = self.database.get_records_by_type('A',id)\r\n for apps in db_results:\r\n seating_frame = tk.Frame(app_box)\r\n\r\n new_app_name = tk.Label(seating_frame, text=f\"{apps[1]}\")\r\n new_app_name.pack(side=tk.LEFT)\r\n\r\n bound_app_trigger = partial(self.activate_address,apps[0],id,apps[3])\r\n new_app_trigger = tk.Button(seating_frame, text=\"P\",\r\n image=self.play_img,\r\n height=self.button_width,\r\n width=self.button_height,\r\n command=bound_app_trigger)\r\n\r\n new_app_trigger.pack(side=tk.RIGHT)\r\n\r\n bound_edit_app = partial(self.edit_app, id, apps[0])\r\n new_app_edit = tk.Button(seating_frame,\r\n text=\"E\",\r\n image=self.edit_img,\r\n height=self.button_width,\r\n width=self.button_height,\r\n command=bound_edit_app)\r\n\r\n new_app_edit.pack(side=tk.RIGHT)\r\n\r\n bound_delete_app = partial(self.delete_record, id, apps[0])\r\n new_app_delete = tk.Button(seating_frame,\r\n text=\"X\",\r\n height=self.button_width,\r\n width=self.button_height,\r\n image=self.delete_img,\r\n command=bound_delete_app)\r\n new_app_delete.pack(side=tk.RIGHT)\r\n\r\n seating_frame.pack(side=tk.TOP, fill=\"x\")\r\n\r\n # Links\r\n link_frame = tk.Frame(self.right_container)\r\n link_frame.pack(side=tk.TOP,\r\n fill=\"x\",\r\n padx=10,\r\n pady=1)\r\n\r\n link_label = tk.Label(link_frame, text=\"Links\")\r\n link_label.pack(side=tk.LEFT)\r\n\r\n bound_add_links = partial(self.add_new_link, id)\r\n link_add_button = tk.Button(link_frame, text=\"+\", command=bound_add_links)\r\n link_add_button.pack(side=tk.RIGHT)\r\n\r\n link_box = tk.Listbox(self.right_container)\r\n link_box.pack(side=tk.TOP,\r\n fill=\"x\",\r\n padx=10)\r\n\r\n db_results = self.database.get_records_by_type('L', id)\r\n for links in db_results:\r\n seating_frame = tk.Frame(link_box)\r\n\r\n new_link_name = tk.Label(seating_frame, text=f\"{links[1]}\")\r\n new_link_name.pack(side=tk.LEFT)\r\n\r\n bound_link_trigger = partial(self.activate_address, links[0], id, links[3])\r\n new_link_trigger = tk.Button(seating_frame,text=\"P\",\r\n image=self.play_img,\r\n height=self.button_width,\r\n width=self.button_height,\r\n command=bound_link_trigger)\r\n new_link_trigger.pack(side=tk.RIGHT)\r\n\r\n bound_edit_link = partial(self.edit_link, id, links[0])\r\n new_link_edit = tk.Button(seating_frame,\r\n text=\"E\",\r\n image=self.edit_img,\r\n height=self.button_width,\r\n width=self.button_height,\r\n command=bound_edit_link)\r\n new_link_edit.pack(side=tk.RIGHT)\r\n\r\n bound_delete_link = partial(self.delete_record, id, links[0])\r\n new_link_delete = tk.Button(seating_frame,\r\n text=\"X\",\r\n image=self.delete_img,\r\n height=self.button_width,\r\n width=self.button_height,\r\n command=bound_delete_link)\r\n new_link_delete.pack(side=tk.RIGHT)\r\n\r\n seating_frame.pack(side=tk.TOP, fill=\"x\")\r\n\r\n # Settings\r\n setting_frame = tk.Frame(self.right_container)\r\n setting_label=tk.Label(setting_frame, text=\"Settings\")\r\n setting_label.pack()\r\n\r\n bound_trigger = partial(self.edit_group,id)\r\n edit_group_name = tk.Button(setting_frame,\r\n text=\"Edit Group Name\",\r\n command = bound_trigger)\r\n edit_group_name.pack()\r\n\r\n bound_trigger = partial(self.activate_group_address, id, 'A')\r\n trigger_all_apps = tk.Button(setting_frame,\r\n text=\"Trigger all applications\",\r\n command=bound_trigger)\r\n trigger_all_apps.pack()\r\n\r\n bound_trigger = partial(self.activate_group_address, id, 'L')\r\n 
trigger_all_links = tk.Button(setting_frame,\r\n text=\"Trigger all links\",\r\n command=bound_trigger)\r\n trigger_all_links.pack()\r\n\r\n bound_trigger = partial(self.delete_group,id)\r\n remove_group = tk.Button(setting_frame,\r\n text=\"Delete this group\",\r\n command=bound_trigger)\r\n remove_group.pack()\r\n\r\n setting_frame.pack()\r\n\r\n def new_group(self):\r\n new_macro = tk.Toplevel()\r\n new_macro.grab_set()\r\n new_macro.title(\"Add macro\")\r\n new_macro.minsize(250, 100)\r\n new_macro.maxsize(450, 300)\r\n name_label = tk.Label(new_macro, text=\"Name:\")\r\n status_label = tk.Label(new_macro, text=\"\")\r\n name_entry = tk.Entry(new_macro)\r\n name_label.pack()\r\n name_entry.pack()\r\n status_label.pack()\r\n\r\n def validate_button():\r\n if name_entry.get() != '' and name_entry.get() != '\\n' and self.database.check_group_by_name(name_entry.get()) is not True:\r\n self.database.add_group(name_entry.get())\r\n self.refresh_groups()\r\n new_macro.destroy()\r\n else:\r\n status_label.config(text=\"Name is missing or group already exist\")\r\n\r\n finalize_button = tk.Button(new_macro, text=\"Finish\", command=validate_button)\r\n finalize_button.pack()\r\n\r\n def refresh_records(self, group_id):\r\n for items in self.right_container.winfo_children():\r\n items.destroy()\r\n\r\n if group_id > -1:\r\n self.populate_records(group_id)\r\n else:\r\n self.macro_label.config(text=\"\")\r\n\r\n def refresh_groups(self):\r\n for items in self.marco_group_listbox.winfo_children():\r\n items.destroy()\r\n self.populate_groups(self.marco_group_listbox)\r\n\r\n def add_new_app(self, group_id):\r\n new_app_window = tk.Toplevel()\r\n new_app_window.grab_set()\r\n new_app_window.title(\"New App\")\r\n new_app_window.minsize(250, 100)\r\n new_app_window.maxsize(450, 300)\r\n\r\n name_label = tk.Label(new_app_window, text=\"Name:\")\r\n dir_label = tk.Label(new_app_window, text=\"program path:\")\r\n status_label = tk.Label(new_app_window, text=\"\")\r\n\r\n name_entry = tk.Entry(new_app_window)\r\n\r\n def add_app_address(entry_text):\r\n filename = filedialog.askopenfilename(initialdir=\"/\",\r\n title=\"Select File\",\r\n filetypes=((\"executables\", \"*exe\"), (\"all files\", \"*\")))\r\n app_path = os.path.basename(filename)\r\n\r\n if app_path != '' and app_path != \"\\n\":\r\n entry_text.set(filename)\r\n\r\n dir_frame = tk.Frame(new_app_window)\r\n dir_address = tk.StringVar()\r\n bound_app_address = partial(add_app_address,dir_address)\r\n dir_entry = tk.Entry(dir_frame, textvariable=dir_address)\r\n dir_button = tk.Button(dir_frame, text=\">\",command=bound_app_address)\r\n\r\n name_label.pack()\r\n name_entry.pack()\r\n\r\n dir_label.pack()\r\n dir_frame.pack()\r\n dir_entry.pack(side=tk.LEFT)\r\n dir_button.pack(side=tk.RIGHT)\r\n\r\n status_label.pack()\r\n\r\n def validate_button():\r\n if name_entry.get() != '' and name_entry.get() != '\\n' and \\\r\n self.database.check_record(group_id, dir_entry.get()) is not True:\r\n self.database.add_record(name_entry.get(), group_id, 'A', dir_entry.get())\r\n self.refresh_records(group_id)\r\n new_app_window.destroy()\r\n else:\r\n status_label.config(text=\"Name or path for application is missing or already exist in this macro group\")\r\n\r\n finalize_button = tk.Button(new_app_window, text=\"Finish\", command=validate_button)\r\n finalize_button.pack(ipadx=5)\r\n\r\n def add_new_link(self, group_id):\r\n new_link_window = tk.Toplevel()\r\n new_link_window.grab_set()\r\n new_link_window.title(\"New link\")\r\n 
new_link_window.minsize(250, 100)\r\n new_link_window.maxsize(450, 300)\r\n\r\n name_label = tk.Label(new_link_window, text=\"Name:\")\r\n dir_label = tk.Label(new_link_window, text=\"url path:\")\r\n status_label = tk.Label(new_link_window, text=\"\")\r\n name_entry = tk.Entry(new_link_window)\r\n dir_entry = tk.Entry(new_link_window)\r\n name_label.pack()\r\n name_entry.pack()\r\n dir_label.pack()\r\n dir_entry.pack()\r\n status_label.pack()\r\n\r\n def validate_button():\r\n if name_entry.get() != '' and name_entry.get() != '\\n' and \\\r\n self.database.check_record(group_id, dir_entry.get()) is not True:\r\n self.database.add_record(name_entry.get(), group_id, 'L', dir_entry.get())\r\n self.refresh_records(group_id)\r\n new_link_window.destroy()\r\n else:\r\n status_label.config(text=\"Name or path for the link is missing or already exist in this macro group\")\r\n\r\n finalize_button = tk.Button(new_link_window, text=\"Finish\", command=validate_button)\r\n finalize_button.pack(ipadx=5)\r\n\r\n def edit_app(self, group_id, id):\r\n new_top_window = tk.Toplevel()\r\n new_top_window.grab_set()\r\n\r\n db_result = self.database.get_record(id,group_id)\r\n name = db_result[0][1]\r\n address = db_result[0][4]\r\n\r\n new_top_window.title(f\"Editing {name}\")\r\n new_top_window.minsize(350, 200)\r\n\r\n entry_name = tk.StringVar()\r\n entry_name.set(name)\r\n\r\n entry_address = tk.StringVar()\r\n entry_address.set(address)\r\n\r\n name_label = tk.Label(new_top_window, text=\"Name:\")\r\n name_entry = tk.Entry(new_top_window,textvariable=entry_name)\r\n\r\n dir_label = tk.Label(new_top_window, text=\"program path:\")\r\n dir_frame = tk.Frame(new_top_window)\r\n dir_entry = tk.Entry(dir_frame, textvariable=entry_address)\r\n dir_entry.pack(side=tk.LEFT)\r\n\r\n def add_app_address(entry_text):\r\n filename = filedialog.askopenfilename(initialdir=\"/\",\r\n title=\"Select File\",\r\n filetypes=((\"executables\", \"*exe\"), (\"all files\", \"*\")))\r\n app_path = os.path.basename(filename)\r\n\r\n if app_path != '' and app_path != \"\\n\":\r\n entry_text.set(filename)\r\n\r\n bound_app_address = partial(add_app_address,entry_address)\r\n dir_entry_button = tk.Button(dir_frame, text=\"O\",command=bound_app_address)\r\n dir_entry_button.pack(side=tk.RIGHT)\r\n\r\n status_label = tk.Label(new_top_window, text=\"\")\r\n\r\n name_label.pack()\r\n name_entry.pack()\r\n dir_label.pack()\r\n dir_frame.pack()\r\n status_label.pack()\r\n\r\n def validate_button():\r\n if name_entry.get() != '' and name_entry.get() != '\\n' and dir_entry.get() != '' and dir_entry.get() != '\\n':\r\n self.database.edit_record_address(id, group_id,dir_entry.get())\r\n self.database.edit_record_name(id, group_id,name_entry.get())\r\n self.refresh_groups()\r\n new_top_window.destroy()\r\n else:\r\n status_label.config(text=\"Name or address already exist or is empty\")\r\n finalize_button = tk.Button(new_top_window, text=\"Finish\", command=validate_button)\r\n finalize_button.pack(ipadx=5)\r\n\r\n def edit_link(self, group_id, id):\r\n new_top_window = tk.Toplevel()\r\n new_top_window.grab_set()\r\n db_result = self.database.get_record(id, group_id)\r\n name = db_result[0][1]\r\n address = db_result[0][4]\r\n entry_name = tk.StringVar()\r\n entry_name.set(name)\r\n entry_address = tk.StringVar()\r\n entry_address.set(address)\r\n new_top_window.title(f\"Editing {name}\")\r\n new_top_window.minsize(350, 200)\r\n name_label = tk.Label(new_top_window, text=\"Name:\")\r\n dir_label = tk.Label(new_top_window, text=\"program 
path:\")\r\n status_label = tk.Label(new_top_window, text=\"\")\r\n name_entry = tk.Entry(new_top_window,\r\n textvariable=entry_name)\r\n dir_entry = tk.Entry(new_top_window,\r\n textvariable=entry_address)\r\n name_label.pack()\r\n name_entry.pack()\r\n dir_label.pack()\r\n dir_entry.pack()\r\n status_label.pack()\r\n\r\n def validate_button():\r\n if name_entry.get() != '' and name_entry.get() != '\\n' and dir_entry.get() != '' and dir_entry.get() != '\\n':\r\n self.database.edit_record_address(id, group_id, dir_entry.get())\r\n self.database.edit_record_name(id, group_id, name_entry.get())\r\n self.refresh_groups()\r\n new_top_window.destroy()\r\n else:\r\n status_label.config(text=\"Name or address already exist or is empty\")\r\n finalize_button = tk.Button(new_top_window, text=\"Finish\", command=validate_button)\r\n finalize_button.pack(ipadx=5)\r\n\r\n def edit_group(self, group_id):\r\n new_top_window = tk.Toplevel()\r\n new_top_window.grab_set()\r\n name = self.database.find_group_name(group_id)[0]\r\n new_top_window.title(f\"{name}\")\r\n new_top_window.minsize(250, 100)\r\n new_top_window.maxsize(450, 300)\r\n\r\n name_label = tk.Label(new_top_window, text=\"Name:\")\r\n name_var = tk.StringVar()\r\n name_var.set(name)\r\n name_entry = tk.Entry(new_top_window,textvariable=name_var)\r\n name_label.pack()\r\n name_entry.pack()\r\n\r\n status_label = tk.Label(new_top_window, text=\"\")\r\n status_label.pack()\r\n\r\n def validate_button():\r\n if name_entry.get() != '' and name_entry.get() != '\\n' and self.database.check_group_by_name(name_entry.get()) is not True:\r\n self.database.edit_group_name(group_id,name_entry.get())\r\n self.refresh_groups()\r\n new_top_window.destroy()\r\n else:\r\n status_label.config(text=\"Group name already exist or is empty\")\r\n\r\n finalize_button = tk.Button(new_top_window, text=\"Finish\", command=validate_button)\r\n finalize_button.pack(ipadx=5)\r\n\r\n def delete_record(self,group_id, id):\r\n self.database.delete_record(id, group_id)\r\n self.refresh_records(group_id)\r\n\r\n def activate_address(self, id, group_id, type):\r\n if type == 'A':\r\n record = self.database.get_record(id,group_id)\r\n os.startfile(record[0][4])\r\n elif type == 'L':\r\n record = self.database.get_record(id, group_id)\r\n webbrowser.open(record[0][4])\r\n else:\r\n print(f\"{type} has not been implemented\")\r\n\r\n def activate_group_address(self, group_id, type):\r\n if type == 'A':\r\n records = self.database.get_records_by_type(type,group_id)\r\n for app in records:\r\n os.startfile(app[4])\r\n elif type == 'L':\r\n records = self.database.get_records_by_type(type, group_id)\r\n for link in records:\r\n webbrowser.open(link[4])\r\n time.sleep(2)\r\n else:\r\n pass\r\n\r\n def delete_group(self, id):\r\n self.database.delete_group(id)\r\n self.refresh_groups()\r\n self.refresh_records(-1)\r\n\r\n\r\nview = ViewGui()\r\nview.minsize(550, 350)\r\nview.mainloop()\r\n","sub_path":"src/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":20867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"498135433","text":"from django.contrib.gis.db import models\nfrom django.contrib.auth.models import User\nfrom localground.apps.site.models import Base\nfrom datetime import datetime\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.contenttypes import fields\nfrom django.conf import settings\n\n\nclass BasePermissions(models.Model):\n\n \"\"\"\n Abstract base class for media 
groups (Project and View objects).\n \"\"\"\n access_authority = models.ForeignKey('ObjectAuthority',\n db_column='view_authority',\n verbose_name='Sharing')\n access_key = models.CharField(max_length=16, null=True, blank=True)\n users = fields.GenericRelation('UserAuthorityObject')\n\n def _has_user_permissions(self, user, authority_id):\n # anonymous or null users don't have user-level permissions:\n if user is None or not user.is_authenticated():\n return False\n\n # object owners have blanket view/edit/manage user-level permissions:\n if self.owner == user:\n return True\n\n # users with privileges which are greater than or equal to\n # the authority_id have user-level permisisons:\n return len(self.users\n .filter(user=user)\n .filter(authority__id__gte=authority_id)\n ) > 0\n\n def can_view(self, user=None, access_key=None):\n # projects and views marked as public are viewable:\n if self.access_authority.id == ObjectAuthority.PUBLIC:\n return True\n\n # projects and views marked as \"PUBLIC_WITH_LINK\" that provide\n # the correct access_key are viewable:\n elif self.access_authority.id == ObjectAuthority.PUBLIC_WITH_LINK \\\n and self.access_key == access_key:\n return True\n\n # projects which are accessible by the user are viewable:\n else:\n return self._has_user_permissions(user, UserAuthority.CAN_VIEW)\n\n def can_edit(self, user):\n return self._has_user_permissions(user, UserAuthority.CAN_EDIT)\n\n def can_manage(self, user):\n return self._has_user_permissions(user, UserAuthority.CAN_MANAGE)\n\n def share_url(self):\n return '/profile/{0}/{1}/share/'.format(\n self.model_name_plural,\n self.id)\n\n class Meta:\n abstract = True\n app_label = 'site'\n\n\nclass ObjectAuthority(models.Model):\n\n \"\"\"\n Describes the permissions configuration of any class inheriting from\n BasePermissions (either private, public-with-key, or public)\n \"\"\"\n PRIVATE = 1\n PUBLIC_WITH_LINK = 2\n PUBLIC = 3\n\n name = models.CharField(max_length=255, blank=True)\n description = models.CharField(max_length=1000, blank=True, null=True)\n\n def __unicode__(self):\n return self.name\n\n class Meta:\n app_label = 'site'\n\n\nclass UserAuthority(models.Model):\n\n \"\"\"\n Used in conjunction with ObjectAuthority to assign user-level permissions\n (special cases) which are beyond the group's baseline permissions. 
There\n are 3 user-level permission statuses: \"can view,\" \"can edit,\" and\n \"can manage.\"\n \"\"\"\n CAN_VIEW = 1\n CAN_EDIT = 2\n CAN_MANAGE = 3\n\n name = models.CharField(max_length=255, blank=True)\n\n def __unicode__(self):\n return self.name\n\n class Meta:\n app_label = 'site'\n\n\nclass UserAuthorityObject(models.Model):\n\n \"\"\"\n Model that assigns a particular User (auth_user) and UserAuthority object to\n a particular Group.\n \"\"\"\n user = models.ForeignKey(settings.AUTH_USER_MODEL)\n authority = models.ForeignKey('UserAuthority')\n time_stamp = models.DateTimeField(default=datetime.now)\n granted_by = models.ForeignKey(\n 'auth.User',\n related_name=\"%(app_label)s_%(class)s_related\")\n\n # Following fields are required for using GenericForeignKey\n content_type = models.ForeignKey(ContentType)\n object_id = models.PositiveIntegerField()\n object = fields.GenericForeignKey()\n\n def to_dict(self):\n return {\n 'username': self.user.username,\n 'authority_id': self.authority.id,\n 'authority': self.authority.name\n }\n\n def __unicode__(self):\n return self.user.username\n\n # Leveraging parent project / snapshot's can_edit function\n def can_view(self, user, access_key=None):\n # to view someone else's privs, you need edit privs:\n return self.object.can_edit(user) or self.user == user\n\n def can_edit(self, user, authority_id):\n # delegate to can_manage:\n return self.object.can_manage(user) or \\\n (self.user == user and self.authority.id > authority_id)\n\n def can_delete(self, user):\n return self.object.can_manage(user) or self.user == user\n\n class Meta:\n app_label = 'site'\n\n\nclass ObjectUserPermissions(models.Model):\n user = models.ForeignKey(settings.AUTH_USER_MODEL,\n db_column='user_id', on_delete=models.DO_NOTHING)\n user_authority = models.ForeignKey(\n 'UserAuthority',\n db_column='authority_id',\n on_delete=models.DO_NOTHING)\n\n class Meta:\n abstract = True\n app_label = 'site'\n\n\nclass AudioUser(ObjectUserPermissions):\n audio = models.ForeignKey(\n 'Audio',\n db_column='audio_id',\n on_delete=models.DO_NOTHING,\n related_name='authuser')\n\n class Meta:\n app_label = 'site'\n managed = False\n db_table = 'v_private_audio'\n\n\nclass PhotoUser(ObjectUserPermissions):\n photo = models.ForeignKey(\n 'Photo',\n db_column='photo_id',\n on_delete=models.DO_NOTHING,\n related_name='authuser')\n\n class Meta:\n app_label = 'site'\n managed = False\n db_table = 'v_private_photos'\n\n\nclass VideoUser(ObjectUserPermissions):\n video = models.ForeignKey(\n 'Video',\n db_column='video_id',\n on_delete=models.DO_NOTHING,\n related_name='authuser')\n\n class Meta:\n app_label = 'site'\n managed = False\n db_table = 'v_private_videos'\n\n\nclass MarkerUser(ObjectUserPermissions):\n marker = models.ForeignKey(\n 'Marker',\n db_column='marker_id',\n on_delete=models.DO_NOTHING,\n related_name='authuser')\n\n class Meta:\n app_label = 'site'\n managed = False\n db_table = 'v_private_markers'\n\n\nclass PrintUser(ObjectUserPermissions):\n print_obj = models.ForeignKey(\n 'Print',\n db_column='print_id',\n on_delete=models.DO_NOTHING,\n related_name='authuser')\n\n class Meta:\n app_label = 'site'\n managed = False\n db_table = 'v_private_prints'\n\nclass MapImageUser(ObjectUserPermissions):\n mapimage = models.ForeignKey(\n 'MapImage',\n db_column='mapimage_id',\n on_delete=models.DO_NOTHING,\n related_name='authuser')\n\n class Meta:\n app_label = 'site'\n managed = False\n db_table = 'v_private_mapimages'\n\n\nclass 
SnapshotUser(ObjectUserPermissions):\n snapshot = models.ForeignKey(\n 'Snapshot',\n db_column='snapshot_id',\n on_delete=models.DO_NOTHING,\n related_name='authuser')\n\n class Meta:\n app_label = 'site'\n managed = False\n db_table = 'v_private_views'\n\n\nclass ProjectUser(ObjectUserPermissions):\n project = models.ForeignKey('Project', db_column='project_id',\n on_delete=models.DO_NOTHING,\n related_name='authuser')\n\n class Meta:\n app_label = 'site'\n managed = False\n db_table = 'v_private_projects'\n\n\nclass FormUser(ObjectUserPermissions):\n form = models.ForeignKey('Form', db_column='form_id',\n on_delete=models.DO_NOTHING,\n related_name='authuser')\n\n class Meta:\n app_label = 'site'\n managed = False\n db_table = 'v_private_forms'\n\n\nclass PresentationUser(ObjectUserPermissions):\n presentation = models.ForeignKey(\n 'Presentation',\n db_column='presentation_id',\n on_delete=models.DO_NOTHING,\n related_name='authuser')\n\n class Meta:\n app_label = 'site'\n managed = False\n db_table = 'v_private_presentations'\n \nclass LayerUser(ObjectUserPermissions):\n layer = models.ForeignKey(\n 'Layer',\n db_column='layer_id',\n on_delete=models.DO_NOTHING,\n related_name='authuser')\n\n class Meta:\n app_label = 'site'\n managed = False\n db_table = 'v_private_layers'\n","sub_path":"apps/site/models/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":8641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"496727210","text":"# sheldon woodward\n# 4/13/18\n\n\"\"\"Tree of all farmer-wolf-goat-cabbage possibilities.\"\"\"\n\n\nclass FWGCTree:\n states = []\n solution = None\n\n def __init__(self, depth=0, state=None, history=''):\n self.depth = depth\n self.history = history\n self.nodes = []\n # no state given\n if state is None:\n state = [False, False, False, False]\n # check for solution\n if all(state):\n FWGCTree.solution = history\n # check for loop state\n FWGCTree.states.append(state)\n # generate next nodes\n for i in range(4):\n # generate state\n new_state = state[:]\n if i > 0 and new_state[0] == new_state[i]:\n new_state[i] = not new_state[i]\n new_state[0] = not new_state[0]\n # add to tree\n if not FWGCTree.bad_state(new_state):\n self.nodes.append(FWGCTree(depth + 1, new_state, history + ('N', 'W', 'G', 'C')[i]))\n\n @staticmethod\n def bad_state(state):\n # pre-existing state\n if state in FWGCTree.states:\n return True\n # wolf alone with goat\n if state[0] != state[1] and state[1] == state[2]:\n return True\n # goat alone with cabbage\n if state[0] != state[2] and state[2] == state[3]:\n return True\n return False\n","sub_path":"river_crossing/FWGCTree.py","file_name":"FWGCTree.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"44453837","text":"# Write a non-fruitful function drawPoly(someturtle, somesides, somesize) which \r\n# makes a turtle draw a regular polygon. 
When called with drawPoly(tess, 8, 50), \r\n# it will draw a shape like this\r\n\r\nimport turtle\r\n\r\ndef drawPoly(t, ss, sz):\r\n \"\"\"Make turtle t draw a polygon with ss number of sides of side sz.\"\"\"\r\n for i in range(ss):\r\n t.forward(sz)\r\n t.left(float(360/ss))\r\n\r\nwn = turtle.Screen()\r\n\r\ntess = turtle.Turtle()\r\ntess.pensize(3)\r\n\r\ndrawPoly(tess, 8, 50)\r\n\r\nwn.exitonclick()","sub_path":"polygon_function.py","file_name":"polygon_function.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"151871203","text":"import webob\n\nfrom wormhole import exception\nfrom wormhole import wsgi\n\nfrom wormhole.common import log\nfrom wormhole.common import importutils\nfrom wormhole.common import utils\nfrom wormhole.i18n import _\nfrom wormhole.lxc_client import LXCClient\nfrom wormhole.net_util import network\n\nfrom wormhole.tasks import addtask\nfrom wormhole.tasks import FAKE_SUCCESS_TASK, FAKE_ERROR_TASK\n\nfrom wormhole.state import *\n\nimport six\nimport os\nimport base64\nimport json\n\nimport time\nimport sys, traceback\n\nfrom oslo.config import cfg\n\ncontainer_opts = [\n cfg.StrOpt('container_volume_link_dir',\n default=\"/var/lib/wormhole/.by-volume-id\",\n help='The dir containing symbolic files named volume-id targeting device path.'),\n cfg.StrOpt('container_driver',\n default=\"lxc\",\n help='The container manager'),\n]\n\nCONF = cfg.CONF\nCONF.register_opts(container_opts)\n\nLOG = log.getLogger(__name__)\n\nWORMHOLE_SETTING_FILE = '/var/lib/wormhole/settings.json'\n\ndef volume_link_path(volume_id):\n return os.path.sep.join([CONF.get('container_volume_link_dir'), volume_id])\n\ndef container_root_path():\n CONTAINER_LINK_NAME = \"data-device-link\"\n return volume_link_path(CONTAINER_LINK_NAME)\n\ndef check_dev_exist(dev_path):\n \"\"\" check /dev/sde exists by `fdisk'. Note `lsblk' can't guarantee that. 
\"\"\"\n disk_info, _ignore_err = utils.trycmd('fdisk', '-l', dev_path)\n return disk_info.strip() != ''\n\ndef load_settings():\n return json.load(open(WORMHOLE_SETTING_FILE))\n\ndef save_settings(settings):\n with open(WORMHOLE_SETTING_FILE, 'w') as f:\n f.write(json.dumps(settings))\n\nclass ContainerController(wsgi.Application):\n\n def __init__(self):\n self._manager = None\n self._container = None\n self._ns_created = False\n vif_class = importutils.import_class(CONF.lxc.vif_driver)\n self.vif_driver = vif_class()\n self._settings = None\n self._setup_volume_mapping()\n super(ContainerController, self).__init__()\n\n def _setup_volume_mapping(self):\n self._volume_mapping = {}\n self._mount_path = {}\n self.root_dev_path = os.path.realpath(container_root_path())\n\n link_dir = CONF.get('container_volume_link_dir')\n\n if not os.path.exists(link_dir):\n os.makedirs(link_dir)\n return\n\n for link in os.listdir(link_dir):\n link_path = volume_link_path(link)\n if os.path.islink(link_path):\n realpath = os.path.realpath(link_path)\n if realpath.startswith(\"/dev/\"):\n self._volume_mapping[link] = realpath\n LOG.info(_(\"Found volume mapping %s ==> %s\"),\n link, self._volume_mapping[link])\n\n def _discovery_use_eth(self):\n res = self.manager.execute(self.container['id'], '/sbin/ip', 'link', 'show')\n _found_dev = set()\n for line in res.split('\\n'):\n if line and not line.startswith(' '):\n _, n = line.split()[:2]\n _found_dev.add(n.strip(':').split('@')[0])\n return _found_dev\n\n def _available_eth_name(self):\n net_prefix = 'eth'\n used_eths = self._discovery_use_eth()\n i = 0\n while 1:\n name = net_prefix + str(i)\n if name not in used_eths:\n LOG.debug(_(\"Available net name ==> %s\"), name)\n return name\n i += 1\n\n @property\n def manager(self):\n if self._manager is None:\n self._manager = LXCClient()\n return self._manager\n\n @property\n def container(self):\n if self._container is None:\n containers = self.manager.list(all=True)\n if not containers:\n raise exception.ContainerNotFound()\n if len(containers) > 1:\n LOG.warn(_(\"Have multiple(%d) containers: %s !\"), len(containers), containers)\n self._container = { \"id\" : containers[0][\"id\"],\n \"name\" : containers[0][\"name\"]}\n return self._container\n\n def _attach_bdm(self, block_device_info):\n \"\"\" Attach volume, setup symbolic for volume id mapping to device name.\n \"\"\"\n if block_device_info:\n for bdm in block_device_info.get('block_device_mapping', []):\n LOG.debug(_(\"Attach block device mapping %s\"), bdm)\n mount_device = bdm['mount_device']\n volume_id = bdm['connection_info']['data']['volume_id']\n self._add_mapping(volume_id, mount_device, bdm.get('real_device', ''))\n\n def _update_bdm(self, block_device_info):\n \"\"\" Update mapping info. 
\"\"\"\n if block_device_info:\n new_volume_mapping = {}\n for bdm in block_device_info.get('block_device_mapping', []):\n LOG.debug(_(\"Attach block device mapping %s\"), bdm)\n mount_device = bdm['mount_device']\n size_in_g = bdm.get('size', \"0\")\n volume_id = bdm['connection_info']['data']['volume_id']\n new_volume_mapping[volume_id] = {\"mount_device\" : mount_device, \"size\": str(size_in_g) + \"G\"}\n\n all_devices = utils.list_device()\n to_remove_volumes = set(self._volume_mapping) - set(new_volume_mapping)\n\n for comm_volume in set(self._volume_mapping).intersection(new_volume_mapping):\n _path = self._volume_mapping[comm_volume]\n _size = new_volume_mapping[comm_volume]['size']\n # If the device not exist or size not match, then remove it.\n if not check_dev_exist(_path) or \\\n any([d['name'] == _path and d['size'] == _size for d in all_devices]):\n LOG.info(_(\"Volume %s doesn't match, update it.\"), comm_volume)\n to_remove_volumes.add(comm_volume)\n\n if to_remove_volumes:\n LOG.info(_(\"Possible detach volume when vm is stopped:%s\"), to_remove_volumes)\n\n for remove in to_remove_volumes:\n self._remove_mapping(remove, ensure=False)\n\n to_add_volumes = set(new_volume_mapping) - set(self._volume_mapping)\n\n if to_add_volumes:\n LOG.info(_(\"Possible attach volume when vm is stopped\"))\n new_devices = [d for d in all_devices if d['name'] not in self._volume_mapping.values()]\n\n ## group by size\n for size in set([d['size'] for d in new_devices]):\n _devices = sorted([d['name'] for d in new_devices if d['size'] == size])\n _to_add_volumes = []\n for _s in (size, '0G'):\n _to_add_volumes.extend(sorted([v for v in to_add_volumes if new_volume_mapping[v]['size'] == _s]))\n LOG.debug(_(\"Size: %s, new_devices:%s, added_volums:%s\"),\n size, _devices, _to_add_volumes)\n for add, new_device in zip(_to_add_volumes, _devices):\n self._add_mapping(add, new_volume_mapping[add]['mount_device'], new_device)\n\n def plug_vifs(self, network_info):\n \"\"\"Plug VIFs into networks.\"\"\"\n instance = self.container['id']\n for vif in network_info:\n LOG.debug(_(\"Plug vif %s\"), vif)\n self.vif_driver.plug(vif, instance)\n\n def _find_container_pid(self, container_id):\n n = 0\n while True:\n # NOTE(samalba): We wait for the process to be spawned inside the\n # container in order to get the the \"container pid\". This is\n # usually really fast. 
To avoid race conditions on a slow\n # machine, we allow 10 seconds as a hard limit.\n if n > 20:\n return\n info = self.manager.inspect_container(container_id)\n if info:\n pid = info['State']['Pid']\n # Pid is equal to zero if it isn't assigned yet\n if pid:\n return pid\n time.sleep(0.5)\n n += 1\n\n def _create_ns(self):\n container_id = self.container['id']\n netns_path = '/var/run/netns'\n if not os.path.exists(netns_path):\n utils.execute('mkdir', '-p', netns_path, run_as_root=True)\n nspid = self._find_container_pid(container_id)\n if not nspid:\n msg = _('Cannot find any PID under container \"{0}\"')\n raise RuntimeError(msg.format(container_id))\n netns_path = os.path.join(netns_path, container_id)\n utils.execute(\n 'ln', '-sf', '/proc/{0}/ns/net'.format(nspid),\n '/var/run/netns/{0}'.format(container_id),\n run_as_root=True)\n self._ns_created = True\n\n def _attach_vifs(self, network_info):\n \"\"\"Plug VIFs into container.\"\"\"\n if not network_info:\n return\n container_id = self.container['id']\n instance = container_id\n\n for idx, vif in enumerate(network_info):\n new_remote_name = self._available_eth_name()\n self.vif_driver.attach(vif, instance, container_id, new_remote_name)\n\n def _get_repository(self, image_name):\n\n return \"\"\n\n def create(self, request, image_name, image_id, root_volume_id=None, network_info={},\n block_device_info={}, inject_files=[], admin_password=None):\n \"\"\" create the container. \"\"\"\n if root_volume_id:\n # Create VM from volume, create a symbolic link for the device.\n LOG.info(_(\"Create new container from volume %s\"), root_volume_id)\n self._add_root_mapping(root_volume_id)\n\n def _do_create():\n if admin_password is not None:\n self._inject_password(admin_password)\n if inject_files:\n self._inject_files(inject_files, plain=True)\n if block_device_info:\n try:\n self._attach_bdm(block_device_info)\n except Exception as e:\n LOG.exception(e)\n try:\n container = self.container\n LOG.warn(_(\"Already a container exists\"))\n # Do the work anyway\n _do_create()\n return FAKE_SUCCESS_TASK\n except exception.ContainerNotFound:\n repository = self._get_repository(image_name)\n #local_image_name = repository + ':' + image_id\n local_image_name = image_id\n\n def _do_create_after_download_image(name):\n LOG.debug(_(\"Create container from image %s\"), name)\n self.manager.create_container(name, network_disabled=True)\n _do_create()\n\n if self.manager.images(name=local_image_name):\n LOG.debug(_(\"Repository = %s already exists\"), local_image_name)\n _do_create_after_download_image(local_image_name)\n return FAKE_SUCCESS_TASK\n else:\n def _do_pull_image():\n name = local_image_name\n\n try:\n import re\n m = re.search(r'\\d+\\.\\d+\\.\\d+\\.\\d+', repository)\n if m:\n utils.execute('ping', '-W', '3', '-c', '1', m.group())\n LOG.debug(_(\"Starting pull image repository=%s:%s\"), repository, image_id)\n resp = self.manager.pull(repository, tag=image_id, insecure_registry=True)\n LOG.debug(_(\"Done pull image repository=%s:%s, resp %s\"), repository, image_id, resp)\n if any(resp.find(s)!=-1 for s in ['\"error\":', image_name + \" not found\"]):\n LOG.warn(_(\"Can't pull image, use the local image with name=%s\"), image_name)\n name = image_name\n except Exception as e:\n name = image_name\n LOG.exception(e)\n _do_create_after_download_image(name)\n task = addtask(_do_pull_image)\n LOG.debug(_(\"Pull image task %s\"), task)\n return task\n\n def start(self, request, network_info={}, block_device_info={}):\n \"\"\" Start the 
container. \"\"\"\n container_id = self.container['id']\n LOG.info(_(\"Start container %s network_info %s block_device_info %s\"),\n container_id, network_info, block_device_info)\n if block_device_info:\n try:\n self._update_bdm(block_device_info)\n except Exception as e:\n LOG.exception(e)\n raise\n for bdm in block_device_info.get('block_device_mapping', []):\n LOG.debug(_(\"Attach block device mapping %s\"), bdm)\n mount_device = bdm['mount_device']\n volume_id = bdm['connection_info']['data']['volume_id']\n real_device = bdm.get('real_device', self._volume_mapping[volume_id])\n self.manager.attach_volume(self.container['id'], real_device, mount_device, static=True)\n\n if network_info:\n try:\n self.plug_vifs(network_info)\n except Exception as e:\n msg = _('Cannot setup network for container {}: {}').format(\n self.container['name'],\n repr(traceback.format_exception(*sys.exc_info()))\n )\n LOG.debug(msg, exc_info=True)\n raise exception.ContainerStartFailed(msg)\n self.manager.start(container_id, network_info=network_info)\n self._create_ns()\n self._settings = {\"network_info\": network_info, \"block_device_info\": block_device_info}\n save_settings(self._settings)\n\n def _stop(self, container_id, timeout=5):\n\n msg = 'Stop successfully'\n try:\n msg = self.manager.stop(container_id, min(timeout, 2))\n except Exception as e:\n self.manager.unpause(container_id)\n self.manager.stop(container_id, timeout)\n self._ns_created = False\n self._container = None\n return msg\n\n def _sync(self):\n LOG.debug(_(\"Flush file system buffers\"))\n if hasattr(os, 'sync'):\n os.sync()\n else:\n import ctypes\n libc = ctype.CDLL(\"libc.so.6\")\n libc.sync()\n\n def stop(self, request):\n \"\"\" Stop the container. \"\"\"\n container_id = self.container['id']\n LOG.info(_(\"Stop container %s\"), container_id)\n return self._stop(container_id)\n # No sync by now\n # self._sync()\n\n def _extract_dns_entries(self, network_info):\n dns = []\n if network_info:\n for net in network_info:\n subnets = net['network'].get('subnets', [])\n for subnet in subnets:\n dns_entries = subnet.get('dns', [])\n for dns_entry in dns_entries:\n if 'address' in dns_entry:\n dns.append(dns_entry['address'])\n return dns if dns else None\n\n def unplug_vifs(self, network_info):\n \"\"\"Unplug VIFs from networks.\"\"\"\n instance = self.container['id']\n for vif in network_info:\n self.vif_driver.unplug(instance, vif)\n\n def restart(self, request, network_info={}, block_device_info={}):\n \"\"\" Restart the container. 
\"\"\"\n # return webob.Response(status_int=204)\n container_id = self.container['id']\n LOG.info(_(\"Restart container %s, network_info:%s, bdm:%s\"),\n container_id, network_info, block_device_info)\n self._stop(container_id)\n try:\n network.teardown_network(container_id)\n if network_info:\n self.unplug_vifs(network_info)\n netns_file = '/var/run/netns/{0}'.format(container_id)\n # if os.path.exists(netns_file):\n # os.remove(netns_file)\n except Exception as e:\n LOG.warning(_('Cannot destroy the container network'\n ' during reboot {0}').format(e),\n exc_info=True)\n return\n\n try:\n self.start(request, network_info=network_info)\n except Exception as e:\n LOG.warning(_('Cannot start on reboot: %s'), e,\n exc_info=True)\n return\n\n def _save_interface(self, vif, action='add'):\n if not vif:\n return\n\n if self._settings is None:\n self._settings = load_settings()\n net_info = self._settings.setdefault('network_info', [])\n\n idx = -1\n for i in range(len(net_info)):\n if net_info[i]['id'] == vif['id']:\n idx = i\n break\n if action == 'add':\n if idx == -1:\n net_info.append(vif)\n else:\n net_info[idx] = vif\n save_settings(self._settings)\n elif action == 'del' and idx >= 0:\n net_info.pop(idx)\n save_settings(self._settings)\n\n\n def detach_interface(self, request, vif):\n if vif:\n LOG.debug(_(\"Detach network info %s\"), vif)\n container_id = self.container['id']\n self.vif_driver.unplug(container_id, vif)\n self.manager.remove_interfaces(container_id, [vif])\n self._save_interface(vif, action='del')\n return webob.Response(status_int=200)\n\n def attach_interface(self, request, vif):\n if vif:\n if not self._ns_created:\n self._create_ns()\n LOG.debug(_(\"Attach network info %s\"), vif)\n instance = container_id = self.container['id']\n self.vif_driver.plug(vif, instance)\n new_remote_name = self._available_eth_name()\n self.vif_driver.attach(vif, instance, container_id, new_remote_name)\n self.manager.add_interfaces(container_id, [vif], net_names=[new_remote_name])\n self._save_interface(vif, action='add')\n return webob.Response(status_int=200)\n\n def _inject_files(self, inject_files, plain=False):\n container_id = self.container['id']\n\n try:\n for (path, content_base64) in inject_files:\n # Ensure the parent dir of injecting file exists\n dirname = os.path.dirname(path)\n if not dirname:\n dirname = '/'\n\n filename = os.path.basename(path)\n\n content = content_base64 if plain else base64.b64decode(content_base64)\n LOG.debug(_(\"Inject file %s, content: len = %d, partial = %s\"), path, len(content), content[:30])\n\n # TODO: file already exists in the container, need to backup?\n self.manager.inject_file(container_id, path, content)\n\n except TypeError as e: # invalid base64 encode\n LOG.exception(e)\n raise exception.InjectFailed(path=path, reason=\"contents %s\" % e.message)\n except Exception as e:\n LOG.exception(e)\n raise exception.InjectFailed(path='', reason=repr(e) + str(e.message))\n\n def inject_files(self, request, inject_files):\n self._inject_files(inject_files, plain=True)\n return webob.Response(status_int=200)\n\n\n def _read_file(self, path):\n \"\"\" Read container path content. 
\"\"\"\n return self.manager.read_file(self.container['id'], path)\n\n def _inject_password(self, admin_password):\n \"\"\"S et the root password to admin_passwd\n \"\"\"\n # The approach used here is to copy the password and shadow\n # files from the instance filesystem to local files, make any\n # necessary changes, and then copy them back.\n\n LOG.debug(_(\"Inject admin password admin_passwd=\"))\n admin_user = 'root'\n\n passwd_path = os.path.join('/etc', 'passwd')\n shadow_path = os.path.join('/etc', 'shadow')\n\n passwd_data = self._read_file(passwd_path)\n shadow_data = self._read_file(shadow_path)\n\n new_shadow_data = utils.set_passwd(admin_user, admin_password,\n passwd_data, shadow_data)\n self._inject_files([(shadow_path, new_shadow_data)], plain=True)\n\n def inject_password(self, request, admin_password):\n \"\"\" Modify root password. \"\"\"\n admin_password = base64.b64decode(admin_password)\n self._inject_password(admin_password)\n\n def _add_mapping(self, volume_id, mountpoint, device='', static=True):\n LOG.debug(_(\"Attach volume %s : device %s, mountpoint %s\"), volume_id, device, mountpoint)\n if not device:\n link_file = volume_link_path(volume_id)\n if os.path.islink(link_file):\n device = os.path.realpath(link_file)\n else:\n LOG.warn(_(\"Can't find the device of volume %s when attaching volume\"), volume_id)\n return\n else:\n if not device.startswith(\"/dev/\"):\n device = \"/dev/\" + device\n self._volume_mapping[volume_id] = device\n utils.trycmd('ln', '-sf', device, volume_link_path(volume_id))\n self._mount_path[device] = mountpoint\n if mountpoint != 'none': \n self.manager.attach_volume(self.container['id'], device, mountpoint, static)\n\n def attach_volume(self, request, volume, device, mount_device):\n \"\"\" attach volume. \"\"\"\n self._add_mapping(volume, mount_device, device, static=False)\n return None\n\n def detach_volume(self, request, volume):\n device = self._remove_mapping(volume, static=False)\n return webob.Response(status_int=200)\n\n def _add_root_mapping(self, volume_id):\n self.root_dev_path = os.path.realpath(self.root_dev_path)\n self._add_mapping(volume_id, \"none\", self.root_dev_path)\n\n def _remove_mapping(self, volume_id, ensure=True, static=True):\n link_file = volume_link_path(volume_id)\n if os.path.islink(link_file):\n dev_path = os.path.realpath(link_file)\n # ignore the manager root volume\n self.root_dev_path = os.path.realpath(self.root_dev_path)\n if not dev_path.startswith(self.root_dev_path):\n LOG.debug(_(\"Detach volume %s\"), volume_id)\n if ensure:\n # ensure the device path is not visible in host/container\n if check_dev_exist(dev_path):\n LOG.warn(_(\"Try to delete device %s, but it seems exist.\"), dev_path)\n utils.trycmd('bash', '-c', 'echo 1 > /sys/block/%s/device/delete' % dev_path.replace('/dev/',''))\n os.remove(link_file)\n self._volume_mapping.pop(volume_id)\n self.manager.detach_volume(self.container['id'], dev_path,\n self._mount_path.get(dev_path,''), static)\n\n def create_image(self, request, image_name, image_id):\n \"\"\" Create a image from the container. 
\"\"\"\n repository = self._get_repository(image_name)\n LOG.debug(_(\"Creating image from repo = %s, tag = %s\"), repository, image_id)\n def _create_image_cb():\n LOG.debug(_(\"Pushing image %s\"), repository)\n self.manager.commit(self.container['id'], repository=repository,\n tag=image_id)\n self.manager.push(repository, tag=image_id, insecure_registry=True)\n LOG.debug(_(\"Doing image %s\"), repository)\n task = addtask(_create_image_cb)\n LOG.debug(_(\"Created image task %s\"), task)\n return task\n\n def pause(self, request):\n self.manager.pause(self.container['id'])\n\n def unpause(self, request):\n self.manager.unpause(self.container['id'])\n\n def console_output(self, request):\n return { \"logs\": self.manager.logs(self.container['id']) }\n\n def status(self, request):\n try:\n images = self.manager.images()\n if images:\n containers = self.manager.list(all=True)\n if containers:\n status = containers[0]['status']\n code = ([k for k in STATE_MAP if STATE_MAP[k] == status.upper()]\n or [UNKNOWN])[0]\n else:\n code = CONTAINER_NOT_FOUND\n else: code = IMAGE_NOT_EXIST\n except Exception as e:\n code = MANAGER_NOT_START\n LOG.error(repr(traceback.format_exception(*sys.exc_info())))\n return { \"status\":\n { \"code\" : code,\n \"message\": STATE_MAP[code]\n }\n }\n\n def image_info(self, request):\n image_name = request.GET.get('image_name')\n image_id = request.GET.get('image_id')\n re = self.manager.images(name=self._get_repository(image_name) + ':' + image_id)\n return {\"name\" : image_name, \"id\": image_id, \"size\" : re[0]['size'] if re else 0}\n\ndef create_router(mapper):\n controller = ContainerController()\n mapper.connect('/container/create',\n controller=controller,\n action='create',\n conditions=dict(method=['POST']))\n mapper.connect('/container/start',\n controller=controller,\n action='start',\n conditions=dict(method=['POST']))\n mapper.connect('/container/stop',\n controller=controller,\n action='stop',\n conditions=dict(method=['POST']))\n mapper.connect('/container/restart',\n controller=controller,\n action='restart',\n conditions=dict(method=['POST']))\n\n mapper.connect('/container/attach-interface',\n controller=controller,\n action='attach_interface',\n conditions=dict(method=['POST']))\n mapper.connect('/container/detach-interface',\n controller=controller,\n action='detach_interface',\n conditions=dict(method=['POST']))\n\n mapper.connect('/container/inject-files',\n controller=controller,\n action='inject_files',\n conditions=dict(method=['POST']))\n mapper.connect('/container/admin-password',\n controller=controller,\n action='inject_password',\n conditions=dict(method=['POST']))\n\n mapper.connect('/container/detach-volume',\n controller=controller,\n action='detach_volume',\n conditions=dict(method=['POST']))\n mapper.connect('/container/attach-volume',\n controller=controller,\n action='attach_volume',\n conditions=dict(method=['POST']))\n\n mapper.connect('/container/create-image',\n controller=controller,\n action='create_image',\n conditions=dict(method=['POST']))\n\n mapper.connect('/container/pause',\n controller=controller,\n action='pause',\n conditions=dict(method=['POST']))\n mapper.connect('/container/unpause',\n controller=controller,\n action='unpause',\n conditions=dict(method=['POST']))\n\n mapper.connect('/container/console-output',\n controller=controller,\n action='console_output',\n conditions=dict(method=['GET']))\n mapper.connect('/container/status',\n controller=controller,\n action='status',\n 
conditions=dict(method=['GET']))\n mapper.connect('/container/image-info',\n controller=controller,\n action='image_info',\n conditions=dict(method=['GET']))\n","sub_path":"wormhole/container.py","file_name":"container.py","file_ext":"py","file_size_in_byte":27965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"469578249","text":"import random, urllib.request\r\ndef downloadImage(url):\r\n name = random.randrange(1, 100)\r\n name = str(name) + '.jpg'\r\n urllib.request.urlretrieve(url, name)\r\nwhile True:\r\n\r\n image_url = input(\"Введите url картинки >> \")\r\n\r\n\r\n\r\n downloadImage(image_url)\r\n","sub_path":"urlImageMaster.py","file_name":"urlImageMaster.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"207075925","text":"import os\r\n\r\nimport CommonTips\r\n\r\nsub_model_name = 'Dump照片'\r\n\r\nsave_path = \"C:\\\\Users\\\\D22433\\\\Desktop\\\\DumpPhotos\\\\\"\r\nremote_path = \"/sdcard/AlgoTest/\"\r\n\r\npull_photo_2_local = \"adb pull \"+remote_path+\" \"+save_path\r\ndel_local_photos = \"del /Q \"+save_path+\"\\\\*\"\r\ndel_remote_photos = 'adb shell \"rm -rf '+remote_path+'/*\"'\r\n\r\nop_info = '''--------------Dump照片--------------\r\n| 【0】:从安卓端拉取dump照片至本地\r\n| 【1】:删除本地dump照片\r\n| 【2】:删除安卓dump照片\r\n| 【3】:删除本地&安卓dump照片''' + CommonTips.tip_ops+'-----------------------------------'\r\n\r\n\r\ndef main():\r\n print(op_info)\r\n while True:\r\n cmd = input('('+sub_model_name+')'+CommonTips.tip_input_cmd)\r\n\r\n if cmd.isdigit():\r\n cmd = int(cmd)\r\n if cmd == 0:\r\n os.system(pull_photo_2_local)\r\n elif cmd == 1:\r\n os.system(del_local_photos)\r\n elif cmd == 2:\r\n os.system(del_remote_photos)\r\n elif cmd == 3:\r\n os.system(del_local_photos)\r\n os.system(del_remote_photos)\r\n else:\r\n print(CommonTips.tip_arg_error)\r\n else:\r\n if 'h' == cmd.lower():\r\n print(op_info)\r\n elif 'q' == cmd.lower():\r\n print(CommonTips.tip_quit)\r\n break\r\n else:\r\n print(CommonTips.tip_arg_error)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n","sub_path":"DumpPhotos.py","file_name":"DumpPhotos.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"623754140","text":"__author__ = 'Ryan'\n#version 1\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport csv\nfrom time import strftime\nimport sched, time\n\n#get the daily price of SPG hotels in NYC\ndef get_price(inyear,inmonth,inday,outyear,outmonth,outday): # two numbers for month and day; four numbers for year.\n url = \"https://assistive.usablenet.com/tt/www.starwoodhotels.com/preferredguest/search/results/detail.html?localeCode=en_US&\" \\\n \"complexSearchField=New+York+City&skinCode=SPG&numberOfChildren=0&numberOfRooms=1&numberOfAdults=1&\" \\\n \"arrivalDate=\"+str(inmonth)+\"%2F\"+str(inday)+\"%2F\"+str(inyear)+\"&departureDate=\"+str(outmonth)+\"%2F\"+str(outday)+\"%2F\"+str(outyear)+\"&un_jtt_redirect\"\n r = requests.get(url)\n data = r.text\n soup = BeautifulSoup(data)\n hotel_name = []\n hotel_price = []\n info = soup.find_all(class_=\"property\")\n for property in info:\n\n name = property.find('h3').a.string.encode('utf-8')\n print(name)\n hotel_name.append(name)\n\n # grab price data\n if property.find_all(class_='marginLeft20 marginBottom10') == []: # hotel is full\n price = 0\n else:\n price = int(str(property.find_all(class_='marginLeft20 
marginBottom10')[0].find(class_='rateAmount').string)[4:])\n hotel_price.append(price)\n result = sorted(zip(hotel_name,hotel_price))\n print(result)\n return result\n\n#save the result as a csv file\ndef csv_writer(rows):\n with open(strftime(\"%Y-%m-%d %H:%M\")+'.csv', 'w') as fp:\n a = csv.writer(fp, delimiter=',')\n a.writerows(rows)\n\nget_price(2015,11,23,2015,11,24)\n\n#repeatedly execute the function every half hour\ns = sched.scheduler(time.time, time.sleep)\ndef do_something(sc):\n csv_writer(get_price(2015,11,23,2015,11,24))\n sc.enter(1800, 1, do_something, (sc,))\n\ns.enter(1800, 1, do_something, (s,))\ns.run()\n","sub_path":"Grab_data.py","file_name":"Grab_data.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"299675282","text":"import glob\nimport os\nimport queue\nimport threading\nfrom datetime import datetime\n\nimport cv2\n# cv2.setNumThreads(5)\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\nfrom PIL import Image, ImageChops\nfrom skimage import io, transform\nfrom torch.autograd import Variable\nfrom torchvision import transforms # , utils\n\n# from data_loader import RescaleT, SalObjVideoIterable, ToTensorLab\nfrom models import U2NET # full size version 173.6 MB\nfrom models import U2NETP # small version u2net 4.7 MB\nfrom models import U2NETP_short\n\n# import torch.optim as optim\n\nstudent_inference_queue = queue.Queue(3)\n\norig_image_queue = queue.Queue()\nstudent_result_queue = queue.Queue(3)\n\n\n# normalize the predicted SOD probability map\ndef normPRED(d):\n ma = torch.max(d)\n mi = torch.min(d)\n\n dn = (d-mi)/max(ma-mi, 0.001)\n\n return dn\n\n# TODO: make abstract or flagify\nimg_bg = io.imread(\"data/example_bgs/tokyo.jpg\")\nimg_bg = Image.fromarray(img_bg)\nimg_bg_resized = None\n# post_image = None\n\ndef paint_output(image_name,pred,orig,d_dir,width=None, height=None):\n predict = pred\n predict = predict.squeeze()\n predict_np = predict.cpu().data.numpy()\n del pred\n\n im = Image.fromarray(predict_np*255).convert('RGB')\n img_name = image_name.split(\"/\")[-1]\n \n # orig_image_arr = orig.cpu().data.numpy()[0]\n orig_image_arr = orig\n pred_mask_arr = np.array(im.resize((orig_image_arr.shape[1],orig_image_arr.shape[0]),resample=Image.BILINEAR), dtype=np.float32)\n global img_bg_resized\n if img_bg_resized is None:\n img_bg_resized = np.array(img_bg.resize((orig_image_arr.shape[1],orig_image_arr.shape[0]),resample=Image.BILINEAR))\n inv_mask = 255 - pred_mask_arr\n bg = (inv_mask / 255) * img_bg_resized\n bg = bg.astype(np.uint8)\n pred_img_arr = orig_image_arr * pred_mask_arr / 255\n pred_img_arr = pred_img_arr.astype(np.uint8)\n out = pred_img_arr + bg\n\n return out\n\ndef cv2_thread_func(video_name, output_size=320):\n video = cv2.VideoCapture(video_name)\n images_in_flight = []\n try:\n while True:\n succ, image = video.read()\n # image = cv2.resize(image,\n # (image.shape[1] * 320 // image.shape[0], 320),\n # interpolation=cv2.INTER_AREA)\n\n image = image[:,:,::-1]\n\n orig_image = image.copy()\n orig_image_queue.put(orig_image)\n\n resized_img = Image.fromarray(image).convert('RGB')\n resized_img = resized_img.resize((output_size,output_size),resample=Image.NEAREST)\n resized_img = np.array(resized_img)\n \n image = resized_img/np.max(resized_img)\n tmpImg = np.zeros((image.shape[0],image.shape[1],3))\n tmpImg[:,:,0] = (image[:,:,0]-0.485)/0.229\n tmpImg[:,:,1] = (image[:,:,1]-0.456)/0.224\n tmpImg[:,:,2] 
= (image[:,:,2]-0.406)/0.225\n\n # HWC to CHW for PyTorch\n tmpImg = tmpImg.transpose((2, 0, 1))\n\n student_inference_queue.put({\n # \"orig_image\": orig_image,\n \"image\": torch.from_numpy(tmpImg)\n })\n except Exception:\n print(\"CV2 reader hard exit\")\n student_inference_queue.put(\"kill\")\n orig_image_queue.put(\"kill\")\n exit()\n\ndef paint_thread_func():\n cv2.namedWindow(\"im\")\n vid_out = None\n # TODO: make flag for video saving params\n keep_vid = False\n t = datetime.now()\n while True:\n orig_image = orig_image_queue.get()\n if not vid_out and keep_vid:\n vid_out = cv2.VideoWriter(\"out.mp4\",\n cv2.VideoWriter_fourcc('M', 'P', '4', 'V'),\n # TODO: get fps from cv2 thread message\n 25, (orig_image.shape[1], orig_image.shape[0]))\n pred_mask = student_result_queue.get()\n # the \"kill\" sentinel is a plain string; comparing arrays/tensors to a string is unreliable\n if isinstance(orig_image, str) or isinstance(pred_mask, str):\n print(\"Drawer exiting gracefully\")\n break\n pred_mask = torch.clamp(pred_mask * 3 - 2, 0, 1)\n merged_image = paint_output(\"\", pred_mask, orig_image, \"\")[:,:,::-1]\n cv2.imshow(\"im\", merged_image)\n # print(\"time to paint:\", datetime.now() - t)\n t = datetime.now()\n if vid_out:\n vid_out.write(merged_image)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n print(\"Writer released\")\n cv2.destroyAllWindows()\n if vid_out:\n vid_out.release()\n exit()\n\ndef main():\n\n # --------- 1. get image path and name ---------\n model_name='u2net'#u2netp\n\n # TODO: make input to script\n # video_path = 0 # for local camera\n video_path = './data/example_videos/v0.mp4'\n # video_path = \"http://10.1.10.17:8080/video\" # IP camera\n prediction_dir = './data/out/'\n model_dir = './saved_models/'+ model_name + '/' + model_name + '.pth'\n\n # --------- 2. dataloader ---------\n # Not needed, we have a worker thread now\n # --------- 3. model define ---------\n if(model_name=='u2net'):\n print(\"...load U2NET---173.6 MB\")\n teacher = U2NET(3,1)\n elif(model_name=='u2netp'):\n print(\"...load U2NETP---4.7 MB\")\n teacher = U2NETP(3,1)\n teacher.load_state_dict(torch.load(model_dir))\n\n student = U2NETP_short(3, 1)\n if torch.cuda.is_available():\n teacher.cuda()\n student.cuda()\n teacher.eval()\n # student.eval()\n\n cv_thread = threading.Thread(target=cv2_thread_func, args=(\n video_path, 320\n ))\n paint_thread = threading.Thread(target=paint_thread_func)\n cv_thread.start()\n paint_thread.start()\n \n critereon = nn.BCELoss(reduction='none')\n optimizer = torch.optim.SGD(student.parameters(), lr=0.01, momentum=0.0)\n \n t_loop = datetime.now()\n frame_until_teach = 0\n U_MAX = 8\n DELTA_MIN = 8\n DELTA_MAX = 64\n delta = DELTA_MIN\n delta_remain = 1\n # --------- 4. 
inference for each image ---------\n\n teacher_mode = False\n if teacher_mode:\n while True:\n data_test = student_inference_queue.get()\n if data_test == \"kill\":\n print(\"Pytorch thread exiting gracefully\")\n student_result_queue.put(\"kill\")\n exit()\n inputs_test = data_test['image'].unsqueeze(0)\n inputs_test = inputs_test.type(torch.FloatTensor)\n if torch.cuda.is_available():\n inputs_test = inputs_test.cuda()\n td1,td2,td3,td4,td5,td6,td7= teacher(inputs_test)\n pred = td1[:,0,:,:]\n student_result_queue.put(pred.detach())\n del td1,td2,td3,td4,td5,td6,td7, pred\n # pred = normPRED(pred)\n\n # b = 0\n # c = 0\n while True:\n delta_remain -= 1\n data_test = student_inference_queue.get()\n if data_test == \"kill\":\n print(\"Pytorch thread exiting gracefully\")\n student_result_queue.put(\"kill\")\n exit()\n inputs_test = data_test['image'].unsqueeze(0)\n inputs_test = inputs_test.type(torch.FloatTensor)\n if torch.cuda.is_available():\n inputs_test = inputs_test.cuda()\n # a = datetime.now()\n # print(inputs_test)\n d1,d2,d3,d4,d5,d6,d7= student(inputs_test)\n # b += (datetime.now() - a).microseconds\n # c += 1\n # print(b/c)\n\n # normalization\n pred = d1[:,0,:,:]\n # pred = normPRED(pred)\n\n if delta_remain <= 0:\n if torch.isnan(pred).any():\n print(\"WARN: PRED NAN\")\n # print(pred)\n # print(pred.max())\n # print(pred.min())\n continue\n # trigger teacher learning\n td1,td2,td3,td4,td5,td6,td7= teacher(inputs_test)\n teacher_pred = td1[:,0,:,:].detach()\n # teacher_pred = normPRED(teacher_pred)\n\n loss = critereon(pred, teacher_pred)\n loss *= ((teacher_pred * 5) + 1) / 6\n loss = loss.mean()\n print('loss', loss.item())\n budget = U_MAX\n if loss.item() > 0.05:\n # Loss too high, train the student against the teacher\n while loss.item() > 0.05 and budget > 0:\n if loss > 0.5:\n loss /= torch.norm(loss.detach())\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n d1,d2,d3,d4,d5,d6,d7= student(inputs_test)\n pred = d1[:,0,:,:]\n # pred = normPRED(pred)\n loss = critereon(pred, teacher_pred)\n loss *= ((teacher_pred * 6) + 1) / 6\n loss = loss.mean()\n print('loss', loss.item())\n budget -= 1\n if loss.item() <= 0.05:\n # Loss recovered after training, increase the delay until the next check\n delta = min(DELTA_MAX, 2 * delta)\n else:\n # Loss still bad after training, decrease the delay\n delta = max(DELTA_MIN, delta // 2)\n delta_remain = delta\n del td1,td2,td3,td4,td5,td6,td7, teacher_pred\n student_result_queue.put(pred.detach())\n del d1,d2,d3,d4,d5,d6,d7, pred\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"infer_video.py","file_name":"infer_video.py","file_ext":"py","file_size_in_byte":9193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"165893821","text":"import sys\nsys.stdin = open('sample_input.txt', 'r')\n\nfor tc in range(1, 1+int(input())):\n print('#%d' %(tc), end=' ')\n N, M = map(int, input().split())\n li = [int(input()) for _ in range(N)]\n time = 0\n while M > 0:\n time += 1\n for wait_time in li:\n if time % wait_time == 0:\n M -= 1\n print(time)","sub_path":"05_알고리즘/190920/solving.py","file_name":"solving.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"235175228","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Dec 11 21:49:16 2020\r\n\r\n@author: aleja\r\n\"\"\"\r\n\r\nfrom PyQt5 import QtCore, QtWidgets, QtGui\r\nfrom fichapersonal_ui import *\r\nimport os\r\nfrom bdstd import BdStd\r\nfrom configctx import *\r\nimport calendar\r\nfrom datetime import date\r\n 
\r\n\r\nclass FichaPersonal(QtWidgets.QWidget, FichaPersonal_Ui):\r\n \r\n\r\n def __init__(self, id_personal):\r\n\r\n QtWidgets.QWidget.__init__(self)\r\n self.id_personal = id_personal \r\n \r\n ctx = ConfigCtx()\r\n self.carpeta = ctx.readvar(\"RUTAS\", \"datos_usr\")\r\n self.carpeta = self.carpeta.format(self.carpeta, id=id_personal)\r\n \r\n \r\n \r\n self.ui = FichaPersonal_Ui() \r\n self.ui.setupUi(self) \r\n \r\n\r\n \r\n #----------Botones------------------------------------------------------------\r\n\r\n self.ui.buttonDocumentos.clicked.connect(self.documentos)\r\n self.ui.buttonGuardar.clicked.connect(self.guardar)\r\n self.ui.buttonCerrar.clicked.connect(self.close)\r\n #-----------Poner foto--------------------------------------------------------\r\n \r\n \r\n foto = QtGui.QPixmap(self.carpeta+\"foto_personal.jpg\")\r\n self.ui.foto.setPixmap(foto)\r\n self.ui.foto.setScaledContents(True)\r\n print(self.carpeta+\"foto_personal.jpg\")\r\n \r\n #---------Rellenar datos------------------------------------------------------\r\n self.loadData()\r\n \r\n #----------Crear Checks------------------------------------------------------\r\n \r\n self.map_cargos = getCargosPersonal(self.id_personal)\r\n \r\n for i, map_cargo in enumerate(self.map_cargos):\r\n title = map_cargo['nombre']\r\n label_c = QtWidgets.QLabel(title, self.ui.widget)\r\n label_c.setObjectName(\"label_c\"+str(i))\r\n label_c.setGeometry(10, 10+(i*25), 180, 20) \r\n \r\n \r\n checkBox_c = QtWidgets.QCheckBox(self.ui.widget)\r\n checkBox_c.setObjectName(\"checkBox_c\"+str(i))\r\n checkBox_c.setGeometry(120, 10+(i*25), 50, 20)\r\n if map_cargo['checked'] == \"1\" :\r\n checkBox_c.setChecked(True)\r\n \r\n else:\r\n checkBox_c.setChecked(False)\r\n \r\n \r\n \r\n input_c = QtWidgets.QLineEdit(self.ui.widget)\r\n input_c.setObjectName(\"input_c\"+str(i))\r\n input_c.setGeometry(180, 10+(i*25), 80, 20)\r\n if map_cargo['checked'] == \"1\" :\r\n input_c.setText(str(map_cargo['tarifa']))\r\n else:\r\n input_c.setToolTip(str(map_cargo['tarifa']))\r\n \r\n \r\n #---------Rellena calendario--------------------------------------------------\r\n \r\n self.kale = Acalendar(self.ui, self.id_personal)\r\n self.kale.crea_calendario()\r\n \r\n #--------------Conecto los botones para pasar de mes---------------\r\n self.ui.buttonPre.clicked.connect(self.pre_month)\r\n self.ui.buttonNext.clicked.connect(self.next_month) \r\n \r\n #----------------Conecto los botones ocupar y liberar-----------------\r\n \r\n self.ui.buttonOcupar.clicked.connect(self.ocupar)\r\n self.ui.buttonLiberar.clicked.connect(self.liberar)\r\n \r\n self.ui.tableWidget.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)\r\n self.ui.tableWidget.verticalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)\r\n self.ui.tableWidget.verticalHeader().hide()\r\n #---------------Funciones para avanzar y retroceder mes---------------\r\n \r\n def pre_month(self):\r\n if self.kale.mes == 1:\r\n self.kale.mes=12\r\n self.kale.anyo-=1\r\n else:\r\n self.kale.mes-=1\r\n self.kale.crea_calendario()\r\n \r\n def next_month(self):\r\n print(\"antes:\", self.kale.mes)\r\n if self.kale.mes==12:\r\n self.kale.mes=1\r\n self.kale.anyo+=1\r\n else:\r\n self.kale.mes+=1\r\n print(\"despues:\", self.kale.mes)\r\n self.kale.crea_calendario() \r\n \r\n #---------------Funciones para ocupar y liberar fechas----------------\r\n \r\n def ocupar(self):\r\n row = self.ui.tableWidget.currentRow()\r\n column = self.ui.tableWidget.currentColumn()\r\n dd = 
self.ui.tableWidget.item(row, column).text()\r\n dd = dd.replace(\" \",\"0\")\r\n fecha=f\"{self.kale.anyo}-{self.kale.mes:02d}-{dd}\"\r\n print(fecha)\r\n self.ui.tableWidget.item(row, column).setBackground(QtGui.QColor(255,0,0))\r\n bd = BdStd()\r\n bd.runsql(f\"INSERT INTO personal_ocupado (id_personal,fecha) VALUES ('{self.id_personal}','{fecha}');\")\r\n \r\n \r\n def liberar(self):\r\n row = self.ui.tableWidget.currentRow()\r\n column = self.ui.tableWidget.currentColumn()\r\n dd = self.ui.tableWidget.item(row, column).text()\r\n dd = dd.replace(\" \",\"0\")\r\n fecha=f\"{self.kale.anyo}-{self.kale.mes:02d}-{dd}\"\r\n self.ui.tableWidget.item(row, column).setBackground(QtGui.QColor(255,255,255))\r\n bd = BdStd()\r\n bd.runsql(f\"DELETE FROM personal_ocupado WHERE id_personal = '{self.id_personal}' AND fecha = '{fecha}';\")\r\n \r\n \r\n #---------METODOS ------------------------------------------------------\r\n \r\n def loadData(self): \r\n # Creado mere\r\n if self.id_personal == None :\r\n data = [\"PEPEAL\",\"Alejandro\", \"Pérez Pérez\", \"67458932M\",\"654321987\", \"alejandro@example.es\",\r\n \"carrer example nº3 08012 Barcelona\",\"Sí\",\"ES12 2345 2345 2345 2345\", \"Ingles\" ]\r\n self.load_one(data)\r\n else :\r\n bd = BdStd()\r\n bd.runsql(\"SELECT * FROM personal WHERE id_personal = '\" + self.id_personal + \"'\")\r\n if bd.rows != None :\r\n for row in bd.rows :\r\n self.load_one(row)\r\n \r\n\r\n def load_one(self, data): #<------- en esta funcion he añadido 2 campos que faltaban\r\n self.ui.inputNombre.setText(data[0])\r\n self.ui.inputApellidos.setText(data[2])\r\n self.ui.inputDni.setText(data[3])\r\n self.ui.inputTelefono.setText(data[4])\r\n self.ui.inputMail.setText(data[5])\r\n self.ui.inputAutonomo.setText(data[6])\r\n self.ui.inputDireccion.setText(data[7])\r\n self.ui.inputCp.setText(data[8])\r\n self.ui.inputCiudad.setText(data[9])\r\n self.ui.inputIban.setText(data[10])\r\n self.ui.inputNotas.setPlainText(data[11])\r\n\r\n#------------Función abrir documentos-----------------------------------------\r\n\r\n def documentos(self):\r\n # mere añadidoo try catch, porque fallaba\r\n # se podria añadir un label a la pantalla para mostrar el mensaje\r\n try :\r\n path = self.carpeta\r\n path = os.path.realpath(path)\r\n os.startfile(path)\r\n except :\r\n import sys\r\n print(\"Error:\", sys.exc_info()[0])\r\n qm = QtWidgets.QMessageBox\r\n qm.warning(self, '', \"No hay documentos\")\r\n return\r\n \r\n \r\n def guardar(self):\r\n \r\n # Puesto el Update de los datos cambiados de personal en ficha personal---------\r\n nombre = self.ui.inputNombre.text()\r\n apellidos = self.ui.inputApellidos.text()\r\n dni = self.ui.inputDni.text()\r\n telefono = self.ui.inputTelefono.text()\r\n email = self.ui.inputMail.text()\r\n direccion = self.ui.inputDireccion.text()\r\n cp = self.ui.inputCp.text()\r\n ciudad = self.ui.inputCiudad.text()\r\n autonomo = self.ui.inputAutonomo.text()\r\n iban = self.ui.inputIban.text()\r\n notas = self.ui.inputNotas.toPlainText()\r\n \r\n txtsql = \"UPDATE personal SET nombre = '{}', apellidos = '{}', dni = '{}',\" \\\r\n \"telefono = '{}', email = '{}', direccion = '{}',\" \\\r\n \"cp = '{}', ciudad = '{}', autonomo = '{}',\" \\\r\n \"iban = '{}', notas = '{}' WHERE id_personal = '{}'\"\r\n txtsql = txtsql.format(nombre, apellidos, dni ,telefono ,email, direccion , cp, ciudad, autonomo, iban , notas, self.id_personal)\r\n bd = BdStd()\r\n bd.runsql(txtsql)\r\n \r\n i = 0\r\n for checkobj in 
self.ui.widget.findChildren(QtWidgets.QCheckBox):\r\n title = self.map_cargos[i]\r\n if checkobj.checkState():\r\n self.map_cargos[i]['checked'] = \"1\"\r\n else: \r\n self.map_cargos[i]['checked'] = \"0\"\r\n i+=1\r\n \r\n i = 0\r\n for caja in self.ui.widget.findChildren(QtWidgets.QLineEdit):\r\n title = self.map_cargos[i]\r\n \r\n if caja.text() != \"\":\r\n self.map_cargos[i]['tarifa'] = int(caja.text())\r\n i+=1 \r\n \r\n guardaTarifas(self.id_personal, self.map_cargos)\r\n \r\n msgBox = QtWidgets.QMessageBox()\r\n msgBox.setIcon(msgBox.Information)\r\n msgBox.setText(\"Cambios guardados correctamente\")\r\n msgBox.setWindowTitle(\"Aleph\")\r\n msgBox.exec_()\r\n \r\n \r\n \r\n \r\n\r\n#--------Calendario-----------------------------------------------------------\r\n \r\nclass Acalendar() :\r\n \r\n def __init__(self, winui, id_personal):\r\n \r\n #Instancia de TextCalendar\r\n self.cl = calendar.TextCalendar()\r\n self.id_personal=id_personal\r\n hoy = date.today()\r\n self.mes = hoy.month\r\n self.anyo = hoy.year\r\n self.winui = winui\r\n \r\n def crea_calendario(self):\r\n \r\n \r\n #Elegimos el formato del año y mes del calendario\r\n calendario = self.cl.formatmonth(self.anyo,self.mes)\r\n \r\n #Cambio los saltos de línea por espacios\r\n \r\n calendario=calendario.replace(\"\\n\",\" \")\r\n \r\n #Separo el calendario por espacios\r\n \r\n calendario=calendario.split(\" \")\r\n \r\n #Elimino el año el mes y los dias de la semana.\r\n \r\n for i in range(12):\r\n calendario.remove(calendario[0])\r\n \r\n #Vuelco a unir el calendario\r\n \r\n calendario=\" \".join(calendario)\r\n \r\n #Creo la variable newcalendar añadiendo los dias con los espacios del principio\r\n #para saberqué día de la semana es el 1.\r\n \r\n newcalendar=[]\r\n for i in range(0,len(calendario)-1,3):\r\n newcalendar.append(calendario[i]+calendario[i+1])\r\n for i in range(7):\r\n # mere añadido el viernes\r\n if newcalendar[0]==\"Fr\" or newcalendar[0]==\"Sa\" or newcalendar[0]==\"Su\":\r\n newcalendar.remove(newcalendar[0])\r\n #-----Monta el calendario de la persona----------------------------\r\n \r\n self.dias_event = getEventCale(self.id_personal, \"{:04d}-{:02d}\".format(self.anyo,self.mes))\r\n self.dias_ocupado = getOcupadoCale(self.id_personal, \"{:04d}-{:02d}\".format(self.anyo,self.mes))\r\n \r\n #------Rellena el calendario---------------------------------------\r\n \r\n k=0\r\n for i in range(5):\r\n for j in range(7):\r\n texto = \"\"\r\n dia = int0( newcalendar[k])\r\n evento=QtWidgets.QTableWidgetItem(newcalendar[k]+texto)\r\n if self.dias_event[dia-1] != \"\" :\r\n #colorear la celda AQUI\r\n texto = \"->\" + self.dias_event[dia-1]\r\n evento=QtWidgets.QTableWidgetItem(newcalendar[k]+texto)\r\n evento.setBackground(QtGui.QColor(170,0,255))\r\n \r\n elif self.dias_ocupado[dia-1] != \"\":\r\n evento.setBackground(QtGui.QColor(255,0,0))\r\n \r\n self.winui.tableWidget.setItem(i,j,evento)\r\n k+=1\r\n if k == len(newcalendar):\r\n newcalendar.append(\" \")\r\n \r\n #----Pongo en las etiquetas el mes y el año correspondientes---------\r\n meses = {1:\"Enero\",2:\"Febrero\",3:\"Marzo\",4:\"Abril\",5:\"Mayo\",6:\"Junio\",\\\r\n 7:\"Julio\",8:\"Agosto\",9:\"Septiembre\",10:\"Octubre\",11:\"Noviembre\",12:\"Diciembre\"}\r\n self.winui.labelMes.setText(meses[self.mes])\r\n self.winui.labelMes_2.setText(str(self.anyo)) #------labelMes_2 debería ser labelAnyo\r\n \r\n \r\n \r\n \r\n# LAS TRES FUNCIONES QUE CARGAN LOS CARGOS, TARIFAS DE LA BBDD, y DIAS OCUPADOS Y \r\n# LAS 
GUARDAN\r\n#------------------------------------------------------------------\r\ndef getArrayCargos():\r\n # devuelve un array con los cargos de la base de datos\r\n bd = BdStd()\r\n map_cargos = []\r\n bd.runsql(\"SELECT * FROM cargos ORDER BY id_cargo\") #id_cargo, nombre, tarifa\r\n \r\n if bd.rows != None :\r\n for row in bd.rows :\r\n dic = {'id' : row[0], 'nombre' : row[1], 'tarifa' : row[2], 'checked' : \"0\"}\r\n map_cargos.append(dic)\r\n #print(map_cargos)\r\n return (map_cargos)\r\n\r\n\r\ndef getCargosPersonal(id_personal):\r\n # rellena el array de cargos con los que tiene la persona en la base de datos\r\n map_cargos = getArrayCargos()\r\n bd = BdStd()\r\n bd.runsql(\"SELECT * FROM tarifas WHERE id_personal = '\"+id_personal+\"'\")\r\n #id_personal, id_cargo, tarifa\r\n\r\n if bd.rows != None :\r\n for row in bd.rows :\r\n for i, cargo in enumerate(map_cargos):\r\n if cargo['id'] == row[1]:\r\n map_cargos[i]['checked'] = \"1\" # ON\r\n map_cargos[i]['tarifa'] = row[2]\r\n break\r\n #print(map_cargos)\r\n return (map_cargos) \r\n\r\ndef guardaTarifas(id_personal, map_cargos):\r\n # guarda los cargos y las tarifas de la persona en la base de datos\r\n\r\n bd = BdStd()\r\n bd.runsql(\"DELETE FROM tarifas WHERE id_personal = '\"+id_personal+\"'\")\r\n \r\n sql = \"INSERT INTO tarifas (id_personal,id_cargo,tarifa) VALUES ('{}','{}','{}');\"\r\n\r\n for item in map_cargos :\r\n print (item)\r\n if item['checked'] == \"1\":\r\n print(sql.format(id_personal, item['id'], str(item['tarifa'])))\r\n bd.runsql(sql.format(id_personal, item['id'], str(item['tarifa'])))\r\n \r\n \r\n \r\ndef getEventCale(id_personal, yyyymm):\r\n \r\n # devuelve un array con dias y sus eventos \r\n bd = BdStd()\r\n dias_event = [\"\" for x in range(31)]\r\n txtsql = f\"\"\"SELECT fecha, id_evento FROM personal_evento WHERE id_personal = '{id_personal}'\r\n AND fecha BETWEEN '{yyyymm}-01' AND '{yyyymm}-31' ORDER BY id_personal, fecha\"\"\" \r\n bd.runsql(txtsql) \r\n if bd.rows != None :\r\n for row in bd.rows :\r\n dia = int0(row[0][8:10])\r\n dias_event[dia-1] = row[1]\r\n return (dias_event)\r\n\r\ndef getOcupadoCale(id_personal, yyyymm):\r\n \r\n # devuelve un array con dias y sus eventos \r\n bd = BdStd()\r\n dias_ocupado = [\"\" for x in range(31)]\r\n txtsql = f\"\"\"SELECT fecha, id_personal FROM personal_ocupado WHERE id_personal = '{id_personal}'\r\n AND fecha BETWEEN '{yyyymm}-01' AND '{yyyymm}-31' ORDER BY id_personal, fecha\"\"\"\r\n \r\n print(\"getOcupadoCale: \", txtsql)\r\n \r\n bd.runsql(txtsql)\r\n print(bd.rows)\r\n if bd.rows != None :\r\n for row in bd.rows :\r\n dia = int0(row[0][8:10])\r\n dias_ocupado[dia-1] = row[1]\r\n return (dias_ocupado)\r\n \r\n\r\ndef int0 (texto) :\r\n try:\r\n return(int(texto))\r\n except :\r\n return(0)\r\n \r\n \r\nif __name__ == \"__main__\":\r\n app = QtWidgets.QApplication([])\r\n window = FichaPersonal(\"ALPE48\") \r\n window.show()\r\n app.exec_()\r\n","sub_path":"elaleph/fichapersonal.py","file_name":"fichapersonal.py","file_ext":"py","file_size_in_byte":15885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"335211786","text":"from django.core.management.base import BaseCommand\nfrom apps.movies.models import Movie\nfrom datetime import date\nimport csv\n\n\nclass Command(BaseCommand):\n help = 'Uploads data to your database'\n\n def handle(self, *args, **options):\n path = options['path']\n with open(path, 'r') as d:\n tsv = csv.reader(d, delimiter='\\t')\n next(d)\n for data in tsv :\n if 
data[1] not in ['movie', 'short']:\n continue\n movie, created = Movie.objects.get_or_create(imdb_id=data[0])\n movie.title_type = data[1]\n movie.name = data[2]\n movie.is_adult = data[4] != '0'\n if data[5] != '\\\\N':\n movie.year = date(int(data[5]), 1, 1)\n else:\n movie.year = date(999, 9, 9)\n movie.genres = [genre for genre in data[7].split(',')]\n movie.save()\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--file',\n action='store',\n dest='path',\n required=True,\n help='Please put path to .tsv file',\n )\n","sub_path":"apps/movies/management/commands/load_movies.py","file_name":"load_movies.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"143136838","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n'''\nScraper for election results for the Bundestag election for Karlsruhe.\n\nThe results are published online as an HTML-export from PC-Wahl. This\nscraper uses the per-district results from that export for both the\nfirst and second votes and combines them into a single dataset.\n'''\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport re\n\n\nBOROUGHS = {\n 1: 'Innenstadt-Ost',\n 2: 'Innenstadt-West',\n 3: 'Südstadt',\n 4: 'Südweststadt',\n 5: 'Weststadt',\n 6: 'Nordweststadt',\n 7: 'Oststadt',\n 8: 'Mühlburg',\n 9: 'Daxlanden',\n 10: 'Knielingen',\n 11: 'Grünwinkel',\n 12: 'Oberreut',\n 13: 'Beiertheim-Bulach',\n 14: 'Weiherfeld-Dammerstock',\n 15: 'Rüppurr',\n 16: 'Waldstadt',\n 17: 'Rintheim',\n 18: 'Hagsfeld',\n 19: 'Durlach',\n 20: 'Grötzingen',\n 21: 'Stupferich',\n 22: 'Hohenwettersbach',\n 23: 'Wolfartsweier',\n 24: 'Grünwettersbach',\n 25: 'Palmbach',\n 26: 'Neureut',\n 27: 'Nordstadt',\n}\n\n\ndef district_to_borough(district_num):\n '''\n Return the borough number for a district.\n '''\n parts = district_num.split('-')\n return int(parts[0])\n\n\ndef extract_table(table):\n '''\n Extract data from a BeautifulSoup table.\n\n Returns the table's data as a nested list of strings.\n '''\n rows = []\n for tr in table.find_all('tr'):\n rows.append([td.get_text(separator='\\n').strip() for td in tr.find_all(['td', 'th'])])\n return rows\n\n\ndef fix_district_number(s):\n '''\n Fix the number of a voting district to match our other data.\n '''\n parts = s.split('.')\n return parts[0].rjust(3, '0') + '-' + parts[1].rjust(2, '0')\n\n\ndef parse_german_number(s):\n '''\n Parse a number string that uses German separators.\n\n Returns ``None`` if the string could not be parsed.\n '''\n s = s.replace('.', '')\n try:\n if ',' in s:\n return float(s.replace(',', '.'))\n else:\n return int(s)\n except ValueError:\n return None\n\n\ndef collapse_whitespace(s):\n '''\n Collapse all adjacent whitespace to a single space and strip the string.\n '''\n return re.sub(r'\\s+', ' ', s).strip()\n\n\nif __name__ == '__main__':\n\n import sys\n if sys.version_info.major == 3:\n from urllib.request import urlopen\n import csv\n else:\n from urllib import urlopen\n from backports import csv\n import io\n import json\n\n from bs4 import BeautifulSoup\n\n # fv = first vote, sv = second vote\n\n FV_URL = 'http://web3.karlsruhe.de/Stadtentwicklung/afsta/Wahlen/Wahlabend-Netmodul/2013-btw/erst/bundestag-2013-erst-wbz.php'\n SV_URL = 'http://web3.karlsruhe.de/Stadtentwicklung/afsta/Wahlen/Wahlabend-Netmodul/2013-btw/zweit/bundestag-2013-zwei-wbz.php'\n\n def get_data_from_url(url):\n '''\n Get tabular vote data from an URL.\n '''\n html = 
urlopen(url).read()\n soup = BeautifulSoup(html, 'html.parser')\n tables = soup.find_all('table')\n assert len(tables) == 2\n return extract_table(tables[1])\n\n fv_data = get_data_from_url(FV_URL)\n sv_data = get_data_from_url(SV_URL)\n assert len(fv_data) == len(sv_data)\n\n fv_header = fv_data[0]\n sv_header = sv_data[0]\n candidates = [collapse_whitespace(s) for s in fv_header[5:]]\n parties = [collapse_whitespace(s) for s in sv_header[5:]]\n\n def parse_votes(row, names):\n '''\n Parse the first/second votes of a single voting district.\n '''\n votes = {}\n for cell, name in zip(row[5:], names):\n parts = cell.split()\n num_votes = parse_german_number(parts[0])\n votes[name] = num_votes\n return votes\n\n # Parse data for each district\n districts = {}\n for fv_row, sv_row in zip(fv_data[1:], sv_data[1:]):\n for i in range(5):\n assert fv_row[i] == sv_row[i]\n district_num = fix_district_number(fv_row[0])\n borough_num = district_to_borough(district_num)\n district = {\n 'Wahlkreisnummer': 271,\n 'Wahlkreisname': 'Karlsruhe-Stadt',\n 'Stadtteilnummer': borough_num,\n 'Stadtteilname': BOROUGHS.get(borough_num),\n 'Wahlbezirksnummer': district_num,\n 'Wahlbezirksname': fv_row[1],\n 'Wahlberechtigte insgesamt': parse_german_number(fv_row[2]),\n 'Wähler/-innen': parse_german_number(fv_row[3]),\n 'Wahlbeteiligung': parse_german_number(fv_row[4].rstrip('%')),\n }\n # Extract votes for each candidate\n district['Erststimmen'] = parse_votes(fv_row, candidates)\n district['Gültige Erststimmen'] = sum(district['Erststimmen'].values())\n district['Zweitstimmen'] = parse_votes(sv_row, parties)\n district['Gültige Zweitstimmen'] = sum(district['Zweitstimmen'].values())\n districts[district_num] = district\n\n # Combine postal votes into a single row\n postal_votes = {\n 'Wahlkreisnummer': 271,\n 'Wahlkreisname': 'Karlsruhe-Stadt',\n 'Stadtteilname': 'Briefwahl',\n 'Stadtteilnummer': None,\n 'Wahlbezirksnummer': None,\n 'Wahlbezirksname': None,\n 'Wahlbeteiligung': None,\n 'Wahlberechtigte insgesamt': None,\n 'Wähler/-innen': 0,\n 'Erststimmen': {},\n 'Zweitstimmen': {},\n }\n for district_num in list(districts.keys()):\n if districts[district_num]['Wahlbezirksname'] == 'Briefwahl':\n district = districts.pop(district_num)\n postal_votes['Wähler/-innen'] += district['Wähler/-innen']\n for vote_type in 'Erststimmen', 'Zweitstimmen':\n for key, value in district[vote_type].items():\n postal_votes[vote_type][key] = postal_votes[vote_type].get(key, 0) + value\n postal_votes['Gültige ' + vote_type] = sum(postal_votes[vote_type].values())\n districts['Briefwahl'] = postal_votes\n\n # Export to CSV\n CSV_COLUMNS = (['Wahlkreisnummer', 'Wahlkreisname', 'Stadtteilnummer',\n 'Stadtteilname', 'Wahlbezirksnummer', 'Wahlbezirksname',\n 'Wahlberechtigte insgesamt', 'Wähler/-innen',\n 'Gültige Erststimmen'] + candidates + ['Gültige Zweitstimmen']\n + parties)\n with io.open('results.csv', 'w', newline='', encoding='utf-8') as f:\n writer = csv.writer(f)\n writer.writerow(CSV_COLUMNS)\n for district_num in sorted(districts):\n d = districts[district_num]\n row = [d['Wahlkreisnummer'], d['Wahlkreisname'], d['Stadtteilnummer'],\n d['Stadtteilname'], d['Wahlbezirksnummer'], d['Wahlbezirksname'],\n d['Wahlberechtigte insgesamt'], d['Wähler/-innen'],\n d['Gültige Erststimmen']]\n row.extend(d['Erststimmen'][c] for c in candidates)\n row.append(d['Gültige Zweitstimmen'])\n row.extend(d['Zweitstimmen'][p] for p in parties)\n writer.writerow(row)\n\n # Export to JSON\n with open('results.json', 'w') as f:\n 
json.dump(districts, f)\n\n","sub_path":"daten/karlsruhe/wahldaten-scraper/ka_wahldaten_scraper.py","file_name":"ka_wahldaten_scraper.py","file_ext":"py","file_size_in_byte":7181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"205533740","text":"#!/usr/bin/env python\nimport glob\n\nimport cv2\nimport cv_bridge\nimport rospy\nimport sensor_msgs.msg\n\n\ndef publisher():\n rospy.init_node('debugging_publisher')\n\n # Parse parameters.\n ns = rospy.get_name() + '/'\n image_topic = rospy.get_param(ns + 'image_topic', '/image')\n image_folder = rospy.get_param(ns + 'image_folder')\n fps = float(rospy.get_param(ns + 'fps', 5.0))\n fix_width_to = rospy.get_param(ns + 'fix_width_to', 'none')\n file_extension = rospy.get_param(ns + 'file_extension', 'jpg')\n if fix_width_to == 'none':\n fix_width_to = None\n else:\n fix_width_to = int(fix_width_to)\n\n # Load the file list for publishing.\n file_list = sorted(glob.glob(image_folder + '/*' + file_extension))\n if len(file_list) == 0:\n print('No matching {} files found in {}'.format(\n file_extension, image_folder))\n exit(1)\n\n # Init some variables needed for publishing\n image_publisher = rospy.Publisher(\n image_topic, sensor_msgs.msg.Image, queue_size=1)\n bridge = cv_bridge.CvBridge()\n\n # Forever keep looping over the files\n seq = 0\n while not rospy.is_shutdown():\n # Load the image\n image_file = file_list[seq % len(file_list)]\n image = cv2.imread(image_file)\n\n # Possibly resize the image.\n if fix_width_to:\n h, w, _ = image.shape\n h = h * fix_width_to // w\n image = cv2.resize(image, (fix_width_to, h))\n\n # Convert it to the proper image format for publishing.\n image_msg = bridge.cv2_to_imgmsg(image, \"bgr8\")\n image_msg.header.seq = seq\n image_msg.header.stamp = rospy.Time.now()\n\n # Publish the image\n image_publisher.publish(image_msg)\n\n # Wait according to the fps. We'll just assume that loading and resizing\n # take essentially no time.\n rospy.sleep(1.0 / fps)\n seq += 1\n\n\nif __name__ == '__main__':\n publisher()\n","sub_path":"ros_nodes/debugging_image_publisher/scripts/publisher.py","file_name":"publisher.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"368016852","text":"from crispy_forms.bootstrap import FormActions\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Submit, HTML, Field\nfrom django import forms\n\n\nclass FastaForm(forms.Form):\n seq = forms.CharField(widget=forms.Textarea(attrs={'rows':10, 'cols':100}), label=\"Fasta sequence\", required=False)\n fastafile = forms.FileField(required=False, label=\"Select Fasta file\")\n\n def __init__(self, *args, **kwargs):\n super(FastaForm, self).__init__(*args, **kwargs)\n\n self.helper = FormHelper()\n self.helper.form_class = 'form-inline'\n self.helper.label_class = 'col-lg-3'\n self.helper.field_class = 'col-lg-2'\n self.helper.form_method = 'post'\n self.helper.layout = Layout(\n Field('seq', placeholder='''>Example seq\nGCAAATGCCGAGTCA'''),\n HTML(\"

\"),\n 'fastafile',\n FormActions(\n Submit('submit', 'Submit', css_class='btn-primary')))\n","sub_path":"revcomp/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"408528600","text":"import pickle\nfrom sklearn.utils import shuffle\nimport tensorflow as tf\nfrom tensorflow.contrib.layers import flatten\nimport numpy as np\nimport cv2\n\n# define image input size\nHEIGHT = 800\nWIDTH = 600\nnum_channel = 3\nnum_class = 4\n\n# training parameters setting\nepoch = 500\nbatch_size = 32\nlearning_rate = 0.000001\n\nsplit = 0.9\n\nmodel = tf.contrib.keras.models.Sequential()\nmodel.add(tf.contrib.keras.layers.Conv2D(8, 4, 4, activation='relu', padding=\"valid\", batch_input_shape=(None, 600, 800, 3,)))\nmodel.add(tf.contrib.keras.layers.Conv2D(32, 3, 3, activation='relu', padding=\"valid\"))\nmodel.add(tf.contrib.keras.layers.Conv2D(64, 2, 2, activation='relu', padding=\"valid\"))\nmodel.add(tf.contrib.keras.layers.Conv2D(96, 2, 2, activation='relu', padding=\"valid\"))\nmodel.add(tf.contrib.keras.layers.Conv2D(128, 2, 2, activation='relu', padding=\"valid\"))\nmodel.add(tf.contrib.keras.layers.Flatten())\nmodel.add(tf.contrib.keras.layers.Dense(256))\nmodel.add(tf.contrib.keras.layers.Dense(128))\nmodel.add(tf.contrib.keras.layers.Dense(64))\nmodel.add(tf.contrib.keras.layers.Dense(32))\nmodel.add(tf.contrib.keras.layers.Dense(4))\nmodel.add(tf.contrib.keras.layers.Activation('softmax'))\n\nmodel.compile(optimizer=\"adam\", loss=\"categorical_crossentropy\", metrics=['mse', 'accuracy'])\n\ndef get_batch(data_list, label_list):\n batch_labels = []\n batch_data = []\n for key, file_name in enumerate(data_list):\n img_data = cv2.imread(file_name)\n img_data = cv2.resize(img_data, (HEIGHT, WIDTH))\n batch_data.append(img_data/255.0)\n temp_label = label_list[key]\n if temp_label == 4:\n temp_label = 3\n batch_labels.append(temp_label)\n return np.array(batch_data), tf.contrib.keras.utils.to_categorical(np.array(batch_labels), num_classes=4)\n\ndef train(data, label):\n train_data = data[:int(len(data) * split)]\n print(len(train_data))\n train_label = label[:int(len(data) * split)]\n validation_data = data[int(len(data) * split):]\n validation_label = label[int(len(data) * split):]\n highest_accuracy = 0.0\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n print('Start training ...\\n')\n for e in range(epoch):\n current_image = 0\n for start in range(0, len(train_data), batch_size):\n end = start + batch_size\n x_batch, y_batch = get_batch(train_data[start:end], train_label[start:end])\n model.train_on_batch(x_batch, y_batch)\n if current_image % 3 == 0:\n print(\"Processing image: \" + str(current_image * batch_size))\n current_image += 1\n validation_accuracy = evaluate(validation_data, validation_label)\n print(\"epoch \", e + 1)\n print(\"Validation accuracy = {:.3f}\\n\".format(validation_accuracy))\n train_data, train_label = shuffle(train_data, train_label)\n if validation_accuracy > highest_accuracy:\n highest_accuracy = validation_accuracy\n model.save('keras_light_model' + str(e) + '_' + str(validation_accuracy) + '.h5')\n print(\"Model Saved\")\n\n\ndef evaluate(data, label):\n\n num_examples = len(data)\n total_accuracy = 0\n sess = tf.get_default_session()\n num_batches = 0\n for offset in range(0, num_examples, batch_size):\n batch_x, batch_y = get_batch(data[offset:offset+batch_size], label[offset:offset+batch_size])\n loss, mse, 
accuracy = model.evaluate(batch_x, batch_y)\n print(accuracy)\n total_accuracy += accuracy\n num_batches += 1\n return total_accuracy / num_batches\n\n# -------------------------------------------------------------\n#\n# Entrance\n#\n#--------------------------------------------------------------\n\n# load data\ndata_path = 'data/simulator.pkl'\nwith open(data_path, 'rb') as f:\n data = pickle.load(f)\n\nimages = np.array(data['image'])\nlabels = np.array(data['label'])\n\n# shuffle data\ndata, label = shuffle(images, labels)\n\n# train\ntrain(data, label)\n","sub_path":"ros/src/tl_detector/light_classification/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"59134553","text":"from django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\nfrom .models import DataValue\n\n\n@receiver(post_save, sender=DataValue)\ndef save_precision(sender, instance, created, **kwargs):\n field = instance.field\n if field.precision is None:\n value = str(instance.value)\n decimal_posision = value.find('.')\n if decimal_posision == -1:\n field.precision = 0\n else:\n field.precision = len(value) - decimal_posision - 1\n field.save()","sub_path":"apps/datastream/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"368873377","text":"'''\nCVAE model.\n'''\nimport json\nimport torch\nimport os\nimport numpy as np\nfrom model_v2 import *\nfrom torch import optim\nfrom torch.distributions import kl_divergence, Normal\nfrom torch.nn import functional as F\nfrom torch.optim.lr_scheduler import ExponentialLR\nfrom sklearn.model_selection import train_test_split\nfrom ptb_v2 import *\n\n# initialization\nwith open('model_config_v2.json') as f:\n args = json.load(f)\nif not os.path.isdir('log'):\n os.mkdir('log')\nif not os.path.isdir('params'):\n os.mkdir('params')\nsave_path = 'params/{}.pt'.format(args['name'])\n\nfrom datetime import datetime\ntimestamp = str(datetime.now())\nsave_path_timing = 'params/{}.pt'.format(args['name'] + \"_\" + timestamp)\n\n# model dimensions\nEVENT_DIMS = 342\nRHYTHM_DIMS = 3\nNOTE_DIMS = 16\nCHROMA_DIMS = 24\n\nmodel = MusicAttrCVAE(roll_dims=EVENT_DIMS, rhythm_dims=RHYTHM_DIMS, note_dims=NOTE_DIMS, \n chroma_dims=CHROMA_DIMS,\n hidden_dims=args['hidden_dim'], z_dims=args['z_dim'], \n n_step=args['time_step'])\n\nif os.path.exists(save_path):\n print(\"Loading {}\".format(save_path))\n model.load_state_dict(torch.load(save_path))\nelse:\n print(\"Save path: {}\".format(save_path))\n\noptimizer = optim.Adam(model.parameters(), lr=args['lr'])\n\nif torch.cuda.is_available():\n print('Using: ', torch.cuda.get_device_name(torch.cuda.current_device()))\n model.cuda()\nelse:\n print('CPU mode')\n\nstep, pre_epoch = 0, 0\nbatch_size = args[\"batch_size\"]\nmodel.train()\n\n# dataloaders\nis_shuffle = True\ndata_lst, rhythm_lst, note_density_lst, chroma_lst = get_classic_piano()\ntlen, vlen = int(0.8 * len(data_lst)), int(0.9 * len(data_lst))\ntrain_ds_dist = YamahaDataset(data_lst, rhythm_lst, note_density_lst, \n chroma_lst, mode=\"train\")\ntrain_dl_dist = DataLoader(train_ds_dist, batch_size=batch_size, shuffle=is_shuffle, num_workers=0)\nval_ds_dist = YamahaDataset(data_lst, rhythm_lst, note_density_lst, \n chroma_lst, mode=\"val\")\nval_dl_dist = DataLoader(val_ds_dist, batch_size=batch_size, 
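# save_precision above infers decimal places with str(value).find('.'),
# which miscounts when str() yields scientific notation (e.g. '1e-05'). A
# hedged alternative sketch using the decimal module:
from decimal import Decimal

def infer_precision(value):
    # Number of decimal places, robust to exponent notation.
    exponent = Decimal(str(value)).normalize().as_tuple().exponent
    return max(0, -exponent)

# infer_precision(3.14)  -> 2
# infer_precision(1e-05) -> 5
# infer_precision(10.0)  -> 0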
shuffle=is_shuffle, num_workers=0)\ntest_ds_dist = YamahaDataset(data_lst, rhythm_lst, note_density_lst, \n chroma_lst, mode=\"test\")\ntest_dl_dist = DataLoader(test_ds_dist, batch_size=batch_size, shuffle=is_shuffle, num_workers=0)\ndl = train_dl_dist\nprint(\"Train / Validation / Test\")\nprint(len(train_ds_dist), len(val_ds_dist), len(test_ds_dist))\n\n\ndef std_normal(shape):\n N = Normal(torch.zeros(shape), torch.ones(shape))\n if torch.cuda.is_available():\n N.loc = N.loc.cuda()\n N.scale = N.scale.cuda()\n return N\n\n\ndef loss_function(out, d,\n dis,\n step,\n beta=.1):\n # anneal beta\n if step < 1000:\n beta0 = 0\n else:\n beta0 = min((step - 10000) / 10000 * beta, beta) \n\n CE_X = F.nll_loss(out.view(-1, out.size(-1)),\n d.view(-1), reduction='mean')\n\n # all distribution conform to standard gaussian\n inputs = dis\n normal = std_normal(dis.mean.size())\n KLD = kl_divergence(dis, normal).mean()\n\n return CE_X + beta0 * KLD, CE_X\n\n\ndef train(step, d_oh, r_oh, n_oh, d, r, n, c, r_density, n_density):\n \n optimizer.zero_grad()\n\n res = model(d_oh, r_oh, n_oh, c, r_density, n_density)\n\n # package output\n out, dis, z = res\n \n # calculate loss\n loss, CE_X = loss_function(out, d, dis, step, beta=args['beta'])\n \n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), 1)\n optimizer.step()\n step += 1\n \n output = loss.item(), CE_X.item()\n return step, output\n\n\ndef evaluate(d_oh, r_oh, n_oh, d, r, n, c, r_density, n_density):\n\n # calculate rhythm and note density first\n r_density = torch.Tensor([Counter(k.cpu().detach().numpy())[1] / len(k) for k in r]).cuda()\n n_density = torch.Tensor([sum(k.cpu().detach().numpy()) / len(k) for k in n]).cuda()\n r_density = r_density.unsqueeze(-1)\n n_density = n_density.unsqueeze(-1)\n \n res = model(d_oh, r_oh, n_oh, c, r_density, n_density)\n\n # package output\n out, dis, z = res\n \n # calculate loss\n loss, CE_X = loss_function(out, d, dis, step, beta=args['beta'])\n \n output = loss.item(), CE_X.item()\n return output\n\n\ndef convert_to_one_hot(input, dims):\n if len(input.shape) > 1:\n input_oh = torch.zeros((input.shape[0], input.shape[1], dims)).cuda()\n input_oh = input_oh.scatter_(-1, input.unsqueeze(-1), 1.)\n else:\n input_oh = torch.zeros((input.shape[0], dims)).cuda()\n input_oh = input_oh.scatter_(-1, input.unsqueeze(-1), 1.)\n return input_oh\n\n\ndef training_phase(step):\n print(\"D - Data, R - Rhythm, N - Note, RD - Reg. Rhythm Density, ND- Reg. 
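# In loss_function above the warm-up gates on step < 1000 but offsets by
# 10000, so beta0 is negative for every step between 1000 and 10000 and the
# KL term briefly receives a negative weight. Assuming a linear KL warm-up
# was intended, a sketch that stays inside [0, beta]:
def kl_weight(step, beta, warmup_start=10000, warmup_len=10000):
    # 0 until warmup_start, then ramp linearly up to beta and hold.
    if step < warmup_start:
        return 0.0
    return min((step - warmup_start) / warmup_len * beta, beta)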
Note Density\")\n for i in range(1, args['n_epochs'] + 1):\n print(\"Epoch {} / {}\".format(i, args['n_epochs']))\n\n batch_loss, batch_test_loss = 0, 0\n b_CE_X, b_CE_R, b_CE_N = 0, 0, 0\n t_CE_X, t_CE_R, t_CE_N = 0, 0, 0\n b_l_r, b_l_n, t_l_r, t_l_n = 0, 0, 0, 0\n\n for j, x in tqdm(enumerate(train_dl_dist), total=len(train_dl_dist)):\n\n d, r, n, c, r_density, n_density = x\n d, r, n, c = d.cuda().long(), r.cuda().long(), \\\n n.cuda().long(), c.cuda().float()\n r_density, n_density = r_density.cuda().float().unsqueeze(-1), \\\n n_density.cuda().float().unsqueeze(-1)\n\n d_oh = convert_to_one_hot(d, EVENT_DIMS)\n r_oh = convert_to_one_hot(r, RHYTHM_DIMS)\n n_oh = convert_to_one_hot(n, NOTE_DIMS)\n\n step, loss = train(step, d_oh, r_oh, n_oh,\n d, r, n, c, r_density, n_density)\n loss, CE_X = loss\n batch_loss += loss\n b_CE_X += CE_X\n\n for j, x in tqdm(enumerate(val_dl_dist), total=len(val_dl_dist)):\n \n d, r, n, c, r_density, n_density = x\n d, r, n, c = d.cuda().long(), r.cuda().long(), \\\n n.cuda().long(), c.cuda().float()\n r_density, n_density = r_density.cuda().float().unsqueeze(-1), \\\n n_density.cuda().float().unsqueeze(-1)\n\n d_oh = convert_to_one_hot(d, EVENT_DIMS)\n r_oh = convert_to_one_hot(r, RHYTHM_DIMS)\n n_oh = convert_to_one_hot(n, NOTE_DIMS)\n\n loss = evaluate(d_oh, r_oh, n_oh,\n d, r, n, c, r_density, n_density)\n loss, CE_X = loss\n batch_test_loss += loss\n t_CE_X += CE_X\n \n print('batch loss: {:.5f} {:.5f}'.format(batch_loss / len(train_dl_dist),\n batch_test_loss / len(val_dl_dist)))\n print(\"train loss by term - D: {:.4f} R: {:.4f} N: {:.4f} RD: {:.4f} ND: {:.4f}\".format(\n b_CE_X / len(train_dl_dist), b_CE_R / len(train_dl_dist), \n b_CE_N / len(train_dl_dist),\n b_l_r / len(train_dl_dist), b_l_n / len(train_dl_dist)\n ))\n print(\"test loss by term - D: {:.4f} R: {:.4f} N: {:.4f} RD: {:.4f} ND: {:.4f}\".format(\n t_CE_X / len(val_dl_dist), t_CE_R / len(val_dl_dist), \n t_CE_N / len(val_dl_dist),\n t_l_r / len(val_dl_dist), t_l_n / len(val_dl_dist),\n ))\n\n torch.save(model.cpu().state_dict(), save_path)\n\n timestamp = str(datetime.now())\n save_path_timing = 'params/{}.pt'.format(args['name'] + \"_\" + timestamp)\n torch.save(model.cpu().state_dict(), save_path_timing)\n\n if torch.cuda.is_available():\n model.cuda()\n print('Model saved as {}!'.format(save_path))\n\n\ndef evaluation_phase():\n if torch.cuda.is_available():\n model.cuda()\n\n if os.path.exists(save_path):\n print(\"Loading {}\".format(save_path))\n model.load_state_dict(torch.load(save_path))\n \n def run(dl):\n \n t_CE_X, t_CE_R, t_CE_N = 0, 0, 0\n t_l_r, t_l_n = 0, 0\n t_acc_x, t_acc_r, t_acc_n = 0, 0, 0\n data_len = 0\n\n for i, x in tqdm(enumerate(dl), total=len(dl)):\n d, r, n, c, r_density, n_density = x\n d, r, n, c = d.cuda().long(), r.cuda().long(), \\\n n.cuda().long(), c.cuda().float()\n r_density, n_density = r_density.cuda().float().unsqueeze(-1), \\\n n_density.cuda().float().unsqueeze(-1)\n\n d_oh = convert_to_one_hot(d, EVENT_DIMS)\n r_oh = convert_to_one_hot(r, RHYTHM_DIMS)\n n_oh = convert_to_one_hot(n, NOTE_DIMS)\n \n res = model(d_oh, r_oh, n_oh, c, r_density, n_density)\n\n # package output\n out, dis, z = res\n \n # calculate loss\n loss, CE_X = loss_function(out, d, dis, step, beta=args['beta'])\n\n # update\n t_CE_X += CE_X.item()\n \n # calculate accuracy\n def acc(a, b, t, trim=False):\n a = torch.argmax(a, dim=-1).squeeze().cpu().detach().numpy()\n b = b.squeeze().cpu().detach().numpy()\n\n b_acc = 0\n for i in range(len(a)):\n a_batch = a[i]\n b_batch = 
b[i]\n\n if trim:\n b_batch = np.trim_zeros(b_batch)\n a_batch = a_batch[:len(b_batch)]\n\n correct = 0\n for j in range(len(a_batch)):\n if a_batch[j] == b_batch[j]:\n correct += 1\n acc = correct / len(a_batch)\n b_acc += acc\n \n return b_acc\n\n acc_x = acc(out, d, \"d\", trim=True)\n data_len += out.shape[0]\n\n # accuracy update store\n t_acc_x += acc_x\n \n \n # Print results\n print(\"CE: {:.4} {:.4} {:.4}\".format(t_CE_X / len(dl),\n t_CE_R / len(dl), \n t_CE_N / len(dl)))\n \n print(\"Regularized: {:.4} {:.4}\".format(t_l_r / len(dl),\n t_l_n / len(dl)))\n\n print(\"Adversarial: {:.4} {:.4}\".format(t_l_adv_r / len(dl),\n t_l_adv_n / len(dl)))\n \n print(\"Acc: {:.4} {:.4} {:.4}\".format(t_acc_x / data_len,\n t_acc_r / data_len, \n t_acc_n / data_len))\n \n if is_class:\n print(\"Class acc: {:.4} {:.4}\".format(c_acc_r / data_len,\n c_acc_n / data_len))\n\n dl = DataLoader(train_ds_dist, batch_size=128, shuffle=False, num_workers=0)\n run(dl)\n dl = DataLoader(test_ds_dist, batch_size=128, shuffle=False, num_workers=0)\n run(dl)\n\n\ntraining_phase(step)\nevaluation_phase()\n\n","sub_path":"trainer_cvae.py","file_name":"trainer_cvae.py","file_ext":"py","file_size_in_byte":11053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"242726143","text":"from functools import reduce\n\nwith open('step_5_5.txt', 'w') as file:\n file.write('25,58,96,14,26,85,47,36,45,25,14,96,36,256')\n\nwith open('step_5_5.txt', 'r') as file:\n numbers = file.read().split(',')\n summa = reduce(lambda x, y: int(x) + int(y), numbers)\n\nprint(summa)","sub_path":"step_5/step_5_5.py","file_name":"step_5_5.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"513559634","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n## @package DataCollectionDaemon \n# @brief sas7bdat_parallel_reader module - Read .sas7bdat files in page by page\n#\n# @par Change History\n# @verbatim\n# VER Date User ID Reason\n# ----- -------- ------------------- -----------------------------------------\n# 1.1 02.09.14 Kelvin.Liu@hgst.com Initial creation\n#\n# @endverbatim\n\n\nfrom sas7bdat import SAS7BDAT\nimport os\n\n\nclass SAS7BDAT_Parallel(SAS7BDAT):\n \"\"\"Read *.sas7bdat files page by page\"\"\"\n\n def __init__(self, path):\n \"\"\"Initialize a SAS7BDAT_Parallel() object\n\n Parameters:\n path - sas7bdat file path\n\n Returns:\n yield data page by page\n\n Raises:\n None\n \"\"\"\n\n super(SAS7BDAT_Parallel, self).__init__(path)\n import pandas as pd\n self.col_names = [x.name for x in self.header.cols]\n self.col_len = len(self.col_names)\n\n def read_data_by_page(self):\n \"\"\"\n read .sas7bdat files and yield data in pages as pandas DataFrame format\n \"\"\"\n\n if self.header.compression is not None:\n self.logger.error(\n '[%s] compressed data not yet supported',\n os.path.basename(self.path))\n\n with open(self.path, 'rb') as f:\n f.seek(self.header.headerlength)\n\n for page in self.readPages(f, self.header.pagecount,\n self.header.pagesize):\n if page.type not in self.PAGE_MIX_DATA and not\\\n (page.type == self.PAGE_META and\n self.header.compression == 'RLE'):\n continue\n\n yield self._get_page(page)\n\n '''\n ## TODO: apply multiprocessing, then reduce()\n pages = [page for page in self.readPages(\n f, self.header.pagecount, self.header.pagesize) \\\n if page.type in self.PAGE_MIX_DATA or \\\n (page.type == self.PAGE_META and self.header.compression == 
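# run() above prints t_l_adv_r / t_l_adv_n and branches on is_class, c_acc_r
# and c_acc_n, none of which are defined anywhere in this trainer, so
# evaluation raises NameError right after the batch loop. A sketch of a
# report that only uses the metrics run() actually accumulates:
def report(t_CE_X, t_acc_x, n_batches, n_samples):
    # Reconstruction loss per batch, accuracy per sample.
    print("CE: {:.4f}".format(t_CE_X / n_batches))
    print("Acc: {:.4f}".format(t_acc_x / n_samples))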
'RLE')]\n\n from multiprocessing import Pool\n pool = Pool(processes=processes)\n df_list = pool.map(unwrap_self_get_page, zip([self]*len(pages), pages))\n results = pd.concat(df_list, ignore_index=True)\n return results#.reset_index(drop=True)\n '''\n\n def _get_page(self, page):\n\n page = page._asdict()\n if page['type'] == self.PAGE_META:\n page['data'] = self.uncompressData(page['data'])\n rowcountp = self.header.rowcountfp\n base = 129 + page['subheadercount'] * 24\n elif self.u64:\n if page['type'] in self.PAGE_MIX:\n rowcountp = self.header.rowcountfp\n base = 40 + page['subheadercount'] * 24\n base += (base % 8)\n else:\n rowcountp = self.readVal('h', page['data'], 34, 2)\n base = 40\n else:\n if page['type'] in self.PAGE_MIX:\n rowcountp = self.header.rowcountfp\n base = 24 + page['subheadercount'] * 12\n base += (base % 8)\n else:\n rowcountp = self.readVal('h', page['data'], 18, 2)\n base = 24\n if rowcountp > self.header.rowcount:\n rowcountp = self.header.rowcount\n\n df = pd.DataFrame(columns=self.col_names)\n for index_ in xrange(rowcountp):\n #print i\n row = []\n for col in self.header.cols:\n offset = base + col.attr.offset\n if col.attr.length > 0:\n # import pdb; pdb.set_trace()\n raw = page['data'][offset:offset + col.attr.length]\n try:\n if col.attr.type == 'character':\n val = self.readVal('s', raw, 0,\n col.attr.length)\n val = val.lstrip().strip()\n else:\n val = self.readVal(col.attr.type, raw, 0,\n col.attr.length)\n val = self.formatValue(val, col.label.format)\n except KeyboardInterrupt:\n return\n except:\n break\n row.append(val)\n base += self.header.rowlength\n if row and len(row) == self.col_len:\n #print 'SAS7BDAT_Parallel'\n df.loc[index_] = row\n else:\n continue\n\n return df.reset_index(drop=True)\n\n'''\ndef unwrap_self_get_page(arg, **kwargs):\n return SAS7BDAT_Parallel._get_page(*arg, **kwarg)\n'''","sub_path":"data_consolidation_daemon/utils/base/sas7bdat_parallel_reader.py","file_name":"sas7bdat_parallel_reader.py","file_ext":"py","file_size_in_byte":4797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"539358906","text":"#FILE WITH FUNCTIONS USED IN ESP8266\n\nimport config\nimport machine\nimport time\n\nin_led = machine.Pin(2, machine.Pin.OUT)\nif (config.PARAMS['ENABLE_EXTERNAL_LED']):\n out_led = machine.Pin(config.GPIO['EXTERNAL_LED'], machine.Pin.OUT)\nif (config.PARAMS['ENABLE_BUZZER']):\n buzzer = machine.Pin(config.GPIO['BUZZER'], machine.Pin.OUT)\n buzzer.value(0)\n\ndef internal_led_blick(count,delay): \n if (config.PARAMS['ENABLE_INTERNAL_LED']):\n for i in range(count):\n in_led.off()\n time.sleep_ms(delay)\n in_led.on()\n time.sleep_ms(delay) \n\ndef external_led_blick(count, delay):\n if (config.PARAMS['ENABLE_EXTERNAL_LED']):\n for i in range(count):\n out_led.on()\n time.sleep_ms(delay)\n out_led.off()\n time.sleep_ms(delay)\n\ndef beep(count, delay):\n if (config.PARAMS['ENABLE_BUZZER']):\n for i in range(count):\n buzzer.value(1)\n time.sleep_ms(delay)\n buzzer.value(0)\n time.sleep_ms(delay) ","sub_path":"egs/smarthome/device/esp_pir/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"430213547","text":"import socket\nfrom helper import *\nfrom my_hash_function import *\nimport pymysql\nimport crypt\nimport time\nfrom binascii import b2a_hex, a2b_hex\n# 创建服务端的socket对象socketserver\nsocketserver = socket.socket(socket.AF_INET, 
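# _get_page above calls pd.DataFrame(...), but pandas is imported only as a
# local name inside __init__, so pd is unbound in this method and the first
# page raises NameError. The usual fix is a module-level import; under that
# assumption, a sketch of consuming the page generator:
import pandas as pd

def read_whole_file(reader):
    # Concatenate the per-page DataFrames yielded by read_data_by_page().
    frames = list(reader.read_data_by_page())
    return pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()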
socket.SOCK_STREAM)\nhost = '127.0.0.1'\nport = 9092\ndb=pymysql.connect(host='localhost',user='root',passwd='781886704',db='ddwdatabase',charset='utf8')\ncursor = db.cursor()\n# 绑定地址(包括ip地址会端口号)\nsocketserver.bind((host, port))\n# 设置监听\nsocketserver.listen(5)\n# 等待客户端的连接\n# 注意:accept()函数会返回一个元组\n# 元素1为客户端的socket对象,元素2为客户端的地址(ip地址,端口号)\nclientsocket, addr = socketserver.accept()\n\n\n\n# while循环是为了能让对话一直进行\nwhile True:\n # 接收客户端的请求\n recvmsg = clientsocket.recv(1024)\n # 把接收到的数据进行解码\n strData = recvmsg.decode(\"utf-8\")\n username,hashcode2,mac=strData.split('\\n')[0],strData.split('\\n')[1],strData.split('\\n')[2] # 用户名,散列值2,认证码明文\n sql = \"SELECT hashvalue1 FROM server_dict WHERE username = \"+\"\\'\"+username+\"\\'\"\n cursor.execute(sql)\n tt=cursor.fetchone()\n print('在数据库查到了用户名对应的散列值1',tt)\n # hashvalue1=answer_dict.get(username,None)\n hashvalue1=tt[0] if tt !=None else None # 在数据库查到了用户名对应的散列值1\n print(\"输出用户名、散列值2、认证码、散列值1:\", username, hashcode2,mac, hashvalue1)\n msg='WRONG'.encode()\n if hashvalue1 != None:\n hashvalue2 = hashForString('sha1',hashvalue1+mac) # 用服务器数据库存着的对应用户名的散列值1和认证码求散列值2\n if hashvalue2==hashcode2:\n print('success')\n # msg=my_encode(mac,hashvalue1) # 认证成功\n cr=crypt.PrpCrypt(hashvalue1[:16]) # 以散列值1作为密钥\n msg=cr.encrypt(mac) # 把认证码加密发给客户端\n clientsocket.send(msg)\n\ncursor.close()\nconnect.close()\nsocketserver.close()\n\n","sub_path":"网络安全/实验4/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"190108913","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse, sys, tempfile, os, glob, pickle, tqdm, math, time\nimport tensorflow as tf\nimport config\nfrom tf_data_handler import inputs\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom textwrap import wrap\n\nclass cnn_reverse_model:\n def __init__(self, trainable=True):\n self.trainable = trainable\n self.data_dict = None\n self.var_dict = {}\n\n def __getitem__(self, item):\n return getattr(self,item)\n\n def __contains__(self, item):\n return hasattr(self, item)\n\n def get_size(self, input_data):\n return np.prod([int(x) for x in input_data.get_shape()[1:]])\n\n def build(self, input_data, input_shape, output_shape, train_mode=None, verbose=True, full_cov=False):\n if verbose:\n print (\"Building the network...\")\n network_input = tf.identity(input_data, name='input')\n with tf.name_scope('reshape'):\n x_data = tf.reshape(network_input, [-1, input_shape[0], input_shape[1], input_shape[2]])\n \n # conv layer 1\n with tf.variable_scope('conv1'):\n self.W_conv1 = self.weight_variable([1, 5, input_shape[2], 8],var_name='wconv1')\n self.b_conv1 = self.bias_variable([8],var_name='bconv1')\n #self.norm1 = tf.layers.batch_normalization(self.conv2d(x_data, self.W_conv1,stride=[1,1,1,1]) + self.b_conv1,scale=True,center=True,training=train_mode,name='batchnorm1')\n self.norm1 = self.conv2d(x_data, self.W_conv1,stride=[1,1,1,1]) + self.b_conv1\n self.h_conv1 = tf.nn.leaky_relu(self.norm1, alpha=0.1)\n if verbose:\n print(self.h_conv1.get_shape())\n\n # conv layer 2\n with tf.variable_scope('conv2'):\n self.W_conv2 = self.weight_variable([1, 5, 8, 16],var_name='wconv2')\n self.b_conv2 = self.bias_variable([16],var_name='bconv2')\n #self.norm2 = tf.layers.batch_normalization(self.conv2d(self.h_conv1, self.W_conv2, stride=[1, 2, 2, 1]) + 
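# The check in server.py recomputes the client's digest from the stored
# hashvalue1 and the plaintext mac: hashvalue2 = sha1(hashvalue1 + mac). A
# self-contained sketch of that comparison, using hashlib in place of the
# repo's hashForString helper and a constant-time compare:
import hashlib, hmac

def verify_client(stored_hash1, mac, claimed_hash2):
    expected = hashlib.sha1((stored_hash1 + mac).encode()).hexdigest()
    return hmac.compare_digest(expected, claimed_hash2)

# Note: the cleanup lines call connect.close(), but the connection object in
# this script is named db, so db.close() is presumably what was meant (the
# while True loop never breaks, so that cleanup is unreachable anyway).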
self.b_conv2,scale=True,center=True,training=train_mode,name='batchnorm2')\n self.norm2 = self.conv2d(self.h_conv1, self.W_conv2, stride=[1, 2, 2, 1]) + self.b_conv2\n self.h_conv2 = tf.nn.leaky_relu(self.norm2, alpha=0.1)\n if verbose:\n print(self.h_conv2.get_shape())\n\n # conv layer 3\n with tf.variable_scope('conv3'):\n self.W_conv3 = self.weight_variable([1, 5, 16, 32],var_name='wconv3')\n self.b_conv3 = self.bias_variable([32],var_name='bconv3')\n #self.norm3 = tf.layers.batch_normalization(self.conv2d(self.h_conv2, self.W_conv3, stride=[1, 2, 2, 1]) + self.b_conv3, scale=True,center=True,training=train_mode,name='batchnorm3')\n self.norm3 = self.conv2d(self.h_conv2, self.W_conv3, stride=[1, 2, 2, 1]) + self.b_conv3\n self.h_conv3 = tf.nn.leaky_relu(self.norm3,alpha=0.1)\n if verbose:\n print(self.h_conv3.get_shape())\n\n self.fc1 = self.fc_layer(self.h_conv3, self.get_size(self.h_conv3), 256, 'fc1')\n if verbose:\n print(self.fc1.get_shape())\n\n #self.fc2 = self.fc_layer(self.fc1, self.get_size(self.fc1), 512, 'fc2')\n #if verbose:\n # print(self.fc2.get_shape())\n\n self.fc2 = self.fc_layer(self.fc1, self.get_size(self.fc1), 128, 'fc2')\n if verbose:\n print(self.fc2.get_shape())\n\n #self.fc4 = self.fc_layer(self.fc3, self.get_size(self.fc3), 64, 'fc4')\n #if verbose:\n # print(self.fc4.get_shape())\n\n nparams = np.prod(output_shape)\n if full_cov:\n self.final_layer = self.fc_layer(self.fc2, self.get_size(self.fc2), nparams + nparams ** 2, 'final_layer')\n else:\n self.final_layer = self.fc_layer(self.fc2, self.get_size(self.fc2), nparams * 2, 'final_layer')\n self.final_layer = tf.concat([self.final_layer[:, :nparams], tf.nn.softplus(self.final_layer[:, nparams:])], 1)\n\n self.output = tf.identity(self.final_layer,name='output')\n if verbose:\n print(self.output.get_shape())\n\n def conv2d(self, x, W, stride=[1,1,1,1]):\n \"\"\"conv2d returns a 2d convolution layer with full stride.\"\"\"\n return tf.nn.conv2d(x, W, strides=stride, padding='SAME')\n\n def max_pool_2x2(self, x):\n \"\"\"max_pool_2x2 downsamples a feature map by 2X.\"\"\"\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')\n\n def max_pool_2x2_1(self, x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 1, 1, 1], padding='SAME')\n\n def weight_variable(self, shape, var_name):\n \"\"\"weight_variable generates a weight variable of a given shape.\"\"\"\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.get_variable(name=var_name,initializer=initial)\n\n def bias_variable(self, shape, var_name):\n \"\"\"bias_variable generates a bias variable of a given shape.\"\"\"\n initial = tf.constant(0.001, shape=shape)\n return tf.get_variable(name=var_name,initializer=initial)\n\n def fc_layer(self, bottom, in_size, out_size, name):\n with tf.variable_scope(name):\n weights, biases = self.get_fc_var(in_size, out_size, name)\n x = tf.reshape(bottom, [-1, in_size])\n fc = tf.nn.bias_add(tf.matmul(x, weights), biases)\n return fc\n\n def get_fc_var(self, in_size, out_size, name, init_type='xavier'):\n if init_type == 'xavier':\n weight_init = [\n [in_size, out_size],\n tf.contrib.layers.xavier_initializer(uniform=False)]\n else:\n weight_init = tf.truncated_normal(\n [in_size, out_size], 0.0, 0.001)\n bias_init = tf.truncated_normal([out_size], .0, .001)\n weights = self.get_var(weight_init, name, 0, name + \"_weights\")\n biases = self.get_var(bias_init, name, 1, name + \"_biases\")\n\n return weights, biases\n\n def get_var(\n self, initial_value, name, idx,\n 
var_name, in_size=None, out_size=None):\n if self.data_dict is not None and name in self.data_dict:\n value = self.data_dict[name][idx]\n else:\n value = initial_value\n\n if self.trainable:\n # get_variable, change the boolean to numpy\n if type(value) is list:\n var = tf.get_variable(\n name=var_name, shape=value[0], initializer=value[1])\n else:\n var = tf.get_variable(name=var_name, initializer=value)\n else:\n var = tf.constant(value, dtype=tf.float32, name=var_name)\n #var = tf.get_variable(name=var_name, initializer=value)\n\n self.var_dict[(name, idx)] = var\n\n return var\n\n'''\n# assumes isotropicity\n'''\ndef heteroskedastic_loss(p, q, nparams):\n param_est = p[:,:nparams]\n var = p[:, nparams:]\n diff_tensor = (param_est - q) ** 2\n return tf.reduce_sum( diff_tensor/var + tf.log(var) )\n\n'''\n# train the full covariance matrix instead\n'''\ndef heteroskedastic_cov_loss(p, q, nparams, eps=10):\n param_est = p[:, :nparams]\n # reshape to a matrix\n cov = tf.nn.softplus(tf.reshape(p[:,nparams:],[-1,nparams,nparams]))\n # extract the upper triangular matrix\n cov_upper = tf.matrix_band_part(cov, 0, 0)\n # enforce symmetry\n cov_sym = 0.5 * (cov_upper + tf.linalg.transpose(cov_upper))\n # determinant of covariance matrix\n cov_det = tf.linalg.det(cov_sym)\n # inverse of the covariance matrix\n cov_inv = tf.linalg.inv(cov_sym)\n # eigen values\n cov_eig = tf.linalg.eigvalsh(cov_sym)\n\n # diff\n diff = tf.expand_dims(param_est - q, axis = -1)\n term1 = tf.squeeze(tf.matmul(tf.matmul(tf.linalg.transpose(diff), cov_inv), diff))\n loss = tf.reduce_sum( term1 + tf.log(1e-30 + tf.abs(cov_det)) - eps * tf.minimum(tf.reduce_min(cov_eig, axis=-1), 0))\n #loss = tf.reduce_sum( term1 + tf.log(1e-30 + tf.abs(cov_det)) )\n return loss, cov_sym\n\ndef train_reverse_model(config):\n\n train_files = os.path.join(\n config.base_dir,\n config.tfrecord_dir,\n config.train_tfrecords)\n val_files = os.path.join(\n config.base_dir,\n config.tfrecord_dir,\n config.val_tfrecords)\n\n with tf.device('/cpu:0'): \n train_labels, train_data = inputs(\n tfrecord_file=train_files,\n num_epochs=config.epochs,\n batch_size=config.train_batch,\n target_data_dims=config.param_dims,\n target_label_dims=config.output_hist_dims)\n val_labels, val_data = inputs(\n tfrecord_file=val_files,\n num_epochs=config.epochs,\n batch_size=config.val_batch,\n target_data_dims=config.param_dims,\n target_label_dims=config.output_hist_dims)\n with tf.device('/gpu:0'):\n with tf.variable_scope(\"reversemodel\") as scope:\n print (\"creating the model\")\n model = cnn_reverse_model()\n model.build(train_data, config.output_hist_dims[1:], config.param_dims[1:], train_mode=True, full_cov=config.full_cov_matrix)\n y_conv = model.output\n nparams = np.prod(config.param_dims[1:])\n\n # Define loss and optimizer\n with tf.name_scope('loss'):\n labels = tf.reshape(train_labels, [-1, nparams])\n\n #### depending on the config, use the appropriate loss\n if config.full_cov_matrix:\n hke_loss, cov_sym = heteroskedastic_cov_loss(y_conv, labels, nparams)\n else:\n hke_loss = heteroskedastic_loss(y_conv, labels, nparams)\n\n with tf.name_scope('adam_optimizer'):\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_step = tf.train.AdamOptimizer(1e-4).minimize(hke_loss)\n\n #####\n ## VALIDATION\n #####\n print(\"building a validation model\")\n scope.reuse_variables()\n val_model = cnn_reverse_model()\n val_model.build(val_data, config.output_hist_dims[1:], config.param_dims[1:], 
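# heteroskedastic_loss above is the diagonal-Gaussian negative
# log-likelihood up to a constant: sum((mu - y)^2 / var + log var). The
# softplus head can still emit variances arbitrarily close to zero, so a
# small floor keeps both the division and the log finite -- a hedged
# TF1-style sketch:
import tensorflow as tf

def heteroskedastic_loss_stable(pred, target, nparams, eps=1e-6):
    # pred packs [means, variances]; clamp variances away from zero.
    mean = pred[:, :nparams]
    var = pred[:, nparams:] + eps
    return tf.reduce_sum((mean - target) ** 2 / var + tf.log(var))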
train_mode=False, full_cov=config.full_cov_matrix)\n val_res = val_model.output\n norm_val_labels = tf.reshape(val_labels, [-1,nparams])\n \n #### select loss function for the val model as well\n if config.full_cov_matrix:\n val_loss, _ = heteroskedastic_cov_loss(val_res, norm_val_labels, nparams)\n else:\n val_loss = heteroskedastic_loss(val_res, norm_val_labels, nparams)\n\n tf.summary.scalar(\"loss\", hke_loss)\n summary_op = tf.summary.merge_all()\n saver = tf.train.Saver(tf.global_variables())\n\n gpuconfig = tf.ConfigProto()\n gpuconfig.gpu_options.allow_growth = True\n gpuconfig.allow_soft_placement = True\n\n with tf.Session(config=gpuconfig) as sess:\n train_writer = tf.summary.FileWriter(os.path.join(config.base_dir,config.summary_dir,config.model_name))\n train_writer.add_graph(tf.get_default_graph())\n\n init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n sess.run(init_op)\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n\n step = 0\n start = time.time()\n try:\n while not coord.should_stop():\n # train for a step\n if config.full_cov_matrix:\n _, loss, outputs, tr_data, tr_labels, norm_tr_labels, cov_mat = sess.run([train_step, hke_loss, y_conv, train_data, train_labels, labels, cov_sym])\n else:\n _, loss, outputs, tr_data, tr_labels, norm_tr_labels = sess.run([train_step, hke_loss, y_conv, train_data, train_labels, labels])\n\n step+=1\n if step % config.print_iters == 0:\n finish = time.time()\n print(\"step={}, loss={}, time_elapsed={} s/step\".format(step,loss,(finish-start)/float(config.print_iters)))\n start = finish\n saver.save(sess,os.path.join(\n config.model_output,\n config.model_name+'_'+str(step)+'.ckpt'\n ),global_step=step)\n if config.full_cov_matrix:\n print(cov_mat)\n\n if step % config.val_iters == 0:\n val_forward_pass_time = time.time()\n v_data, v_labels, norm_v_labels, v_res, v_loss = sess.run([val_data, val_labels, norm_val_labels, val_res, val_loss])\n\n summary_str = sess.run(summary_op)\n train_writer.add_summary(summary_str, step)\n print(\"\\t val loss = {}, time_elapsed = {}s\".format(v_loss, time.time() - val_forward_pass_time))\n '''\n nparams = np.prod(config.param_dims[1:])\n color_v = ['r', 'g', 'b', 'k', 'm', 'c', 'y']\n for k in range(nparams): \n plt.scatter(norm_v_labels[:, k], v_res[:, k], c = color_v[k], alpha=0.5); \n\n plt.pause(1);\n plt.clf()\n '''\n if config.full_cov_matrix:\n data_dump = {'predictions': outputs, 'labels': norm_tr_labels, 'cov':cov_mat}\n pickle.dump(data_dump, open( os.path.join(config.base_dir,config.summary_dir,config.model_name,'step%d.pickle'%step), 'wb'))\n \n except tf.errors.OutOfRangeError:\n print(\"Finished training for %d epochs\" % config.epochs)\n finally:\n coord.request_stop()\n coord.join(threads)\n\n\ndef test_rev_model_eval(config):\n test_files = os.path.join(\n config.base_dir,\n config.tfrecord_dir,\n config.test_tfrecords)\n\n errors = []\n data, labels, preds = [], [], []\n\n with tf.device('/cpu:0'): \n '''\n test_labels, test_data = inputs(\n tfrecord_file=test_files,\n num_epochs=1,\n batch_size=config.test_batch,\n target_data_dims=config.param_dims,\n target_label_dims=config.output_hist_dims)\n '''\n test_data = tf.placeholder(tf.float32, [1000, 1, 256, 2])\n test_labels = tf.placeholder(tf.float32, [1000, 1, 4, 1])\n\n with tf.device('/gpu:0'):\n with tf.variable_scope(\"model\") as scope:\n model = cnn_reverse_model()\n model.build(test_data, config.output_hist_dims[1:], config.param_dims[1:], 
train_mode=False)\n y_conv = model.output\n nparams = np.prod(config.param_dims[1:])\n labels = tf.reshape(test_labels, [-1, nparams])\n #labels = (labels - config.min_param_values)/config.param_range\n error = heteroskedastic_loss(y_conv, labels, nparams)\n\n gpuconfig = tf.ConfigProto()\n gpuconfig.gpu_options.allow_growth = True\n gpuconfig.allow_soft_placement = True\n saver = tf.train.Saver()\n\n X = pickle.load(open('../data/ddm/parameter_recovery/ddm_param_recovery_data_n_3000.pickle', 'rb'))\n with tf.Session(config=gpuconfig) as sess:\n init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n sess.run(init_op)\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n step=0\n try:\n while not coord.should_stop():\n # load the model here\n ckpts=tf.train.latest_checkpoint(config.model_output)\n saver.restore(sess,ckpts)\n #ip , op, pred, err, norm_labels = sess.run([test_data, test_labels, y_conv, error, labels])\n pred, err, norm_labels = sess.run([y_conv,error,labels],feed_dict={test_data: np.expand_dims(X[0],axis=1),test_labels:np.expand_dims(np.expand_dims(X[1],axis=-1),axis=1)})\n plt.figure(); sc = plt.scatter(norm_labels[:,0], pred[:,0], c=pred[:,4], edgecolors='none', cmap='jet', alpha=0.5); plt.colorbar(sc);\n plt.figure(); sc = plt.scatter(norm_labels[:,1], pred[:,1], c=pred[:,5], edgecolors='none', cmap='jet', alpha=0.5); plt.colorbar(sc);\n plt.figure(); sc =plt.scatter(norm_labels[:,2], pred[:,2], c=pred[:,6], edgecolors='none', cmap='jet', alpha=0.5); plt.colorbar(sc);\n plt.figure(); sc = plt.scatter(norm_labels[:,3], pred[:,3], c=pred[:,7], edgecolors='none', cmap='jet', alpha=0.5); plt.colorbar(sc)\n plt.show()\n \n import ipdb; ipdb.set_trace()\n batch_err = np.sum(err, axis=1)\n errors.append(batch_err)\n data.append(ip)\n labels.append(op)\n preds.append(pred)\n print('{} batches complete..'.format(len(errors)))\n except tf.errors.OutOfRangeError:\n print('Epoch limit reached!')\n finally:\n coord.request_stop()\n coord.join(threads)\n import ipdb; ipdb.set_trace()\n '''\n err_vals = np.array(errors).reshape((-1,))\n plt.hist(err_vals, bins=1000)\n plt.title('Model: %s, min error=%0.3f, max error=%0.3f'%(config.model_name,np.min(err_vals), np.max(err_vals)), fontsize=12)\n plt.gca().tick_params(axis='both', which='major', labelsize=6)\n plt.gca().tick_params(axis='both', which='minor', labelsize=6)\n #import ipdb; ipdb.set_trace()\n plt.savefig(os.path.join(config.results_dir, '{}_eval.png'.format(config.model_name)), dpi=300)\n plt.close()\n\n inp_data = np.array(data)\n inp_data = inp_data.reshape((inp_data.shape[0]*inp_data.shape[1],inp_data.shape[2],inp_data.shape[3]))\n inp_labs = np.array(labels)\n inp_labs = inp_labs.reshape((inp_labs.shape[0]*inp_labs.shape[1],inp_labs.shape[2],inp_labs.shape[3]))\n idx = np.argsort(err_vals)\n net_preds = np.array(preds)\n net_preds = net_preds.reshape((net_preds.shape[0]*net_preds.shape[1],net_preds.shape[2]))\n net_preds = net_preds.reshape(inp_labs.shape)\n\n # lets draw a 3x3 grid with\n fig, ax = plt.subplots(3,3)\n for k in range(9):\n r, c = int(k/3), k%3\n cur_idx = idx[-1 * (k+1)]\n parameters = np.around(inp_data[cur_idx].flatten(),decimals=2)\n err = err_vals[cur_idx]\n ax[r,c].plot(inp_labs[cur_idx],'r',alpha=0.5)\n ax[r,c].plot(net_preds[cur_idx],'-.g',alpha=0.5)\n mystr = 'err=%0.2f'%(err)\n ax[r,c].text(0.9,.9, \"\\n\".join(wrap('{}, params:{}'.format(mystr, parameters),30)), fontsize=6, horizontalalignment='right', 
verticalalignment='center', transform=ax[r,c].transAxes)\n #plt.show() \n ax[r,c].tick_params(axis='both', which='major', labelsize=6)\n ax[r,c].tick_params(axis='both', which='minor', labelsize=6)\n plt.savefig(os.path.join(config.results_dir, '{}_debug.png'.format(config.model_name)),dpi=300)\n plt.close()\n '''\n","sub_path":"al-cnn/src/reverse_model.py","file_name":"reverse_model.py","file_ext":"py","file_size_in_byte":19861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"645455501","text":"from django.db import models\nfrom user.models import MyUser\nfrom django.conf import settings\n\nfrom page.models import City\n\n# Create your models here.\n\nclass Plan(models.Model):\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete = models.CASCADE, related_name=\"%(app_label)s_%(class)s_user\")\n name = models.CharField(max_length = 100, null = True)\n des = models.TextField(null = True)\n image = models.ImageField(upload_to = 'media', null = True)\n time = models.DateTimeField(null = True)\n create_time = models.DateTimeField(auto_now_add = True)\n city = models.ForeignKey(City, on_delete = models.CASCADE, null = True)\n city_code = models.CharField(max_length = 100, null = True)\n address = models.CharField(max_length = 100, null = True)\n share = models.IntegerField()\n time_year = models.BooleanField(default = False)\n time_month = models.BooleanField(default = False)\n time_day = models.BooleanField(default = False)\n time_hour = models.BooleanField(default = False)\n time_minute = models.BooleanField(default = False)\n timezone = models.IntegerField(null = True)\n\nclass PlanParticipants(models.Model):\n plan = models.OneToOneField(Plan, on_delete = models.CASCADE)\n participants = models.ManyToManyField(settings.AUTH_USER_MODEL, through = 'ParticipantMoreInfo', related_name = \"%(app_label)s_%(class)s_participants\", through_fields = ('planparticipants','person'),)\n\nclass ParticipantMoreInfo(models.Model):\n person = models.ForeignKey(settings.AUTH_USER_MODEL, related_name = \"%(app_label)s_%(class)s_person\")\n planparticipants = models.ForeignKey(PlanParticipants, related_name = \"%(app_label)s_%(class)s_participants\")\n plan = models.ForeignKey(Plan, related_name = \"%(app_label)s_%(class)s_plan\")\n is_join = models.BooleanField(default = False)\n owner_invited = models.BooleanField(default = False)\n user_invited = models.BooleanField(default = False)\n time_join = models.DateTimeField(auto_now_add = True, null = True)\n\n class Meta:\n unique_together = ('person','planparticipants','plan')\n","sub_path":"plan/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"230412504","text":"from django.urls import path\nfrom django.conf.urls import url, include\nfrom django.views.generic import TemplateView\n\n\nfrom . 
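# Because PlanParticipants declares its participants M2M with a through
# model (ParticipantMoreInfo), memberships are created on the through model
# rather than with .add(). A usage sketch (plan and user assumed to be saved
# instances; plan.planparticipants is the OneToOne reverse accessor):
ParticipantMoreInfo.objects.create(
    person=user,
    planparticipants=plan.planparticipants,
    plan=plan,
    is_join=True,
)
# (On Django >= 2.0 the ForeignKey fields on ParticipantMoreInfo would also
# need an explicit on_delete argument.)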
import views\n\nurlpatterns = [\n path('start', TemplateView.as_view(template_name='base/start.html')),\n path('consent', TemplateView.as_view(template_name='base/consent.html')),\n path('signin', TemplateView.as_view(template_name='base/signin.html')),\n path('createsession', views.create_session, name='create_session'),\n path('/selectsource',TemplateView.as_view(template_name='base/selectsource.html')), \n\n # instagram \n path('/instagram/accountinfo/',views.accountinfo, name='accountinfo'), \n path('/instagram/postinfo/',TemplateView.as_view(template_name='base/postinfo.html')), \n path('/instagram/postinfo/checkpost/',views.checkpost, name='checkpost'),\n path('/instagram/postinfo/addposts/',views.addposts, name='addposts'),\n path('/instagram/addtags//',views.addtags, name='addtags'), \n path('/instagram/classification/instruction/',TemplateView.as_view(template_name='base/classification_instruction.html'), name='classification_instruction'), \n path('/instagram/classification//',views.classification, name='classification'),\n path('/instagram/classification/finish/',views.finish, name='finish'), \n\n # image upload\n path('/upload/',views.BasicUploadView.as_view(), name='upload'), \n path('/upload/delete/',views.deletephoto, name='deletephoto'), \n path('/upload/getphotos/',views.getphotos, name='getphotos'), \n path('/upload/createposts/',views.createposts_upload, name='createposts_upload'), \n path('/upload/generatetags//',views.generatetags, name='generatetags'), \n path('/upload/classification/instruction/',TemplateView.as_view(template_name='base/classification_instruction.html'), name='classification_instruction'), \n path('/upload/classification//',views.classification_upload, name='classification_upload'),\n\n # end page\n path('/upload/classification/finish/',views.finish, name='finish'), \n\n # image upload from other device \n path('/otherdevice/',TemplateView.as_view(template_name='base/choosedevice.html')), \n]\n","sub_path":"tagannotator/base/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"441734132","text":"#!/usr/bin/env python3\n# __@@__ Coding:utf-8\n\n\"\"\"\n@Version: ??\n@Author: luxutao\n@Licence: Apache Licence\n@Contact: xutao.lu.cn@gmail.com\n@Site: http://www.123m.me\n@Filename: testasync.py\n@Projectname: PycharmProjects\n@Time: 2016-9-7 下午10:14\n@Platform: Xubuntu 16.04\n@Python 3.5.2\n\"\"\"\n\nimport asyncio\n\n@asyncio.coroutine\ndef hello():\n print(\"Hello world!\")\n # 异步调用asyncio.sleep(1):\n r = yield from asyncio.sleep(5)\n print(r)\n print(\"Hello again!\")\n@asyncio.coroutine\ndef heihei():\n print('hello nimei')\n yield from asyncio.sleep(5)\n# 获取EventLoop:\nloop = asyncio.get_event_loop()\ntask = [heihei(),hello()]\n# 执行coroutine\nloop.run_until_complete(asyncio.wait(task))\nloop.close()","sub_path":"asynciomodule/testasync.py","file_name":"testasync.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"317973820","text":"#!/usr/bin/env python2.7\n#@HEADER\n###############################################################################\nclass MoveCountsViewerParameters:\n \"\"\"A class to describe MoveCountsViewer parameters\n \"\"\"\n\n ###########################################################################\n def __init__(self, viewer):\n\n # Set parameters based on viewer's attribute values\n\n # Set renderer 
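# testasync.py above uses the pre-3.5 @asyncio.coroutine / yield from
# spelling, and prints r even though asyncio.sleep() resolves to None. The
# same two-coroutine demo in async/await form, as a sketch:
import asyncio

async def hello():
    print("Hello world!")
    await asyncio.sleep(5)
    print("Hello again!")

async def heihei():
    print("hello nimei")
    await asyncio.sleep(5)

loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.gather(hello(), heihei()))
loop.close()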
parameters\n self.renderer_background = [1, 1, 1]\n\n # Set actor_vertices parameters\n self.actor_vertices_screen_size = 50 if viewer.interactive else 5000\n self.actor_vertices_color = [0, 0, 0]\n self.actor_vertices_opacity = .3 if viewer.interactive else .5\n\n # Set actor_labels parameters\n self.actor_labels_color = [0, 0, 0]\n self.actor_labels_font_size = 16 if viewer.interactive else 150\n self.actor_edges_opacity = .5 if viewer.interactive else 1\n self.actor_edges_line_width = 2 if viewer.interactive else 15\n\n # Set actor_arrows parameters\n self.actor_arrows_edge_glyph_position = .5\n self.actor_arrows_source_scale = .075\n\n # Set actor_bar parameters\n self.actor_bar_number_of_labels = 2\n self.actor_bar_width = .2\n self.actor_bar_heigth = .08\n self.actor_bar_position = [.4, .91]\n self.actor_bar_title_color = [0, 0, 0]\n self.actor_bar_label_color = [0, 0, 0]\n\n # Set window parameters\n self.window_size_x = 600\n self.window_size_y = 600\n\n # Set wti (WindowToImageFilter) parameters\n self.wti_scale = 10\n\n###############################################################################\n","sub_path":"src/Applications/MoveCountsViewerParameters.py","file_name":"MoveCountsViewerParameters.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"342169647","text":"#!/usr/bin/env python\nimport rospy\nfrom std_srvs.srv import Trigger, TriggerResponse\nfrom weiss_gripper_ieg76.srv import Move, MoveResponse, SetForce, SetForceResponse\nfrom serial_comm import SerialPortComm\nfrom driver_logic import DriverLogic\nfrom state_publisher import StatesPublisher\n\n\nclass Driver(object):\n\tdef __init__(self):\n\t\tserial_port_addr = rospy.get_param(\"~serial_port_address\", '/dev/ttyACM0')\n\t\tself.serial_port_comm = SerialPortComm(serial_port_addr, serial_timeout=0)\n\n\t\tself.driver_logic = DriverLogic(self.serial_port_comm)\n\n\t\tself.states_publisher_thread = StatesPublisher(0.8, self.serial_port_comm)\n\t\trospy.on_shutdown(self.shutdown_handler)\n\n\tdef log_reply(self, reply):\n\t\tif reply.success:\n\t\t\trospy.loginfo(reply.message)\n\t\telse:\n\t\t\trospy.logerr(reply.message)\n\n\tdef check_position(self, pos):\n\t\treturn 0 <= pos <= 30\n\n\tdef check_force(self, force):\n\t\treturn 0 <= force <= 100\n\n\tdef handle_reference(self, req):\n\t\trospy.loginfo(\"Referencing\")\n\t\treply = TriggerResponse()\n\t\tself.driver_logic.service_called(transition=\"do_reference\", params=req, trigger_response=reply)\n\t\treply.message = 'Referencing ' + reply.message\n\t\tself.log_reply(reply)\n\t\treturn reply\n\n\tdef handle_open(self, req):\n\t\trospy.loginfo(\"Opening\")\n\t\treply = MoveResponse()\n\t\tif not self.check_position(req.position):\n\t\t\treply.success = False\n\t\t\treply.message = 'Opening failed. Position must be 0.0(mm) <= position <= 30.0(mm).'\n\t\telse:\n\t\t\tself.driver_logic.service_called(transition=\"do_open\", params=req, trigger_response=reply)\n\t\t\treply.message = 'Opening ' + reply.message\n\t\tself.log_reply(reply)\n\t\treturn reply\n\n\tdef handle_close(self, req):\n\t\trospy.loginfo(\"Closing\")\n\t\treply = MoveResponse()\n\t\tif not self.check_position(req.position):\n\t\t\treply.success = False\n\t\t\treply.message = 'Closing failed. 
Position must be 0.0(mm) <= position <= 30.0(mm).'\n\t\telse:\n\t\t\tself.driver_logic.service_called(transition=\"do_close\", params=req, trigger_response=reply)\n\t\t\treply.message = 'Closing ' + reply.message\n\t\tself.log_reply(reply)\n\t\treturn reply\n\n\tdef handle_grasp(self, req):\n\t\trospy.loginfo(\"Grasping\")\n\t\treply = MoveResponse()\n\t\tif not self.check_position(req.position):\n\t\t\treply.success = False\n\t\t\treply.message = 'Grasping failed. Position must be 0.0(mm) <= position <= 30.0(mm).'\n\t\telse:\n\t\t\tself.driver_logic.service_called(transition=\"do_grasp\", params=req, trigger_response=reply)\n\t\t\treply.message = 'Grasping ' + reply.message\n\t\tself.log_reply(reply)\n\t\treturn reply\n\n\tdef handle_set_force(self, req):\n\t\trospy.loginfo(\"Set force\")\n\t\treply = SetForceResponse()\n\t\tif not self.check_force(req.grasping_force):\n\t\t\treply.success = False\n\t\t\treply.message = 'Force must be 0(%) <= force <= 100(%).'\n\t\telse:\n\t\t\treply.success = self.serial_port_comm.set_force(req.grasping_force)\n\t\tif reply.success:\n\t\t\treply.message = 'Set force successful.'\n\t\telse:\n\t\t\treply.message = 'Set force failed. ' + reply.message\n\t\tself.log_reply(reply)\n\t\treturn reply\n\n\tdef shutdown_handler(self):\n\t\tself.states_publisher_thread.shutdown()\n\t\tself.serial_port_comm.shutdown()\n\t\trospy.loginfo(\"Gracefully shutting down the driver...\")\n\n\tdef run(self):\n\t\tself.serial_port_comm.daemon = True\n\t\tself.states_publisher_thread.daemon = True\n\n\t\trospy.logdebug(\"Starting threads...\")\n\t\tself.serial_port_comm.start()\n\t\tself.states_publisher_thread.start()\n\t\trospy.logdebug(\"Threads started.\")\n\n\t\tgrasp_force = rospy.get_param(\"~grasping_force\", 100)\n\t\trospy.loginfo('Setting force to {}%...'.format(grasp_force))\n\t\twhile True:\n\t\t\tif self.serial_port_comm.set_force(grasp_force):\n\t\t\t\tbreak\n\t\trospy.loginfo('Force set.')\n\n\t\tserv_ref = rospy.Service('~reference', Trigger, self.handle_reference)\n\t\tserv_ref = rospy.Service('~open', Move, self.handle_open)\n\t\tserv_ref = rospy.Service('~close', Move, self.handle_close)\n\t\tserv_ref = rospy.Service('~grasp', Move, self.handle_grasp)\n\t\t# serv_ref = rospy.Service('ack', Trigger, self.handle_ack)\n\t\tserv_ref = rospy.Service('~set_force', SetForce, self.handle_set_force)\n\n\t\trospy.loginfo(\"Ready to receive requests.\")\n\n\t\trospy.spin()\n\n\nif __name__ == \"__main__\":\n\t# rospy.init_node('ieg_driver', log_level=rospy.DEBUG)\n\trospy.init_node('ieg_driver')\n\n\tdriver = Driver()\n\tdriver.run()\n","sub_path":"src/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":4182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"78826434","text":"from __future__ import absolute_import\nfrom config import config\nfrom ui.MultiSpot_Panel import MultiSpotPanelUI\nfrom __versions__ import PyVersion\nfrom utils.exceptions import Python3Error\nfrom utils.database import getDatabaseConnection\nfrom utils.database import MySQLCursorDict\nfrom utils.error_message import msgBox\nfrom mysql.connector.errors import IntegrityError\n\n\nclass MultiSpotPanel(MultiSpotPanelUI):\n \"\"\"Multi Spot Panel\"\"\"\n def __init__(self, parent):\n #TODO support for Python 3.0 and higher\n if PyVersion < 3.0:\n super(MultiSpotPanel, self).__init__(parent)\n else:\n raise Python3Error('Python Version incorrect! 
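# run() above rebinds serv_ref for every service it advertises; rospy keeps
# the services registered regardless, but holding the handles is clearer,
# and the set_force retry loop spins without yielding. A fragment-level
# sketch of both points (same names as in run()):
self.services = [
    rospy.Service('~reference', Trigger, self.handle_reference),
    rospy.Service('~open', Move, self.handle_open),
    rospy.Service('~close', Move, self.handle_close),
    rospy.Service('~grasp', Move, self.handle_grasp),
    rospy.Service('~set_force', SetForce, self.handle_set_force),
]
while not self.serial_port_comm.set_force(grasp_force):
    rospy.sleep(0.1)  # back off instead of busy-waiting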
Expected 2.7 or '\n 'Lower')\n\n def ClearAll(self):\n \"\"\"Clears the whole Panel\"\"\"\n self.cb_Episodes.SetValue(True)\n self.cb_MultiSpot.SetValue(False)\n self.tc_multiSpotID.SetValue('')\n self.sc_search.SetValue('')\n self.lc_newList.DeleteAllItems()\n self.lc_searchList.DeleteAllItems()\n\n def setPermissions(self):\n \"\"\"Set User Permissions\"\"\"\n if config.qs_user['department'] == 'ADMIN':\n self.btn_CreateMultiSpot.Enable(True)\n self.btn_DeleteMultiSpot.Enable(True)\n self.btn_UpdateMultiSpot.Enable(True)\n elif config.qs_user['department'] == 'HOD':\n self.btn_CreateMultiSpot.Enable(True)\n self.btn_DeleteMultiSpot.Enable(False)\n self.btn_UpdateMultiSpot.Enable(True)\n elif config.qs_user['department'] == 'LIBRARY':\n self.btn_CreateMultiSpot.Enable(True)\n self.btn_DeleteMultiSpot.Enable(False)\n self.btn_UpdateMultiSpot.Enable(True)\n elif config.qs_user['department'] == 'MCR':\n self.btn_CreateMultiSpot.Enable(False)\n self.btn_DeleteMultiSpot.Enable(False)\n self.btn_UpdateMultiSpot.Enable(False)\n elif config.qs_user['department'] == 'QC':\n self.btn_CreateMultiSpot.Enable(False)\n self.btn_DeleteMultiSpot.Enable(False)\n self.btn_UpdateMultiSpot.Enable(False)\n elif config.qs_user['department'] == 'EDITORS':\n self.btn_CreateMultiSpot.Enable(True)\n self.btn_DeleteMultiSpot.Enable(False)\n self.btn_UpdateMultiSpot.Enable(True)\n else:\n msgBox('Please Login!')\n self.btn_CreateMultiSpot.Enable(False)\n self.btn_DeleteMultiSpot.Enable(False)\n self.btn_UpdateMultiSpot.Enable(False)\n\n def MultiSpot_Choice(self, event):\n \"\"\"Multi Spot Search Choicebox\"\"\"\n self.cb_Episodes.SetValue(False)\n event.Skip()\n\n def Episode_Choice(self, event):\n \"\"\"Episode Search Choicebox\"\"\"\n self.cb_MultiSpot.SetValue(False)\n event.Skip()\n\n def DoSearch(self, event):\n \"\"\"Search\"\"\"\n self.lc_searchList.DeleteAllItems()\n if self.cb_Episodes.GetValue() is True:\n sql = \"SELECT episode_id from program WHERE episode_id LIKE \" \\\n \"'%{0}%';\".format(self.sc_search.GetValue())\n connection = getDatabaseConnection()\n cursor = connection.cursor(cursor_class=MySQLCursorDict)\n cursor.execute(sql)\n results = cursor.fetchall()\n if results == []:\n msgBox(\"No Records Found!\", 'Info')\n else:\n msgBox(\"Records Found!\", 'Info')\n connection.close()\n #print '[SQL]' + sql\n for x, row in enumerate(results):\n self.lc_searchList.InsertStringItem(x, row['episode_id'])\n if self.cb_MultiSpot.GetValue() is True:\n sql = \"SELECT id from multi_spot WHERE id LIKE '%{}%';\".format(\n self.sc_search.GetValue())\n connection = getDatabaseConnection()\n cursor = connection.cursor(cursor_class=MySQLCursorDict)\n cursor.execute(sql)\n results = cursor.fetchall()\n connection.close()\n #print '[SQL]' + sql\n for x, row in enumerate(results):\n self.lc_searchList.InsertStringItem(x, row['id'])\n event.Skip()\n\n def Search_Entered(self, event):\n \"\"\"Search Text Entered\"\"\"\n searchTerm = self.sc_search.GetValue()\n if any(x for x in config.invalidChars if x in searchTerm):\n self.sc_search.Clear()\n event.Skip()\n\n def SearchListItem_Selected(self, event):\n \"\"\"List Item Selected\"\"\"\n if self.cb_Episodes.GetValue() is True:\n if self.lc_newList.GetItemCount() < 20:\n self.lc_newList.InsertStringItem(self.lc_newList.GetItemCount(),\n self.lc_searchList.GetItem(\n self.lc_searchList\n .GetFirstSelected(),\n 0).GetText())\n if self.cb_MultiSpot.GetValue() is True:\n sql = \"SELECT * from multi_spot WHERE id='{}'\".format(\n self.lc_searchList.GetItem(\n 
self.lc_searchList.GetFirstSelected(), 0).GetText())\n connection = getDatabaseConnection()\n cursor = connection.cursor(cursor_class=MySQLCursorDict)\n cursor.execute(sql)\n results = cursor.fetchone()\n connection.close()\n #print '[SQL]' + sql\n self.tc_multiSpotID.SetValue(results['id'])\n self.lc_newList.DeleteAllItems()\n for x in range(0, 20):\n if results['episode_id_' + str(x + 1)] is not None:\n self.lc_newList.InsertStringItem(x, results[\n 'episode_id_' + str(x + 1)])\n event.Skip()\n\n def multiSpotID_Entered(self, event):\n \"\"\"Multi Spot ID Entered\"\"\"\n episode_id = self.tc_multiSpotID.GetValue()\n if any(x for x in config.invalidChars if x in episode_id):\n self.tc_multiSpotID.Clear()\n event.Skip()\n\n def newListItem_Selected(self, event):\n \"\"\"List Item Selected\"\"\"\n self.lc_newList.DeleteItem(self.lc_newList.GetFirstSelected())\n event.Skip()\n\n def CreateMultiSpot(self, event):\n \"\"\"Create Multi Spot\"\"\"\n if self.lc_newList.GetItemCount() >= 2:\n if self.tc_multiSpotID.GetValue() != '':\n sql = \"INSERT INTO multi_spot ( id , \"\n for x in range(0, self.lc_newList.GetItemCount()):\n sql += 'episode_id_' + str(x + 1)\n if x < self.lc_newList.GetItemCount() - 1:\n sql += ' , '\n sql += \") VALUES ( '{}' ,\".format(self.tc_multiSpotID.GetValue())\n for x in range(0, self.lc_newList.GetItemCount()):\n sql += \"'{}'\".format(self.lc_newList.GetItem(x, 0).GetText())\n if x < self.lc_newList.GetItemCount() - 1:\n sql += ' , '\n sql += ');'\n connection = getDatabaseConnection()\n cursor = connection.cursor(cursor_class=MySQLCursorDict)\n cursor.execute(sql)\n connection.commit()\n msgBox(\"Multi Spot {0} Created!\".format(\n self.tc_multiSpotID.GetValue()), 'Info')\n connection.close()\n #print '[SQL]' + sql\n self.tc_multiSpotID.Clear()\n self.lc_newList.DeleteAllItems()\n else:\n msgBox(\"Less than 2 Episodes\", \"Error!\")\n event.Skip()\n\n def UpdateMultiSpot(self, event):\n \"\"\"Update Multi Spot\"\"\"\n if self.lc_newList.GetItemCount() >= 2:\n sql = \"UPDATE multi_spot SET \"\n listCount = self.lc_newList.GetItemCount()\n for x in range(1, 21):\n if x <= listCount:\n #print x\n listItem = self.lc_newList.GetItem(x-1, 0).GetText()\n sql += \"episode_id_{0} = '{1}',\".format(x, listItem)\n elif x > listCount and x < 19:\n sql += \"episode_id_{0} = {1},\".format(x, 'Null')\n if x == 20:\n sql += \"episode_id_{0} = {1}\".format(x, 'Null')\n sql += \" WHERE id='{0}';\".format(self.tc_multiSpotID.GetValue())\n connection = getDatabaseConnection()\n cursor = connection.cursor(cursor_class=MySQLCursorDict)\n cursor.execute(sql)\n connection.commit()\n msgBox(\"Multi Spot {0} Updated!\".format(self.tc_multiSpotID.GetValue()),\n 'Info')\n connection.close()\n self.tc_multiSpotID.Clear()\n self.lc_newList.DeleteAllItems()\n #print '[SQL]' + sql\n else:\n msgBox(\"Less than 2 Episodes\", \"Error!\")\n event.Skip()\n\n def DeleteMultiSpot(self, event):\n \"\"\"Delete Multi Spot\"\"\"\n if config.qs_user['department'] == 'ADMIN':\n connection = getDatabaseConnection()\n cursor = connection.cursor(cursor_class=MySQLCursorDict)\n sql = \"DELETE FROM multi_spot WHERE id='{0}';\".format(\n self.tc_multiSpotID.GetValue())\n cursor.execute(sql)\n connection.commit()\n msgBox(\"Multi Spot {0} Deleted!\".format(\n self.tc_multiSpotID.GetValue()), 'Info')\n connection.close()\n #print '[SQL]' + sql\n self.tc_multiSpotID.Clear()\n self.lc_newList.DeleteAllItems()\n self.lc_searchList.DeleteAllItems()\n 
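# Every query in this panel is assembled with str.format plus the
# config.invalidChars blacklist. mysql-connector supports parameterized
# queries, which remove the need for character filtering -- a sketch for the
# episode search (search_term stands in for self.sc_search.GetValue()):
sql = "SELECT episode_id FROM program WHERE episode_id LIKE %s;"
cursor.execute(sql, ('%' + search_term + '%',))
results = cursor.fetchall()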
event.Skip()","sub_path":"MultiSpot.py","file_name":"MultiSpot.py","file_ext":"py","file_size_in_byte":9640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"445433144","text":"\"\"\"\n给定一个包含非负整数的 m x n 网格,请找出一条从左上角到右下角的路径,使得路径上的数字总和为最小。\n\n说明:每次只能向下或者向右移动一步。\n\n示例:\n输入:\n[\n  [1,3,1],\n [1,5,1],\n [4,2,1]\n]\n输出: 7\n解释: 因为路径 1→3→1→1→1 的总和最小。\n\"\"\"\n\n\nclass Solution:\n def minPathSum(self, grid):\n n = len(grid)\n m = len(grid[0])\n\n dp = [[0 for _ in range(m)] for __ in range(n)]\n\n for i in range(n):\n for j in range(m):\n if i == 0: # 第一行为dp的前一行+grid的当前行\n dp[0][j] = dp[0][j - 1] + grid[0][j]\n elif j == 0: # 第一列为dp的前一列+grid的当前列\n dp[i][0] = dp[i - 1][0] + grid[i][0]\n else: # dp的前一行或前一列的最小值+grid的当前行列\n dp[i][j] = min(dp[i - 1][j], dp[i][j - 1]) + grid[i][j]\n return dp[-1][-1]\n\n","sub_path":"LeedCode/动态规划/64. 最小路径和.py","file_name":"64. 最小路径和.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"23773773","text":"from movie import process_movie\nfrom detect import pipeline\nimport matplotlib.image as mpimg\n\n\ndef main():\n movies = {\n 'project': {\n 'input': 'project_video.mp4',\n 'output': 'project_video_output.mp4',\n 'debug_folder': 'project_video_debug',\n 'start_frame': 0,\n 'end_frame': 5,\n 'entire_clip': True,\n 'debug_frames': [1, 31, 61, 91, 121, 151, 181, 211, 241, 271, 301],\n },\n 'test': {\n 'input': 'test_video.mp4',\n 'output': 'test_video_output.mp4',\n 'debug_folder': 'test_video_debug',\n 'start_frame': 0,\n 'end_frame': 5,\n 'entire_clip': True,\n 'debug_frames': [],\n },\n }\n\n import sys\n videos = sys.argv[1:]\n\n for video in videos:\n process_movie(movies[video], pipeline)\n\nif __name__ == '__main__':\n main()\n","sub_path":"detect_movies.py","file_name":"detect_movies.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"593560078","text":"\"\"\"\nAccess to sqlite database\n\"\"\"\nfrom configparser import ConfigParser\nimport sqlite3\n\nconfig = ConfigParser()\nconfig.read('config.ini')\nsqlite_file = config.get('main_config', 'sqlite_file')\n\n\ndef read_all_customer_info():\n \"\"\"Reads all the customers' info. from the sqlite table and creates a\n dictionary based on the primary keys\"\"\"\n customers = {}\n conn = sqlite3.connect(sqlite_file)\n c = conn.cursor()\n for row in c.execute('SELECT * FROM customers'):\n customer_id, customer_name = row\n customers[customer_id] = customer_name\n conn.close()\n return customers\n\n\ndef write_customer_info(customer_id, name):\n \"\"\" Writes the new customer info. to the sqlite table and returns the\n primary key of the inserted entity \"\"\"\n conn = sqlite3.connect(sqlite_file)\n c = conn.cursor()\n c.execute('INSERT INTO customers (name) VALUES (?)', (name,))\n print(c.lastrowid)\n conn.commit()\n conn.close()\n return c.lastrowid\n\n\ndef cleanup_customer_info():\n \"\"\"Deletes all customer info. 
from the sqlite table\"\"\"\n conn = sqlite3.connect(sqlite_file)\n c = conn.cursor()\n c.execute('DELETE FROM customers')\n conn.commit()\n conn.close()\n\n\nif __name__ == '__main__':\n cleanup_customer_info()","sub_path":"python/src/data_access.py","file_name":"data_access.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"537115332","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_config import cfg\nfrom oslo_policy import policy\n\n\n_ENFORCER = None\n_ADMIN_CTX_POLICY = 'context_is_admin'\n_ADVSVC_CTX_POLICY = 'context_is_advsvc'\n\n\ndef reset():\n global _ENFORCER\n if _ENFORCER:\n _ENFORCER.clear()\n _ENFORCER = None\n\n\ndef init(conf=cfg.CONF, policy_file=None):\n \"\"\"Init an instance of the Enforcer class.\"\"\"\n\n global _ENFORCER\n if not _ENFORCER:\n _ENFORCER = policy.Enforcer(conf, policy_file=policy_file)\n _ENFORCER.load_rules(True)\n\n\ndef refresh(policy_file=None):\n \"\"\"Reset policy and init a new instance of Enforcer.\"\"\"\n reset()\n init(policy_file=policy_file)\n\n\ndef check_is_admin(context):\n \"\"\"Verify context has admin rights according to policy settings.\"\"\"\n init()\n # the target is user-self\n credentials = context.to_dict()\n if _ADMIN_CTX_POLICY not in _ENFORCER.rules:\n return False\n return _ENFORCER.enforce(_ADMIN_CTX_POLICY, credentials, credentials)\n\n\ndef check_is_advsvc(context):\n \"\"\"Verify context has advsvc rights according to policy settings.\"\"\"\n init()\n # the target is user-self\n credentials = context.to_dict()\n if _ADVSVC_CTX_POLICY not in _ENFORCER.rules:\n return False\n return _ENFORCER.enforce(_ADVSVC_CTX_POLICY, credentials, credentials)\n","sub_path":"neutron_lib/policy.py","file_name":"policy.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"463270009","text":"import tensorflow as tf \n\n##########\n# Class: Model\n#\n# Represents a model for the neural network\n##########\nclass Model:\n\n\t##########\n\t# Initializes a model\n\t# The layers parameter is a list representing our neural network:\n\t#\t\t* The first entry represents the input layer\n\t#\t\t* The last entry represents the output layer\n\t#\t\t* All other entries represent hidden layers\n\t#\t\t* The numbers for each entry represent number of nodes in the layer\n\t##########\n\tdef __init__(self, name, layers, weights, biases):\n\t\tself.name = name\n\t\tself.layers = layers \n\t\tself.weights = weights\n\t\tself.biases = biases \n\n\t\tself.__build_model()\n\n\t##########\n\t# Builds the model\n\t##########\n\tdef __build_model(self):\n\t\t# The number of hidden layers\n\t\tself.n_hidden_layers = len(self.layers) - 2\n\t\t# Strip the input and output layers, just leaving hidden layers\n\t\tself.hidden_layer_nodes = self.layers[1:-1]\n\t\t# Storage for hidden layers\n\t\tself.hidden_layers = [None] * self.n_hidden_layers\n\t\t# The 
number of input nodes \n\t\tself.n_inputs = self.layers[0]\n\t\t# The number of output nodes\n\t\tself.n_outputs = self.layers[len(self.layers) - 1]\n\n\t##########\n\t# Static method to build a new, untrained model\n\t##########\n\t@staticmethod\n\tdef new_model(name, layers):\n\t\t# The number of hidden layers\n\t\tn_hidden_layers = len(layers) - 2\n\t\t# Strip the input and output layers, just leaving hidden layers\n\t\thidden_layer_nodes = layers[1:-1]\n\t\t# Storage for hidden layers\n\t\thidden_layers = [None] * n_hidden_layers\n\t\t# The number of input nodes \n\t\tn_inputs = layers[0]\n\t\t# The number of output nodes\n\t\tn_outputs = layers[len(layers) - 1]\n\n\t\t# build the weights and biases\n\t\tweights = {}\n\t\tbiases = {}\n\n\t\tfor i in range(n_hidden_layers):\n\t\t\t# Weights \n\t\t\tif i == 0:\n\t\t\t\t# First hidden layer, input is input layer\n\t\t\t\tweights[Model.get_translated_idx(i)] = tf.Variable(tf.random_normal([n_inputs,\n\t\t\t\t\thidden_layer_nodes[0]]))\n\t\t\telse:\n\t\t\t\t# Input is previous hidden layer\n\t\t\t\tweights[Model.get_translated_idx(i)] = tf.Variable(tf.random_normal([hidden_layer_nodes[i-1],\n\t\t\t\t\thidden_layer_nodes[i]]))\n\n\t\t\t# Biases\n\t\t\tbiases[Model.get_translated_idx(i, 'b')] = tf.Variable(tf.random_normal([hidden_layer_nodes[i]]))\n\n\t\t# Add outputs to weights and biases\n\t\tweights['out'] = tf.Variable(tf.random_normal([hidden_layer_nodes[n_hidden_layers-1],\n\t\t\tn_outputs]))\n\t\tbiases['out'] = tf.Variable(tf.random_normal([n_outputs]))\n\n\t\t# Return the new model\n\t\treturn Model(name, layers, weights, biases)\n\n\t##########\n\t# Static method to translate index for weights and biases\n\t##########\n\t@staticmethod\n\tdef get_translated_idx(idx, prefix='h'):\n\t\treturn prefix + str(idx)","sub_path":"Core/nn/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"530549534","text":"def change_me(org_list):\n\tprint(id(org_list))\n\tnew_list=org_list\n\tprint(id(new_list))\n\n\tif len(new_list) < 3:\n\t\tnew_list=['a','b','c']\n\n\tfor i,e in enumerate(new_list):\n\t\tif isinstance(e,list):\n\t\t\tnew_list[i] = '***'\n\n\tprint(new_list)\n\tprint(id(new_list))\n\ntest1=['a'] # -len(test1)<3,the assignment statement is excuted,test2 didn't change.\nchange_me(test1)\nprint(test1)\n\ntest2=[1,2,3,4,5,6,[1]] # -there is no assignment statement excuted,test2 changed.\nchange_me(test2)\nprint(test2)","sub_path":"passParaTest.py","file_name":"passParaTest.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"367309574","text":"\"\"\"\n* Copyright 2019 EPAM Systems\n*\n* Licensed under the Apache License, Version 2.0 (the \"License\");\n* you may not use this file except in compliance with the License.\n* You may obtain a copy of the License at\n*\n* http://www.apache.org/licenses/LICENSE-2.0\n*\n* Unless required by applicable law or agreed to in writing, software\n* distributed under the License is distributed on an \"AS IS\" BASIS,\n* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n* See the License for the specific language governing permissions and\n* limitations under the License.\n\"\"\"\n\nimport unittest\nimport logging\nimport os\nimport json\nimport sure # noqa\nfrom boosting_decision_making.boosting_featurizer import BoostingFeaturizer\nfrom utils import 
utils\n\n\nclass TestBoostingFeaturizer(unittest.TestCase):\n \"\"\"Tests boosting feature creation functionality\"\"\"\n @utils.ignore_warnings\n def setUp(self):\n self.one_hit_search_rs_explained = \"one_hit_search_rs_explained.json\"\n self.two_hits_search_rs_explained = \"two_hits_search_rs_explained.json\"\n self.log_message = \"log_message.json\"\n self.epsilon = 0.0001\n logging.disable(logging.CRITICAL)\n\n @utils.ignore_warnings\n def tearDown(self):\n logging.disable(logging.DEBUG)\n\n @staticmethod\n @utils.ignore_warnings\n def get_default_config():\n \"\"\"Get default config\"\"\"\n return {\n \"max_query_terms\": 50,\n \"min_should_match\": 0.8,\n \"min_word_length\": 0,\n }\n\n @staticmethod\n @utils.ignore_warnings\n def get_fixture(fixture_name, jsonify=True):\n \"\"\"Read fixture from file\"\"\"\n with open(os.path.join(\"fixtures\", fixture_name), \"r\") as file:\n return file.read() if not jsonify else json.loads(file.read())\n\n @utils.ignore_warnings\n def test_normalize_results(self):\n tests = [\n {\n \"elastic_results\": [],\n \"config\": TestBoostingFeaturizer.get_default_config(),\n \"result\": [],\n },\n {\n \"elastic_results\": [(self.get_fixture(self.log_message),\n self.get_fixture(self.one_hit_search_rs_explained))],\n \"config\": TestBoostingFeaturizer.get_default_config(),\n \"result\": [[{\"_score\": 158.08437,\n \"normalized_score\": 1.0, }]],\n },\n {\n \"elastic_results\": [(self.get_fixture(self.log_message),\n self.get_fixture(self.two_hits_search_rs_explained))],\n \"config\": TestBoostingFeaturizer.get_default_config(),\n \"result\": [[{\"_score\": 158.08437,\n \"normalized_score\": 1.0,\n },\n {\"_score\": 77.53298,\n \"normalized_score\": 0.4904,\n }, ]],\n },\n ]\n for idx, test in enumerate(tests):\n with sure.ensure('Error in the test case number: {0}', idx):\n _boosting_featurizer = BoostingFeaturizer(test[\"elastic_results\"],\n test[\"config\"],\n [])\n _boosting_featurizer.all_results.should.have.length_of(len(test[\"result\"]))\n for i in range(len(test[\"result\"])):\n for j in range(len(test[\"result\"][i])):\n for field in test[\"result\"][i][j]:\n elastic_res = _boosting_featurizer.all_results[i][1][j]\n elastic_res[field].should.equal(test[\"result\"][i][j][field],\n epsilon=self.epsilon)\n\n @utils.ignore_warnings\n def test_find_most_relevant_by_type(self):\n tests = [\n {\n \"elastic_results\": [],\n \"config\": TestBoostingFeaturizer.get_default_config(),\n \"result\": {},\n },\n {\n \"elastic_results\": [(self.get_fixture(self.log_message),\n self.get_fixture(self.one_hit_search_rs_explained))],\n \"config\": TestBoostingFeaturizer.get_default_config(),\n \"result\": {\"AB001\": {\"mrHit\": {\"_score\": 158.08437,\n \"_id\": \"1\"},\n \"compared_log\": self.get_fixture(self.log_message),\n \"score\": 1.0, },\n }\n },\n {\n \"elastic_results\": [(self.get_fixture(self.log_message),\n self.get_fixture(self.two_hits_search_rs_explained))],\n \"config\": TestBoostingFeaturizer.get_default_config(),\n \"result\": {\"AB001\": {\"mrHit\": {\"_score\": 158.08437,\n \"_id\": \"1\"},\n \"compared_log\": self.get_fixture(self.log_message),\n \"score\": 0.6709, },\n \"PB001\": {\"mrHit\": {\"_score\": 77.53298,\n \"_id\": \"2\"},\n \"compared_log\": self.get_fixture(self.log_message),\n \"score\": 0.3291, },\n }\n },\n {\n \"elastic_results\": [(self.get_fixture(self.log_message),\n self.get_fixture(self.two_hits_search_rs_explained)),\n (self.get_fixture(self.log_message),\n self.get_fixture(self.one_hit_search_rs_explained))],\n 
\"config\": TestBoostingFeaturizer.get_default_config(),\n \"result\": {\"AB001\": {\"mrHit\": {\"_score\": 158.08437,\n \"_id\": \"1\"},\n \"compared_log\": self.get_fixture(self.log_message),\n \"score\": 0.8031, },\n \"PB001\": {\"mrHit\": {\"_score\": 77.53298,\n \"_id\": \"2\"},\n \"compared_log\": self.get_fixture(self.log_message),\n \"score\": 0.1969, },\n }\n },\n ]\n for idx, test in enumerate(tests):\n with sure.ensure('Error in the test case number: {0}', idx):\n _boosting_featurizer = BoostingFeaturizer(test[\"elastic_results\"],\n test[\"config\"],\n [])\n scores_by_issue_type = _boosting_featurizer.find_most_relevant_by_type()\n scores_by_issue_type.should.have.length_of(len(test[\"result\"]))\n for issue_type in test[\"result\"]:\n scores_by_issue_type.keys().should.contain(issue_type)\n elastic_res = scores_by_issue_type[issue_type]\n for field in test[\"result\"][issue_type]:\n if type(test[\"result\"][issue_type][field]) != dict:\n elastic_res[field].should.equal(test[\"result\"][issue_type][field],\n epsilon=self.epsilon)\n else:\n for field_dict in test[\"result\"][issue_type][field]:\n result_field_dict = test[\"result\"][issue_type][field][field_dict]\n elastic_res[field][field_dict].should.equal(result_field_dict,\n epsilon=self.epsilon)\n","sub_path":"test/test_boosting_featurizer.py","file_name":"test_boosting_featurizer.py","file_ext":"py","file_size_in_byte":8284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"24667968","text":"from conans import ConanFile, CMake, tools\n\n\nclass LmeditorConan(ConanFile):\n name = \"lmeditor\"\n version = \"0.0.1\"\n license = \"\"\n author = \" \"\n url = \"\"\n description = \"\"\n topics = (\"\", \"\", \"\")\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n generators = \"cmake_find_package\", \"virtualrunenv\"\n requires = (\n 'lmengine/0.0.1',\n 'lmgl/0.0.1',\n 'lmlib/0.0.1',\n 'lmpl/0.0.1',\n 'lmtk/0.0.1',\n \"lmhuv/0.0.1\",\n )\n build_requires = (\n 'OpenMesh/8.0@lawrencem/stable',\n 'Catch2/2.6.1@catchorg/stable',\n 'embed-resource/0.2@lawrencem/stable',\n 'fmt/5.3.0@bincrafters/stable',\n 'yaml-cpp/0.6.2@bincrafters/stable',\n 'clara/1.1.5@bincrafters/stable',\n 'boost/1.70.0',\n )\n\n def imports(self):\n self.copy(\"embed-resource\", src=\"bin\")\n self.copy(\"embed-resource.exe*\", src=\"bin\")\n self.copy('embed-resource.cmake', dst='scripts', src='cmake')\n self.copy('glslangValidator*', src='bin')\n self.copy('*.dll', src='bin')\n\n def package_info(self):\n self.cpp_info.libs = ['lmeditor']\n","sub_path":"lmeditor/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"425023601","text":"#\n# @lc app=leetcode.cn id=11 lang=python3\n#\n# [11] 盛最多水的容器\n#\n# https://leetcode-cn.com/problems/container-with-most-water/description/\n#\n# algorithms\n# Medium (64.05%)\n# Likes: 1779\n# Dislikes: 0\n# Total Accepted: 272K\n# Total Submissions: 424.7K\n# Testcase Example: '[1,8,6,2,5,4,8,3,7]'\n#\n# 给你 n 个非负整数 a1,a2,...,an,每个数代表坐标中的一个点 (i, ai) 。在坐标内画 n 条垂直线,垂直线 i 的两个端点分别为 (i,\n# ai) 和 (i, 0)。找出其中的两条线,使得它们与 x 轴共同构成的容器可以容纳最多的水。\n# \n# 说明:你不能倾斜容器,且 n 的值至少为 2。\n# \n# \n# \n# \n# \n# 图中垂直线代表输入数组 [1,8,6,2,5,4,8,3,7]。在此情况下,容器能够容纳水(表示为蓝色部分)的最大值为 49。\n# \n# \n# \n# 示例:\n# \n# 输入:[1,8,6,2,5,4,8,3,7]\n# 输出:49\n# \n#\n\n# @lc code=start\nclass Solution:\n def maxArea(self, height: List[int]) -> int:\n left = 0\n right = len(height)-1\n 
res = 0\n while left 1]\ncur_group_id = current_groups.shape[0]\ncurrent_groups['group'] = range(cur_group_id)\nmerged_record_df = record_df.merge(current_groups[['SF', 'LFEUI', 'group']], how='left', on=['SF', 'LFEUI'])\nmerged_record_df['group'].fillna(0, inplace=True)\n\n#Match identical long forms\nlf_match_df = merged_record_df[merged_record_df['LFEUI'].isnull()]\ncurrent_groups = lf_match_df[['LF', 'group']].groupby(['LF'], axis=0)['LF'].size().reset_index(name='Size')\ncurrent_groups = current_groups[current_groups[\"Size\"] > 1]\ncurrent_groups['group2'] = range(cur_group_id, cur_group_id + len(current_groups))\ncur_group_id = cur_group_id + len(current_groups)\n\nmerged_record_df_2 = merged_record_df.merge(current_groups[['LF', 'group2']], how='left', on=['LF'])\nmerged_record_df_2['group2'].fillna(0, inplace=True)\nmerged_record_df_2['group'] = merged_record_df_2['group'] + merged_record_df_2['group2']\n\n\nmatch_df = full_df[full_df['match_score'] > .78]\nmatch_df = match_df.reset_index(inplace=False, drop=True)\nmatch_df['match_score'] = match_df.apply(lambda x: _remove_suspicious_matches(x), axis=1)\n\ngroup_ids = merged_record_df_2[['RecordID', 'group']]\ngroup_ids.set_index('RecordID', inplace=True)\n\n\ngroup_equivalencies = []\nfor inx, row in full_df.iterrows():\n if row['match_score'] > THRESHOLD:\n id_1 = row[\"RecordID1\"]\n id_2 = row[\"RecordID2\"]\n if group_ids.loc[id_1, 'group'] == 0 and group_ids.loc[id_2, 'group'] == 0:\n cur_group = cur_group_id\n group_ids.loc[id_1, 'group'] = cur_group\n group_ids.loc[id_2, 'group'] = cur_group\n cur_group_id += 1\n elif group_ids.loc[id_1, 'group'] == 0 and group_ids.loc[id_2, 'group'] != 0:\n cur_group = group_ids.loc[id_2, 'group']\n group_ids.loc[id_1, 'group'] = cur_group\n\n elif group_ids.loc[id_1, 'group'] != 0 and group_ids.loc[id_2, 'group'] == 0:\n cur_group = group_ids.loc[id_1, 'group']\n group_ids.loc[id_2, 'group'] = cur_group\n\n else:\n if group_ids.loc[id_1, 'group'] != group_ids.loc[id_2, 'group']:\n group_equivalencies.append([group_ids.loc[id_1, 'group'], group_ids.loc[id_2, 'group']])\n\ngroup_equivalencies_set = [(min(sample), max(sample)) for sample in group_equivalencies]\ngroup_equivalencies_set = set(group_equivalencies_set)\nequivalencies_dict = dict(group_equivalencies_set)\n\ngroup_ids['group'].replace(equivalencies_dict, inplace=True)\ngroup_ids.reset_index(inplace=True, drop=False)\n\ngrouped_df = record_df.merge(group_ids, how='left', on=\"RecordID\")\ngrouped_df.to_csv(\"/ssd-1/clinical/clinical-abbreviations/data/Step3Output_with_group.csv\", index=False)\n","sub_path":"code/Step4_RemoveRedundancy/group_generation/create_group_ids_new.py","file_name":"create_group_ids_new.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"267854019","text":"import os \nimport datetime\nimport numpy as np\nimport torch\n\nfrom utils import ReplayBuffer\nfrom modules import DynamicModel\nfrom tensorboardX import SummaryWriter\nimport pdb\n\nclass Dataset(torch.utils.data.Dataset):\n def __init__(self, buffer, mode=\"train\", train_ratio = 0.9):\n self.buffer = buffer\n self.current_state = buffer.state\n self.next_state = buffer.next_state\n self.action = buffer.action\n total_size = self.current_state.shape[0]\n num_train = int(total_size * train_ratio)\n if mode == \"train\":\n self.current_state = self.current_state[:num_train]\n self.next_state = self.next_state[:num_train]\n self.action = 
self.action[:num_train]\n elif mode == \"validation\":\n self.current_state = self.current_state[num_train:]\n self.next_state = self.next_state[num_train:]\n self.action = self.action[num_train:]\n else:\n raise ValueError\n\n def __getitem__(self, index):\n return self.current_state[index], self.action[index], self.next_state[index]\n def __len__(self):\n return len(self.current_state)\n\n\nif __name__ == \"__main__\":\n outdir = datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n outdir = 'pretrain' + outdir\n outdir = os.path.join('./saved_models', outdir)\n os.system('mkdir ' + outdir)\n writer = SummaryWriter(logdir=('logs/pretrain{}').format(datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")))\n input_state_dim = 12\n output_state_dim = 9\n action_dim = 9\n hidden_dim=512\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = DynamicModel(input_state_dim=input_state_dim, action_dim=action_dim, output_state_dim=output_state_dim, hidden_dim=hidden_dim)\n model = model.to(device)\n buffer = ReplayBuffer(state_dim = input_state_dim, action_dim = action_dim)\n buffer.restore()\n training_set = Dataset(buffer, mode=\"train\")\n val_set = Dataset(buffer, mode=\"validation\")\n training_loader = torch.utils.data.DataLoader(dataset=training_set,\n batch_size=32,\n shuffle=True)\n val_loader = torch.utils.data.DataLoader(dataset=val_set,\n batch_size=32,\n shuffle=True)\n optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)\n criterion = torch.nn.MSELoss()\n train_step = len(training_loader)\n max_epoch = 100\n train_index = 0\n for epoch in range(max_epoch):\n for i, (current_state, action, next_state) in enumerate(training_loader):\n train_index += 1\n current_state = current_state.float().to(device)\n action = action.float().to(device)\n next_state = next_state[:,:output_state_dim].float().to(device)\n predict_state = model(current_state, action)\n loss = criterion(predict_state, next_state)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if (i + 1) % 1000 == 0:\n print(\"Epoch [{}/{}], Step [{}/{}], Loss: {:06f}\"\n .format(epoch, max_epoch, i, train_step, loss.item()))\n writer.add_scalar('train/loss', loss.item(), train_index)\n torch.save(model.state_dict(), os.path.join(outdir, \"model_{:03d}.ckpt\".format(epoch)))\n with torch.no_grad():\n loss_list = []\n for i, (current_state, action, next_state) in enumerate(val_loader):\n current_state = current_state.float().to(device)\n action = action.float().to(device)\n next_state = next_state[:,:output_state_dim].float().to(device)\n predict_state = model(current_state, action)\n loss = criterion(predict_state, next_state)\n loss_list.append(loss.item())\n print(\"Epoch [{}/{}], Validation Loss: {:06f}\"\n .format(epoch, max_epoch, np.array(loss_list).mean()))\n writer.add_scalar('val/loss', np.array(loss_list).mean(), epoch)\n writer.close()","sub_path":"train/pretrain_model.py","file_name":"pretrain_model.py","file_ext":"py","file_size_in_byte":4187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"472817353","text":"import unittest\n# from nose.tools import *\n# import ex45.mastermind\nfrom ex45 import mastermind\n# from ex45 import \"mastermind_pieces.txt\"\n# import ex45\n# from ex45.mastermind import Level\n\n\nif __name__ == '__main__':\n unittest.main()\n\n\nclass TestLevel(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n print(\"SET UP!\")\n\n @classmethod\n def test_generate_code(self):\n print(\"before 
blah\")\n\n\n level = mastermind.Level()\n # level = ex45.mastermind.Level()\n print(\"after blah\")\n assert_equal(len(level.generate_code()), 12672)\n\n @classmethod\n def teardown(cls):\n print(\"TEAR DOWN!\")\n","sub_path":"Command Line Interface/tests/mastermind_tests.py","file_name":"mastermind_tests.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"481063682","text":"from flask import Flask\nfrom flask import request\nimport requests\nimport link_extract\nimport json\n\napp = Flask(__name__)\n\nfile_path = 'res/pedro_escamoso.html'\n\n\n@app.route('/')\ndef root():\n return 'Get Pedro el Escamoso'\n\n\n@app.route('/escamoso')\ndef escamoso():\n return json.dumps(link_extract.escamoso())\n\n\n@app.route('/all_links')\ndef test():\n url = request.args.get('url')\n page = requests.get(url)\n url_list = link_extract.extract_urls(page.text)\n return json.dumps(url_list)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"392664855","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 27 17:14:30 2018\n\n@author: neha\n\"\"\"\n\nimport nltk\nfrom nltk.corpus import gutenberg\nimport numpy as np\nimport string\nfrom nltk.corpus import brown \nimport random\n\ndef load():\n train=[]\n test=[] \n for c in brown.categories():\n sent=brown.sents(categories=c)\n s=[]\n for str1 in sent:\n s.append(str1)\n \n \n str2=[]\n for i in s:\n str2.append(' '.join(i))\n \n str3=''\n for i in str2:\n str3= str3+ ' '+ i\n \n punctuation={'`','\\''}\n for c in punctuation:\n str3= str3.replace(c,\"\")\n\n str3=' '.join(str3.split())\n # str3 = ' The Fulton County Grand Jury said Friday an investigation of Atlantas recent primary election produced no evidence that any irregularities took place . The jury further said in term-end presentments that the City Executive Committee , which had over-all charge of the election , deserves the praise and thanks of the City of Atlanta for the manner in which the election was conducted . The September-October term jury had been charged by Fulton Superior Court Judge Durwood Pye to investigate reports of possible irregularities in the hard-fought primary which was won by Mayor-nominate Ivan Allen Jr. 
.'\n words = str3.split(' ')\n train.append(words[:round(len(words)*0.8)])\n test.append(words[-round(len(words)*0.2):])\n\n train = [item for sublist in train for item in sublist]\n test = [item for sublist in test for item in sublist]\n return train,test\n\ndef cal_ngram(train,n):\n ngrams = {} \n #n=2\n for index, word in enumerate(train):\n if index < len(train)-(n-1):\n w=[]\n for i in range(n):\n w.append(train[index+i])\n ngram = tuple(w)\n# print(ngram)\n \n if ngram in ngrams:\n ngrams[ ngram ] = ngrams[ ngram ] + 1\n else:\n ngrams[ ngram ] = 1\n \n# sorted_ngrams = sorted(ngrams.items(), key = lambda pair:pair[1], reverse = True)\n return ngrams\n\n\ndef cal_ngram_list(ngrams):\n ngrams_list=[]\n for key,value in ngrams.items():\n ngrams_list.append(key)\n \n return ngrams_list\n\ndef unknown(unigrams,train):\n unknown_list=[]\n for key, value in unigrams.items():\n if value < 2:\n unknown_list.append(key[0])\n for index, word in enumerate(unigrams):\n if train[index] == key[0]:\n train[index] = ''\n if len(unknown_list)==500:\n break\n return train,unknown_list\n\n\ndef cal_probab(ngrams,n_1grams,n):\n prob = {}\n for key, value in ngrams.items():\n n_1key=[]\n for k in range(0,n-1):\n n_1key.append(key[k])\n \n prob[key] = value/(n_1grams[tuple(n_1key)])\n \n return prob\n\n\ndef cal_unigram_probab(ngrams,N):\n prob = {}\n for key, value in ngrams.items():\n prob[key] = value/N \n return prob\n\n\ndef check_existence(key,ngram_list,train_prob,n):\n found=0\n nfound=0\n alpha=1;\n t_prob=-1\n for i in reversed(range(len(ngram_list))):\n# print(i)\n# print(key)\n# k=[]\n# k.append(key)\n if key in ngram_list[i]:\n prob = train_prob[i]\n t_prob = alpha*prob[key]\n# print('break')\n found=found+1\n break\n else:\n key=key[i:n]\n# print(key)\n alpha=alpha*0.4\n \n if t_prob==-1:\n# print('unknown')\n ukn=tuple([''])\n nfound=nfound+1\n prob = train_prob[0]\n t_prob=alpha*prob[ukn]/0.4\n return t_prob\n \ndef cal_probab_test(tngram,ngram_list,train_prob,n):\n t_prob=0\n \n for key, value in tngram.items():\n# print(key)\n prob = check_existence(key,ngram_list,train_prob,n)\n# print(prob)\n t_prob = t_prob + np.log2(prob)\n \n return t_prob\n\ndef cal_perplexity(test,ngram_list,train_prob,n):\n tngram={}\n tngram=cal_ngram(test,n)\n tN=len(test)\n tprob=cal_probab_test(tngram,ngram_list,train_prob,n)\n perplexity=2 ** (tprob*(-1/tN))\n \n return perplexity\n\ndef init(train,n):\n N=len(train)\n unigrams=cal_ngram(train,1)\n #replace some vocab with \n train,unknown_list = unknown(unigrams,train)\n \n #get all ngrams and their counts\n ngram=[]\n \n for i in range(n):\n# print(i)\n ngram.append(cal_ngram(train,i+1))\n \n #calculate 1 to n gram's probabilities\n train_prob=[]\n train_prob.append(cal_unigram_probab(ngram[0],N))\n \n for i in range(1,n):\n# print(i)\n train_prob.append(cal_probab(ngram[i],ngram[i-1],i+1))\n \n \n \n #calculate ngram lists\n ngram_list=[]\n for i in range(n):\n ngram_list.append(cal_ngram_list(ngram[i]))\n \n return N,n,train,unknown_list,ngram,train_prob,ngram_list\n\n\n\ntrain,test=load()\nn=2\nN,n,train,unknown_list,ngram,train_prob,ngram_list=init(train,n)\n\nperplexity = cal_perplexity(test,ngram_list,train_prob,n)\n\nprint('Perplexity: ',perplexity)\n","sub_path":"S1_bigram.py","file_name":"S1_bigram.py","file_ext":"py","file_size_in_byte":5242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"640954004","text":"# -*+ coding: utf-8 -*-\nname = str(input(\"Hola ¿Como te llamas?: \"))\npeso = 
float(input(\"¿Cual es tu peso en Kilogramos?: \"))\ntalla = float(input(\"¿Cual es tu estatura en cc?: \"))\nimc = peso/(talla**2)\nprint(\"Hola ${} mucho gusto tu Indice de Masa Corporal es: ${}\".format(name, imc))\nprint(imc)\n\ndef evaluate_imc(imc):\n\tif(imc > 25):\n\t\treturn\"Estas en sobrepeso\"\n\telif(imc < 20):\n\t\treturn\"Estas por debajo de tu peso\"\n\telse:\n\t\treturn\"Estas en tu peso ideal\"\nresultado = evaluate_imc(imc)\nprint(resultado)\nif __name__ == '__main__':\n run()\n","sub_path":"funciones/masacorporal.py","file_name":"masacorporal.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"484892505","text":"passiveWords = [\n 'awoken',\n 'been',\n 'born',\n 'beat',\n 'become',\n 'begun',\n 'bent',\n 'beset',\n 'bet',\n 'bid',\n 'bidden',\n 'bound',\n 'bitten',\n 'bled',\n 'blown',\n 'broken',\n 'bred',\n 'brought',\n 'broadcast',\n 'built',\n 'burnt',\n 'burst',\n 'bought',\n 'cast',\n 'caught',\n 'chosen',\n 'clung',\n 'come',\n 'cost',\n 'crept',\n 'cut',\n 'dealt',\n 'dug',\n 'dived',\n 'done',\n 'drawn',\n 'dreamt',\n 'driven',\n 'drunk',\n 'eaten',\n 'fallen',\n 'fed',\n 'felt',\n 'fought',\n 'found',\n 'fit',\n 'fled',\n 'flung',\n 'flown',\n 'forbidden',\n 'forgotten',\n 'foregone',\n 'forgiven',\n 'forsaken',\n 'frozen',\n 'gotten',\n 'given',\n 'gone',\n 'ground',\n 'grown',\n 'hung',\n 'heard',\n 'hidden',\n 'hit',\n 'held',\n 'hurt',\n 'kept',\n 'knelt',\n 'knit',\n 'known',\n 'laid',\n 'led',\n 'leapt',\n 'learnt',\n 'left',\n 'lent',\n 'let',\n 'lain',\n 'lighted',\n 'lost',\n 'made',\n 'meant',\n 'met',\n 'misspelt',\n 'mistaken',\n 'mown',\n 'overcome',\n 'overdone',\n 'overtaken',\n 'overthrown',\n 'paid',\n 'pled',\n 'proven',\n 'put',\n 'quit',\n 'read',\n 'rid',\n 'ridden',\n 'rung',\n 'risen',\n 'run',\n 'sawn',\n 'said',\n 'seen',\n 'sought',\n 'sold',\n 'sent',\n 'set',\n 'sewn',\n 'shaken',\n 'shaven',\n 'shorn',\n 'shed',\n 'shone',\n 'shod',\n 'shot',\n 'shown',\n 'shrunk',\n 'shut',\n 'sung',\n 'sunk',\n 'sat',\n 'slept',\n 'slain',\n 'slid',\n 'slung',\n 'slit',\n 'smitten',\n 'sown',\n 'spoken',\n 'sped',\n 'spent',\n 'spilt',\n 'spun',\n 'spit',\n 'split',\n 'spread',\n 'sprung',\n 'stood',\n 'stolen',\n 'stuck',\n 'stung',\n 'stunk',\n 'stridden',\n 'struck',\n 'strung',\n 'striven',\n 'sworn',\n 'swept',\n 'swollen',\n 'swum',\n 'swung',\n 'taken',\n 'taught',\n 'torn',\n 'told',\n 'thought',\n 'thrived',\n 'thrown',\n 'thrust',\n 'trodden',\n 'understood',\n 'upheld',\n 'upset',\n 'woken',\n 'worn',\n 'woven',\n 'wed',\n 'wept',\n 'wound',\n 'won',\n 'withheld',\n 'withstood',\n 'wrung',\n 'written'\n]\n\nexceptions = [\n 'indeed'\n]\n\nimport re\n\nre = re.compile('\\\\b(am|are|were|being|is|been|was|be)\\\\b\\\\s*([\\\\w]+ed|' + '|'.join(passiveWords) + ')\\\\b', flags=re.IGNORECASE)\n\ndef passiveCheck(text):\n suggestions = []\n match = re.search(text)\n while(match):\n matchString = match.group(0)\n index = match.start()\n offset = len(matchString)\n try:\n matchString.index(exceptions[0])\n except:\n suggestions.append([index, offset])\n match = re.search(text[index+offset:-1])\n return suggestions\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"lib/passive.py","file_name":"passive.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"121001548","text":"from sklearn.linear_model import BayesianRidge\nfrom hyperopt import hp\nfrom utils 
import definitions\nfrom .. import model, model_regression\n\nclass BayesianRidgeRegressor(model.Model, model_regression.ModelRegression):\n    def __init__(self, _project_name):\n        super().__init__(_project_name)\n        self.model_name = 'BayesianRidgeRegressor'\n        self.params_list = {}\n\n    def getHyperParameterSpace(self):\n        return {\n            'n_iter': hp.quniform('n_iter', 200, 400, 12), \n            'alpha_1': hp.uniform('alpha_1', 0, 1),\n            'alpha_2': hp.uniform('alpha_2', 0, 1),\n            'lambda_1': hp.uniform('lambda_1', 0, 1),\n            'lambda_2': hp.uniform('lambda_2', 0, 1),\n            'alpha_init': hp.uniform('alpha_init', 0, 1),\n            'lambda_init': hp.uniform('lambda_init', 0, 1),\n            'compute_score': hp.choice('compute_score', [False, True]),\n            'fit_intercept': hp.choice('fit_intercept', [False, True]),\n            'normalize': hp.choice('normalize', [False, True]),\n            'copy_X': hp.choice('copy_X', [False, True]),\n        }\n\n    def getModel(self, _params):\n        return BayesianRidge(\n            n_iter= int(_params['n_iter']),\n            alpha_1 = _params['alpha_1'],\n            alpha_2 = _params['alpha_2'],\n            lambda_1 = _params['lambda_1'],\n            lambda_2 = _params['lambda_2'], \n            alpha_init= _params['alpha_init'],\n            lambda_init = _params['lambda_init'],\n            compute_score= _params['compute_score'],\n            fit_intercept= _params['fit_intercept'],\n            normalize= _params['normalize'],\n            copy_X= _params['copy_X'], \n        )\n\n    def trainModel(self, x, y, _params):\n        self.model = self.getModel(_params)\n        self.model.fit(x, y)\n        self.saveModel()\n    \n    def getPredictResult(self, x):\n        return self.model.predict(x)\n\n","sub_path":"AutomlCore/build/lib/algorithms/regression/bayesian_ridge_r.py","file_name":"bayesian_ridge_r.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"535840054","text":"import skimage.morphology\nimport numpy as np\nfrom skimage import io\n\n# pred_np (the segmentation output) and image_np (the input image) are assumed\n# to be defined in the previous step of the notebook\nprediction_mask = (pred_np.squeeze() == 15)\n\n# Let's apply some morphological operations to\n# create the contour for our sticker\n\ncropped_object = image_np * np.dstack((prediction_mask,) * 3)\n\nsquare = skimage.morphology.square(5)\n\ntemp = skimage.morphology.binary_erosion(prediction_mask, square)\n\nnegative_mask = (temp != True)\n\neroding_countour = negative_mask * prediction_mask\n\neroding_countour_img = np.dstack((eroding_countour, ) * 3)\n\ncropped_object[eroding_countour_img] = 248\n\npng_transparancy_mask = np.uint8(prediction_mask * 255)\n\nimage_shape = cropped_object.shape\n\npng_array = np.zeros(shape=[image_shape[0], image_shape[1], 4], dtype=np.uint8)\n\npng_array[:, :, :3] = cropped_object\n\npng_array[:, :, 3] = png_transparancy_mask\n\nio.imshow(cropped_object)\n\nio.imsave('output_image.png', png_array)\n","sub_path":"Image Segmentation/tensorflow_notes-master/step2.py","file_name":"step2.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"330348764","text":"import pytest\nimport time\nimport random\n\nclass TestClassPrRequest_not_logged():\n    @pytest.fixture(autouse=True)\n    def _request_signup_page(self, pr_notlogged, app_test_users):\n        self.app = pr_notlogged\n        self.app.user_creator = app_test_users.user_creator\n        self.app.user_student = app_test_users.user_student\n        self.pr = pr_notlogged.request_project\n\n\n    def test_WHEN_choose_every_subcategory_EXPECTED_requests_related_to_subcategory_TC6220(self, pr_notlogged):\n        dict_of_subcategories = self.app.request_project.get_list_of_sub_cat()\n        assert 
self.app.request_project.requests_are_related_to_sub_categories(dict_of_subcategories)\n\n def test_WHEN_requests_button_is_pressed_EXPECTED_project_requests_scr_opens_TC6200(self):\n self.app.home_el.logout_go_home_and_wait()\n self.app.home_el.button_learnondemand_click()\n self.app.live.navigation_button_press('Requests')\n assert self.app.request_project.screen_project_requests_is_displayed()\n\n def test_WHEN_requests_screen_opened_EXPECTED_5_sections_presented_TC6205(self):\n elts_dct = {}\n elts_dct['main_menu_section'] = self.pr.main_menu_section_is_displayed()\n elts_dct['filters_section'] = self.pr.filters_section_is_displayed()\n elts_dct['requests_list_section'] = self.pr.requests_list_section_is_displayed()\n elts_dct['instruction_section'] = self.pr.instruction_section_is_presented()\n #elts_dct['pagination_section'] = self.app.request_project.pagination_section_is_presented()\n assert all(elts_dct.values())\n\n def test_WHEN_requests_screen_opened_EXPECTED_5_elements_in_filters_TC6210(self):\n elts_dct = {}\n main_cat_filters = ['All', 'Programming', 'Game development', 'Data science', 'Design',\n 'Artificial intelligence', 'CryptoCurrency', 'VR & AR', 'Cybersecurity']\n\n elts_dct['main_cat_filters'] = self.pr.main_cat_filters_is_presented(main_cat_filters)\n elts_dct['subcategory_filters'] = self.pr.subcategory_filters_is_presented()\n elts_dct['popularity_latest_sorting'] = self.pr.sort_filter_is_presented('Most Popular')\n elts_dct['difficulty_filter'] = self.pr.sort_filter_is_presented('Difficulty')\n elts_dct['language_filter'] = self.pr.sort_filter_is_presented('Language')\n for x in elts_dct.values():\n assert x is True\n\n def test_WHEN_main_category_selected_EXPECTED_all_request_relatred_to_selected_category_TC6215(self):\n assert self.pr.list_of_requests_related_to_each_selected_category()\n\n\n def test_WHEN_maincategory_and_subcat_selected_EXPECTED_project_requests_are_proper_TC6222(self, pr_notlogged):\n main_cat = 'Programming'\n sub_cat = 'Python'\n assert self.pr.all_requests_related_to_sub_and_main_cat(main_cat, sub_cat)\n\n\n def test_WHEN_subcategory_is_selected_EXPECTED_x_button_is_presented_TC6223(self, pr_notlogged):\n self.pr.select_and_enter_random_subcategory()\n assert self.pr.close_x_button_is_presented(self.app.request_project.buttons_in_filter)\n\n def test_WHEN_subcategory_is_selected_and_x_pressed_EXPECTED_filter_is_reseted_TC6223(self, pr_notlogged):\n filter = self.pr.get_filter_by_text('Choose a category')\n self.pr.select_and_enter_random_subcategory()\n self.pr.close_x_button_click(filter, self.pr.sub_cat_filter)\n assert not self.pr.close_x_button_is_presented(self.pr.buttons_in_filter)\n\n def test_WHEN_sorting_by_popularity_is_selected_EXPECTED_items_are_sorted_TC6225(self, pr_notlogged):\n self.pr.select_value_in_right_filters(0, 'Most Popular') #0=Popular/New 1=Difficulty 2=Language\n assert self.pr.list_of_requests_sorted_by_popularity()\n\n def test_WHEN_sorting_by_latest_is_selected_EXPECTED_items_are_sorted_TC6226(self, pr_notlogged):\n self.pr.select_value_in_right_filters(0, 'Latest') #0=Popular/New 1=Difficulty 2=Language\n assert self.pr.list_of_requests_sorted_by_latest()\n\n def test_WHEN_filter_by_difficulty_is_selected_EXPECTED_items_are_filtered_TC6231(self, pr_notlogged):\n self.pr.select_value_in_right_filters(1, 'Beginner')\n list_api = self.pr.get_list_of_requests_by_api(difficulty=1)\n assert self.pr.list_of_pr_req_related_to_api_list(list_api)\n\n def 
test_WHEN_diffic_is_selected_and_x_pressed_EXPECTED_filter_is_reseted_TC6232(\n self, pr_notlogged):\n\n filter_d = self.pr.get_difficulty_filter()\n button = self.pr.select_value_in_right_filters(1, 'Beginner')\n self.pr.close_x_button_click(filter_d, button, 1) #1 if click of element (not locator)\n assert not self.pr.close_x_button_is_presented(self.pr.buttons_in_filter, filter_d)\n\n def test_WHEN_filter_by_language_is_selected_EXPECTED_items_are_filtered_TC6231(\n self, pr_notlogged):\n\n self.pr.select_value_in_right_filters(2, 'English')\n list_api = self.pr.get_list_of_requests_by_api(language='en') #english\n assert self.pr.list_of_pr_req_related_to_api_list(list_api)\n\n def test_WHEN_lang_is_selected_and_x_pressed_EXPECTED_filter_is_reseted_TC6232(self, pr_notlogged):\n filter_l = self.pr.get_language_filter()\n button = self.pr.select_value_in_right_filters(2, 'English')\n self.pr.close_x_button_click(filter_l, button, 1) #1 if click of element (not locator)\n assert not self.pr.close_x_button_is_presented(self.pr.buttons_in_filter, filter_l)\n\n\n def test_WHEN_several_filters_are_selected_EXPECTED_items_are_filtered_TC6231(self, pr_notlogged):\n self.pr.select_value_in_right_filters(2, 'English')\n self.pr.select_value_in_right_filters(1, 'Beginner')\n list_api = self.pr.get_list_of_requests_by_api(language='en', difficulty=1) # english\n assert self.pr.list_of_pr_req_related_to_api_list(list_api)\n\n def test_WHEN_user_is_not_logged_EXPECTED_elements_in_pr_correct_TC6252(self, pr_notlogged):\n elts_dct = {}\n elts_dct['likes_counter'] = self.pr.pr_element_is_displayed_in_each(\n self.pr.likes_button)\n elts_dct['likes_button'] = self.pr.pr_element_is_displayed_in_each(\n self.pr.likes_counter)\n elts_dct['title'] = self.pr.pr_element_is_displayed_in_each(\n self.pr.project_request_titles)\n elts_dct['description'] = self.pr.pr_element_is_displayed_in_each(\n self.pr.description_of_pr)\n elts_dct['language'] = self.pr.pr_element_is_displayed_in_each(\n self.pr.language_title)\n elts_dct['subcategory_icon'] = self.pr.pr_element_is_displayed_in_each(\n self.pr.subcategory_icon)\n elts_dct['name_of_creator'] = self.pr.pr_element_is_displayed_in_each(\n self.pr.creator_name)\n elts_dct['creation_date'] = self.pr.pr_element_is_displayed_in_each(\n self.pr.date_of_proj_request)\n\n\n assert all(elts_dct.values())\nclass TestClassPrRequest_student():\n @pytest.fixture(autouse=True)\n def _request_signup_page(self, pr_student, app_test_users):\n self.app = pr_student\n self.app.user_creator = app_test_users.user_creator\n self.app.user_student = app_test_users.user_student\n self.pr = pr_student.request_project\n\n def test_WHEN_student_is_logged_EXPECTED_elements_in_pr_correct_TC6250(self, pr_student):\n elts_dct = {}\n elts_dct['likes_counter'] = self.pr.pr_element_is_displayed_in_each(\n self.pr.likes_button)\n elts_dct['likes_button'] = self.pr.pr_element_is_displayed_in_each(\n self.pr.likes_counter)\n elts_dct['title'] = self.pr.pr_element_is_displayed_in_each(\n self.pr.project_request_titles)\n elts_dct['description'] = self.pr.pr_element_is_displayed_in_each(\n self.pr.description_of_pr)\n elts_dct['language'] = self.pr.pr_element_is_displayed_in_each(\n self.pr.language_title)\n elts_dct['subcategory_icon'] = self.pr.pr_element_is_displayed_in_each(\n self.pr.subcategory_icon)\n elts_dct['name_of_creator'] = self.pr.pr_element_is_displayed_in_each(\n self.pr.creator_name)\n elts_dct['creation_date'] = self.pr.pr_element_is_displayed_in_each(\n 
self.pr.date_of_proj_request)\n\n\n assert all(elts_dct.values())\n\n def test_WHEN_student_pressed_likes_button_EXPECTED_counter_is_increased_TC6260(self, pr_student):\n counter_value = self.pr.get_counter_of_likes(0)\n self.pr.press_likes_button(0)\n time.sleep(3)\n counter_value_after = self.pr.get_counter_of_likes(0)\n assert counter_value + 1 == counter_value_after\n\n def test_WHEN_student_is_logged_EXPECTED_make_section_is_correct_TC6290(self, pr_student):\n elts_dct = {}\n elts_dct['icon_of_section'] = self.pr.pr_element_is_displayed_in_each(\n self.pr.make_section_icon)\n elts_dct['make your own request'] = self.pr.pr_element_is_displayed_in_each(\n self.pr.request_project_button)\n elts_dct['text'] = self.pr.pr_element_is_displayed_in_each(\n self.pr.make_section_text)\n elts_dct['title'] = self.pr.pr_element_is_displayed_in_each(\n self.pr.make_section_title)\n assert all(elts_dct.values())\n\n def test_WHEN_request_project_button_pressed_EXPECTED_rp_popup_appears_TC6292(self, pr_student):\n self.app.general.but_press(self.pr.request_project_button)\n assert self.app.general.el_is_displayed(self.pr.pr_popup)\n\n def test_WHEN_pr_popup_opens_EXPECTED_fileds_are_correct_TC6300(self, pr_student):\n self.app.general.but_press(self.pr.request_project_button)\n elts_dct = {}\n elts_dct['main title'] = self.pr.pr_element_is_displayed_in_each(\n self.pr.pr_popup_main_title)\n elts_dct['close_x_button'] = self.pr.pr_element_is_displayed_in_each(\n self.pr.close_popup_button)\n elts_dct['pr name'] = self.pr.pr_element_is_displayed_in_each(\n self.pr.pr_popup_pname)\n elts_dct['choose a category, difficulty, language'] = self.pr.pr_element_is_displayed_in_each(\n self.pr.pr_popup_cat_dif_lang)\n elts_dct['choose a subcategory'] = self.pr.pr_element_is_displayed_in_each(\n self.pr.pr_popup_subcategory)\n elts_dct['describe your idea'] = self.pr.pr_element_is_displayed_in_each(\n self.pr.pr_popup_description)\n elts_dct['submit request'] = self.pr.pr_element_is_displayed_in_each(\n self.pr.pr_popup_submit_button)\n assert all(elts_dct.values())\n\n def test_WHEN_x_button_pressed_in_popup_EXPECTED_pr_popup_closed_TC6301(self, pr_student):\n self.app.general.but_press(self.pr.request_project_button)\n self.app.general.but_press(self.pr.close_popup_button)\n assert not self.app.general.el_is_presented(self.pr.pr_popup)\n\n def _test_WHEN_all_fields_filled_in_prpopup_EXPECTED_pr_is_saved(self, pr_student):\n random_pr = self.pr.get_random_pr_data()\n self.app.general.but_press(self.pr.request_project_button)\n self.app.general.send_k(self.pr.pr_popup_pname, random_pr['pr_name'])\n self.app.general.send_k(self.pr.pr_popup_description, random_pr['description'])\n self.pr.choose_value_on_popup(random_pr, 'Choose a topic')\n\n self.app.general.but_press(self.pr.pr_popup_submit_button)\n assert not self.app.general.el_is_presented(self.pr.pr_popup)\n\n\n\nclass TestClassPrRequest_creator():\n @pytest.fixture(autouse=True)\n def _request_signup_page(self, pr_creator, app_test_users):\n self.app = pr_creator\n self.app.user_creator = app_test_users.user_creator\n self.app.user_student = app_test_users.user_student\n self.pr = pr_creator.request_project\n\n\n\n def test_WHEN_creator_is_logged_EXPECTED_elements_in_pr_correct_TC6251(self, pr_creator):\n elts_dct = {}\n elts_dct['likes_counter'] = self.pr.pr_element_is_displayed_in_each(\n self.pr.likes_button)\n elts_dct['likes_button'] = self.pr.pr_element_is_displayed_in_each(\n self.pr.likes_counter)\n elts_dct['title'] = 
self.pr.pr_element_is_displayed_in_each(\n            self.pr.project_request_titles)\n        elts_dct['description'] = self.pr.pr_element_is_displayed_in_each(\n            self.pr.description_of_pr)\n        elts_dct['language'] = self.pr.pr_element_is_displayed_in_each(\n            self.pr.language_title)\n        elts_dct['subcategory_icon'] = self.pr.pr_element_is_displayed_in_each(\n            self.pr.subcategory_icon)\n        elts_dct['name_of_creator'] = self.pr.pr_element_is_displayed_in_each(\n            self.pr.creator_name)\n        elts_dct['creation_date'] = self.pr.pr_element_is_displayed_in_each(\n            self.pr.date_of_proj_request)\n        elts_dct['create_project'] = self.pr.pr_element_is_displayed_in_each(\n            self.pr.create_this_project)\n\n\n        assert all(elts_dct.values())\n\n    def test_WHEN_create_project_button_is_pressed_EXPECTED_creation_is_started_TC6275(self, pr_creator):\n        self.pr.press_random_create_this_proj_button()\n        assert self.app.general.get_txt_of_el(self.pr.main_title) == 'CREATE PROJECT'\n\n    def test_WHEN_creator_is_logged_EXPECTED_make_section_is_correct_TC6291(self, pr_creator):\n        elts_dct = {}\n        elts_dct['icon_of_section'] = self.pr.pr_element_is_displayed_in_each(\n            self.pr.make_section_icon)\n        elts_dct['text'] = self.pr.pr_element_is_displayed_in_each(\n            self.pr.make_section_text)\n        elts_dct['title'] = self.pr.pr_element_is_displayed_in_each(\n            self.pr.make_section_title)\n        assert all(elts_dct.values())\n\n","sub_path":"tests/test_project_requests.py","file_name":"test_project_requests.py","file_ext":"py","file_size_in_byte":13737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"589288589","text":"\"\"\"\r\nwcount.py: fetch the text at the given URL and print the topn most frequent words and their counts.\r\n\r\n\r\n__author__=\"wengpeiyi\"\r\n__pkuid__=\"1800011749\"\r\n__email__=\"594592395@qq.com\"\r\n\"\"\"\r\n\r\n\r\nimport sys\r\nfrom urllib.request import urlopen\r\n\r\n\r\ndef wcount(lines, topn):\r\n    \"\"\"count words from lines of text string, then sort by their counts\r\n    in reverse order, output the topn (word count), each in one line. \r\n    \"\"\"\r\n    for i in range(len(lines)):\r\n        if lines[i].isalpha() == False:\r\n            lines = lines.replace(lines[i], \" \")\r\n    lines = lines.lower()\r\n    lines = lines.split()\r\n    \r\n    # tally the words in a dict\r\n    d = {}\r\n    for i in range(len(lines)):\r\n        if lines[i] in d:\r\n            d[lines[i]] += 1\r\n        else:\r\n            d[lines[i]] = 1\r\n    s = d.items()\r\n    t = []\r\n    for i in s:\r\n        t.append(i)\r\n    \r\n    ans = sorted(t, key=lambda x: x[1], reverse=True)\r\n    if topn <= len(ans):\r\n        ans = ans[:topn]\r\n    for i in ans:\r\n        print(i[0], \" \", i[1])\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n    if len(sys.argv) == 1:\r\n        # a url is required for the program to run\r\n        print('Usage: {} url [topn]'.format(sys.argv[0]))\r\n        print(' url: URL of the txt file to analyze ')\r\n        print(' topn: how many (words count) to output. If not given, will output top 10 words')\r\n        sys.exit(1)\r\n    elif len(sys.argv) == 2:\r\n        # only a url given: default to the top 10 words\r\n        address = \"\"\r\n        address += sys.argv[1]\r\n        try:\r\n            doc = urlopen(address)\r\n            docstr = doc.read()\r\n            doc.close()\r\n            lines = docstr.decode()\r\n            wcount(lines, 10)\r\n        except OSError:\r\n            print(\"Sorry, 404: not Found\")\r\n        \r\n    elif len(sys.argv) == 3:\r\n        address = \"\"\r\n        address += sys.argv[1]\r\n        doc = urlopen(address)\r\n        docstr = doc.read()\r\n        doc.close()\r\n        lines = docstr.decode()\r\n        wcount(lines, int(sys.argv[2]))\r\n    else:\r\n        # at most two arguments (url and topn) are accepted\r\n        print(\"Sorry, invalid input. 
\")\r\n print(\"Only 2 parameters are allowed.\")\r\n","sub_path":"pyassign4/wcount.py","file_name":"wcount.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"144799626","text":"\"\"\"\nGiven an array nums of n integers and an integer target, find three integers in nums such that the sum is closest to target. \nReturn the sum of the three integers. You may assume that each input would have exactly one solution.\n\nExample:\n\nGiven array nums = [-1, 2, 1, -4], and target = 1.\n\nThe sum that is closest to the target is 2. (-1 + 2 + 1 = 2).\n\"\"\"\n\nclass Solution(object):\n def threeSumClosest(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: int\n \"\"\"\n if not nums or len(nums) < 3:\n return []\n \n # First sort the array (nlogn)\n nums = sorted(nums)\n \n closest_sum = None\n min_distance = None\n\n # For each element, search the pair that adds to 0 with itself.\n for i in xrange(0, len(nums)-2): #(nlogn)\n start = i+1\n end = len(nums) -1\n while start < end:\n cur_sum = nums[i] + nums[start] + nums[end]\n cur_distance = abs(target-cur_sum)\n if cur_sum == target:\n return cur_sum\n if not min_distance or min_distance > cur_distance:\n min_distance = cur_distance\n closest_sum = cur_sum\n if cur_sum < target:\n start += 1\n else:\n end -= 1\n return closest_sum\n","sub_path":"3sum_closest.py","file_name":"3sum_closest.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"419342538","text":"from django.test import TestCase\nfrom datetime import date\n\n# Create your tests here.\nfrom django.urls import reverse\nfrom rest_framework.test import APITestCase, APIClient\nfrom rest_framework.views import status\nfrom .models import Blogposts\nfrom .serializers import BlogpostsSerializer\n\n# tests for views\n\n\nclass BaseViewTest(APITestCase):\n client = APIClient()\n\n @staticmethod\n def create_blogpost(title=\"\", body=\"\", media_url=\"\", author_id=1, posted_on=date.today()):\n if title != \"\" and body != \"\": #media_link should be optional\n Blogposts.objects.create(title=title, body=body, media_url=media_url, posted_on=posted_on)\n\n def setUp(self):\n # add test data\n self.create_blogpost(\"5 dishes to try\", \"spaghetti spaghetti spaghetti spaghetti spaghetti\", 1, date.today())\n self.create_blogpost(\"blatant clickbait\", \"find out more!\", 2, date.today())\n self.create_blogpost(\"talking to americans\", \"bring up politics and religion right away!\", 3, date.today())\n self.create_blogpost(\"title\", \"body\", \"media_url\", 4, date.today())\n\n\nclass GetAllBlogpostsTest(BaseViewTest):\n\n def test_get_all_blogposts(self):\n \"\"\"\n This test ensures that all songs added in the setUp method\n exist when we make a GET request to the songs/ endpoint\n \"\"\"\n # hit the API endpoint\n response = self.client.get(\n reverse(\"blogposts-all\", kwargs={\"version\": \"v1\"})\n )\n # fetch the data from db\n expected = Blogposts.objects.all()\n serialized = BlogpostsSerializer(expected, many=True)\n self.assertEqual(response.data, serialized.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)","sub_path":"api/blogpost/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"470155509","text":"from pydux.create_store 
import create_store\nfrom functools import reduce\n\n\ndef assign(*args):\n return reduce(lambda acc, x: acc.update(x) or acc, args, {})\n\n\ndef mylistener(*args, **kwargs):\n print('mylistener', args, kwargs)\n\n\ndef todo_reducer(state, action):\n if not state:\n return {'todos': []}\n if action['type'] == 'ADD_TODO':\n return assign(state, {\n 'foo': 'bar'\n })\n else:\n return state\n\n\nstore = create_store(todo_reducer)\nstore.subscribe(mylistener)\na = store.dispatch({'type': 'ADD_TODO'})\nprint('state:', store['get_state']())\nprint(a)\na = store.dispatch({'type': 'ADD_TODO'})\n# a = {'asdf': 1234}\n# b = {'asdf': 3456}\n#\n# print(assign(a,b))\n# print(a)\n# print(b)\n\n","sub_path":"gooey/examples/testingpydux.py","file_name":"testingpydux.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"301031024","text":"import numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.kernel_approximation import RBFSampler\n\n\n# Relevance Vector Machine Classifier using EM algorithm by Michael E. Tipping.\n### This is a python implementation of Relevance Vector Machine Classifier, it's based on github.com/ctgk/PRML/blob/master/prml/kernel/relevance_vector_classifier.py\nclass RVC:\n def sigmoid(self,a):\n return np.tanh(a * 0.5) * 0.5 + 0.5\n\n # Kernel matrix using rbf kernel with gamma = 0.3.\n def kernel_mat(self,X, Y):\n (x, y) = (np.tile(X, (len(Y), 1, 1)).transpose(1, 0, 2),\n np.tile(Y, (len(X), 1, 1)))\n d = np.repeat(1 / (0.3 * 0.3), X.shape[-1]) * (x - y) ** 2\n return np.exp(-0.5 * np.sum(d, axis=-1))\n def __init__(self, alpha=1.):\n self.threshold_alpha = 1e8\n self.alpha = alpha\n self.iter_max = 100\n self.relevance_vectors_ = []\n\n # estimates for singulat matrices.\n def ps_inv(self, m):\n # assuming it is a square matrix.\n a = m.shape[0]\n i = np.eye(a, a)\n return np.linalg.lstsq(m, i, rcond=None)[0]\n\n '''\n For the current fixed values of alpha, the most probable\n weights are found by maximizing w over p(w/t,alpha) \n using the Laplace approximation of finding an hessian.\n (E step)\n w = mean of p(w/t,alpha)\n cov = negative hessian of p(w/t,alpha)\n \n '''\n def _map_estimate(self, X, t, w, n_iter=10):\n for _ in range(n_iter):\n y = self.sigmoid(X @ w)\n g = X.T @ (y - t) + self.alpha * w\n H = (X.T * y * (1 - y)) @ X + np.diag(self.alpha) # negated Hessian of p(w/t,alpha)\n w -= np.linalg.lstsq(H, g, rcond=None)[0] # works even if for singular matrices.\n return w, self.ps_inv(H) # inverse of H is the covariance of the gaussian approximation.\n\n '''\n Fitting of input-target pairs works by\n iteratively finding the most probable weights(done by _map_estimate method)\n and optimizing the hyperparameters(alpha) until there is no\n siginificant change in alpha.\n \n (M step)\n Optimizing alpha:\n For the given targets and current variance(sigma^2) alpha is optimized over p(t/alpha,variance)\n It is done by Mackay approach(ARD).\n alpha(new) = gamma/mean^2\n where gamma = 1 - alpha(old)*covariance.\n \n After finding the hyperparameters(alpha),\n the samples which have alpha less than the threshold(hence weight >> 0)\n are choosen as relevant vectors.\n \n Now predicted y = sign(phi(X) @ mean) ( mean contains the optimal weights)\n '''\n def fit(self, X, y):\n Phi = self.kernel_mat(X, X)\n N = len(y)\n self.alpha = 
np.zeros(N) + self.alpha\n mean = np.zeros(N)\n for i in range(self.iter_max):\n param = np.copy(self.alpha)\n mean, cov = self._map_estimate(Phi, y, mean, 10)\n gamma = 1 - self.alpha * np.diag(cov)\n self.alpha = gamma / np.square(mean)\n np.clip(self.alpha, 0, 1e10, out=self.alpha)\n if np.allclose(param, self.alpha):\n break\n\n ret_alpha = self.alpha < self.threshold_alpha\n self.relevance_vectors_ = X[ret_alpha]\n self.y = y[ret_alpha]\n self.alpha = self.alpha[ret_alpha]\n Phi = self.kernel_mat(self.relevance_vectors_, self.relevance_vectors_)\n mean = mean[ret_alpha]\n self.mean, self.covariance = self._map_estimate(Phi, self.y, mean, 100)\n\n\n # gives probability for target to be class 0.\n def predict_proba(self, X):\n phi = self.kernel_mat(X, self.relevance_vectors_)\n mu_a = phi @ self.mean\n var_a = np.sum(phi @ self.covariance * phi, axis=1)\n return 1 - self.sigmoid(mu_a / np.sqrt(1 + np.pi * var_a / 8))\n\n def predict(self, X):\n phi = self.kernel_mat(X, self.relevance_vectors_)\n return (phi @ self.mean > 0).astype(np.int)\n\n\n# scipy.io loadmat doesn't seem to be accept the version of this data file\ndata = {}\nwith h5py.File('/pyprobml/data/bishop2class.mat', 'r') as f:\n for name, d in f.items():\n data[name] = np.array(d)\n\nX = data['X'].transpose()\nY = data['Y']\ny = Y.flatten()\ny = y - 1 # changing to {0,1}\n\n# Feature Mapping X to rbf_features to simulate non-linear logreg using linear ones.\nrbf_feature = RBFSampler(gamma=0.3, random_state=1)\nX_rbf = rbf_feature.fit_transform(X)\n\n# Using CV to find SVM regularization parameter.\nC = np.power(2, np.linspace(-5, 5, 10))\nmean_scores = [cross_val_score(SVC(kernel='rbf', gamma=0.3, C=c), X, y, cv=5).mean() for c in C]\nc = C[np.argmax(mean_scores)]\n\nclassifiers = {\n 'logregL2': LogisticRegression(C=0.2, penalty='l2',\n solver='saga',\n multi_class='ovr',\n max_iter=10000),\n 'logregL1': LogisticRegression(C=1, penalty='l1',\n solver='saga',\n multi_class='ovr',\n max_iter=10000),\n 'RVM': RVC(),\n 'SVM': SVC(kernel='rbf', gamma=0.3, C=c, probability=True)\n}\n\nh = 0.05 # step size in the mesh\n\n# Mesh to use in the boundary plotting.\nx_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\ny_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\nxx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n\n\ndef plot_scatters(X, y):\n for class_value in range(2):\n # get row indexes for samples with this class\n row_ix = np.where(y == class_value)\n # creating scatter of these samples\n plt.scatter(X[row_ix, 0], X[row_ix, 1], cmap='Paired', marker='X', s=30)\n\n\ndef plot_SVs(SV):\n plt.scatter(SV[:, 0], SV[:, 1], s=100, facecolor=\"none\", edgecolor=\"green\")\n\n\nfor (name, clf) in classifiers.items():\n\n if name == 'logregL2':\n clf.fit(X_rbf, y)\n Z = clf.predict_proba(rbf_feature.fit_transform(np.c_[xx.ravel(), yy.ravel()]))\n Z = Z[:, 0].reshape(xx.shape)\n plt.title(name + \", nerr= {}\".format(np.sum(y != clf.predict(X_rbf))))\n plt.contour(xx, yy, Z, np.linspace(0, 1, 5), colors=['black', 'w'])\n plot_scatters(X, y)\n plt.show()\n plt.savefig(\"../figures/kernelBinaryClassifDemo{}.pdf\".format(name), dpi=300)\n\n elif name == 'logregL1':\n clf.fit(X_rbf, y)\n Z = clf.predict_proba(rbf_feature.fit_transform(np.c_[xx.ravel(), yy.ravel()]))\n Z = Z[:, 0].reshape(xx.shape)\n plt.title(name + \", nerr= {}\".format(np.sum(y != clf.predict(X_rbf))))\n plt.contour(xx, yy, Z, np.linspace(0, 1, 5), colors=['w','black', 'w'])\n plot_scatters(X, y)\n conf_scores = 
np.abs(clf.decision_function(X_rbf))\n SV = X[(conf_scores > conf_scores.mean())] # samples having a higher confidence scores are taken as support vectors.\n plot_SVs(SV)\n plt.show()\n plt.savefig(\"../figures/kernelBinaryClassifDemo{}.pdf\".format(name), dpi=300)\n\n elif name == 'RVM':\n clf.fit(X, y)\n Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n plt.title(name + \", nerr= {}\".format(np.sum(y != clf.predict(X))))\n plt.contour(xx, yy, Z, np.linspace(0, 1, 5), colors=['black', 'w'])\n plot_scatters(X, y)\n plot_SVs(clf.relevance_vectors_)\n plt.show()\n plt.savefig(\"../figures/kernelBinaryClassifDemo{}.pdf\".format(name), dpi=300)\n\n elif name == 'SVM':\n clf.fit(X, y)\n Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])\n Z = Z[:, 0]\n Z = Z.reshape(xx.shape)\n plt.title(name + \", nerr= {}\".format(np.sum(y != clf.predict(X))))\n plt.contour(xx, yy, Z, colors=['w', 'w', 'w', 'black'])\n plot_scatters(X, y)\n plot_SVs(clf.support_vectors_)\n plt.show()\n plt.savefig(\"../figures/kernelBinaryClassifDemo{}.pdf\".format(name), dpi=300)\n","sub_path":"scripts/kernelBinaryClassifDemo.py","file_name":"kernelBinaryClassifDemo.py","file_ext":"py","file_size_in_byte":8086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"35074623","text":"import pickle, glob, sys, csv\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.io as sio\n\nfrom scipy import optimize\nfrom scipy.stats import vonmises, norm, beta, uniform, multivariate_normal\nfrom scipy.special import logsumexp, iv, beta, gamma, digamma\nfrom mpi4py import MPI\nfrom datetime import datetime\n\nfrom wind_velocity_field_utils import *\nfrom utils import *\nfrom cloud_velocity_vector_utils import *\nfrom feature_extraction_utils import _get_node_info, _save_file\n\ndef _run_experiment(gpr, kernel, degree, weights, CV):\n # Start timing\n t_init = datetime.now()\n # Wind velocity estimation by using selected standardized vectors\n UV_hat_, theta_ = _wind_velocity_field_gpr(XY_tr_, UV_tr_, w_tr_, XY_ts_, UV_ts_, w_ts_, dXYZ_, XY_stdz_, xy_stdz_, _stdz_x, _stdz_y,\n N_y, N_x, step_size, gpr, kernel, degree, weights, CV, N_grid, N_kfold, display = False)\n # Stop timing and estimate time for each training and testing\n t = datetime.now() - t_init\n return t.total_seconds(), theta_\n\ndef _get_results(time, theta_):\n # Unpack Variables and print results\n cv_, wmse_val, wmse_ts, mse_ts, wmae_ts, mae_ts, div, vor = theta_\n print('Time: {} Error: WMSE = {} MSE = {} WMAE = {} MAE = {}'.format(time, wmse_ts, mse_ts, wmae_ts, mae_ts))\n # Variables Initialization\n E_ = np.zeros((10))\n F_ = np.zeros((2))\n P_ = np.zeros((4))\n W_ = np.zeros((4))\n # Save Error-Matrics in a matrix\n E_ = np.array((time, wmse_val, wmse_ts, mse_ts, wmae_ts, mae_ts))\n # Save Flow Dynamics in a Matrix\n F_= np.array((div, vor))\n # eSVM CV-Parameters a matrix\n P_ = np.array((cv_[0][0], cv_[0][1], cv_[0][2], cv_[0][3]))\n # In case of two eSVM save then in another matrix\n if len(cv_) == 2:\n W_ = np.array((cv_[1][0], cv_[1][1], cv_[1][2], cv_[1][3]))\n return np.concatenate((E_, F_, P_, W_), axis = 0)\n\n# Save Data in a .csv file\n# key - time - WMSE Val - WMSE TS- MSE TS - WMAE TS - MAE TS - DIV - VOR - CV00 - CV01 - CV02 - CV03 - CV1 - CV11 - CV12 - CV13\ndef _save_files(x_, key, name):\n x_ = [key] + x_.tolist()\n print(x_)\n # Save vector of results\n with open(name, 'a', newline = '\\n') as f:\n writer = csv.writer(f)\n 
writer.writerow(x_)\n\nload_path = r'/users/terren/wheeler-scratch/data/DLWVF_testing_v2-28'\nsave_path = r'/users/terren/wind_velocity_field/logs'\n# Nodes and jobs information for communication from MPI\ni_job, N_jobs, comm = _get_node_info(verbose = False)\n# GPR Parameters\ni_gpr = int(sys.argv[1])\ni_kernel = int(sys.argv[2])\ni_data = int(sys.argv[3])\ni_layer = int(sys.argv[4])\nn_layers = int(sys.argv[5])\nweights = True\nCV = True\nstep_future_interval = 6\n# Set the parameters by cross-validation\nkernel_ = ['linear', 'rbf', 'poly', 'poly']\ndegree_ = [0, 0, 2, 3]\ndegree = degree_[i_kernel]\nkernel = kernel_[i_kernel]\nprint(r'Config.: GPR: {} Kernel: {} Degree: {} CV: {}'.format(i_gpr, kernel, degree, CV))\n# Cross-Validation Parameters\nN_grid = 5\nN_kfold = 3\nprint(r'Cross-Validation: No Grid Search: {} No. kfolds: {}'.format(N_grid, N_kfold))\n# Load-up samples\nname = r'D{}L{}-DLWVF-95-6_10100-CV0.pkl'.format(i_data, n_layers)\n#name = r'D{}-DLWVF-95-6_10100.pkl'.format(i_data)\n#name = r'D{}-DLWVF-95-8_00110.pkl'.format(i_data)\n#name = r'D{}-DLWVF-80-6_02004.pkl'.format(i_data)\nfile_name = '{}/{}'.format(load_path, name)\nX_ = _load_file(file_name)[0]\nprint(r'Day: {} Wind Layer: {} Sample: {} -- {} File: {} '.format(i_data, i_layer, i_job, len(X_), file_name))\n# Sample Variables Initializacion\nX_tr_, _, _, Z_tr_, W_tr_ = X_[i_job][i_layer]\n_, X_ts_, Y_ts_, _, _ = X_[i_job + step_future_interval][i_layer]\n# Get Training and Test Data\nXY_tr_, UV_tr_, w_tr_ = X_tr_\nXY_ts_, UV_ts_, w_ts_ = X_ts_\n# Get Prespective Transform Data\nXYZ_, dXYZ_, height, wind_flow_indicator_ = Y_ts_\n# Get Constants\nXY_stdz_, xy_stdz_, _stdz_x, _stdz_y = Z_tr_\np_segment, n_select, p_train, n_layers, lag, N_y, N_x, step_size = W_tr_\nconfig = r'{}-{}_{}-{}'.format(p_segment, lag, n_select, p_train)\n# loop over Samples\ntime, theta_ = _run_experiment(i_gpr, kernel, degree, weights, CV)\nx_ = _get_results(time, theta_)\n# Save Data only if they have commom wind layers\n#_save_files(x_, key = r'{}{}'.format(i_job, i_layer), name = r'{}/GP{}{}-{}{}-95-6_10100-{}.csv'.format(save_path, i_gpr, i_kernel, i_data, i_layer, int(CV)))\n_save_files(x_, key = r'{}{}'.format(i_job, i_layer),\n name = r'{}/GPR{}{}-95-6_10100-CV{}.csv'.format(save_path, i_gpr, i_kernel, int(CV)))\n","sub_path":"double_layer_wind_velocity_field_GPR_validation.py","file_name":"double_layer_wind_velocity_field_GPR_validation.py","file_ext":"py","file_size_in_byte":4545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"498604605","text":"# Copyright (C) 2002-2020 CERN for the benefit of the ATLAS collaboration\n#\n# File: CoolLumiUtilities/python/BunchLumisCondAlgDefault.py\n# Created: May 2019, sss\n# Purpose: Configure BunchLumisCondAlg.\n#\n\n\nfrom AthenaCommon.AlgSequence import AthSequencer\n\n\ndef BunchLumisCondAlgDefault():\n name = 'BunchLumisCondAlg'\n condSeq = AthSequencer ('AthCondSeq')\n\n if hasattr (condSeq, name):\n return getattr (condSeq, name)\n\n\n # Should only be used for Run 1.\n from IOVDbSvc.CondDB import conddb\n if conddb.dbdata != 'COMP200':\n return None\n\n folder = '/TDAQ/OLC/BUNCHLUMIS'\n\n from AthenaCommon.GlobalFlags import globalflags\n if globalflags.isOverlay():\n # Load reduced channel list for overlay jobs to try to reduce COOL access\n # Need Lucid AND, OR, HitOR, BcmH OR, BcmV OR\n conddb.addFolder('TDAQ', '101,102,103,201,211 /TDAQ/OLC/BUNCHLUMIS',\n className = 'CondAttrListCollection')\n\n else:\n conddb.addFolder('TDAQ', 
folder,\n className = 'CondAttrListCollection')\n\n from CoolLumiUtilities.CoolLumiUtilitiesConf import \\\n BunchLumisCondAlg\n\n from CoolLumiUtilities.FillParamsCondAlgDefault import FillParamsCondAlgDefault\n fpalg = FillParamsCondAlgDefault()\n\n alg = BunchLumisCondAlg (name,\n BunchLumisFolderInputKey = folder,\n FillParamsInputKey = fpalg.FillParamsOutputKey,\n BunchLumisOutputKey = 'BunchLumisCondData')\n condSeq += alg\n\n return alg\n","sub_path":"Database/CoolLumiUtilities/python/BunchLumisCondAlgDefault.py","file_name":"BunchLumisCondAlgDefault.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"608206151","text":"import os, re\nfrom xml.etree import ElementTree\nfrom shapefileIO import ShapefileWriter, TYPE as SHTYPE\nfrom PyQt4.QtCore import *\nfrom qgis.core import *\n\nclass XmlToShapefile(QObject):\n # Signal emitted to update progress\n prog_sig = pyqtSignal(int)\n def __init__(self, xml_path, sh_dir, formula):\n QObject.__init__(self)\n self.document = None\n self.writer = ShapefileWriter(sh_dir)\n ElementTree.register_namespace('geo', \"http://www.smart.mit.edu/geo\")\n self.document = ElementTree.parse(xml_path)\n self.formula = formula\n\n def parseLocation(self, data):\n \n searchX = data.find(\"x\").text\n searchY = data.find(\"y\").text\n x = float(searchX) \n y = float(searchY) \n \n pos = QgsPoint(eval(self.formula[0]), eval(self.formula[1]))\n # data.find(\"x\").text = \"--\" Won't parse out location\n # data.find(\"y\").text = \"--\"\n return pos\n\n def parseMulnode(self, mulnode):\n nodeId = mulnode.find(\"id\").text\n point = mulnode.find(\"point\")\n if point is None:\n QgsMessageLog.logMessage(\"No polypoint in node %s\"%str(nodeId), 'SimGDC')\n return \n point = self.parseLocation(point)\n attr = [nodeId]\n turningPaths = mulnode.find(\"turning_path\")\n\n self.writer.addPoint(SHTYPE.NODE, point, attr)\n\n def parseLane(self, segmentId, lane):\n laneId = lane.find(\"id\").text\n attr = [segmentId, laneId]\n coordinates = []\n polyLine = lane.find(\"polyline\")\n if polyLine is None:\n QgsMessageLog.logMessage(\"No polyline in lane %s\"%str(laneId), 'SimGDC')\n return\n points = polyLine.find(\"points\")\n for point in points.findall(\"point\"):\n coordinates.append(self.parseLocation(point))\n if len(coordinates) == 0:\n QgsMessageLog.logMessage(\"Lane %s has no polyline info.\"%laneId, 'SimGDC')\n return\n\n self.writer.addPolyline(SHTYPE.LANE, coordinates, attr)\n\n def parseLaneEdge(self, segmentId, laneEdge):\n laneNumber = laneEdge.find(\"laneNumber\").text\n attr = [segmentId, laneNumber]\n coordinates = []\n polyLine = laneEdge.find(\"polyline\")\n if polyLine is None:\n QgsMessageLog.logMessage(\"No polyline in laneEdge %s of segment %s\"%(str(laneNumber),str(segmentId)), 'SimGDC')\n return \n for polypoint in polyLine.findall('point'):\n coordinates.append(self.parseLocation(polypoint.find('location')))\n self.writer.addPolyline(SHTYPE.LANEEDGE, coordinates, attr)\n \n def parseCrossing(self, segmentId, crossing):\n crossingId = crossing.find(\"id\").text\n attr = [segmentId, crossingId]\n coordinates = [0, 1, 2, 3]\n nearLine = crossing.find(\"nearLine\")\n if nearLine is None:\n QgsMessageLog.logMessage(\"No nearLine in crossing %s\"%crossingId, 'SimGDC')\n return\n coordinates[0] = self.parseLocation(nearLine.find('first'))\n coordinates[1] = self.parseLocation(nearLine.find('second'))\n farLine = crossing.find(\"farLine\")\n if farLine is 
None:\n QgsMessageLog.logMessage(\"No farLine in crossing %s\"%crossingId, 'SimGDC')\n return \n coordinates[3] = self.parseLocation(farLine.find('first'))\n coordinates[2] = self.parseLocation(farLine.find('second'))\n self.writer.addPolygon(SHTYPE.CROSSING, coordinates, attr)\n\n def parseTurningPath(self, turningpath):\n\n id = turningpath.find(\"id\").text\n groupID = turningpath.find(\"group_id\").text\n attr = [id, groupID]\n coordinates = []\n polyline = turningpath.find(\"polyline\")\n if polyline is None:\n QgsMessageLog.logMessage(\"Turning Path %s has no polyline info.\"%id, 'SimGDC')\n return\n points = polyline.find(\"points\")\n for point in points.findall(\"point\"):\n x = point.find(\"x\")\n xtext = x.text\n if xtext is None:\n QgsMessageLog.logMessage(\"Point in turning path %s has no co-ordinate info\"%id, \"SimGDC\")\n continue\n coordinates.append(self.parseLocation(point))\n if len(coordinates) == 0:\n QgsMessageLog.logMessage(\"Turning Path %s has no polyline info.\"%id, 'SimGDC')\n return\n\n self.writer.addPolyline(SHTYPE.TURNINGPATH,coordinates, attr)\n\n def parseLink(self, link):\n\n id = link.find(\"id\").text\n road_name = link.find(\"road_name\").text\n attr = [id,road_name]\n coordinates = []\n polyline = link.find(\"polyline\")\n if polyline is None:\n QgsMessageLog.logMessage(\"Link %s has no polyline info.\"%id, 'SimGDC')\n return\n points = polyline.find(\"points\")\n for point in points.findall(\"point\"):\n coordinates.append(self.parseLocation(point))\n if len(coordinates) == 0:\n QgsMessageLog.logMessage(\"Link %s has no polyline info.\"%id, 'SimGDC')\n return\n\n self.writer.addPolyline(SHTYPE.LINK,coordinates, attr)\n\n\n def parseBusstop(self, busstop):\n point = busstop.find(\"point\")\n x = point.find(\"x\")\n text = x.text\n if text is None:\n QgsMessageLog.logMessage(\"Point in busstop has no co-ordinate info\", \"SimGDC\")\n return\n coordinates = self.parseLocation(point)\n attr = [busstop.find(\"segment_id\").text, busstop.find(\"id\").text]\n self.writer.addPoint(SHTYPE.BUSSTOP, coordinates, attr)\n\n def parseTrainstop(self, trainstop):\n point = trainstop.find(\"point\")\n x = point.find(\"x\")\n text = x.text\n if text is None:\n QgsMessageLog.logMessage(\"Point in trainstop has no co-ordinate info\", \"SimGDC\")\n return\n coordinates = self.parseLocation(point)\n attr = [\"\".join(trainstop.findall(\"segment_id\")), trainstop.find(\"id\").text]\n self.writer.addPoint(SHTYPE.TRAINSTOP, coordinates, attr)\n\n def parseSegment(self, linkId, segment):\n segmentId = segment.find(\"id\").text\n attr = [linkId, segmentId]\n coordinates = [] \n polyline = segment.find(\"polyline\")\n if polyline is None:\n QgsMessageLog.logMessage(\"segment %s has no polyline info.\"%segmentId, 'SimGDC')\n return\n points = polyline.find(\"points\")\n for point in points.findall(\"point\"):\n coordinates.append(self.parseLocation(point))\n if len(coordinates) == 0:\n QgsMessageLog.logMessage(\"segment %s has no polyline info.\"%segmentId, 'SimGDC')\n return\n # if len(coordinates) < 3:\n # coordinates.append(QgsPoint(coordinates[0]))\n # coordinates.append(QgsPoint(coordinates[1]))\n #parse Lane\n lanes = segment.find(\"lanes\")\n if lanes is not None:\n for lane in lanes.findall('lane'):\n self.parseLane(segmentId, lane)\n # #parse Lane Edge\n # laneEdges = segment.find(\"laneEdgePolylines_cached\")\n # if laneEdges is not None:\n # for laneEdge in laneEdges.findall('laneEdgePolyline_cached'):\n # self.parseLaneEdge(segmentId, laneEdge)\n\n 
self.writer.addPolyline(SHTYPE.SEGMENT, coordinates, attr)\n\n def run(self):\n if self.document is None:\n return\n progPercent = 0\n roadNetwork = self.document.find('road_network')\n #parse nodes\n nodes = roadNetwork.find('nodes')\n if nodes is not None:\n mulNodes = nodes.findall('node')\n\n count = len(mulNodes)\n for mulNode in mulNodes:\n self.parseMulnode(mulNode)\n progPercent = progPercent + 50.0/count\n self.prog_sig.emit(progPercent)\n\n\n for turningpath in roadNetwork.iter('turning_path'):\n self.parseTurningPath(turningpath)\n\n #parse obstacles\n pt_stops = roadNetwork.find(\"pt_stops\")\n if pt_stops is not None:\n for bus_stop in pt_stops.iter('bus_stop'):\n self.parseBusstop(bus_stop)\n for train_stop in pt_stops.iter('train_stop'):\n self.parseTrainstop(train_stop)\n\n #parse link,segment\n links = []\n linkParent = roadNetwork.find('links')\n if linkParent is not None:\n links = linkParent.findall('link')\n count = len(links)\n for link in links:\n self.parseLink(link)\n linkId = link.find('id').text\n segmentParent = link.find('segments')\n if segmentParent is not None:\n segments = segmentParent.findall('segment')\n if segments is not None:\n for segment in segments:\n self.parseSegment(linkId, segment)\n progPercent = progPercent + 50.0/count\n self.prog_sig.emit(progPercent)\n \n #save shapefiles\n self.writer.save()\n #save the rest of xml file\n xmlRemainPath = os.path.join(self.writer.path, \"data.xml\")\n self.document.write(xmlRemainPath, encoding=\"utf-8\", xml_declaration=True, default_namespace=None, method=\"xml\")\n","sub_path":"Trafficera-branch2/xmlToShapefile.py","file_name":"xmlToShapefile.py","file_ext":"py","file_size_in_byte":9312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"486044658","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# Welcome to the world where fashion meets computer vision! This is a starter kernel that applies Mask R-CNN with COCO pretrained weights to the task of [iMaterialist (Fashion) 2019 at FGVC6](https://www.kaggle.com/c/imaterialist-fashion-2019-FGVC6).\n\n# In[26]:\n\n\nimport os\nimport gc\nimport sys\nimport json\nimport glob\nimport random\nfrom pathlib import Path\n\nimport cv2\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport itertools\nfrom tqdm import tqdm\n\nfrom imgaug import augmenters as iaa\nfrom sklearn.model_selection import StratifiedKFold, KFold, RepeatedStratifiedKFold\n\n\n# In[2]:\n\n\nDATA_DIR = Path('/home/ubuntu/efs/kaggle/imaterialist/')\nROOT_DIR = Path('/home/ubuntu/efs/kaggle/imaterialist/maskrcnn/logs')\n\n# For demonstration purposes, the classification ignores attributes (only categories),\n# and the image size is set to 512, which is the same as the size of submission masks\nNUM_CATS = 46\nIMAGE_SIZE = 512\n\nN_FOLD = 6\n# # Download Libraries and Pretrained Weights\n\n# In[3]:\n\n\n'''\n!git clone https://www.github.com/matterport/Mask_RCNN.git\nos.chdir('Mask_RCNN')\n\n!rm -rf .git # to prevent an error when the kernel is committed\n!rm -rf images assets # to prevent displaying images at the bottom of a kernel\n'''\n\n\n# In[4]:\n\n\nprint(ROOT_DIR/'Mask_RCNN')\nsys.path.append(\"/home/ubuntu/github/Mask_RCNN/\")\n#sys.path.append(ROOT_DIR/'Mask_RCNN')\nfrom mrcnn.config import Config\nfrom mrcnn import utils\nimport mrcnn.model as modellib\nfrom mrcnn import visualize\nfrom mrcnn.model import log\n\n\n# In[5]:\n\n\n\n\n# In[27]:\n\n\n#!wget --quiet https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5\n#!ls -lh mask_rcnn_coco.h5\n\n#COCO_WEIGHTS_PATH = 'mask_rcnn_coco.h5'\nCOCO_WEIGHTS_PATH = \"/home/ubuntu/efs/kaggle/imaterialist/maskrcnn/logs/fashion20190602/mask_rcnn_fashion_0000.h5\"\n\n\n# # Set Config\n\n# In[7]:\n\n\nsegment_df = pd.read_csv(DATA_DIR/\"train.csv\")\n\n\n# In[8]:\n\n\ndataset_size = len(list(segment_df.ImageId.unique()))\ntrain_ratio = (N_FOLD-1)/N_FOLD\ntrain_size = int(dataset_size*train_ratio)//32*32\nval_size = int(dataset_size-train_size)\nprint(train_size)\n\n\n# Mask R-CNN has a load of hyperparameters. 
I only adjust some of them.\n\n# In[9]:\n\n\nclass FashionConfig(Config):\n NAME = \"fashion\"\n NUM_CLASSES = NUM_CATS + 1 # +1 for the background class\n \n GPU_COUNT = 4\n IMAGES_PER_GPU = 4 # a memory error occurs when IMAGES_PER_GPU is too high\n \n BACKBONE = 'resnet101'\n \n IMAGE_MIN_DIM = IMAGE_SIZE\n IMAGE_MAX_DIM = IMAGE_SIZE \n IMAGE_RESIZE_MODE = 'none'\n \n RPN_ANCHOR_SCALES = (16, 32, 64, 128, 256)\n #DETECTION_NMS_THRESHOLD = 0.0\n \n # STEPS_PER_EPOCH should be the number of instances \n # divided by (GPU_COUNT*IMAGES_PER_GPU), and so should VALIDATION_STEPS;\n # however, due to the time limit, I set them so that this kernel can be run in 9 hours\n STEPS_PER_EPOCH = train_size/(GPU_COUNT*IMAGES_PER_GPU)#1000\n VALIDATION_STEPS = val_size/(GPU_COUNT*IMAGES_PER_GPU)#200\n \nconfig = FashionConfig()\nconfig.display()\n\n\n# # Make Datasets\n\n# In[10]:\n\n\nwith open(DATA_DIR/\"label_descriptions.json\") as f:\n label_descriptions = json.load(f)\n\nlabel_names = [x['name'] for x in label_descriptions['categories']]\n\n\n# In[11]:\n\n\n#segment_df = pd.read_csv(DATA_DIR/\"train.csv\")\n\nmultilabel_percent = len(segment_df[segment_df['ClassId'].str.contains('_')])/len(segment_df)*100\nprint(f\"Segments that have attributes: {multilabel_percent:.2f}%\")\n\n\n# Segments that contain attributes are only 3.46% of data, and [according to the host](https://www.kaggle.com/c/imaterialist-fashion-2019-FGVC6/discussion/90643#523135), 80% of images have no attribute. So, in the first step, we can only deal with categories to reduce the complexity of the task.\n\n# In[12]:\n\n\nsegment_df['CategoryId'] = segment_df['ClassId'].str.split('_').str[0]\n\nprint(\"Total segments: \", len(segment_df))\nsegment_df.head()\n\n\n# Rows with the same image are grouped together because the subsequent operations perform in an image level.\n\n# In[13]:\n\n\nimage_df = segment_df.groupby('ImageId')['EncodedPixels', 'CategoryId'].agg(lambda x: list(x))\nsize_df = segment_df.groupby('ImageId')['Height', 'Width'].mean()\nimage_df = image_df.join(size_df, on='ImageId')\n\nprint(\"Total images: \", len(image_df))\nimage_df.head()\n\n\n# Here is the custom function that resizes an image.\n\n# In[14]:\n\n\ndef resize_image(image_path):\n img = cv2.imread(image_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = cv2.resize(img, (IMAGE_SIZE, IMAGE_SIZE), interpolation=cv2.INTER_AREA) \n return img\n\n\n# The crucial part is to create a dataset for this task.\n\n# In[15]:\n\n\nclass FashionDataset(utils.Dataset):\n\n def __init__(self, df):\n super().__init__(self)\n \n # Add classes\n for i, name in enumerate(label_names):\n self.add_class(\"fashion\", i+1, name)\n \n # Add images \n for i, row in df.iterrows():\n self.add_image(\"fashion\", \n image_id=row.name, \n path=str(DATA_DIR/'train'/row.name), \n labels=row['CategoryId'],\n annotations=row['EncodedPixels'], \n height=row['Height'], width=row['Width'])\n\n def image_reference(self, image_id):\n info = self.image_info[image_id]\n return info['path'], [label_names[int(x)] for x in info['labels']]\n \n def load_image(self, image_id):\n return resize_image(self.image_info[image_id]['path'])\n\n def load_mask(self, image_id):\n info = self.image_info[image_id]\n \n mask = np.zeros((IMAGE_SIZE, IMAGE_SIZE, len(info['annotations'])), dtype=np.uint8)\n labels = []\n \n for m, (annotation, label) in enumerate(zip(info['annotations'], info['labels'])):\n sub_mask = np.full(info['height']*info['width'], 0, dtype=np.uint8)\n annotation = [int(x) for x in 
annotation.split(' ')]\n \n for i, start_pixel in enumerate(annotation[::2]):\n sub_mask[start_pixel: start_pixel+annotation[2*i+1]] = 1\n\n sub_mask = sub_mask.reshape((info['height'], info['width']), order='F')\n sub_mask = cv2.resize(sub_mask, (IMAGE_SIZE, IMAGE_SIZE), interpolation=cv2.INTER_NEAREST)\n \n mask[:, :, m] = sub_mask\n labels.append(int(label)+1)\n \n return mask, np.array(labels)\n\n\n# Let's visualize some random images and their masks.\n\n# In[16]:\n\n\n\ndf_catlist = segment_df.groupby('ImageId')['CategoryId'].agg(lambda x: sorted(set(x)))\n\ncategory_list = []\n\nfor i, row in df_catlist.iteritems():\n temp = sorted(set([label_descriptions['categories'][int(cat)]['supercategory'] for cat in row]))\n lowerhalf = 'legs and feet' in temp or 'lowerbody' in temp\n upperhalf = 'upperbody' in temp or 'wholebody' in temp\n label = 0 if lowerhalf and upperhalf else 1 \n category_list.append(label)\n\nskf = RepeatedStratifiedKFold(n_splits=N_FOLD, n_repeats=10)\nsplitted = skf.split(image_df, category_list)\n\ndef gen_dataset():\n train_index, val_index = next(splitted)\n train_df = image_df.iloc[train_index]\n valid_df = image_df.iloc[val_index]\n \n train_dataset = FashionDataset(train_df)\n train_dataset.prepare()\n\n valid_dataset = FashionDataset(valid_df)\n valid_dataset.prepare()\n return train_dataset, valid_dataset\n\n\n# Let's visualize class distributions of the train and validation data.\n\n# In[18]:\n\n\n\n\n# Note that any hyperparameters here, such as LR, may still not be optimal\nLR = np.array([1, 1/3., np.power(1/3,2), np.power(1/3,3)])*1e-4\nEPOCHS = [5, 10,15,20]\n\nimport warnings \nwarnings.filterwarnings(\"ignore\")\n\n\n# This section creates a Mask R-CNN model and specifies augmentations to be used.\n\n# In[30]:\n\n\nmodel = modellib.MaskRCNN(mode='training', config=config, model_dir=ROOT_DIR)\n\nmodel.load_weights(COCO_WEIGHTS_PATH, by_name=True, exclude=[\n 'mrcnn_class_logits', 'mrcnn_bbox_fc', 'mrcnn_bbox', 'mrcnn_mask'])\n\n\n# In[24]:\n\n\naugmentation = iaa.Sequential([\n iaa.Fliplr(0.5), # only horizontal flip here\n # rotate and translation\n iaa.Affine(\n scale={\"x\": (0.9, 1.1), \"y\": (0.9, 1.1)},\n translate_percent={\"x\": (-0.1, 0.1), \"y\": (-0.1, 0.1)},\n rotate=(-40, 40)),\n # crop\n #iaa.CropAndPad(percent=(-0.1, 0.1)),\n # drop out pixel up to 10%\n #iaa.Dropout([0.01, 0.1])\n])\n\n\n# In[ ]:\nfor lr, epoch in zip(LR, EPOCHS):\n train_dataset, valid_dataset = gen_dataset()\n model.train(train_dataset, valid_dataset,\n learning_rate=lr,\n epochs=epoch,\n layers='all',\n augmentation=augmentation)\n\n\nprint(\"Training Complete\")\n","sub_path":"samples/imaterialist.py","file_name":"imaterialist.py","file_ext":"py","file_size_in_byte":8781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"546332707","text":"\nimport os\n\n\ndef _get_modules():\n \"\"\"\n Get available API Modules\n :returns: List of project modules (url endpoint)\n :rtype: list\n \"\"\"\n\n moduleList = []\n for root, dirs, files in os.walk('service'):\n for folder in dirs:\n moduleList.append(folder)\n\n return moduleList\n\n\ndef _get_service_list(module):\n \"\"\"\n Get Available Service Module\n :param module: module name\n :type module: string\n :returns: List of services\n :rtype: list\n \"\"\"\n\n service_list = []\n for root, dirs, files in os.walk('service/' + module):\n for filen in files:\n service_list.append(filen.replace('.py',''))\n return service_list\n\n\ndef _validate_params(module, service, 
queryParams, eventBody, method):\n \"\"\"\n Validate param list\n :param module: api module list\n :type module: string\n :param service: service used\n :type service: string\n :param params: list of used request parameters\n :type params: list of string\n :returns: if param list complete\n :rtype: boolean\n \"\"\"\n\n service = __import__('service.' + module + '.' + service, fromlist=[service])\n for sparam in service.PARAMS['qparams']:\n if sparam not in queryParams:\n return False\n\n if method == 'POST':\n for sparam in service.PARAMS['rbody']:\n if sparam not in eventBody:\n return False\n\n return True\n","sub_path":"utility/request_validator.py","file_name":"request_validator.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"447558069","text":"# ||==============================================================||\n# ||\n# || Program/File: RoboticsDataServer.py\n# ||\n# || Description:\t\t\n# ||\n# || Author: Logan Wilkovich\n# || Email: LWilkovich@gmail.com\n# || Creation Date: 21 November 2018 | Logan Wilkovich\n# ||===============================================================||\n# ||===============================================================||\n# ||=======================||\n# This Must Be Executed First!\nimport os\nimport sys\npathname = os.path.dirname(sys.argv[0]) \nos.chdir(os.path.abspath(pathname))\n# ||=======================||\n# Routes\nfrom Library.Utils.RouteExtension import RouteExtension\n# Library/Network\nfrom NetworkServer import NetworkServer\nfrom FlaskServer import FlaskServer\n# Library/Utils\nfrom DebugLogger import DebugLogger\nfrom ConfigLoader import ConfigLoader\n# Premades\nfrom threading import Thread\nfrom time import sleep, time, strftime, localtime\nimport traceback\nimport json\nimport psutil\nimport ast\n# ||=======================||\n# Global Variables\nprocess = psutil.Process(os.getpid())\n# ||=======================||\n# Notes\n\n# ||=======================||\n# ||===============================================================||\n\nclass RoboticsServer(object):\n\tdef __init__(self):\n\t\tself.type = \"RoboticsServer\"\n\n\t\tself.active = False\n\n\t\tself.process = process\n\t\tself.PID = os.getpid()\n\t\tself.processMemorySize = 0\n\t\tself.cpuCount = 0\n\t\t# ||=======================||\n\t\t# Program Config Varaibles\n\t\tself.useNetworkServer = True\n\t\tself.useFlaskServer = True\n\t\t\n\t\t# ||=======================||\n\t\t# Program Classes\n\t\tself.networkServer = NetworkServer()\n\t\tself.flaskServer = FlaskServer()\n\n\t\tself.debugLogger = DebugLogger(self.type)\n\t\tconfigLoader = ConfigLoader(self.type, self.debugLogger)\n\t\tself.config = configLoader.getConfig()\n\n\t\tself.debugLogger.setMessageSettings(\n\t\t\tast.literal_eval(self.config[\"Debug\"]),\n\t\t\tast.literal_eval(self.config[\"Standard\"]),\n\t\t\tast.literal_eval(self.config[\"Warning\"]),\n\t\t\tast.literal_eval(self.config[\"Error\"]))\n\n\t\tself.debugLogger.setLogSettings(\n\t\t\tast.literal_eval(self.config[\"DebugLog\"]),\n\t\t\tast.literal_eval(self.config[\"StandardLog\"]),\n\t\t\tast.literal_eval(self.config[\"WarningLog\"]),\n\t\t\tast.literal_eval(self.config[\"ErrorLog\"]))\n\n\t\t# ||=======================||\n\t\t# Config \n\t\tself.debug = self.config[\"Debug\"]\n\t\tself.log = self.config[\"Log\"]\n\n# ||=======================================================================||\n\n\tdef main(self):\n\t\tlogMessage = \"Process 
Started\"\n\t\tself.debugLogger.log(\"Standard\", self.type, logMessage)\n\t\t\n\t\t# ||=======================||\n\t\t# Program Setup\n\t\tif (self.useNetworkServer):\n\t\t\ttry:\n\t\t\t\tself.networkServer.bindConnection()\n\n\t\t\t\tself.networkServerThread = Thread(target = self.networkServer.networkServer)\n\t\t\t\tself.networkServerThread.setDaemon(True)\n\t\t\t\tself.networkServerThread.start()\n\n\t\t\texcept Exception as e:\n\t\t\t\tlogMessage = \"Main (useNetworkServer): \" + str(e)\n\t\t\t\tself.debugLogger.log(\"Standard\", self.type, logMessage)\n\n\t\tif (self.useFlaskServer):\n\t\t\ttry: \n\t\t\t\tself.flaskServerThread = Thread(target = self.flaskServer.flaskServer)\n\t\t\t\tself.flaskServerThread.setDaemon(True)\n\t\t\t\tself.flaskServerThread.start()\n\n\t\t\texcept Exception as e:\n\t\t\t\tlogMessage = \"Main (useFlaskServer): \" + str(e)\n\t\t\t\tself.debugLogger.log(\"Standard\", self.type, logMessage)\n\t\t\t\n\t\ttry:\n\t\t\twhile True:\n\t\t\t\tsleep(1)\n\t\texcept KeyboardInterrupt as e:\n\t\t\tprint(end='\\r')\n\t\t\tself.networkServer.closeServer()\n\t\t\tlogMessage = \"Process Joined\"\n\t\t\tself.debugLogger.log(\"Standard\", self.type, logMessage)\n\t\t\treturn\n\n# ||=======================================================================||\n\ndef main():\n\trs = RoboticsServer()\n\trs.main()\n\nif __name__ == '__main__':\n\tmain()\n\n# ||=======================================================================||","sub_path":"Source/RoboticsServer.py","file_name":"RoboticsServer.py","file_ext":"py","file_size_in_byte":3837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"207343896","text":"class Solution(object):\n def twoSum(self, numbers, target):\n \"\"\"\n :type numbers: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n dict = {}\n k=0\n for i in numbers:\n x=i\n if target-x in dict:\n return (dict[target-x]+1,k+1)\n dict[x]=k\n k += 1\n\nif __name__ == \"__main__\":\n print (Solution().twoSum([2,7,11,15],18))","sub_path":"array/twoSum2.py","file_name":"twoSum2.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"14065830","text":"import os\nimport sys\nimport traceback\nfrom socket import *\nfrom threading import Thread\nimport requests\n\nip = '127.0.0.1'\nport = int(sys.argv[1])\nserver_sock = socket(AF_INET, SOCK_STREAM)\nserver_sock.bind((ip,port))\nprint('Server socket open...')\nprint('Listening...')\nserver_sock.listen(1)\n\ndef main(clnt_sock):\n data = clnt_sock.recv(1500) # receive the data including the (GET) header\n parse_data = (data.decode()).split(' ')\n #print(parse_data)\n file_name = parse_data[1] # parse out the file name\n if parse_data[0] == 'GET': # only handle GET requests\n if os.path.isfile('.'+file_name): # if the file exists\n f = open('.'+file_name, 'rb') # open that file\n f_read = f.read(1024) # read the first 1024 bytes of the file\n content_length = str(os.path.getsize('.'+file_name)).encode() + '\\n\\n'.encode()\n # get the file size to send along with the header\n clnt_sock.send('HTTP/1.1 200 OK\\n'.encode() + content_length + f_read)\n # the header consists of the HTTP message, the file size, and 1024 bytes of data.\n\n while True: # keep sending the file in 1024-byte chunks\n f_read = f.read(1024)# until there is no more data to read.\n if not f_read:\n break\n clnt_sock.send(f_read)\n f.close()\n clnt_sock.close()\n\n if 'mp3' in file_name: # if the file name contains mp3\n if os.path.isfile(file_name): # check the file name without the leading dot\n f = open(file_name, 'rb')\n f_read = f.read(1024)\n content_length = str(os.path.getsize(file_name)).encode() + '\\n\\n'.encode()\n 
clnt_sock.send('HTTP/1.1 200 OK\\n'.encode() + content_length + f_read)\n \n while True:\n f_read = f.read(1024)\n if not f_read:\n break\n clnt_sock.send(f_read)\n f.close()\n clnt_sock.close()\n\nwhile True:\n clnt_sock, addr = server_sock.accept()\n try:\n Thread(target=main, args=(clnt_sock,)).start()\n except:\n print(\"Thread did not start.\")\n traceback.print_exc()\n\nprint(\"Send Message back to client\")\n","sub_path":"2018/Computer Network/CN_201402448_한진영_10/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"321011702","text":"import pandas as pd\nimport numpy as np\nfrom pandas import datetime\nfrom matplotlib import pyplot as plt\nimport os\nfrom matplotlib import pyplot\n\nfrom statsmodels.tsa.arima_model import ARIMA\nfrom matplotlib import pyplot\nfrom pandas.plotting import autocorrelation_plot\nimport matplotlib.patches as mpatches\n#from pyramid.arima import auto_arima\n#from pmdarima.arima import auto_arima\nimport pyflux as pf\nfrom sklearn.cluster import KMeans\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import MinMaxScaler\nfrom statsmodels.graphics.tsaplots import plot_acf, plot_pacf\nimport statsmodels.api as sm\nfrom statsmodels.tsa.statespace.sarimax import SARIMAX\nimport math\nfrom sklearn.metrics import mean_squared_error\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\n\nenergy = pd.read_csv('MAC000046_With_Acorn.csv')\nar = energy['day'].tolist()\nenergy = energy.reset_index()\nenergy.day = pd.to_datetime(energy.day,format='%Y-%m-%d').dt.date\n\nenergy2 = pd.read_csv('MAC000216_With_Acorn.csv')\nenergy2 = energy2.loc[energy2['day'].isin(ar)]\nenergy2 = energy2.reset_index()\nenergy2.day = pd.to_datetime(energy2.day,format='%Y-%m-%d').dt.date\n\nenergy3 = pd.read_csv('MAC000213_With_Acorn.csv')\nenergy3 = energy3.loc[energy3['day'].isin(ar)]\nenergy3 = energy3.reset_index()\nenergy3.day = pd.to_datetime(energy3.day,format='%Y-%m-%d').dt.date\n\nweather = pd.read_csv('C:/Users/A02290684/Desktop/clean energy/Project/data/weather_daily_darksky.csv')\n\nweather['day']= pd.to_datetime(weather['time']) # day is given as timestamp\nweather['day']= pd.to_datetime(weather['day'],format='%Y%m%d').dt.date\n# selecting numeric variables\nweather = weather[['temperatureMax', 'windBearing', 'dewPoint', 'cloudCover', 'windSpeed',\n 'pressure', 'apparentTemperatureHigh', 'visibility', 'humidity',\n 'apparentTemperatureLow', 'apparentTemperatureMax', 'uvIndex',\n 'temperatureLow', 'temperatureMin', 'temperatureHigh',\n 'apparentTemperatureMin', 'moonPhase','day']]\nweather = weather.dropna()\n\nweather_energy = energy.merge(weather,on='day')\nweather_energy2 = energy2.merge(weather,on='day')\nweather_energy3 = energy3.merge(weather,on='day')\n\n'''clustering 1'''\nscaler = MinMaxScaler()\nweather_scaled = scaler.fit_transform(weather_energy[['temperatureMax','humidity','windSpeed']])\n\nNc = range(1, 20)\nkmeans = [KMeans(n_clusters=i) for i in Nc]\nkmeans\n\nscore = [kmeans[i].fit(weather_scaled).score(weather_scaled) for i in range(len(kmeans))]\n\nkmeans = KMeans(n_clusters=3, max_iter=600, algorithm = 'auto')\nkmeans.fit(weather_scaled)\nweather_energy['weather_cluster'] = kmeans.labels_\n\n'''clustering 2'''\nscaler = MinMaxScaler()\nweather_scaled2 = scaler.fit_transform(weather_energy2[['temperatureMax','humidity','windSpeed']])\n\nNc = range(1, 20)\nkmeans 
= [KMeans(n_clusters=i) for i in Nc]\nkmeans\n\nscore = [kmeans[i].fit(weather_scaled2).score(weather_scaled2) for i in range(len(kmeans))]\n\nkmeans = KMeans(n_clusters=3, max_iter=600, algorithm = 'auto')\nkmeans.fit(weather_scaled2)\nweather_energy2['weather_cluster'] = kmeans.labels_\n\n'''clustering 2'''\nscaler = MinMaxScaler()\nweather_scaled3 = scaler.fit_transform(weather_energy3[['temperatureMax','humidity','windSpeed']])\n\nNc = range(1, 20)\nkmeans = [KMeans(n_clusters=i) for i in Nc]\nkmeans\n\nscore = [kmeans[i].fit(weather_scaled3).score(weather_scaled3) for i in range(len(kmeans))]\n\nkmeans = KMeans(n_clusters=3, max_iter=600, algorithm = 'auto')\nkmeans.fit(weather_scaled3)\nweather_energy3['weather_cluster'] = kmeans.labels_\n\n'''adding holidays'''\nholiday = pd.read_csv('C:/Users/A02290684/Desktop/clean energy/Project/data/uk_bank_holidays.csv')\nholiday['Bank holidays'] = pd.to_datetime(holiday['Bank holidays'],format='%Y-%m-%d').dt.date\n\nweather_energy = weather_energy.merge(holiday, left_on = 'day',right_on = 'Bank holidays',how = 'left')\nweather_energy['holiday_ind'] = np.where(weather_energy['Bank holidays'].isna(),0,1)\n\n'''adding holidays 2'''\nholiday = pd.read_csv('C:/Users/A02290684/Desktop/clean energy/Project/data/uk_bank_holidays.csv')\nholiday['Bank holidays'] = pd.to_datetime(holiday['Bank holidays'],format='%Y-%m-%d').dt.date\n\nweather_energy2 = weather_energy2.merge(holiday, left_on = 'day',right_on = 'Bank holidays',how = 'left')\nweather_energy2['holiday_ind'] = np.where(weather_energy2['Bank holidays'].isna(),0,1)\n\n'''adding holidays 3'''\nholiday = pd.read_csv('C:/Users/A02290684/Desktop/clean energy/Project/data/uk_bank_holidays.csv')\nholiday['Bank holidays'] = pd.to_datetime(holiday['Bank holidays'],format='%Y-%m-%d').dt.date\n\nweather_energy3 = weather_energy3.merge(holiday, left_on = 'day',right_on = 'Bank holidays',how = 'left')\nweather_energy3['holiday_ind'] = np.where(weather_energy3['Bank holidays'].isna(),0,1)\n\n'''Training'''\nweather_energy['Year'] = pd.DatetimeIndex(weather_energy['day']).year\nweather_energy['Month'] = pd.DatetimeIndex(weather_energy['day']).month\nweather_energy.set_index(['day'],inplace=True)\n\n'''Training2'''\nweather_energy2['Year'] = pd.DatetimeIndex(weather_energy2['day']).year\nweather_energy2['Month'] = pd.DatetimeIndex(weather_energy2['day']).month\nweather_energy2.set_index(['day'],inplace=True)\n\n'''Training3'''\nweather_energy3['Year'] = pd.DatetimeIndex(weather_energy3['day']).year\nweather_energy3['Month'] = pd.DatetimeIndex(weather_energy3['day']).month\nweather_energy3.set_index(['day'],inplace=True)\n\n'''splitting'''\nmodel_data = weather_energy[['energy_sum','weather_cluster','holiday_ind','Acorn_value1','Acorn_value2','Acorn_value3','Acorn_value4']]\ntrain = model_data.iloc[0:(len(model_data)-30)]\ntest = model_data.iloc[len(train):(len(model_data)-1)]\n\n\n'''splitting2'''\nmodel_data2 = weather_energy2[['energy_sum','weather_cluster','holiday_ind','Acorn_value1','Acorn_value2','Acorn_value3','Acorn_value4']]\ntrain2 = model_data2.iloc[0:(len(model_data2)-30)]\ntest2 = model_data2.iloc[len(train2):(len(model_data2)-1)]\n\n'''splitting3'''\nmodel_data3 = weather_energy3[['energy_sum','weather_cluster','holiday_ind','Acorn_value1','Acorn_value2','Acorn_value3','Acorn_value4']]\ntrain3 = model_data3.iloc[0:(len(model_data3)-30)]\ntest3 = model_data3.iloc[len(train3):(len(model_data3)-1)]\n\nresult = [train,train2,train3]\ntrain = pd.concat(result)\nresult2 = [test,test2,test3]\ntest = 
pd.concat(result2)\n#print(train.head(10))\n\n'''test'''\nt = sm.tsa.adfuller(train.energy_sum, autolag='AIC')\n#pd.Series(t[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])\ndef difference(dataset, interval):\n diff = list()\n for i in range(interval, len(dataset)):\n value = dataset.iloc[i] - dataset.iloc[i - interval]\n diff.append(value)\n return diff\n\nt = sm.tsa.adfuller(difference(train.energy_sum,1), autolag='AIC')\n#pd.Series(t[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])\n\nendog = train['energy_sum']\nexog = sm.add_constant(train[['weather_cluster','holiday_ind','Acorn_value1','Acorn_value2','Acorn_value3','Acorn_value4']])\n\nmod = sm.tsa.statespace.SARIMAX(endog=endog, exog=exog, order=(7,1,1),seasonal_order=(1,1, 0, 12),trend='c')\nmodel_fit = mod.fit()\nmodel_fit.summary()\n\n#train['avg_energy'].plot(figsize=(25,10))\n#model_fit.fittedvalues.plot()\n#plt.show()\n\n'''Test Prediction'''\n# predict = model_fit.predict(start = len(train),end = len(train)+len(test)-1,exog = sm.add_constant(test[['weather_cluster','holiday_ind','Acorn_value1','Acorn_value2','Acorn_value3','Acorn_value3']]))\n# test['predicted'] = predict.values\n#\n# test = test.head(15)\n# test['energy_sum'].plot(figsize=(25,10),color = 'red')\n# test['predicted'].plot()\n# red_patch = mpatches.Patch(color='blue', label='Average Energy')\n# blue_patch = mpatches.Patch(color='red', label='Predicted Energy')\n# plt.legend(handles=[red_patch,blue_patch])\n# plt.ylabel(\"Energy Consumption\")\n# plt.xlabel(\"Day\")\n# plt.show()\n\n'''Train Prediction'''\n# predict = model_fit.predict(start = 0,end = len(train)-1,exog = sm.add_constant(train[['weather_cluster','holiday_ind','Acorn_value1','Acorn_value2','Acorn_value3','Acorn_value3']]))\n# train['predicted'] = predict.values\n# print(train.tail(8))\n# #train.to_csv(\"Acorn_merged_Prediction_Train.csv\")\n# train['energy_sum'].plot(figsize=(25,10),color = 'red')\n# train['predicted'].plot()\n# red_patch = mpatches.Patch(color='blue', label='Average Energy')\n# blue_patch = mpatches.Patch(color='red', label='Predicted Energy')\n# plt.legend(handles=[red_patch,blue_patch])\n# plt.ylabel(\"Energy Consumption\")\n# plt.xlabel(\"Day\")\n# plt.show()\n\n#\n'''Scatter Plot Actual Vs Predicted'''\n\npredict = model_fit.predict(start = 0,end = len(train)-1,exog = sm.add_constant(train[['weather_cluster','holiday_ind','Acorn_value1','Acorn_value2','Acorn_value3','Acorn_value4']]))\ntrain['predicted'] = predict.values\n\nans1 = train.loc[train['Acorn_value4']==76]\nans2 = train.loc[train['Acorn_value4']==50]\nans3 = train.loc[train['Acorn_value4']==114]\n\nplt.scatter(ans1['energy_sum'],ans1['predicted'],s=5,color='blue',)\nplt.scatter(ans2['energy_sum'],ans2['predicted'],s=5,color='green',)\nplt.scatter(ans3['energy_sum'],ans3['predicted'],s=5,color='orange',)\ngreen_patch = mpatches.Patch(color='green', label='House216')\nblue_patch = mpatches.Patch(color='blue', label='House046')\norange_patch = mpatches.Patch(color='orange', label='House213')\nplt.ylim(ymax=60)\nplt.ylim(ymin=0)\nplt.yticks(np.arange(0,70,10))\nplt.legend(handles=[green_patch,blue_patch,orange_patch])\nplt.ylabel(\"Predicted values\")\nplt.xlabel(\"Actual values\")\nplt.title(\"Actual Values Vs Predicted Values For houses of different 
Acorns\")\nplt.show()\n\n\n","sub_path":"Acorn_Merger_Prediction_3_Houses.py","file_name":"Acorn_Merger_Prediction_3_Houses.py","file_ext":"py","file_size_in_byte":9660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"252554703","text":"import numpy as np\nfrom os.path import isfile\nimport csv\n\nwith open('labels') as f:\n reader = csv.reader(f, delimiter='\\n')\n labels = np.array([each for each in reader if len(each) > 0]).squeeze()\n labels = labels[:-1] #last image was not processed so dropping the corresponding label\n labels = labels.reshape(len(labels),1)\nprint(len(labels))\n\nprint(isfile(\"codes\"))\nif isfile(\"codes\"):\n print(\"codes already exist, loading them now...\")\n with open(\"codes\") as f:\n codes = np.fromfile(f, dtype=np.float32)\n codes = codes.reshape((len(labels), -1))\nprint(len(codes))\n","sub_path":"dog_app/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"49492461","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport time\n\n\nimport glob\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras.models import model_from_json\nfrom keras.callbacks import Callback\nfrom .util import inception_preprocess, preprocess, recompone_overlap\nfrom .model import BilinearUpsampling\nfrom .config import process_config\nfrom .trainer import DataGenerator\nfrom keras import backend as K\nfrom PIL import Image\nimport random\nimport tensorflow as tf\n\n\nclass Inference():\n def __init__(self):\n self.config = process_config('./algorithms/surface/configs.json')\n \n import tensorflow as tf\n from keras.backend.tensorflow_backend import set_session\n\n tfconfig = tf.ConfigProto()\n tfconfig.gpu_options.allocator_type = 'BFC'\n tfconfig.gpu_options.per_process_gpu_memory_fraction = 0.3\n tfconfig.gpu_options.allow_growth = True\n set_session(tf.Session(config=tfconfig))\n\n self.graph = tf.get_default_graph()\n\n self.load_model()\n\n def analyze_name(self,path):\n name = os.path.split(path)[1]\n name = os.path.splitext(name)[0]\n return name\n\n def load_model(self):\n start = time.time()\n self.load_cornea_model()\n print('Loading cornea segmentation model with {}s'.format(time.time()-start))\n start = time.time()\n self.load_ulcer_model()\n print('Loading ulcer segmentation model with {}s'.format(time.time()-start))\n\n def load_cornea_model(self):\n self.cornea_model = model_from_json(open('./algorithms/surface/model/cornea/architecture.json').read(), custom_objects={'BilinearUpsampling': BilinearUpsampling})\n self.cornea_model.load_weights('./algorithms/surface/model/cornea/best_weights.h5', by_name=True)\n\n def load_ulcer_model(self):\n self.ulcer_model = model_from_json(open('./algorithms/surface/model/ulcer/architecture.json').read(), custom_objects={'BilinearUpsampling': BilinearUpsampling})\n self.ulcer_model.load_weights('./algorithms/surface/model/ulcer/best_weights.h5', by_name=True)\n\n def predict(self, input_path, output_path,):\n\n with self.graph.as_default():\n start = time.time()\n # BGR -> RGB\n self.raw = cv2.imread(input_path)\n self.raw = self.raw[:,:,::-1]\n self.cornea = self.predict_cornea()\n self.ulcer = self.predict_ulcer()\n masked = self.mask()\n id = self.analyze_name(input_path) + '.png'\n output_path = os.path.join(output_path, id)\n cv2.imwrite(output_path, masked.astype(np.uint8))\n print('Process {} with {}s'.format(id, 
time.time()-start))\n\n return output_path\n\n def predict_cornea(self):\n # resize\n if self.raw.shape[0] != self.config.cornea_height:\n raw = cv2.resize(self.raw, (self.config.cornea_width, self.config.cornea_height), interpolation=cv2.INTER_AREA)\n # preprocess\n input = np.expand_dims(raw, axis=0)\n input = inception_preprocess(input)\n # predict\n predictions = self.cornea_model.predict(input, batch_size=1, verbose=1)\n # binarized\n probResult = np.reshape(predictions[:,:,0], (self.config.cornea_height, self.config.cornea_width, 1))\n binaryResult = ((probResult>=0.5)).astype(np.uint8)*255\n binaryResult = cv2.resize(binaryResult, (self.config.ulcer_width, self.config.ulcer_height), interpolation=cv2.INTER_AREA)\n # ellipse fitting\n ret, thresh = cv2.threshold(binaryResult, 127, 255, 0)\n _, contours, hierarchy = cv2.findContours(thresh, 1, 2)\n _ellipse = cv2.fitEllipse(contours[0])\n ellipse = np.zeros((self.config.ulcer_height, self.config.ulcer_width, 1), dtype=np.uint8)\n cv2.ellipse(ellipse, _ellipse, 255, -1)\n\n return ellipse\n\n def predict_ulcer(self):\n # resize\n if self.raw.shape[0] != self.config.ulcer_height:\n raw = cv2.resize(self.raw, (self.config.ulcer_width, self.config.ulcer_height), interpolation=cv2.INTER_AREA)\n # FOV\n cornea = np.broadcast_to(self.cornea, (self.config.ulcer_height, self.config.ulcer_width, 3))\n raw[cornea==0] = 0\n # crop\n raw = np.reshape(raw, (1, self.config.ulcer_height, self.config.ulcer_width, 3))\n datagen = DataGenerator(config=self.config, test_data=raw)\n test_img_patches, new_height, new_width = datagen._test_data()\n # predict\n predictions = self.ulcer_model.predict(test_img_patches, batch_size=self.config.batch_size, verbose=1)\n # splice\n pred_imgs = recompone_overlap(predictions, self.config, new_height, new_width)\n pred_imgs = pred_imgs[:,0:self.config.ulcer_height,0:self.config.ulcer_width,:]\n\n probResult=1-pred_imgs[0,:,:,0]\n binaryResult = ((probResult>=0.5)).astype(np.uint8)*255\n binaryResult = np.reshape(binaryResult, (self.config.ulcer_height, self.config.ulcer_width, 1))\n\n return binaryResult\n\n def mask(self): \n # resize\n origin = self.raw[:,:,::-1]\n origin = cv2.resize(origin, (self.config.ulcer_width, self.config.ulcer_height), interpolation=cv2.INTER_AREA)\n # draw a mask for a cornea\n ret, thresh = cv2.threshold(self.cornea, 127, 255, 0)\n image, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n img = cv2.drawContours(origin, contours, -1, (0,0,255), 3)\n # draw a mask for ulcer\n ret, thresh = cv2.threshold(self.ulcer, 127, 255, 0)\n image, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n res = cv2.drawContours(img, contours, -1, (255,0,0), 3)\n \n return res\n'''\ndef test():\n infer = Inference()\n input_path = './input/9.jpg'\n output_path = './output'\n path = infer.predict(input_path, output_path)\n\n print(path)\n\nif __name__ == '__main__':\n test()\n'''\n","sub_path":"algorithm/surface/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":5988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"649762475","text":"# You're given an integer N. 
Write a program to check whether the integer is EVEN or ODD.\n\n# number of test cases\nt = int(input())\n\nfor _ in range(t):\n\t# input the number\n\tn = int(input())\n\n\t# check whether even or odd\n\tif (n%2 == 0):\n\t\tprint(\"Even\")\n\telse:\n\t\tprint(\"Odd\")","sub_path":"Python/Check Even or Odd.py","file_name":"Check Even or Odd.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"556862992","text":"\"\"\"PCX URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path,include\n\nfrom homepage import views as homeviews\nfrom user import views as userviews\nfrom checkout import views as chkviews\nfrom wishlist import views as w_views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('',include('homepage.urls')),\n path('user/',include('user.urls')),\n path('checkout/',include('checkout.urls')),\n path('product/',include('product.urls')),\n path('product/',homeviews.product_view,name='product_full'),\n path('user/accounts/',include('allauth.urls')),\n path('login/',userviews.login_form,name='login'),\n path('signup/',userviews.signup_form,name='signup'),\n path('logout/',userviews.log_out,name='logout'),\n path('cart/',chkviews.cart, name='show_cart'),\n path('w_list/',w_views.view_wishlist, name='show_wishlist'),\n path('restapi/product/', include('product.restapi.urls','products_api')),\n path('search/',homeviews.product_search,name='search'),\n]","sub_path":"PCX/PCX/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"202630847","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.views.generic import DetailView, ListView\nfrom django.views.generic.edit import UpdateView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.urls import reverse\nfrom django.forms.models import model_to_dict\n\nfrom users.models import CustomUser\nfrom posts.models import Post\nfrom users.forms import ProfileForm\n\n\nclass UsersList(ListView):\n \"\"\"Display all users list.\"\"\"\n\n model = CustomUser\n paginate_by = 10\n template_name = 'users/users.html'\n\n def get_queryset(self):\n return CustomUser.objects.exclude(pk=self.request.user.id)\n\n\nclass UserDisplay(DetailView):\n \"\"\"Display user page with his posts.\"\"\"\n\n model = CustomUser\n template_name = 'users/user.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n user_id = self.object.pk\n me = self.request.user\n posts = Post.objects.filter(author=CustomUser.objects.get(pk=user_id))\n context['posts'] = posts\n context['user'] = get_object_or_404(CustomUser, id=user_id)\n context['is_friend'] = me.is_authenticated and me.friends.filter(pk=user_id).exists()\n context['my_id'] = me.id\n return 
context\n\n\nclass UserAddToFriends(LoginRequiredMixin, UpdateView):\n \"\"\"Adds user to friends.\"\"\"\n\n model = CustomUser\n fields = []\n\n def form_valid(self, form):\n user = self.object\n me = get_object_or_404(CustomUser, id=self.request.user.id)\n me.friends.add(user)\n return redirect('user', pk=user.pk)\n\n\nclass UserRemoveFromFriends(LoginRequiredMixin, UpdateView):\n \"\"\"Remove user from friends.\"\"\"\n\n model = CustomUser\n fields = []\n\n def form_valid(self, form):\n user = self.object\n me = get_object_or_404(CustomUser, id=self.request.user.id)\n me.friends.remove(user)\n return redirect('user', pk=user.pk)\n\n\nclass UserUpdate(LoginRequiredMixin, UpdateView):\n \"\"\"Updates user profile.\"\"\"\n\n model = CustomUser\n form = ProfileForm\n fields = ['name', 'description', 'image']\n template_name = 'users/profile.html'\n\n def get(self, request, *args, **kwargs):\n user = get_object_or_404(CustomUser, id=request.user.id)\n form = self.form(initial=model_to_dict(user))\n return render(request, self.template_name, {'form': form})\n\n def post(self, request, *args, **kwargs):\n form = self.form(request.POST, request.FILES, instance=request.user)\n if form.is_valid():\n form.save()\n return render(request, self.template_name, {'form': form})\n return render(\n request, self.template_name, {'form': form, 'pk': request.user.pk},\n )\n","sub_path":"transcendence/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"113940750","text":"import sqlite3\n\n\nclass DatabaseManager:\n def __init__(self, path):\n self.connection = sqlite3.connect(path)\n cursor = self.connection.cursor()\n cursor.execute(\"CREATE TABLE IF NOT EXISTS unfollowed (id INTEGER)\")\n cursor.execute(\"CREATE TABLE IF NOT EXISTS unremoved (id INTEGER)\")\n cursor.execute(\"CREATE TABLE IF NOT EXISTS removed (id INTEGER PRIMARY KEY, json TEXT)\")\n cursor.execute(\"CREATE TABLE IF NOT EXISTS friends (id INTEGER PRIMARY KEY, json TEXT)\")\n self.connection.commit()\n\n def update_queue(self, table, ids):\n cursor = self.connection.cursor()\n cursor.execute(\"DELETE FROM {table};\".format(table=table))\n cursor.executemany(\"INSERT INTO {table} (id) VALUES (?);\".format(table=table), [(user_id,) for user_id in ids])\n self.connection.commit()\n\n def get_queue(self, table):\n cursor = self.connection.cursor()\n rows = cursor.execute(\"SELECT id FROM {table}\".format(table=table))\n return [row[0] for row in rows]\n\n def close(self):\n self.connection.close()\n\n\nif __name__ == '__main__':\n db = DatabaseManager(\"test.db\")\n db.update_queue(\"unfollowed\", [10000000090, 10000000091])\n print(db.get_queue(\"unfollowed\"))\n db.close()\n","sub_path":"DatabaseManager.py","file_name":"DatabaseManager.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"379731643","text":"cars=['Ferrari', 'Fiat Panda', 'Fiat Panda 4*4', 'Skoda Felicia Fun']\n\n\n\nmax_size_list=len(cars)\ncounter=-1\nnumber=-1\n\n\nwhile counter right[j]:\n temp.append(right[j])\n j += 1\n elif left[i] == right[j]:\n temp.append(left[i])\n temp.append(right[j])\n i+=1\n j+=1\n else:\n temp.append(left[i])\n i+=1\n\n while (imaximum:\n# maximum = n\n# if n not in dict1:\n# dict1[n] = [i]\n# else:\n# dict1[n].append(i)\n# if len(dict1[maximum])<2:\n# print(dict1[maximum][0])\n# else:\n# 
print(merge_sort(dict1[maximum])[1])\n\ndef mode(stats):\n from collections import Counter\n a = Counter(stats)\n a = sorted(a.most_common(), key = lambda x:(-x[1], x[0]))\n if len(a)>=2:\n print(a[1][0] if a[0][1] == a[1][1] else a[0][0])\n else:\n print(a[0][0])\n\nimport sys\nnum = [int(sys.stdin.readline()) for i in range(int(sys.stdin.readline().strip()))]\nsort_stat = merge_sort(num)\nprint(round((sum(sort_stat))/(len(sort_stat))))\nprint(sort_stat[len(sort_stat)//2])\nmode(sort_stat)\nif len(sort_stat) == 1:\n print(0)\nelse:\n print(sort_stat[-1] - sort_stat[0])\n\n\n\n\n","sub_path":"정렬/통계학/statistics_이정엽 복사본.py","file_name":"statistics_이정엽 복사본.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"496970870","text":"from bs4 import BeautifulSoup\nimport requests\nfrom tkinter import *\nfrom tkinter import filedialog\nimport lxml\nimport os\n\nbase = Tk()\nbase.geometry('150x40')\n\n\n#Before running get the following libraries:\n# requests\n# lxml\n# os\n# BeautifulSoup\n\n# Also before running, make sure you go to pycharm console and type the following command:\n# pip install future\n# This will let you use the tkinter library\n\n\n#Given a degree audit file in html and the set of Classes you want, you will get a set returned which will include all the classes you want\n#If you want classes previously taken: use time = \"TAKEN\"\n#If you want classes in progress: use time = \"IP\"\n#If you want classes you have scheduled: use time = \"FUTURE\"\ndef getCourseSet(fileK, time):\n\n soup = BeautifulSoup(fileK, \"lxml\")\n\n\n setOfCoursesIP = set()\n setOfCoursesFuture = set()\n setOfCoursesTaken = set()\n setOfCoursesRemaining = set()\n\n currentTerm = \"AU20\"\n\n if(time != \"REMAINING\"):\n trList = soup.find_all('tr', {'class': \"takenCourse\"})\n for setTr in trList:\n list1 = setTr.findChildren()\n if (termComparator(currentTerm, list1[0].text.strip()) == \"=\"):\n addToSet(setOfCoursesIP, list1[1].text.strip())\n elif (termComparator(currentTerm, list1[0].text.strip()) == \"<\"):\n addToSet(setOfCoursesFuture, list1[1].text.strip())\n else:\n addToSet(setOfCoursesTaken, list1[1].text.strip())\n if(time == \"TAKEN\"):\n return setOfCoursesTaken\n elif(time == \"IP\"):\n return setOfCoursesIP\n elif(time == \"FUTURE\"):\n return setOfCoursesFuture\n else:\n raise Exception(\"This is an invalid set of Class Selection: Choose from TAKEN, IP or FUTURE\")\n else:\n trList = soup.find_all('td',{'class':\"fromcourselist\"})\n for setTr in trList:\n list1 = setTr.findChildren()\n for x in list1:\n #print(x['class'][0])\n if(x['class'][0]!=\"number\"):\n string = x['department'] + \" \" + x['number']\n addToSet(setOfCoursesRemaining,string)\n return setOfCoursesRemaining\n\n\n\n\n\n#term 1 should generally be current term\n#returns > if term1 > term2\n# < if term 1 < term 2\n# = if term 1 = term 2\ndef termComparator(term1, term2):\n temp1 = term1[2:len(term1)]\n temp2 = term2[2:len(term2)]\n if(temp1 == temp2):\n if(term1[0:2]==\"AU\" and term2[0:2]==\"SP\"):\n return \">\"\n elif(term1[0:2]==\"SP\" and term2[0:2]==\"AU\"):\n return \"<\"\n else:\n return \"=\"\n elif(temp1 > temp2):\n return \">\"\n elif(temp1 < temp2):\n return \"<\"\n\n#given a setA and course, adds course if it hasn't already been added to setA\ndef addToSet(setA,course):\n bol = True\n for k in setA:\n if(k==course):\n bol = False\n if(bol == True):\n setA.add(course)\n return\n\n# MAIN Method below: Above are 
methods\n\n#with filedialog.askopenfile(initialdir=\"/\") as input:\n# fileK = open(input.name)\nfileK = open(\"DegAudit.html\")\nl = getCourseSet(fileK,\"REMAINING\")\n\nfor k in l:\n print(k)\n","sub_path":"audit.py","file_name":"audit.py","file_ext":"py","file_size_in_byte":3181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"189698337","text":"# coding: utf-8\n\"\"\"\nThis modules holds methods for generating predictions from a model.\n\"\"\"\nimport os\nimport sys\nfrom typing import List, Optional\nfrom logging import Logger\nimport numpy as np\n\nimport torch\nfrom torchtext.data import Dataset, Field\n\nfrom speechjoey.helpers import load_config, make_logger,\\\n get_latest_checkpoint, load_checkpoint\nfrom speechjoey.model import build_model, Model\nfrom speechjoey.speech_model import build_speech_model, SpeechModel\nfrom speechjoey.batch import Batch\nfrom speechjoey.data import load_data, load_audio_data, make_data_iter\nfrom speechjoey.constants import UNK_TOKEN, PAD_TOKEN, EOS_TOKEN\nfrom speechjoey.loss import XentLoss\n\n# pylint: disable=too-many-arguments,too-many-locals,no-member\n\n\ndef generate_perplexities_on_data(model: Model, data: Dataset,\n logger: Logger,\n use_cuda: bool, max_output_length: int,\n loss_function: torch.nn.Module = None,\n ) \\\n -> List[float]:\n \"\"\"\n Generate a list of perplexities for every data example\n in given data, by validating on them.\n\n :param model: model module\n :param logger: logger\n :param data: dataset for validation\n :param use_cuda: if True, use CUDA\n :param max_output_length: maximum length for generated hypotheses\n :param loss_function: loss function that computes a scalar loss\n for given inputs and targets\n\n :return:\n - ppls_list: List of ppls results on data examples,\n \"\"\"\n\n valid_iter = make_data_iter(\n dataset=data, batch_size=1, batch_type=\"sentence\",\n shuffle=False, train=False)\n valid_sources_raw = data.src\n pad_index = model.src_vocab.stoi[PAD_TOKEN]\n # disable dropout\n model.eval()\n # don't track gradients during validation\n with torch.no_grad():\n ppls_list = []\n logger.info(\"Starting train data validation\")\n for i, valid_batch in enumerate(iter(valid_iter)):\n # run as during training to get validation loss (e.g. xent)\n\n if i % 1000 == 0:\n logger.info(\"{} sentences done\".format(str(i)))\n\n batch = Batch(valid_batch, pad_index, use_cuda=use_cuda)\n # sort batch now by src length and keep track of order\n\n # run as during training with teacher forcing\n if loss_function is not None and batch.trg is not None:\n batch_loss = model.get_loss_for_batch(\n batch, loss_function=loss_function)\n current_loss = batch_loss\n current_ntokens = batch.ntokens\n current_ppl = torch.exp(current_loss / current_ntokens)\n ppls_list.append(float(current_ppl))\n\n logger.info(\"Done with all {} sentences\".format(i + 1))\n\n return ppls_list\n\n\n# pylint: disable-msg=logging-too-many-args\ndef filter_noise(cfg_file,\n ckpt: str,\n output_path: str = None,\n logger: Logger = None) -> None:\n \"\"\"\n Main test function. 
Handles loading a model from checkpoint, generating\n translations and storing them and attention plots.\n\n :param cfg_file: path to configuration file\n :param ckpt: path to checkpoint to load\n :param output_path: path to output\n :param logger: log output to this logger (creates new logger if not set)\n \"\"\"\n\n if logger is None:\n logger = make_logger()\n\n cfg = load_config(cfg_file)\n\n # when checkpoint is not specified, take latest (best) from model dir\n if ckpt is None:\n model_dir = cfg[\"training\"][\"model_dir\"]\n ckpt = get_latest_checkpoint(model_dir)\n if ckpt is None:\n raise FileNotFoundError(\"No checkpoint found in directory {}.\"\n .format(model_dir))\n try:\n step = ckpt.split(model_dir + \"/\")[1].split(\".ckpt\")[0]\n except IndexError:\n step = \"best\"\n\n use_cuda = cfg[\"training\"].get(\"use_cuda\", False)\n max_output_length = cfg[\"training\"].get(\"max_output_length\", None)\n\n # load the data\n if cfg.get(\"speech\", True):\n train_data, _, _, src_vocab, trg_vocab = load_audio_data(\n cfg=cfg)\n else:\n train_data, _, _, src_vocab, trg_vocab = load_data(\n data_cfg=cfg[\"data\"])\n\n data_to_predict = (\"train\", train_data)\n\n # load model state from disk\n model_checkpoint = load_checkpoint(ckpt, use_cuda=use_cuda)\n\n # build model and load parameters into it\n if cfg.get(\"speech\", True):\n model = build_speech_model(\n cfg[\"model\"], src_vocab=src_vocab, trg_vocab=trg_vocab)\n else:\n model = build_model(\n cfg[\"model\"], src_vocab=src_vocab, trg_vocab=trg_vocab)\n model.load_state_dict(model_checkpoint[\"model_state\"])\n\n if use_cuda:\n model.cuda()\n\n pad_index = model.pad_index\n label_smoothing = 0.0\n loss_function = XentLoss(pad_index=pad_index,\n smoothing=label_smoothing)\n\n data_set_name, data_set = data_to_predict\n\n #pylint: disable=unused-variable\n ppls_list = generate_perplexities_on_data(\n model, data=data_set, max_output_length=max_output_length,\n use_cuda=use_cuda, loss_function=loss_function,\n logger=logger)\n #pylint: enable=unused-variable\n\n if output_path is None:\n raise ValueError(\"Output path must be specified\")\n\n else:\n if not os.path.isdir(output_path):\n os.makedirs(output_path)\n output_path_set = os.path.join(\n output_path, data_set_name + \"_perplexities.txt\")\n with open(output_path_set, \"w\") as outfile:\n first_iteration = True\n for ppls in ppls_list:\n if not first_iteration:\n outfile.write(\"\\n\")\n outfile.write(str(ppls))\n first_iteration = False\n\n logger.info(\"Perplexities saved to: %s\", output_path_set)\n","sub_path":"speechjoey/filtering.py","file_name":"filtering.py","file_ext":"py","file_size_in_byte":5996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"6235663","text":"class Solution:\n def firstMissingPositive(self, nums):\n nums.sort()\n length = len(nums)\n try:\n pos = nums.index(1)\n while True:\n if nums[pos+1]-nums[pos] == 1 or nums[pos+1]-nums[pos] == 0:\n pos += 1\n if pos+1 == length:\n return nums[pos]+1\n continue\n else:\n return nums[pos]+1\n except ValueError:\n return 1\nnums = [7,8,9,11,12]\nresult = Solution().firstMissingPositive(nums=nums)\nprint(result)","sub_path":"41. 缺失的第一个正数.py","file_name":"41. 
缺失的第一个正数.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"522530866","text":"# -*- coding: UTF-8 -*-\n'''\ntest save in a redis not in the cluster_redis\n'''\nimport sys\nimport csv\nimport json\nimport time\nimport redis\nreload(sys)\nsys.path.append('../../')\nfrom time_utils import ts2datetime, datetime2ts\nfrom global_utils import R_CLUSTER_FLOW2 as r_cluster\n\n\n\n# save redis as Date:{uid1:'{str(at_uid1):count1,... }', uid2:'str(at_uid2):count2,...'}\ndef save_at(uid, at_uid, timestamp):\n date = ts2datetime(timestamp)\n ts = datetime2ts(date)\n key = str(uid)\n try:\n ruid_count_string = r_cluster.hget('at_'+str(ts), str(uid))\n ruid_count_dict = json.loads(ruid_count_string)\n try:\n ruid_count_dict[str(at_uid)] += 1\n except:\n ruid_count_dict[str(at_uid)] = 1\n r_cluster.hset('at_'+str(ts), str(uid), json.dumps(ruid_count_dict))\n except:\n r_cluster.hset('at_'+str(ts), str(uid), json.dumps({str(at_uid):1}))\n\n\n#abandon in version-15-12-08\n'''\n# save redis as Date:{uid1:'{ip:count...}', uid2:'{ip:count....}'}\ndef save_city(uid, ip, timestamp):\n date = ts2datetime(timestamp)\n ts = datetime2ts(date)\n key = str(uid)\n try:\n ip_count_string = r_cluster.hget('ip_'+str(ts), str(uid))\n ip_count_dict = json.loads(ip_count_string)\n try:\n ip_count_dict[str(ip)] += 1\n except:\n ip_count_dict[str(ip)] = 1\n r_cluster.hset('ip_'+str(ts), str(uid), json.dumps(ip_count_dict))\n except:\n r_cluster.hset('ip_'+str(ts), str(uid), json.dumps({str(ip):1}))\n'''\n\n#save redis as {date:{uid:'{ip:'timestamp1×tamp2'}'}}\ndef save_city_timestamp(uid, ip, timestamp):\n date = ts2datetime(timestamp)\n ts = datetime2ts(date)\n try:\n ip_timestamp_string = r_cluster.hget('new_ip_'+str(ts), str(uid))\n ip_timestamp_string_dict = json.loads(ip_timestamp_string)\n try:\n add_string = '&'+str(timestamp)\n ip_timestamp_string_dict[str(ip)] += add_string\n except:\n ip_timestamp_string_dict[str(ip)] = str(timestamp)\n r_cluster.hset('new_ip_'+str(ts), str(uid), json.dumps(ip_timestamp_string_dict))\n\n except:\n r_cluster.hset('new_ip_'+str(ts), str(uid), json.dumps({str(ip): str(timestamp)}))\n \n\n# save redis as 'activity_' + Date:{uid1:'{}', uid2:'{}'} \ndef save_activity(uid, ts, time_segment):\n key = str(ts)\n try:\n activity_count_dict = r_cluster.hget('activity_' + key, str(uid))\n activity_count_dict = json.loads(activity_count_dict)\n try:\n activity_count_dict[str(time_segment)] += 1\n except:\n activity_count_dict[str(time_segment)] = 1\n r_cluster.hset('activity_' + key, str(uid), json.dumps(activity_count_dict))\n except:\n r_cluster.hset('activity_' + key, str(uid), json.dumps({str(time_segment): 1}))\n\n","sub_path":"knowledge/cron/flow2/test_save_attribute.py","file_name":"test_save_attribute.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"340217075","text":"from collections import Counter\nimport numpy as np\nfrom sklearn.datasets import fetch_20newsgroups\nimport tensorflow as tf\nimport pandas as pd\n\n# 导入sklearn集合中的数据集,有监督学习,即里面的数据已经分好了类别\n# 具体参见http://scikit-learn.org/stable/datasets/twenty_newsgroups.html\ncategories = [\"comp.graphics\",\"sci.space\",\"rec.sport.baseball\"]\ntrain_set = fetch_20newsgroups(subset='train',categories=categories)\ntest_set = fetch_20newsgroups(subset='test', categories=categories)\n# print('total texts in train:',len(train_set.data))\n# 
print('total texts in test:',len(test_set.data))\n# 建立数据集单词字典,最终形式是text_index['the'] = 数量\nvocab = Counter()\nfor data in train_set.data:\n for word in data.split(' '):\n vocab[word.lower()] += 1\n\nfor test_data in test_set.data:\n for word in test_data.split(' '):\n vocab[word] += 1\nprint(len(vocab))\ntotal_words = len(vocab)\n\ndef get_index(vocab):\n # 先声明word是字典,否则word[element]报错\n word={}\n for i, element in enumerate(vocab):\n word[element.lower()] = i\n return word\n\ntext_index = get_index(vocab)\n\nprint(\"the is %s\" % text_index['the'])\n\n# 每层神经元数,包括输入神经元,隐藏神经元,输出神经元\"comp.graphics\",\"sci.space\",\"rec.sport.baseball\"\nn_hidden1 = 100\nn_hiddent2 = 100\nn_input_number = total_words\nn_class = 3\n\n# 在 神经网络的术语里,一次 epoch = 一个向前传递(得到输出的值)和一个所有训练示例的向后传递(更新权重)。\nlearning_rate = 0.01\nbatch_size = 150\ntraining_epochs = 10\ndisplay_step = 1\n\n\n\n# shape的None 元素对应于大小可变的维度在测试模型时,我们将用更大的批处理来提供字典,\n# 这就是为什么需要定义一个可变的批处理维度。\ninput_tensor = tf.placeholder(tf.float32, [None, n_input_number], name='input')\noutput_tensor = tf.placeholder(tf.float32, [None, n_class], name='output')\n\n# 神经元计算\ndef out_prediction(input_tensor, weights, biases):\n # 定义乘法运算矩阵乘法\n # relu是激活函数\n layer_1_multiplication = tf.matmul(input_tensor,weights['h1'])\n layer_1_addition = tf.add(layer_1_multiplication, biases['b1'])\n layer_1_activation = tf.nn.relu(layer_1_addition)\n\n layer_2_multiplication = tf.matmul(layer_1_activation, weights['h2'])\n layer_2_addition = tf.add(layer_2_multiplication, biases['b2'])\n layer_2_activation = tf.nn.relu(layer_2_addition)\n\n out_layer_multiplication = tf.matmul(layer_2_activation, weights['out'])\n out_layer_addition = out_layer_multiplication + biases['out']\n\n return out_layer_addition\n# shape参数含义;[]表示一个数,[3]表示长为3的向量,\n# [2,3]表示矩阵或者张量(tensor)同一个线性变换在不同的基下的表示\n# https://www.zhihu.com/question/20695804\n# 利用正态分布启动权值和偏差值\nweights = {\n 'h1':tf.Variable(tf.random_normal([n_input_number, n_hidden1])),\n 'h2':tf.Variable(tf.random_normal([n_hidden1, n_hiddent2])),\n 'out':tf.Variable(tf.random_normal([n_hiddent2, n_class]))\n}\nbiases = {\n 'b1':tf.Variable(tf.random_normal([n_hidden1])),\n 'b2':tf.Variable(tf.random_normal([n_hiddent2])),\n 'out':tf.Variable(tf.random_normal([n_class]))\n}\n\n\nprediction = out_prediction(input_tensor, weights, biases)\n\n# 由于分类问题,所以使用交叉熵误差进行优化,不断更新权值和output_tensor\ncross_loss = tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=output_tensor)\nloss = tf.reduce_mean(cross_loss)\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\n\n# 数据初始化\ninit = tf.global_variables_initializer()\n\n# 批处理数据函数\ndef get_batch(df, i, batch_size):\n batches = []\n results = []\n texts = df.data[i * batch_size:i * batch_size + batch_size]\n categories = df.target[i * batch_size:i * batch_size + batch_size]\n# 构建矩阵索引\n for text in texts:\n layer = np.zeros(total_words, dtype=float)\n for word in text.split(' '):\n layer[text_index[word.lower()]] += 1\n\n batches.append(layer)\n\n for category in categories:\n y = np.zeros((3), dtype=float)\n if category == 0:\n y[0] = 1\n elif category == 1:\n y[1] = 1\n else:\n y[2] = 1\n results.append(y)\n\n return np.array(batches), np.array(results)\n\n# Session定义了Operation操作对象执行环境,在这里进行模型训练\nwith tf.Session() as sess:\n sess.run(init)\n\n # Training cycle\n for epoch in range(training_epochs):\n avg_cost = 0.\n total_batch = int(len(train_set.data)/batch_size)\n # Loop over all batches\n for i in range(total_batch):\n batch_x,batch_y = get_batch(train_set,i,batch_size)\n # Run 
optimization op (backprop) and cost op (to get loss value)\n # op(source op),当运行该函数,启动默认图,即运行out_prediction,并不断更新权值和分类结果\n # tf.Session.run(fetches, feed_dict=None, options=None, run_metadata=None)\n # feed_dict 参数是我们为每步运行所输入的数据。为了传递这个数据,我们需要定义tf.placeholders(提供给 feed_dict)\n c,_ = sess.run([loss,optimizer], feed_dict={input_tensor: batch_x,output_tensor:batch_y})\n # Compute average loss\n avg_cost += c / total_batch\n # Display logs per epoch step\n if epoch % display_step == 0:\n print(\"Epoch:\", '%04d' % (epoch+1), \"loss=\", \\\n \"{:.9f}\".format(avg_cost))\n print(\"Optimization Finished!\")\n\n\n # Test model\n correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(output_tensor, 1))\n # Calculate accuracy\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n total_test_data = len(train_set.target)\n batch_x_test, batch_y_test = get_batch(test_set,0,total_test_data)\n print(\"Accuracy:\", accuracy.eval({input_tensor: batch_x_test, output_tensor: batch_y_test}))\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"TensorFlow1.py","file_name":"TensorFlow1.py","file_ext":"py","file_size_in_byte":6130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"146372771","text":"\"\"\"\n存放基本的方法,比如:初始化driver,find查找元素\n\"\"\"\nimport logging\n\nfrom appium.webdriver.common.mobileby import MobileBy\nfrom appium.webdriver.webdriver import WebDriver\nfrom selenium.webdriver.support.wait import WebDriverWait\n\n\nclass BasePage:\n logging.basicConfig(level=logging.INFO)\n\n def __init__(self, driver: WebDriver = None):\n self.driver = driver\n\n def find(self, locator):\n logging.info(f'find:{locator}')\n return self.driver.find_element(*locator) # *是用来解包的,传过来是元组的形式\n\n def find_and_click(self, locator):\n logging.info(f'find_and_click:{locator}')\n self.find(locator).click() # 调用本地封装的find方法\n\n def find_and_sendkeys(self, locator, text):\n logging.info(f'find_and_sendkeys:{text}')\n self.find(locator).send_keys(text)\n\n def find_by_scroll(self, text):\n logging.info('find_by_scroll')\n return self.driver.find_element(MobileBy.ANDROID_UIAUTOMATOR,\n 'new UiScrollable'\n '(new UiSelector().'\n 'scrollable(true).'\n 'instance(0)).'\n 'scrollIntoView('\n 'new UiSelector().'\n f'text(\"{text}\").instance(0));')\n\n def webderiver_wait(self, locator, timeout=10):\n logging.info(f'webderiver_wait:{locator},timeout:{timeout}')\n element = WebDriverWait(self.driver, timeout).until(\n lambda x: x.find_element(*locator))\n return element\n\n def back(self, num=1):\n logging.info(f'back:{num}')\n for i in range(num):\n self.driver.back()\n","sub_path":"app/企业微信po/page/basepage.py","file_name":"basepage.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"92243656","text":"from nicos.devices.epics import EpicsReadable, pvname, EpicsDevice\nfrom nicos.core import status, Param, Override, Attach, usermethod, Readable, SIMULATION\n\n\nclass DelayedEpicsReadable(EpicsDevice, Readable):\n parameters = {\n 'readpv': Param('PV for reading device value',\n type=pvname, mandatory=False),\n }\n\n parameter_overrides = {\n 'pollinterval': Override(default=0.5)\n }\n\n pv_parameters = set(('readpv',))\n\n def doPreinit(self, mode):\n self._pvs = {}\n self._pvctrls = {}\n\n def doRead(self, maxage=0):\n try:\n return self._get_pv('readpv')\n except KeyError:\n raise RuntimeError('test')\n\n def setPVName(self, name):\n self._setROParam('readpv', name)\n 
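# Added note: readpv is declared with mandatory=False above so that it can be\n # filled in late, here, once the full PV name is known (see EssChopper.doInit\n # below); _initialise_pvs() presumably (re)creates the PV connections.\n 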
self._initialise_pvs()\n\n\nclass EssChopper(EpicsDevice, Readable):\n parameters = {\n 'pvprefix': Param('PV prefix of the chopper.', type=pvname, mandatory=True)\n }\n\n attached_devices = {\n 'speed': Attach('Speed of the chopper disc', DelayedEpicsReadable),\n 'phase': Attach('Phase of the chopper disc', DelayedEpicsReadable),\n 'parkposition': Attach('Position in parked state', DelayedEpicsReadable),\n 'state': Attach('Current state of the chopper', DelayedEpicsReadable)\n }\n\n state_map = {\n 'init': (status.ERROR, 'Interlocks not fulfilled'),\n 'stopped': (status.OK, 'Waiting for commands'),\n 'parked': (status.OK, 'Parked'),\n 'parking': (status.BUSY, 'Moving to park position'),\n 'accelerating': (status.BUSY, 'Adjusting speed to target'),\n 'phase_locking': (status.BUSY, 'Acquiring phase lock'),\n 'phase_locked': (status.OK, 'Speed and phase locked'),\n 'stopping': (status.BUSY, 'Decelerating disc'),\n 'idle': (status.OK, 'Disc rotating freely, waiting for command.'),\n 'bearings': (status.BUSY, 'Initialising bearings'),\n }\n\n parameter_overrides = {\n 'unit': Override(mandatory=False),\n }\n\n internal_chopper_fields = {\n 'speed_setpoint': 'Spd',\n 'phase_setpoint': 'Phs',\n 'parkposition_setpoint': 'ParkAng',\n 'command': 'CmdS',\n }\n\n def _get_pv_parameters(self):\n return self.internal_chopper_fields.keys()\n\n def _get_pv_name(self, pvparam):\n return self.pvprefix + self.internal_chopper_fields[pvparam]\n\n def doInit(self, mode):\n if mode != SIMULATION:\n self._attached_speed.setPVName(self.pvprefix + 'Spd-RB')\n self._attached_phase.setPVName(self.pvprefix + 'Phs-RB')\n self._attached_state.setPVName(self.pvprefix + 'State')\n self._attached_parkposition.setPVName(self.pvprefix + 'ParkAng-RB')\n\n def doRead(self, maxage=0):\n return round(self._attached_speed.read(maxage), 2), round(self._attached_phase.read(maxage), 2)\n\n def doStatus(self, maxage=0):\n return self.state_map[self._attached_state.read()]\n\n @usermethod\n def interlock(self):\n self._put_pv('command', 'init')\n\n @usermethod\n def setSpeedAndPhase(self, speed, phase):\n self._put_pv('speed_setpoint', speed)\n self._put_pv('phase_setpoint', phase)\n self._put_pv('command', 'start')\n\n @usermethod\n def stop(self):\n self._put_pv('command', 'stop')\n\n @usermethod\n def parkAt(self, position):\n self._put_pv('parkposition_setpoint', position)\n self._put_pv('command', 'park')\n\n @usermethod\n def coast(self):\n self._put_pv('command', 'unlock')\n\n @usermethod\n def release(self):\n self._put_pv('command', 'deinit')\n","sub_path":"NICOS/essiip/lib/chopper.py","file_name":"chopper.py","file_ext":"py","file_size_in_byte":3612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"59884999","text":"from flask import (\n session, render_template, url_for, flash,\n request, redirect, jsonify, Response\n)\n\nfrom app import app, fw, country_codes\nfrom .forms import UserNameForm\nfrom .utils import (\n batch_rename, subset, pl_cols_mapping, untypical_subset,\n make_unique, country_fix, get_top_movie, get_summary, fit_lm\n)\nfrom .tasks import (\n get_raw_data, fetch_row, combine_entries,\n get_rows_then_combine, get_user_data\n)\nfrom celery import chain\n\nfrom filmweb.filmweb import *\nfrom copy import deepcopy\nfrom operator import itemgetter\nimport math\nimport time\nimport random\nimport json\nimport re\nimport requests\nfrom bs4 import BeautifulSoup\n\nEXPORT_TIMEOUT = 10\n\n@app.errorhandler(404)\ndef page_not_found(e):\n \"\"\"404 error 
redirect\"\"\"\n return render_template('404.html'), 404\n\n@app.route('/', methods = ['GET', 'POST'])\n@app.route('/index', methods = ['GET', 'POST'])\ndef index():\n \"\"\"Main page view\"\"\"\n for key in ['task_id', 'combine_entries_id', 'user_name', 'fw_id', 'type']:\n session[key] = None\n form = UserNameForm()\n if form.validate_on_submit():\n session['user_name'] = form.user.data\n return redirect(url_for('fetch'))\n return render_template('index.html', form=form)\n\n@app.route('/fetch', methods=['GET'])\ndef fetch():\n \"\"\"Page for monitoring celelery data export status\"\"\"\n user_name = session['user_name']\n if user_name is None:\n return redirect(url_for('index'))\n fw_url = 'https://www.filmweb.pl/user/{}'.format(user_name)\n try:\n fw_page = requests.get(fw_url)\n except:\n flash((\n 'Problem z połączeniem. '\n 'Możliwe, że strona filmweb.pl '\n 'jest chwilowo niedostępna.'\n ))\n return redirect(url_for('index'))\n soup = BeautifulSoup(fw_page.content, \"html.parser\")\n # check user existence / no of votes before proceeding\n fw_preview = soup.find('div', class_='userPreview')\n fw_uid = fw_preview['data-id']\n # when an user does not exist, beautiful soup returns the following string\n if fw_uid == '$user.id':\n flash('Użytkownik {} nie istnieje'.format(user_name))\n return redirect(url_for('index'))\n fw_vote_count = soup.find('div', class_='voteStatsBoxData')\n try:\n votes_count = json.loads(fw_vote_count.text)['votes']['films']\n assert (votes_count > 0)\n except:\n flash('Użytkownik {} nie ma żadnych ocen'.format(user_name))\n return redirect(url_for('index'))\n session['fw_id'] = fw_uid\n return render_template('fetch.html', user=user_name, fw_id=fw_uid)\n\n@app.route('/waiting', methods=['POST'])\ndef waiting():\n \"\"\"Returns current export status\"\"\"\n task = chain(\n get_raw_data.s(\n user=session['user_name'],\n fw_id=session['fw_id']\n ),\n get_rows_then_combine.s()\n ).apply_async()\n session['task_id'] = task.id\n response = jsonify({}), 202, {'Location': url_for('taskstatus', task_id=task.id)}\n return response\n\n@app.route('/status/')\ndef taskstatus(task_id):\n \"\"\"\n Queries celery worker for taskstatus\n parameter task_id refers to get_rows_then_combine\n Need to traverse task structure to get id of combine_entries task\n \"\"\"\n start = time.time()\n combine_entries_id = session.get('combine_entries_id')\n if combine_entries_id is None:\n while True:\n duration = time.time()-start\n if duration > EXPORT_TIMEOUT:\n response = {\n 'state': 'FAILURE',\n 'current': 0,\n 'total': 0,\n 'status': 'Błąd'\n }\n return jsonify(response)\n task = get_rows_then_combine.AsyncResult(task_id)\n if task.info is not None:\n combine_entries_id = task.info[0][0]\n session['combine_entries_id'] = combine_entries_id\n break\n task = get_rows_then_combine.AsyncResult(task_id)\n total = len(task.children[0])\n combine_entries_task = combine_entries.AsyncResult(combine_entries_id)\n fetch_row_group = task.children[0]\n if combine_entries_task.ready():\n response = {\n 'state': combine_entries_task.state,\n 'current': 1,\n 'total': 1,\n 'status': 'Oceny pobrane'\n }\n elif task.state == 'PENDING':\n response = {\n 'state': combine_entries_task.state,\n 'current': 0,\n 'total': 1,\n 'status': 'Rozpoczynam pobieranie...'\n }\n elif task.state != 'FAILURE':\n response = {\n 'state': combine_entries_task.state,\n 'current': fetch_row_group.completed_count(),\n 'total': total,\n 'status': 'Masz {} filmów, trwa ściąganie...'.format(total)\n }\n else:\n response = {\n 
'state': combine_entries_task.state,\n 'current': 1,\n 'total': 1,\n 'status': str(task.info)\n }\n return jsonify(response)\n\n@app.route('/report', methods = ['GET'])\ndef report():\n \"\"\"Main view for generating report\"\"\"\n if request.args.get('type') == 'sample':\n session['type'] = 'sample'\n session['user_name'] = (\n 'Przykładowy eksport danych '\n '(użytkownik pieca, stan na 03.2019)'\n )\n movies = get_user_data(\n session_type=session.get('type'),\n task_id=session.get('combine_entries_id')\n )\n if not movies:\n return redirect(url_for('index'))\n # the most untypical ratings\n for entry in movies:\n url = entry.get('url')\n if url is None:\n url = 'https://www.filmweb.pl/films/search?q={}'.format(entry.get('name'))\n entry['url'] = url\n try:\n user_rate = float(entry.get('rate_user'))\n global_rate = float(entry.get('rate_global'))\n entry['rate_diff'] = user_rate-global_rate\n except:\n entry['rate_diff'] = None\n movies_untypical_sort = sorted(\n movies,\n key=itemgetter('rate_diff'),\n reverse=True\n )\n movies_untypical_sort = [subset(entry, untypical_subset) for entry in movies_untypical_sort]\n untypical_pos = movies_untypical_sort[:10]\n untypical_neg = movies_untypical_sort[-10:][::-1]\n for i in range(len(untypical_neg)):\n untypical_pos[i]['rate_global'] = round(untypical_pos[i].pop('rate_global'), 2)\n untypical_neg[i]['rate_global'] = round(untypical_neg[i].pop('rate_global'), 2)\n\n # Data with one country/genre per entry\n movies_country_unique = make_unique(country_fix(movies), 'countries')\n movies_genre_unique = make_unique(movies, 'genres')\n\n # generate summaries\n top_countries_count, top_countries_mean = get_summary(movies_country_unique, 'countries')\n top_directors_count, top_directors_mean = get_summary(movies, 'director_name')\n top_genres_count, top_genres_mean = get_summary(movies_genre_unique, 'genres')\n\n summaries = {\n 'untypical_pos':untypical_pos,\n 'untypical_neg':untypical_neg,\n 'top_countries_count':top_countries_count,\n 'top_directors_count':top_directors_count,\n 'top_genres_count':top_genres_count,\n 'top_countries_mean':top_countries_mean,\n 'top_directors_mean':top_directors_mean,\n 'top_genres_mean':top_genres_mean\n }\n\n return render_template('report.html', summaries = summaries)\n\n@app.route(\"/getcsv\")\ndef getcsv():\n \"\"\"Generate and send obtained data\"\"\"\n movies = get_user_data(\n session_type = session.get('type'),\n task_id = session.get('combine_entries_id')\n )\n if not movies:\n return redirect(url_for('index'))\n movies_pl = deepcopy(movies)\n for entry in movies_pl:\n if entry.get('url') is None:\n entry['url'] = 'https://www.filmweb.pl/films/search?q={}'.format(entry.get('name'))\n batch_rename(entry, pl_cols_mapping)\n movies_pl = [subset(el, list(pl_cols_mapping.values())) for el in movies_pl]\n string_file = ', '.join(['\"{}\"'.format(str(el)) for el in list(movies_pl[0].keys())])\n string_file += '\\n'\n for entry in movies_pl:\n row = ', '.join(\n ['\"{}\"'.format(re.sub('\\\"', '', str(el))) for el in list(entry.values())]\n )\n string_file += \"{}\\n\".format(row)\n header = {\"Content-disposition\": \"attachment; filename=fw_export.csv\"}\n return Response(\n string_file.encode('utf-8'),\n mimetype = \"text/csv\",\n headers = header\n )\n\n@app.route('/plotdata', methods = ['GET'])\ndef plotdata():\n \"\"\"Returns formatted json data for visualizations\"\"\"\n jitter = 0.4\n movies = get_user_data(\n session_type = session.get('type'),\n task_id = session.get('combine_entries_id')\n )\n names, 
user_rates, global_rates, votes, votes_log, dir_sex = ([] for i in range(6))\n keys = ['name', 'rate_user', 'rate_global', 'votes', 'year', 'director_sex']\n for entry in movies:\n if None in [entry.get(key) for key in keys]:\n continue\n name_formatted = '{} ({})'.format(\n entry.get('name'),\n entry.get('year')\n )\n names.append(name_formatted)\n user_rate_jitter = entry.get('rate_user') + random.uniform(-jitter, jitter)\n user_rates.append(user_rate_jitter)\n global_rates.append(entry.get('rate_global'))\n vote = entry.get('votes')\n votes.append(vote)\n votes_log.append(math.log(vote))\n dir_sex.append(entry.get('director_sex'))\n global_coeff = fit_lm(global_rates, user_rates)\n votes_coeff = fit_lm(votes, user_rates)\n votes_log_coeff = fit_lm(votes_log, user_rates)\n movies_country_unique = make_unique(country_fix(movies), 'countries')\n country_count, country_mean = get_summary(\n movies_country_unique,\n 'countries',\n all=True\n )\n # the same keys in both dicts\n map_tooltip = dict()\n for key in country_count:\n # put actual count to tooltip but log value for plotting\n tooltip = '{} 🎬 {} ★ {}'.format(\n key,\n country_count.get(key),\n country_mean.get(key)\n )\n map_tooltip[key] = tooltip\n country_count[key] = math.log(country_count[key] + 1)\n batch_rename(country_count, country_codes)\n batch_rename(country_mean, country_codes)\n batch_rename(map_tooltip, country_codes)\n response = {\n 'name': names,\n 'dir_sex': dir_sex,\n 'user_rates': user_rates,\n 'global_rates': global_rates,\n 'votes': votes,\n 'votes_log': votes_log,\n 'global_coeff': global_coeff,\n 'votes_coeff': votes_coeff,\n 'votes_log_coeff': votes_log_coeff,\n 'country_count': country_count,\n 'country_mean': country_mean,\n 'map_tooltip': map_tooltip\n }\n return jsonify(response)\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"528892172","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCalcula o valor de fatorial de x, dado o valor de x.\n\n@author: Prof. 
Diogo SM\n\"\"\"\nx = int(input(\"x: \"))\nf = 1\n\nfor i in range(1, x + 1):\n f = f * i\nprint(f)\n","sub_path":"aula10-repeticao-iii/fatorial.py","file_name":"fatorial.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"505837003","text":"# -*- coding: utf-8 -*-\n\"\"\"\nModule Description:\nDate:\nAuthor: Haoyuan Liu\n\"\"\"\nfrom msg_syn import MsgSyn\nimport util\nfrom datetime import datetime\nfrom send_log import net_logger\nfrom pymongo import errors\n\n\nclass MsgSynImpl(MsgSyn):\n\n def __init__(self, channel, game_id=None):\n super(MsgSynImpl, self).__init__(channel, game_id)\n\n def add_msg(self, msg):\n \"\"\"\n 增加一条新消息\n @param msg:\n @return: bool\n \"\"\"\n record = dict()\n record['time'] = datetime.now()\n record['msg'] = msg\n\n time_str = util.time_to_str(record['time'])\n try:\n obj = self.channel.insert_one(record)\n res = {\"time\": time_str, \"id\": str(obj.inserted_id)}\n self.limit_content()\n return res\n except:\n net_logger(\"Mongodb server stop service\", net=True, terminal=True)\n return False\n\n def pull_msgs(self, time=None):\n \"\"\"\n 获取时间点以上的消息,默认获取最新的page_size数量的消息\n @param time:字符串形式时间,默认None\n @return:list: [{\"time\": time1, \"msg\": msg1}, {\"time\": time2, \"msg\": msg2},...]\n\n \"\"\"\n if time is None:\n time = datetime.now()\n else:\n time = util.str_to_time(time)\n collection = self.channel\n records = collection.find({'time': {'$lt': time}}).limit(self._limit).sort('time', -1)\n msgs = self.format_msgs(records)\n\n return msgs\n\n def get_new_msg(self):\n \"\"\"\n 获取最新的一条消息\n @return: {\"time\": time, \"msg\": msg}\n \"\"\"\n collection = self.channel\n records = collection.find().limit(1).sort('time', -1)\n msgs = self.format_msgs(records)\n\n return msgs\n\n @staticmethod\n def format_msgs(msgs):\n result = []\n for r in list(msgs):\n r['time'] = util.time_to_str(r['time'])\n r[\"_id\"] = str(r[\"_id\"])\n result.append(r)\n return result\n","sub_path":"chaos_chat/syn_db/msg_syn_impl.py","file_name":"msg_syn_impl.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"592306603","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nРедактор Spyder\r\n\r\n@author: Дмитрий Мелкозеров\r\n\"\"\"\r\n\r\n# v Подключаемые пакеты v\r\n# ===========================================================================\r\nimport os\r\nimport importlib\r\nimport math as m\r\nimport time\r\nimport random as r\r\nimport numpy as np\r\nimport treecode.tree_code as tc\r\n# import threading\r\nfrom joblib import Parallel, delayed\r\n# import statistics as stat\r\n# import matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n# from matplotlib import animation\r\n# ===========================================================================\r\n# ^ Подключаемые пакеты ^\r\n# v Используемые функции v\r\n# ===========================================================================\r\n\r\n\r\ndef parameters_test(h, p, l):\r\n # Подфункция, позволяющая сгенерировать определенные\r\n # параметры для тела\r\n x = Distance * (indent_i + h * period) / i_test\r\n y = Distance * (indent_j + p * period) / j_test\r\n z = Distance * (indent_k + l * period) / k_test\r\n # Распределение скоростей и масс считаем нормальным\r\n Vx = r.normalvariate(0, 4) * v_avg\r\n Vy = r.normalvariate(0, 4) * v_avg\r\n Vz = r.normalvariate(0, 4) * v_avg\r\n mass = 
abs(m_avg)\r\n Sum = np.array([x, y, z, Vx, Vy, Vz, mass, 0, 0, 0, 0, 0, 0, 0])\r\n return Sum\r\n\r\n\r\ndef randomize_parameters():\r\n # Подфункция, позволяющая сгенерировать случайные параметры для тела\r\n x = r.random() * n * Distance\r\n y = r.random() * n * Distance\r\n z = r.random() * n * Distance\r\n# Распределение скоростей и масс считаем нормальным\r\n# (пока что квадратичное отклонение выбрано наугад)\r\n Vx = r.normalvariate(0, 4) * v_avg\r\n Vy = r.normalvariate(0, 4) * v_avg\r\n Vz = r.normalvariate(0, 4) * v_avg\r\n mass = abs(r.normalvariate(m_avg, 0.5*m_avg))\r\n Sum = np.array([x, y, z, Vx, Vy, Vz, mass, 0, 0, 0, 0, 0, 0, 0])\r\n return Sum\r\n\r\n\r\ndef randomize_ellipsoid():\r\n # Подфункция, позволяющая сгенерировать случайные параметры для тела\r\n x_r = 0\r\n y_r = 0\r\n z_r = 0\r\n particle_not_generated = True\r\n while particle_not_generated:\r\n x_r = r.random()\r\n y_r = r.random()\r\n z_r = r.random()\r\n x_el = (2 * x_r - 1) / a_inp\r\n y_el = (2 * y_r - 1) / b_inp\r\n z_el = (2 * z_r - 1) / c_inp\r\n ellipsoid = x_el * x_el + y_el * y_el + z_el * z_el\r\n if ellipsoid <= 1:\r\n particle_not_generated = False\r\n center = n * Distance / 2\r\n x = (x_r + 0.5) * center\r\n y = (y_r + 0.5) * center\r\n z = (z_r + 0.5) * center\r\n d_x = x - center\r\n d_y = y - center\r\n d_z = z - center\r\n# Распределение скоростей и масс считаем нормальным\r\n# (пока что квадратичное отклонение выбрано наугад)\r\n Vx = r.normalvariate(0, 3) * v_avg + w_y * d_z - w_z * d_y\r\n Vy = r.normalvariate(0, 3) * v_avg + w_z * d_x - w_x * d_z\r\n Vz = r.normalvariate(0, 3) * v_avg + w_x * d_y - w_y * d_x\r\n mass = abs(r.normalvariate(m_avg, 0.5*m_avg))\r\n Sum = np.array([x, y, z, Vx, Vy, Vz, mass, 0, 0, 0, 0, 0, 0, 0])\r\n return Sum\r\n\r\n\r\ndef birth_test():\r\n # Функция, создающая i*j*k тел\r\n # Сначала создаем массив нулей, а затем заполняем его;\r\n # тела находятся по первому индексу, параметры - по второму\r\n test_particles = np.zeros((i_test * j_test * k_test, 14))\r\n Num = 0\r\n for l in range(k_test):\r\n for p in range(j_test):\r\n for h in range(i_test):\r\n test_particles[Num] = parameters_test(h, p, l)\r\n Num += 1\r\n return test_particles\r\n\r\n\r\ndef birth_random(body_count):\r\n # Функция, создающая \"body_count\" тел\r\n # Сначала создаем массив нулей, а затем заполняем его;\r\n # тела находятся по первому индексу, параметры - по второму\r\n random_particles = np.zeros((body_count, 14))\r\n for l in range(body_count):\r\n random_particles[l] = randomize_parameters()\r\n return random_particles\r\n\r\n\r\ndef birth_ellipsoid(body_count):\r\n # Функция, создающая \"body_count\" тел\r\n # Сначала создаем массив нулей, а затем заполняем его;\r\n # тела находятся по первому индексу, параметры - по второму\r\n random_particles = np.zeros([body_count, 14])\r\n for l in range(body_count):\r\n random_particles[l] = randomize_ellipsoid()\r\n return random_particles\r\n\r\n\r\ndef distribution(X0, X_size):\r\n # Распределение X_size частиц по ячейкам со стороной Distance\r\n # с последующей сортировкой по номерам ячеек (3.04.18)\r\n for N_local in range(X_size):\r\n n_x = int(m.floor(X0[N_local, 0] / Distance))\r\n n_y = int(m.floor(X0[N_local, 1] / Distance))\r\n n_z = int(m.floor(X0[N_local, 2] / Distance))\r\n if (n_x > n) or (n_y > n) or (n_z > n) or \\\r\n (n_x < 0) or (n_y < 0) or (n_z < 0):\r\n X0[N_local, 11] = -1\r\n else:\r\n X0[N_local, 11] = n_x * n * n + n_y * n + n_z\r\n return X0[X0[:, 11].argsort(kind='mergesort')]\r\n\r\n\r\ndef 
particles_to_cell(Y, Y_size, order_n, n_max):\r\n # Функция, определяющая параметры самых малых ячеек из параметров\r\n # находящихся внутри частиц (13.04.18)\r\n R_local = np.zeros([n_max, 23])\r\n part_num = 0\r\n part_count = 0\r\n L_2 = 3 * Distance * Distance\r\n while Y[part_num, 11] < 0:\r\n part_num += 1\r\n if part_num == (np.size(Y, 0)):\r\n break\r\n for cell_num in range(n_max):\r\n R = np.zeros([12])\r\n if not part_num == Y_size:\r\n while Y[part_num, 11] == cell_num:\r\n R[0:3] += Y[part_num, 0:3] * Y[part_num, 6]\r\n R[3] += Y[part_num, 6]\r\n part_num += 1\r\n if part_num == Y_size:\r\n break\r\n R[4] = part_count\r\n R[5] = part_num\r\n part_count = part_num\r\n d_xy = 0\r\n d_xz = 0\r\n d_yz = 0\r\n if not R[3] == 0:\r\n # Расчет положения центра масс ячейки\r\n R[0:3] = R[0:3] / R[3]\r\n # Расчет положения геометрического центра ячейки\r\n cell_x = cell_num // (n * n)\r\n R[6] = Distance * (0.5 + cell_x)\r\n R[7] = Distance * (0.5 + ((cell_num // n) - cell_x * n))\r\n R[8] = Distance * (0.5 + (cell_num % n))\r\n # Расчет квадрупольного момента для выбранной ячейки\r\n for s in range(int(R[4]), int(R[5])):\r\n R[9] += Y[s, 6] * (Y[s, 0] - R[0]) * (Y[s, 1] - R[1])\r\n R[10] += Y[s, 6] * (Y[s, 0] - R[0]) * (Y[s, 2] - R[2])\r\n R[11] += Y[s, 6] * (Y[s, 1] - R[1]) * (Y[s, 2] - R[2])\r\n d_xy += Y[s, 6] * Y[s, 0] * Y[s, 1]\r\n d_xz += Y[s, 6] * Y[s, 0] * Y[s, 2]\r\n d_yz += Y[s, 6] * Y[s, 1] * Y[s, 2]\r\n R[9:12] *= 3\r\n # Итоговый вид строки с параметрами ячейки\r\n R_local[cell_num] = [R[0], R[1], R[2], R[6], R[7], R[8],\r\n R[3], R[9], R[10], R[11], L_2, order_n,\r\n R[4], R[5], 0, 0, 0, 0, 0, 0,\r\n d_xy, d_xz, d_yz]\r\n return R_local\r\n\r\n\r\ndef cells_to_cell(R_final, order_n, n_max):\r\n # Функция, вычисляющая параметры ячеек за счет\r\n # находящихся внутри ячеек с меньшим порядком (13.04.18)\r\n cell_length = Distance * (n / order_n)\r\n n_linear = order_n * 2\r\n n_total = int(m.pow(order_n, 3))\r\n R_local = np.zeros([n_total, 23])\r\n L_2 = 3 * Distance * Distance * n * n / (order_n * order_n)\r\n for cell_num in range(n_total):\r\n R = np.zeros([10])\r\n cell_x = cell_num // (order_n * order_n)\r\n cell_y = (cell_num // order_n) - cell_x * order_n\r\n cell_z = cell_num % order_n\r\n cell_num_0 = 2 * int(cell_x * n_linear * n_linear\r\n + cell_y * n_linear + cell_z)\r\n Numbers = [cell_num_0, cell_num_0 + 1,\r\n cell_num_0 + int(n_linear),\r\n cell_num_0 + int(n_linear) + 1,\r\n cell_num_0 + int(n_linear * n_linear),\r\n cell_num_0 + int(n_linear * n_linear) + 1,\r\n cell_num_0 + int(n_linear * n_linear + n_linear),\r\n cell_num_0 + int(n_linear * n_linear + n_linear) + 1]\r\n d_xy = 0\r\n d_xz = 0\r\n d_yz = 0\r\n# D_xy = 0\r\n# D_xz = 0\r\n# D_yz = 0\r\n for u in range(8):\r\n # Определяем параметры центра масс\r\n R[0:3] += R_final[Numbers[u], 0:3] \\\r\n * R_final[Numbers[u], 6]\r\n R[3] += R_final[Numbers[u], 6]\r\n # Определяем доп. 
параметры, связанные с квадрупольным вкладом\r\n# D_xy += R_final[Numbers[u], 6] \\\r\n# * R_final[Numbers[u], 0] * R_final[Numbers[u], 1]\r\n# D_xz += R_final[Numbers[u], 6] \\\r\n# * R_final[Numbers[u], 0] * R_final[Numbers[u], 2]\r\n# D_yz += R_final[Numbers[u], 6] \\\r\n# * R_final[Numbers[u], 1] * R_final[Numbers[u], 2]\r\n# d_xy += R_final[Numbers[u], 20]\r\n# d_xz += R_final[Numbers[u], 21]\r\n# d_yz += R_final[Numbers[u], 22]\r\n if not R[3] == 0:\r\n # Расчет положения ЦМ и геометрического центра ячейки\r\n R[0:3] = R[0:3] / R[3]\r\n R[4] = cell_length * (0.5 + cell_x)\r\n R[5] = cell_length * (0.5 + cell_y)\r\n R[6] = cell_length * (0.5 + cell_z)\r\n # Расчет квадрупольного момента для выбранной ячейки\r\n# for s in range(8):\r\n# if not R_final[Numbers[s], 6] == 0:\r\n# R[7] += R_final[Numbers[s], 6] \\\r\n# * (R_final[Numbers[s], 0] - R[0]) \\\r\n# * (R_final[Numbers[s], 1] - R[1])\r\n# R[8] += R_final[Numbers[s], 6] \\\r\n# * (R_final[Numbers[s], 0] - R[0]) \\\r\n# * (R_final[Numbers[s], 2] - R[2])\r\n# R[9] += R_final[Numbers[s], 6] \\\r\n# * (R_final[Numbers[s], 1] - R[1]) \\\r\n# * (R_final[Numbers[s], 2] - R[2])\r\n# if (R[7] == 0) and (R[8] == 0) and (R[9] == 0):\r\n# R[7] = R_final[Numbers[:], 7].sum()\r\n# R[8] = R_final[Numbers[:], 8].sum()\r\n# R[9] = R_final[Numbers[:], 9].sum()\r\n# else:\r\n# R[7] += d_xy - D_xy\r\n# R[8] += d_xz - D_xz\r\n# R[9] += d_yz - D_yz\r\n# R[7:10] *= 3\r\n# Итоговый вид строки с параметрами ячейки\r\n R_local[cell_num] = [R[0], R[1], R[2], R[4], R[5], R[6], R[3],\r\n R[7], R[8], R[9], L_2, order_n,\r\n Numbers[0], Numbers[1], Numbers[2], Numbers[3],\r\n Numbers[4], Numbers[5], Numbers[6], Numbers[7],\r\n d_xy, d_xz, d_yz]\r\n# Корректируем номера \"дочерних\" ячеек\r\n R_local[:, 12:20] += n_total\r\n R_final[0:(-n_max), 12:20] += n_total\r\n return np.vstack((R_local, R_final))\r\n\r\n\r\ndef tree_root(Particles, Mass_center):\r\n # Функция, с которой начинается tree code\r\n if use_multiprocessing:\r\n A0 = Parallel(n_jobs=workers, verbose=0)(\r\n delayed(tc.begin_tree)(Particles, Mass_center, i,\r\n n, eps_smooth)\r\n for i in range(1, 9))\r\n A = A0[0] + A0[1] + A0[2] + A0[3] + A0[4] + A0[5] + A0[6] + A0[7]\r\n else:\r\n A = np.zeros([np.size(Particles, 0), 4])\r\n if not Mass_center[1, 6] == 0:\r\n A += tc.begin_tree(Particles, Mass_center, 1, n, eps_smooth)\r\n if not Mass_center[2, 6] == 0:\r\n A += tc.begin_tree(Particles, Mass_center, 2, n, eps_smooth)\r\n if not Mass_center[3, 6] == 0:\r\n A += tc.begin_tree(Particles, Mass_center, 3, n, eps_smooth)\r\n if not Mass_center[4, 6] == 0:\r\n A += tc.begin_tree(Particles, Mass_center, 4, n, eps_smooth)\r\n if not Mass_center[5, 6] == 0:\r\n A += tc.begin_tree(Particles, Mass_center, 5, n, eps_smooth)\r\n if not Mass_center[6, 6] == 0:\r\n A += tc.begin_tree(Particles, Mass_center, 6, n, eps_smooth)\r\n if not Mass_center[7, 6] == 0:\r\n A += tc.begin_tree(Particles, Mass_center, 7, n, eps_smooth)\r\n if not Mass_center[8, 6] == 0:\r\n A += tc.begin_tree(Particles, Mass_center, 8, n, eps_smooth)\r\n return A\r\n\r\n\r\ndef tree_code_gravity(Y):\r\n # Функция, позволяющая получить новые параметры частиц\r\n # из матрицы Y с помощью метода Tree code (13.04.18)\r\n order_n = n\r\n Y_size = np.size(Y, 0)\r\n Y[:, 3:6] += Y[:, 7:10] * time_step / 2\r\n Y[:, 0:3] += Y[:, 3:6] * time_step\r\n Y = distribution(Y, Y_size)\r\n n_max = int(n * n * n)\r\n R_final = particles_to_cell(Y, Y_size, order_n, n_max)\r\n while order_n > 1:\r\n order_n *= 0.5\r\n R_final = cells_to_cell(R_final, order_n, 
n_max)\r\n Y[:, 7:11] = tree_root(Y, R_final)\r\n if Y[0, 11] < 0:\r\n Y = tc.N_body_direct(Y, eps_smooth)\r\n Y[:, 7:11] *= G\r\n Y[:, 3:6] += Y[:, 7:10] * time_step / 2\r\n return Y\r\n\r\n\r\ndef momentum_of_system(Y):\r\n # Функция, определяющая импульс всей системы и выводящая его в строку\r\n P = np.zeros([np.size(Y, 0), 3])\r\n P[:, 0] = np.multiply(Y[:, 3], Y[:, 6])\r\n P[:, 1] = np.multiply(Y[:, 4], Y[:, 6])\r\n P[:, 2] = np.multiply(Y[:, 5], Y[:, 6])\r\n print('Полный импульс системы ', P.sum(axis=0))\r\n\r\n\r\ndef momentum_of_particles(Y):\r\n # Функция, определяющая импульс всех материальных точек\r\n P = np.zeros([np.size(Y, 0), 3])\r\n P[:, 0] = np.multiply(Y[:, 3], Y[:, 6])\r\n P[:, 1] = np.multiply(Y[:, 4], Y[:, 6])\r\n P[:, 2] = np.multiply(Y[:, 5], Y[:, 6])\r\n if np.size(Y, 0) > 10:\r\n print('Импульсы всех материальных точек сохранены в файл')\r\n np.savetxt('Импульсы материальных точек.txt', P)\r\n else:\r\n print(P)\r\n\r\n\r\ndef kinetic_energy_Newton(Y):\r\n # Функция, определяющая кинетическую энергию каждой частицы\r\n V = np.multiply(Y[:, 3:6], Y[:, 3:6])\r\n E = V.sum(axis=1)\r\n E = np.multiply(E[:], Y[:, 6])\r\n E /= 2\r\n return E\r\n\r\n\r\ndef max_dT(Y):\r\n # Функция, определяющая максимальную разницу\r\n # кинетической энергии частиц за шаг\r\n E = kinetic_energy_Newton(Y)\r\n E = E - Y[:, 12]\r\n dE_plus = np.amax(E)\r\n dE_minus = np.amin(E)\r\n if abs(dE_minus) > dE_plus:\r\n dE = dE_minus\r\n else:\r\n dE = dE_plus\r\n return dE\r\n\r\n\r\ndef max_dU(Y):\r\n # Функция, определяющая максимальную разницу\r\n # потенциальной энергии частиц за шаг\r\n E = potential_energy_Newton(Y)\r\n E = E - Y[:, 13]\r\n dE_plus = np.amax(E)\r\n dE_minus = np.amin(E)\r\n if abs(dE_minus) > dE_plus:\r\n dE = dE_minus\r\n else:\r\n dE = dE_plus\r\n return dE\r\n\r\n\r\ndef plot_max_dE_kinetic(dE):\r\n # Функция, создающая график максимальной разницы\r\n # кинетической энергии частиц за все время работы программы\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n ax.plot(dE[1:, 0], dE[1:, 4])\r\n ax.set_xlabel('Номер шага')\r\n ax.set_ylabel('Kinetic energy')\r\n ax.set_title('Max kinetic energy difference per step')\r\n plt.savefig('Максимальное изменение кинетической энергии за шаг', dpi=640)\r\n plt.show()\r\n\r\n\r\ndef plot_max_dE_potential(dE):\r\n # Функция, создающая график максимальной разницы\r\n # потенциальной энергии частиц за все время работы программы\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n ax.plot(dE[1:, 0], dE[1:, 5])\r\n ax.set_xlabel('Номер шага')\r\n ax.set_ylabel('Potential energy')\r\n ax.set_title('Max potential energy difference per step')\r\n plt.savefig('Максимальное изменение потенциальной энергии за шаг', dpi=640)\r\n plt.show()\r\n\r\n\r\ndef potential_energy_Newton(Y):\r\n # Функция, определяющая кинетическую энергию каждой частицы\r\n E = np.multiply(Y[:, 10], Y[:, 6])\r\n return E\r\n\r\n\r\ndef system_kinetic_energy(Y):\r\n # Функция, определяющая полную энергию системы\r\n E = kinetic_energy_Newton(Y)\r\n E = E.sum(axis=0)\r\n return E\r\n\r\n\r\ndef system_potential_energy(Y):\r\n E = potential_energy_Newton(Y)\r\n E = E.sum(axis=0)\r\n return E\r\n\r\n\r\ndef system_energy_Newton(Y):\r\n # Функция, определяющая полную энергию системы\r\n E = system_kinetic_energy(Y)\r\n E = E + system_potential_energy(Y)\r\n return E\r\n\r\n\r\ndef plot_avg(E):\r\n # Функция, создающая график кинетической энергии частиц\r\n # за все время работы программы\r\n Energy = np.copy(E[:, 1:3])\r\n Energy /= N\r\n fig = plt.figure()\r\n 
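# Added comment: two stacked panels, 211 on top for the kinetic energy and\r\n # 212 below for the potential energy; E[:, 1:3] was divided by N above, so\r\n # both curves show the average energy per particle at each step.\r\n 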
ax = fig.add_subplot(211)\r\n ax1 = fig.add_subplot(212)\r\n ax.plot(E[:, 0], Energy[:, 0])\r\n ax1.plot(E[:, 0], Energy[:, 1])\r\n ax.xaxis.set_ticklabels([])\r\n ax1.set_xlabel('Номер шага')\r\n ax.set_ylabel('Kinetic enegry')\r\n ax1.set_ylabel('Potential energy')\r\n ax.set_title('Average energy')\r\n ax1.set_title(' ')\r\n plt.savefig('Средняя энергия материальной точки', dpi=640)\r\n plt.show()\r\n\r\n\r\ndef plot_system_enegry(E):\r\n # Функция, создающая график потенциальной энергии частиц\r\n # за все время работы программы\r\n fig = plt.figure()\r\n ax = fig.add_subplot(211)\r\n ax1 = fig.add_subplot(212)\r\n ax.plot(E[:, 0], E[:, 1])\r\n ax1.plot(E[:, 0], Energy[:, 2])\r\n ax.xaxis.set_ticklabels([])\r\n ax1.set_xlabel('Номер шага')\r\n ax.set_ylabel('Kinetic enegry')\r\n ax1.set_ylabel('Potential energy')\r\n ax.set_title('Energy at step')\r\n plt.savefig('Кинетическая и потенциальная энергия системы', dpi=640)\r\n plt.show()\r\n\r\n\r\ndef plot_total_energy(E):\r\n # Функция, создающая график потенциальной энергии частиц\r\n # за все время работы программы\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n ax.plot(E[:, 0], E[:, 3])\r\n ax.set_xlabel('Номер шага')\r\n ax.set_ylabel('Энергия')\r\n ax.set_title('Полная энергия системы')\r\n plt.savefig('Полная энергия системы', dpi=640)\r\n plt.show()\r\n\r\n\r\ndef plot_combined_energy(E):\r\n # Функция, создающая график потенциальной энергии частиц\r\n # за все время работы программы\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n ax.plot(E[:, 0], E[:, 3], label='Полная энергия', color='black')\r\n ax.plot(E[:, 0], E[:, 1], label='Кинетическая энергия', color='red')\r\n ax.plot(E[:, 0], E[:, 2], label='Потенциальная энергия', color='blue')\r\n ax.set_xlabel('Номер шага')\r\n ax.set_ylabel('Энергия')\r\n ax.set_title('Полная энергия системы')\r\n plt.legend()\r\n plt.savefig('Кинетическая, потенциальная, полная энергия системы', dpi=640)\r\n plt.show()\r\n\r\n\r\ndef is_gravity_field_weak(Y):\r\n # Функция, выдающая ошибку, если гравитационное поле становится\r\n # слишком сильным для применения используемой модели\r\n global error\r\n global error_name\r\n Array_phi = abs(Y[:, 10] / c_2)\r\n Array_phi = Array_phi >= 0.05\r\n if Array_phi.any():\r\n error = True\r\n error_name = 'Strong gravity field error'\r\n\r\n\r\ndef speed_limit(Y):\r\n # Функция, выдающая ошибку если скорость материальной\r\n # точки станет больше скорости света\r\n global error\r\n global error_name\r\n V = np.zeros([np.size(Y, 0), 3])\r\n V = np.multiply(Y[:, 3:6], Y[:, 3:6])\r\n V_2 = V.sum(axis=1) >= c_2\r\n if V_2.any():\r\n error = True\r\n error_name = 'FTL error'\r\n\r\n\r\ndef screenshot(System_parameters, name, point_size):\r\n # Функция для \"скирншота\" положения всех частиц\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111, projection='3d')\r\n x = System_parameters[:, 0]\r\n y = System_parameters[:, 1]\r\n z = System_parameters[:, 2]\r\n ax.scatter(x, y, z, color='red', s=point_size)\r\n ax.autoscale(False)\r\n ax.set_xlabel('x, кпк')\r\n ax.set_ylabel('y, кпк')\r\n ax.set_zlabel('z, кпк')\r\n plt.savefig(name, dpi=1280)\r\n# plt.show()\r\n\r\n\r\ndef input_int_value(msg_0, msg_1, msg_2):\r\n print(msg_0)\r\n continue_input = True\r\n while continue_input:\r\n try:\r\n variable = int(input())\r\n if variable > 0:\r\n continue_input = False\r\n except ValueError:\r\n print(msg_1)\r\n print(msg_2)\r\n return variable\r\n\r\n\r\ndef input_float_value(msg_0, msg_00, msg_000, msg_1, msg_2):\r\n print(msg_0)\r\n print(msg_00)\r\n 
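# Added comment: callers that need fewer than three prompt lines pass '' for\r\n # msg_00 / msg_000 (e.g. the Distance and eps_smooth inputs below), which\r\n # just prints empty lines here before the value is read.\r\n 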
print(msg_000)\r\n continue_input = True\r\n while continue_input:\r\n try:\r\n variable = float(input())\r\n if variable >= 0:\r\n continue_input = False\r\n else:\r\n print('Введено некорректное значение. Попробуйте еще раз')\r\n except ValueError:\r\n print(msg_1)\r\n print(msg_2)\r\n return variable\r\n\r\n\r\ndef input_float_less_1_value(msg_0, msg_00, msg_1, crit):\r\n print(msg_0)\r\n print(msg_00 + str(crit))\r\n continue_input = True\r\n while continue_input:\r\n try:\r\n variable = float(input())\r\n if (variable >= -1) and (variable <= 1):\r\n continue_input = False\r\n else:\r\n print('Введено некорректное значение. Попробуйте еще раз')\r\n except ValueError:\r\n print(msg_1)\r\n return variable\r\n\r\n# ===========================================================================\r\n# ^ Используемые функции ^\r\n\r\n\r\nif __name__ == \"__main__\":\r\n importlib.reload(tc)\r\n # v Константы v\r\n # =======================================================================\r\n # Гравитационная постоянная\r\n # G = 6.67408313 * m.pow(10, -11) # м^3/(кг*с^2)\r\n G = 4.51811511 * m.pow(10, -15) # кпк^3/(М_(Солнца)* (10^12 с)^2)\r\n # G = 4.51811511 * m.pow(10, -7) # кпк^3/(М_(Млечного пути)* (10^15 с)^2)\r\n# Скорость света\r\n # c = 299792458 # м/с\r\n c = 9.7156188999 # кпк/(10^12 с)\r\n# ===========================================================================\r\n# ^ Константы ^\r\n# v Параметры системы v\r\n# ===========================================================================\r\n# Прочие переменные (желательно не трогать)\r\n marker_size = 0.2 # 1\r\n c_2 = c * c\r\n error = False\r\n error_name = ''\r\n not_forbid_launch = True\r\n continue_input = True\r\n interrupted = False\r\n workers = os.cpu_count()\r\n msg_N_0 = 'Введите число материальных точек'\r\n msg_N_1 = 'Число материальных точек всегда должно быть целым'\r\n msg_N_2 = 'Введите число материальных точек еще раз'\r\n msg_n_0 = 'Введите количество ячеек в формате 2^n (нужно задать n)'\r\n msg_n_1 = 'Число ячеек всегда должно быть целым'\r\n msg_n_2 = 'Введите число ячеек еще раз'\r\n msg_steps_0 = 'Введите число временных шагов'\r\n msg_steps_1 = 'Введено недопустимое число шагов'\r\n msg_steps_2 = 'Введите число шагов еще раз'\r\n msg_m_0 = 'Введите среднюю массу материальных точкек в массах галактик'\r\n msg_m_00 = '(Масса галактики имеет порядок 10^41 кг)'\r\n msg_m_1 = 'Cредняя масса материальной точки должна быть числом'\r\n msg_m_2 = 'Введите среднюю массу еще раз'\r\n msg_v_0 = 'Введите среднюю скорость материальных точкек в кпк/(10^12 с)'\r\n msg_v_00 = '(1 кпк/(10^12 с) = 3,08567758*10^7 м/с)'\r\n msg_v_000 = 'ВАЖНО ПОМНИТЬ! 
c = 9.7156188999 кпк/(10^12 с)'\r\n msg_v_1 = 'Cредняя скорость материальной точки должна быть числом'\r\n msg_v_2 = 'Введите среднюю скорость материальных точек еще раз'\r\n msg_d_0 = 'Введите размер ячейки в кпк'\r\n msg_d_1 = 'Размер ячейки должен быть в виде числа'\r\n msg_d_2 = 'Введите размер ячейки еще раз'\r\n msg_t_0 = 'Введите временной шаг в единицах (10^12 с)'\r\n msg_t_1 = 'Временной шаг должен быть в виде числа'\r\n msg_t_2 = 'Введите временной шаг еще раз'\r\n msg_ind_0 = 'Введите отступ от границы рассматриваемой'\r\n msg_ind_i_0 = 'области по оси X в кпк'\r\n msg_ind_j_0 = 'области по оси Y в кпк'\r\n msg_ind_k_0 = 'области по оси Z в кпк'\r\n msg_ind_1 = 'Отступ должен быть в виде числа'\r\n msg_ind_2 = 'Вве��ите отступ еще раз'\r\n msg_i_0 = 'Введите число материальных точек по оси X'\r\n msg_j_0 = 'Введите число материальных точек по оси Y'\r\n msg_k_0 = 'Введите число материальных точек по оси Z'\r\n msg_axis_1 = 'Число материальных точек всегда должно быть целым'\r\n msg_axis_2 = 'Введите число материальных точек еще раз'\r\n msg_per_0 = 'Введите расстояние между двумя соседними точками,'\r\n msg_per_00 = 'расположенных на одной оси в единицах длины ячейки'\r\n msg_per_1 = 'Расстояние должно быть в виде числа'\r\n msg_per_2 = 'Введите расстояние ячейки еще раз'\r\n msg_a_0 = 'Введите величину полуоси эллипсоида по оси X'\r\n msg_b_0 = 'Введите величину полуоси эллипсоида по оси Y'\r\n msg_c_0 = 'Введите величину полуоси эллипсоида по оси Z'\r\n msg_abc_0 = 'от 0 до 1. Где 1 соответствует четверти размера системы'\r\n msg_abc_1 = 'Длина полуоси должна быть числом'\r\n msg_w_0 = 'Введите начальную угловую скорость в размерности рад/(10^12 с)'\r\n msg_wx_0 = 'в плоскости YZ. Величина не должна превышать '\r\n msg_wy_0 = 'в плоскости XZ. Величина не должна превышать '\r\n msg_wz_0 = 'в плоскости XY. 
Величина не должна превышать '\r\n msg_w_1 = 'Угловая скорость должна быть числом'\r\n msg_eps_0 = 'Введите смягчающую длину потенциала в кпк'\r\n msg_eps_1 = 'Смягчающая длина должна быть числом'\r\n\r\n# Временной интервал\r\n # time_step = pow(10, 13) # с\r\n time_step = 100.0 # 0.000025 # 10^12 с\r\n # time_step = 0.01 # 10^15 с\r\n\r\n# Процентное распределение материи по типу\r\n d_e = 0.70 # Темная энергия\r\n d_m = 0.25 # Темная материя\r\n v_m = 0.05 # Видимая материя\r\n\r\n# Параметр \"сглаживания\" гравитационного взаимодействия на близких дистанциях\r\n eps_smooth = 5.0 # кпк\r\n\r\n# Параметры, которые нужны чаще всего (можно и нужно трогать)\r\n# Количество ячеек по одной оси координат (для tree codes) в виде 2^(n)\r\n n = 4\r\n\r\n# Минимальный размер ячейки по одной оси координат\r\n # Distance = 2 * 3.08567758 * pow(10, 22) # м\r\n Distance = 10 * m.pow(10, 3) # кпк\r\n # Distance = 5 # Мпк\r\n\r\n# Задаем первоначальный размер системы в единицах \"Distance\"\r\n# для функции parameters_test\r\n i_test = 10\r\n j_test = 10\r\n k_test = 10\r\n indent_i = 0.0\r\n indent_j = 0.0\r\n indent_k = 0.0\r\n\r\n# Параметры генерации эллипсоида в единицах (n * Distance / 2)\r\n a_inp = 1.0\r\n b_inp = 1.0\r\n c_inp = 1.0\r\n# Начальные угловые скорости эллипсоида\r\n w_x = 0.0\r\n w_y = 0.0\r\n w_z = 0.0000005\r\n\r\n# Средняя масса наблюдаемых объектов и их пекулярная скорость\r\n # m_avg = 1.98892 * pow(10, 41) # кг\r\n # v_avg = 0 #4 * pow(10, 5) / np.sqrt(3) # м/с\r\n m_avg = pow(10, 11) # масс Солнц\r\n v_avg = 0.0 # 1.3 * pow(10, -2) / np.sqrt(3) # кпк/(10^12 c)\r\n# m_avg = 1 #масс Млечного пути\r\n # v_avg = 0 #1.3 * pow(10, -2) / np.sqrt(3) # Мпк/(10^15 c)\r\n\r\n# Количество частиц\r\n N = 1000\r\n# Число шагов\r\n Steps = 1\r\n# Номера шагов, на которых требуется \"сфотографировать положение всех\r\n# материальных точек\r\n make_prelaunch_screenshot = False\r\n scr_step = []\r\n# Тип сгенерированной системы (обязательно заполнить!)\r\n system_generation_type = 'last'\r\n# Использовать несколько процессов для вычислений\r\n use_multiprocessing = False\r\n# Использовать данные, введенные вручную\r\n use_manual_input = False\r\n# Использовать телеметрию\r\n use_telemetry = True\r\n# Обратить время вспять\r\n inverse_time = False\r\n# ===========================================================================\r\n# ^ Параметры системы ^\r\n# v Область с исполняемым кодом v\r\n# ===========================================================================\r\n if use_manual_input:\r\n print('Введите название используемой конфигурации системы')\r\n system_generation_type = str(input())\r\n Distance = input_float_value(msg_d_0, '', '', msg_d_1, msg_d_2)\r\n n = input_int_value(msg_n_0, msg_n_1, msg_n_2)\r\n time_step = input_float_value(msg_t_0, '', '', msg_t_1, msg_t_2)\r\n Steps = input_int_value(msg_steps_0, msg_steps_1, msg_steps_2)\r\n eps_smooth = input_float_value(msg_eps_0, '', '', msg_eps_1, '')\r\n if (system_generation_type == 'random') or \\\r\n (system_generation_type == 'cube') or\\\r\n (system_generation_type == 'ellipsoid'):\r\n m_avg = input_float_value(msg_m_0, msg_m_00, '', msg_m_1, msg_m_2)\r\n m_avg *= m.pow(10, 11)\r\n v_avg = input_float_value(msg_v_0, msg_v_00, msg_v_000,\r\n msg_v_1, msg_v_2)\r\n if system_generation_type == 'random':\r\n N = input_int_value(msg_N_0, msg_N_1, msg_N_2)\r\n if system_generation_type == 'ellipsoid':\r\n N = input_int_value(msg_N_0, msg_N_1, msg_N_2)\r\n w_crit = 2 * c / (n * Distance)\r\n a_inp = 
input_float_less_1_value(msg_a_0, msg_abc_0,\r\n msg_abc_1, '')\r\n b_inp = input_float_less_1_value(msg_b_0, msg_abc_0,\r\n msg_abc_1, '')\r\n c_inp = input_float_less_1_value(msg_c_0, msg_abc_0,\r\n msg_abc_1, '')\r\n w_x = input_float_less_1_value(msg_w_0, msg_wx_0,\r\n msg_w_1, w_crit)\r\n w_y = input_float_less_1_value(msg_w_0, msg_wy_0,\r\n msg_w_1, w_crit)\r\n w_z = input_float_less_1_value(msg_w_0, msg_wz_0,\r\n msg_w_1, w_crit)\r\n print('Делать скриншоты системы?')\r\n print('y/n')\r\n input_variable = input()\r\n if (input_variable == 'y') or (input_variable == 'n'):\r\n if input_variable == 'y':\r\n make_prelaunch_screenshot = True\r\n enable_screenshots = True\r\n print('Наберите номера шагов, на которых нужно')\r\n print('сделать снимок системы')\r\n print('После того, как все нужные номера введены,')\r\n print('наберите \"end\" без кавычек, чтобы продолжить')\r\n while enable_screenshots:\r\n input_var = input()\r\n if input_var == 'end':\r\n enable_screenshots = False\r\n else:\r\n try:\r\n temp_var = int(input_var)\r\n scr_step.append(temp_var)\r\n except ValueError:\r\n print('Номер шага может быть только целым числом')\r\n else:\r\n make_prelaunch_screenshot = False\r\n scr_step = []\r\n else:\r\n print('Введено недопустимое значение')\r\n print('Создание скриншотов отменено')\r\n make_prelaunch_screenshot = False\r\n print('Использовать телеметрию?')\r\n print('y/n')\r\n input_variable = input()\r\n if (input_variable == 'y') or (input_variable == 'n'):\r\n use_telemetry = input_variable == 'y'\r\n else:\r\n print('Введено недопустимое значение')\r\n print('Телеметрия не используется')\r\n use_telemetry = False\r\n print('Использовать многоядерность?')\r\n print('y/n')\r\n input_variable = input()\r\n if (input_variable == 'y') or (input_variable == 'n'):\r\n use_multiprocessing = input_variable == 'y'\r\n else:\r\n print('Введено недопустимое значение')\r\n print('Многоядерность не используется')\r\n print('Изменить знак у временного интервала?')\r\n print('y/n')\r\n input_variable = input()\r\n if (input_variable == 'y') or (input_variable == 'n'):\r\n inverse_time = input_variable == 'y'\r\n else:\r\n print('Введено недопустимое значение')\r\n inverse_time = False\r\n if (d_e >= 0) and (d_m >= 0) and (v_m > 0) \\\r\n and (abs(1 - d_e - d_m - v_m) < 0.00000000001):\r\n m_avg = m_avg * (1 + (d_m / v_m))\r\n else:\r\n not_forbid_launch = False\r\n print('Недопустимое соотношение типов материи')\r\n if (time_step <= 0) or (Distance <= 0):\r\n not_forbid_launch = False\r\n print('Недопустимые параметры системы')\r\n if n > 0:\r\n n = int(m.pow(2, int(n)))\r\n else:\r\n not_forbid_launch = False\r\n print('Количество ячеек не может быть нулевым или отрицательным')\r\n if inverse_time:\r\n time_step *= -1\r\n try:\r\n try:\r\n try:\r\n if system_generation_type == 'cube':\r\n if use_manual_input:\r\n indent_i = input_float_value(msg_ind_0, msg_ind_i_0,\r\n '', msg_ind_1, msg_ind_2)\r\n indent_j = input_float_value(msg_ind_0, msg_ind_j_0,\r\n '', msg_ind_1, msg_ind_2)\r\n indent_k = input_float_value(msg_ind_0, msg_ind_k_0,\r\n '', msg_ind_1, msg_ind_2)\r\n i_test = input_int_value(msg_i_0, msg_axis_1,\r\n msg_axis_2)\r\n j_test = input_int_value(msg_j_0, msg_axis_1,\r\n msg_axis_2)\r\n k_test = input_int_value(msg_k_0, msg_axis_1,\r\n msg_axis_2)\r\n period = input_float_value(msg_per_0, msg_per_00, '',\r\n msg_per_1, msg_per_2)\r\n X = birth_test()\r\n np.savetxt('last config.txt', X)\r\n elif system_generation_type == 'random':\r\n X = birth_random(N)\r\n np.savetxt('last 
config.txt', X)\r\n elif system_generation_type == 'ellipsoid':\r\n if (a_inp == 0) or (b_inp == 0) or (c_inp == 0):\r\n not_forbid_launch = False\r\n print('Полуоси эллипсоида не могут быть нулевыми')\r\n else:\r\n X = birth_ellipsoid(N)\r\n np.savetxt('last config.txt', X)\r\n elif system_generation_type == 'last':\r\n X = np.loadtxt('last config.txt', dtype='float64')\r\n elif system_generation_type == 'debug':\r\n X = np.loadtxt('error config.txt', dtype='float64')\r\n elif system_generation_type == 'test':\r\n X = np.loadtxt('test config.txt', dtype='float64')\r\n elif system_generation_type == 'final':\r\n X = np.loadtxt('final config.txt', dtype='float64')\r\n else:\r\n not_forbid_launch = False\r\n print('Выбранная конфигурация не может быть загружена')\r\n except IOError:\r\n not_forbid_launch = False\r\n print('Отсутствует необходимый файл конфигурации')\r\n except TypeError:\r\n not_forbid_launch = False\r\n print('Число материальных точек всегда должно быть целым')\r\n except ValueError:\r\n not_forbid_launch = False\r\n print('Неприемлимое число материальных точек')\r\n if not_forbid_launch:\r\n if np.size(X, 1) == 12:\r\n migration = np.zeros([np.size(X, 0), 2])\r\n X = np.hstack((X, migration))\r\n np.savetxt('last config.txt', X)\r\n if workers >= 8:\r\n workers = 8\r\n elif workers >= 4:\r\n workers = 4\r\n elif workers >= 2:\r\n workers = 2\r\n else:\r\n use_multiprocessing = False\r\n try:\r\n if make_prelaunch_screenshot:\r\n screenshot(X, 'Шаг 0', marker_size)\r\n Energy = np.zeros([Steps, 6])\r\n start = time.time()\r\n for q in range(Steps):\r\n speed_limit(X)\r\n is_gravity_field_weak(X)\r\n if error:\r\n np.savetxt('error config.txt', X)\r\n screenshot(X, error_name, marker_size)\r\n print(error_name + ' at step ' + str(q))\r\n break\r\n X = tree_code_gravity(X)\r\n Energy[q] = [q,\r\n system_kinetic_energy(X),\r\n system_potential_energy(X),\r\n system_energy_Newton(X),\r\n max_dT(X),\r\n max_dU(X)]\r\n X[:, 12] = kinetic_energy_Newton(X)\r\n X[:, 13] = potential_energy_Newton(X)\r\n if q in scr_step:\r\n screenshot(X, 'Шаг ' + str(q), marker_size)\r\n computing_time = time.time() - start\r\n print(\"Время выполнения\", computing_time, \"с\")\r\n if use_telemetry:\r\n momentum_of_system(X)\r\n plot_max_dE_kinetic(Energy)\r\n plot_max_dE_potential(Energy)\r\n plot_avg(Energy)\r\n plot_system_enegry(Energy)\r\n plot_total_energy(Energy)\r\n plot_combined_energy(Energy)\r\n except KeyboardInterrupt:\r\n print('Работа программы прервана')\r\n momentum_of_system(X)\r\n plot_max_dE_kinetic(Energy)\r\n plot_max_dE_potential(Energy)\r\n plot_avg(Energy)\r\n plot_system_enegry(Energy)\r\n plot_total_energy(Energy)\r\n plot_combined_energy(Energy)\r\n print('Сохранить финальную конфигурацию системы?')\r\n print('y/n')\r\n input_variable = input()\r\n if input_variable == 'y':\r\n np.savetxt('final config.txt', X)\r\n elif input_variable == 'n':\r\n print('Конфигурация не будет сохранена')\r\n else:\r\n print('Введено недопустимое значение')\r\n# ===========================================================================\r\n# ^ Область с исполняемым кодом ^\r\n","sub_path":"N-body.py","file_name":"N-body.py","file_ext":"py","file_size_in_byte":44602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"98347049","text":"from django.conf import settings\nfrom django.db import models\nfrom django.urls import reverse\nfrom ckeditor.fields import RichTextField\nfrom solo.models import SingletonModel\nfrom uuslug import 
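The cascading `if workers >= 8 ... elif workers >= 2` clamp above rounds the available process count down to 8, 4 or 2 and disables multiprocessing below that. An equivalent bit-trick, shown only as a sketch with an illustrative name:

def clamp_workers(workers):
    """Round a worker count down to 8, 4 or 2; below 2 multiprocessing is off."""
    if workers < 2:
        return None  # caller would set use_multiprocessing = False
    return 1 << min(3, workers.bit_length() - 1)

for w in range(1, 12):
    print(w, clamp_workers(w))  # 1 -> None, 2-3 -> 2, 4-7 -> 4, 8+ -> 8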
uuslug\n\n\nclass SiteMetaBase(models.Model):\n seo_title = models.CharField(max_length=200, verbose_name='SEO Заголовок')\n seo_description = models.TextField(max_length=300, verbose_name='SEO Описание')\n created = models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')\n updated = models.DateTimeField(auto_now=True, verbose_name='Дата изменения')\n\n class Meta:\n abstract = True\n\n\nclass Page(SiteMetaBase):\n title = models.CharField(max_length=200, verbose_name='Заголовок')\n content = RichTextField(blank=True, null=True, verbose_name='Контент')\n owner = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, related_name='pages',\n on_delete=models.CASCADE, verbose_name='Владелец')\n last_editor = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, related_name='lastEditPages',\n on_delete=models.CASCADE, verbose_name='Последний редактор')\n slug = models.SlugField(max_length=200, blank=True, null=True, verbose_name='Короткая ссылка')\n is_front = models.BooleanField(default=False, verbose_name='Главная страница')\n\n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n if self.is_front:\n return reverse('home')\n else:\n return reverse('page', args=[str(self.slug)])\n\n class Meta:\n ordering = ['pk']\n verbose_name = \"Страница\"\n verbose_name_plural = \"Страницы\"\n\n\nclass Card(SiteMetaBase):\n title = models.CharField(max_length=200, verbose_name='Заголовок')\n text = RichTextField(blank=True, null=True, verbose_name='Текст')\n image = models.ImageField(upload_to='cards/', verbose_name='Картинка')\n pageId = models.ManyToManyField('Page', blank=True, related_name='cards', verbose_name='Страницы')\n slug = models.SlugField(max_length=200, blank=True, null=True, verbose_name='ЧПУ ссылка')\n\n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n from django.urls import reverse\n return reverse('party-detail', args=[str(self.slug)])\n\n def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = uuslug(self.title, instance=self)\n super(Card, self).save(*args, **kwargs)\n\n class Meta:\n ordering = ['pk']\n verbose_name = 'Услуга'\n verbose_name_plural = 'Услуги'\n\n\nclass CardPhoto(models.Model):\n card = models.ForeignKey(Card, related_name='photos', on_delete=models.CASCADE)\n title = models.CharField(max_length=150, blank=True, null=True, verbose_name='Заголовок')\n file = models.ImageField(upload_to='cards/photo', verbose_name='Фото')\n\n def __str__(self):\n if self.title:\n return self.title\n else:\n return str(self.file.name)\n\n class Meta:\n verbose_name = 'Фото'\n verbose_name_plural = 'Фото'\n\n\nclass SiteConfiguration(SingletonModel):\n site_name = models.CharField(max_length=255, default='Название сайта', verbose_name='Название сайта')\n site_descritpion = models.TextField(max_length=255, default='Описание сайта', verbose_name='Описание сайта')\n address = models.CharField(max_length=255, default='Адрес', verbose_name='Адрес')\n phones = models.CharField(max_length=255, default='+7 999 999-99-99', help_text='Телефоны указывать через запятую',\n verbose_name='Телефоны')\n maintenance_mode = models.BooleanField(default=False, verbose_name='Режим обслуживания')\n\n def __str__(self):\n return \"Настройки сайта\"\n\n def phone_list(self):\n return self.phones.split(', ')\n\n class Meta:\n verbose_name = \"Настройки сайта\"\n verbose_name_plural = \"Настройки сайта\"\n\n\nclass SocialLink(models.Model):\n SOCIAL_CHOICES = (\n ('vk', 'Вконтакте'),\n ('odnoklassniki', 
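Card.save() above defers slug uniqueness to uuslug(self.title, instance=self). A dependency-free sketch of the same append-a-counter idea, assuming an in-memory set of taken slugs where the real version would query the model's manager:

import re
import unicodedata

def slugify(text):
    """ASCII-fold, lowercase, hyphenate -- a rough stand-in for Django's slugify."""
    text = unicodedata.normalize("NFKD", text).encode("ascii", "ignore").decode()
    text = re.sub(r"[^\w\s-]", "", text).strip().lower()
    return re.sub(r"[-\s]+", "-", text)

def unique_slug(title, taken):
    """Append -2, -3, ... until the slug is free, as uuslug does per instance."""
    base = slug = slugify(title)
    n = 2
    while slug in taken:
        slug = "%s-%d" % (base, n)
        n += 1
    taken.add(slug)
    return slug

taken = set()
print(unique_slug("Web Design", taken))  # web-design
print(unique_slug("Web Design", taken))  # web-design-2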
'Одноклассники'),\n ('instagram', 'Instagram'),\n ('youtube', 'YouTube'),\n ('facebook', 'Facebook'),\n )\n social = models.CharField(max_length=13, blank=True, null=True, choices=SOCIAL_CHOICES,\n verbose_name='Социальная сеть')\n link = models.CharField(max_length=255, blank=True, null=True, verbose_name='Ссылка на соц. сеть')\n site = models.ForeignKey(SiteConfiguration, related_name='socials', on_delete=models.CASCADE)\n\n def __str__(self):\n return self.social\n\n class Meta:\n verbose_name = \"Соц. сеть\"\n verbose_name_plural = \"Соц. сети\"\n\n\nclass Message(models.Model):\n name = models.CharField(max_length=255, verbose_name='Имя')\n phone = models.CharField(max_length=255, verbose_name='Телефон')\n comment = models.TextField(max_length=255, verbose_name='Телефон')\n created = models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = 'Сообщение'\n verbose_name_plural = 'Сообщения'","sub_path":"alex_site/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"156964660","text":"\"\"\"\nDjango settings for pyconng project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nimport environ as environmental\n\nenv = environmental.Env()\n\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\nROOT_DIR = environmental.Path(__file__) - 2 # (crosscheck/config/settings.py - 2 = crosscheck/)\nAPPS_DIR = ROOT_DIR.path('pyconng')\n\nempty = object()\ndef environ(key, default=empty):\n try:\n return os.environ[key]\n except KeyError:\n if default is empty:\n raise RuntimeError('environment variable \"%s\" does not exist' % (key))\n return default\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'qxl(3u+8%bb079sy%=^wxu5@)h68+hw#s_e6-lv3#n1^z^e4nm'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\nALLOWED_HOSTS = [\n os.environ.get(\"GONDOR_INSTANCE_DOMAIN\"),\n \"2016.djangocon.us\",\n \"www.djangocon.us\",\n \"localhost',\"\n]\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n \"django.contrib.sites\",\n 'django.contrib.staticfiles',\n\n # external\n \"account\",\n \"crispy_forms\",\n \"easy_thumbnails\",\n # \"taggit\",\n \"reversion\",\n # \"metron\",\n \"sitetree\",\n \"waffle\",\n \"markitup\",\n\n # pinax\n \"pinax.boxes\",\n # \"pinax.eventlog\",\n \"pinax.pages\",\n # \"pinax.blog\",\n\n # symposion\n \"symposion\",\n \"symposion.conference\",\n \"symposion.speakers\",\n \"symposion.proposals\",\n \"symposion.reviews\",\n \"symposion.schedule\",\n \"symposion.sponsorship\",\n \"symposion.teams\",\n\n # project\n \"pyconng.proposals\",\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 
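The environ() helper above uses a sentinel object so that callers can still pass None as a legitimate default. The same pattern in isolation, with an illustrative name:

import os

_missing = object()  # sentinel: distinct from every value a caller could pass

def require_env(key, default=_missing):
    """Read an environment variable; raise only when no default was given."""
    try:
        return os.environ[key]
    except KeyError:
        if default is _missing:
            raise RuntimeError('environment variable "%s" does not exist' % key)
        return default

os.environ["DEMO_VAR"] = "42"
print(require_env("DEMO_VAR"))      # 42
print(require_env("ABSENT", None))  # None is a valid default here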
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n)\n\nROOT_URLCONF = 'config.urls'\n\nWSGI_APPLICATION = 'config.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\nDATABASES = {\n # Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ\n 'default': env.db('DATABASE_URL', default='postgres://postgres:postgres@127.0.0.1/pyconng'),\n}\n# DATABASES = {\n# 'default': {\n# 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n# 'NAME': environ(\"DB_NAME\"),\n# 'USER': environ(\"DB_USER\"),\n# 'PASSWORD': environ(\"DB_PASSWORD\"),\n# 'HOST': environ(\"DB_HOST\"),\n# 'PORT': environ(\"DB_PORT\"),\n# }\n# }\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/home/media/media.lawrence.com/media/\"\nMEDIA_ROOT = str(ROOT_DIR('media'))\n\n# URL that handles the media served from MEDIA_ROOT. Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = \"/media/\"\n\n# Absolute path to the directory static files should be collected to.\n# Don\"t put anything in this directory yourself; store your static files\n# in apps\" \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = str(ROOT_DIR('staticfiles'))\n\n\nSTATIC_URL = '/static/'\n\nSTATICFILES_DIRS = (\n str(APPS_DIR.path('static')),\n)\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\nTEMPLATES = [\n {\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs\n 'DIRS': [\n str(APPS_DIR.path('templates')),\n ],\n 'OPTIONS': {\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug\n 'debug': DEBUG,\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders\n # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types\n 'loaders': [\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n ],\n # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n # Your stuff: custom template context processors go here,\n \"account.context_processors.account\",\n \"symposion.reviews.context_processors.reviews\",\n 'config.context_processors.consts',\n ],\n },\n },\n]\n\n\nEMAIL_BACKEND = 
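env.db('DATABASE_URL', ...) above expands a single URL into Django's DATABASES mapping. A rough sketch of that parsing with only the standard library, assuming a postgres:// URL; django-environ itself covers many more schemes and options:

from urllib.parse import urlparse

def db_from_url(url):
    p = urlparse(url)
    return {
        "ENGINE": "django.db.backends.postgresql_psycopg2",
        "NAME": p.path.lstrip("/"),
        "USER": p.username,
        "PASSWORD": p.password,
        "HOST": p.hostname,
        "PORT": p.port or 5432,  # default postgres port when the URL omits it
    }

print(db_from_url("postgres://postgres:postgres@127.0.0.1/pyconng"))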
\"django.core.mail.backends.console.EmailBackend\"\n\nACCOUNT_EMAIL_AUTHENTICATION = False\nACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 2\nACCOUNT_EMAIL_CONFIRMATION_REQUIRED = False\nACCOUNT_EMAIL_VERIFICATION = False\nACCOUNT_LOGIN_URL = LOGIN_URL = '/account/login/'\nACCOUNT_LOGIN_REDIRECT_URL = \"dashboard\"\nACCOUNT_LOGOUT_REDIRECT_URL = \"home\"\nACCOUNT_OPEN_SIGNUP = True\nACCOUNT_SIGNUP_REDIRECT_URL = \"dashboard\"\nACCOUNT_UNIQUE_EMAIL = EMAIL_CONFIRMATION_UNIQUE_EMAIL = False\nACCOUNT_USE_AUTH_AUTHENTICATE = True\nACCOUNT_USER_DISPLAY = lambda user: user.email\n\nAUTHENTICATION_BACKENDS = [\n \"symposion.teams.backends.TeamPermissionsBackend\",\n \"account.auth_backends.UsernameAuthenticationBackend\",\n]\n\n# Symposion settings\n\nCONFERENCE_ID = 1\nPROPOSAL_FORMS = {\n \"tutorial\": \"pyconng.proposals.forms.TutorialProposalForm\",\n \"talk-25-min\": \"pyconng.proposals.forms.TalkProposalForm\",\n \"talk-45-min\": \"pyconng.proposals.forms.TalkProposalForm\",\n \"open-space\": \"pyconng.proposals.forms.OpenSpaceProposalForm\",\n}\nPINAX_PAGES_HOOKSET = \"config.hooks.PinaxPagesHookSet\"\nPINAX_BOXES_HOOKSET = \"config.hooks.PinaxBoxesHookSet\"\n\n# adjust for number of reviews currenly about 1/5 (default: 3)\nSYMPOSION_VOTE_THRESHOLD = 6\n\nMARKITUP_SET = \"markitup/sets/markdown\"\nMARKITUP_FILTER = [\"symposion.markdown_parser.parse\", {}]\nMARKITUP_SKIN = \"markitup/skins/simple\"\n\nTHEME_CONTACT_EMAIL = 'hello@djangocon.us'\n\nADMINS = [\n ('DjangoCon US Errors', 'errors@defna.org'),\n]\n\nMANAGERS = [\n ('DjangoCon US', 'hello@djangocon.us'),\n]\n\nSERVER_EMAIL = ''\nDEFAULT_FROM_EMAIL = \"DjangoCon US 2016 \"\n\n# See: http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs\nCRISPY_TEMPLATE_PACK = 'bootstrap3'\n\n# MIGRATION_MODULES = {\n# 'sites': 'pyconng.contrib.sites.migrations'\n# }\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id\nSITE_ID = 1\n\nFIXTURE_DIRS = [\n str(ROOT_DIR('fixtures')),\n]\n","sub_path":"config/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":8080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"100679896","text":"from vpython import*\r\n\r\nN = int(input('How many balls to be lifted?[1-4]'))\r\nNN, size = 5, 1\r\nm = [4,4,4,4,4]\r\nangle = pi/6\r\nk, L = 15000, 16\r\ng = 9.8\r\nballs_reference, balls, balls_stick = [0]*5, [0]*5, [0]*5\r\nscene = canvas(width=500, height=500, center=vec(0, 0, 0), background=vec(0.5, 0.5, 0))\r\nceiling = box(length=18, height=0.4, width=1, pos=vec(0,8,0), color=color.blue)\r\nfor i in range(NN):\r\n balls_reference[i] = sphere(pos=vec(-4+2*i,8,0), radius=0.4)\r\n balls_stick[i] = cylinder(radius=0.2, pos=vec(-4+2*i,8,0), axis=vec(0,-L,0))\r\n balls[i] = sphere(pos=vec(-4+2*i,-8,0), radius=size, color=color.black)\r\n balls[i].v = vec(0,0,0)\r\nfor i in range(N):\r\n balls_stick[i].axis = vec(-sin(angle),-cos(angle),0)*L\r\n balls[i].pos = balls_stick[i].pos + balls_stick[i].axis\r\n\r\ndef af_col_v(m1, m2, v1, v2, x1, x2): # function after collision velocity\r\n v1_prime = v1 + 2*(m2/(m1+m2))*(x1-x2) * dot (v2-v1, x1-x2) / dot (x1-x2, x1-x2)\r\n v2_prime = v2 + 2*(m1/(m1+m2))*(x2-x1) * dot (v1-v2, x2-x1) / dot (x2-x1, x2-x1)\r\n return (v1_prime, v2_prime)\r\n\r\ndt = 0.0005\r\ntime = 0\r\nwhile True:\r\n rate = (2000)\r\n for i in range(NN):\r\n balls_stick[i].axis = balls[i].pos - balls_stick[i].pos\r\n spring_force = -k * (mag(balls_stick[i].axis) - L) * balls_stick[i].axis.norm()\r\n 
balls[i].a = vector(0,-g,0) + spring_force / m[i]\r\n balls[i].v += balls[i].a*dt\r\n balls[i].pos += balls[i].v*dt\r\n for i in range(NN-1):\r\n if (mag(balls[i].pos-balls[i+1].pos)) <= 2*size and dot(balls[i].pos-balls[i+1].pos, balls[i].v-balls[i+1].v) <= 0:\r\n balls[i].v, balls[i+1].v = af_col_v (m[i], m[i+1], balls[i].v, balls[i+1].v, balls[i].pos, balls[i+1].pos)","sub_path":"physics_hw/b07901020(4)/must.py","file_name":"must.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"97465029","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Test de unidad para sincronizador.py\"\"\"\n# Este modulo esta muy acoplado con registrar y no esta bueno el tema de los\n# tests hasta que no se limpie un poco\n\n# Modulos externos\nimport sys\nimport unittest\nimport sqlite3\n\n# Modulos propios\nsys.path.append('../clases')\n\nfrom sincronizador import Sincronizador, config\n\n\nclass verificadorDeSincronizacion(unittest.TestCase):\n global sync\n sync = Sincronizador()\n\n def test1SincronizacionDeDominios(self):\n \"\"\"Verificacion de sincronizacion de dominios publicamente \"\"\"\\\n \"\"\"denegados/permitidos con el server\"\"\"\n\n # Me conecto a la base y borro los dominios que est\n conexion_db = sqlite3.connect(config.PATH_DB)\n cursor = conexion_db.cursor()\n cursor.execute('delete from dominios_publicamente_permitidos')\n cursor.execute('delete from dominios_publicamente_denegados')\n conexion_db.commit()\n\n # Pido la sincronizacion\n sync.sincronizarDominiosDenegados()\n sync.sincronizarDominiosPermitidos()\n cantidadDominiosPermitidos = cursor.execute(\n 'select count(*) from dominios_publicamente_permitidos'\n ).fetchone()[0]\n cantidadDominiosDenegados = cursor.execute(\n 'select count(*) from dominios_publicamente_denegados'\n ).fetchone()[0]\n\n self.assertTrue(\n cantidadDominiosPermitidos > 0 and\n cantidadDominiosDenegados > 0\n )\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"cliente/branches/cliente-gae/tests/sincronizadorTest.py","file_name":"sincronizadorTest.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"604296274","text":"\"\"\"\nДомашнее задание №1\n\nИспользование библиотек: ephem\n\n* Установите модуль ephem\n* Добавьте в бота команду /planet, которая будет принимать на вход\n название планеты на английском, например /planet Mars\n* В функции-обработчике команды из update.message.text получите\n название планеты (подсказка: используйте .split())\n* При помощи условного оператора if и ephem.constellation научите\n бота отвечать, в каком созвездии сегодня находится планета.\n\n\"\"\"\nimport logging\nimport ephem\nfrom datetime import datetime\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters\n\ncurrent_date = (datetime.date(datetime.now()))\n\nlogging.basicConfig(format='%(name)s - %(levelname)s - %(message)s',\n level=logging.INFO,\n filename='bot.log')\n\n\nPROXY = {\n 'proxy_url': 'socks5://t1.learn.python.ru:1080',\n 'urllib3_proxy_kwargs': {\n 'username': 'learn',\n 'password': 'python'\n }\n}\n\n\ndef greet_user(update, context):\n text = 'Вызван /start'\n update.message.reply_text('Привет! 
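af_col_v above is the standard elastic hard-sphere update: the velocity components along the line of centres are exchanged, weighted by mass. A quick numpy check, with illustrative names, that the formula conserves momentum and kinetic energy:

import numpy as np

def after_collision(m1, m2, v1, v2, x1, x2):
    d = x1 - x2
    s = np.dot(v2 - v1, d) / np.dot(d, d)      # relative speed along the centre line
    v1p = v1 + 2 * (m2 / (m1 + m2)) * d * s
    v2p = v2 - 2 * (m1 / (m1 + m2)) * d * s
    return v1p, v2p

rng = np.random.default_rng(0)
m1, m2 = 4.0, 4.0
v1, v2 = rng.normal(size=3), rng.normal(size=3)
x1, x2 = np.array([0.0, 0.0, 0.0]), np.array([2.0, 0.0, 0.0])
v1p, v2p = after_collision(m1, m2, v1, v2, x1, x2)
assert np.allclose(m1 * v1 + m2 * v2, m1 * v1p + m2 * v2p)    # momentum
assert np.isclose(m1 * v1 @ v1 + m2 * v2 @ v2,
                  m1 * v1p @ v1p + m2 * v2p @ v2p)            # twice kinetic energy
print("elastic collision conserves momentum and kinetic energy")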
Напиши название планеты в формате: /planet Mars')\n\n\ndef talk_to_me(update, context):\n try:\n user_text = update.message.text.split()\n planet = getattr(ephem, user_text[1])()\n planet.compute(ephem.Date(current_date))\n const = ephem.constellation(planet)\n print(const)\n update.message.reply_text(const)\n except AttributeError:\n print(update.message.reply_text('Такую планету еще не открыли')) \n\n\ndef main():\n mybot = Updater(\"1940519188:AAFtdXOZrb8j8PydiGJphA6UdWAfE05TBr0\", request_kwargs=PROXY, use_context=True)\n\n dp = mybot.dispatcher\n dp.add_handler(CommandHandler(\"start\", greet_user))\n dp.add_handler(MessageHandler(Filters.text, talk_to_me))\n\n mybot.start_polling()\n mybot.idle()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"8_ephem_bot.py","file_name":"8_ephem_bot.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"87453062","text":"import urllib.request\nimport printlog as pr\n\nclass Downloader:\n downloaded = 0\n downloadFinished = False\n downloadStarted = False\n def downloadfile(self,path, filename, url):\n pr.pl(\"Downloading file \"+ str(url))\n file = path + \"/\" + filename\n self.downloadStarted = True\n self.downloadFinished = False\n urllib.request.urlretrieve(url,file,self.show_prorgress)\n self.downloadFinished = True\n pr.pl(\"Completed download\")\n self.downloaded = 0\n\n def show_prorgress(self, count,block_size,total_size):\n self.downloaded += block_size\n print(str(round(self.downloaded / total_size * 100)) + \"%\")\n\n","sub_path":"src/trainerdownload.py","file_name":"trainerdownload.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"234539550","text":"\"\"\"\nAsk the user for a string and print out whether \nthis string is a palindrome or not. 
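talk_to_me above resolves the planet class by name with getattr(ephem, ...) and asks ephem.constellation where it currently sits. The same lookup without the bot wiring, assuming the ephem package is installed:

import ephem

for name in ("Mars", "Jupiter"):
    body = getattr(ephem, name)()   # ephem.Mars, ephem.Jupiter, ...
    body.compute("2021/8/1")        # any date string ephem.Date accepts
    print(name, ephem.constellation(body))  # ('abbreviation', 'full name') tuple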
(A palindrome is a string that reads the same forwards and backwards.)\n\"\"\"\n\n\ndef main():\n print('Please enter a string')\n string = raw_input()\n string = string.strip().lower()\n return is_palindrome(string)\n\n\ndef is_palindrome(string):\n rev_str = string[::-1]\n if string == rev_str:\n print('given string is a palindrome')\n else:\n print('not palindrome')\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"ex6.py","file_name":"ex6.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"463318626","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Sep 17 21:10:15 2018\r\n\r\n@author: eduarado\r\n\"\"\"\r\n\r\nimport os\r\nimport ntpath\r\n\r\n# Caminho para a pasta com subpastas/imagens\r\npath = \"C:\\\\Users\\\\eduar\\\\Desktop\\\\ficahs\\\\novas fichas\\\\Imagens\\\\\"\r\n\r\n#acessa o diretorio de forma recursiva para acessar as subpastas\r\nfor root, dir, files in os.walk(path):\r\n #acessa as subpastas para acessas os arquivos dentro delas\r\n for file in files:\r\n dirname = ntpath.basename(root)\r\n #caminho original\r\n ori = root + '\\\\' + file\r\n #adiciona o nome da pasta + underscore + a posição do arquivo + a extensão\r\n name, ext = os.path.splitext(file) \r\n dest = dirname + '_' + str(files.index(file)) + ext\r\n os.rename(ori, dest)\r\n \r\n\r\n\r\n\r\n \r\n","sub_path":"renomear_arquivos.py","file_name":"renomear_arquivos.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"183790088","text":"from django.core.management.base import BaseCommand, CommandError\nfrom randomuser.models import Users\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument('name', type=str)\n\n def handle(self, *args, **options):\n name_up = options.get('name')\n users = Users.objects.filter(name=name_up)\n if users.exists():\n users.delete()\n else:\n raise CommandError(\"Not found\")\n","sub_path":"randomuser/management/commands/delete_command.py","file_name":"delete_command.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"136411146","text":"from django.shortcuts import render, redirect\nfrom .models import Person, Social, Skill, Education, Experience, Stat, ImagePortfolio, SliderPortfolio, YoutubeVideoPortfolio, LocalVideoPortfolio\nfrom django.views.generic import UpdateView\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.core.mail import send_mail, BadHeaderError\nfrom .forms import ContactForm\n\n\n\"\"\" \nContext ={} is being used as a context dictionary to make to code look a bit cleaner and easier to understand.\n\n\n\"\"\"\n\n\n#Home page Views \n\ndef HomeView(request):\n persons = Person.objects.first()\n socials = Social.objects.all()\n\n context = {'persons':persons, 'socials': socials}\n return render(request, \"resume/home.html\", context)\n\n#About page Fuction based View \n\ndef AboutView(request):\n persons = Person.objects.first()\n skills = Skill.objects.all()\n experiences = Experience.objects.all()\n education = Education.objects.all()\n stat = Stat.objects.first()\n\n context = {'persons':persons,\n 'skills':skills,\n 'experiences':experiences,\n 'education':education,\n 'stat':stat,\n \n }\n return render(request, \"resume/about.html\", context)\n\n\n#Portfolio page unction based view\ndef 
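The renaming walk in renomear_arquivos.py above builds dest from the folder name alone, so os.rename actually relocates every file into the current working directory, and files.index(file) repeats for duplicate names. A sketch that keeps each file inside its own folder, assuming the same folder_index.ext naming scheme:

import os
import ntpath

def rename_in_place(path):
    for root, dirs, files in os.walk(path):
        dirname = ntpath.basename(root)
        for i, name in enumerate(files):  # enumerate avoids files.index(name)
            _, ext = os.path.splitext(name)
            new_name = "%s_%d%s" % (dirname, i, ext)
            os.rename(os.path.join(root, name),
                      os.path.join(root, new_name))  # stays inside root

# rename_in_place("C:\\Users\\eduar\\Desktop\\ficahs\\novas fichas\\Imagens")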
PortfolioView(request):\n image_portfolio = ImagePortfolio.objects.all()\n slider_portfolio = SliderPortfolio.objects.all()\n local_video_portfolio = LocalVideoPortfolio.objects.all()\n youtube_portfolio = YoutubeVideoPortfolio.objects.all()\n\n\n\n context = {'image_portfolio':image_portfolio,\n 'slider_portfolio': slider_portfolio,\n 'local_video_portfolio':local_video_portfolio,\n 'youtube_portfolio':youtube_portfolio,\n }\n return render(request, \"resume/portfolio.html\", context)\n\n\n#Contact me page Fuction based view \ndef ContactView(request):\n persons = Person.objects.first()\n socials = Social.objects.all()\n form = ContactForm()\n\n if request.method == 'GET':\n form = ContactForm()\n else:\n form = ContactForm(request.POST)\n if form.is_valid():\n subject = form.cleaned_data['subject']\n from_email = form.cleaned_data['from_email']\n message = form.cleaned_data['message']\n try:\n send_mail(subject, message, from_email, ['bukhosi@symaxx.com'])\n except BadHeaderError:\n return HttpResponse('Invalid header found.')\n return redirect('home')\n\n\n context={'persons': persons, 'socials':socials, 'form':form}\n return render(request, \"resume/contact.html\", context)\n\n\"\"\" ######################## END FRONT VIEWS ############################ \"\"\"\n\n#These will all be dashboard view to make sure that we have easier way to edit and change our portfolio\n\nclass PortfolioEdit(UpdateView):\n model = Person\n template_name = \"resume/portfolio-edit.html\"\n fields = '__all__'","sub_path":"resume/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"567832112","text":"# -*- coding: utf-8 -*-\r\n\r\nif not request.is_local: raise HTTP(200, 'error')\r\n\r\ndef recalc():\r\n wagers_count = bets_count = 0\r\n for cash in db(db.cash.used==True).select():\r\n cash_id = cash.id\r\n wagers_cash = bets_cash = 0\r\n total = Decimal(0)\r\n sts = db(db.stats_cash.cash_id == cash_id).select().first()\r\n if not sts:\r\n sts = db.stats_cash[ db.stats_cash.insert( cash_id = cash_id )]\r\n for wager in db(db.wagers.cash_id == cash_id ).select():\r\n total += wager.total\r\n wagers_cash += 1\r\n sts.update_record( total = total, wagers = wagers_cash )\r\n wagers_count += wagers_cash\r\n\r\n stats = db(db.stats).select().first()\r\n stats.update_record( men = db(db.men).count(), wagers = wagers_count, bets = bets_count)\r\n\r\ndef index():\r\n h = CAT()\r\n for cash in db(db.cash.used==True).select():\r\n cash_id = cash.id\r\n sts = db(db.stats_cash.cash_id == cash_id).select().first()\r\n if not sts: continue\r\n h += SPAN(' ', round(float(sts.total),6),\r\n IMG(_src=URL('static', 'images/cash/' + cash.img_name), _width=30, _alt=''))\r\n return dict(h=h)\r\n","sub_path":"controllers/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"36909533","text":"import requests\nimport json\nfrom comon.log_handel import logger\nclass Request:\n def __init__(self,url):\n \"\"\"\n :param url: 必须传入的url\n \"\"\"\n self.url = url\n def re_get(self,*args,**kwargs):\n \"\"\"\n :param args: get请求传入的参数\n :param kwargs:\n :return:\n \"\"\"\n res = requests.get(self.url,*args,**kwargs)\n return res.text\n\n def re_post(self,body,*args,**kwargs):\n \"\"\"\n :param body: 请求体\n :param headers: 请求头,以及其他的内容\n :param kwargs:\n :return:\n \"\"\"\n res = 
requests.post(self.url,json.dumps(body),*args,**kwargs)\n return res.text\n\n\n def res(self,request,body,*args,**kwargs):\n logger.info(\"你输入的请求为{}\".format(request))\n if request.lower() == \"get\":\n\n res = self.re_get(*args,**kwargs)\n return res\n elif request.lower() == \"post\":\n res = self.re_post(body,*args,**kwargs)\n return res\n else:\n # return '你输入请求error{}'.format(request)\n logger.info('该版本没有您输入这种请求的处理方法{}'.format(request))\n\n\n\nif __name__ == '__main__':\n\n url= \"http://120.78.128.25:8766/futureloan/member/register\"\n\n hea = {\"Content-Type\":'application/json',\"X-Lemonban-Media-Type\":'lemonban.v2'}\n\n ti = {\"mobile_phone\":\"13891291198\",\"pwd\":\"hello1223\"}\n\n R = Request(url)\n c=R.res(request='pot',body=ti,headers=hea)\n\n","sub_path":"comon/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"172468890","text":"import itchat\nimport time\n\n\ndef itchat_SendMsg(nm, msg, second=None):\n from time import time\n\n try:\n itchat.auto_login(hotReload=True)\n myfriend = itchat.search_friends(name=nm)\n myfriendUserName = myfriend[0]['UserName']\n print(myfriendUserName)\n itchat.send(msg, toUserName=myfriendUserName)\n if second != None:\n t = time(second, itchat_SendMsg(nm, msg, second))\n t.start()\n except:\n message = u'It is wrong!!'\n itchat.send(message, toUserName='filehelper')\n\n\nif __name__ == '__main__':\n\n lines_list = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\"]\n sleep_list = [5, 10, 4, 13, 9, 6, 11]\n\n j = 0\n\n while True:\n for i in range(len(lines_list)):\n itchat_SendMsg('Shadow', lines_list[i])\n j += 1\n\n # for num in range(len(sleep_list)):\n # time.sleep(sleep_list[num])\n\n if j >= 30:\n break\n\n print(\"It's OK!!!\")\n\n\n\n\n","sub_path":"Python/try_test/daughter.py","file_name":"daughter.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"335958643","text":"import json\nimport pandas as pd\nimport requests\nfrom flask import Flask, request, Response, make_response\nimport process\n\napp = Flask(__name__)\n\ndata = process.Reader('tideReadings.csv')\nstation_data = pd.read_csv(\"stations.csv\")\n\n\n# This is a stub showing the beginnings of one required endpoint\n# Must be editted to match API.rst description.\n@app.route('/station/json', methods=[\"GET\"])\ndef station_info():\n \"\"\"Return station info.\n\n The endpoint accepts query parameters:\n * stationName\n * stationRef\n\n At least one must be present.\n \"\"\"\n\n station_reference = request.args.get(\"stationReference\")\n station_name = request.args.get(\"stationName\")\n station_name = station_name.replace(\" \",\"+\")\n \n if station_name is not None:\n station = station_data.loc[station_data.stationName == station_name]\n else:\n station = station_data.loc[station_data.stationReference == station_reference]\n result_station = station.iloc[0]\n\n details = requests.get(result_station.stationURL).json()\n result = {\n \"stationName\": result_station.stationName,\n \"stationReference\": result_station.stationReference,\n \"northing\": details['items']['northing'],\n \"easting\": details['items']['easting'],\n \"latitude\": details['items']['lat'],\n \"longitude\": details['items']['long']\n }\n return json.dumps(result)\n\n# This is an example of a quick way to send a file.\n# There is plenty of room for 
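The res() dispatcher above branches on the method string by hand; requests can also be dispatched by attribute, and its json= keyword serializes the body and sets the Content-Type header itself. A sketch (no request is sent at import time):

import requests

def call(method, url, **kwargs):
    """Dispatch to requests.get / requests.post / ... by method name."""
    fn = getattr(requests, method.lower(), None)
    if fn is None:
        raise ValueError("unsupported HTTP method: %s" % method)
    return fn(url, **kwargs)

# e.g. call('post', url, json=body, headers=hea) -- the json= keyword
# handles json.dumps(body) and the application/json header automatically.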
improvement.\n@app.route('/data/graph', methods=[\"GET\"])\ndef data_graph():\n \"\"\"Return a graph of station data.\n\n The endpoint accepts query parameters:\n * stationName\n * stationRef\n * from\n * to\n \"\"\"\n station_reference = request.args.get(\"stationReference\")\n station_name = request.args.get(\"stationName\")\n station_name = station_name.replace(\" \",\"+\")\n\n if station_name is not None:\n # station_data = station_data.replace(\" \", \"+\")\n station = station_data.loc[station_data.stationName == station_name]\n else:\n station = station_data.loc[station_data.stationReference == station_reference]\n result_station = station.iloc[0]\n\n # Get optional parameters\n time_from = request.args.get(\"from\")\n time_to = request.args.get(\"to\")\n if time_from:\n pass\n else:\n time_from = None\n if time_to:\n pass\n else:\n time_to = None\n # plot pic\n magic_trick= data.station_graph(result_station.stationName, time_from, time_to)\n # img_stream = io.BytesIO(img)\n # img = Image.open(img_stream)\n # imgByteArr = io.BytesIO()\n # img.save(imgByteArr,format='PNG')\n # imgByteArr = imgByteArr.getvalue()\n # return send_file(io.BytesIO(imgByteArr),\n # mimetype = 'image/png',\n # as_attachment = True,\n # attachment_filename = 'tmp.png')\n image_data = open(\"tmp.png\", \"rb\").read()\n response = make_response(image_data)\n response.headers['Content-Type'] = 'image/png'\n return response\n\n\n\n@app.route('/data/json', methods=[\"GET\", \"POST\"])\ndef station_tide_info():\n if request.method == 'GET':\n station_reference = request.args.get(\"stationReference\")\n station_name = request.args.get(\"stationName\")\n station_name = station_name.replace(\" \",\"+\")\n\n if station_name is not None:\n station = station_data.loc[station_data.stationName == station_name]\n else:\n station = station_data.loc[station_data.stationReference == station_reference]\n # print(station)\n result_station = station.iloc[0]\n # print(\"*****************\\n\")\n # print(result_station)\n # print(result_station.stationName)\n\n time_from = request.args.get(\"from\")\n time_to = request.args.get(\"to\")\n\n if time_from:\n pass\n else:\n time_from = None\n if time_to:\n pass\n else:\n time_to = None\n \n statistic = request.args.get(\"statistic\")\n if statistic is not None:\n statistic = statistic.split(',')\n # Build result map\n # print(\"**********\")\n # print(\"statistic: \", statistic)\n # print(\"**********\")\n\n tide_values = data.station_tides(result_station.stationName, time_from, time_to)\n # print(\"**********\")\n # print(tide_values)\n # print(\"**********\")\n result_map = {\n \"stationName\": result_station.stationName,\n \"from\": tide_values.index[0],\n \"to\": tide_values.index[-1]\n }\n # print(\"**********\")\n # print(result_map)\n # print(\"**********\")\n\n if statistic[0] is None:\n result_map['tideValues'] = json.loads(tide_values.to_json())\n return json.dumps(result_map)\n\n elif statistic[0] == 'max':\n result_map['stationReference'] = result_station.stationReference\n result_map['max'] = data.max_tides()[result_station.stationName]\n return json.dumps(result_map)\n\n elif statistic[0] == 'min':\n result_map['stationReference'] = result_station.stationReference\n result_map['min'] = data.min_tides()[result_station.stationName]\n return json.dumps(result_map)\n\n elif statistic[0] == 'mean':\n # print(\"*************\\nI AM HERE\")\n # print(result_map)\n # print(\"*************\")\n result_map['stationReference'] = result_station.stationReference\n result_map['mean'] = 
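The statistic branches above repeat one block per aggregate name; the same idea can be table-driven with pandas. An illustrative tidy frame of readings (column names are mine, not the app's schema):

import pandas as pd

readings = pd.DataFrame({
    "stationName": ["A", "A", "B", "B"],
    "tideValue": [1.2, 2.0, 0.5, 0.9],
})

STATS = {"max": "max", "min": "min", "mean": "mean"}

def station_stats(df, requested):
    agg = [STATS[s] for s in requested]  # raises KeyError on an unknown statistic
    return df.groupby("stationName")["tideValue"].agg(agg)

print(station_stats(readings, ["max", "mean"]))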
data.mean_tides()[result_station.stationName]\n # print(\"*************\")\n # print(result_map)\n # print(\"*************\")\n return json.dumps(result_map)\n\n if request.method == \"POST\":\n write = request.args.get(\"write\")\n if write.lower() == 'true':\n write = True\n else:\n write = False\n\n data_to_add = json.loads(request.get_data())\n for new_data in data_to_add:\n data.add_data(new_data['dateTime'], new_data['stationName'], new_data['tideValue'])\n if write is not None:\n data.write_data('tideReadings.csv')\n return json.dumps({\"msg\": \"Data added successfully!\"})\n\n\n@app.route('/data/html', methods=[\"GET\"])\ndef draw_html():\n \"\"\"Return station tide info.\n\n The endpoint accepts query parameters:\n * stationName\n * stationRef\n * from\n * to\n * statistic\n \"\"\"\n station_reference = request.args.get(\"stationReference\")\n station_name = request.args.get(\"stationName\")\n station_name = station_name.replace(\" \",\"+\")\n # print(\"**********\")\n # print(station_name)\n # print(\"**********\")\n # print(\"**********\")\n # print(station_data.stationName)\n # print(\"**********\") \n if station_name:\n station = station_data.loc[station_data.stationName == station_name]\n else:\n station = station_data.loc[station_data.stationReference == station_reference]\n # print(station)\n # print(\"**********\")\n # print(station)\n # print(\"**********\")\n result_station = station.iloc[0]\n # print(\"**********\")\n # print(result_station)\n # print(\"**********\")\n time_from = request.args.get(\"from\")\n time_to = request.args.get(\"to\")\n if time_from:\n pass\n else:\n time_from = None\n if time_to:\n pass\n else:\n time_to = None\n\n statistic = request.args.get(\"statistic\")\n if statistic is not None:\n statistic = statistic.split(',')\n\n if result_station is not None:\n tide_values = data.station_tides(result_station.stationName, time_from, time_to).reset_index()\n tide_values.rename(columns={result_station.stationName: 'tideValue'}, inplace=True)\n return tide_values.to_html(index=False)\n\n if statistic is not None:\n # print(\"**********\")\n # print(statistic)\n # print(\"**********\")\n frames = []\n for statistic_method in statistic:\n if statistic_method == 'max':\n frames.append(data.max_tides(time_from, time_to))\n elif statistic_method == 'min':\n frames.append(data.min_tides(time_from, time_to))\n else:\n frames.append(data.mean_tides(time_from, time_to))\n # print(\"**********\")\n # print(result)\n # print(\"**********\")\n\n result = pd.concat(frames, axis=1, keys=statistic).reset_index()\n return result.to_html(index=False)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"612064119","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nsrc_img = cv2.imread('asuka.jpg')\ngray = cv2.cvtColor(src_img,cv2.COLOR_BGR2GRAY)\n\nfourier_img = np.fft.fft2(gray)\nfourier_img = np.fft.fftshift(fourier_img)\n\nH,W = gray.shape\ncenterx = int(W/2)\ncentery = int(H/2)\n\nmask = np.zeros_like(gray)\nR = 50\nfor x in range(0,W):\n for y in range(0,H):\n if (x- centery)**2 +(y- centery)**2>R**2:\n mask[x,y]=1\n\n# mask = 255- mask\ntmp = mask * 255\ncv2.imshow('mask',tmp)\n\nprint(mask)\nfourier_img *= mask\nfourier_img = np.fft.fftshift(fourier_img)\nifimg = np.fft.ifft2(fourier_img)\nifimg = 
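The double loop in High_Pass_Filter.py above fills the circular mask one pixel at a time and indexes the (H, W) array as mask[x, y], which only lines up when the image is square. An equivalent vectorized mask with np.ogrid, using the same radius convention:

import numpy as np

def highpass_mask(h, w, radius):
    """1 outside a centered circle of `radius`, 0 inside -- keeps high frequencies."""
    cy, cx = h // 2, w // 2
    y, x = np.ogrid[:h, :w]             # broadcasting row/column index grids
    return ((y - cy) ** 2 + (x - cx) ** 2 > radius ** 2).astype(np.uint8)

print(highpass_mask(4, 6, 2))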
np.uint8(ifimg.real)\n\n\ncv2.imshow('lp',ifimg)\ncv2.waitKey()\ncv2.destroyAllWindows()","sub_path":"High_Pass_Filter.py","file_name":"High_Pass_Filter.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"183657299","text":"#\n# @lc app=leetcode id=287 lang=python3\n#\n# [287] Find the Duplicate Number\n#\n# https://leetcode.com/problems/find-the-duplicate-number/description/\n#\n# algorithms\n# Medium (50.83%)\n# Total Accepted: 214.9K\n# Total Submissions: 422.7K\n# Testcase Example: '[1,3,4,2,2]'\n#\n# Given an array nums containing n + 1 integers where each integer is between 1\n# and n (inclusive), prove that at least one duplicate number must exist.\n# Assume that there is only one duplicate number, find the duplicate one.\n# \n# Example 1:\n# \n# \n# Input: [1,3,4,2,2]\n# Output: 2\n# \n# \n# Example 2:\n# \n# \n# Input: [3,1,3,4,2]\n# Output: 3\n# \n# Note:\n# \n# \n# You must not modify the array (assume the array is read only).\n# You must use only constant, O(1) extra space.\n# Your runtime complexity should be less than O(n2).\n# There is only one duplicate number in the array, but it could be repeated\n# more than once.\n# \n# \n#\nclass Solution:\n # def findDuplicate(self, nums: List[int]) -> int:\n def findDuplicate(self, nums) -> int:\n # n = len(nums) - 1\n # res = sum(nums) - (1+n)*n // 2\n # return res\n for i in range(len(nums)):\n if nums[abs(nums[i])-1] > 0:\n nums[abs(nums[i])-1] = 0 - nums[abs(nums[i])-1]\n else:\n # print(nums[nums[i] - 1])\n # print('i=', i, nums[i])\n return abs(nums[i])\n # print(nums)\n\n def findDuplicate(self, nums):\n tortoise = hare = nums[0]\n while True:\n tortoise = nums[tortoise]\n hare = nums[nums[hare]]\n if tortoise == hare:\n break\n \n p1 = nums[0]\n p2 = tortoise\n while p1 != p2:\n p1 = nums[p1]\n p2 = nums[p2]\n return p1\n# s = Solution()\n# nums = [1,3,4,2,2]\n# print(s.findDuplicate(nums))\n# nums = [3,1,3,4,2]\n# print(s.findDuplicate(nums))\n\n# nums = [2,2,2,2,2]\n# print(s.findDuplicate(nums))\n","sub_path":"python/tests/287_find_the_duplicate_number.py","file_name":"287_find_the_duplicate_number.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"619206757","text":"import pika\nimport os\n\nREQUEST_QUEUE = 'requests'\nRESULT_QUEUE = 'results'\nHOST = os.environ['HOST']\nPORT = '5672'\nRABBITMQ_USER, RABBITMQ_PASS = 'guest', 'guest'\n\ncredentials = pika.PlainCredentials(RABBITMQ_USER, RABBITMQ_PASS)\nconnection = pika.BlockingConnection(pika.ConnectionParameters(\n host=HOST,\n port=PORT,\n socket_timeout=1000,\n credentials=credentials))\nchannel = connection.channel()\nchannel.queue_declare(queue=REQUEST_QUEUE)\nchannel.queue_declare(queue=RESULT_QUEUE)\n\n\ndef put_in_result_queue(message):\n channel.basic_publish(exchange='',\n routing_key=RESULT_QUEUE,\n body=message)\n\n\ndef parse_message(message):\n text = message.decode('utf-8')\n return [int(x) for x in text.split()]\n\n\ndef is_prime(number):\n if number < 2:\n return False\n return all([number % x != 0 for x in range(2, number)])\n\n\ndef find_primes(range_from, range_to):\n return [x for x in range(range_from, range_to) if is_prime(x)]\n\n\ndef write_result(range_from, range_to, result):\n message = f\"[{range_from} {range_to}]: \" + \", \".join(map(str, result))\n put_in_result_queue(message)\n\n\ndef callback(ch, method, properties, body):\n message = body\n 
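The second findDuplicate above reads nums as a linked list (index i points to nums[i]); a duplicate value guarantees a cycle, and Floyd's tortoise-and-hare finds its entry in O(1) extra space. Traced on both examples from the problem:

def find_duplicate(nums):
    tortoise = hare = nums[0]
    while True:                    # phase 1: meet somewhere inside the cycle
        tortoise = nums[tortoise]
        hare = nums[nums[hare]]    # hare moves twice as fast
        if tortoise == hare:
            break
    p1, p2 = nums[0], tortoise     # phase 2: both advance one step at a time
    while p1 != p2:
        p1, p2 = nums[p1], nums[p2]
    return p1                      # the cycle entry is the duplicated value

print(find_duplicate([1, 3, 4, 2, 2]))  # 2
print(find_duplicate([3, 1, 3, 4, 2]))  # 3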
print(\"Received:\", message)\n range_from, range_to = parse_message(message)\n result = find_primes(range_from, range_to)\n write_result(range_from, range_to, result)\n ch.basic_ack(delivery_tag=method.delivery_tag)\n print(\"processed\")\n\n\nchannel.basic_consume(callback,\n queue=REQUEST_QUEUE)\n\nprint('Waiting for messages. To exit press CTRL+C')\nchannel.start_consuming()\n","sub_path":"list10/zad4/application/reciever.py","file_name":"reciever.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"41473048","text":"\r\ndef creartxt():\r\n ar2=open(\"2.txt\",\"w\")\r\n ar2.close()\r\n\r\ndef grabar2():\r\n ar2=open(\"2.txt\",\"w\")\r\n ar2.write(\"\\tInvertir texto\\n\")\r\n def invertir():\r\n x = \"Diego Pilamunga\"\r\n ar2.write(\"Cadena de texto: \"+str(x)+\"\\n\")\r\n ar2.write(\"Cadena de texto invertida: \")\r\n n=(len(x)-1)\r\n while n!=-1:\r\n y=str(x[n])\r\n ar2.write(str(y))\r\n n=n-1\r\n invertir()\r\n\r\ncreartxt()\r\ngrabar2()","sub_path":"2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"277045080","text":"import numpy as np\n\ndef softmax(x, scale = 1):\n x = np.array(x)/scale\n max_x = np.max(x)\n e_x = np.exp(x - max_x)\n p = e_x/e_x.sum()\n p = p/p.sum()\n\n return p\n\ndef logsumexp(x, scale = 1):\n x = np.array(x)/scale\n max_x = np.max(x)\n lse_x = max_x + np.log(np.exp(x-max_x).sum())\n lse_x = scale*lse_x\n return lse_x\n\ndef sparsedist(z, scale=1.):\n z = np.array(z/scale)\n if len(z.shape) == 1:\n z = np.reshape(z,(1,-1))\n z = z - np.mean(z, axis=1)[:, np.newaxis]\n\n # sort z\n z_sorted = np.sort(z, axis=1)[:, ::-1]\n\n # calculate k(z)\n z_cumsum = np.cumsum(z_sorted, axis=1)\n k = np.arange(1, z.shape[1] + 1)\n z_check = 1 + k * z_sorted > z_cumsum\n k_z = z.shape[1] - np.argmax(z_check[:, ::-1], axis=1)\n\n # calculate tau(z)\n tau_sum = z_cumsum[np.arange(0, z.shape[0]), k_z - 1]\n tau_z = ((tau_sum - 1) / k_z).reshape(-1, 1)\n\n # calculate p\n p = np.maximum(0, z - tau_z)\n return p \n\ndef sparsemax(z, scale=1.):\n z = np.array(z/scale) \n z = z - np.mean(z, axis=1)[:, np.newaxis]\n\n # calculate sum over S(z)\n p = sparsedist(z)\n s = p > 0\n # z_i^2 - tau(z)^2 = p_i (2 * z_i - p_i) for i \\in S(z)\n S_sum = np.sum(s * p * (2 * z - p), axis=1)\n\n return 0.5 * S_sum + 0.5\n\nif __name__ == '__main__':\n print(\"Main Started\")\n x = np.random.rand(5)\n print(x[range(0,len(x))])\n print(np.sort(x))\n print(np.max(x))\n print(logsumexp(x))\n print(sparsemax(x))\n print(softmax(x))\n print(sparsedist(x))","sub_path":"multigoal_experiments/maxapproxi.py","file_name":"maxapproxi.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"595332602","text":"from random import choice\n\nclass general_alphabeta_tree(object):\n\n def __init__(self, val, isMaximizingPlayer=True):\n self.val = val\n self.children = []\n self.alpha = float('inf')\n self.beta = float('-inf')\n self.isMaximizingPlayer = isMaximizingPlayer\n\n def AddSuccessor(self, T):\n T.isMaximizingPlayer = not self.isMaximizingPlayer\n self.children += [T]\n return True\n\n def evaluation(self):\n # Parameter: void\n # Return Type: float\n # A higher is a more prefered state\n # Implementation: REQUIRED\n raise NotImplementedError(\"The method 'evaluation(self)' was not implemented\")\n\n 
def isLeaf(self):\n # Parameter: void\n # Return Type: boolean\n # True = Node is a leaf node, i.e. no more Edges exist\n # False = Node is not a lead node\n # Implementation: REQUIRED\n raise NotImplementedError(\"The method 'isLeaf(self)' was not implemented\")\n\n def getEdges(self):\n # Parameter: void\n # Return Type: A list of Edges\n # See comments in self.search() for my definition of an Edge\n # Implementation: REQUIRED\n raise NotImplementedError(\"The method 'getEdges(self)' was not implemented\")\n\n def copy_node(self):\n # Parameter: void\n # Return Type: Child class that implements the general_search_tree class\n # Implementation: REQUIRED\n raise NotImplementedError(\"The method 'copy_node(self)' was not implemented\")\n\n def evolve(self, E):\n # Parameter: An Edge\n # Return Type: Child class that implements the general_search_tree class\n # Implementation: REQUIRED\n raise NotImplementedError(\"The method 'evolve(self, E)' was not implemented\")\n\n def search(self, depth):\n # Bad input\n if self.val == None:\n return False\n\n # Reached a terminating node (e.g. checkmate in chess)\n if self.isLeaf():\n return self.evaluation()\n # Reached end of depth\n if depth == 0:\n return self.evaluation()\n\n # Getting Edges\n # Edges take the parent node to the child node\n # For example if the game was chess, an edge would be moving a pawn to a2 to a4\n # and the evolve function is responsible for handling that logic\n # child = evolve(parent, Edges[i])\n Edges = self.getEdges()\n\n # Another possible way to handle a terminating node\n # TODO I don't think this ever gets called\n if Edges == []:\n return self.val.evaluation()\n\n # The core of the algorithm\n if self.isMaximizingPlayer:\n # initialize v\n v = float(\"-inf\")\n for E in Edges:\n # create child\n self.AddSuccessor( self.copy_node().evolve(E) )\n # searching\n v = max(v, self.children[-1].search(depth-1))\n # back propogating\n self.alpha = max(self.alpha, v)\n # pruning\n if self.beta >= self.alpha:\n break\n return v\n else:\n # initialize v\n v = float(\"inf\")\n for E in Edges:\n # create child\n self.AddSuccessor( self.copy_node().evolve(E) )\n # searching\n v = min(v, self.children[-1].search(depth-1))\n # back propogating\n self.beta = min(self.beta, v)\n # pruning\n if self.beta >= self.alpha:\n break\n return v\n\n def getBestChild(self, depth):\n # .search just returns the evaluation of a node, so we can't call it on the root\n # Instead we call it on every child and then return the child with the maximum evaluation\n\n # Getting Edges\n Edges = self.getEdges()\n # check if current state is already complete\n if Edges == []:\n return False\n\n # creating children\n for E in Edges:\n self.AddSuccessor( self.copy_node().evolve(E) )\n\n # Getting values of children\n children_alphabeta = [child.search(depth-1) for child in self.children]\n \"\"\"\n children_alphabeta = []\n for C in self.children:\n children_alphabeta += [C.search(depth-1)]\n \"\"\"\n\n # returning the appropriate child\n # just for variety, if there are multiple children with the same evaluation,\n # then it will return one of them at random\n if self.isMaximizingPlayer:\n max_child = max(children_alphabeta)\n return choice([ child for child, val in zip(self.children, children_alphabeta) if val == max_child ])\n #return choice([ self.children[i] for i in range(len(self.children)) if children_alphabeta[i] == max_child ])\n else:\n min_child = min(children_alphabeta)\n return choice([ child for child, val in zip(self.children, 
children_alphabeta) if val == min_child ])\n #return choice([ self.children[i] for i in range(len(self.children)) if children_alphabeta[i] == min_child ])\n","sub_path":"Data_Structures/Alpha-Beta Pruning/general_alphabeta_tree.py","file_name":"general_alphabeta_tree.py","file_ext":"py","file_size_in_byte":5252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"121332187","text":"#!/usr/bin/python3\n# coding=utf-8\n\"\"\"creates Google Earth kml file (/tmp/gps3_live.kml) for realtime (4 second GE default) updates of gps coordinates and history\n# Concept from Jaroslaw Zachwieja & TJ \n# from their work in gegpsd.py included in gpsd project (http://catb.org/gpsd)\n\"\"\"\nimport time\n\nfrom gps3 import gps3 # Moe, remember to CHANGE to straight 'import gps3' if not installed,\n\n# or check which Python version it's installed in. You forget sometimes.\n\n__author__ = 'Moe'\n__copyright__ = 'Copyright 2014-2016 Moe'\n__license__ = 'MIT'\n__version__ = '0.33.2'\n\nlink_file = '/tmp/gps3_live.kml' # AFAIK, 'Links' call href on time events or entry/exit Multiple href may be possible.\ngps3data_file = '/tmp/gps3_static.kml'\ngps3data_history = []\n\nlink_data = ('\\n'\n '\\n'\n '\\n'\n ' GPS3 Live\\n'\n ' \\n'\n ' {0}\\n'\n ' onInterval\\n'\n ' \\n'\n '\\n'\n '').format(gps3data_file) # inserts 'the file' into a refresh mode default 4 second\nf = open(link_file, 'w')\nf.write(link_data)\nf.close()\n\ngpsd_socket = gps3.GPSDSocket()\ngpsd_socket.connect(host='127.0.0.1', port=2947)\ngpsd_socket.watch()\ndata_stream = gps3.DataStream()\n\ntry:\n for new_data in gpsd_socket:\n if new_data:\n data_stream.unpack(new_data)\n if data_stream.TPV['lat'] != 'n/a':\n speed = data_stream.TPV['speed']\n latitude = data_stream.TPV['lat']\n longitude = data_stream.TPV['lon']\n altitude = data_stream.TPV['alt']\n\n if data_stream.TPV['track'] == 'n/a': heading = data_stream.TPV['track'] # 'track' frequently is missing and returns as 'n/a'\n else: heading = round(data_stream.TPV['track']) # and heading precision in hundreths is just clutter.\n\n gps3data_history.append(longitude)\n gps3data_history.append(latitude)\n gps3data_history.append(altitude)\n hist_string = str(gps3data_history).replace(' ', '') # GE > 7.1.xxxx spits up on spaces in \n\n static_file = ('\\n'\n '\\n'\n '\\n'\n ' Frankie likes walking and stopping \\n'\n\n ' \\n'\n ' {0:.2f} m/s {4}°\\n'\n ' Current gps location\\nAltitude: {3} Metres\\n'\n ' \\n'\n ' {1}\\n'\n ' {2}\\n'\n ' 600\\n'\n ' 0\\n'\n ' 0\\n'\n ' \\n'\n ' \\n'\n ' {1},{2},{3}\\n'\n ' \\n'\n ' \\n'\n\n ' \\n'\n ' Pin Scratches\\n'\n ' GPS Trail of Tears\\n'\n ' \\n'\n ' 7f0000ff\\n'\n ' 20\\n'\n ' 1\\n'\n ' {5}\\n'\n ' \\n'\n ' \\n'\n '\\n'\n '').format(speed, longitude, latitude, altitude, heading, hist_string.strip('[]'))\n\n f = open(gps3data_file, 'w')\n f.write(static_file)\n f.close()\n\n else:\n time.sleep(.1)\n time.sleep(.8) # default GE refresh rate is 4 seconds, therefore no refresh older than ~1 second from itself.\nexcept KeyboardInterrupt:\n gpsd_socket.close()\n print('\\nTerminated by user\\nGood Bye.\\n')\n# End\n","sub_path":"examples/gegps3.py","file_name":"gegps3.py","file_ext":"py","file_size_in_byte":4931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"459078800","text":"from unittest import TestCase\nfrom datetime import datetime\nfrom proactive.priority.worker import Worker\nfrom proactive.priority.taskunit import TaskUnit\nfrom 
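search above cuts a branch as soon as beta <= alpha, because the opponent already holds a better alternative higher up the tree. A self-contained run on a static tree of nested lists, recording which leaves actually get evaluated; the names here are illustrative, not the class's API:

def alphabeta(node, alpha, beta, maximizing, visited):
    if not isinstance(node, list):        # leaf: a plain number
        visited.append(node)
        return node
    best = float("-inf") if maximizing else float("inf")
    for child in node:
        v = alphabeta(child, alpha, beta, not maximizing, visited)
        if maximizing:
            best = max(best, v)
            alpha = max(alpha, v)
        else:
            best = min(best, v)
            beta = min(beta, v)
        if beta <= alpha:                 # opponent already has a better option
            break
    return best

tree = [[3, 5], [2, 9], [1, 2]]           # depth-2 minimax, root maximizes
visited = []
print(alphabeta(tree, float("-inf"), float("inf"), True, visited))  # 3
print(visited)                            # [3, 5, 2, 1] -- leaves 9 and 2 were pruned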
proactive.priority.exceptions import MaxTaskLimitReachedException\nfrom .testutil import tHour\n\nclass TestWorker(TestCase):\n def setUp(self):\n self.task1 = TaskUnit(\n createdAt=datetime.now(),\n deadline=500,\n profit=2.56,\n processing=100,\n taskID=\"test1234\"\n )\n self.task2 = TaskUnit(\n createdAt=datetime.now(),\n deadline=500,\n profit=2.56,\n processing=100,\n taskID=\"test1234\"\n )\n self.task3 = TaskUnit(\n createdAt=datetime.now(),\n deadline=500,\n profit=2.56,\n processing=100,\n taskID=\"test1234\"\n )\n\n def test_maxTasksLimit(self):\n worker = Worker(workerID=\"W1\", begin=tHour(0, 00), end=tHour(23, 59), multitask=2)\n worker.assignTask(self.task1)\n worker.assignTask(self.task2)\n with self.assertRaises(MaxTaskLimitReachedException):\n worker.assignTask(self.task3)\n\n def test_exactTaskLimit(self):\n worker = Worker(workerID=\"W1\", begin=tHour(0, 00), end=tHour(23, 59), multitask=2)\n worker.assignTask(self.task1)\n worker.assignTask(self.task2)\n self.assertEqual(len(worker.assignedTasks), 2)\n\n def test_canAssignTasks(self):\n worker = Worker(workerID=\"W1\", begin=tHour(0, 00), end=tHour(23, 59), multitask=2)\n self.assertFalse(worker.hasReachedTaskLimit())\n worker.assignTask(self.task1)\n self.assertFalse(worker.hasReachedTaskLimit())\n worker.assignTask(self.task2)\n self.assertTrue(worker.hasReachedTaskLimit())\n\n def test_unnasignTask(self):\n worker = Worker(workerID=\"W1\", begin=tHour(0, 00), end=tHour(23, 59), multitask=2)\n worker.assignTask(self.task1)\n worker.unassignTask(self.task1.taskID)\n self.assertEqual(len(worker.assignedTasks), 0)\n\n def test_availableInPeriod(self):\n worker = Worker(workerID=\"W1\", begin=tHour(0, 00), end=tHour(23, 59), multitask=2)\n available = worker.availableInPeriod(begin=tHour(12, 00), end=tHour(13, 00))\n self.assertTrue(available)\n","sub_path":"QuickProactive/tests/test_worker.py","file_name":"test_worker.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"177449309","text":"\"\"\"\nPython script to read fits table created by dmstack.\n\n@author: Bhishan Poudel\n\n@date: May 21, 2019\n\nCommand: python read_fits_table.py -f FITS_TABLE_NAME\n\nNOTE: To see the parameters of fits table we can use FV app, not ds9.\n\"\"\"\n# Imports\nimport os,sys,argparse\nimport time\nimport glob\nimport shutil\nimport numpy as np\nimport pyfits\nimport astropy.table as table\nfrom astropy.io import fits\n\n\ndef src_fits_table(jedi_file,src_csv):\n #src_folder = jedi_file.split('.')[0]\n #src_fits = 'output/src/{}/src.fits'.format(src_folder)\n\n src_fits = jedi_file\n \n # Read table and its fields\n data_table, header_table = fits.getdata(src_fits, 1, header=True) # read extension 1\n \n # first column of src.fits is flags, in fitsfile header: these are TFLAGS1 to TFLAGS90\n # there are 90 flags and 77 columns, if we exclude first column 'flags' there are 76 cols.\n # in total there are 90 + 76 = 166 columns.\n tflags = [ header_table[hdr] for hdr in header_table if hdr.startswith('TFLAG')]\n data_flags = [ data_table[i][0] for i in range(len(data_table))]\n data_flags = np.array(data_flags).astype(int)\n \n # there are 177 columns, first column is flags, we exclude it from data_columns\n ttypes = [header_table[hdr] for hdr in header_table if hdr.startswith('TTYPE') ]\n ttypes = ttypes[1:] # exclude flags column\n \n data_columns = [ data_table[i][1:] for i in range(len(data_table))] # exclude first column 'flags'\n 
data_columns = np.array(data_columns) # make numpy from list\n \n data_all = np.c_[data_flags, data_columns]\n header_all = ','.join(tflags+ttypes)\n\n print('Creating: {}'.format(src_csv))\n np.savetxt(src_csv,data_all,fmt='%8.4f',header=header_all,delimiter=',')\n \n\nif __name__ == \"__main__\":\n import time, os\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-m\", \"--manual\", help='python read_fits_table.py -f FILENAME',required=False,type=str)\n parser.add_argument(\"-f\", \"--fname\", help='name of fits table',required=True,type=str)\n args = parser.parse_args()\n fname = args.fname\n\n # Beginning time\n program_begin_time = time.time()\n begin_ctime = time.ctime()\n\n # Run the main program\n src_fits = fname\n src_csv = src_fits.replace('.fits','.csv')\n src_fits_table(fname,src_csv)\n\n # Print the time taken\n program_end_time = time.time()\n end_ctime = time.ctime()\n seconds = program_end_time - program_begin_time\n m, s = divmod(seconds, 60)\n h, m = divmod(m, 60)\n d, h = divmod(h, 24)\n print(\"\\n\\nBegin time: \", begin_ctime)\n print(\"End time: \", end_ctime, \"\\n\")\n print(\"Time taken: {0: .0f} days, {1: .0f} hours, \\\n {2: .0f} minutes, {3: f} seconds.\".format(d, h, m, s))\n print(\"End of Program: {}\".format(os.path.basename(__file__)))\n print(\"\\n\")","sub_path":"read_fits_table.py","file_name":"read_fits_table.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"286340193","text":"import os\nimport sys\nimport time\nimport random\nrandom.seed(0) # For reproducible test cases\n\nsys.path.append(os.path.abspath(os.path.join(__file__ ,\"../../../\")))\nfrom test_utils import pass_msg, err_msg\n\nfrom inversions import inversions \nfrom inversions_naive import inversions_naive\n\n\ndef manual_test(to_test):\n test_type = 1\n \n test_cases = [\n {'in': [], 'out': (0, [])},\n {'in': [1], 'out': (0, [1])},\n {'in': [2, 3], 'out': (0, [2, 3])},\n {'in': [3, 2], 'out': (1, [2, 3])},\n {'in': [3, 3], 'out': (0, [3, 3])},\n {'in': [3, 6, 1, 0], 'out': (5, [0, 1, 3, 6])}, # 2+2+1\n {'in': [6, 7, 8, 9, 10], 'out': (0, [6, 7, 8, 9, 10])}, # sorted\n {'in': [5, 4, 3, 2, 1], 'out': (10, [1, 2, 3, 4, 5])}, # 4+3+2+1 = (5-1)*(5)/2 = 10\n {'in': [3, 4, 10, 12, 12], 'out': (0, [3, 4, 10, 12, 12])}, # 0+0+0+0\n {'in': [10, 9, 1, 12, 10], 'out': (4, [1, 9, 10, 10, 12])}, # 2+1+0+1\n {'in': [-12, -12, -12, -12, -12], 'out': (0, [-12, -12, -12, -12, -12])},\n {'in': [-12, -2, 12, 1, -100], 'out': (5, [-100, -12, -2, 1, 12])}, # 1+1+2+1\n ]\n\n for case in test_cases:\n input_arg = case['in']\n output = to_test(input_arg)\n expected_output = case['out']\n if output != expected_output:\n msg = err_msg(\n test_type, to_test.__name__, input_arg, output, expected_output\n )\n raise NameError(msg)\n\n return pass_msg(test_type, to_test.__name__)\n\n\ndef stress_test(to_test, test_against):\n test_type = 2\n\n total_tests = 1000\n min_len, max_len = 0, 1000\n min_num, max_num = -10000, 10000\n \n test_type = 2\n for _ in range(total_tests):\n arr_len = random.randint(min_len, max_len)\n arr = [random.randint(min_num, max_num) for i in range(arr_len)] \n output = to_test(arr)\n expected_output = test_against(arr)\n\n if output != expected_output:\n msg = err_msg(\n test_type, to_test.__name__, arr, output, expected_output\n )\n raise NameError(msg) \n \n return pass_msg(test_type, to_test.__name__)\n\n\ndef test_inversions():\n func_arr = [inversions]\n for 
func in func_arr:\n print('-----Testing: {}-----'.format(func.__name__))\n start_time = time.time()\n\n to_test = func\n test_against = inversions_naive\n print(manual_test(to_test))\n print(stress_test(to_test, test_against))\n\n print(\"--- %s seconds for testing ---\" % (time.time() - start_time))\n print()\n\n\nif __name__ == \"__main__\":\n test_inversions()\n","sub_path":"chapter3/inversions/test_inversions.py","file_name":"test_inversions.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"247363929","text":"import math \ndef is_prime(n): \n if n <= 1: \n return False\n max_div = math.floor(math.sqrt(n)) \n for i in range(2, 1 + max_div): \n if n % i == 0: \n return False\n return True\nk = int(input(\"Enter a number:\"))\nprint(\"Prime numbers are:\")\nfor n in range(1, k): \n # print the prime itself, not the True/False flag returned by is_prime\n if is_prime(n): \n print(n, end=' ')\n","sub_path":"Prime Factors/primefactorization.py","file_name":"primefactorization.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"62769635","text":"from stores import stores\nimport pprint\n\n# You must build the following response, as a Python dictionary\npp = pprint.PrettyPrinter(indent=2)\n\nm_categories = set(\n map(lambda x: (('category_id', x['category_id']), ('category_verbose', x['category_verbose'])), stores)\n)\nresponse = {\n \"category_list\": list(map(lambda x: dict(x), m_categories)),\n \"stores\": stores\n}\n\nresponse_example = {\n 'category_list': [\n {\n '': 1,\n 'category_verbose': 'Conveniência',\n },\n {\n 'category_id': 1,\n 'category_verbose': 'Conveniência',\n },\n ],\n 'stores': stores\n}\n\n# store_type is a list of all the unique store types that exist in stores\n\n# Your task is to implement the function build_response(stores) and return a\n# python dictionary like the response_example defined above. 
The stores object\n# CAN be mutated.\n\npp.pprint(response)\n","sub_path":"livecoding.py","file_name":"livecoding.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"505697123","text":"import socket\r\nimport json_handler\r\nimport json\r\n\r\n# used to test the sending and receiving of JSON files\r\n\r\n# creates a new socket\r\nsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\r\n# sets the address and port nr of the server\r\nSERVER = 'localhost'\r\nPORT = 5555\r\n\r\nwhile 1:\r\n try:\r\n # attempts to connect to the given address and port\r\n socket.connect((SERVER, PORT))\r\n # print(socket.recv(4096).decode())\r\n while 1:\r\n # sets a predetermined example JSON object\r\n json_obj = json_handler.json_client_example()\r\n j = input('$- Press enter to send data >> ')\r\n # socket.send(j.encode())\r\n print(\"Sending : \")\r\n print(\"==================================\")\r\n # sends the example JSON object\r\n socket.send(json.dumps(json_obj).encode())\r\n # displays the object that was sent\r\n json_handler.pretty_print(json_obj)\r\n print(\"==================================\")\r\n # receive and display the JSON object form the server\r\n reply = socket.recv(4096).decode()\r\n json_handler.pretty_print(json.loads(reply))\r\n if j == 'quit':\r\n break\r\n # closes the socket\r\n socket.close()\r\n except Exception as e:\r\n print(str(e))\r\n break\r\n finally:\r\n socket.close()","sub_path":"pretoria/Rover_Software/client_test.py","file_name":"client_test.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"632392755","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models\nimport collections\nimport math\n\ndef weights_init(m):\n # Initialize filters with Gaussian random weights\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.ConvTranspose2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.in_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\nclass Decoder(nn.Module):\n # Decoder is the base class for all decoders\n\n names = ['deconv2', 'deconv3']\n\n def __init__(self):\n super(Decoder, self).__init__()\n\n self.layer1 = None\n self.layer2 = None\n self.layer3 = None\n self.layer4 = None\n\n def forward(self, x):\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n return x\n\nclass DeConv(Decoder):\n def __init__(self, in_channels, kernel_size):\n assert kernel_size>=2, \"kernel_size out of range: {}\".format(kernel_size)\n super(DeConv, self).__init__()\n\n def convt(in_channels):\n stride = 2\n padding = (kernel_size - 1) // 2\n output_padding = kernel_size % 2\n assert -2 - 2*padding + kernel_size + output_padding == 0, \"deconv parameters incorrect\"\n\n module_name = \"deconv{}\".format(kernel_size)\n return nn.Sequential(collections.OrderedDict([\n (module_name, nn.ConvTranspose2d(in_channels,in_channels//2,kernel_size,\n stride,padding,output_padding,bias=False)),\n ('batchnorm', nn.BatchNorm2d(in_channels//2)),\n ('relu', nn.ReLU(inplace=True)),\n ]))\n\n self.layer1 = convt(in_channels)\n self.layer2 = convt(in_channels // 2)\n self.layer3 = convt(in_channels // (2 ** 2))\n self.layer4 = convt(in_channels // (2 ** 3))\n\ndef choose_decoder(decoder, in_channels):\n # iheight, iwidth = 10, 8\n if decoder[:6] == 'deconv':\n assert len(decoder)==7\n kernel_size = int(decoder[6])\n return DeConv(in_channels, kernel_size)\n else:\n assert False, \"invalid option for decoder: {}\".format(decoder)\n\n\nclass ResNet(nn.Module):\n def __init__(self, layers, decoder, output_size, in_channels=3, pretrained=True):\n\n if layers not in [18, 34, 50, 101, 152]:\n raise RuntimeError('Only 18, 34, 50, 101, and 152 layer model are defined for ResNet. 
Got {}'.format(layers))\n\n super(ResNet, self).__init__()\n pretrained_model = torchvision.models.__dict__['resnet{}'.format(layers)](pretrained=pretrained)\n\n\n self.conv1 = pretrained_model._modules['conv1']\n self.bn1 = pretrained_model._modules['bn1']\n\n self.output_size = output_size\n\n self.relu = pretrained_model._modules['relu']\n self.maxpool = pretrained_model._modules['maxpool']\n self.layer1 = pretrained_model._modules['layer1']\n self.layer2 = pretrained_model._modules['layer2']\n self.layer3 = pretrained_model._modules['layer3']\n self.layer4 = pretrained_model._modules['layer4']\n\n # clear memory\n del pretrained_model\n\n # define number of intermediate channels\n if layers <= 34:\n num_channels = 512\n elif layers >= 50:\n num_channels = 2048\n\n self.conv2 = nn.Conv2d(num_channels,num_channels//2,kernel_size=1,bias=False)\n self.bn2 = nn.BatchNorm2d(num_channels//2)\n self.decoder = choose_decoder(decoder, num_channels//2)\n\n # setting bias=true doesn't improve accuracy\n self.conv3 = nn.Conv2d(num_channels//32,1,kernel_size=3,stride=1,padding=1,bias=False)\n self.bilinear = nn.Upsample(size=self.output_size, mode='bilinear', align_corners=True)\n\n # weight init\n self.conv2.apply(weights_init)\n self.bn2.apply(weights_init)\n self.decoder.apply(weights_init)\n self.conv3.apply(weights_init)\n\n def forward(self, x):\n # resnet encoder\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n # still encoder, but not pretrained\n x = self.conv2(x)\n x = self.bn2(x)\n \n # decoder\n x = self.decoder(x)\n x = self.conv3(x)\n x = self.bilinear(x)\n\n return x\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"17255008","text":"import numpy as np\nimport matplotlib\nimport matplotlib.gridspec as gridspec\nfrom matplotlib import cm\nfrom matplotlib.colors import Normalize\nimport matplotlib.pyplot as plt\n\ndef spaced_detector(arr, pixelspacing, pixelsize, cmap='viridis'):\n dims = arr.shape[::-1]\n size = (np.array(dims)-1)*pixelspacing\n norm = Normalize(vmax=np.amax(arr), vmin=np.amin(arr))\n cmap = plt.get_cmap(cmap)\n\n def draw_rectangle(ax, ix, iy, val):\n x = ix*pixelspacing[0]\n y = iy*pixelspacing[1]\n ax.add_patch(\n plt.Rectangle(\n xy=(x-0.5*pixelsize[0],\n y-0.5*pixelsize[1]),\n width=pixelsize[0], height=pixelsize[1],\n facecolor=cmap(norm(val)),\n edgecolor='black',\n )\n )\n ax.text(\n x, y, '{:0.1f}'.format(val),\n horizontalalignment='center',\n verticalalignment='center',\n fontsize=8,\n )\n\n fig = plt.figure()\n gs = gridspec.GridSpec(1, 2, width_ratios=(1,0.05),\n left=0.05, bottom=0.05, top=0.95, right=0.95,\n wspace=0)\n ax = fig.add_subplot(gs[0,0])\n ax_cbar = fig.add_subplot(gs[0,1])\n ax.set_aspect('equal')\n ax.set_facecolor('white')\n #ax.set_xticks([])\n #ax.set_yticks([])\n ax.set_xlim((-pixelsize[0]*0.5, size[0]+pixelsize[0]*0.5))\n ax.set_ylim((-pixelsize[1]*0.5, size[1]+pixelsize[1]*0.5))\n for yy in range(dims[1]):\n for xx in range(dims[0]):\n draw_rectangle(ax, xx, yy, arr[yy, xx])\n fig.colorbar(\n cm.ScalarMappable(norm=norm, cmap=cmap),\n cax=ax_cbar,\n )\n return fig\n","sub_path":"test/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} 
+{"seq_id":"326303447","text":"from django.db import models\nfrom django.utils import translation\nfrom django.core.validators import MinValueValidator, MaxValueValidator\nfrom polymorphic.models import PolymorphicModel, PolymorphicManager\n\nfrom wagtailkit.numerators.models import NumeratorMixin, Numerator\nfrom wagtailkit.core.models import KitBaseModel, MAX_LEN_SHORT, MAX_LEN_MEDIUM\nfrom wagtailkit.persons.models import Person, PersonManager\nfrom wagtailkit.teachers.models import Teacher\nfrom wagtailkit.academic.models import ProgramStudy, SchoolYear, CurriculumCourse\n\n\n_ = translation.gettext_lazy\n\n\nclass StudentPersonalManager(PersonManager):\n def get_queryset(self):\n return super().get_queryset().filter(\n models.Q(student__isnull=False) | models.Q(is_matriculant=True)\n ).prefetch_related('student')\n\n\nclass StudentPersonal(Person):\n class Meta:\n verbose_name = _('Student Personal')\n verbose_name_plural = _('Student Personals')\n proxy = True\n\n objects = StudentPersonalManager()\n\n @property\n def is_student(self):\n return bool(getattr(self, 'student', False))\n\n def save(self, *args, **kwargs):\n self.is_matriculant = True\n super().save(*args, **kwargs)\n\n\nclass StudentManager(models.Manager):\n def get_by_natural_key(self, sid):\n return self.get(sid=sid)\n\n\nclass Student(NumeratorMixin, KitBaseModel):\n class Meta:\n verbose_name = _('Student')\n verbose_name_plural = _('Students')\n permissions = (\n ('register_student', _('Can Register New Student')),\n )\n\n ACTIVE = 'ACT'\n ALUMNI = 'ALM'\n DROP_OUT = 'DRO'\n MOVED = 'MVD'\n MISC = 'MSC'\n STATUS = (\n (ACTIVE, _('Active')),\n (ALUMNI, _('Alumni')),\n (DROP_OUT, _('Drop out')),\n (MOVED, _('Moved')),\n (MISC, _('Misc')),\n )\n\n objects = StudentManager()\n\n inner_id = None\n numbering = Numerator.FIXED\n\n sid = models.CharField(\n editable=False, unique=True,\n max_length=MAX_LEN_SHORT,\n verbose_name=_('Student ID'))\n person = models.OneToOneField(\n Person, on_delete=models.CASCADE,\n verbose_name=_(\"Person\"))\n year_of_force = models.ForeignKey(\n SchoolYear, on_delete=models.PROTECT,\n verbose_name=_(\"Year of force\"))\n coach = models.ForeignKey(\n Teacher, null=True, blank=True,\n on_delete=models.SET_NULL,\n related_name='students',\n verbose_name=_('Coach'))\n rmu = models.ForeignKey(\n ProgramStudy, on_delete=models.PROTECT,\n verbose_name=_('Program Study'))\n registration_id = models.CharField(\n max_length=MAX_LEN_SHORT,\n verbose_name=_(\"Registration ID\"))\n registration = models.CharField(\n max_length=2, default='1',\n choices=(('1', 'Reguler'), ('P', 'Transfer')),\n verbose_name=_(\"Registration\"))\n status = models.CharField(\n choices=STATUS, default=ACTIVE,\n max_length=MAX_LEN_SHORT,\n verbose_name=_('Status'))\n status_note = models.CharField(\n null=True, blank=True,\n max_length=MAX_LEN_MEDIUM,\n verbose_name=_('Status note'))\n\n # wagtail autocomplete\n autocomplete_search_field = 'person__fullname'\n\n def autocomplete_label(self):\n return \"{} | {}\".format(self.sid, self.name())\n\n def generate_inner_id(self):\n \"\"\" Generate human friendly Student Number,\n override this method to customize inner_id format\n \"\"\"\n form = [\n str(self.year_of_force.year_start)[2:4],\n self.rmu.number,\n self.registration,\n str(self.reg_number).zfill(4)\n ]\n self.sid = ''.join(form)\n return self.sid\n\n def get_counter(self):\n custom_code = self.get_custom_code()\n ct_counter = Numerator.get_instance(self, custom_code=custom_code)\n return ct_counter\n\n def 
get_custom_code(self):\n form = [\n str(self.year_of_force.year_start)[2:4],\n self.rmu.number,\n self.registration\n ]\n return '{}{}{}'.format(*form)\n\n def __str__(self):\n return self.person.fullname\n\n def name(self):\n return self.person.fullname\n\n def natural_key(self):\n natural_key = (self.sid,)\n return natural_key\n\n\nclass StudentScoreManager(PolymorphicManager):\n\n def get_queryset(self):\n return super().get_queryset().select_related(\n 'student', 'course'\n ).annotate(\n sid = models.F('student__sid'),\n cid = models.F('course__course__inner_id'),\n curriculum = models.F('course__curriculum__code')\n )\n\nclass StudentScore(PolymorphicModel, KitBaseModel):\n class Meta:\n verbose_name = _(\"Student Score\")\n verbose_name_plural = _(\"Student Scores\")\n\n objects = StudentScoreManager()\n\n course = models.ForeignKey(\n CurriculumCourse,\n on_delete=models.PROTECT,\n related_name='student_scores',\n verbose_name=_('Course'))\n student = models.ForeignKey(\n Student, on_delete=models.CASCADE,\n related_name='scores',\n verbose_name=_(\"Student\"))\n numeric = models.PositiveIntegerField(\n default=0,\n validators=[\n MinValueValidator(0),\n MaxValueValidator(100),\n ],\n verbose_name=_(\"Numeric Score\"))\n alphabetic = models.CharField(\n max_length=1,\n verbose_name=_(\"Alphabetic Score\"))\n\n def __str__(self):\n return \"{} | {}\".format(self.student, self.course)\n\n\nclass ConversionScore(StudentScore):\n class Meta:\n verbose_name = _(\"Conversion Score\")\n verbose_name_plural = _(\"Conversion Scores\")\n\n ori_code = models.CharField(\n max_length=MAX_LEN_SHORT,\n verbose_name=_('Origin Code'))\n ori_name = models.CharField(\n max_length=MAX_LEN_SHORT,\n verbose_name=_('Origin Name'))\n ori_numeric_score = models.DecimalField(\n default=1,\n max_digits=3,\n decimal_places=2,\n validators=[\n MinValueValidator(1),\n MaxValueValidator(4),\n ],\n verbose_name=_('Origin Numeric'))\n ori_alphabetic_score = models.CharField(\n max_length=MAX_LEN_SHORT,\n verbose_name=_('Origin Alphabetic'))\n","sub_path":"wagtailkit/students/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"548444137","text":"#!/usr/bin/env python\nimport webapp2\n\nimport tba_config\n\nfrom controllers.admin.admin_cron_controller import AdminPostEventTasksDo, AdminCreateDistrictTeamsEnqueue, AdminCreateDistrictTeamsDo\nfrom controllers.cron_controller import YearInsightsEnqueue, YearInsightsDo, OverallInsightsEnqueue, OverallInsightsDo, TypeaheadCalcEnqueue, TypeaheadCalcDo\nfrom controllers.datafeed_controller import EventListEnqueue, EventDetailsEnqueue\nfrom controllers.datafeed_controller import EventListGet, EventDetailsGet, TeamDetailsGet\n\n\napp = webapp2.WSGIApplication([('/backend-tasks/enqueue/event_list/([0-9]*)', EventListEnqueue),\n ('/backend-tasks/enqueue/event_details/(.*)', EventDetailsEnqueue),\n ('/backend-tasks/get/event_list/([0-9]*)', EventListGet),\n ('/backend-tasks/get/event_details/(.*)', EventDetailsGet),\n ('/backend-tasks/get/team_details/(.*)', TeamDetailsGet),\n ('/backend-tasks/do/post_event_tasks/(.*)', AdminPostEventTasksDo),\n ('/backend-tasks/enqueue/rebuild_district_teams/([0-9]+)', AdminCreateDistrictTeamsEnqueue),\n ('/backend-tasks/do/rebuild_district_teams/([0-9]+)', AdminCreateDistrictTeamsDo),\n ('/backend-tasks/math/enqueue/overallinsights/(.*)', OverallInsightsEnqueue),\n ('/backend-tasks/math/do/overallinsights/(.*)', 
OverallInsightsDo),\n ('/backend-tasks/math/enqueue/insights/(.*)/([0-9]*)', YearInsightsEnqueue),\n ('/backend-tasks/math/do/insights/(.*)/([0-9]*)', YearInsightsDo),\n ('/backend-tasks/math/enqueue/typeaheadcalc', TypeaheadCalcEnqueue),\n ('/backend-tasks/math/do/typeaheadcalc', TypeaheadCalcDo),\n ],\n debug=tba_config.DEBUG)\n","sub_path":"backend_main.py","file_name":"backend_main.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"583619658","text":"# Copyright 2018 The TensorFlow Probability Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Monte Carlo Ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Dependency imports\nimport numpy as np\n\nfrom tensorflow.contrib import layers as layers_lib\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops.distributions import distribution as distribution_lib\nfrom tensorflow.python.ops.distributions import gamma as gamma_lib\nfrom tensorflow.python.ops.distributions import kullback_leibler\nfrom tensorflow.python.ops.distributions import normal as normal_lib\nfrom tensorflow.python.platform import test\nfrom tensorflow_probability.python import monte_carlo as monte_carlo_lib\nfrom tensorflow_probability.python.monte_carlo import _get_samples\n\nlayers = layers_lib\nmc = monte_carlo_lib\n\n\nclass GetSamplesTest(test.TestCase):\n \"\"\"Test the private method 'get_samples'.\"\"\"\n\n def test_raises_if_both_z_and_n_are_none(self):\n with self.test_session():\n dist = normal_lib.Normal(loc=0., scale=1.)\n z = None\n n = None\n seed = None\n with self.assertRaisesRegexp(ValueError, 'exactly one'):\n _get_samples(dist, z, n, seed)\n\n def test_raises_if_both_z_and_n_are_not_none(self):\n with self.test_session():\n dist = normal_lib.Normal(loc=0., scale=1.)\n z = dist.sample(seed=42)\n n = 1\n seed = None\n with self.assertRaisesRegexp(ValueError, 'exactly one'):\n _get_samples(dist, z, n, seed)\n\n def test_returns_n_samples_if_n_provided(self):\n with self.test_session():\n dist = 
normal_lib.Normal(loc=0., scale=1.)\n z = None\n n = 10\n seed = None\n z = _get_samples(dist, z, n, seed)\n self.assertEqual((10,), z.get_shape())\n\n def test_returns_z_if_z_provided(self):\n with self.test_session():\n dist = normal_lib.Normal(loc=0., scale=1.)\n z = dist.sample(10, seed=42)\n n = None\n seed = None\n z = _get_samples(dist, z, n, seed)\n self.assertEqual((10,), z.get_shape())\n\n\nclass ExpectationTest(test.TestCase):\n\n def test_works_correctly(self):\n with self.test_session() as sess:\n x = constant_op.constant([-1e6, -100, -10, -1, 1, 10, 100, 1e6])\n p = normal_lib.Normal(loc=x, scale=1.)\n\n # We use the prefex \"efx\" to mean \"E_p[f(X)]\".\n f = lambda u: u\n efx_true = x\n samples = p.sample(int(1e5), seed=1)\n efx_reparam = mc.expectation(f, samples, p.log_prob)\n efx_score = mc.expectation(f, samples, p.log_prob,\n use_reparametrization=False)\n\n [\n efx_true_,\n efx_reparam_,\n efx_score_,\n efx_true_grad_,\n efx_reparam_grad_,\n efx_score_grad_,\n ] = sess.run([\n efx_true,\n efx_reparam,\n efx_score,\n gradients_impl.gradients(efx_true, x)[0],\n gradients_impl.gradients(efx_reparam, x)[0],\n gradients_impl.gradients(efx_score, x)[0],\n ])\n\n self.assertAllEqual(np.ones_like(efx_true_grad_), efx_true_grad_)\n\n self.assertAllClose(efx_true_, efx_reparam_, rtol=0.005, atol=0.)\n self.assertAllClose(efx_true_, efx_score_, rtol=0.005, atol=0.)\n\n self.assertAllEqual(np.ones_like(efx_true_grad_, dtype=np.bool),\n np.isfinite(efx_reparam_grad_))\n self.assertAllEqual(np.ones_like(efx_true_grad_, dtype=np.bool),\n np.isfinite(efx_score_grad_))\n\n self.assertAllClose(efx_true_grad_, efx_reparam_grad_,\n rtol=0.03, atol=0.)\n # Variance is too high to be meaningful, so we'll only check those which\n # converge.\n self.assertAllClose(efx_true_grad_[2:-2],\n efx_score_grad_[2:-2],\n rtol=0.05, atol=0.)\n\n def test_docstring_example_normal(self):\n with self.test_session() as sess:\n num_draws = int(1e5)\n mu_p = constant_op.constant(0.)\n mu_q = constant_op.constant(1.)\n p = normal_lib.Normal(loc=mu_p, scale=1.)\n q = normal_lib.Normal(loc=mu_q, scale=2.)\n exact_kl_normal_normal = kullback_leibler.kl_divergence(p, q)\n approx_kl_normal_normal = monte_carlo_lib.expectation(\n f=lambda x: p.log_prob(x) - q.log_prob(x),\n samples=p.sample(num_draws, seed=42),\n log_prob=p.log_prob,\n use_reparametrization=(p.reparameterization_type\n == distribution_lib.FULLY_REPARAMETERIZED))\n [exact_kl_normal_normal_, approx_kl_normal_normal_] = sess.run([\n exact_kl_normal_normal, approx_kl_normal_normal])\n self.assertEqual(\n True,\n p.reparameterization_type == distribution_lib.FULLY_REPARAMETERIZED)\n self.assertAllClose(exact_kl_normal_normal_, approx_kl_normal_normal_,\n rtol=0.01, atol=0.)\n\n # Compare gradients. 
(Not present in `docstring`.)\n gradp = lambda fp: gradients_impl.gradients(fp, mu_p)[0]\n gradq = lambda fq: gradients_impl.gradients(fq, mu_q)[0]\n [\n gradp_exact_kl_normal_normal_,\n gradq_exact_kl_normal_normal_,\n gradp_approx_kl_normal_normal_,\n gradq_approx_kl_normal_normal_,\n ] = sess.run([\n gradp(exact_kl_normal_normal),\n gradq(exact_kl_normal_normal),\n gradp(approx_kl_normal_normal),\n gradq(approx_kl_normal_normal),\n ])\n self.assertAllClose(gradp_exact_kl_normal_normal_,\n gradp_approx_kl_normal_normal_,\n rtol=0.01, atol=0.)\n self.assertAllClose(gradq_exact_kl_normal_normal_,\n gradq_approx_kl_normal_normal_,\n rtol=0.01, atol=0.)\n\n def test_docstring_example_gamma(self):\n with self.test_session() as sess:\n num_draws = int(1e5)\n concentration_p = constant_op.constant(1.)\n concentration_q = constant_op.constant(2.)\n p = gamma_lib.Gamma(concentration=concentration_p, rate=1.)\n q = gamma_lib.Gamma(concentration=concentration_q, rate=3.)\n approx_kl_gamma_gamma = monte_carlo_lib.expectation(\n f=lambda x: p.log_prob(x) - q.log_prob(x),\n samples=p.sample(num_draws, seed=42),\n log_prob=p.log_prob,\n use_reparametrization=(p.reparameterization_type\n == distribution_lib.FULLY_REPARAMETERIZED))\n exact_kl_gamma_gamma = kullback_leibler.kl_divergence(p, q)\n [exact_kl_gamma_gamma_, approx_kl_gamma_gamma_] = sess.run([\n exact_kl_gamma_gamma, approx_kl_gamma_gamma])\n self.assertEqual(\n False,\n p.reparameterization_type == distribution_lib.FULLY_REPARAMETERIZED)\n self.assertAllClose(exact_kl_gamma_gamma_, approx_kl_gamma_gamma_,\n rtol=0.01, atol=0.)\n\n # Compare gradients. (Not present in `docstring`.)\n gradp = lambda fp: gradients_impl.gradients(fp, concentration_p)[0]\n gradq = lambda fq: gradients_impl.gradients(fq, concentration_q)[0]\n [\n gradp_exact_kl_gamma_gamma_,\n gradq_exact_kl_gamma_gamma_,\n gradp_approx_kl_gamma_gamma_,\n gradq_approx_kl_gamma_gamma_,\n ] = sess.run([\n gradp(exact_kl_gamma_gamma),\n gradq(exact_kl_gamma_gamma),\n gradp(approx_kl_gamma_gamma),\n gradq(approx_kl_gamma_gamma),\n ])\n # Notice that variance (i.e., `rtol`) is higher when using score-trick.\n self.assertAllClose(gradp_exact_kl_gamma_gamma_,\n gradp_approx_kl_gamma_gamma_,\n rtol=0.05, atol=0.)\n self.assertAllClose(gradq_exact_kl_gamma_gamma_,\n gradq_approx_kl_gamma_gamma_,\n rtol=0.03, atol=0.)\n\n\nif __name__ == '__main__':\n test.main()\n","sub_path":"tensorflow_probability/python/tests/monte_carlo_test.py","file_name":"monte_carlo_test.py","file_ext":"py","file_size_in_byte":9207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"272629495","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport time\nfrom tools.text_search import Text\n\n\nclass LeptinShred_Index():\n\n def __init__(self, driver):\n self.driver = driver\n\n def leptin_index_clickhere(self):\n\n #click here button\n clickhereButton = self.driver.find_element(By.ID, \"video-btn\")\n\n print(\"============ Leptin Index Banned words Start ===============\")\n go = Text(self.driver)\n go.bannedWords(self.driver)\n print(\"====== Leptin Index banned words finish ===============\")\n clickhereButton.click()\n\n","sub_path":"automated_funnels/pages/index/leptinSherd_index.py","file_name":"leptinSherd_index.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"411739005","text":"# 
-----------------------------------------------------------------------\n# \n# Copyright (c) 2001-2013, MeVis Medical Solutions AG, Bremen, Germany\n# ALL RIGHTS RESERVED\n# \n# THIS FILE CONTAINS CONFIDENTIAL AND PROPRIETARY INFORMATION OF MEVIS \n# MEDICAL SOLUTIONS AG. ANY DUPLICATION, MODIFICATION, DISTRIBUTION, OR \n# DISCLOSURE IN ANY FORM, IN WHOLE, OR IN PART, IS STRICTLY PROHIBITED \n# WITHOUT THE PRIOR EXPRESS WRITTEN PERMISSION OF MEVIS MEDICAL SOLUTIONS \n# AG.\n# \n#----------------------------------------------------------------------------------\n#! Standalone Installer Wizard\n#!\n# \\file StandaloneInstallerWizard.py\n# \\author Florian Link\n# \\date 02/2005\n#\n#----------------------------------------------------------------------------------\n\nfrom mevis import *\n\n_step = 0\n\n_titles = [\n \"Welcome\",\n \"General Settings\",\n \"Manual File Lists\",\n \"Application Options\",\n \"Installer Options\"]\n\n_infos = [\n \"Start from an existing macro module\",\n \"General settings for your standalone application\",\n \"Specify additional files that will not be detected by the module dependency analyzer\",\n \"Additional settings for your application\",\n \"Additional installer settings which are not required\"]\n\n# --- Initialization\n\n# Initialize module\ndef InitModule():\n InitFields()\n UpdateCreateEnable()\n\n# Initialize field values\ndef InitFields():\n info = MLAB.priv().licenseInformation()\n if info[\"valid\"]:\n ctx.field(\"licensePath\").setStringValue(info[\"filename\"])\n ctx.field(\"availableMacros\").value = \",\".join(MLAB.allMacroModules())\n\ndef InitWindow():\n global _step\n\n _step = 0\n UpdateTab()\n\ndef ExitWindow():\n pass\n\n\n# --- Field updates\n\n# Update createEnable flag\ndef UpdateCreateEnable():\n moduleName = ctx.field(\"moduleName\").stringValue()\n targetPackage = ctx.field(\"ModuleWizardPackageSelector.valid\").value\n ctx.field(\"createEnable\").setBoolValue(bool(moduleName) and targetPackage)\n\n# Update nextEnable flag\ndef UpdateNextEnable():\n flag = True\n if _step == 0:\n moduleName = ctx.field(\"moduleName\").stringValue()\n targetPackage = ctx.field(\"ModuleWizardPackageSelector.valid\").value\n flag = moduleName and targetPackage\n if _step >= len(_titles)-1:\n flag = False\n ctx.field(\"nextEnable\").value = flag\n\n# Perform \"Next\"\ndef NextStep():\n global _step\n _step += 1\n UpdateTab()\n\n# Perform \"Prev\"\ndef PrevStep():\n global _step\n _step -= 1\n UpdateTab()\n\n# Update tab view item\ndef UpdateTab():\n ctx.controlDebug(\"tab\").selectTabAtIndex(_step)\n ctx.field(\"stepTitle\").value = _titles[_step]\n ctx.field(\"stepInfo\").value = _infos[_step]\n UpdateNextEnable()\n\ndef fv(field):\n return ctx.field(field).value\n\ndef ConvertImage(src, target, type):\n if MLABFileManager.exists(src):\n ok = MLABGraphic.convertImage(src, target)\n if not ok:\n MLAB.showWarning(\"Could not convert \" + type + \" file: \" + src)\n else:\n MLAB.showWarning(\"The \" + type + \" file does not exist: \" + src)\n \n# --- Code creation\n\n# Create code from template list\ndef CreateCode ():\n cmdline = []\n\n if not ctx.field(\"productName\").value:\n ctx.field(\"productName\").value = ctx.field(\"moduleName\").value\n \n iconFileWin32 = fv(\"iconFileWin32\")\n iconFileMac = fv(\"iconFileMac\")\n headerImageWin32 = fv(\"headerImageWin32\")\n dsstoreFileMac = fv(\"dsstoreFileMac\")\n headerImageMac = fv(\"headerImageMac\")\n splashFile = fv(\"splashScreenImage\")\n productName = fv(\"productName\")\n moduleName = 
fv(\"moduleName\")\n \n package1 = MLABPackageManager.packageByIdentifier(fv(\"packageIdentifier\"))\n if not package1:\n MLAB.logError(\"package \" + fv(\"packageIdentifier\") + \" not found!\")\n return\n targetDir = package1.path() + \"/Configuration/Installers/\" + productName\n ctx.field(\"targetDir\").value = targetDir\n \n modes = {}\n modes[\"MAXIMIZED\"] = \"-showmaximized\"\n modes[\"FULLSCREEN\"] = \"-showfullscreen\"\n modes[\"NORMAL\"] = \"\"\n cmdline.append(modes[fv(\"windowMode\")])\n\n splashTarget = \"\"\n ctx.field(\"copiedSplashScreenImage\").value = \"\"\n splashTarget = targetDir+\"/\"+productName+\"Splash.png\"\n if splashFile:\n ctx.field(\"copiedSplashScreenImage\").value = productName+\"Splash.png\"\n\n if ctx.field(\"diagnosisFlag\").value:\n cmdline.append(\"-diagnosis\")\n \n iconTarget = \"\"\n if iconFileWin32 or iconFileMac:\n iconTarget = targetDir+\"/\"+productName\n ctx.field(\"copiedIconFile\").value = productName\n else:\n ctx.field(\"copiedIconFile\").value = \"\"\n headerTargetWin32 = \"\"\n if headerImageWin32:\n headerTargetWin32 = targetDir+\"/\"+productName+\".bmp\"\n ctx.field(\"copiedHeaderImageWin32\").value = productName+\".bmp\"\n else:\n ctx.field(\"copiedHeaderImageWin32\").value = \"\"\n\n dsstoreTargetMac = \"\"\n if dsstoreFileMac:\n dsstoreTargetMac = targetDir+\"/\"+productName+\".DSStore\"\n ctx.field(\"copiedDSStoreFileMac\").value = productName+\".DSStore\"\n else:\n ctx.field(\"copiedDSStoreFileMac\").value = \"\"\n \n headerTargetMac = \"\"\n if headerImageMac:\n headerTargetMac = targetDir+\"/\"+productName+\".png\"\n ctx.field(\"copiedHeaderImageMac\").value = productName+\".png\"\n else:\n ctx.field(\"copiedHeaderImageMac\").value = \"\"\n\n ctx.field(\"cmdLineArgs\").value = \" \".join(cmdline)\n \n userFileSection1 = \"\"\n if ctx.field(\"assembleInstallerScript\").value:\n userFileSection1 += \"\\n# additional files/commands\\n\"\n userFileSection1 += ctx.field(\"assembleInstallerScript\").value + \"\\n\"\n ctx.field(\"userFileSection\").value = userFileSection1\n\n templateListPath = ctx.field(\"templateListPath\").stringValue()\n CreateCodeFromTemplateList(templateListPath)\n\n # Clean up temporary field\n ctx.field(\"userFileSection\").value = \"\"\n\n\n if iconTarget:\n if iconFileWin32:\n if iconFileWin32.endswith(\".ico\"):\n MLABFileManager.copy(iconFileWin32, iconTarget + \".ico\")\n else:\n ConvertImage(iconFileWin32, iconTarget + \".ico\", \"windows icon\")\n if iconFileMac:\n if iconFileMac.endswith(\".icns\"):\n MLABFileManager.copy(iconFileMac, iconTarget + \".icns\")\n else:\n ConvertImage(iconFileMac, iconTarget + \".icns\", \"mac icon\")\n\n if headerTargetWin32:\n ConvertImage(headerImageWin32, headerTargetWin32, \"windows header image\")\n if dsstoreTargetMac:\n MLABFileManager.copy(dsstoreFileMac, dsstoreTargetMac)\n if headerTargetMac:\n ConvertImage(headerImageMac, headerTargetMac, \"mac header image\")\n if splashFile:\n ConvertImage(splashFile, splashTarget, \"splash image\")\n \n txt = \"All configuration files for your installer have been generated at
\"\n txt += \"\"+ctx.field(\"targetDir\").value+\"\"\n \n if not MLAB.isMacOS():\n txt += \"Starting the generated batch file \"+ctx.field(\"productName\").value+\".bat will create an installer file named \" + ctx.field(\"productName\").value + \".exe.
\"\n txt += \"You can now click the Create Installer button to run the batch file. \"\n txt += \"Alternatively, you can run the created batch file at a later time or inside of your build system.\"\n \n ctx.field(\"createCodeDialogText\").value = txt\n ctx.showWindow(\"CreateCodeDialog\")\n\ndef CheckModuleName():\n mod = ctx.field(\"moduleName\").value\n moduleInfo = MLAB.moduleInfo(mod)\n if \"type\" in moduleInfo:\n if moduleInfo[\"type\"] != \"MacroModule\":\n MLAB.showWarning(\"The module \"+mod+\" is not a MacroModule!\")\n else:\n MLAB.showWarning(\"The module \"+mod+\" does not exist!\")\n\ndef createInstaller():\n proc = MLAB.newProcess()\n proc.addArgument(MLABFileManager.getExecutable(\"ToolRunner\"))\n proc.addArgument(ctx.field(\"targetDir\").value + \"/\" + ctx.field(\"productName\").value + \".mlinstall\")\n proc.run()\n\ndef checkExternalTools():\n proc = MLAB.newProcess()\n proc.addArgument(MLABFileManager.getExecutable(\"ToolRunner\"))\n proc.addArgument(\"-toolcheck\")\n proc.run()\n\ndef browseOutputDirectory():\n MLAB.openFile(ctx.field(\"targetDir\").value)\n\ndef createApplicationLicense():\n proc = MLAB.newProcess()\n proc.addArgument(MLABFileManager.getExecutable(\"ApplicationLicenseManager\"))\n proc.addArgument(ctx.field(\"targetDir\").value + \"/\" + ctx.field(\"productName\").value + \".mlinstall\")\n proc.run()\n\ndef packageIdentifierChanged(field):\n ident = ctx.field(\"packageIdentifier\").value\n if not MLABPackageManager.packageByIdentifier(ident):\n return\n try:\n group, name = ident.split(\"/\", 1)\n except:\n group = \"\"\n name = \"\"\n if group.startswith(\"FME\"):\n ctx.field(\"licenseFileNeeded\").value = False\n ctx.field(\"defaultStandaloneSetupInclude\").value = \"$(MLAB_FMEwork_General)/Configuration/Installers/Shared/Standalone/defaultFraunhoferMEVISStandaloneSetup.mli\"\n else:\n ctx.field(\"licenseFileNeeded\").value = True\n ctx.field(\"defaultStandaloneSetupInclude\").value = \"$(MLAB_MeVisLab_IDE)/Configuration/Installers/Shared/Standalone/defaultStandaloneSetup.mli\"\n\ndef setMeVisStandaloneLicenseDefault():\n lic = MLAB.variable(\"MLAB_MeVis_Foundation\")+\"/Configuration/Installers/Shared/Core/Resources/MeVisStandaloneApplicationLicense.dat\"\n ctx.field(\"licensePath\").value = lic\n#//# MeVis signature v1\n#//# key: MFowDQYJKoZIhvcNAQEBBQADSQAwRgJBANEfsmYse2e1dRhkQ9AQbreCq9uxwzWLoGom13MNYmyfwoJqQOEXljLFAgw2eEjaT12G4CdqKWhRxh9ANP6n7GMCARE=:VI/mB8bT4u+mRtf/ru8yUQi8BzpaS3UeL2x62YxsUYnVqCWuLrVNLiukIIjnJMKQXlc8ezmgOIcVAV7pgvgKpQ==\n#//# owner: MeVis\n#//# date: 2013-04-10T22:33:03\n#//# hash: RqfeNFYoJy5aAgj+ZegDU2rU3Qwb3pKc2JfPEQNKz8A7Myor+vzZ0ZRnkhN1phSEXhW/DLv3tcwONdVpp1rhgg==\n#//# MeVis end\n","sub_path":"django/static/js/MeVisLab/Private/Modules/Macros/ADK/Wizards/StandaloneInstaller/StandaloneInstallerWizard.py","file_name":"StandaloneInstallerWizard.py","file_ext":"py","file_size_in_byte":9454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"298127549","text":"import copy\nimport random\n\nimport numpy as np\nimport torch\nimport torch.optim as optim\n\nfrom model import Actor, Critic\n\nSEED=13\nLR_SCHED_STEP=1000\nLR_SCHED_GAMMA=0.99\nACTOR_LR=3e-3\nCRITIC_LR=4e-4\nTAU=8e-3\nOU_NOISE_THETA=0.9\nOU_NOISE_SIGMA=0.01\n\nclass Agent():\n def __init__(self, num_agents, state_size, action_size):\n random.seed(SEED)\n\n # Configs\n self.state_size = state_size\n self.action_size = action_size\n\n # Actor Network\n self.actor = Actor(state_size, action_size, fc1_units=128, 
fc2_units=64, seed=SEED)\n self.actor_target = Actor(state_size, action_size, fc1_units=128, fc2_units=64, seed=SEED)\n self.soft_update(self.actor, self.actor_target, 1)\n\n # Critic Network\n self.critic = Critic(state_size, action_size, num_agents, fc1_units=128, fc2_units=64, seed=SEED)\n self.critic_target = Critic(state_size, action_size, num_agents, fc1_units=128, fc2_units=64, seed=SEED)\n self.soft_update(self.critic, self.critic_target, 1)\n\n # Optimizer\n self.actor_optimizer = optim.Adam(self.actor.parameters(), lr=ACTOR_LR)\n self.actor_lr_scheduler = optim.lr_scheduler.StepLR(self.actor_optimizer, step_size=LR_SCHED_STEP, gamma=LR_SCHED_GAMMA)\n self.critic_optimizer = optim.Adam(self.critic.parameters(), lr=CRITIC_LR)\n self.critic_lr_scheduler = optim.lr_scheduler.StepLR(self.critic_optimizer, step_size=LR_SCHED_STEP, gamma=LR_SCHED_GAMMA)\n\n # Initialize a noise process\n self.noise = OUNoise(action_size)\n\n def soft_update(self, local_model, target_model, tau=TAU):\n \"\"\"Soft update model parameters.\n θ_target = τ*θ_local + (1 - τ)*θ_target\n\n Params\n ======\n local_model (PyTorch model): weights will be copied from\n target_model (PyTorch model): weights will be copied to\n tau (float): interpolation parameter \n \"\"\"\n for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):\n target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)\n\n\n def act(self, state):\n with torch.no_grad():\n self.actor.eval()\n state = torch.from_numpy(state).float()\n action = self.actor(state).data.cpu().numpy()\n self.actor.train()\n\n action += self.noise.sample()\n np.clip(action, a_min=-1, a_max=1, out=action)\n\n return action\n\n def lr_step(self):\n self.actor_lr_scheduler.step()\n self.critic_lr_scheduler.step()\n\n def reset_noise(self):\n self.noise.reset()\n\nclass OUNoise:\n \"\"\"Ornstein-Uhlenbeck process.\"\"\"\n def __init__(self, action_size, mu=0.):\n \"\"\"Initialize parameters and noise process.\"\"\"\n random.seed(SEED)\n self.mu = mu * np.ones(action_size)\n self.reset()\n\n def reset(self):\n \"\"\"Reset the internal state (= noise) to mean (mu).\"\"\"\n self.state = copy.copy(self.mu)\n\n def sample(self):\n \"\"\"Update internal state and return it as a noise sample.\"\"\"\n x = self.state\n random_array = [random.random() for i in range(len(x))]\n dx = OU_NOISE_THETA * (self.mu - x) + OU_NOISE_SIGMA * np.array(random_array)\n self.state = x + dx\n return self.state\n","sub_path":"ddpg_agent.py","file_name":"ddpg_agent.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"122162087","text":"#!/usr/bin/env python3\n\n#----\nimport sys\nsys.path.append('/home/amigos/ros/src/necst/lib')\nimport n2df\nimport numpy\nimport matplotlib.pyplot as plt\nimport pickle\n#from astropy.io import fits\nfrom scipy.optimize import curve_fit\n\n#-----\ndef f(x, a, b, c):\n return a*x**2 + b*x + c\n\ndef gaussian(x, a, mu, gamma):\n return a * numpy.exp(- gamma * (x - mu) **2)\n\ndef calc_integdata(IF, data_list, mode_list, lam, bet, scan_list, mi, ma, width, integ_mi, integ_ma):\n\n data_list = data_list[IF-1]\n mode_list = mode_list[IF-1]\n lam = lam[IF-1]\n bet = bet[IF-1]\n scan_list = scan_list[IF-1]\n \n xmask = []\n ymask = []\n hotmask = []\n offmask = []\n for i in range(len(mode_list)): \n if mode_list[i] == 'HOT':\n xmask.append(0)\n ymask.append(0)\n hotmask.append(1)\n offmask.append(0)\n elif mode_list[i] == 
'OFF':\n xmask.append(0)\n ymask.append(0)\n hotmask.append(0)\n offmask.append(1)\n elif scan_list[i] == 1 and mode_list[i] == 'ON':\n xmask.append(1)\n ymask.append(0)\n hotmask.append(0)\n offmask.append(0)\n elif scan_list[i] == 2 and mode_list[i] == 'ON':\n xmask.append(0)\n ymask.append(1)\n hotmask.append(0)\n offmask.append(0)\n \n# calc Ta*\n\n tmp = []\n for i in range(len(hotmask)):\n if hotmask[i] == 1:\n if len(tmp) == 0:\n for j in range(i+1):\n tmp.append(data_list[i])\n else:\n tmp.append(data_list[i])\n else:\n if len(tmp) == 0:\n pass\n else:\n tmp.append(tmp[-1])\n HOTlist = numpy.array(tmp)\n \n tmp = []\n for i in range(len(offmask)):\n if offmask[i] == 1:\n if len(tmp) == 0:\n for j in range(i+1):\n tmp.append(data_list[i])\n else:\n tmp.append(data_list[i])\n else:\n if len(tmp) == 0:\n pass\n else:\n tmp.append(tmp[-1])\n OFFlist = numpy.array(tmp)\n \n ONlist = numpy.array(data_list)\n \n Taslist = (ONlist - OFFlist)/(HOTlist - OFFlist) * 300\n \n x = numpy.linspace(0, 32768, 32768)\n\n rTaslist_tmp = []\n rtmp = []\n for i in range(len(Taslist)):\n base = []\n start = int(numpy.argmax(Taslist[i][int(mi):int(ma)]) + (mi - width))\n end = int(numpy.argmax(Taslist[i][int(mi):int(ma)]) + (mi + width))\n dif = end - start\n base.extend(Taslist[i])\n base[start:end] = []\n param = numpy.polyfit(x[:32768-dif], base, 2)\n rTas = Taslist[i] - f(x, *param)\n rTaslist_tmp.append(rTas)\n rTaslist = numpy.array(rTaslist_tmp)\n \n# create data for plot\n xscan_Ta = []\n xscan_x = []\n xscan_y = []\n \n yscan_Ta = []\n yscan_x = []\n yscan_y = []\n\n for i in range(len(xmask)):\n if xmask[i] == 1:\n xscan_Ta.append(rTaslist[i])\n xscan_x.append(lam[i])\n xscan_y.append(bet[i])\n else:\n pass\n\n for i in range(len(ymask)):\n if ymask[i] == 1:\n yscan_Ta.append(rTaslist[i])\n yscan_x.append(lam[i])\n yscan_y.append(bet[i])\n else:\n pass\n\n # TA* integration\n xscan_integ = []\n yscan_integ = []\n for i in range(len(xscan_Ta)):\n lx = xscan_Ta[i]\n xscan_integ.append(numpy.sum(lx[int(integ_mi):int(integ_ma)]))\n\n for i in range(len(yscan_Ta)):\n ly = yscan_Ta[i]\n yscan_integ.append(numpy.sum(ly[int(integ_mi):int(integ_ma)]))\n\n return xscan_integ, xscan_x, xscan_y, yscan_integ, yscan_x, yscan_y, xscan_Ta, yscan_Ta\n\n\npara_init = numpy.array([25000., 0.1, 0.0001])\n\n #-----\ndef analysis(file_name, mi=10000, ma=30000, width=1000, integ_mi=15500, integ_ma=17500, plot=True, savefig=True, savepath_filename='/home/amigos/latest_obs/pointing_line.png'):\n# open file\n\n n = n2df.Read(file_name) \n _n = n.read_all()\n d = []\n for i in range(25):\n _d = []\n for j in range(len(_n)):\n _d.append(_n[j][i])\n d.append(_d)\n \n# define axis \n time = d[0]\n mode = d[21]\n mode = list(map(lambda x:x.decode() ,mode))\n subscan = d[22]\n _lam = d[23]\n _bet = d[24]\n\n# get integdata / mask\n data_list = []\n mode_list = []\n scan_list = []\n lam = []\n bet = []\n \n for h in range(20):\n d_ = d[h+1]\n d_list = []\n m_list = []\n s_list = []\n la_list = []\n be_list = []\n tmp = numpy.zeros(32768)\n for i in range(len(d_)):\n if subscan[i] == 1 and mode[i] == 'ON':\n tmp += d_[i]\n if subscan[i+1] == 2 or mode[i+1] == 'OFF' or mode[i+1] == 'HOT':\n d_list.append(tmp)\n m_list.append('ON')\n la_list.append(_lam[i])\n be_list.append(_bet[i])\n s_list.append(1)\n tmp = numpy.zeros(32768)\n else:\n pass\n elif subscan[i] == 2 and mode[i] == 'ON':\n tmp += d_[i]\n if subscan[i+1] == 1 or mode[i+1] == 'OFF' or mode[i+1] == 'HOT':\n d_list.append(tmp)\n m_list.append('ON')\n 
la_list.append(_lam[i])\n be_list.append(_bet[i])\n s_list.append(2)\n tmp = numpy.zeros(32768)\n else:\n pass\n elif mode[i] == 'OFF':\n tmp += d_[i]\n if mode[i+1] == 'ON' or mode[i+1] == 'HOT':\n d_list.append(tmp)\n m_list.append('OFF')\n la_list.append(0)\n be_list.append(0)\n if subscan[i] == 1:\n s_list.append(1)\n else:\n s_list.append(2)\n tmp = numpy.zeros(32768)\n else:\n pass \n elif mode[i] == 'HOT':\n tmp += d_[i]\n if i == len(d_)-1:\n d_list.append(tmp)\n m_list.append('HOT')\n la_list.append(0)\n be_list.append(0)\n if subscan[i] == 1:\n s_list.append(1)\n else:\n s_list.append(2)\n tmp = numpy.zeros(32768)\n else:\n if mode[i+1] == 'ON' or mode[i+1] == 'OFF':\n d_list.append(tmp)\n m_list.append('HOT')\n la_list.append(0)\n be_list.append(0)\n if subscan[i] == 1:\n s_list.append(1)\n else:\n s_list.append(2)\n tmp = numpy.zeros(32768)\n else:\n pass\n else:\n print(\"check\")\n data_list.append(d_list)\n mode_list.append(m_list)\n lam.append(la_list)\n bet.append(be_list)\n scan_list.append(s_list)\n \n ret1 = calc_integdata(1, data_list, mode_list, lam, bet, scan_list, mi, ma, width, integ_mi, integ_ma)\n\n xscan_integ = ret1[0]\n xscan_x = ret1[1]\n xscan_y = ret1[2]\n yscan_integ = ret1[3]\n yscan_x = ret1[4]\n yscan_y = ret1[5]\n xscan_Ta = ret1[6]\n yscan_Ta = ret1[7]\n\n\n# Gaussian Fitting function\n# Az fitting\n try:\n popt_az, pcov_az = curve_fit(gaussian, xscan_x, xscan_integ, p0 = para_init, maxfev=10000)\n error_az = numpy.sqrt(numpy.diag(pcov_az))\n\n x_g = numpy.linspace(xscan_x[0], xscan_x[-1], 1001)\n gaus_az = gaussian(x_g, popt_az[0], popt_az[1], popt_az[2])\n\n# El fitting\n popt_el, pcov_el = curve_fit(gaussian, yscan_y, yscan_integ, p0 = para_init, maxfev=10000)\n error_el = numpy.sqrt(numpy.diag(pcov_el))\n\n gaus_el = gaussian(x_g, popt_el[0], popt_el[1], popt_el[2])\n\n\n# dAz dEl\n dAz = popt_az[1]\n dEl = popt_el[1]\n hpbw_az = 1/numpy.sqrt(2*popt_az[2]) *2.35\n hpbw_el = 1/numpy.sqrt(2*popt_el[2]) *2.35\n\n\n# plot\n\n fig = plt.figure(figsize = (15, 5))\n\n axlist = [fig.add_subplot(1,2,i+1) for i in range(2)]\n\n axlist[0].plot(xscan_x, xscan_integ, \"o\")\n axlist[0].errorbar(xscan_x, xscan_integ, yerr = error_az[0], fmt = \"b+\")\n axlist[0].plot(x_g, gaus_az)\n axlist[0].set_xlabel(\"dAz [arcsec]\")\n axlist[0].set_ylabel(\"Ta* [K]\")\n\n axlist[1].plot(yscan_y, yscan_integ, \"o\")\n axlist[1].errorbar(yscan_y, yscan_integ, yerr = error_el[0], fmt = \"b+\")\n axlist[1].plot(x_g, gaus_el)\n axlist[1].set_xlabel(\"dEl [arcsec]\")\n axlist[1].set_ylabel(\"Ta* [K]\")\n\n [a.grid() for a in axlist]\n\n\n fig2 = plt.figure(figsize = (20,20))\n \n index_max = numpy.argmax(xscan_Ta[2][4000:12000]) + 4000 \n\n lim_mi = int(index_max - 800)\n lim_ma = int(index_max + 800)\n\n axlist = [fig2.add_subplot(5,5,i+1) for i in range(25)]\n\n axlist[2].plot(yscan_Ta[0])\n axlist[2].set_title(\"(0, 60)\")\n axlist[2].set_xlim(lim_mi, lim_ma)\n axlist[2].set_ylim(-10,50)\n\n axlist[7].plot(yscan_Ta[1])\n axlist[7].set_title(\"(0, 30)\")\n axlist[7].set_xlim(lim_mi, lim_ma)\n axlist[7].set_ylim(-10,50)\n\n axlist[10].plot(xscan_Ta[0])\n axlist[10].set_title(\"(-60, 0)\")\n axlist[10].set_xlim(lim_mi, lim_ma)\n axlist[10].set_ylim(-10,50)\n\n axlist[11].plot(xscan_Ta[1])\n axlist[11].set_title(\"(-30, 0)\")\n axlist[11].set_xlim(lim_mi, lim_ma)\n axlist[11].set_ylim(-10,50)\n\n# axlist[12].plot(xscan_Ta[2])\n axlist[12].plot(yscan_Ta[2])\n axlist[12].set_title(\"(0, 0)\")\n axlist[12].set_xlim(lim_mi, lim_ma)\n axlist[12].set_ylim(-10,50)\n\n 
axlist[13].plot(xscan_Ta[3])\n axlist[13].set_title(\"(30, 0)\")\n axlist[13].set_xlim(lim_mi, lim_ma)\n axlist[13].set_ylim(-10,50)\n\n axlist[14].plot(xscan_Ta[4])\n axlist[14].set_title(\"(60, 0)\")\n axlist[14].set_xlim(lim_mi, lim_ma)\n axlist[14].set_ylim(-10,50)\n\n axlist[17].plot(yscan_Ta[3])\n axlist[17].set_title(\"(0, -30)\")\n axlist[17].set_xlim(lim_mi, lim_ma)\n axlist[17].set_ylim(-10,50)\n\n axlist[22].plot(yscan_Ta[4])\n axlist[22].set_title(\"(0, -60)\")\n axlist[22].set_xlim(lim_mi, lim_ma)\n axlist[22].set_ylim(-10,50)\n\n axlist[0].set_visible(False)\n axlist[1].set_visible(False)\n axlist[3].set_visible(False)\n axlist[4].set_visible(False)\n axlist[5].set_visible(False)\n axlist[6].set_visible(False)\n axlist[8].set_visible(False)\n axlist[9].set_visible(False)\n axlist[15].set_visible(False)\n axlist[16].set_visible(False)\n axlist[18].set_visible(False)\n axlist[19].set_visible(False)\n axlist[20].set_visible(False)\n axlist[21].set_visible(False)\n axlist[23].set_visible(False)\n axlist[24].set_visible(False)\n\n [a.grid() for a in axlist]\n\n plt.axes([0.625,0.25, 0.25, 0.1])\n plt.axis(\"off\")\n #plt.text(0, 0.5, \"OBJECT : {}\".format(hdu[1].data[\"OBJECT\"][0]), fontsize=10)\n plt.text(0,0,\"dAz = {}\".format(round(dAz, 2)) + \" dEl = {}\".format(round(dEl, 2)) + \" (arcsec)\", fontsize = 10)\n plt.text(0,-0.5,\"HPBW_AZ = {}\".format(round(hpbw_az, 2)) + \" HPBW_EL = {}\".format(round(hpbw_el, 2)), fontsize = 10)\n plt.text(0, -1.0, \"DATA PATH : {}\".format(file_name), fontsize=6)\n\n except Exception as e:\n print(\"\\033[31m[ERROR OCCURRED]\\033[0m\\n\", e)\n \n # same as above\n fig2 = plt.figure(figsize = (20,20))\n \n axlist = [fig2.add_subplot(5,5,i+1) for i in range(25)]\n\n axlist[2].plot(yscan_Ta[0])\n axlist[2].set_title(\"(0, 60)\")\n\n axlist[7].plot(yscan_Ta[1])\n axlist[7].set_title(\"(0, 30)\")\n\n axlist[10].plot(xscan_Ta[0])\n axlist[10].set_title(\"(-60, 0)\")\n\n axlist[11].plot(xscan_Ta[1])\n axlist[11].set_title(\"(-30, 0)\")\n\n# axlist[12].plot(xscan_Ta[2])\n axlist[12].plot(yscan_Ta[2])\n axlist[12].set_title(\"(0, 0)\")\n\n axlist[13].plot(xscan_Ta[3])\n axlist[13].set_title(\"(30, 0)\")\n\n axlist[14].plot(xscan_Ta[4])\n axlist[14].set_title(\"(60, 0)\")\n\n axlist[17].plot(yscan_Ta[3])\n axlist[17].set_title(\"(0, -30)\")\n\n axlist[22].plot(yscan_Ta[4])\n axlist[22].set_title(\"(0, -60)\")\n\n axlist[0].set_visible(False)\n axlist[1].set_visible(False)\n axlist[3].set_visible(False)\n axlist[4].set_visible(False)\n axlist[5].set_visible(False)\n axlist[6].set_visible(False)\n axlist[8].set_visible(False)\n axlist[9].set_visible(False)\n axlist[15].set_visible(False)\n axlist[16].set_visible(False)\n axlist[18].set_visible(False)\n axlist[19].set_visible(False)\n axlist[20].set_visible(False)\n axlist[21].set_visible(False)\n axlist[23].set_visible(False)\n axlist[24].set_visible(False)\n\n plt.axes([0.625,0.25, 0.25, 0.1])\n plt.axis(\"off\")\n plt.text(0, 0.5, \"ERROR OCCURRED\", fontsize=10)\n #plt.text(0, 0, \"OBJECT : {}\".format(hdu[1].data[\"OBJECT\"][0]), fontsize=10)\n plt.text(0, -0.5, \"DATA PATH : {}\".format(file_name), fontsize=6)\n\n [a.grid() for a in axlist]\n\n finally:\n if savefig:\n plt.savefig(savepath_filename)\n if plot:\n plt.show()\n else:\n pass\n return\n\nif __name__ == \"__main__\":\n args = sys.argv\n if len(args) < 2:\n print(\"You must specify data_file\")\n sys.exit()\n\n file_name = args[1]\n# option\n# for baseline fitting to avoid spurious \n mi = int(5000)\n ma = int(15000) \n width = 
int(500)\n# integration range\n integ_mi = int(8000)\n integ_ma = int(9000)\n# specify option\n if len(args) == 7:\n # for baseline fitting to avoid spurious\n if args[2] != \"DEF\":\n mi = int(args[2])\n if args[3] != \"DEF\":\n ma = int(args[3])\n if args[4] != \"DEF\":\n width = int(args[4])\n# integration range\n if args[5] != \"DEF\":\n integ_mi = int(args[5])\n if args[6] != \"DEF\":\n integ_ma = int(args[6])\n else: pass\n \n analysis(file_name, mi, ma, width, integ_mi, integ_ma)\n","sub_path":"lib/pointing_line_xffts.py","file_name":"pointing_line_xffts.py","file_ext":"py","file_size_in_byte":14758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"221943478","text":"import hashlib\nimport time\n\ndef get_proof(header, nonce):\n\n preimage = f\"{header}:{nonce}\".encode()\n proof_hex = hashlib.sha256(preimage).hexdigest()\n return int(proof_hex, 16)\n\ndef mine(header, target, nonce):\n while get_proof(header, nonce) >= target:\n nonce = nonce + 1 # new guess\n return nonce\n\ndef mining_demo(header):\n nonce = previous_nonce = -1\n for difficulty_bits in range(1,30):\n target = 2 ** (256 - difficulty_bits)\n start_time = time.time()\n nonce = mine(header, target, previous_nonce)\n elapsed_time = time.time() - start_time\n minutes = elapsed_time // 60\n seconds = elapsed_time - (minutes * 60) // 1 \n proof = get_proof(header, nonce)\n\n target_str = f'{target:.0e}'\n elapsed_time_str = f'{elapsed_time:.0e}' if nonce != previous_nonce else ''\n bin_proof_str = f'{proof:0256b}'[:50]\n\n # print(f'bits: {difficulty_bits}, target: {target_str}, elapsed time: {int(minutes):02d}:{int(seconds):02d}, nonce: {nonce}, proof: {proof}')\n print(f'bits: {difficulty_bits:>3}, target: {target_str:>7}, elapsed time: {elapsed_time_str:>7}, nonce: {nonce:>10}, proof: {bin_proof_str}...')\n \n previous_nonce = nonce\nif __name__ == \"__main__\":\n header = \"hello\"\n # number of leading zeros we require\n # difficulty_bits = 25\n # target = 2 ** (256 - difficulty_bits)\n # nonce = mine(header, target)\n # print(nonce)\n # print(f'4 bits of proof? 
{str(proof < target):5} : {proof:#066x}')\n mining_demo(header)\n","sub_path":"powcoin/my_mining_demo.py","file_name":"my_mining_demo.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"538109229","text":"from metalearn import Metafeatures\nfrom DataExtraction import DataSetExtraction as DSE\nfrom DataExplorationMethods import information_theoretic_metafeatures_separate as ITM, common_operations as CO\nimport pandas as pd\nimport numpy as np\nimport os\nimport json\n\nX, y, features = DSE.import_example_data('Hepatitis')\n\nmissing_values = ''\n\ndfX = pd.DataFrame(X, columns=features)\n\ndfX = dfX.replace(missing_values, np.NaN)\n\ntype = {}\nfor header in list(dfX):\n X_f = dfX[header].values\n cleaned_X_f = np.delete(X_f, np.argwhere(X_f == missing_values))\n try:\n cleaned_X_f.astype(float)\n type[header] = 'NUMERICAL'\n dfX[header] = pd.to_numeric(dfX[header], errors='coerce')\n except:\n type[header] = \"CATEGORICAL\"\n dfX[header] = dfX[header].astype('category')\n\n#dfX = dfX.apply(pd.Categorical, errors='ignore')\n\ndfy = pd.Series(y, dtype='category')\ndfy.name = \"Output\"\n\nmf = Metafeatures()\n\nnoNaN_cat_feat, = mf._get_categorical_features_with_no_missing_values(dfX, column_types=type)\n\nentropies = ITM.get_separate_attribute_entropy(noNaN_cat_feat)\nbest_worst_entropies, locations = CO.return_most_important_attribute_entropies(entropies, return_end='Both', return_number=3)\nprint(best_worst_entropies)\nprint(locations)","sub_path":"Testing/DataExploration/TestAdditionalMethods.py","file_name":"TestAdditionalMethods.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"115924442","text":"#!/usr/bin/env python\n# -*- encoding: utf-8\n\nimport datetime as dt\nimport json\nfrom urllib.parse import urlparse, urlencode, urlunparse\n\n\naction_steps = [\n {\n 'actionStepType': 'Script',\n 'scriptText': open('prepare_entry.js').read(),\n },\n {\n 'fileTemplate': '[[draft]]',\n 'fileExtTemplate': 'json',\n 'fileNameTemplate': '[[uuid]]',\n 'folderTemplate': '/spending/[[date|%Y]]/[[date|%m]]/[[date|%d]]',\n 'writeType': 'create',\n 'actionStepType': 'Dropbox',\n },\n]\n\n\nparts = [\n 'x-drafts4',\n 'x-callback-url',\n '/import_action',\n\n # params\n '',\n\n urlencode({\n 'actionSteps': [json.dumps(action_steps)],\n 'shouldConfirm': ['0'],\n 'uuid': ['5F62BCA9-A01D-4804-82D3-2DF126ACED8E'],\n 'logLevel': ['1'],\n 'name': ['Record spending'],\n 'tintColor': [\n json.dumps([0.27500000596046448, 0.75700002908706665, 0.21600000560283661])\n ],\n 'modifiedAt': [dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S +0000')],\n 'disposition': ['2'],\n 'v': ['2'],\n 'iconImageName': ['454-pounds2'],\n 'description': ['Take a spending entry, convert it into a JSON file, and save it to Dropbox.']\n }),\n\n # fragment\n '',\n]\n\nprint(urlunparse(parts))\n","sub_path":"spending-tracker/create_callback_url.py","file_name":"create_callback_url.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"231209263","text":"\n\n\"\"\"\n A simple script to train a small lstm network on memory addresses but performs regression instead of\n classification.\n\"\"\"\n\nfrom __future__ import print_function\nfrom __future__ import division\nimport torch\nimport torch.nn as nn\nimport torch.optim as optimizers\nimport 
torch.nn.functional as F\nimport os\nimport pickle\nimport itertools\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import*\nfrom sklearn import preprocessing\n\n\nclass NN(nn.Module):\n \"\"\"\n Our class defines MLP and LSTM models to test our data learning\n \"\"\"\n def __init__(self, input_dims):\n super(NN, self).__init__()\n self.input_dims = input_dims\n # define MLP\n size = 8\n self.MLP = nn.Sequential(\n nn.Linear(input_dims, size),\n nn.Softmax(),\n nn.Linear(size, size),\n nn.Softmax(),\n nn.Linear(size, size),\n nn.Softmax(),\n # nn.Linear(size, size),\n nn.Sigmoid(),\n nn.Linear(size, 1)\n # nn.LogSoftmax()\n )\n\n class LSTM(nn.Module):\n \"\"\"\n This is our lstm class\n \"\"\"\n def __init__(self, input_size, seq_length, hidden_size):\n super(LSTM, self).__init__()\n self.sequence_len = seq_length\n self.hidden_size = hidden_size\n self.lstm = nn.LSTM(input_size, hidden_size, num_layers=1, batch_first=True)\n self.fc_1 = nn.Linear(hidden_size, 8)\n self.fc_2 = nn.Linear(8, 1)\n\n def forward(self, input):\n # input = input.view(input.size()[0], 1, -1)\n h0 = torch.zeros(1, input.size()[0], self.hidden_size) # (num_of_lstm_layers, batch_size, input_size)\n c0 = torch.zeros(1, input.size()[0], self.hidden_size)\n output, hidden = self.lstm(input, (h0, c0))\n output = self.fc_2(F.relu(self.fc_1(output))).view(-1, 1)\n return output\n\n # instantiate lstm model with one layer of 8 hidden units\n self.my_lstm = LSTM(input_size=3, seq_length=1, hidden_size=8)\n\n\n def train_net(self, model, train_data, train_labels,\n test_data, test_labels, epochs,\n batch_size, learn_rate):\n # set it in training mode, and do some data conversion for pytorch\n self.train()\n train_data = torch.Tensor(train_data).float()\n train_labels = torch.Tensor(train_labels).float()\n optimizer = optimizers.Adam(self.parameters(), lr=learn_rate)\n criterion = nn.MSELoss()\n # training loop for #N epochs\n for e in range(1, epochs+1):\n epoch_loss = 0.0\n epoch_acc = 0.0\n for i in range(train_data.size()[0]//batch_size):\n # pick random *batch_size* # of examples to train on...\n indices = np.random.choice(train_data.size()[0], batch_size, replace=False)\n batch_data = train_data[indices,:]\n batch_labels = train_labels[indices]\n # print(batch_data.size())\n output = model(batch_data)*1000000\n batch_labels = batch_labels*1000000\n # print(output[90].int(), batch_labels[90].int())\n # prediction = output.long() #output.max(dim=1, keepdim=True)[1]\n epoch_acc += (batch_labels.int() == output.int()).sum().item()\n loss = criterion(output, batch_labels)\n epoch_loss += loss.item()\n loss.backward()\n optimizer.step()\n\n # zero out the gradients saved previously\n optimizer.zero_grad()\n self.zero_grad()\n print('epoch ({}/{}), batch_loss = {:.2f}, batch_acc = {:.2f}%'.format(e, epochs,\n epoch_loss/train_data.size()[0],\n epoch_acc*100.0/train_data.size()[0]))\n # print('log: saving model now...')\n # torch.save(self.state_dict(), 'models/model-{}.ckpt'.format(e))\n print('\\n testing now... 
\\n')\n return self.test_model(model=model, test_examples=test_data, labels=test_labels)\n\n def test_model(self, model, test_examples, labels):\n # check performance on test set\n self.eval() # set in eval mode\n test_examples = torch.Tensor(test_examples).float()\n labels = torch.Tensor(labels).float()\n print('testing on {} examples...'.format(test_examples.size()[0]))\n output = model(test_examples)\n # prediction = output.max(dim=1, keepdim=True)[1]\n # accurate = prediction.eq(labels.view_as(prediction)).sum().item()\n accurate = (labels.int().float() == output.float()).sum().item()\n print('Total test accuracy = {:.2f}%'.format(accurate*100/(test_examples.size()[0])))\n return output\n\n\ndef plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n print(cm)\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n return plt\n\n\ndef label_to_idx(labels):\n pass\n\n\ndef main():\n # load data files\n data_dir = 'dataset_regressionlstm'\n train_data_file = open(os.path.join(data_dir, 'train_delayValues_data.pkl'), 'rb')\n train_labels_file = open(os.path.join(data_dir, 'train_delayValues_label.pkl'), 'rb')\n train_data = np.asarray(pickle.load(train_data_file))\n train_labels = np.asarray(pickle.load(train_labels_file)) #/ 1000000.0\n test_data_file = open(os.path.join(data_dir, 'test_delayValues_data.pkl'), 'rb')\n test_labels_file = open(os.path.join(data_dir, 'test_delayValues_label.pkl'), 'rb')\n test_data = np.asarray(pickle.load(test_data_file))\n test_labels = np.asarray(pickle.load(test_labels_file)) #/ 10000000.0\n\n # preprocess them for easing the training...\n # train_data = preprocessing.scale(train_data)\n # test_data = preprocessing.scale(test_data)\n # train_labels = preprocessing.scale(train_labels)\n # test_labels = preprocessing.scale(test_labels)\n # print(train_labels)\n\n train_data_for_lstm = train_data.reshape((-1, 1, 3))\n test_data_for_lstm = test_data.reshape((-1, 1, 3))\n\n # reshape for one column\n train_labels = np.reshape(train_labels, newshape=(len(train_labels), 1))\n test_labels = np.reshape(test_labels, newshape=(len(test_labels), 1))\n\n print('3d training data, 1d labels: ', train_data.shape, train_labels.shape)\n print('3d test data, 1d labels: ', test_data.shape, test_labels.shape)\n\n # create model and train\n net = NN(input_dims=3)\n\n ###############################################################################################3\n # the first two lines are mlp implementation, the next two run the lstm\n # test mlp\n predictions = net.train_net(net.MLP, train_data, train_labels, test_data, test_labels,\n epochs=10000, batch_size=1024, learn_rate=0.001)\n\n # test lstm\n # predictions = 
net.train_net(net.my_lstm, train_data_for_lstm, train_labels, test_data_for_lstm,\n # test_labels, epochs=50, batch_size=1024, learn_rate=0.1)\n # print(predictions)\n ##############################################################################################3\n\n # conf_matrix = confusion_matrix(test_labels, predictions)\n # classes = ['page_hit', \"page_miss\"]\n # plot_confusion_matrix(conf_matrix, classes, normalize=True)\n # plt.title('Confusion Matrix')\n # plt.show()\n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n\n\n\n\n\n","sub_path":"pytorch/regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":8600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"563665695","text":"#coding:utf-8\nimport unittest\nfrom config.config import config\nfrom time import sleep\nfrom case.public.invoiceState import invoiceState\nfrom case.public.oprateTime import oprationTime\nfrom case.public.pageDisplay import pageDisplay\nfrom case.public.numberDisplay import numberDisplay\n\ndriver = config.driver\nclass incomeRechord(unittest.TestCase):\n\n def setUp(self):\n sleep(1)\n\n def tearDown(self):\n sleep(1)\n\n def test026_incomeRecord(self):\n u\"\"\"收入记录\"\"\"\n driver.find_element_by_link_text(u\"收入记录\").click()\n sleep(5)\n\n def test027_cardID(self):\n u\"\"\"按卡号查询\"\"\"\n driver.find_element_by_id(\"cardCode\").send_keys(\"200100008896\")\n sleep(1)\n driver.find_elements_by_class_name(\"button\")[1].click()\n sleep(5)\n driver.find_element_by_id(\"cardCode\").clear()\n sleep(1)\n driver.find_elements_by_class_name(\"button\")[1].click()\n sleep(5)\n\n def test028_identificationNu(self):\n u\"\"\"纳税人识别号\"\"\"\n driver.find_element_by_id(\"taxpayerIdentificationNumber\").send_keys(\"911101083180097938\")\n sleep(1)\n driver.find_elements_by_class_name(\"button\")[1].click()\n sleep(5)\n driver.find_element_by_id(\"taxpayerIdentificationNumber\").clear()\n sleep(1)\n driver.find_elements_by_class_name(\"button\")[1].click()\n sleep(5)\n\n def test029_invoiceState(self):\n u\"\"\"按发票状态查询\"\"\"\n iv = invoiceState()\n iv.invoiceState()\n\n def test030_operateTime(self):\n u\"\"\"按操作时间查询\"\"\"\n op = oprationTime()\n op.oprationTime()\n\n def test031_pageDisplay(self):\n u\"\"\"翻页检查\"\"\"\n pd = pageDisplay()\n pd.pageDisplay()\n\n def test032_numDisplay(self):\n u\"\"\"每页显示条数\"\"\"\n nd = numberDisplay()\n nd.numberDisplay()\n num = driver.find_element_by_partial_link_text(\"10\").text\n print(num)\n text = \"10\"\n if num==text:\n pass\n else:\n print(u\"每页显示10条出现异常\")\n self.assertFalse(text)\n\n\nif __name__ == '__main__':\n unittest.main","sub_path":"case/3business_record/test06_incomeRecord.py","file_name":"test06_incomeRecord.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"312916602","text":"#!/usr/bin/python\nprint(\"Ingrese un numero\")\nfactorial=1\nsumatoria=0\nnumero=int(input())\nwhile numero < 0:\n\tprint(\"Ingrese un numero positivo\")\n\tnumero=int(input())\nprint(\"1.Sumatoria\\n2.Factorial\\nIngrese Opcion: \")\nopcion=int(input())\nif opcion == 1:\n\tfor i in range(numero+1):\n\t\tsumatoria+=i;\n\tprint(\"La sumatoria de \"+str(numero)+\"es \"+str(sumatoria))\nelif opcion == 2:\n\t\tfor i in range(1,numero+1):\n\t\t\tfactorial=factorial*i\n\t\tprint(\"El factorial de \"+str(numero)+\"es \"+str(factorial))","sub_path":"Clase 
01/Programa07.py","file_name":"Programa07.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"77556547","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\n\"\"\"\n *.py: Description of what * does.\n Last Modified:\n\"\"\"\n\n__author__ = \"Sathappan Muthiah\"\n__email__ = \"sathap1@vt.edu\"\n__version__ = \"0.0.1\"\n\nimport tensorflow as tf\nfrom collections import deque\nimport logging\nimport sys\nsys.path.append('../')\nimport ipdb\nfrom utils import variable_summaries\n\nlog = logging.getLogger('RNNDecoder')\n\n\nclass GORNNDecoder(object):\n BIOLOGICAL_PROCESS = 'GO:0008150'\n MOLECULAR_FUNCTION = 'GO:0003674'\n CELLULAR_COMPONENT = 'GO:0005575'\n FUNC_DICT = {'cc': CELLULAR_COMPONENT,\n 'mf': MOLECULAR_FUNCTION,\n 'bp': BIOLOGICAL_PROCESS}\n\n def __init__(self, inputlayer, labelembedding, num_negatives=10,\n learning_rate=0.001,\n lstm_statesize=256, numfuncs=5):\n self.inputs = inputlayer\n self.learning_rate = learning_rate\n self.num_neg_samples = num_negatives\n self.label_dimensions = labelembedding.shape[1]\n self.lstm_statesize = lstm_statesize\n self.labelembedding = labelembedding\n self.numfuncs = numfuncs\n # self.GO_MAT = GO_MAT\n\n def init_variables(self):\n # First 5 leaf GO nodes for a given sequence is only used.\n # size of ys_ is (batchsize x 5)\n self.ys_ = tf.placeholder(shape=[None, self.numfuncs],\n dtype=tf.int32, name='y_out')\n\n # this represents the label embedding, size (GO nodes x labelembeddingsize)\n self.labelemb = tf.get_variable('labelemb', initializer=self.labelembedding, dtype=tf.float32,\n trainable=False)\n\n # self.threshold = tf.placeholder(shape=(1,), dtype=tf.float32, name='thres')\n\n # the negative samples to be used, size (batchsize x number of negatives)\n self.negsamples = tf.placeholder(shape=[None, self.num_neg_samples], dtype=tf.int32, name='negsamples')\n self.lstmcell = tf.contrib.rnn.BasicLSTMCell(self.lstm_statesize, activation=tf.nn.elu)\n # name='lstmcell')\n\n self.output_weights = tf.get_variable('rnn_outputW', shape=[self.lstm_statesize, self.label_dimensions])\n self.output_bias = tf.get_variable('rnnout_bias', shape=[self.label_dimensions])\n self.ytransform = tf.get_variable('ytransform', shape=[self.label_dimensions, self.label_dimensions],\n initializer=tf.initializers.identity)\n\n def build(self):\n self.init_variables()\n\n ## batchsize x 5 x labelemb\n self.yemb = tf.nn.embedding_lookup(self.labelemb, self.ys_, name='yemb')\n\n ## batchsize x 10 x labelemb\n self.negemb = tf.nn.embedding_lookup(self.labelemb, self.negsamples, name='negemb')\n # rnnin = [tf.zeros(shape=(tf.shape(yemb)[0], 1)) for i in range(5)]\n log.info('input label embedding-{}'.format(self.yemb.get_shape()))\n log.info('negative sample embedding-{}'.format(self.negemb.get_shape()))\n\n rnnin = [self.inputs for i in range(self.numfuncs)]\n rnnout, rnn_final_states = tf.nn.static_rnn(self.lstmcell,\n rnnin, dtype=tf.float32)\n #initial_state=self.inputs\n #)\n # log.info('rnnout shape {}'.format(rnnout.get_shape()))\n rflat = tf.reshape(rnnout, shape=[-1, self.lstm_statesize])\n\n # batchsize*5 x labeldim\n self.output = tf.nn.l2_normalize(tf.nn.softplus(tf.matmul(rflat,\n self.output_weights)\n + self.output_bias,\n name='yhat'),\n axis=1)\n\n log.info('final decoder out shape {}'.format(self.output.get_shape()))\n # ipdb.set_trace()\n self.transformed_y = tf.nn.l2_normalize(tf.matmul(tf.reshape(self.yemb, shape=[-1, self.label_dimensions]),\n 
self.ytransform),\n axis=1)\n\n variable_summaries(self.transformed_y)\n # batch size*10 x labeldim\n self.transformed_negsamples = tf.nn.l2_normalize(tf.matmul(tf.reshape(self.negemb,\n shape=[-1, self.label_dimensions]),\n self.ytransform),\n axis=1)\n\n variable_summaries(self.ytransform)\n # batchsize *5 x 1\n self.cosinesim_pos = tf.reduce_sum(tf.multiply(self.output, self.transformed_y), axis=1)\n\n # batchsize *5 x batchsize*10\n self.cosinesim_neg = tf.matmul(self.output, tf.transpose(self.transformed_negsamples))\n\n # batchsize *5 x 1\n self.min_neg_dist = tf.reduce_min(self.cosinesim_neg, axis=1)\n\n self.loss = tf.reduce_mean(tf.exp(self.cosinesim_pos, name='posdist') /\n (tf.exp(self.min_neg_dist, name='negdist') + tf.constant(1e-3)),\n name='loss')\n\n tf.summary.scalar('loss', self.loss)\n self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)\n self.train = self.optimizer.minimize(self.loss)\n\n self.summary = tf.summary.merge_all()\n # self.predictions, self.precision, self.recall, self.f1 = self.make_prediction()\n self.predictions = self.make_prediction()\n return self\n\n def make_prediction(self):\n # make unit-vectors, size (GO nodes x embeddingsize)\n norm_labelemb = tf.nn.l2_normalize(tf.matmul(self.labelemb, self.ytransform), axis=1, name='labelnorm')\n\n # get cosine similarity, size (batchsize*5 x GO nodes)\n distmat = tf.matmul(self.output, tf.transpose(norm_labelemb), name='pred_dist')\n\n # boolean matrix of size batchsize x GOlen\n pred_labels = tf.reshape(tf.argmin(distmat, axis=1), shape=[-1, self.numfuncs])\n\n #truelabels\n # true_labels = GODAG.vfunc(tf.reshape(self.ys_, ))\n # precision, recall, f1 = calc_performance_metrics(pred_labels, true_labels, threshold=0.2)\n return pred_labels\n\n\n\n","sub_path":"src/models/rnndecoder.py","file_name":"rnndecoder.py","file_ext":"py","file_size_in_byte":6398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"468109139","text":"from django.urls import path\nfrom rest_framework import permissions\nfrom rest_framework.routers import SimpleRouter\nfrom .views import (\n GlobalStats,\n MonitorViewSet,\n StatusView,\n CurrentStatusView,\n RefreshAll,\n UptimeView,\n GlobalStats,\n)\n\nrouter = SimpleRouter()\nrouter.register(\"monitors\", MonitorViewSet, basename=\"monitors\")\n\nurlpatterns = [\n path(\"status/\", StatusView.as_view(), name=\"status\"),\n path(\"currentstatus/\", CurrentStatusView.as_view(), name=\"currentstatus\"),\n path(\"refreshall/\", RefreshAll.as_view(), name=\"refreshall\"),\n path(\"uptime/\", UptimeView.as_view(), name=\"uptime\"),\n path(\"globalstats/\", GlobalStats.as_view(), name=\"globalstats\"),\n]\n\nurlpatterns += router.urls\n","sub_path":"server/monitor/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"569652649","text":"#!/usr/bin/env python\n\nfrom google.appengine.api import users\nfrom google.appengine.ext import db\nimport webapp2\n\nfrom membership import Membership\n\nclass BillingHandler(webapp2.RequestHandler):\n def get(self):\n user = users.get_current_user()\n member = Membership.get_by_username(user.nickname())\n if not member:\n # User is not (yet) a member.\n self.redirect(\"http://signup.hackerdojo.com\")\n else:\n # Open billing information.\n url = member.spreedly_url()\n self.redirect(url)\n\napp = webapp2.WSGIApplication([\n (\"/my_billing\", BillingHandler),\n ], 
debug = True)\n","sub_path":"billing.py","file_name":"billing.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"131975904","text":"from flask import request, Flask, jsonify\nfrom flask_pymongo import PyMongo\nfrom bson.json_util import dumps\nfrom datetime import datetime\nimport json\n\napp1 = Flask(__name__)\n\napp1.config['MONGO_DBNAME'] = 'cadenas'\napp1.config['MONGO_URI'] = 'mongodb://mongo:27017/cadenas'\nmongo = PyMongo(app1)\n\n@app1.route('/')\ndef index():\n try:\n args = request.args\n cadena_param = args['cadena']\n except:\n cadena_param = 'Sin Cadena'\n ip = request.remote_addr\n now = datetime.now()\n dt_string = now.strftime(\"%Y-%m-%dT%H:%M:%S.000Z\")\n cadena = mongo.db.cadena\n cadena.insert({'fecha':dt_string, 'cadena':cadena_param, 'ip':ip})\n resultados = cadena.find().sort([('fecha', -1)]).limit(10)\n return jsonify({'Resultado Servicio 1':dumps(resultados)})\n \n\nif __name__ == '__main__':\n app1.run(debug=True, host='0.0.0.0')","sub_path":"app1/app1.py","file_name":"app1.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"355986428","text":"#Start with importing all required modules\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import violinplot, boxplot\nimport numpy as np\nfrom statistics import stdev\n\n#Import the dataframes\ndf1 = pd.read_csv('data_holding_file_1.csv', delimiter='\\t')\ndf2 = pd.read_csv('data_holding_file_2.csv', delimiter='\\t')\n\n#Define the columns that we will group by\ndf1_groups_col = 'Stage'\ndf2_groups_col = 'Chemogenetic_State'\n\n#Set the different required parameters\ndf1_stages = df1.Stage.unique()\ndf2_stages = df2.Chemogenetic_State.unique()\ndf1_stages = sorted(df1_stages)\ndf2_stages = sorted(df2_stages)\nfeatures = [x for x in df1.columns if x.startswith('_pf_') and df1[x].dtype != object]\nfeatures = features[:-3]\n\n#Assign all individual columns to a single nested dict, divided either by stage or chemogenetic state\ndf1_vector = {}\ndf2_vector = {}\n\nfor feature in features:\n df1_vector[feature] = {}\n for stage in df1_stages:\n df1_vector[feature][stage] = df1[feature][df1[df1_groups_col] == stage].values\n \nfor feature in features:\n df2_vector[feature] = {}\n for stage in df2_stages:\n df2_vector[feature][stage] = df2[feature][df2[df2_groups_col] == stage].values\n\n#Plot all required values from the data vectors\nfor feature in features:\n print(f\"{feature}:\")\n for stage in df1_stages:\n data = df1_vector[feature][stage]\n print(f\"STAGE:{stage} MEAN = {np.mean(data):.3f} / STD.DEV = {stdev(data):.4f}\")\n print(\"\\n\")\n\nfor feature in features:\n print(f\"{feature}:\")\n for stage in df2_stages:\n data = df2_vector[feature][stage]\n print(f\"STATE:{stage} MEAN = {np.mean(data):.3f} / STD.DEV = {stdev(data):.4f}\")\n print(\"\\n\")\n\n#Define plotting parameters\nletters = ['A', 'B', 'C','D', 'E', 'F','G', 'H', 'I']\n\nylabels = {}\nlabels = ['ΔÛ(l)', 'ΔÛ(r)', 'ΔÛ(m)', 'ΔÛ(z)', 'Û^2(z)', 'Û^2(min)', 'Fi(l)/s', 'Fi(r)/s', 'seconds']\nfor ii, feature in enumerate(features):\n ylabels[feature] = labels[ii]\n\n#Plot all data by putting a matplotlib boxplot over a matplotlib violinplot\nflierprops = dict(marker='.', markerfacecolor='black', markersize=5,\n linestyle='none')\n\nfig, axs = plt.subplots(3,3, figsize = (14,12))\n\nfig.subplots_adjust(left=0.25, wspace=0.3, hspace=0.3)\n\nfor 
key,ax,letter in zip(df1_vector, axs.reshape(-1), letters):\n ax.violinplot(df1_vector[key].values(), showextrema=False)\n b = ax.boxplot(df1_vector[key].values(), notch= True, flierprops=flierprops, showfliers=True)\n ax.set_xticks(range(1,len(df1_stages)+1))\n ax.set_xticklabels(df1_stages)\n ax.set_xlabel('Developmental Stages')\n ax.set_ylabel(ylabels[key])\n ax.set_title(f\"{letter} - {key}\")\n fig.tight_layout()\n\n#Instantly plot the required raw data values which are created by matplotlib \n n_per_stage = df1.groupby('Stage').count()\n \n counts = n_per_stage['ImgUUID']\n m22 = b['medians'][0].get_ydata()\n m23 = b['medians'][1].get_ydata()\n m24 = b['medians'][2].get_ydata()\n m25 = b['medians'][3].get_ydata()\n m26 = b['medians'][4].get_ydata()\n s22 = b['whiskers'][0].get_ydata() \n e22 = b['whiskers'][1].get_ydata()\n s23 = b['whiskers'][2].get_ydata() \n e23 = b['whiskers'][3].get_ydata()\n s24 = b['whiskers'][4].get_ydata()\n e24 = b['whiskers'][5].get_ydata()\n s25 = b['whiskers'][6].get_ydata()\n e25 = b['whiskers'][7].get_ydata()\n s26 = b['whiskers'][8].get_ydata()\n e26 = b['whiskers'][9].get_ydata()\n \n print(f\"{key}-VALUES:\\n STAGE 22: BOTTOM {s22[1]:.4f} / MEDIAN {m22[0]:.4f} / TOPPER {e22[1]:.4f}\\nSTAGE 23: BOTTOM {s23[1]:.4f} / MEDIAN {m23[0]:.4f} / TOPPER {e23[1]:.4f}\\nSTAGE 24: BOTTOM {s24[1]:.4f} / MEDIAN {m24[0]:.4f} / TOPPER {e24[1]:.4f}\\nSTAGE 25: BOTTOM {s25[1]:.4f} / MEDIAN {m25[0]:.4f} / TOPPER {e25[1]:.4f}\\nSTAGE 26: BOTTOM {s26[1]:.4f} / MEDIAN {m26[0]:.4f} / TOPPER {e26[1]:.4f}\\n\")\n# fig.savefig('filename.png')\n\n#Repeat the plotting process for the second dataframe\nflierprops = dict(marker='.', markerfacecolor='black', markersize=5,\n linestyle='none')\n\nfig, axs = plt.subplots(3,3, figsize = (14,12))\n\nfig.subplots_adjust(left=0.25, wspace=0.3, hspace=0.3)\n\nfor key,ax,letter in zip(df2_vector, axs.reshape(-1), letters):\n ax.violinplot(df2_vector[key].values(), showextrema=False)\n b = ax.boxplot(df2_vector[key].values(), notch= True, flierprops=flierprops, showfliers=True)\n ax.set_xticks(range(1,len(df2_stages)+1))\n ax.set_xticklabels(df2_stages)\n ax.set_xlabel('Chemogenetic States')\n ax.set_ylabel(ylabels[key])\n ax.set_title(f\"{letter} - {key}\")\n fig.tight_layout()\n \n n_per_stage = df2.groupby('Chemogenetic_State').count()\n \n counts = n_per_stage['ImgUUID']\n m3 = b['medians'][0].get_ydata()\n m3c = b['medians'][1].get_ydata()\n m4 = b['medians'][2].get_ydata()\n m4c = b['medians'][3].get_ydata()\n s3 = b['whiskers'][0].get_ydata() \n e3 = b['whiskers'][1].get_ydata()\n s3c = b['whiskers'][2].get_ydata() \n e3c = b['whiskers'][3].get_ydata()\n s4 = b['whiskers'][4].get_ydata()\n e4 = b['whiskers'][5].get_ydata()\n s4c = b['whiskers'][6].get_ydata()\n e4c = b['whiskers'][7].get_ydata()\n \n print(f\"{key}-VALUES:\\n HM3D: BOTTOM {s3[1]:.4f} / MEDIAN {m3[0]:.4f} / TOPPER {e3[1]:.4f}\\nHM3D-C: BOTTOM {s3c[1]:.4f} / MEDIAN {m3c[0]:.4f} / TOPPER {e3c[1]:.4f}\\nHM4D: BOTTOM {s4[1]:.4f} / MEDIAN {m4[0]:.4f} / TOPPER {e4[1]:.4f}\\nHM4D-C: BOTTOM {s4c[1]:.4f} / MEDIAN {m4c[0]:.4f} / TOPPER {e4c[1]:.4f}\\n\")\n# fig.savefig('filename.png')\n","sub_path":"ViolinPlotting.py","file_name":"ViolinPlotting.py","file_ext":"py","file_size_in_byte":5457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"642224924","text":"\nimport os\n\nfrom django.views.generic.edit import CreateView, FormView\n\nfrom django.urls import reverse\nfrom common_data.utilities import ContextMixin\nfrom 
django.views.generic import TemplateView, DetailView\nfrom employees import forms\nfrom employees import models\nfrom django_filters.views import FilterView\nfrom common_data.views import PaginationMixin\nfrom employees.filters import LeaveRequestFilter\nfrom django.db.models import Q\nimport datetime\nfrom django.http import JsonResponse\nfrom employees import serializers\nfrom rest_framework import viewsets\n\n\nclass LeaveCalendarView(TemplateView):\n template_name = os.path.join('employees', 'leave', 'calendar.html')\n\n\nclass LeaveRequestList(ContextMixin,\n PaginationMixin, FilterView):\n filterset_class = LeaveRequestFilter\n queryset = models.Leave.objects.all()\n template_name = os.path.join('employees', 'leave', 'list.html')\n extra_context = {\n 'title': 'List of Vaction Applications',\n 'new_link': '/employees/leave-request'\n }\n\n\nclass LeaveDayRequestView(ContextMixin, CreateView):\n template_name = os.path.join('common_data', 'crispy_create_template.html')\n form_class = forms.LeaveRequestForm\n extra_context = {\n 'title': 'Vacation Application Form',\n 'description': 'Use this form to apply for vacation or to request leave of absence for the reasons under the category list.'\n }\n\n\nclass LeaveDayDetailView(DetailView):\n template_name = os.path.join('employees', 'leave', 'detail.html')\n model = models.Leave\n\n\nclass LeaveAuthorizationView(ContextMixin, FormView):\n form_class = forms.LeaveAuthorizationForm\n template_name = os.path.join('common_data', 'create_template.html')\n extra_context = {\n 'title': 'Authorize Leave Request'\n }\n\n def get_success_url(self):\n return reverse('employees:leave-detail', kwargs={\n 'pk': self.kwargs['pk']\n })\n\n def get_initial(self):\n return {\n 'leave_request': self.kwargs['pk']\n }\n\n def form_valid(self, cleaned_data):\n resp = super().form_valid(cleaned_data)\n leave_obj = models.Leave.objects.get(\n pk=cleaned_data['leave_request'].value())\n leave_obj.status = cleaned_data['status'].value()\n leave_obj.notes = cleaned_data['notes'].value()\n authorizer = models.Employee.objects.get(\n pk=cleaned_data['authorized_by'].value()\n )\n leave_obj.authorized_by = authorizer\n leave_obj.save()\n\n return resp\n\n\ndef _month_data(year, month):\n year = int(year)\n month = int(month)\n lower_limit = datetime.date(year=year, month=month, day=1)\n if not month == 12:\n upper_limit = datetime.date(year=year, month=month + 1, day=1)\n else:\n upper_limit = datetime.date(year=year + 1, month=1, day=1)\n\n leave_data = models.Leave.objects.filter(\n Q(\n Q(\n Q(start_date__gte=lower_limit) &\n Q(start_date__lt=upper_limit)\n ) | \n Q(\n Q(end_date__gte=lower_limit) &\n Q(end_date__lt=upper_limit)\n )\n ) &\n Q(status=1))\n \n\n def data_dict(d):\n return ({\n 'start_date': d.start_date.day,\n 'end_date': d.end_date.day,\n 'start_month': d.start_date.month,\n 'employee': d.employee.full_name,\n 'id': d.pk\n })\n\n return [data_dict(d) for d in leave_data]\n\n\ndef get_month_data(request, year=None, month=None):\n lower_limit = datetime.date(year=int(year), month=int(month), day=1)\n\n leave_data = _month_data(year, month)\n data = {\n 'leave': leave_data,\n 'title': lower_limit.strftime(\"%B, %Y\")\n }\n\n return JsonResponse(data)\n\n\ndef get_year_data(request, year=None):\n leave_data = []\n for i in range(12):\n leave_data.append(_month_data(year, (i+1)))\n data = {\n 'leave': leave_data\n\n }\n return JsonResponse(data)\n\n\nclass LeaveViewset(viewsets.ModelViewSet):\n queryset = models.Leave.objects.all()\n serializer_class = 
serializers.LeaveSerializer\n","sub_path":"employees/views/leave.py","file_name":"leave.py","file_ext":"py","file_size_in_byte":4150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"514957285","text":"#!/usr/bin/env python\n\nfrom googleapiclient import discovery\nfrom oauth2client.client import GoogleCredentials\nimport argparse\nfrom os import environ\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n#Variables\nbilling_account_id = '' #GCP Billing Account ID that project will be linked to\nservice_account_json_file_path = '' #Path to the SErvice Account's Private Key file\n\n#Set environment variable for service account authorization\nenviron['GOOGLE_APPLICATION_CREDENTIALS'] = service_account_json_file_path\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Links newly created GCP project to billing account')\n parser.add_argument('--project_id', type=str, help='Project ID to link to billing account', required=True)\n args = parser.parse_args()\n\n link_billing(args.project_id, billing_account_id)\n\n\ndef link_billing(project_id, billing_account_id):\n credentials = GoogleCredentials.get_application_default()\n service = discovery.build('cloudbilling', 'v1', credentials=credentials)\n\n name='projects/' + project_id\n project_billing_info_body = {\n 'name': 'projects/' + project_id + '/billingInfo',\n 'projectId': project_id,\n 'billingAccountName': 'billingAccounts/' + billing_account_id,\n 'billingEnabled': False\n }\n request = service.projects().updateBillingInfo(name=name, body=project_billing_info_body)\n response = request.execute()\n logger.info(\"Project: %s has been linked to Billing Account: %s\" % (project_id, billing_account_id))\n\nif __name__ == \"__main__\":\n main()\n# sys.exit(main(sys.argv[1:]))\n","sub_path":"modules/link_billing_account.py","file_name":"link_billing_account.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"413062383","text":"# -*- coding: utf-8 -*-\n\nfrom . import main\nfrom .forms import EditProfileForm, EditProfileAdminForm, PostForm\nfrom .. 
import db\nfrom ..decorators import permission_required\nfrom ..models import User, Permission, Post, Follow\nfrom flask import render_template, flash, redirect, url_for, request, current_app, abort, make_response\nfrom flask_login import login_required, current_user\nfrom flask_sqlalchemy import get_debug_queries\n\n\n@main.route('/user/')\ndef user(username):\n user = User.query.filter_by(username=username).first_or_404()\n posts = user.posts.order_by(Post.timestamp.desc()).all()\n return render_template('user.html', user=user, posts=posts)\n\n\n@main.route('/edit-profile', methods=['GET', 'POST'])\n@login_required\ndef edit_profile():\n form = EditProfileForm()\n\n from flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class\n photos = UploadSet('photos', IMAGES)\n configure_uploads(current_app, photos)\n patch_request_class(current_app)\n\n if form.validate_on_submit():\n if form.photo.data is not None:\n form.photos.save(form.photo.data, name=str(current_user.id) + '_temp.')\n import sys\n from config import basedir\n if sys.platform == 'win32' or sys.platform == 'cygwin':\n avatar_path = basedir + '\\\\app\\\\static\\\\avatar\\\\'\n else:\n avatar_path = basedir + '/app/static/avatar/'\n # delete old avatar if it exists\n import os\n try:\n os.remove(avatar_path + current_user.avatar_path)\n except:\n pass\n # rename temp avatar\n temp_avatar = avatar_path + str(current_user.id) + '_temp.' + form.photo.data.filename[-3:]\n avatar = avatar_path + str(current_user.id) + '.' + form.photo.data.filename[-3:]\n os.rename(temp_avatar, avatar)\n\n avatar_path = str(current_user.id) + '.' + form.photo.data.filename[-3:]\n current_user.avatar_path = avatar_path\n\n current_user.name = form.name.data\n current_user.location = form.location.data\n current_user.about_me = form.about_me.data\n db.session.add(current_user)\n\n flash('your profile has been updated')\n\n resp = make_response(redirect(url_for('.user', username=current_user.username)), 302)\n resp.headers['Cache-Control'] = 'max-age=0'\n return resp\n\n form.name.data = current_user.name\n form.location.data = current_user.location\n form.about_me.data = current_user.about_me\n return render_template('edit_profile.html', form=form)\n\n\n@main.route('/', methods=['GET', 'POST'])\ndef index():\n form = PostForm()\n page = request.args.get('page', 1, type=int)\n\n show_followed = False\n if current_user.is_authenticated:\n show_followed = bool(request.cookies.get('show_followed', ''))\n if show_followed:\n query_result = current_user.followed_posts\n else:\n query_result = Post.query\n pagination = query_result.order_by(Post.timestamp.desc()).paginate(page, per_page=current_app.config[\n 'FLASKY_POST_PER_PAGE'], error_out=False)\n if current_user.can(Permission.WRITE_ARTICLES) and form.validate_on_submit():\n post = Post(body=form.body.data, author=current_user._get_current_object())\n db.session.add(post)\n return redirect(url_for('.index'))\n posts = pagination.items\n\n return render_template('index.html', form=form, posts=posts, pagination=pagination, show_followed=show_followed)\n\n\n@main.route('/post/')\ndef post(id):\n post = Post.query.get_or_404(id)\n return render_template('post.html', posts=[post])\n\n\n@main.route('/edit/', methods=['GET', 'POST'])\n@login_required\ndef edit(id):\n post = Post.query.get_or_404(id)\n if current_user != post.author and not current_user.can(Permission.ADMINISTER):\n abort(403)\n form = PostForm()\n if form.validate_on_submit():\n post.body = form.body.data\n 
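# note: no explicit db.session.commit() appears in this module; the app presumably\n # relies on Flask-SQLAlchemy committing the session on request teardown\n # (e.g. SQLALCHEMY_COMMIT_ON_TEARDOWN), so add() alone suffices here\n 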
db.session.add(post)\n flash('The post has been updated')\n return redirect(url_for('.post', id=post.id))\n form.body.data = post.body\n return render_template('edit_post.html', form=form)\n\n\n@main.route('/follow/')\n@login_required\n@permission_required(Permission.FOLLOW)\ndef follow(username):\n user = User.query.filter_by(username=username).first()\n if user is None:\n flash('Invalid user')\n return redirect(url_for('.index'))\n if current_user.is_following(user):\n flash(\"You're already following this user\")\n return redirect(url_for('.user', username=username))\n current_user.follow(user)\n flash(\"now you're following this user\")\n return redirect(url_for('.user', username=username))\n\n\n@main.route('/unfollow/')\n@login_required\n@permission_required(Permission.FOLLOW)\ndef unfollow(username):\n user = User.query.filter_by(username=username).first()\n if user is None:\n flash('Invalid user')\n return redirect(url_for('.index'))\n if not current_user.is_following(user):\n flash(\"You cannot unfollow a user which you're not following\")\n return redirect(url_for('.user', username=username))\n current_user.unfollow(user)\n flash(\"now you're unfollowing this user\")\n return redirect(url_for('.user', username=username))\n\n\n@main.route('/followers/')\ndef followers(username):\n user = User.query.filter_by(username=username).first()\n if user is None:\n flash('Invalid user.')\n return redirect(url_for('.index'))\n page = request.args.get('page', 1, type=int)\n pagination = user.followers.paginate(page, per_page=current_app.config['FLASKY_FOLLOWERS_PER_PAGE'],\n error_out=False)\n follows = [{'user': item.follower, 'timestamp': item.timestamp} for item in pagination.items]\n return render_template('followers.html', user=user, title=\"Followers of\", endpoint='.followers',\n pagination=pagination, follows=follows)\n\n\n@main.route('/followed-by/')\ndef followed_by(username):\n user = User.query.filter_by(username=username).first()\n if user is None:\n flash('Invalid user.')\n return redirect(url_for('.index'))\n page = request.args.get('page', 1, type=int)\n pagination = user.followed.paginate(page, per_page=current_app.config['FLASKY_FOLLOWERS_PER_PAGE'],\n error_out=False)\n follows = [{'user': item.followed, 'timestamp': item.timestamp} for item in pagination.items]\n return render_template('followers.html', user=user, title=\"Followed by\", endpoint='.followed_by',\n pagination=pagination, follows=follows)\n\n\n@main.route('/all')\n@login_required\ndef show_all():\n resp = make_response(redirect(url_for('.index')))\n resp.set_cookie('show_followed', '', max_age=30 * 24 * 60 * 60)\n return resp\n\n\n@main.route('/followed')\n@login_required\ndef show_followed():\n resp = make_response(redirect(url_for('.index')))\n resp.set_cookie('show_followed', '1', max_age=30 * 24 * 60 * 60)\n return resp\n\n\n@main.after_request\ndef after_request(response):\n for query in get_debug_queries():\n if query.duration >= current_app.config['FLASKY_SLOW_DB_QUERY_TIME']:\n current_app.logger.warning('Flow query: %s\\nParameters:%s\\nDuration:%fs\\nContext:%s\\n' % (\n query.statement, query.parameters, query.duration, query.context))\n return response\n","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"459537982","text":"import tensorflow as tf\n\nimport logger\nfrom argument_parser import setup_argument_parser\nfrom dataset import 
create_dataset\nfrom model import create_conditional_model, create_model, predict, train\n\n# Setup global logger\nlog = logger.setup_logger(__name__)\n\n\ndef main():\n # Extract config from arguments\n config = setup_argument_parser()\n\n log.info(\"Starting...\")\n\n log.info(\"Program will run with following parameters:\")\n log.info(config)\n\n # Create the image dataset from the data/input folder\n ds, val_ds, image_size = create_dataset(config)\n\n # Whether we are testing or training\n if config.training:\n model, dist = train(ds, val_ds, config, image_shape=image_size)\n else:\n log.info(\"Loading model...\")\n # Load model\n latest = tf.train.latest_checkpoint(config.checkpoints)\n log.info(latest)\n\n # Create a new model instance\n if config.class_conditional:\n model, dist = create_conditional_model(config, image_size)\n else:\n model, dist = create_model(config, image_size)\n\n # Load the params back into the model\n model.load_weights(latest).expect_partial()\n\n log.info(\"Loading done\")\n\n log.info(\"Predicting...\")\n\n predict(dist, config)\n\n log.info(\"Prediction done...\")\n\n log.info(\" Done \")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"PixelCNN/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"628658621","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nimport sys\nimport json\n\nroad_filename = 'testroads3.json'\nroads = []\n\nprint('load data...')\nusecols = ['starting_latitude', 'starting_longitude']\ndf = pd.read_csv(\n '../../../data/data_train_competition.csv', usecols=usecols)\ndf.columns = ['lat', 'lon']\n\n# df = df.ix[np.logical_and(df['lat'] > 40.63, df['lat'] < 40.64)]\n# df = df.ix[np.logical_and(df['lon'] > 22.935, df['lon'] < 22.945)]\ndf = df.sample(100000)\n\nX = df.as_matrix()\n\n# hyperparameters\nDnn = 0.0005\nDclose = 0.0003\nDroad = 0.0002\n\n\ndef roads_distance(x):\n roads_dist = np.zeros(len(roads))\n for roadi, road in enumerate(roads):\n assert len(road) > 1\n road = np.asarray(road)\n\n A = np.rollaxis(road[:-1], 1)\n B = np.rollaxis(road[1:], 1)\n C = x[:, np.newaxis]\n\n AB = B-A\n AC = C-A\n cross = AB[0]*AC[1] - AB[1]*AC[0]\n distAB = np.sqrt((A[0]-B[0])**2 + (A[1]-B[1])**2)\n distBC = np.sqrt((B[0]-C[0])**2 + (B[1]-C[1])**2)\n distAC = np.sqrt((A[0]-C[0])**2 + (A[1]-C[1])**2)\n\n distAB += 0.0001 # avoid zero division\n dist = np.abs(cross / distAB)\n\n # check if outside the segment\n BC = C-B\n dot1 = AB[0]*BC[0] + AB[1]*BC[1]\n dot2 = (-AB[0])*AC[0] + (-AB[1])*AC[1]\n\n res = \\\n (dot1 >= 0)*distBC + \\\n (dot2 >= 0)*distAC + \\\n np.logical_and(dot1 < 0, dot2 < 0)*dist\n roads_dist[roadi] = np.min(res)\n return roads_dist\n\n\ndef create_road(i0):\n ii = [i0]\n newii = ii\n it = 0\n while True:\n # 1. neighbors of ii\n dd = np.ones(len(X))*np.inf\n for i in newii:\n d = (X[:, 0] - X[i, 0])**2 + (X[:, 1] - X[i, 1])**2\n dd = np.minimum(dd, d)\n dd[ii] = 0\n\n jj = np.where(dd < Dnn**2)[0]\n\n # 2 linear regression line\n _jj = jj[np.random.choice(len(jj), min(len(jj), 20000), False)]\n m = LinearRegression(fit_intercept=False)\n m.fit(X[_jj, 0:1] - X[i0, 0], X[_jj, 1] - X[i0, 1])\n\n # 3. 
distance to linear regression\n # distance = |ax+by+c| / sqrt(a^2+b^2)\n a = -m.coef_[0]\n b = 1\n c = 0\n dd = np.abs(a*(X[jj, 0]-X[i0, 0]) + b*(X[jj, 1]-X[i0, 1]) + c) / \\\n np.sqrt(a**2+b**2)\n\n oldii = ii\n ii = np.union1d(ii, jj[dd < Droad])\n newii = np.array([i for i in ii if i not in oldii])\n if len(newii) == 0:\n if it <= 1:\n return None\n x0 = X[ii, 0].min()\n x1 = X[ii, 0].max()\n return (x0, m.predict([[x0]])[0]), ((x1, m.predict([[x1]])[0]))\n it += 1\n\nprint('generating roads...')\nfor it, i in enumerate(np.random.choice(len(X), len(X), False)):\n sys.stdout.write('\\r%5.2f%% (%d)' % (100*it/len(X), len(roads)))\n sys.stdout.flush()\n dd = roads_distance(X[i])\n if len(dd) == 0 or dd.min() > Dclose:\n # create new road\n road = create_road(i)\n if road:\n roads.append(road)\nsys.stdout.write('\\r \\r')\nprint('roads len: %d' % len(roads))\n\nprint('saving roads...')\nwith open(road_filename, 'w') as f:\n json.dump(roads, f)\n","sub_path":"tests/segmentation/autoinit2.py","file_name":"autoinit2.py","file_ext":"py","file_size_in_byte":3226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"457771086","text":"import random\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom modules.layers import TransformerDecoderLayer\nfrom modules.layers import _get_pad_mask, _get_zero_mask, _get_subsequent_mask\nfrom utils.config import PAD, EOS, BOS, UNK\nfrom utils.dataset import load_pretrained_embedding\nfrom utils.misc import check_device\n\nfrom .Las import LAS\nfrom .TFEnc import Encoder\nfrom .TFDec import Decoder\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\nclass Seq2seq(nn.Module):\n\n\t\"\"\"\n\t\tacous/en enc + en/de dec\n\t\tembedding passing\n\t\"\"\"\n\n\tdef __init__(self,\n\t\tenc_vocab_size,\n\t\tdec_vocab_size,\n\t\tshare_embedder,\n\t\tenc_embedding_size = 200,\n\t\tdec_embedding_size = 200,\n\t\tload_embedding_src = None,\n\t\tload_embedding_tgt = None,\n\t\tmax_seq_len_src = 32,\n\t\tmax_seq_len_tgt = 300,\n\t\tnum_heads = 8,\n\t\tdim_model = 512,\n\t\tdim_feedforward = 1024,\n\t\tenc_layers = 6,\n\t\tdec_layers = 6,\n\t\tembedding_dropout=0.0,\n\t\tdropout=0.2,\n\t\tact=False,\n\t\tenc_word2id=None,\n\t\tenc_id2word=None,\n\t\tdec_word2id=None,\n\t\tdec_id2word=None,\n\t\ttransformer_type='standard',\n\t\tenc_emb_proj=False,\n\t\tdec_emb_proj=False,\n\t\t# pyramidal lstm params\n\t\tacous_dim=40,\n\t\tacous_hidden_size=256,\n\t\t# mode to select params to init\n\t\tmode='ASR',\n\t\tload_mode='ASR' # useful for storing frozen var\n\t\t):\n\n\t\tsuper(Seq2seq, self).__init__()\n\t\tself.EMB_DYN_AVE_PATH = \\\n\t\t'models/base/ted-asr-v001/eval_ted_train_STATS/2020_09_02_04_10_44/dyn_emb_ave.npy'\n\t\tself.EMB_DYN_AVE = torch.from_numpy(np.load(self.EMB_DYN_AVE_PATH))\n\n\t\t# define var\n\t\tself.enc_vocab_size = enc_vocab_size\n\t\tself.dec_vocab_size = dec_vocab_size\n\t\tself.enc_embedding_size = enc_embedding_size\n\t\tself.dec_embedding_size = dec_embedding_size\n\t\tself.load_embedding_src = load_embedding_src\n\t\tself.load_embedding_tgt = load_embedding_tgt\n\t\tself.max_seq_len_src = max_seq_len_src\n\t\tself.max_seq_len_tgt = max_seq_len_tgt\n\t\tself.num_heads = num_heads\n\t\tself.dim_model = dim_model\n\t\tself.dim_feedforward = dim_feedforward\n\n\t\tself.enc_layers = enc_layers\n\t\tself.dec_layers = dec_layers\n\n\t\tself.embedding_dropout = nn.Dropout(embedding_dropout)\n\t\tself.dropout = 
nn.Dropout(dropout)\n\t\tself.act = act\n\t\tself.enc_emb_proj = enc_emb_proj\n\t\tself.dec_emb_proj = dec_emb_proj\n\n\t\tself.enc_word2id = enc_word2id\n\t\tself.enc_id2word = enc_id2word\n\t\tself.dec_word2id = dec_word2id\n\t\tself.dec_id2word = dec_id2word\n\t\tself.transformer_type = transformer_type\n\t\tself.mode = mode\n\t\tself.load_mode = load_mode\n\n\t\t# ------------- define embedders -------------\n\t\tif self.load_embedding_src:\n\t\t\tembedding_matrix = np.random.rand(self.enc_vocab_size, self.enc_embedding_size)\n\t\t\tembedding_matrix = torch.FloatTensor(load_pretrained_embedding(\n\t\t\t\tself.enc_word2id, embedding_matrix, self.load_embedding_src))\n\t\t\tself.enc_embedder = nn.Embedding.from_pretrained(embedding_matrix,\n\t\t\t\tfreeze=False, sparse=False, padding_idx=PAD)\n\t\telse:\n\t\t\tself.enc_embedder = nn.Embedding(self.enc_vocab_size,\n\t\t\t\tself.enc_embedding_size, sparse=False, padding_idx=PAD)\n\n\t\tif self.load_embedding_tgt:\n\t\t\tembedding_matrix = np.random.rand(self.dec_vocab_size, self.dec_embedding_size)\n\t\t\tembedding_matrix = torch.FloatTensor(load_pretrained_embedding(\n\t\t\t\tself.dec_word2id, embedding_matrix, self.load_embedding_tgt))\n\t\t\tself.dec_embedder = nn.Embedding.from_pretrained(embedding_matrix,\n\t\t\t\tfreeze=False, sparse=False, padding_idx=PAD)\n\t\telse:\n\t\t\tself.dec_embedder = nn.Embedding(self.dec_vocab_size,\n\t\t\t\tself.dec_embedding_size, sparse=False, padding_idx=PAD)\n\n\t\tif share_embedder:\n\t\t\tassert enc_vocab_size == dec_vocab_size\n\t\t\tself.enc_embedder = self.dec_embedder\n\n\t\tself.enc_emb_proj_flag = True\n\t\tself.enc_emb_proj = nn.Linear(self.enc_embedding_size + self.dim_model,\n\t\t\tself.dim_model, bias=False) # static + dynamic embedding -> hidden\n\n\t\tself.dec_emb_proj_flag = False\n\t\tif (self.dec_embedding_size != self.dim_model) or (self.dec_emb_proj == True):\n\t\t\tself.dec_emb_proj_flag = True\n\t\t\tself.dec_emb_proj = nn.Linear(self.dec_embedding_size,\n\t\t\t\tself.dim_model, bias=False) # embedding -> hidden\n\n\t\t# ------------- construct enc, dec -------------------\n\t\t# params\n\t\tself.acous_dim = acous_dim\n\t\tself.acous_hidden_size = acous_hidden_size\n\t\tenc_params = (self.dim_model, self.dim_feedforward, self.num_heads,\n\t\t\tself.enc_layers, self.act, dropout, self.transformer_type)\n\t\tdec_params = (self.dim_model, self.dim_feedforward, self.num_heads,\n\t\t\tself.dec_layers, self.act, dropout, self.transformer_type)\n\n\t\t# LAS\n\t\tcomb_mode = '-'.join([self.mode,self.load_mode])\n\t\tif 'ASR' in comb_mode or 'ST' in comb_mode:\n\t\t\tself.las = LAS(\n\t\t\t\tself.enc_vocab_size,\n\t\t\t\tembedding_size=self.enc_embedding_size,\n\t\t\t\tacous_dim=self.acous_dim,\n\t\t\t\tacous_hidden_size=self.acous_hidden_size,\n\t\t\t\tacous_att_mode='bilinear',\n\t\t\t\thidden_size_dec=self.dim_model,\n\t\t\t\thidden_size_shared=self.dim_model,\n\t\t\t\tnum_unilstm_dec=3,\n\t\t\t\t#\n\t\t\t\tacous_norm=True,\n\t\t\t\tspec_aug=True,\n\t\t\t\tbatch_norm=False,\n\t\t\t\tenc_mode='pyramid',\n\t\t\t\t#\n\t\t\t\tembedding_dropout=embedding_dropout,\n\t\t\t\tdropout=dropout,\n\t\t\t\tresidual=True,\n\t\t\t\tbatch_first=True,\n\t\t\t\tmax_seq_len=self.max_seq_len_src,\n\t\t\t\tembedder=None, # do not share embedder with text encoder\n\t\t\t\tword2id=self.enc_word2id,\n\t\t\t\tid2word=self.enc_id2word,\n\t\t\t\thard_att=False\n\t\t\t)\n\n\t\t# En decode\n\t\tif 'AE' in comb_mode:\n\t\t\tself.out_src = self.las.decoder.acous_out # share with las out layer\n\n\t\t# En 
encode\n\t\t# De decode\n\t\tif 'ST' in comb_mode or 'MT' in comb_mode:\n\t\t\tself.enc_src = Encoder(*enc_params)\n\t\t\tself.dec_tgt = Decoder(*dec_params)\n\t\t\tself.out_tgt = nn.Linear(self.dim_model, self.dec_vocab_size, bias=False)\n\n\n\tdef _get_src_emb(self, src, emb_src_dyn, device):\n\t\t# En mask\n\t\tsrc_mask_input = _get_pad_mask(src).to(device=device).type(torch.uint8)\n\t\tsrc_mask = ((_get_pad_mask(src).to(device=device).type(torch.uint8)\n\t\t\t& _get_subsequent_mask(src.size(-1)).type(torch.uint8).to(device=device)))\n\t\temb_src_static = self.enc_embedder(src)\n\n\t\t# cat dynamic + static\n\t\temb_src_comb = torch.cat((emb_src_static, emb_src_dyn), dim=2)\n\n\t\t# map\n\t\tif self.enc_emb_proj_flag:\n\t\t\temb_src = self.enc_emb_proj(self.embedding_dropout(emb_src_comb))\n\t\telse:\n\t\t\temb_src = self.embedding_dropout(emb_src_comb)\n\n\t\treturn src_mask, emb_src, src_mask_input\n\n\n\tdef _get_tgt_emb(self, tgt, device):\n\t\t# De mask\n\t\ttgt_mask = ((_get_pad_mask(tgt).to(device=device).type(torch.uint8)\n\t\t\t& _get_subsequent_mask(tgt.size(-1)).type(torch.uint8).to(device=device)))\n\t\tif self.dec_emb_proj_flag:\n\t\t\temb_tgt = self.dec_emb_proj(self.embedding_dropout(self.dec_embedder(tgt)))\n\t\telse:\n\t\t\temb_tgt = self.embedding_dropout(self.dec_embedder(tgt))\n\n\t\treturn tgt_mask, emb_tgt\n\n\n\tdef _pre_proc_src(self, src, device):\n\n\t\t# remove initial BOS:to match with _encouder_acous output\n\t\tsrc_proc = src[:,1:]\n\n\t\treturn src_proc\n\n\n\tdef _encoder_acous(self, acous_feats, acous_lens, device, use_gpu, tgt=None,\n\t\tis_training=False, teacher_forcing_ratio=0.0, lm_mode='null', lm_model=None):\n\t\t# get acoustics - [batch_size, acous_len / 8, self.acous_hidden_size * 2]\n\t\temb_src, logps_src, preds_src, lengths = self.las(acous_feats,\n\t\t\tacous_lens=acous_lens, tgt=tgt, is_training=is_training,\n\t\t\tteacher_forcing_ratio=teacher_forcing_ratio, use_gpu=use_gpu,\n\t\t\tlm_mode=lm_mode, lm_model=lm_model)\n\n\t\treturn emb_src, logps_src, preds_src, lengths\n\n\n\tdef _encoder_en(self, emb_src, src_mask=None):\n\t\t# En encoder\n\t\tenc_outputs, *_ = self.enc_src(emb_src, src_mask=src_mask)\t# b x len x dim_model\n\n\t\treturn enc_outputs\n\n\n\tdef _decoder_en(self, emb_src):\n\t\t# En decoder\n\t\tlogits_src = self.out_src(emb_src)\t# b x len x vocab_size\n\t\tlogps_src = torch.log_softmax(logits_src, dim=2)\n\t\tscores_src, preds_src = logps_src.data.topk(1)\n\n\t\treturn logits_src, logps_src, preds_src, scores_src\n\n\n\tdef _decoder_de(self, emb_tgt, enc_outputs,\n\t\ttgt_mask=None, src_mask=None, beam_width=1):\n\t\t# De decoder\n\t\tdec_outputs_tgt, *_ = self.dec_tgt(emb_tgt, enc_outputs, tgt_mask=tgt_mask, src_mask=src_mask)\n\t\tlogits_tgt = self.out_tgt(dec_outputs_tgt)\t# b x len x vocab_size\n\t\tlogps_tgt = torch.log_softmax(logits_tgt, dim=2)\n\t\tscores_tgt, preds_tgt = logps_tgt.data.topk(beam_width)\n\n\t\treturn dec_outputs_tgt, logits_tgt, logps_tgt, preds_tgt, scores_tgt\n\n\n\tdef _prep_eval(self, batch, length_out, vocab_size, device):\n\n\t\t# eos\n\t\teos_mask = torch.BoolTensor([False]).repeat(batch).to(device=device)\n\t\t# record\n\t\tlogps = torch.Tensor([(1.0/vocab_size)]).log().repeat(batch,length_out,vocab_size).type(\n\t\t\ttorch.FloatTensor).to(device=device)\n\t\tdec_outputs = torch.Tensor([0]).repeat(batch,length_out,self.dim_model).type(\n\t\t\ttorch.FloatTensor).to(device=device)\n\t\tpreds_save = torch.Tensor([PAD]).repeat(batch,length_out).type(\n\t\t\ttorch.LongTensor).to(device=device) 
# used to update pred history\n\n\t\t# start from length = 1\n\t\tpreds = torch.Tensor([BOS]).repeat(batch,1).type(\n\t\t\ttorch.LongTensor).to(device=device)\n\t\tpreds_save[:, 0] = preds[:, 0]\n\n\t\treturn eos_mask, logps, dec_outputs, preds_save, preds, preds_save\n\n\n\tdef _step_eval(self, i, eos_mask, dec_output, logp, pred,\n\t\tdec_outputs, logps, preds_save, preds, batch, length_out):\n\n\t\t# import pdb; pdb.set_trace\n\t\teos_mask = ((pred[:, i-1].squeeze(1) == EOS).type(torch.uint8)\n\t\t\t+ eos_mask.type(torch.uint8)).type(torch.bool).type(torch.uint8) # >=pt1.1\n\n\t\t# b x len x dim_model - [:,0,:] is dummy 0's\n\t\tdec_outputs[:, i, :] = dec_output[:, i-1]\n\t\t# b x len x vocab_size - [:,0,:] is dummy - (1/vocab_size).log() # individual logps\n\t\tlogps[:, i, :] = logp[:, i-1, :]\n\t\t# b x len - [:,0] is BOS\n\t\tpreds_save[:, i] = pred[:, i-1].view(-1)\n\n\t\t# append current pred, length+1\n\t\tpreds = torch.cat((preds,pred[:, i-1]),dim=1)\n\t\tflag = 0\n\t\tif sum(eos_mask.int()) == eos_mask.size(0):\n\t\t\tflag = 1\n\t\t\tif length_out != preds.size(1):\n\t\t\t\tdummy = torch.Tensor([PAD]).repeat(batch, length_out-preds.size(1)).type(\n\t\t\t\t\ttorch.LongTensor).to(device=device)\n\t\t\t\tpreds = torch.cat((preds,dummy),dim=1) # pad to max length\n\n\t\treturn eos_mask, dec_outputs, logps, preds_save, preds, flag\n\n\n\tdef _prep_translate(self, batch, beam_width, device, length_in, enc_outputs,\n\t\tsrc_mask_input=None):\n\n\t\t# prep\n\t\teos_mask = torch.BoolTensor([False]).repeat(batch * beam_width).to(device=device)\n\t\tlen_map = torch.Tensor([1]).repeat(batch * beam_width).to(device=device)\n\t\tpreds = torch.Tensor([BOS]).repeat(batch, 1).type(\n\t\t\ttorch.LongTensor).to(device=device)\n\n\t\t# repeat for beam_width times\n\t\t# a b c d -> aaa bbb ccc ddd\n\n\t\t# b x len x dim_model -> (b x beam_width) x len x dim_model\n\t\tenc_outputs_expand = enc_outputs.repeat(1, beam_width, 1).view(-1, length_in, self.dim_model)\n\t\t# (b x beam_width) x len\n\t\tpreds_expand = preds.repeat(1, beam_width).view(-1, preds.size(-1))\n\t\t# (b x beam_width)\n\t\tscores_expand = torch.Tensor([0]).repeat(batch * beam_width).type(\n\t\t\ttorch.FloatTensor).to(device=device)\n\t\t# b x 1 x len -> (b x beam_width) x 1 x len\n\t\tif type(src_mask_input) != type(None):\n\t\t\tsrc_mask_input_expand = src_mask_input.repeat(\n\t\t\t\t1, beam_width, 1).view(-1, 1, src_mask_input.size(-1))\n\t\telse:\n\t\t\tsrc_mask_input_expand = None\n\n\t\treturn eos_mask, len_map, preds, enc_outputs_expand, preds_expand, \\\n\t\t\tscores_expand, src_mask_input_expand\n\n\n\tdef _step_translate(self, i, batch, beam_width, device,\n\t\tdec_output_expand, logp_expand, pred_expand, score_expand,\n\t\tpreds_expand, scores_expand, eos_mask, len_map, penalty_factor):\n\n\t\t# import pdb; pdb.set_trace()\n\t\t# select current slice\n\t\tdec_output = dec_output_expand[:, i-1]\t# (b x beam_width) x dim_model - no use\n\t\tlogp = logp_expand[:, i-1, :] \t# (b x beam_width) x vocab_size - no use\n\t\tpred = pred_expand[:, i-1] \t\t# (b x beam_width) x beam_width\n\t\tscore = score_expand[:, i-1]\t\t# (b x beam_width) x beam_width\n\n\t\t# select k candidates from k^2 candidates\n\t\tif i == 1:\n\t\t\t# inital state, keep first k candidates\n\t\t\t# b x (beam_width x beam_width) -> b x (beam_width) -> (b x beam_width) x 1\n\t\t\tscore_select = scores_expand + score.reshape(batch, -1)[:,:beam_width]\\\n\t\t\t\t.contiguous().view(-1)\n\t\t\tscores_expand = score_select\n\t\t\tpred_select = pred.reshape(batch, 
\n\t\tif i == 1:\n\t\t\t# initial state, keep first k candidates\n\t\t\t# b x (beam_width x beam_width) -> b x (beam_width) -> (b x beam_width) x 1\n\t\t\tscore_select = scores_expand + score.reshape(batch, -1)[:,:beam_width]\\\n\t\t\t\t.contiguous().view(-1)\n\t\t\tscores_expand = score_select\n\t\t\tpred_select = pred.reshape(batch, -1)[:, :beam_width].contiguous().view(-1)\n\t\t\tpreds_expand = torch.cat((preds_expand,pred_select.unsqueeze(-1)),dim=1)\n\n\t\telse:\n\t\t\t# keep only 1 candidate when hitting eos\n\t\t\t# (b x beam_width) x beam_width\n\t\t\teos_mask_expand = eos_mask.reshape(-1,1).repeat(1, beam_width)\n\t\t\teos_mask_expand[:,0] = False\n\t\t\t# (b x beam_width) x beam_width\n\t\t\tscore_temp = scores_expand.reshape(-1,1) + score.masked_fill(\n\t\t\t\teos_mask.reshape(-1,1), 0).masked_fill(eos_mask_expand, -1e9)\n\t\t\t# length penalty\n\t\t\tscore_temp = score_temp / (len_map.reshape(-1,1) ** penalty_factor)\n\t\t\t# select top k from k^2\n\t\t\t# (b x beam_width^2 -> b x beam_width)\n\t\t\tscore_select, pos = score_temp.reshape(batch, -1).topk(beam_width)\n\t\t\tscores_expand = score_select.view(-1) * (len_map.reshape(-1,1) ** penalty_factor).view(-1)\n\t\t\t# select correct elements according to pos\n\t\t\t# (torch.range is deprecated; arange yields the same per-sentence offsets)\n\t\t\tpos = (pos.float() + torch.arange(0, batch * (beam_width**2), (beam_width**2)).float().to(\n\t\t\t\tdevice=device).reshape(batch, 1)).long()\n\t\t\tr_idxs, c_idxs = pos // beam_width, pos % beam_width # b x beam_width\n\t\t\tpred_select = pred[r_idxs, c_idxs].view(-1) # b x beam_width -> (b x beam_width)\n\t\t\t# Copy the corresponding previous tokens.\n\t\t\tpreds_expand[:, :i] = preds_expand[r_idxs.view(-1), :i] # (b x beam_width) x i\n\t\t\t# Set the best tokens in this beam search step\n\t\t\tpreds_expand = torch.cat((preds_expand, pred_select.unsqueeze(-1)),dim=1)\n\n\t\t# locate the eos in the generated sequences\n\t\t# eos_mask = (pred_select == EOS) + eos_mask # >=pt1.3\n\t\teos_mask = ((pred_select == EOS).type(torch.uint8)\n\t\t\t+ eos_mask.type(torch.uint8)).type(torch.bool).type(torch.uint8) # >=pt1.1\n\t\tlen_map = len_map + torch.Tensor([1]).repeat(batch * beam_width).to(\n\t\t\tdevice=device).masked_fill(eos_mask.type(torch.uint8), 0)\n\n\t\t# early stop\n\t\tflag = 0\n\t\tif sum(eos_mask.int()) == eos_mask.size(0): flag = 1\n\n\t\treturn scores_expand, preds_expand, eos_mask, len_map, flag\n\n\n\tdef forward_train(self, src, tgt=None, acous_feats=None, acous_lens=None,\n\t\tmode='ST', use_gpu=True, lm_mode='null', lm_model=None):\n\n\t\t\"\"\"\n\t\t\tmode: \tASR \t\tacous -> src\n\t\t\t\t\tAE \t\t\tsrc -> src\n\t\t\t\t\tST\t\t\tacous -> tgt\n\t\t\t\t\tMT \t\t\tsrc -> tgt\n\t\t\"\"\"\n\n\t\t# note: adding .type(torch.uint8) to be compatible with pytorch 1.1!\n\t\tout_dict={}\n\n\t\t# check gpu\n\t\tglobal device\n\t\tdevice = check_device(use_gpu)\n\n\t\t# check mode\n\t\tmode = mode.upper()\n\t\tassert type(src) != type(None)\n\t\tif 'ST' in mode or 'ASR' in mode:\n\t\t\tassert type(acous_feats) != type(None)\n\t\tif 'ST' in mode or 'MT' in mode:\n\t\t\tassert type(tgt) != type(None)\n\n\t\tif 'ASR' in mode:\n\t\t\t\"\"\"\n\t\t\t\tacous -> EN: RNN\n\t\t\t\tin : length reduced fbk features\n\t\t\t\tout: w1 w2 w3 #=6\n\t\t\t\"\"\"\n\t\t\temb_src, logps_src, preds_src, lengths = self._encoder_acous(acous_feats, acous_lens,\n\t\t\t\tdevice, use_gpu, tgt=src, is_training=True, teacher_forcing_ratio=1.0,\n\t\t\t\tlm_mode=lm_mode, lm_model=lm_model)\n\n\t\t\t# output dict\n\t\t\tout_dict['emb_asr'] = emb_src # dynamic\n\t\t\tout_dict['preds_asr'] = preds_src\n\t\t\tout_dict['logps_asr'] = logps_src\n\t\t\tout_dict['lengths_asr'] = lengths\n\n\t\tif 'MT' in mode:\n\t\t\t\"\"\"\n\t\t\t\tEN -> DE: Transformer\n\t\t\t\tsrc: w1 w2 w3 #=7\n\t\t\t\tmid: w1 w2 w3 #=6\n\t\t\t\tout: c1 c2 c3 [dummy] #=7\n\n\t\t\t\tnote: add average dynamic embedding to static 
embedding\n\t\t\t\"\"\"\n\t\t\t# get tgt emb\n\t\t\ttgt_mask, emb_tgt = self._get_tgt_emb(tgt, device)\n\t\t\t# get src emb\n\t\t\tsrc_trim = self._pre_proc_src(src, device)\n\t\t\temb_dyn_ave = self.EMB_DYN_AVE\n\t\t\temb_dyn_ave_expand = emb_dyn_ave.repeat(\n\t\t\t\tsrc_trim.size(0), src_trim.size(1), 1).to(device=device)\n\t\t\tsrc_mask, emb_src, src_mask_input = self._get_src_emb(\n\t\t\t\tsrc_trim, emb_dyn_ave_expand, device)\n\n\t\t\t# encode decode\n\t\t\tenc_outputs = self._encoder_en(emb_src, src_mask=src_mask_input) # b x len x dim_model\n\t\t\t# decode\n\t\t\tdec_outputs_tgt, logits_tgt, logps_tgt, preds_tgt, _ = \\\n\t\t\t\tself._decoder_de(emb_tgt, enc_outputs, tgt_mask=tgt_mask, src_mask=src_mask_input)\n\n\t\t\t# output dict\n\t\t\tout_dict['emb_mt'] = emb_src # combined\n\t\t\tout_dict['preds_mt'] = preds_tgt\n\t\t\tout_dict['logps_mt'] = logps_tgt\n\n\t\tif 'ST' in mode:\n\t\t\t\"\"\"\n\t\t\t\tacous -> DE: Transformer\n\t\t\t\tin : length reduced fbk features\n\t\t\t\tmid: w1 w2 w3 #=6\n\t\t\t\tout: c1 c2 c3 [dummy] #=7\n\t\t\t\"\"\"\n\t\t\t# get tgt emb\n\t\t\ttgt_mask, emb_tgt = self._get_tgt_emb(tgt, device)\n\t\t\t# run ASR\n\t\t\tif 'ASR' in mode:\n\t\t\t\temb_src_dyn = out_dict['emb_asr']\n\t\t\t\tlengths = out_dict['lengths_asr']\n\t\t\t# else: # use free running if no 'ASR'\n\t\t\t# \temb_src_dyn, _, _, lengths = self._encoder_acous(acous_feats, acous_lens,\n\t\t\t# \t\tdevice, use_gpu, tgt=src, is_training=True, teacher_forcing_ratio=1.0)\n\t\t\telse: # use free running if no 'ASR'\n\t\t\t\temb_src_dyn, _, _, lengths = self._encoder_acous(acous_feats, acous_lens,\n\t\t\t\t\tdevice, use_gpu, is_training=False, teacher_forcing_ratio=0.0,\n\t\t\t\t\tlm_mode=lm_mode, lm_model=lm_model)\n\n\t\t\t# get combined embedding\n\t\t\tsrc_trim = self._pre_proc_src(src, device)\n\t\t\t_, emb_src, _ = self._get_src_emb(src_trim, emb_src_dyn, device)\n\n\t\t\t# get mask\n\t\t\tmax_len = emb_src.size(1)\n\t\t\tlengths = torch.LongTensor(lengths)\n\t\t\tsrc_mask_input = (torch.arange(max_len).expand(len(lengths), max_len)\n\t\t\t\t< lengths.unsqueeze(1)).unsqueeze(1).to(device=device)\n\t\t\t# encode\n\t\t\tenc_outputs = self._encoder_en(emb_src, src_mask=src_mask_input) # b x len x dim_model\n\t\t\t# decode\n\t\t\tdec_outputs_tgt, logits_tgt, logps_tgt, preds_tgt, _ = \\\n\t\t\t\tself._decoder_de(emb_tgt, enc_outputs, tgt_mask=tgt_mask, src_mask=src_mask_input)\n\n\t\t\t# output dict\n\t\t\tout_dict['emb_st'] = emb_src # combined\n\t\t\tout_dict['preds_st'] = preds_tgt\n\t\t\tout_dict['logps_st'] = logps_tgt\n\n\t\treturn out_dict\n\n\n\tdef forward_eval(self, src=None, acous_feats=None, acous_lens=None,\n\t\tmode='ST', use_gpu=True, lm_mode='null', lm_model=None):\n\n\t\t\"\"\"\n\t\t\tbeam_width = 1\n\t\t\tnote the output sequence different from training if using transformer model\n\t\t\"\"\"\n\n\t\t# import pdb; pdb.set_trace()\n\t\tout_dict={}\n\n\t\t# check gpu\n\t\tglobal device\n\t\tdevice = check_device(use_gpu)\n\n\t\t# check mode\n\t\tmode = mode.upper()\n\t\tif 'ST' in mode or 'ASR' in mode:\n\t\t\tassert type(acous_feats) != type(None)\n\t\t\tbatch = acous_feats.size(0)\n\t\tif 'MT' in mode or 'AE' in mode:\n\t\t\tassert type(src) != type(None)\n\t\t\tbatch = src.size(0)\n\n\t\tlength_out_src = self.max_seq_len_src\n\t\tlength_out_tgt = self.max_seq_len_tgt\n\n\t\tif 'ASR' in mode:\n\t\t\t\"\"\"\n\t\t\t\tacous -> EN: RNN\n\t\t\t\tin : length reduced fbk features\n\t\t\t\tout: w1 w2 w3 #=6\n\t\t\t\"\"\"\n\t\t\t# run asr\n\t\t\temb_src, logps_src, preds_src, lengths = 
self._encoder_acous(acous_feats, acous_lens,\n\t\t\t\tdevice, use_gpu, is_training=False, teacher_forcing_ratio=0.0,\n\t\t\t\tlm_mode=lm_mode, lm_model=lm_model)\n\n\t\t\t# output dict\n\t\t\tout_dict['emb_asr'] = emb_src\n\t\t\tout_dict['preds_asr'] = preds_src\n\t\t\tout_dict['logps_asr'] = logps_src\n\t\t\tout_dict['lengths_asr'] = lengths\n\n\t\tif 'MT' in mode:\n\t\t\t\"\"\"\n\t\t\t\tEN -> DE: Transformer\n\t\t\t\tin : w1 w2 w3 #=7\n\t\t\t\tmid: w1 w2 w3 #=7\n\t\t\t\tout: c1 c2 c3 #=7\n\t\t\t\"\"\"\n\t\t\t# get src emb\n\t\t\tsrc_trim = self._pre_proc_src(src, device)\n\t\t\temb_dyn_ave = self.EMB_DYN_AVE\n\t\t\temb_dyn_ave_expand = emb_dyn_ave.repeat(\n\t\t\t\tsrc_trim.size(0), src_trim.size(1), 1).to(device=device)\n\t\t\tsrc_mask, emb_src, src_mask_input = self._get_src_emb(\n\t\t\t\tsrc_trim, emb_dyn_ave_expand, device)\n\t\t\t# encoder\n\t\t\tenc_outputs = self._encoder_en(emb_src, src_mask=src_mask_input) # b x len x dim_model\n\n\t\t\t# prep\n\t\t\teos_mask_tgt, logps_tgt, dec_outputs_tgt, preds_save_tgt, preds_tgt, preds_save_tgt = \\\n\t\t\t\tself._prep_eval(batch, length_out_tgt, self.dec_vocab_size, device)\n\n\t\t\tfor i in range(1, self.max_seq_len_tgt):\n\n\t\t\t\ttgt_mask, emb_tgt = self._get_tgt_emb(preds_tgt, device)\n\t\t\t\tdec_output_tgt, logit_tgt, logp_tgt, pred_tgt, _ = \\\n\t\t\t\t\tself._decoder_de(emb_tgt, enc_outputs, tgt_mask=tgt_mask, src_mask=src_mask_input)\n\n\t\t\t\teos_mask_tgt, dec_outputs_tgt, logps_tgt, preds_save_tgt, preds_tgt, flag \\\n\t\t\t\t\t= self._step_eval(i, eos_mask_tgt, dec_output_tgt, logp_tgt, pred_tgt,\n\t\t\t\t\t\tdec_outputs_tgt, logps_tgt, preds_save_tgt, preds_tgt, batch, length_out_tgt)\n\t\t\t\tif flag == 1: break\n\n\t\t\t# output dict\n\t\t\tout_dict['emb_mt'] = emb_src\n\t\t\tout_dict['preds_mt'] = preds_tgt\n\t\t\tout_dict['logps_mt'] = logps_tgt\n\n\t\tif 'ST' in mode:\n\t\t\t\"\"\"\n\t\t\t\tacous -> DE: Transformer\n\t\t\t\tin : length reduced fbk features\n\t\t\t\tout: c1 c2 c3 #=7\n\t\t\t\"\"\"\n\t\t\t# get embedding\n\t\t\tif 'ASR' in mode:\n\t\t\t\tpreds_src = out_dict['preds_asr']\n\t\t\t\temb_src_dyn = out_dict['emb_asr']\n\t\t\t\tlengths = out_dict['lengths_asr']\n\t\t\telse:\n\t\t\t\temb_src_dyn, _, preds_src, lengths = self._encoder_acous(acous_feats, acous_lens,\n\t\t\t\t\tdevice, use_gpu, is_training=False, teacher_forcing_ratio=0.0,\n\t\t\t\t\tlm_mode=lm_mode, lm_model=lm_model)\n\t\t\t_, emb_src, _ = self._get_src_emb(preds_src.squeeze(2), emb_src_dyn, device)\n\n\t\t\t# get mask\n\t\t\tmax_len = emb_src.size(1)\n\t\t\tlengths = torch.LongTensor(lengths)\n\t\t\tsrc_mask_input = (torch.arange(max_len).expand(len(lengths), max_len)\n\t\t\t\t< lengths.unsqueeze(1)).unsqueeze(1).to(device=device)\n\t\t\t# encode\n\t\t\tenc_outputs = self._encoder_en(emb_src, src_mask=src_mask_input) # b x len x dim_model\n\n\t\t\t# prep\n\t\t\teos_mask_tgt, logps_tgt, dec_outputs_tgt, preds_save_tgt, preds_tgt, preds_save_tgt = \\\n\t\t\t\tself._prep_eval(batch, length_out_tgt, self.dec_vocab_size, device)\n\n\t\t\tfor i in range(1, self.max_seq_len_tgt):\n\n\t\t\t\ttgt_mask, emb_tgt = self._get_tgt_emb(preds_tgt, device)\n\t\t\t\tdec_output_tgt, logit_tgt, logp_tgt, pred_tgt, _ = \\\n\t\t\t\t\tself._decoder_de(emb_tgt, enc_outputs, tgt_mask=tgt_mask, src_mask=src_mask_input)\n\n\t\t\t\teos_mask_tgt, dec_outputs_tgt, logps_tgt, preds_save_tgt, preds_tgt, flag \\\n\t\t\t\t\t= self._step_eval(i, eos_mask_tgt, dec_output_tgt, logp_tgt, pred_tgt,\n\t\t\t\t\t\tdec_outputs_tgt, logps_tgt, preds_save_tgt, preds_tgt, batch, 
length_out_tgt)\n\t\t\t\tif flag == 1: break\n\n\t\t\t# output dict\n\t\t\tout_dict['emb_st'] = emb_src\n\t\t\tout_dict['preds_st'] = preds_tgt\n\t\t\tout_dict['logps_st'] = logps_tgt\n\n\t\treturn out_dict\n\n\n\tdef forward_translate(self, acous_feats=None, acous_lens=None, src=None,\n\t\tbeam_width=1, penalty_factor=1, use_gpu=True, max_seq_len=900, mode='ST',\n\t\tlm_mode='null', lm_model=None):\n\n\t\t\"\"\"\n\t\t\trun inference - with beam search (same output format as is in forward_eval)\n\t\t\"\"\"\n\n\t\t# import pdb; pdb.set_trace()\n\n\t\t# check gpu\n\t\tglobal device\n\t\tdevice = check_device(use_gpu)\n\n\t\tif mode == 'ASR':\n\t\t\t_, _, preds_src, _ = self._encoder_acous(acous_feats, acous_lens, device, use_gpu,\n\t\t\t\tis_training=False, teacher_forcing_ratio=0.0, lm_mode=lm_mode, lm_model=lm_model)\n\t\t\tpreds = preds_src\n\n\t\telif mode == 'MT':\n\t\t\tbatch = src.size(0)\n\n\t\t\t# txt encoder\n\t\t\tsrc_trim = self._pre_proc_src(src, device)\n\t\t\temb_dyn_ave = self.EMB_DYN_AVE\n\t\t\temb_dyn_ave_expand = emb_dyn_ave.repeat(\n\t\t\t\tsrc_trim.size(0), src_trim.size(1), 1).to(device=device)\n\t\t\tsrc_mask, emb_src, src_mask_input = self._get_src_emb(\n\t\t\t\tsrc_trim, emb_dyn_ave_expand, device)\n\t\t\tenc_outputs = self._encoder_en(emb_src, src_mask=src_mask_input)\n\t\t\tlength_in = enc_outputs.size(1)\n\n\t\t\t# prep\n\t\t\teos_mask, len_map, preds, enc_outputs_expand, preds_expand, \\\n\t\t\t\tscores_expand, src_mask_input_expand = self._prep_translate(\n\t\t\t\tbatch, beam_width, device, length_in, enc_outputs, src_mask_input)\n\n\t\t\t# loop over sequence length\n\t\t\tfor i in range(1, max_seq_len):\n\n\t\t\t\ttgt_mask_expand, emb_tgt_expand = self._get_tgt_emb(preds_expand, device)\n\t\t\t\tdec_output_expand, logit_expand, logp_expand, pred_expand, score_expand = \\\n\t\t\t\t\tself._decoder_de(emb_tgt_expand, enc_outputs_expand,\n\t\t\t\t\ttgt_mask=tgt_mask_expand, src_mask=src_mask_input_expand,\n\t\t\t\t\tbeam_width=beam_width)\n\n\t\t\t\tscores_expand, preds_expand, eos_mask, len_map, flag = \\\n\t\t\t\t\tself._step_translate(i, batch, beam_width, device,\n\t\t\t\t\t\tdec_output_expand, logp_expand, pred_expand, score_expand,\n\t\t\t\t\t\tpreds_expand, scores_expand, eos_mask, len_map, penalty_factor)\n\t\t\t\tif flag == 1: break\n\n\t\t\t# select the best candidate\n\t\t\tpreds = preds_expand.reshape(batch, -1)[:, :max_seq_len].contiguous() # b x len\n\t\t\tscores = scores_expand.reshape(batch, -1)[:, 0].contiguous() # b\n\n\t\telif mode == 'ST':\n\t\t\tbatch = acous_feats.size(0)\n\n\t\t\t# get embedding\n\t\t\temb_src_dyn, _, preds_src, lengths = self._encoder_acous(acous_feats, acous_lens, device, use_gpu,\n\t\t\t\tis_training=False, teacher_forcing_ratio=0.0, lm_mode=lm_mode, lm_model=lm_model)\n\t\t\t_, emb_src, _ = self._get_src_emb(preds_src.squeeze(2), emb_src_dyn, device)\n\n\t\t\t# get mask\n\t\t\tmax_len = emb_src.size(1)\n\t\t\tlengths = torch.LongTensor(lengths)\n\t\t\tsrc_mask_input = (torch.arange(max_len).expand(len(lengths), max_len)\n\t\t\t\t< lengths.unsqueeze(1)).unsqueeze(1).to(device=device)\n\t\t\t# encode\n\t\t\tenc_outputs = self._encoder_en(emb_src, src_mask=src_mask_input) # b x len x dim_model\n\t\t\tlength_in = enc_outputs.size(1)\n\n\t\t\t# prep\n\t\t\teos_mask, len_map, preds, enc_outputs_expand, preds_expand, \\\n\t\t\t\tscores_expand, src_mask_input_expand = self._prep_translate(\n\t\t\t\tbatch, beam_width, device, length_in, enc_outputs, src_mask_input)\n\n\t\t\t# loop over sequence length\n\t\t\tfor i in range(1, 
max_seq_len):\n\n\t\t\t\t# import pdb; pdb.set_trace()\n\n\t\t\t\t# Get k candidates for each beam, k^2 candidates in total (k=beam_width)\n\t\t\t\ttgt_mask_expand, emb_tgt_expand = self._get_tgt_emb(preds_expand, device)\n\t\t\t\tdec_output_expand, logit_expand, logp_expand, pred_expand, score_expand = \\\n\t\t\t\t\tself._decoder_de(emb_tgt_expand, enc_outputs_expand,\n\t\t\t\t\ttgt_mask=tgt_mask_expand, src_mask=src_mask_input_expand,\n\t\t\t\t\tbeam_width=beam_width)\n\n\t\t\t\tscores_expand, preds_expand, eos_mask, len_map, flag = \\\n\t\t\t\t\tself._step_translate(i, batch, beam_width, device,\n\t\t\t\t\t\tdec_output_expand, logp_expand, pred_expand, score_expand,\n\t\t\t\t\t\tpreds_expand, scores_expand, eos_mask, len_map, penalty_factor)\n\t\t\t\tif flag == 1: break\n\n\t\t\t# select the best candidate\n\t\t\tpreds = preds_expand.reshape(batch, -1)[:, :max_seq_len].contiguous() # b x len\n\t\t\tscores = scores_expand.reshape(batch, -1)[:, 0].contiguous() # b\n\n\t\telif mode == 'ST_BASE':\n\n\t\t\t\"\"\"\n\t\t\t\tonly for decoding before fine-tuning on ST data\n\t\t\t\tuse average dyn embedding\n\t\t\t\"\"\"\n\t\t\tbatch = acous_feats.size(0)\n\n\t\t\t# import pdb; pdb.set_trace()\n\t\t\t# run asr\n\t\t\t_, _, preds_src, lengths = self._encoder_acous(acous_feats, acous_lens, device, use_gpu,\n\t\t\t\tis_training=False, teacher_forcing_ratio=0.0, lm_mode=lm_mode, lm_model=lm_model)\n\t\t\t# ave embedding\n\t\t\temb_dyn_ave = self.EMB_DYN_AVE\n\t\t\temb_src_dyn = emb_dyn_ave.repeat(\n\t\t\t\t preds_src.size(0), preds_src.size(1), 1).to(device=device)\n\n\t\t\t_, emb_src, _ = self._get_src_emb(preds_src.squeeze(2), emb_src_dyn, device)\n\n\t\t\t# get mask\n\t\t\tmax_len = emb_src.size(1)\n\t\t\tlengths = torch.LongTensor(lengths)\n\t\t\tsrc_mask_input = (torch.arange(max_len).expand(len(lengths), max_len)\n\t\t\t\t< lengths.unsqueeze(1)).unsqueeze(1).to(device=device)\n\t\t\t# encode\n\t\t\tenc_outputs = self._encoder_en(emb_src, src_mask=src_mask_input) # b x len x dim_model\n\t\t\tlength_in = enc_outputs.size(1)\n\n\t\t\t# prep\n\t\t\teos_mask, len_map, preds, enc_outputs_expand, preds_expand, \\\n\t\t\t\tscores_expand, src_mask_input_expand = self._prep_translate(\n\t\t\t\tbatch, beam_width, device, length_in, enc_outputs, src_mask_input)\n\n\t\t\t# loop over sequence length\n\t\t\tfor i in range(1, max_seq_len):\n\n\t\t\t\t# import pdb; pdb.set_trace()\n\n\t\t\t\t# Get k candidates for each beam, k^2 candidates in total (k=beam_width)\n\t\t\t\ttgt_mask_expand, emb_tgt_expand = self._get_tgt_emb(preds_expand, device)\n\t\t\t\tdec_output_expand, logit_expand, logp_expand, pred_expand, score_expand = \\\n\t\t\t\t\tself._decoder_de(emb_tgt_expand, enc_outputs_expand,\n\t\t\t\t\ttgt_mask=tgt_mask_expand, src_mask=src_mask_input_expand,\n\t\t\t\t\tbeam_width=beam_width)\n\n\t\t\t\tscores_expand, preds_expand, eos_mask, len_map, flag = \\\n\t\t\t\t\tself._step_translate(i, batch, beam_width, device,\n\t\t\t\t\t\tdec_output_expand, logp_expand, pred_expand, score_expand,\n\t\t\t\t\t\tpreds_expand, scores_expand, eos_mask, len_map, penalty_factor)\n\t\t\t\tif flag == 1: break\n\n\t\t\t# select the best candidate\n\t\t\tpreds = preds_expand.reshape(batch, -1)[:, :max_seq_len].contiguous() # b x len\n\t\t\tscores = scores_expand.reshape(batch, -1)[:, 0].contiguous() # b\n\n\t\treturn preds\n\n\n\tdef forward_translate_refen(self, acous_feats=None, acous_lens=None, src=None,\n\t\tbeam_width=1, penalty_factor=1, use_gpu=True, max_seq_len=900, mode='ST',\n\t\tlm_mode='null', 
lm_model=None):\n\n\t\t\"\"\"\n\t\t\trun inference - with beam search (same output format as is in forward_eval)\n\t\t\"\"\"\n\n\t\t# import pdb; pdb.set_trace()\n\n\t\t# check gpu\n\t\tglobal device\n\t\tdevice = check_device(use_gpu)\n\n\t\tif mode == 'ASR':\n\t\t\t_, _, preds_src, _ = self._encoder_acous(acous_feats, acous_lens,\n\t\t\t\tdevice, use_gpu, tgt=src, is_training=False, teacher_forcing_ratio=1.0,\n\t\t\t\tlm_mode=lm_mode, lm_model=lm_model)\n\n\t\t\tpreds = preds_src\n\n\t\telif mode == 'MT':\n\t\t\tbatch = src.size(0)\n\n\t\t\t# txt encoder\n\t\t\tsrc_trim = self._pre_proc_src(src, device)\n\t\t\temb_dyn_ave = self.EMB_DYN_AVE\n\t\t\temb_dyn_ave_expand = emb_dyn_ave.repeat(\n\t\t\t\tsrc_trim.size(0), src_trim.size(1), 1).to(device=device)\n\t\t\tsrc_mask, emb_src, src_mask_input = self._get_src_emb(\n\t\t\t\tsrc_trim, emb_dyn_ave_expand, device)\n\t\t\tenc_outputs = self._encoder_en(emb_src, src_mask=src_mask_input)\n\t\t\tlength_in = enc_outputs.size(1)\n\n\t\t\t# prep\n\t\t\teos_mask, len_map, preds, enc_outputs_expand, preds_expand, \\\n\t\t\t\tscores_expand, src_mask_input_expand = self._prep_translate(\n\t\t\t\tbatch, beam_width, device, length_in, enc_outputs, src_mask_input)\n\n\t\t\t# loop over sequence length\n\t\t\tfor i in range(1, max_seq_len):\n\n\t\t\t\ttgt_mask_expand, emb_tgt_expand = self._get_tgt_emb(preds_expand, device)\n\t\t\t\tdec_output_expand, logit_expand, logp_expand, pred_expand, score_expand = \\\n\t\t\t\t\tself._decoder_de(emb_tgt_expand, enc_outputs_expand,\n\t\t\t\t\ttgt_mask=tgt_mask_expand, src_mask=src_mask_input_expand,\n\t\t\t\t\tbeam_width=beam_width)\n\n\t\t\t\tscores_expand, preds_expand, eos_mask, len_map, flag = \\\n\t\t\t\t\tself._step_translate(i, batch, beam_width, device,\n\t\t\t\t\t\tdec_output_expand, logp_expand, pred_expand, score_expand,\n\t\t\t\t\t\tpreds_expand, scores_expand, eos_mask, len_map, penalty_factor)\n\t\t\t\tif flag == 1: break\n\n\t\t\t# select the best candidate\n\t\t\tpreds = preds_expand.reshape(batch, -1)[:, :max_seq_len].contiguous() # b x len\n\t\t\tscores = scores_expand.reshape(batch, -1)[:, 0].contiguous() # b\n\n\t\telif mode == 'ST':\n\t\t\tbatch = acous_feats.size(0)\n\n\t\t\t# get embedding\n\t\t\temb_src_dyn, _, preds_src, lengths = self._encoder_acous(acous_feats, acous_lens,\n\t\t\t\tdevice, use_gpu, tgt=src, is_training=False, teacher_forcing_ratio=1.0,\n\t\t\t\tlm_mode=lm_mode, lm_model=lm_model)\n\t\t\tsrc_trim = self._pre_proc_src(src, device)\n\t\t\t_, emb_src, _ = self._get_src_emb(src_trim, emb_src_dyn, device) # use ref\n\n\t\t\t# get mask\n\t\t\tmax_len = emb_src.size(1)\n\t\t\tlengths = torch.LongTensor(lengths)\n\t\t\tsrc_mask_input = (torch.arange(max_len).expand(len(lengths), max_len)\n\t\t\t\t< lengths.unsqueeze(1)).unsqueeze(1).to(device=device)\n\t\t\t# encode\n\t\t\tenc_outputs = self._encoder_en(emb_src, src_mask=src_mask_input) # b x len x dim_model\n\t\t\tlength_in = enc_outputs.size(1)\n\n\t\t\t# prep\n\t\t\teos_mask, len_map, preds, enc_outputs_expand, preds_expand, \\\n\t\t\t\tscores_expand, src_mask_input_expand = self._prep_translate(\n\t\t\t\tbatch, beam_width, device, length_in, enc_outputs, src_mask_input)\n\n\t\t\t# loop over sequence length\n\t\t\tfor i in range(1, max_seq_len):\n\n\t\t\t\t# import pdb; pdb.set_trace()\n\n\t\t\t\t# Get k candidates for each beam, k^2 candidates in total (k=beam_width)\n\t\t\t\ttgt_mask_expand, emb_tgt_expand = self._get_tgt_emb(preds_expand, device)\n\t\t\t\tdec_output_expand, logit_expand, logp_expand, pred_expand, score_expand = 
\\\n\t\t\t\t\tself._decoder_de(emb_tgt_expand, enc_outputs_expand,\n\t\t\t\t\ttgt_mask=tgt_mask_expand, src_mask=src_mask_input_expand,\n\t\t\t\t\tbeam_width=beam_width)\n\n\t\t\t\tscores_expand, preds_expand, eos_mask, len_map, flag = \\\n\t\t\t\t\tself._step_translate(i, batch, beam_width, device,\n\t\t\t\t\t\tdec_output_expand, logp_expand, pred_expand, score_expand,\n\t\t\t\t\t\tpreds_expand, scores_expand, eos_mask, len_map, penalty_factor)\n\t\t\t\tif flag == 1: break\n\n\t\t\t# select the best candidate\n\t\t\tpreds = preds_expand.reshape(batch, -1)[:, :max_seq_len].contiguous() # b x len\n\t\t\tscores = scores_expand.reshape(batch, -1)[:, 0].contiguous() # b\n\n\t\treturn preds\n\n\n\tdef check_var(self, var_name, var_val_set=None):\n\n\t\t\"\"\" to make old models compatible with class variables added in later versions \"\"\"\n\n\t\tif not hasattr(self, var_name):\n\t\t\tvar_val = var_val_set if type(var_val_set) != type(None) else None\n\n\t\t\t# set class attribute to default value\n\t\t\tsetattr(self, var_name, var_val)\n","sub_path":"models/Seq2seq.py","file_name":"Seq2seq.py","file_ext":"py","file_size_in_byte":32382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"300854471","text":"# 23-regex\n# Created by Alexiuce at 2018/6/21\n\"\"\" The re regular-expression module\n\n. matches any one character (except \\\n)\n\n\\d any one digit 0-9\n\\D any non-digit\n\n\\s any whitespace character : space, tab, \\r, \\t, etc.\n\n\\w word character: letter, digit, underscore (cf. identifier naming rules)\n\n\\b word boundary\n\n[] matches any one element listed inside []\n[^] the opposite: matches anything NOT listed inside []\n=====================================================\nQuantifiers\n\n* : any number of times\n+ : at least 1\n? : 0 or 1\n{n} : exactly n\n{m,} : at least m\n{m,n}: m to n\n\nthe re.match method\n\n* by default, match scans the string from left to right and, as soon as it stops matching, ends and returns the result\n\n\"\"\"\n\nimport re\n\n\ndef test():\n \"\"\" re.match(pattern, string) \"\"\"\n\n pattern = \"[a-z]+\\d{4}\"\n result = re.match(pattern,\"abc123\") # only 3 digits here, so this prints None\n print(result)\n\n print(\"*\"*30)\n s = r'\\?' # raw string\n print(re.match(s,\"?hello?\"))\n\n p1 = r\"[1-9]\\d?\"\n r1 = re.match(p1,'200')\n print(r1)\n
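\n # extra quick sketch (added) using the quantifier table above:\n # \\d{2,4} matches a run of 2 to 4 digits; findall returns every match\n print(re.findall(r'\\d{2,4}', 'ids: 7, 42, 1234')) # ['42', '1234']\n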

\n # grouping\n t1 = \"<html><body><p>(.*)</p></body></html>\"\n res2 = re.match(t1,\"<html><body><p>Hello p</p></body></html>\")\n print(res2.group(1)) # Hello p - the content captured by the first () group\n\n\n html = \"<html><body><p>Html body p</p></body></html>\"\n # ptn = r'<(.+)><(.+)><(.+)>(.+)'\n\n \"\"\"\n define a group name: (?P<name>)\n refer to a named group: (?P=groupname)\n \"\"\"\n\n # the group names here are illustrative placeholders\n ptn = r'<(?P<tag1>.+)><(?P<tag2>.+)><(?P<tag3>.+)>.+'\n r2 = re.match(ptn,html)\n print(r2.group(1))\n\n\"\"\" other uses of the re module\nsearch : scans the string for the pattern; by default it stops as soon as a match is found and returns the result\nfindall : finds all non-overlapping matches of the pattern\nsub : substitutes matches of the pattern; the replacement rule may be a function\nsplit : splits the string on the pattern\n\n\"\"\"\n

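\ndef findall_split_demo():\n # a minimal added sketch for the two methods not demoed below\n print(re.findall(r'\\d+', 'a1b22c333')) # ['1', '22', '333']\n print(re.split(r'[,;]', 'a,b;c')) # ['a', 'b', 'c']\n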
\ndef search_test():\n html = \"<html><body><p>Html body p</p></body></html>\"\n result = re.search(r'p',html)\n print(result.group())\n\ndef sub_test():\n html = \"<html><body><p>Html body p90</p></body></html>\"\n # re.sub('html','Html',html)\n\n a = re.sub(r'Html',replace_handle,html)\n print(a)\n\n\n\ndef replace_handle(result):\n return \"Body\"\n\n\ndef regex_test():\n url1 = \"http://www.itcast.com/python/read/list?id=100&pay=yes\"\n url2 = \"http://www.intfin.com/news/read/list?id=100&id=2\"\n url3 = \"http://www.zy-ln.com/afl.asp?id=345\"\n url4 = \"http://3399574.com/class09/list?pay=yes\"\n\n\n reg_url1 = re.sub(r'(http://.+?/).+',handle_test,url1)\n print(reg_url1)\n\ndef handle_test(result):\n return result.group(1)\n\n\n\ndef main():\n # test()\n # search_test()\n sub_test()\n\n\nif __name__ == '__main__':\n main()\n regex_test()\n\n","sub_path":"PythonStudy/01-Day/src/main/23-regex.py","file_name":"23-regex.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"308071328","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport json\nimport csv\nimport argparse\nimport pandas as pd\nimport numpy as np\nfrom math import ceil\nfrom tqdm import tqdm\nimport pickle\nimport shutil\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.nn import CrossEntropyLoss\nfrom torchvision import datasets, models\nimport torch.backends.cudnn as cudnn\nimport torch.nn.functional as F\nimport cv2\n\n\nfrom transforms import transforms\nfrom models.LoadModel import MainModel\nfrom utils.dataset_DCL import collate_fn4train, collate_fn4test, collate_fn4val, dataset\nfrom config import LoadConfig, load_data_transformers\n# from utils.test_tool import set_text, save_multi_img, cls_base_acc\n\n# if int(torch.__version__.split('.')[0])< 1 and int(torch.__version__.split('.')[1])< 41:\nfrom tensorboardX import SummaryWriter\n# else:\n# from torch.utils.tensorboard import SummaryWriter\nimport pdb\nimport time\nos.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\nos.environ['CUDA_VISIBLE_DEVICES'] = '1'\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='dcl parameters')\n parser.add_argument('--data', dest='dataset',\n default='ItargeCar', type=str)\n parser.add_argument('--backbone', dest='backbone',\n default='resnet50', type=str)\n parser.add_argument('--b', dest='batch_size',\n default=8, type=int)\n parser.add_argument('--nw', dest='num_workers',\n default=0, type=int)\n parser.add_argument('--ver', dest='version',\n default='test', type=str)\n parser.add_argument('--detail', dest='describe',\n default=None, type=str)\n parser.add_argument('--save', dest='resume',\n default=\"/NAS/shenjintong/DCL/net_model/training_descibe_41123_ItargeCar/model_best.pth\", type=str)\n parser.add_argument('--anno', dest='anno',\n default=None, type=str)\n parser.add_argument('--result_path', dest='result_path',\n default=\"/NAS/shenjintong/Dataset/ItargeCar/Result/DCL/raw_result/\", type=str)\n parser.add_argument('--size', dest='resize_resolution',\n default=512, type=int)\n parser.add_argument('--crop', dest='crop_resolution',\n default=448, type=int)\n parser.add_argument('--ss', dest='save_suffix',\n default=None, type=str)\n parser.add_argument('--acc_report', dest='acc_report',\n action='store_true')\n parser.add_argument('--swap_num', default=[7, 7],\n nargs=2, metavar=('swap1', 'swap2'),\n type=int, help='specify a range')\n parser.add_argument('--use_backbone', dest='use_backbone',\n action='store_false')\n parser.add_argument('--CAM', dest='CAM',\n action='store_true')\n parser.add_argument('--no_bbox', dest='no_bbox',\n action='store_true')\n parser.add_argument('--graph', 
dest='add_structure_graph',\n action='store_true')\n parser.add_argument('--no_loc', dest='no_loc',\n action='store_true')\n parser.add_argument('--cv', dest='opencv_save',\n action='store_true')\n parser.add_argument('--log_dir', dest='log_dir',\n default=None, type=str)\n parser.add_argument('--feature', dest='feature',\n action='store_true')\n args = parser.parse_args()\n return args\n\n\ndef CAM_test(feature_conv, weight_softmax,shape,sw):\n # hand-picked ids from different classes: render each image's CAM for every listed class\n # (note: this ad-hoc helper expects a module-level img to overlay onto)\n class_idx=[512,786,1078,1303,1869,1083,967,539,395,480,604,841]\n size_upsample = (shape[1], shape[0])\n nc, h, w = feature_conv.shape\n for i, idx in enumerate(class_idx):\n cam = np.dot(weight_softmax[idx],feature_conv.reshape((nc, h*w)))\n cam = cam.reshape(h, w)\n cam = cam - np.min(cam)\n cam_img = cam / np.max(cam)\n cam_img = np.uint8(255 * cam_img)\n heatmap = cv2.resize(cam_img, size_upsample)\n color_map = cv2.applyColorMap(heatmap.astype(np.uint8), cv2.COLORMAP_JET)\n attention_image = cv2.addWeighted(img, 0.5, color_map.astype(np.uint8), 0.5, 0)\n cv2.imwrite('imgs/test_%d_%d.jpg' % (i, idx), attention_image)\n attention_image = cv2.cvtColor(attention_image, cv2.COLOR_BGR2RGB)\n attention_image = attention_image.transpose((2, 0, 1))\n sw.add_image('attention_image', attention_image)\n\n\ndef returnCAM(args,feature_conv, weight_softmax, class_idx,img_name,dataset_pd,sw=None):\n if args.dataset=='ItargeCar'or args.dataset=='Itarge_car_no_wind':\n class_label = pd.read_csv(\"/NAS/shenjintong/Dataset/ItargeCar/csv_dataset/class_list.csv\")\n elif args.dataset=='ItargeCar_Brand':\n class_label = pd.read_csv(\"/NAS/shenjintong/Dataset/ItargeCar/csv_dataset/brand_class_list.csv\")\n class_label.columns = ['label','class']\n bz, nc, h, w = feature_conv.shape\n for i, idx in enumerate(class_idx):\n count = args.batch_cnt_val * args.batch_size + i\n # pull the prediction and label info for this sample\n size_upsample = (448, 448)\n data=dataset_pd.loc[[count]]\n\n class_name = class_label.query('label==%d' % idx).values[0, 1]\n index=data['Unnamed: 0'].values[0]\n predicted=class_name.split('-')[0]\n true_class=data['class'].values[0].split('-')[0]\n x0 = data['x0'].values[0]\n x1 = data['x1'].values[0]\n y0 = data['y0'].values[0]\n y1 = data['y1'].values[0]\n # only save wrongly classified images\n if not predicted==true_class:\n # read and crop the image\n img = cv2.imread(img_name[i])\n if not args.no_bbox:\n if not x0 == y1 == x1 == 0:\n img = img[y0:y1, x0:x1]\n # extract the CAM\n cam = np.dot(weight_softmax[idx],feature_conv[i].reshape((nc, h*w)))\n cam = cam.reshape(h, w)\n cam = cam - np.min(cam)\n cam_img = cam / np.max(cam)\n cam_img = np.uint8(255 * cam_img)\n heatmap=cv2.resize(cam_img, size_upsample)\n img = cv2.resize(img, size_upsample)\n color_map = cv2.applyColorMap(heatmap.astype(np.uint8), cv2.COLORMAP_JET)\n attention_image = cv2.addWeighted(img, 0.5, color_map.astype(np.uint8), 0.5, 0)\n # overlay the result text\n string=\"true label: %s\\npredicted: %s\" %(data['class'].values[0].replace('(','').replace(')',''),class_name)\n attention_image = cv2ImgAddText(attention_image, string, 10, 10, (255, 0, 0), 20)\n # choose the output sink\n # save path: ./imgs/<describe>/<class>_<index>.jpg\n if sw is not None:\n attention_image = cv2.cvtColor(attention_image, cv2.COLOR_BGR2RGB)\n attention_image = attention_image.transpose((2, 0, 1))\n sw.add_image('attention_image', attention_image)\n else:\n mkdir(os.path.join(\"imgs\", args.describe))\n cv2.imwrite(os.path.join(\"imgs\", args.describe,'%s_%d.jpg' % (data['class'].values[0],index)), attention_image)\n
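\n# note: these are standard class activation maps, CAM_c(x, y) = sum_k w_ck * f_k(x, y):\n# the class-c classifier weights applied at each spatial position of the final\n# conv feature maps, then min-max normalised to [0, 255] for display\n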
\ndef cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20):\n from PIL import Image, ImageDraw, ImageFont\n if (isinstance(img, np.ndarray)): # check whether this is an OpenCV-style image (numpy array)\n img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n draw = ImageDraw.Draw(img)\n fontText = ImageFont.truetype('/NAS/shenjintong/Dataset/ItargeCar/scripts/simsun.ttc', textSize, encoding=\"utf-8\")\n draw.text((left, top), text, textColor, font=fontText)\n return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)\n\n\ndef mkdir(path):\n path = path.strip()\n path = path.rstrip(\"\\\\\")\n isExists = os.path.exists(path)\n\n if not isExists:\n os.makedirs(path)\n print(path + ' successfully created')\n return True\n\ndef single_CAM(feature_conv, weight_softmax, class_idx,shape,dataset_pd,sw=None):\n # generate the class activation maps upsample to 256x256\n # print predicted result\n # (note: this ad-hoc helper relies on module-level img and args)\n class_label = pd.read_csv('/NAS/shenjintong/Dataset/ItargeCar/class_list.csv')\n class_name = class_label.query('model0219==%d' % class_idx).values[0, 1]\n\n index=dataset_pd['Unnamed: 0'].values[0]\n predicted=class_name.split('-')[0]\n true_brand=dataset_pd['class'].values[0].split('-')[0]\n if not predicted==true_brand:\n size_upsample = (shape[1], shape[0])\n nc, h, w = feature_conv.shape\n cam = np.dot(weight_softmax[class_idx],feature_conv.reshape((nc, h*w)))\n cam = cam.reshape(h, w)\n cam = cam - np.min(cam)\n cam_img = cam / np.max(cam)\n cam_img = np.uint8(255 * cam_img)\n heatmap=cv2.resize(cam_img, size_upsample)\n color_map = cv2.applyColorMap(heatmap.astype(np.uint8), cv2.COLORMAP_JET)\n attention_image = cv2.addWeighted(img, 0.5, color_map.astype(np.uint8), 0.5, 0)\n string=\"label: %s\\npredicted: %s \" %(dataset_pd['class'].values[0],class_name)\n attention_image = cv2ImgAddText(attention_image, string, 10, 10, (255, 0, 0), 20)\n if sw is not None:\n attention_image = cv2.cvtColor(attention_image, cv2.COLOR_BGR2RGB)\n attention_image = attention_image.transpose((2, 0, 1))\n sw.add_image('attention_image', attention_image)\n else:\n mkdir(os.path.join(\"imgs\",args.describe))\n cv2.imwrite(os.path.join(\"imgs\", args.describe,'test_%d.jpg' % (index)), attention_image)\n\n\nif __name__ == '__main__':\n args = parse_args()\n # args.dataset='ItargeCar_0520'\n # args.backbone='resnet50'\n # args.batch_size=2\n # args.num_workers=2\n # args.version='test'\n # args.resume=\"/NAS/shenjintong/DCL/net_model/DCL_0520data_147_129_refine_51415_ItargeCar_0520/model_best.pth\"\n # # args.resume =\"/NAS/shenjintong/Tools/mmdnn/pytorch2caffe/DCL/DCL.pth\"\n # args.describe='feature'\n # args.resize_resolution=147\n # args.crop_resolution=129\n # # args.anno=\"/NAS/shenjintong/Tools/mmdnn/pytorch2caffe/inference_set.csv\"\n # args.result_path=\"/NAS/shenjintong/Tools/mmdnn/pytorch2caffe/\"\n # args.feature=True\n print(args)\n print(args.anno)\n # # todo: debug\n # args.anno = \"/NAS/shenjintong/Dataset/ItargeCar/class_originbox/test_info.csv\"\n # args.resume= \"/NAS/shenjintong/DCL/net_model/DCL_512_448_41123_ItargeCar/model_best.pth\"\n # args.CAM=True\n # args.opencv_save=True\n\n\n Config = LoadConfig(args, args.version)\n Config.cls_2xmul = True\n Config.cls_2 = False\n Config.no_loc = args.no_loc\n # sw define\n Config.size=(args.crop_resolution,args.crop_resolution)\n if args.log_dir:\n sw_log = args.log_dir\n sw = SummaryWriter(log_dir=sw_log)\n\n transformers = load_data_transformers(args.resize_resolution, args.crop_resolution, args.swap_num)\n\n # args.version normally just picks which annotation csv to load; when --anno\n # is given, bypass that and read the file path directly\n if args.anno:\n dataset_pd = pd.read_csv(args.anno)\n else:\n dataset_pd = Config.val_anno if args.version == 'val' else 
Config.test_anno\n\n data_set = dataset(Config,\\\n anno=dataset_pd,\\\n swap=transformers[\"None\"],\\\n totensor=transformers['test_totensor'],\\\n test=True)\n\n dataloader = torch.utils.data.DataLoader(data_set,\\\n batch_size=args.batch_size,\\\n shuffle=False,\\\n num_workers=args.num_workers,\\\n collate_fn=collate_fn4test)\n\n setattr(dataloader, 'total_item_len', len(data_set))\n\n cudnn.benchmark = True\n\n model = MainModel(Config)\n model_dict=model.state_dict()\n pretrained_dict=torch.load(args.resume)\n pretrained_dict = {k[7:]: v for k, v in pretrained_dict.items() if k[7:] in model_dict}\n model_dict.update(pretrained_dict)\n model.load_state_dict(model_dict)\n\n # add tensorboard graph of structure\n if args.log_dir:\n if args.add_structure_graph:\n dummy_input = (torch.zeros(1, 3, args.crop_resolution, args.crop_resolution))\n outputs = model(dummy_input)\n sw.add_graph(model, dummy_input)\n\n # get the classifier weight matrix (3202*2048); for DCL it corresponds to the\n # weights 4 layers from the end, for ResNet50 it corresponds to ...\n params=list(model.parameters())\n weight_softmax = np.squeeze(params[-3].data.numpy())\n\n model.cuda()\n model = nn.DataParallel(model)\n model.train(False)\n\n if args.feature:\n result=[]\n # feature = pd.DataFrame(columns=range(len(data_set)))\n\n with torch.no_grad():\n result_1=[]\n confidence_1=[]\n all_result=[]\n feature=[]\n val_size = ceil(len(data_set) / dataloader.batch_size)\n result_gather = {}\n count_bar = tqdm(total=dataloader.__len__())\n count = 0\n Total_time = 0.0\n for batch_cnt_val, data_val in enumerate(dataloader):\n args.batch_cnt_val=batch_cnt_val\n count_bar.update(1)\n inputs, labels, img_name = data_val\n inputs = Variable(inputs.cuda())\n # labels = Variable(torch.from_numpy(np.array(labels)).long().cuda())\n T1=time.time()\n outputs = model(inputs)\n outputs_pred = outputs[0]\n # add all result save\n # all_result.extend(outputs_pred.cpu().numpy().tolist())\n outputs_pred_soft=F.softmax(outputs_pred, dim=1) # explicit dim avoids the implicit-dim deprecation\n # print(time.time()-T1)\n Total_time+=time.time()-T1\n\n # add all result save\n all_result.extend(outputs_pred_soft.cpu().numpy().tolist())\n outputs_confidence, outputs_predicted = torch.max(outputs_pred_soft, 1)\n outputs_feature,_= torch.max(outputs_pred, 1)\n if args.feature:\n # result.append(outputs_pred.cpu().numpy()[0].tolist()[])\n result.append(outputs_confidence.cpu().numpy()[0].tolist())\n result.append(outputs_predicted.cpu().numpy()[0].tolist())\n if args.CAM:\n # visualization of the feature maps\n if args.opencv_save:\n # single_CAM(outputs[3].cpu().numpy()[image_in_batch], weight_softmax,\n # outputs_predicted[image_in_batch], img.shape,data)\n returnCAM(args, outputs[3].cpu().numpy(), weight_softmax, outputs_predicted, img_name, dataset_pd)\n else:\n returnCAM(args, outputs[3].cpu().numpy(), weight_softmax, outputs_predicted, img_name, dataset_pd,sw)\n # single_CAM(outputs[3].cpu().numpy()[image_in_batch], weight_softmax,\n # outputs_predicted[image_in_batch], img.shape,data,sw)\n # CAM_test(outputs[3].cpu().numpy()[image_in_batch], weight_softmax, img.shape, sw)\n\n result_1.extend(outputs_predicted.cpu().numpy().tolist())\n confidence_1.extend(outputs_confidence.cpu().numpy().tolist())\n feature.extend(outputs_feature.cpu().numpy().tolist())\n\n all_result=np.array(all_result)\n predicted_1 = pd.Series(result_1)\n\n dataset_pd['predicted'] = predicted_1\n dataset_pd['confidence']=pd.Series(confidence_1)\n dataset_pd['feature']=pd.Series(feature)\n average_time=Total_time/len(data_set)\n print(\"Average_time: %.4f\" %average_time)\n\n if args.describe:\n if not 
os.path.exists(os.path.join(args.result_path, args.describe)):\n os.mkdir(os.path.join(args.result_path, args.describe))\n if args.version=='test':\n save_path = os.path.join(args.result_path, args.describe,'test_raw_result.csv')\n else:\n save_path = os.path.join(args.result_path, args.describe, 'val_raw_result.csv')\n dataset_pd.to_csv(save_path)\n if args.feature:\n m_index = pd.MultiIndex.from_product([['cv'], range(10), ['feature', 'index']],\n names=[\"resize_type\", \"image_index\", \"predicted\"])\n predicted = pd.DataFrame(result, index=m_index)\n predicted.columns.names = ['Top1-5']\n predicted.to_csv(\"/NAS/shenjintong/Tools/mmdnn/pytorch2caffe/predicted_cv.csv\")\n # save_path=os.path.join(args.result_path, args.describe, 'feature.csv')\n # feature.to_csv(save_path)\n # save_npy = os.path.join(args.result_path, args.save_name.split('.')[0]+'.npy')\n # np.save(save_npy,all_result)\n\n\n","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":16822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"460873020","text":"#!/usr/bin/env python3\n\"\"\"Small example OSC server\n\nfrom: https://github.com/kivy/oscpy\n\nServer (thread)\n\"\"\"\nfrom oscpy.server import OSCThreadServer\nfrom time import sleep\n\ndef callback(*values):\n print(\"got values: {}\".format(values))\n\nosc = OSCThreadServer() # See sources for all the arguments\n\n# You can also use an \\*nix socket path here\nsock = osc.listen(address='0.0.0.0', port=8000, default=True)\n# osc.bind(b'/address', callback)\nosc.bind(b'/ping', callback)\nsleep(1000)\nosc.stop() # Stop the default socket\n\nosc.stop_all() # Stop all sockets\n\n# Here the server is still alive, one might call osc.listen() again\n\nosc.terminate_server() # Request the handler thread to stop looping\n\nosc.join_server() # Wait for the handler thread to finish pending tasks and exit","sub_path":"osc/oscpy/osc_server.py","file_name":"osc_server.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"301747968","text":"import unittest\nfrom card.card import Card\nfrom deck.deck import Deck\n\n\nclass CardTestCase(unittest.TestCase):\n # Unit tests for Card class\n\n def test_card_representation(self):\n # Is card representation correct?\n card = Card(\"A\", \"\\u2666\")\n self.assertEqual(str(card), \"A\")\n card = Card(\"10\", \"\\u2666\")\n self.assertEqual(str(card), \"10\")\n\n def test_card_is_ace(self):\n # Is an Ace recognised correctly?\n card = Card(\"A\", \"\\u2666\")\n self.assertTrue(card.suit)\n\n\nclass DeckTestCase(unittest.TestCase):\n # Unit tests for Deck class\n\n def test_size_of_deck(self):\n # Are there 52 cards in the deck?\n new_deck = Deck()\n self.assertEqual(len(new_deck.deck), 52)\n\n def test_shuffle_randomizes_deck(self):\n # Does the deck get shuffled?\n first_deck = Deck()\n first_deck.shuffle()\n second_deck = Deck()\n second_deck.shuffle()\n self.assertNotEqual(str(first_deck), str(second_deck))\n\n def test_deal_removes_a_card(self):\n # Does a deal remove one card from the deck?\n deck = Deck()\n the_number_before = len(deck.deck)\n deck.deal() # assumes Deck.deal() removes one card, per this test's intent\n the_number_after = len(deck.deck)\n self.assertEqual(the_number_before - 1, the_number_after)\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","sub_path":"tests/test_modules.py","file_name":"test_modules.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"192434315","text":"\n# coding: utf-8\n\n# In[ ]:\n\n# This does PCA background subtraction of the AC Her data, specifically\n# 1. reads in PCA component cube\n# 2. masks and subtracts the median (just a constant) from each science frame\n# 2. decomposes each science frame into its PCA components (with a mask over the PSF)\n# 3. subtracts the reconstructed background\n# 4. saves the background-subtracted images\n\n# created 2018 Aug 20 by E.S.\n\n\n# In[1]:\n\nfrom modules import *\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy\nfrom astropy.io import fits\nimport pandas as pd\nfrom datetime import datetime\nimport os\nimport sklearn\nfrom sklearn.decomposition import PCA\nfrom sklearn.decomposition import RandomizedPCA\nimport time\nfrom regions import PixCoord, CircleSkyRegion, CirclePixelRegion, PolygonPixelRegion\nfrom pyregion import read_region_as_imagecoord, get_mask\nimport time\nimport multiprocessing as mp\nfrom multiprocessing import Process, Queue, Pool\nget_ipython().magic(u'matplotlib inline')\n#%matplotlib qt\n\n\n# In[2]:\n\n# stem \n\nstem = ('/home/../../media/unasemaje/Elements/lbti_data_reduction/180524_fizeau_ac_her/01_BPM_readout_glitch_correction/')\n#stem = ('/Users/nyumbani/Downloads/')\n\n\n# # FCN TO DO PCA SUBTRACTION OF RANGE OF\n# # SCIENCE FRAMES\n\n# In[3]:\n\ndef pca_fit_and_subtract_parallel(inputArray):\n \n '''\n INPUTS:\n a 1D array with \n [0]: cube_start_framenum: starting frame number of the PCA component cube\n [1]: cube_stop_framenum: stopping frame number (inclusive) \" \"\n [2]: sci_framenum: science images to subtract from\n [3]: n_PCA: number of PCA components to reconstruct the background with\n \n OUTPUTS:\n none; the background-subtracted FITS files are written back out\n '''\n \n # unpack values\n cube_start_framenum = inputArray[0]\n cube_stop_framenum = inputArray[1]\n sci_framenum = inputArray[2]\n n_PCA = inputArray[3]\n\n # read in PCA cube\n cube_string = (stem+'pca_cubes/background_PCA_hunzikerStyle_seqStart_'\n +str(\"{:0>6d}\".format(cube_start_framenum))+'_seqStop_'+str(\"{:0>6d}\".format(cube_stop_framenum))+'.fits')\n pca_cube = fits.getdata(cube_string,0,header=False)\n\n # apply mask over weird detector regions to PCA cube\n pca_cube = np.multiply(pca_cube,mask_weird)\n \n # science filename string (note this has already been classically background-subtracted)\n img_string = stem+'../02_classical_background_subted/02b_second_attempt/lm_180524_'+str(\"{:0>6d}\".format(sci_framenum))+'.fits'\n\n # if FITS science file exists in the first place\n if ((np.mod(sci_framenum,1) == 0) & os.path.isfile(img_string)): \n \n start_time = time.time()\n print('Found file '+'lm_180524_'+str(\"{:0>6d}\".format(sci_framenum))+'.fits') \n \n # read in science image\n sciImg, header = fits.getdata(img_string,0,header=True)\n \n # apply mask over weird detector regions to science image\n sciImg = np.multiply(sciImg,mask_weird)\n \n ## mask the PSF\n \n # define region\n psf_loc = find_airy_psf(sciImg) # center of science PSF\n print('PSF location:')\n print(psf_loc)\n radius = 30.\n\n # skip frame if detected PSF is so close to the edge that the masked region goes off the frame\n # (this can be an issue if, for example, both Airy PSFs are not overlapped in the first place)\n if 
np.logical_or(psf_loc[0]+radius > np.shape(sciImg)[0], psf_loc[0]-radius < 0):\n return # skip this frame\n\n center = PixCoord(x=psf_loc[1], y=psf_loc[0])\n region = CirclePixelRegion(center, radius)\n mask_psf_region = region.to_mask()\n # apply the mask to science array\n psf_mask = np.ones(np.shape(sciImg)) # initialize arrays of same size as science image\n mask_psf_region.data[mask_psf_region.data == 1] = np.nan # make zeros within mask cutout (but not in the mask itself) nans\n mask_psf_region.data[mask_psf_region.data == 0] = 1\n ##mask_psf_region.data[mask_psf_region.data == -99999] = 0 # have to avoid nans in the linear algebra\n psf_mask[mask_psf_region.bbox.slices] = mask_psf_region.data # place the mask cutout (consisting only of 1s) onto the array of nans\n sciImg_masked = np.multiply(sciImg,psf_mask) # this is now the masked science frame \n \n # subtract the median (just a constant) from the remaining science image\n sciImg_psf_masked = np.subtract(sciImg_masked,np.nanmedian(sciImg_masked)) # where PSF is masked\n sciImg_psf_not_masked = np.subtract(sciImg,np.nanmedian(sciImg_masked)) # where PSF is not masked\n \n # apply the PSF mask to PCA slices, with which we will do the fitting\n pca_cube_masked = np.multiply(pca_cube,psf_mask) \n \n ## PCA-decompose\n \n # flatten the science array and PCA cube \n pca_not_masked_1ds = np.reshape(pca_cube,(np.shape(pca_cube)[0],np.shape(pca_cube)[1]*np.shape(pca_cube)[2]))\n sci_masked_1d = np.reshape(sciImg_psf_masked,(np.shape(sciImg_masked)[0]*np.shape(sciImg_masked)[1]))\n pca_masked_1ds = np.reshape(pca_cube_masked,(np.shape(pca_cube_masked)[0],np.shape(pca_cube_masked)[1]*np.shape(pca_cube_masked)[2]))\n \n ## remove nans from the linear algebra\n \n # indices of finite elements over a single flattened frame\n idx = np.logical_and(np.isfinite(pca_masked_1ds[0,:]), np.isfinite(sci_masked_1d)) \n \n # reconstitute only the finite elements together in another PCA cube and a science image\n pca_masked_1ds_noNaN = np.nan*np.ones((len(pca_masked_1ds[:,0]),np.sum(idx))) # initialize array with slices the length of number of finite elements\n for t in range(0,len(pca_masked_1ds[:,0])): # for each PCA component, populate the arrays without nans with the finite elements\n pca_masked_1ds_noNaN[t,:] = pca_masked_1ds[t,idx]\n sci_masked_1d_noNaN = sci_masked_1d[idx] # science frame, finite elements only\n \n # the vector of component amplitudes\n soln_vector = np.linalg.lstsq(pca_masked_1ds_noNaN[0:n_PCA,:].T, sci_masked_1d_noNaN, rcond=None)\n \n # reconstruct the background based on that vector\n # note that the PCA components WITHOUT masking of the PSF location is being\n # used to reconstruct the background\n recon_backgrnd_2d = np.dot(pca_cube[0:n_PCA,:,:].T, soln_vector[0]).T\n \n # do the actual subtraction\n sciImg_subtracted = np.subtract(sciImg_psf_not_masked,recon_backgrnd_2d)\n \n # save reconstructed background for checking\n hdul = fits.PrimaryHDU(recon_backgrnd_2d, header=header)\n hdul.writeto(stem + '../03_pca_background_subted/reconstructed_backgrounds/recon_background_'+str(\"{:0>6d}\".format(sci_framenum))+'_nPCA'+str(\"{:0>3d}\".format(n_PCA))+'.fits', \n overwrite=True)\n \n # save masked science frame BEFORE background-subtraction\n hdul = fits.PrimaryHDU(sciImg_psf_masked, header=header)\n hdul.writeto(stem + '../03_pca_background_subted/masked_science_frames/masked_science_frame_pre_bkgrnd_subt_'+str(\"{:0>6d}\".format(sci_framenum))+'.fits', \n overwrite=True) \n \n
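 # note on the fit above: the background model is B = sum_j a_j * P_j, with\n # a = argmin ||P_masked a - s_masked||^2 from the least-squares fit on the\n # PSF-masked pixels; B is then evaluated with the unmasked components so\n # the region under the PSF gets filled in\n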
 # write masked background-subtracted science frame (and occasionally background frames) out\n background_subtracted_masked = np.multiply(sciImg_subtracted,mask_weird)\n background_subtracted_masked = np.multiply(background_subtracted_masked,psf_mask)\n hdul = fits.PrimaryHDU(background_subtracted_masked, header=header)\n hdul.writeto(stem + '../03_pca_background_subted/masked_science_frames/masked_science_frame_post_bkgrnd_subt_'+str(\"{:0>6d}\".format(sci_framenum))+'_nPCA'+str(\"{:0>3d}\".format(n_PCA))+'.fits', \n overwrite=True)\n \n \n # write background-subtracted science frame (and occasionally background frames) out\n hdul = fits.PrimaryHDU(sciImg_subtracted, header=header)\n hdul.writeto(stem + '../03_pca_background_subted/lm_180524_'+str(\"{:0>6d}\".format(sci_framenum))+'_nPCA'+str(\"{:0>3d}\".format(n_PCA))+'.fits', \n overwrite=True)\n print('Frame '+str(\"{:0>6d}\".format(sci_framenum))+' written out. PCA = '+str(n_PCA))\n print('Elapsed time:')\n elapsed_time = time.time() - start_time\n print('--------------------------------------------------------------')\n print(elapsed_time)\n \n else:\n \n print('No file '+'lm_180524_'+str(\"{:0>6d}\".format(sci_framenum))+'.fits')\n print('--------------------------------------------------------------')\n\n\n# In[4]:\n\n# for background subtracting with 100 PCA components\n\nnPCA = 100\n\n# array containing, for each nod, \n# [0]: starting frame of background sequence\n# [1]: ending frame of background sequence (inclusive)\n# [2]: starting science frame to background-subtract\n# [3]: ending science frame to background-subtract (inclusive)\n# [4]: number of PCA components to use in background reconstruction\n\n# this contains the info for the whole dataset\nparameterArray = [[2083, 2282, 83, 2082, nPCA],\n [4683, 4882, 2283, 4682, nPCA],\n [7285, 7483, 4883, 7284, nPCA],\n [9484, 9683, 7484, 9483, nPCA],\n [11684, 11883, 9682, 11683, nPCA],\n [13884, 14083, 11884, 13883, nPCA],\n [16084, 16283, 14084, 16083, nPCA],\n [18284, 18483, 16284, 18283, nPCA],\n [25884, 26083, 18484, 21683, nPCA],\n [25884, 26083, 23884, 25883, nPCA],\n [28084, 28283, 26084, 28083, nPCA],\n [28084, 28283, 28284, 28883, nPCA],\n [30884, 31083, 28884, 30883, nPCA],\n [32884, 33083, 31084, 32883, nPCA],\n [34884, 35083, 33084, 34883, nPCA],\n [37084, 37283, 35084, 37083, nPCA],\n [39284, 39483, 37284, 39283, nPCA],\n [41484, 41683, 39484, 41483, nPCA],\n [44577, 44776, 41684, 44576, nPCA],\n [46777, 47076, 44777, 46776, nPCA],\n [49077, 49276, 47077, 49076, nPCA],\n [51277, 51676, 49277, 51276, nPCA],\n [53677, 53876, 51677, 53676, nPCA],\n [55877, 56108, 53877, 55876, nPCA],\n [58110, 58308, 56109, 58109, nPCA],\n [60309, 60508, 58309, 60308, nPCA],\n [62509, 62708, 60509, 62508, nPCA],\n [64709, 64908, 62709, 64708, nPCA],\n [66909, 67108, 64909, 66908, nPCA]]\n\n
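# e.g. the first row above expands, via return_array_one_block below, into\n# [2083, 2282, 83, nPCA], [2083, 2282, 84, nPCA], ..., one row per science\n# frame 83 through 2082\n\n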
# In[5]:\n\ndef return_array_one_block(sliceArray):\n '''\n This takes a 1D array with background frame range, science frame range, and N_PCA information\n and returns an expanded array where each row corresponds to a single science frame\n '''\n \n # INPUT: an array containing \n # [0]: starting frame of background sequence\n # [1]: ending frame of background sequence (inclusive)\n # [2]: starting science frame to background-subtract\n # [3]: ending science frame to background-subtract (inclusive)\n # [4]: number of PCA components to use in background reconstruction\n \n # OUTPUT: an array of arrays where each element corresponds to the \n # parameters of a single science image (i.e., the input array elements\n # [0], [1], [4] are replicated for each science frame. \n \n # unpack some values\n science_start_frame = sliceArray[2]\n science_end_frame = sliceArray[3]\n \n sliceArrayTiled = np.tile(sliceArray,(science_end_frame-science_start_frame+1,1)) # tile, where each row corresponds to a science frame\n sliceArrayTiled2 = np.delete(sliceArrayTiled,2,1) # delete col [2]\n\n # convert new col [2] (old col [3]) to be entries for individual frame numbers\n for sciframeNum in range(science_start_frame,science_end_frame+1):\n t = int(sciframeNum-science_start_frame) # index denoting the row\n sliceArrayTiled2[t][2] = int(sciframeNum) # insert frame number\n \n # The table now involves columns\n # [0]: background_start_frame\n # [1]: background_end_frame\n # [2]: science frame number\n # [3]: number of PCA components to reconstruct the background\n\n return sliceArrayTiled2\n\n\n# In[6]:\n\ndef main():\n \n ncpu = mp.cpu_count()\n print('Number cores: '+str(int(ncpu)))\n \n start_time_very_beginning = time.time()\n \n # loop over every nod position and pool the reduction over all science frames in that nod\n for r in range(0,np.shape(parameterArray)[0]):\n pool = Pool(ncpu) # create pool object\n print('Working on reducing parameter array')\n print(parameterArray[r])\n indivSciFramesArray = return_array_one_block(parameterArray[r]) # take info for that nod block, and return array for individual science frames\n list_dicts = pool.map(pca_fit_and_subtract_parallel,indivSciFramesArray) # map the individual science frames across cores\n pool.close() # release the workers once this nod is done\n pool.join()\n print('---------------------------------')\n \n elapsed_time_since_beginning = time.time() - start_time_very_beginning\n print('Total elapsed time: '+str(elapsed_time_since_beginning))\n\n\n# In[ ]:\n\n##################\n\n# do it!\nif __name__ == '__main__':\n main()\n","sub_path":"pca_background_subtraction_parallel.py","file_name":"pca_background_subtraction_parallel.py","file_ext":"py","file_size_in_byte":13355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"554722285","text":"#!/usr/bin/env python\n\nimport argparse\nimport glob\n\nimport pandas as pd\n\n\n\"\"\"\nMerge multiple exomiser TSVS into a single TSV\n\n\"\"\"\n\n\n
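# usage sketch (added; the pattern and output path are hypothetical):\n# python bin/merge_exomiser_tsvs.py --tsv_variant_pattern '*.variants.tsv' --output merged.tsv\n\n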
parser = argparse.ArgumentParser(description='Merge multiple exomiser TSVS into a single TSV')\nparser.add_argument('--tsv_variant_pattern', type=str, nargs=1, required=True,\n\t\t\t\thelp='TSV pattern e.g. *.variants.tsv')\nparser.add_argument('--output', type=str, nargs=1, required=True,\n\t\t\t\thelp='Output TSV location')\n\n\nargs = parser.parse_args()\n\ntsv_variant_pattern = args.tsv_variant_pattern[0]\noutput = args.output[0]\n\ncolumns = ['#CHROM',\n\t\t 'POS',\n\t\t 'REF',\n\t\t 'ALT',\n\t\t 'EXOMISER_GENE',\n\t\t 'EXOMISER_VARIANT_SCORE',\n\t\t 'EXOMISER_GENE_PHENO_SCORE',\n\t\t 'EXOMISER_GENE_VARIANT_SCORE',\n\t\t 'EXOMISER_GENE_COMBINED_SCORE',\n\t\t 'CONTRIBUTING_VARIANT']\n\n\n# get TSV files\ntsv_variant_files = glob.glob(tsv_variant_pattern)\n\nmaster_df = pd.DataFrame()\n\n# load TSV files\nfor tsv in tsv_variant_files:\n\t\n\ttemp_df = pd.read_csv(tsv, sep='\\t')\n\t\n\ttemp_df = temp_df[columns]\n\t\n\ttemp_df['file'] = tsv\n\t\n\tmaster_df = pd.concat([master_df, temp_df]) # DataFrame.append is deprecated\n\n\n# sort by score\nmaster_df.sort_values('EXOMISER_GENE_COMBINED_SCORE', ascending=False, inplace=True)\n\n# write to file\nmaster_df.to_csv(output, sep='\\t', index=False)\n","sub_path":"bin/merge_exomiser_tsvs.py","file_name":"merge_exomiser_tsvs.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"397346179","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport multiprocessing\nimport os\nimport random\n\nimport time\n\n\n# MultiProcess Lock\n# When several processes access a shared resource, a Lock avoids conflicting accesses.\ndef worker_with(lock, file):\n with lock:\n fs = open(file, 'a+')\n n = 2\n while n > 0:\n fs.write(\"Lock acquired via with\\n\")\n n -= 1\n fs.close()\n\n\ndef worker_no_with(lock, file):\n lock.acquire()\n try:\n fs = open(file, 'a+')\n n = 2\n while n > 0:\n fs.write(\"Lock acquired directly\\n\")\n n -= 1\n fs.close()\n finally:\n lock.release()\n\n\nif __name__ == \"__main__\":\n lock = multiprocessing.Lock()\n file = \"ReadFiles/LockTest.txt\"\n w = multiprocessing.Process(target=worker_with, args=(lock, file))\n nw = multiprocessing.Process(target=worker_no_with, args=(lock, file))\n w.start()\n nw.start()\n print('End!')\n\n\n# MultiProcess Semaphore\n# A Semaphore caps how many processes may access a shared resource at once,\n# e.g. the maximum number of connections in a pool.\ndef work_semaphore(s, interval):\n s.acquire()\n print(multiprocessing.current_process().name + 'acquired')\n time.sleep(interval)\n print(multiprocessing.current_process().name + 'release')\n s.release()\n\n\nif __name__ == '__main__':\n semaphore = multiprocessing.Semaphore(3)\n for i in range(5):\n p = multiprocessing.Process(target=work_semaphore, args=(semaphore, 1))\n p.start()\n\n\n# running result:\n###########################################\n\n# Process-4acquired\n# Process-6acquired\n# Process-5acquired\n\n# Process-4release\n# Process-7acquired\n\n# Process-6release\n# Process-3acquired\n\n# Process-5release\n# Process-7release\n# Process-3release\n\n###########################################\n\n\n# Event\n# Event implements synchronisation between processes.\n\n# A global \"Flag\" is kept: while the \"Flag\" is False, event.wait blocks;\n# once the \"Flag\" is True, event.wait no longer blocks.\n# clear : set the \"Flag\" to False\n# set : set the \"Flag\" to True\ndef wait_for_event(e):\n print('Wait for event : start')\n # block until the Flag becomes true\n e.wait()\n print('Wait for event : do.........')\n print('Wait for event : e.is_set() -> %s' % (str(e.is_set())))\n\n\ndef wait_for_timeout(e, t):\n print('Wait for timeout : start')\n # wait for t seconds and then timeout\n e.wait(t)\n print('Wait for timeout : do.........')\n print('Wait for timeout : e.is_set() -> %s' % str(e.is_set()))\n\n
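\n# note: e.wait(timeout) also returns the flag state, so a caller can tell\n# \"set\" apart from \"timed out\", e.g. was_set = e.wait(2)\n\n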
def write_pro(q):\n    print('Process to write : %s' % os.getpid())\n    for value in ['A', 'B', 'C']:\n        print('put %s into queue' % value)\n        q.put(value)\n        start = time.time()\n        time.sleep(random.random() * 2)\n        end = time.time()\n        print('write process runs %s seconds' % (end - start))\n\n\ndef read_pro(q):\n    print('Process to read : %s' % os.getpid())\n    while True:\n        value = q.get(True)\n        print('get %s from queue' % value)\n\n\nif __name__ == '__main__':\n    q = multiprocessing.Queue()\n    write = multiprocessing.Process(target=write_pro, args=(q,))\n    read = multiprocessing.Process(target=read_pro, args=(q,))\n    write.start()\n    read.start()\n    # wait for the write process to finish\n    write.join()\n    print('write end')\n    # read_pro loops forever, so it cannot be joined and has to be terminated\n    read.terminate()\n    print('read end')\n\n\n# running result:\n###########################################\n\n# Process to write : 2316\n# Process to read : 2248\n# put A into queue\n# get A from queue\n# write process runs 0.6520373821258545 seconds\n# put B into queue\n# get B from queue\n# write process runs 0.36802101135253906 seconds\n# put C into queue\n# get C from queue\n# write process runs 1.2410709857940674 seconds\n# write end\n# read end\n\n###########################################\n\n\n# Pipe\n# Pipe() returns (conn1, conn2), the two ends of one pipe.\n# Pipe() takes a duplex argument: if duplex is True (the default) the pipe is bidirectional,\n# i.e. both conn1 and conn2 can send and receive.\n# If duplex is False, conn1 can only receive messages and conn2 can only send them.\n#\n# send() and recv() send and receive a message respectively.\n# For example, in duplex mode conn1.send() sends a message and conn1.recv() receives one.\n# If no message is available, recv() blocks; if the pipe has been closed, recv() raises EOFError.\n\ndef send_pro(pipe):\n    while True:\n        for i in range(1000000):\n            print('send %s' % i)\n            pipe.send(i)\n            time.sleep(1)\n\n\ndef rec_pro(pipe):\n    while True:\n        print('receive :', pipe.recv())\n        time.sleep(1)\n\n\nif __name__ == '__main__':\n    pipe = multiprocessing.Pipe()\n\n    print(dir(pipe[0]))\n    print(dir(pipe[1]))\n\n    send = multiprocessing.Process(target=send_pro, args=(pipe[0],))\n    rec = multiprocessing.Process(target=rec_pro, args=(pipe[1],))\n\n    send.start()\n    rec.start()\n\n    send.join()\n    rec.join()\n\n# running result:\n###########################################\n\n# send 0\n# receive : 0\n# send 1\n# receive : 1\n# ... ... 
...\n# send 1616\n# receive : 1616\n# send 1617\n# receive : 1617\n# send 1618\n# receive : 1618\n\n###########################################\n","sub_path":"P_12_MultiProcessing.py","file_name":"P_12_MultiProcessing.py","file_ext":"py","file_size_in_byte":6896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"651364474","text":"from django.shortcuts import render\n\n# Create your views here.\n\nfrom .serializers import grpmgmtserializer, grpmembersserializer\nfrom rest_framework.response import Response\nfrom .models import grpmgmt, grpmembers\nfrom rest_framework import status\nimport coreapi, coreschema\nfrom rest_framework.schemas import AutoSchema, ManualSchema\nfrom rest_framework.decorators import api_view, renderer_classes,permission_classes, schema\n\nfrom sambaAPI.services.GrpService import GrpService\nfrom sambaAPI.services.connection import ConnectionService\n\n\n\ncustom_schema = AutoSchema(manual_fields=[coreapi.Field(\"name\",required=True,location=\"form\",schema=coreschema.String()), \n\tcoreapi.Field(\"description\",required=True,location=\"form\",schema=coreschema.String()),\n\tcoreapi.Field(\"container\",required=True,location=\"form\",schema=coreschema.String()),\n\t])\n\n\ncreate_schema = AutoSchema(manual_fields=[coreapi.Field(\"name\",required=True,location=\"form\",schema=coreschema.String()),\n coreapi.Field(\"description\",required=True,location=\"form\",schema=coreschema.String()),\n coreapi.Field(\"container\",required=True,location=\"form\",schema=coreschema.String()),\n\tcoreapi.Field(\"mail_id\",required=True,location=\"form\",schema=coreschema.String()),\n#\tcoreapi.Field(\"group_type\",required=True,location=\"form\",schema=coreschema.String()),\n\tcoreapi.Field(\"notes\",required=True,location=\"form\",schema=coreschema.String()),\n ])\n\nadd_members_schema = AutoSchema(manual_fields=[coreapi.Field(\"groupname\", required=True,location=\"form\",schema=coreschema.String()),\n\tcoreapi.Field(\"listofnames\",required=True,location=\"form\",schema=coreschema.String()),\n\t])\n\n#list_members_schema = AutoSchema(manual_fields=[coreapi.Field(\"name\",required=True,location=\"form\",schema=coreschema.String()),\n#\t])\n\nremove_members_schema = AutoSchema(manual_fields=[coreapi.Field(\"groupname\", required=True,location=\"form\",schema=coreschema.String()),\n\tcoreapi.Field(\"listofnames\",required=True,location=\"form\",schema=coreschema.String()),\n\t])\n\n\n\n@api_view(['GET'])\ndef list(request, format=None):\n response = GrpService(ConnectionService('exza')).list()\n grps = []\n if response.data is None:\n print(response.description)\n return Response(response.description, status=response.status)\n for msg in response.data:\n print(msg)\n grp = grpmgmt()\n grp.name = msg.get('name')\n grp.description = msg.get('description')\n grp.container = msg.get('dn')\n grp.group_type = msg.get('group_type')\n grp.group_scope = msg.get('group_scope')\n grp.mail_id = msg.get('mail')\n grp.notes = msg.get('info')\n grps.append(grp)\n serializer = grpmgmtserializer(grps, many=True)\n return Response(serializer.data,status=status.HTTP_200_OK)\n\n\n@api_view(['GET'])\n#@schema(list_members_schema)\ndef list_members(request, name, format=None):\n response = GrpService(ConnectionService('exza')).list_members(name=name)\n if response.data is None:\n print(response.description)\n return Response(response.description, status=response.status)\n mems = []\n for msg in response.data:\n gm = grpmembers()\n gm.name = 
msg.get('name')\n        mems.append(gm)\n    print(response.description, response.status)\n    serializer = grpmembersserializer(mems, many=True)\n    return Response(serializer.data, status=status.HTTP_200_OK)\n\n\n@api_view(['POST'])\n@schema(create_schema)\ndef create(request, format=None):\n    print(request.data)\n    gp = grpmgmt()\n    if request.data != {}:\n        if request.data['name'] != '':\n            gp.name = request.data['name']\n        else:\n            return Response(\"group_name should not be empty\", status=status.HTTP_400_BAD_REQUEST)\n#        if request.data['description'] != '':\n        gp.description = request.data['description']\n#        else:\n#            return Response(\"description should not be empty\", status=status.HTTP_400_BAD_REQUEST)\n        if request.data['container'] != '':\n            gp.container = request.data['container']\n        else:\n            return Response(\"container should not be empty\", status=status.HTTP_400_BAD_REQUEST)\n#        gp.group_type = request.data['group_type']\n        gp.mail_id = request.data['mail_id']\n        gp.notes = request.data['notes']\n    else:\n        return Response(\"Invalid request\", status=status.HTTP_400_BAD_REQUEST)\n    response = GrpService(ConnectionService('exza')).create(grp=gp, request=request.data)\n    return Response(response.description, response.status)\n\n\n@api_view(['DELETE'])\ndef delete(request, name, format=None):\n    print(\"In delete: \" + name)\n    response = GrpService(ConnectionService('exza')).delete(name=name)\n    return Response(response.description, response.status)\n\n\n@api_view(['POST'])\n@schema(add_members_schema)\ndef add_members(request, format=None):\n    # the empty-input branches used to reference `response` before it was assigned\n    if request.data != {}:\n        if request.data['groupname'] != '':\n            if request.data['listofnames'] != '':\n                response = GrpService(ConnectionService('exza')).add_members(request=request.data)\n                return Response(response.description, response.status)\n            return Response(\"listofnames should not be empty\", status=status.HTTP_400_BAD_REQUEST)\n        return Response(\"groupname should not be empty\", status=status.HTTP_400_BAD_REQUEST)\n    return Response(\"Invalid request\", status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['DELETE'])\n@schema(remove_members_schema)\ndef remove_members(request, format=None):\n    if request.data != {}:\n        if request.data['groupname'] != '':\n            if request.data['listofnames'] != '':\n                response = GrpService(ConnectionService('exza')).remove_members(request=request.data)\n                return Response(response.description, response.status)\n            return Response(\"listofnames should not be empty\", status=status.HTTP_400_BAD_REQUEST)\n        return Response(\"groupname should not be empty\", status=status.HTTP_400_BAD_REQUEST)\n    return Response(\"Invalid request\", status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['GET'])\ndef show(request, name, format=None):\n    response = GrpService(ConnectionService('exza')).show(name=name)\n    if response.data is None:\n        # mirror the guard used in list(); response.data can be None on errors\n        print(response.description)\n        return Response(response.description, status=response.status)\n    grps = []\n    for msg in response.data:\n        print(msg)\n        grp = grpmgmt()\n        grp.name = msg.get('name')\n        grp.description = msg.get('description')\n        grp.container = msg.get('dn')\n        grp.group_type = msg.get('group_type')\n        grp.group_scope = msg.get('group_scope')\n        grp.mail_id = msg.get('mail')\n        grp.notes = msg.get('info')\n        grps.append(grp)\n    serializer = grpmgmtserializer(grps, many=True)\n    return Response(serializer.data, response.status)","sub_path":"sambaAPI/groupmgt/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"476089922","text":"import numpy as np\n\n\ndef load_dataset(save_file, labels_present=True):\n    \"\"\"Read a data set from file.\n\n    :param save_file: path to a whitespace-separated file with one point per line\n    :return:\n        - data points as array of shape (number of points, 2)\n        - labels as array; empty array if labels_present is False.\n    \"\"\"\n    f = open(save_file, 'r')\n    points = []\n    labels = []\n    for line in f:\n        proper_line = line.split(\"\\n\")[0]\n        str_list = proper_line.split(\" \")\n        points.append([float(str_list[0]), float(str_list[1])])\n        if labels_present:\n            label = float(str_list[2])\n            labels.append(label)\n    f.close()\n    points = np.vstack(points)\n    labels = np.array(labels)\n    return points, labels\n\n\ndef center(data):\n    mean = np.mean(data, axis=0)\n    return data - mean\n\n\ndef standardize(data):\n    \"\"\"Makes the variance along all axes equal to one.\n\n    :param data: CENTERED data\n    :return: standardized data.\n    \"\"\"\n    # dividing by the variance would not give unit variance; divide by the standard deviation\n    std = np.std(data, axis=0)\n    return data / std\n\n\ndef load_standard_dataset(save_file, labels_present=True):\n    data, labels = load_dataset(save_file, labels_present)\n    data = center(data)\n    data = standardize(data)\n    return data, labels\n\n\ndef load_standard_dataset_with_bias(save_file, labels_present=True):\n    data, labels = load_standard_dataset(save_file, labels_present)\n    data = np.hstack([data, np.ones((len(data), 1))])\n    return data, labels\n\n\ndef load_dataset_with_bias(save_file, labels_present=True):\n    data, labels = load_dataset(save_file, labels_present)\n    data = np.hstack([data, np.ones((len(data), 1))])\n    return data, labels\n\n
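# Added illustration (not part of the original module): a self-contained sanity check of the\n# helpers above on synthetic data, so no dataset file is needed.\nif __name__ == '__main__':\n    rng = np.random.default_rng(0)\n    # two features with different offsets and scales\n    demo = rng.normal(loc=5.0, scale=[2.0, 0.5], size=(1000, 2))\n    demo = standardize(center(demo))\n    print(np.allclose(np.mean(demo, axis=0), 0.0))  # True: centered\n    print(np.allclose(np.var(demo, axis=0), 1.0))   # True: unit variance per axis\n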
","sub_path":"tools/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"410940640","text":"from stanfordcorenlp import StanfordCoreNLP\r\nfrom termcolor import colored\r\nimport json\r\nimport sys\r\nimport os\r\nimport string\r\nimport re\r\nfrom gender import GenderRecoginition\r\nimport spacy\r\nimport copy\r\nimport shutil\r\nimport math\r\nimport multiprocessing\r\nimport nltk\r\nfrom nltk.tokenize.treebank import TreebankWordDetokenizer\r\nfrom mycorenlp import MyCoreNLP\r\n\r\nALLOWED_PARALLEL_PROCESS = 8\r\nMAX_SENTENCES_IN_ONE_DOCUMENT = 30\r\nREMOVE_TAG = \"#remove#\"\r\nPRONOUNS = {'singular':\r\n                {'female': {'subj': 'she', 'obj': 'her', 'possadj': 'her', 'posspro': 'hers', 'reflx': 'herself'},\r\n                 'male': {'subj': 'he', 'obj': 'him', 'possadj': 'his', 'posspro': 'his', 'reflx': 'himself'},\r\n                 'neutral': {'subj': 'it', 'obj': 'it', 'possadj': 'its', 'posspro': 'its', 'reflx': 'itself'}\r\n                 },\r\n            'plural':\r\n                {'female': {'subj': 'they', 'obj': 'them', 'possadj': 'their', 'posspro': 'theirs',\r\n                            'reflx': 'themselves'},\r\n                 'male': {'subj': 'they', 'obj': 'them', 'possadj': 'their', 'posspro': 'theirs', 'reflx': 'themselves'},\r\n                 'neutral': {'subj': 'they', 'obj': 'them', 'possadj': 'their', 'posspro': 'theirs',\r\n                             'reflx': 'themselves'}\r\n                 }\r\n            }\r\nAll_PRONOUNS = ['I', 'me', 'my', 'mine', 'myself', 'you', 'your', 'yours', 'yourself', 'he', \\\r\n                'him', 'his', 'himself', 'she', 'her', 'hers', 'herself', 'it', 'its', 'itself', \\\r\n                'we', 'us', 'our', 'ours', 'ourselves', 'they', 'them', 'their', 'themselves']\r\n\r\n# the machines that run the Stanford CoreNLP server\r\nUKP_SERVER = 'http://krusty.ukp.informatik.tu-darmstadt.de'\r\nUKP_SERVER_NED = \"http://ned.ukp.informatik.tu-darmstadt.de\"\r\nLOCALHOST = \"http://localhost\"\r\n\r\n\r\n# Use Spacy to identify NER\r\nclass SpacyNLP:\r\n    def __init__(self):\r\n        self.nlp = spacy.load('en_core_web_sm')\r\n\r\n    def ents(self, text):\r\n        ent_list = []\r\n        doc = self.nlp(text)\r\n\r\n        for ent in doc.ents:\r\n            ent_list.append((ent.text, ent.label_))\r\n        # print(\"Space:\", ent_list)
\r\n        return ent_list\r\n\r\n\r\n# given a certain index, find the corresponding dependency parsing result\r\ndef find_deparse_with_index(index, deparses):\r\n    index += 1  # the deparse result has no index 0, only 1...len(deparses)\r\n    for deparse in deparses:\r\n        if deparse[2] == index:\r\n            return deparse\r\n        else:\r\n            continue\r\n\r\n    return deparses[0]\r\n\r\n\r\ndef chunks(l, n):\r\n    # yield successive n-sized chunks from l\r\n    for k in range(0, len(l), n):\r\n        yield l[k:k + n]\r\n\r\n\r\n# get the corresponding pronoun for the given information.\r\ndef get_personal_pronoun(deparse_label, gender, number):\r\n    if deparse_label in ('nsubj', 'nsubjpass', 'xsubj'):\r\n        key = 'subj'\r\n    elif deparse_label in ('dobj', 'iobj', 'pobj'):\r\n        key = 'obj'\r\n    elif deparse_label == 'poss':\r\n        key = 'possadj'\r\n    else:  # use the object form of the pronoun by default\r\n        key = 'obj'\r\n    return PRONOUNS[number][gender][key]\r\n\r\n\r\n# @entity is a tuple like ('Florence', 'CITY'), ('Ptolemy', 'PERSON') or ('Lucy Green', 'PERSON')\r\ndef replace_ner(entity, ner_label, postag, deparse, index):\r\n    gender = 'neutral' if ner_label != 'PERSON' or postag[1] != 'NNP' \\\r\n        else GenderRecoginition().gender_identify(entity.upper(), False)\r\n    number = 'plural' if postag[1] == 'NNS' or postag[1] == 'NNPS' else 'singular'\r\n    pronoun = get_personal_pronoun(deparse[0], gender, number)\r\n\r\n    if index == 0:\r\n        pronoun = string.capwords(pronoun)\r\n    return pronoun\r\n\r\n\r\ndef update_pronoun(final_result, start_word_id, length, pronoun, ner_ids):\r\n    final_result[start_word_id][3] = pronoun\r\n    final_result[start_word_id][4] = \"PRP\"\r\n    ner_ids[start_word_id] = ner_ids[start_word_id] + ')' if ner_ids[start_word_id][-1] != ')' else ner_ids[start_word_id]\r\n    for j in range(1, length):\r\n        if ner_ids[start_word_id+j] != '-' and ner_ids[start_word_id+j] != ner_ids[start_word_id]:\r\n            ner_ids[start_word_id] = ner_ids[start_word_id] + '|' + str(ner_ids[start_word_id+j])\r\n        final_result[start_word_id+j][3] = REMOVE_TAG\r\n\r\n\r\ndef generate_conll_style_output(sentence, sid, doc_id, part, final_result, dcoref):\r\n    sta_nlp = MyCoreNLP()\r\n    # part: \"_part 002\" or \"_part 011\"\r\n    part_number = int(part[7:])\r\n    tokens = sta_nlp.word_tokenize(sentence)\r\n    poss = sta_nlp.pos_tag(sentence)\r\n    assert(len(tokens) == len(poss))\r\n    ner_ids = ['-']*len(tokens)\r\n\r\n    for i in range(len(tokens)):\r\n        # the stanford word tokenizer keeps \"1 1/2\" as one token instead of two\r\n        token = tokens[i].replace(\" \", \"-\")\r\n        pos = poss[i][1]\r\n        line_element = [doc_id, part_number, '-', token, pos, '*', '-', '-', '-', '-', '*', '-']\r\n        final_result.append(line_element)\r\n\r\n    for id, mentions in dcoref.items():\r\n        # a mention is a tuple like (2, 1, 3, 'The city'); sentence ids start from 1\r\n        for mention in mentions:\r\n            if mention[0] == sid + 1:\r\n                update_ner_id(ner_ids, mention[1]-1, mention[2]-mention[1], id, mention[-1], sid, dcoref)\r\n\r\n    return ner_ids\r\n\r\n\r\ndef dcoref_record(doc, doc_id):\r\n    sta_nlp = MyCoreNLP()\r\n    coref_id = 0\r\n    dcoref_dict = {}\r\n    try:\r\n        dcoref = sta_nlp.dcoref(doc)\r\n    except BaseException as e:\r\n        print(\"Time out when calling the stanford dcoref annotator in \", doc_id)\r\n        #raise e\r\n    else:\r\n
        for coref_chain in dcoref:\r\n            dcoref_dict[coref_id] = coref_chain\r\n            coref_id += 1\r\n\r\n    #print(\"dcoref:\", dcoref_dict)\r\n    return dcoref_dict\r\n\r\n\r\n# generate an output CoNLL file for every document.\r\ndef output(ss, dict_n, doc_id, part, file_path, doc):\r\n    # if ss is empty, simply return\r\n    if not ss:\r\n        return\r\n    fo = open(file_path, 'w')\r\n    fo.write(\"#begin document (\"+doc_id+\"); \"+part[1:])\r\n    annotated_ner = []\r\n    replaced_ner = []\r\n    np_id = {\"id\": len(dict_n)}\r\n    dcoref = dcoref_record(doc, doc_id+part)\r\n    for sentence in ss:\r\n        try:\r\n            # this assumes that there will not be two identical sentences\r\n            if sentence == \" \":\r\n                continue\r\n            final_result = []\r\n            ner_ids = generate_conll_style_output(sentence, ss.index(sentence), doc_id, part, final_result, dcoref)\r\n            self_labeling(sentence, ss.index(sentence), dict_n, annotated_ner, np_id, replaced_ner, final_result, ner_ids, dcoref, fo)\r\n        except BaseException as e:\r\n            print(colored(e, 'green'))\r\n            print(colored((\"found exception in sentence: \" + sentence, \"should not happen very often\"), 'green'))\r\n            #raise e\r\n        else:\r\n            continue\r\n    fo.write(\"\\n#end document\")\r\n    fo.close()\r\n\r\n\r\n# get all the named entities in a given sentence\r\ndef get_ner_in_sentence(sid, dict_n):\r\n    ner_in_sentence = {}\r\n    for key, value in dict_n.items():\r\n        if sid in value['sId']:\r\n            ner_in_sentence[key] = value\r\n    return ner_in_sentence\r\n\r\n\r\ndef identity_ner_in_dcoref(ner, start_index, sid, dcoref):\r\n    # note: str.strip(\"the \") strips the characters 't', 'h', 'e' and ' ' from both ends,\r\n    # not the prefix \"the \", so remove a leading article explicitly instead\r\n    ner_core = ner.lower()[4:] if ner.lower().startswith('the ') else ner.lower()\r\n    for coref_id, mentions in dcoref.items():\r\n        for mention in mentions:\r\n            # pronouns and named entities should be treated differently\r\n            if mention[-1].lower() in All_PRONOUNS:\r\n                if ner.lower() == mention[-1].lower() and sid == mention[0]-1:\r\n                    if start_index == mention[1] - 1:\r\n                        return coref_id\r\n            else:\r\n                mention_core = mention[-1].lower()[4:] if mention[-1].lower().startswith('the ') else mention[-1].lower()\r\n                if ner_core == mention_core or ner_core == mention[-1].lower()[:-2]:\r\n                    return coref_id\r\n    return None\r\n\r\n\r\n# add the coreference annotation, i.e., ner id\r\ndef update_ner_id(ner_ids, start_word_id, length, ner_id, ner, sid, dcoref, replace_flag=False):\r\n    # check if this named entity is already annotated in dcoref\r\n    new_id = identity_ner_in_dcoref(ner, start_word_id, sid, dcoref)\r\n    ner_id = new_id if new_id is not None else ner_id + len(dcoref)\r\n\r\n    end_word_id = start_word_id + length - 1\r\n    # deal with the case that the sentence in dcoref is longer than the existing one\r\n    if start_word_id > len(ner_ids) - 1 or end_word_id > len(ner_ids) - 1:\r\n        return\r\n\r\n    # check if there already exists an annotation for this mention\r\n    if ner_ids[start_word_id] != '-' and ner_ids[end_word_id] != '-':\r\n        if any(ner_id == int(old_id) for old_id in re.findall(\"\\\\d+\", ner_ids[start_word_id])) and \\\r\n                any(ner_id == int(old_id) for old_id in re.findall(\"\\\\d+\", ner_ids[end_word_id])):\r\n            return\r\n\r\n    if ner_ids[start_word_id] == '-':\r\n        ner_ids[start_word_id] = ner_ids[start_word_id].replace('-', '({}'.format(ner_id))\r\n    else:\r\n        ner_ids[start_word_id] = ner_ids[start_word_id] + '|(' + str(ner_id)\r\n\r\n    if not replace_flag:\r\n        if end_word_id != start_word_id:\r\n            if ner_ids[end_word_id] == '-':\r\n                ner_ids[end_word_id] = ner_ids[end_word_id].replace('-', '{})'.format(ner_id))\r\n            else:\r\n                ner_ids[end_word_id] = ner_ids[end_word_id] + '|' + str(ner_id) + ')'\r\n        else:\r\n            ner_ids[end_word_id] = str(ner_ids[end_word_id]) + ')'\r\n\r\n\r\n
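# Added illustration (not part of the original script): the coreference column built by\r\n# update_ner_id() uses the CoNLL bracket notation. A mention span gets \"(id\" on its first\r\n# token and \"id)\" on its last token (\"(id)\" for a one-token mention), and \"|\" joins\r\n# overlapping annotations. The ids and tokens below are invented for the example:\r\n#\r\n#   tokens:   Barack   Obama   visited   Paris\r\n#   ner_ids:  (7       7)      -         (12)\r\n\r\n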
# replace an existing possessive entity appearing more than once with a possessive pronoun.\r\ndef update_possesive(result, dict_n, sid, replaced_flag):\r\n    for i in range(len(result)):\r\n        if result[i][3] == \"'s\" and result[i][-1] == \"-\":\r\n            former_index = i - 1\r\n            former = result[former_index][3]\r\n            if former.lower() == 'he' or former.lower() == 'him':\r\n                pronoun = 'his'\r\n            elif former.lower() == 'she' or former.lower() == 'her':\r\n                pronoun = 'her'\r\n            elif former.lower() == 'it':\r\n                pronoun = 'its'\r\n            # if this is the entity's first appearance, replacing it could put a pronoun before the first mention\r\n            elif former.lower() in dict_n and sid != dict_n[former.lower()][\"sId\"][0] and not replaced_flag and \\\r\n                    len(dict_n[former.lower()][\"sId\"]) > 1 and result[former_index][-1] == '-':\r\n                label = dict_n[former.lower()][\"label\"]\r\n                gender = 'neutral' if label != 'PERSON' else GenderRecoginition().gender_identify(former.upper(), False)\r\n                pronoun = get_personal_pronoun(\"poss\", gender, \"singular\")\r\n                result[former_index][-1] = \"(\"+str(dict_n[former.lower()][\"nerId\"])+\")\"\r\n            else:\r\n                continue\r\n            result[i][3] = REMOVE_TAG\r\n            result[former_index][4] = \"PRP$\"\r\n            replaced_flag = True\r\n\r\n            # if this entity is the first word of the sentence, capitalize the pronoun\r\n            if former_index == 0:\r\n                pronoun = string.capwords(pronoun)\r\n            else:\r\n                pronoun = remove_the_before_pronoun(result, former_index, pronoun)\r\n\r\n            result[former_index][3] = pronoun\r\n\r\n\r\ndef remove_the_before_pronoun(result, pronoun_index, pronoun):\r\n    # the lowercase check ensures this \"it\" won't be the first token, so former_index won't go below zero\r\n    if pronoun_index > 1:\r\n        former_index = pronoun_index - 1\r\n        former = result[former_index][3]\r\n        if former.lower() == \"the\":\r\n            result[former_index][3] = REMOVE_TAG\r\n            # if the ner id of token 'the' is not '-', it may be something like (3\r\n            ner_id = result[former_index][-1]\r\n            if ner_id != '-':\r\n                result[pronoun_index][-1] = ner_id if result[pronoun_index][-1] == '-' else result[pronoun_index][-1] + '|' + ner_id\r\n            # if the former token \"the\" was the first word of the sentence, capitalize the pronoun\r\n            if former_index == 0:\r\n                pronoun = string.capwords(pronoun)\r\n    return pronoun\r\n\r\n\r\ndef identity_nested_relation_dcoref(ner, srt, lth, sid, dcoref):\r\n    for mentions in dcoref.values():\r\n        for mention in mentions:\r\n            if sid == mention[0]-1 and ner in mention[-1]:\r\n                # mirror identify_nested_relation(): compare against the mention START index;\r\n                # the second branch originally used mention[0] (the sentence id) by mistake\r\n                if srt >= mention[1]-1 and (srt+lth) < mention[2]-1:\r\n                    return mention[-1]\r\n                elif srt > mention[1]-1 and (srt+lth) <= mention[2]-1:\r\n                    return mention[-1]\r\n                else:\r\n                    pass\r\n    return None\r\n\r\n\r\n# check if some ne is nested inside another longer named entity, e.g. \"Central European\" in \"Central European Time\"\r\ndef identify_nested_relation(ne, srt, lth, ner_in_sentence_pos):\r\n    for start, length, ner, value in ner_in_sentence_pos:\r\n        if ne in ner:\r\n            if srt >= start and (srt+lth) < (start+length):\r\n                return [(start, length, ner, value)]\r\n            elif srt > start and (srt+lth) <= (start+length):\r\n                return [(start, length, ner, value)]\r\n            else:\r\n                pass\r\n    return None\r\n\r\n\r\n
# get all the named entities that appear more than once in a sentence\r\ndef get_ner_to_mark(exist_ner_with_pos):\r\n    ner_to_mark = []\r\n    for start, length, ner, value in exist_ner_with_pos:\r\n        # if this ner appears only once, there is no need to annotate it\r\n        if len(value['sId']) <= 1:\r\n            continue\r\n        # if this ner appears more than once, mark it or replace it with a pronoun\r\n        else:\r\n            ner_to_mark.append((start, length, ner, value))\r\n\r\n    return ner_to_mark\r\n\r\n\r\n# a helper function for find_possessive_pronoun\r\ndef add_result(rst_dict, start, np_len, possessive_idx):\r\n    if start not in rst_dict.keys():\r\n        rst_dict[start] = {\"len\": np_len, \"prp_indices\": [possessive_idx]}\r\n    else:\r\n        rst_dict[start][\"prp_indices\"].append(possessive_idx)\r\n\r\n\r\n# given the possessive pronoun and a candidate noun phrase tree, check whether the PRP$ and the NP match in number\r\ndef check_possessive_pronoun_number(possprn, np_tree):\r\n    head_word = find_head_word(np_tree)\r\n    if head_word is not None:\r\n        index = np_tree.leaves().index(head_word[0])\r\n    else:\r\n        index = len(np_tree.leaves()) - 1\r\n        head_word = [np_tree.leaves()[-1]]\r\n\r\n    head_position = np_tree.leaf_treeposition(index)\r\n    head_tree = np_tree[head_position[:-1]]\r\n    # if the noun phrase is a pronoun, do not mark it\r\n    if head_tree.label() == \"PRP\":\r\n        return False\r\n\r\n    if head_tree.label() == \"NNS\" or head_tree.label() == \"NNPS\":\r\n        if possprn == \"their\":\r\n            return True\r\n    else:\r\n        if possprn in PRONOUNS['singular']['male'].values():\r\n            if GenderRecoginition().gender_identify(head_word[0].upper(), False) == \"male\":\r\n                return True\r\n        elif possprn in PRONOUNS['singular']['female'].values():\r\n            if GenderRecoginition().gender_identify(head_word[0].upper(), False) == \"female\":\r\n                return True\r\n        else:\r\n            if possprn != \"their\":\r\n                return True\r\n\r\n    return False\r\n\r\n
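# Added illustration (not part of the original script): expected behaviour of the number check\r\n# above on hand-written bracketed parses (nltk.tree.ParentedTree.fromstring input):\r\n#\r\n#   check_possessive_pronoun_number('their', ParentedTree.fromstring('(NP (DT the) (NNS dogs))'))  # -> True\r\n#   check_possessive_pronoun_number('their', ParentedTree.fromstring('(NP (DT the) (NN dog))'))    # -> False\r\n\r\n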
# find possessive pronouns and the matching noun phrases, step 2\r\ndef find_possessive_pronoun(tree, dparses, rst_dict, ner_ids, clause_idx=0, subj_idx=0):\r\n    subj_token_index = -1\r\n    poss_indices = []\r\n    if len(tree.leaves()) != len(dparses):\r\n        return None\r\n\r\n    for dparse in dparses:\r\n        if dparse[0] in ('nsubj', 'nsubjpass', 'xsubj'):\r\n            # dependency parse indices start from 1\r\n            subj_token_index = dparse[2] - 1\r\n            break\r\n\r\n    for dparse in dparses:\r\n        if dparse[0] in (\"nmod:poss\", \"poss\"):\r\n            possessive_index = dparse[2] - 1\r\n            # there may be multiple possessive pronouns in one sentence, so find all of them\r\n            if possessive_index > 0 and ner_ids[possessive_index] == '-':\r\n                poss_indices.append(possessive_index)\r\n\r\n    leaves = tree.leaves()\r\n    for possessive_index in poss_indices:\r\n        possessive_position = tree.leaf_treeposition(possessive_index)\r\n        possessive_pronoun = tree.leaves()[possessive_index]\r\n        prp_tree = tree[possessive_position[:-1]]\r\n        if prp_tree.label() == \"PRP$\":\r\n            offset = 1\r\n            if leaves[possessive_index-1] == \"and\" and possessive_index >= 2:\r\n                left_position = tree.leaf_treeposition(possessive_index - 2)\r\n                if tree.leaves()[possessive_index-2] == ',':\r\n                    np_tree = tree[left_position[:-1]].left_sibling()\r\n                    offset += 1\r\n                else:\r\n                    np_tree = tree[left_position[:-2]]\r\n\r\n                if np_tree.label() == \"NP\":\r\n                    np = np_tree.leaves()\r\n                    start = possessive_index - len(np) - offset + clause_idx + subj_idx\r\n                    if start in range(0, possessive_index):\r\n                        if check_possessive_pronoun_number(possessive_pronoun, np_tree):\r\n                            add_result(rst_dict, start, len(np), possessive_index+clause_idx+subj_idx)\r\n\r\n            elif subj_token_index != -1 and subj_token_index < possessive_index:\r\n                subj_position = tree.leaf_treeposition(subj_token_index)\r\n                # find the nearest NP above the subject\r\n                subj_tree = tree[subj_position[:-2]]\r\n                if subj_tree.label() == \"NP\":\r\n                    np = subj_tree.leaves()\r\n                    # if the subject token repeats inside the noun phrase, a wrong annotation is possible\r\n                    for i in range(len(np)):\r\n                        if np[i] == tree.leaves()[subj_token_index]:\r\n                            start = subj_token_index - i + clause_idx + subj_idx\r\n                            break\r\n                    if check_possessive_pronoun_number(possessive_pronoun, subj_tree):\r\n                        add_result(rst_dict, start, len(np), possessive_index+clause_idx+subj_idx)\r\n        else:\r\n            break\r\n    return rst_dict\r\n\r\n\r\n# find possessive pronouns and the matching noun phrases, step 1\r\ndef mark_possesive_pronoun(tree, dparses, nlp, ner_ids):\r\n    rst_dict = {}\r\n    clause_start_index, clause_len = find_clause(tree, nlp)\r\n    # it may be that the whole sentence is an SBAR clause\r\n    if clause_start_index == -1 or clause_start_index == 0 and clause_len == len(tree.leaves()):\r\n        find_possessive_pronoun(tree, dparses, rst_dict, ner_ids)\r\n    else:\r\n        # the clause is at the beginning of the sentence\r\n        if clause_start_index == 0:\r\n            subj_start_idx = clause_len\r\n            subj = TreebankWordDetokenizer().detokenize(tree.leaves()[clause_len:])\r\n            clause = TreebankWordDetokenizer().detokenize(tree.leaves()[:clause_len])\r\n        else:\r\n            subj_start_idx = 0\r\n            subj = TreebankWordDetokenizer().detokenize(tree.leaves()[:clause_start_index])\r\n            clause = TreebankWordDetokenizer().detokenize(tree.leaves()[clause_start_index:])\r\n\r\n        subj_tree = nltk.tree.ParentedTree.fromstring(nlp.parse(subj))\r\n        clause_tree = nltk.tree.ParentedTree.fromstring(nlp.parse(clause))\r\n\r\n        find_possessive_pronoun(subj_tree, nlp.dependency_parse(subj), rst_dict, ner_ids, 0, subj_start_idx)\r\n        find_possessive_pronoun(clause_tree, nlp.dependency_parse(clause), rst_dict, ner_ids, clause_start_index, 0)\r\n\r\n    return rst_dict\r\n\r\n\r\n# find the clause of a sentence\r\ndef find_clause(tree, nlp):\r\n    clause = \"***###***\"\r\n    clause_idx_str = -1\r\n    # list to string\r\n    str_leaves = \" \".join(tree.leaves())\r\n    for node in tree.subtrees(lambda x: x.label() == \"SBAR\"):\r\n        clause = node.leaves()\r\n        clause_idx_str = str_leaves.find(\" \".join(clause))\r\n    if clause_idx_str != -1:\r\n        # string back to a token list\r\n        clause_start_index = nlp.word_tokenize(str_leaves[:clause_idx_str])\r\n        # clause start index, e.g. if len(sub_sentence)=8 then leaves[8:] are the clause leaves\r\n        return len(clause_start_index), len(clause)\r\n    else:\r\n        return -1, 0\r\n\r\n\r\n
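# Added illustration (not part of the original script): a worked example of find_clause().\r\n# Assuming the parser brackets the complement clause of \"He said that she left\" as an SBAR\r\n# spanning \"that she left\", str_leaves.find() locates the clause after the two prefix tokens\r\n# \"He said\", so find_clause() returns (2, 3): the clause starts at token index 2 and is 3 tokens long.\r\n\r\n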
# find the head word of a noun phrase\r\ndef find_head_word(ne_tree):\r\n    for sub_tree in reversed(ne_tree):\r\n        if isinstance(sub_tree, nltk.tree.Tree):\r\n            if sub_tree.label() in ['NP', 'NN', 'NNS', 'NNP', 'NNPS', 'FRAG']:\r\n                return find_head_word(sub_tree)\r\n            else:\r\n                return ne_tree.leaves()\r\n\r\n\r\n# find the index of the head word in a noun phrase\r\ndef find_head_word_index(start, length, ne_tree):\r\n    head_word = find_head_word(ne_tree)\r\n    if head_word is not None:\r\n        head_word_index = start + ne_tree.leaves().index(head_word[0])\r\n    # if no head word can be found, use the rightmost word instead\r\n    else:\r\n        head_word_index = start + length - 1\r\n    return head_word_index\r\n\r\n\r\n# the key function for processing a sentence\r\ndef self_labeling(sentence, sid, dict_n, annotated_ner, np_id, replaced_ner, final_result, ner_ids, dcoref, fp):\r\n    replace_flag = False\r\n    sta_nlp = MyCoreNLP()\r\n    # potential error: \"32 1/5\" is tokenized as one token containing a space, but should be two tokens\r\n    tokens = sta_nlp.word_tokenize(sentence)\r\n    poss = sta_nlp.pos_tag(sentence)\r\n    # assert(len(tokens) == len(poss))\r\n    # the nlp parse annotator can only handle sentences with fewer than 80 tokens\r\n    if len(tokens) >= 75:\r\n        return\r\n    # work around sentences that start with a double quote\r\n    cparses = sta_nlp.parse(sentence.replace(\"\\\"\", \"'\"))\r\n    dparses = sta_nlp.dependency_parse(sentence)\r\n\r\n    # get all the ner that appear in this sentence from the ner_dict map\r\n    ner_in_sentence = get_ner_in_sentence(sid, dict_n)\r\n    exist_ner_with_pos = []\r\n    for ner, value in ner_in_sentence.items():\r\n        ner_tokens = sta_nlp.word_tokenize(ner)\r\n        # find all occurrences of the ner in this sentence; re.escape guards all regex\r\n        # metacharacters (the original escaped only \".\")\r\n        for n in re.finditer(re.escape(ner), sentence.lower()):\r\n            # solve the tokenization difference for concatenated words like Austria-Hungary or GMT/UTC\r\n            if n.start() > 0 and sentence[n.start()-1] not in (' ', '(', '\"', '\\''):\r\n                continue\r\n            # \"'\" should also be considered the end of a word, as in \"Lucy's\"\r\n            if n.end() < len(sentence) and sentence[n.end()] not in (' ', '.', ',', ':', '\"', '?', ')', '\\'', '!'):\r\n                continue\r\n\r\n            start_index = len(sta_nlp.word_tokenize(sentence[:n.start()]))\r\n            ne_len = len(ner_tokens)\r\n            # added on 20190408: the ne record never contains \"the\", so it has to be added here\r\n            if start_index > 0 and tokens[start_index-1].lower() == \"the\":\r\n                start_index = start_index - 1\r\n                ne_len = len(ner_tokens) + 1\r\n                ner = \"the \"+ner\r\n                ner_tokens_with_the = ['the']\r\n                ner_tokens_with_the.extend(ner_tokens)\r\n                ner_tokens = ner_tokens_with_the\r\n\r\n            # check whether the found start index and length correspond to the ne\r\n            if \" \".join(ner_tokens).lower() == \" \".join(tokens[start_index:start_index+ne_len]).lower():\r\n                exist_ner_with_pos.append((start_index, ne_len, ner, value))\r\n\r\n    # identify nested ner\r\n    exist_ner_with_pos_copy = copy.deepcopy(exist_ner_with_pos)\r\n    ner_to_mark = get_ner_to_mark(exist_ner_with_pos_copy)\r\n\r\n    # add annotations for existing possessive pronouns\r\n    tree = nltk.tree.ParentedTree.fromstring(cparses)\r\n    possessive_pronoun = mark_possesive_pronoun(tree, dparses, sta_nlp, ner_ids)\r\n\r\n    for start, value in possessive_pronoun.items():\r\n        np_id[\"id\"] = np_id[\"id\"] + 1\r\n        tmp_id = np_id[\"id\"]\r\n        # check if this noun phrase is also a named entity\r\n        np = \" \".join(tokens[start:start+value[\"len\"]]).lower()\r\n        # since the ne dict entries never start with \"the \", strip it for the lookup\r\n        np_key = np[4:] if np.startswith(\"the \") else np\r\n        if np_key in dict_n.keys():\r\n            tmp_id = dict_n[np_key][\"nerId\"]\r\n
            # if this np is a named entity and already appears more than once, it will be annotated later\r\n            if len(dict_n[np_key][\"sId\"]) <= 1:\r\n                update_ner_id(ner_ids, start, value[\"len\"], tmp_id, np, sid, dcoref)\r\n        else:\r\n            update_ner_id(ner_ids, start, value[\"len\"], tmp_id, np, sid, dcoref)\r\n\r\n        for prp_index in value[\"prp_indices\"]:\r\n            update_ner_id(ner_ids, prp_index, 1, tmp_id, np, sid, dcoref)\r\n\r\n    ner_to_replace_with_pos = []\r\n    # sort the ner in descending order of ner length\r\n    ner_to_mark = sorted(ner_to_mark, key=lambda x: x[1], reverse=True)\r\n    for start, length, ner, value in ner_to_mark:\r\n        super_ner = identify_nested_relation(ner, start, length, exist_ner_with_pos_copy)\r\n        # check if this ne is a sub-entity in the dcoref result\r\n        super_ner_in_docref = identity_nested_relation_dcoref(ner, start, length, sid, dcoref)\r\n        # this ner appears more than once and this is its first occurrence\r\n        if ner not in annotated_ner:\r\n            update_ner_id(ner_ids, start, length, value['nerId'], ner, sid, dcoref)\r\n            annotated_ner.append(ner)\r\n        else:\r\n            if super_ner is None and super_ner_in_docref is None:\r\n                # collect all the ner that need to be replaced in this sentence\r\n                ner_to_replace_with_pos.append((start, length, ner, value))\r\n\r\n    # if there are ners to be replaced\r\n    if len(ner_to_replace_with_pos) == 0:\r\n        pass\r\n    else:\r\n        start, length, ner, value = ner_to_replace_with_pos[0]\r\n        ne_tree = nltk.tree.ParentedTree.fromstring(sta_nlp.parse(ner))\r\n        deparse_index = find_head_word_index(start, length, ne_tree)\r\n        deparse = find_deparse_with_index(deparse_index, dparses)\r\n        pronoun = replace_ner(ner, value['label'], poss[deparse_index], deparse, start)\r\n\r\n        # check if the ne to be replaced has already been marked; replace only the nearest ner with a pronoun\r\n        if any(ner_ids[j] != '-' for j in range(start, start+length)) or ner in replaced_ner:\r\n            update_ner_id(ner_ids, start, length, value['nerId'], ner, sid, dcoref)\r\n        else:\r\n            update_ner_id(ner_ids, start, length, value['nerId'], ner, sid, dcoref, True)\r\n            update_pronoun(final_result, start, length, pronoun, ner_ids)\r\n            replaced_ner.append(ner)\r\n            replace_flag = True\r\n\r\n        # any further ne that needs replacing will only be annotated, not replaced\r\n        if len(ner_to_replace_with_pos) > 1:\r\n            for j in range(1, len(ner_to_replace_with_pos)):\r\n                start, length, ner, value = ner_to_replace_with_pos[j]\r\n                if not any(final_result[k][3] == REMOVE_TAG for k in range(start, start+length)):\r\n                    update_ner_id(ner_ids, start, length, value['nerId'], ner, sid, dcoref)\r\n\r\n    write_final_result(final_result, ner_ids, dict_n, sid, replace_flag, fp)\r\n\r\n\r\ndef write_final_result(final_result, ids, dict_n, sid, flag, fp):\r\n    fp.write(\"\\n\")\r\n\r\n    for k in range(len(final_result)):\r\n        final_result[k][-1] = ids[k]\r\n\r\n    # this step is very necessary\r\n    final_result = [line for line in final_result if line[3] != REMOVE_TAG]\r\n    update_possesive(final_result, dict_n, sid, flag)\r\n    final_result = [line for line in final_result if line[3] != REMOVE_TAG]\r\n    merge_dt_and_ne(final_result)\r\n\r\n    for n in range(len(final_result)):\r\n        final_result[n][2] = n\r\n    for line_elem in final_result:\r\n        line_str = [\"{}\".format(element) for element in line_elem]\r\n        line_width = [20, 5, 5, 30, 5, 5, 5, 5, 5, 5, 5, 5, 10]\r\n
        line = \" \".join(line_str[i].rjust(line_width[i]) for i in range(0, len(line_str)))\r\n        fp.write(line)\r\n        fp.write(\"\\n\")\r\n\r\n\r\ndef merge_dt_and_ne(results):\r\n    for i in range(len(results)):\r\n        if i == len(results) - 1:\r\n            continue\r\n        if results[i][3].lower() == 'the' and results[i][-1] != '-':\r\n            latter_index = i + 1\r\n            if results[latter_index][-1] != '-':\r\n                for id in re.findall(\"\\\\d+\", results[i][-1]):\r\n                    if \"(\"+id in results[i][-1] and \"(\"+id in results[latter_index][-1]:\r\n                        end_index = find_the_end_of_ne(results, i, id)\r\n                        if id+\")|\"+id+\")\" not in results[end_index][-1]:\r\n                            continue\r\n                        if latter_index == end_index:\r\n                            results[latter_index][-1] = results[latter_index][-1].replace(\"(\"+id+\")\", \"\")\r\n                        else:\r\n                            # fix a bug caused by strip(\"(\"+id)\r\n                            results[latter_index][-1] = results[latter_index][-1].replace(\"(\"+id, \"\")\r\n                            results[end_index][-1] = results[end_index][-1].replace(id+\")|\"+id+\")\", id+\")\", 1)\r\n                        results[latter_index][-1] = check_split_line_in_nerid(results[latter_index][-1])\r\n                        results[end_index][-1] = check_split_line_in_nerid(results[end_index][-1])\r\n\r\n\r\ndef find_the_end_of_ne(results, index, id):\r\n    for i in range(index, len(results)):\r\n        if id+\")\" in results[i][-1]:\r\n            return i\r\n\r\n\r\ndef check_split_line_in_nerid(id):\r\n    new_id = id.replace(\"||\", \"|\")\r\n    if id.startswith(\"|\"):\r\n        new_id = new_id[1:]\r\n    if id == '':\r\n        new_id = '-'\r\n    elif id[-1] == \"|\":\r\n        new_id = new_id[:-1]\r\n    else:\r\n        pass\r\n\r\n    return new_id\r\n\r\n\r\n# merge adjacent tokens in the stanford corenlp ner result into named entities.\r\ndef get_merged_ner(orig_list):\r\n    merged_ner_list = []\r\n    for i in range(len(orig_list)):\r\n        en = orig_list[i]\r\n        current_index = i\r\n        # merge entries next to each other that share the same NER label\r\n        if en[1] != 'O':\r\n            merged_ner_list.append(en)\r\n            if current_index > 0:\r\n                former_ne = orig_list[current_index - 1]\r\n                if en[1] == former_ne[1]:\r\n                    orig_list[current_index] = (former_ne[0] + ' ' + en[0], en[1])\r\n                    merged_ner_list.append(orig_list[current_index])\r\n                    merged_ner_list.remove(en)\r\n                    merged_ner_list.remove(former_ne)\r\n                # added on 20190114, solve the problem caused by \"the it\" begin\r\n                elif former_ne[0].lower() == \"the\":\r\n                    orig_list[current_index] = (\"the\" + ' ' + en[0], en[1])\r\n                    merged_ner_list.append(orig_list[current_index])\r\n                    merged_ner_list.remove(en)\r\n                else:\r\n                    continue\r\n                # end\r\n    # print(\"stanford:\", merged_ner_list)\r\n    return merged_ner_list\r\n\r\n\r\n# get all the sentences in a paragraph\r\ndef get_sentences(paragraph):\r\n    all_context = []\r\n    # parse the json style data, get the context field, and join the contexts for sentence splitting\r\n    for paraph in paragraph:\r\n        context = paraph['context']\r\n        all_context.append(context)\r\n\r\n    ss_in_context = \" \".join(all_context).replace('\\n', ' ').replace('\\xa0', ' ')\r\n    return ss_in_context\r\n\r\n\r\n# extract all named entities in a document with the given sentences.\r\ndef extract_ner(sentences, ner_dict, title, path):\r\n    sta_nlp = MyCoreNLP()\r\n    spa_nlp = SpacyNLP()\r\n\r\n    for j in range(len(sentences)):\r\n        sentence_id = j\r\n        sentence = sentences[j]\r\n        # get ner identified by stanford corenlp\r\n        sta_ner_list = sta_nlp.ner(sentence)\r\n        # get ner identified by spacy\r\n        spa_ner_list = spa_nlp.ents(sentence)\r\n        merged_ner_coren = get_merged_ner(sta_ner_list)\r\n
        write_ner_coren(merged_ner_coren, sentence_id, ner_dict)\r\n        integrate_ner(spa_ner_list, merged_ner_coren, sentence_id, ner_dict)\r\n\r\n    fn = open(path + '/' + title + '.txt', 'a+')\r\n    json.dump(ner_dict, fn, indent=4, ensure_ascii=False)\r\n    fn.close()\r\n    return ner_dict\r\n\r\n\r\n# ner: identified by spacy; name: ner name already identified by CoreNLP\r\ndef check_two_ner(ner, name):\r\n    # added on 20190114: \"the Navy\" and \"Navy\" should be identified as one ne\r\n    if ner[0].lower() in name:\r\n        return True\r\n\r\n    # count how many words of the entity occur in the known name\r\n    # (the original returned inside the loop, so the count could never pass 1)\r\n    i = 0\r\n    for word in ner[0].lower().split(' '):\r\n        if word in name:\r\n            i += 1\r\n    return i > 3\r\n\r\n\r\n# check if the ner identified by spacy already exists in the record of all ner identified by stanford.\r\ndef check_ner_in_result(ner, tlist):\r\n    ner_names = [ner[0].lower() for ner in tlist]\r\n    # ignore the possessive entity detected by Spacy\r\n    if ner[0][-2:] == \"'s\":\r\n        return True\r\n\r\n    for name in ner_names:\r\n        if check_two_ner(ner, name):\r\n            return True\r\n\r\n    # str.strip(\"the \") would strip single characters, so remove the leading article explicitly\r\n    ner_no_the = ner[0].lower()[4:] if ner[0].lower().startswith('the ') else ner[0].lower()\r\n    if ner[0].lower() in ner_names or ner_no_the in ner_names:\r\n        return True\r\n\r\n    elif ner[0].lower().startswith('the '):\r\n        if ner[0][4:].lower() in ner_names or ner[0][4:][:-1].lower() in ner_names:\r\n            return True\r\n    elif ner[0].strip('-').lower() in ner_names:\r\n        return True\r\n    else:\r\n        return False\r\n\r\n\r\n# merge the Spacy result into the record\r\ndef integrate_ner(plist, tlist, sid, ner_dict):\r\n    for ner in plist:\r\n        if is_valid_ne(ner):\r\n            if not check_ner_in_result(ner, tlist):\r\n                update_ner_dict(ner, sid, ner_dict)\r\n        else:\r\n            continue\r\n\r\n\r\n# update the named entity record; strip a leading \"the \" from the entity name.\r\ndef update_ner_dict(ner, sid, ner_dict):\r\n    ner_id = len(ner_dict)\r\n    # there may be two \"the\" in one NE, so a simple strip(\"the \") is not enough.\r\n    ne_key = ner[0].lower()[4:] if ner[0].lower().startswith(\"the \") else ner[0].lower()\r\n    if ne_key in ner_dict:\r\n        ner_dict[ne_key]['sId'].append(sid)\r\n    else:\r\n        ner_dict[ne_key] = {'label': ner[1], 'sId': [sid], 'nerId': ner_id}\r\n\r\n\r\n# check whether a given named entity should be considered at all.\r\ndef is_valid_ne(ner):\r\n    special_symbol = ['(', ')', '[', '±', ']', '+', '\\xa0', '&']\r\n    if ner[0].startswith('A '):\r\n        return False\r\n    if any(sb in ner[0] for sb in special_symbol):\r\n        return False\r\n    if ner[0] == '\\n' or ner[0] == ' ':\r\n        return False\r\n    if ner[1] in ('NUMBER', 'CARDINAL', 'NATIONALITY', 'PERCENT', 'ORDINAL', 'DATE', 'DURATION', 'SET'):\r\n        return False\r\n    else:\r\n        return True\r\n\r\n\r\n# write the named entities recognized by stanford corenlp to the record.\r\ndef write_ner_coren(nerlist, sid, ner_dict):\r\n    for ner in nerlist:\r\n        if is_valid_ne(ner):\r\n            update_ner_dict(ner, sid, ner_dict)\r\n\r\n\r\ndef paragraph_process(sentences, paraph_name, path):\r\n    # split one paragraph into several parts\r\n    if len(sentences) > MAX_SENTENCES_IN_ONE_DOCUMENT:\r\n        parts_number = math.ceil(len(sentences) / MAX_SENTENCES_IN_ONE_DOCUMENT)\r\n        for k in range(int(parts_number)):\r\n            if k < 10:\r\n                part = \"_part 00\" + str(k)\r\n            else:\r\n                part = \"_part 0\" + str(k)\r\n            part_sentences = sentences[(MAX_SENTENCES_IN_ONE_DOCUMENT*k):(MAX_SENTENCES_IN_ONE_DOCUMENT*(k+1))]\r\n            document_process(part_sentences, paraph_name, part, 
path, \" \".join(part_sentences))\r\n else:\r\n document_process(sentences, paraph_name, \"_part 000\", path, \" \".join(sentences))\r\n\r\n\r\ndef document_process(sentences, paraph_name, part, path, doc):\r\n ner_dict = {}\r\n ner_dict = extract_ner(sentences, ner_dict, paraph_name + part, path)\r\n doc_id = path + '/' + paraph_name.replace(\" \", \"/00/\")\r\n file_path = path + '/' + paraph_name + part + \"_annotate.txt\"\r\n output(sentences, ner_dict, doc_id, part, file_path, doc)\r\n\r\n\r\ndef annotate_document(file_name, doc, output_path, doc_index):\r\n print(colored((\"paragraph \" + str(doc_index) + \" \" + file_name), 'yellow'))\r\n nlp = MyCoreNLP()\r\n ss = []\r\n try:\r\n sentences = nlp.ssplit(doc)\r\n except BaseException as e:\r\n ss.extend(nltk.tokenize.sent_tokenize(doc.replace(\"\\n\", ' ').replace('\\xa0', ' ')))\r\n print(\"The document \"+file_name+\" is too long, will be split automatically.\")\r\n #raise e\r\n\r\n if not ss:\r\n paragraph_process(sentences, file_name, output_path)\r\n # if the document has too many sentences, then split into two documents\r\n else:\r\n part_doc1 = \" \".join(ss[:int(len(ss)/2)])\r\n part_doc2 = \" \".join(ss[int(len(ss)/2):])\r\n annotate_document(file_name, part_doc1, output_path, doc_index)\r\n annotate_document(file_name+\"1\", part_doc2, output_path, doc_index)\r\n\r\n\r\nif __name__ == '__main__':\r\n doc_path = sys.argv[1]\r\n output_path = sys.argv[2]\r\n if os.path.exists(output_path):\r\n shutil.rmtree(output_path)\r\n os.makedirs(output_path)\r\n\r\n pool = multiprocessing.Pool(processes=ALLOWED_PARALLEL_PROCESS)\r\n i = 0\r\n for file in os.listdir(doc_path):\r\n i += 1\r\n with open(doc_path+'/'+file) as f:\r\n doc = f.read()\r\n filename = file[:-4] if file.endswith(\".txt\") else file\r\n #annotate_document(filename, doc, output_path, i)\r\n pool.apply_async(annotate_document, (filename, doc, output_path, i, ))\r\n pool.close()\r\n pool.join()\r\n print(\"the end!\")\r\n\r\n # sentences = []\r\n # ner_dict = {}\r\n # nlp = StanfordNLP()\r\n # context=\"Most locations used a 32 1/5 ft ( 9.8 meters ) - diameter version that straddles the building and is aimed at the intersection .\"\r\n # sentences = nlp.ssplit(context)\r\n #sentences.extend(nltk.tokenize.sent_tokenize(context.replace(\"\\n\", ' ').replace('\\xa0', ' ')))\r\n # print(nlp.dcoref(context))\r\n # for s in sentences:\r\n # print(nlp.pos(s))\r\n # print(nlp.parse(s))\r\n # tree = nltk.tree.ParentedTree.fromstring(nlp.parse(s))\r\n # tree.pretty_print()\r\n # print(find_head_word(tree))\r\n # print(nlp.dependency_parse(s))\r\n # #ner_dict = extract_ner(sentences, ner_dict, 'input_part 000', path)\r\n # file_path = path + '/' + \"input_part 000_annotate.txt\"\r\n # output(sentences, ner_dict, 'input', '_part 000', file_path)\r\n\r\n","sub_path":"self-labeling/annotate.py","file_name":"annotate.py","file_ext":"py","file_size_in_byte":40398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"12099746","text":"from django import template\n\nregister = template.Library()\n\n\n@register.simple_tag\ndef replace_GET_param(request, param, value):\n get_params = request.GET.copy()\n get_params[param] = value\n return f\"{request.path}?{get_params.urlencode()}\"","sub_path":"core/templatetags/querystring.py","file_name":"querystring.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"67596197","text":"\n\n\nimport 
random\nimport time\nfrom matplotlib import pyplot as plt\nfrom termcolor import colored\n\n\n\"\"\"\nFunction: createMatrix\nParameters: # rows, # columns\nPurpose: create a 2d matrix\nMethod: creates a 2d list and fills it row by row with the numbers 0 to N*N-1\nResult: returns an NxN matrix as a 2D list\n\"\"\"\ndef createMatrix(row, col):\n    number = 0\n    matrix = [[0 for x in range(row)] for y in range(col)]\n    for x in range(row):\n        for y in range(col):\n            matrix[x][y] = number\n            number += 1\n    return matrix\n\n\"\"\"\nFunction: isSick\nParameters: agent object\nPurpose: determine if the agent is sick\nMethod: checks if the agent's health is infected\nResult: Boolean indicating whether the agent is sick\n\"\"\"\ndef isSick(agent):\n    if (agent.getHealth() == \"infected\"):\n        return True\n    return False\n\n\"\"\"\nFunction: whichAgentisInfected\nParameters: agent1 object, agent2 object\nPurpose: determine which agent is sick\nMethod: checks if either agent is sick\nResult: a string for agent1, agent2, or neither\n\"\"\"\ndef whichAgentisInfected(agent1, agent2):\n    if (isSick(agent1)):\n        return \"agent1\"\n    elif (isSick(agent2)):\n        return \"agent2\"\n    else: return \"none\"\n\n\"\"\"\nFunction: createAgentID\nParameters: number of agents in the simulation\nPurpose: create a list of agent ids\nMethod: creates a list of agent ids where each id is the letter A followed by a number\nResult: a list containing agent id strings\n\"\"\"\ndef createAgentID(size):\n    agentList = []\n    for x in range(size):\n        agentList.append(\"A\" + str(x))\n    return agentList\n\n\"\"\"\nFunction: generateRandomX\nParameters: number of rows in the grid\nPurpose: generate a random value\nMethod: calls the randint function in the random library\nResult: random value between 0 and rows-1\n\"\"\"\ndef generateRandomX(row):\n    return random.randint(0, row-1)\n\n\"\"\"\nFunction: generateRandomY\nParameters: number of columns in the grid\nPurpose: generate a random value\nMethod: calls the randint function in the random library\nResult: random value between 0 and columns-1\n\"\"\"\ndef generateRandomY(col):\n    return random.randint(0, col-1)\n\n\"\"\"\nFunction: getX\nParameters: a tuple corresponding to an (X,Y) coordinate\nPurpose: get the x value\nMethod: indexes the first value in the tuple\nResult: integer corresponding to the x value\n\"\"\"\ndef getX(location):\n    return location[0]\n\n\"\"\"\nFunction: getY\nParameters: a tuple corresponding to an (X,Y) coordinate\nPurpose: get the y value\nMethod: indexes the second value in the tuple\nResult: integer corresponding to the y value\n\"\"\"\ndef getY(location):\n    return location[1]\n\n\"\"\"\nFunction: whichWayToMove\nParameters: None\nPurpose: get a random direction to move\nMethod: draws a random value between 1 and 5 to determine the direction\nResult: a string representing the direction the agent will move\n\"\"\"\ndef whichWayToMove():\n    direction = random.randint(1, 5)\n    if (direction == 1): return \"north\"\n    if (direction == 2): return \"east\"\n    if (direction == 3): return \"south\"\n    if (direction == 4): return \"west\"\n    if (direction == 5): return \"stays\"\n\n
\"\"\"\nFunction: movesLeftRight\nParameters: y coordinate, the direction to move, and the number of columns in the grid\nPurpose: move the agent left or right\nMethod: first checks the boundary cases (edges): if the move would take the agent off the\ngrid, the agent does not move. Otherwise the agent moves left or right depending on direction.\nResult: y value of the new location\n\"\"\"\ndef movesLeftRight(y, direction, cols):\n    if (y == 0 and direction == \"east\"):\n        newYLocation = y+1\n    elif (y == 0 and direction == \"west\"):\n        newYLocation = y\n    elif (y == cols-1 and direction == \"west\"):\n        newYLocation = y-1\n    elif (y == cols-1 and direction == \"east\"):\n        newYLocation = y\n    elif (direction == \"stays\"): newYLocation = y\n    else:\n        if (direction == \"east\"):\n            newYLocation = y+1\n        else:\n            newYLocation = y-1\n    return newYLocation\n\n\"\"\"\nFunction: movesUpDown\nParameters: x coordinate, the direction to move, and the number of rows in the grid\nPurpose: move the agent up or down\nMethod: first checks the boundary cases (edges): if the move would take the agent off the\ngrid, the agent does not move. Otherwise the agent moves up or down depending on direction.\nResult: x value of the new location\n\"\"\"\ndef movesUpDown(x, direction, rows):\n    if (x == 0 and direction == \"south\"):\n        newXLocation = x+1\n    elif (x == 0 and direction == \"north\"):\n        newXLocation = x\n    elif (x == rows-1 and direction == \"north\"):\n        newXLocation = x-1\n    elif (x == rows-1 and direction == \"south\"):\n        newXLocation = x\n    elif (direction == \"stays\"): newXLocation = x  # was newYLocation, which raised NameError on return\n    else:\n        if (direction == \"south\"):\n            newXLocation = x+1\n        else:\n            newXLocation = x-1\n    return newXLocation\n\n
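# Added illustration (not in the original): expected boundary behaviour of the two move helpers\n# on a hypothetical 5x5 grid:\n#   movesLeftRight(0, \"west\", 5)  -> 0  (left edge, agent stays)\n#   movesLeftRight(4, \"east\", 5)  -> 4  (right edge, agent stays)\n#   movesUpDown(0, \"south\", 5)    -> 1  (moves down one row)\n#   movesUpDown(4, \"north\", 5)    -> 3  (moves up one row)\n\n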
\"\"\"\nFunction: addAgentsToMatrix\nParameters: matrix and the agent object\nPurpose: add an agent object to the matrix\nMethod: goes through each location in the matrix and compares it to where the agent is\nsupposed to be located; when it matches, the agent id is placed at that location\nResult: a new matrix with the agent added\n\"\"\"\ndef addAgentsToMatrix(matrix, agent):\n    for x in range(len(matrix)):\n        for y in range(len(matrix[x])):\n            if (x == getX(agent.getLocation()) and y == getY(agent.getLocation())):\n                matrix[x][y] = agent.getAgentId()\n    return matrix\n\n\"\"\"\nFunction: locationSame\nParameters: list of agent objects\nPurpose: get a list of collisions that occur in the matrix\nMethod: goes through the list of agent objects (assuming there are at least 2) and compares\neach agent object's location to all agent objects following it. If two locations are the same,\na tuple of both agents is added to a new list.\nFor example: for a list of agents [a1, a2, a3, a4], a1 is compared to a2, a3 and a4; then a2\nto a3 and a4; then a3 to a4. Any collisions are added to the collision list.\nResult: list containing tuples of agents that collided\n\"\"\"\ndef locationSame(agents):\n    sameLocation = None\n    collisions = []\n    if (len(agents) > 1):\n        for x in range(0, len(agents)):\n            compLocation = agents[x]\n            for y in range(x+1, len(agents)):\n                if (compLocation.getLocation() == agents[y].getLocation()):\n                    sameLocation = agents[y]\n                    collisions.append((compLocation, sameLocation))\n                else:\n                    continue\n    return collisions\n\n\"\"\"\nFunction: displayGrid\nParameters: matrix and list of agents\nPurpose: display the matrix with the agents to the user\nMethod: first gets the list of agents that collided and prints a \"!\" at every location where\na collision occurred. Then walks the matrix: an empty location prints a white \"-\"; a healthy\nagent prints its id in white; an infected agent prints its id in blue with a \"*\" next to it;\nany other health state prints in yellow. The print calls pad the output so the spacing stays even.\nResult: None\n\"\"\"\ndef displayGrid(matrix, agentList):\n    listOfSameLocations = locationSame(agentList)\n    for x in range(len(listOfSameLocations)):\n        locationX = getX(listOfSameLocations[x][0].getLocation())\n        locationY = getY(listOfSameLocations[x][0].getLocation())\n        matrix[locationX][locationY] = \"! \"\n    for x in range(len(matrix)):\n        for y in range(len(matrix[x])):\n            if (isinstance(matrix[x][y], int)): print(colored(\"- \", 'white'), end=\" \")\n            if (isinstance(matrix[x][y], str)):\n                if (matrix[x][y] != \"! \"):\n                    health = getHealth(matrix[x][y], agentList)\n                    counter = locationInList(matrix[x][y], agentList)\n                    if (health == \"susceptible\"):\n                        if (counter < 10): print(matrix[x][y], end=\" \")\n                        else: print(matrix[x][y], end=\" \")\n                    # elif keeps a susceptible agent from also falling into the yellow branch below\n                    elif (health == \"infected\"):\n                        if (counter < 10): print(colored(matrix[x][y] + \"*\", 'blue'), end=\" \")\n                        else: print(colored(matrix[x][y] + \"*\", 'blue'), end=\"\")\n                    else: print(colored(matrix[x][y], 'yellow'), end=\" \")\n                else:\n                    # print the collision marker so the columns stay aligned\n                    print(matrix[x][y], end=\" \")\n        print()\n\n
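# Added note (not in the original): legend of the grid rendering above --\n#   \"- \" empty cell, \"A3\" susceptible agent, \"A7*\" infected agent (blue),\n#   yellow id = removed agent, \"! \" a cell where agents collided.\n\n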
\n\n\"\"\"\nFunction: setNewLocations\nParameters: direction to move, agent object, the number of rows, and number of columns\nPurpose: Set the location of the agent object with a new location\nMethod: Get the old x and y value location of the agent object. Then using the direction,\ndetermine if the agent should move left right, up down, or stay in place. Then set the new x\nor new y value depending on which way the agent moves.\nResult: return the agent object with the new location.\n\"\"\"\ndef setNewLocations(direction, agent, rows, cols):\n agentLocation = agent.getLocation()\n oldX = agentLocation[0]\n oldY = agentLocation[1]\n #once x and y values are obtained, set the new agent location\n #if direction is stays, don't move the agent\n if (direction == \"stays\"):\n return agent\n #if direction is east or west, move left/right if possible\n elif (direction == \"east\" or direction == \"west\"):\n newY = movesLeftRight(oldY, direction, cols)\n agent.setAgentLocation(oldX, newY)\n #otherwise move the agent north/south\n else:\n newX = movesUpDown(oldX, direction, rows)\n agent.setAgentLocation(newX, oldY)\n return agent\n\n\n\"\"\"\nFunction: createAgent\nParameters: current agent object, type of agent to create\nPurpose: convert an agent object to one of the given type\nMethod: capture the original agent's id and location, construct a fresh object of the\nrequested type, then copy the id and location onto the new object\nReturn: the converted agent object\n\"\"\"\ndef createAgent(agent, agentType):\n originalLocation = agent.getLocation()\n originalId = agent.getAgentId()\n if (agentType == \"infected\"): agent = InfectedAgent()\n if (agentType == \"removed\"): agent = RemovedAgent()\n agent.setAgentId(originalId)\n agent.setAgentLocation(getX(originalLocation), getY(originalLocation))\n return agent\n\n\n#main function that runs the simulation\ndef runSimulation():\n print(\"Press q at any time to go back to the main menu\")\n rows = input(\"Enter the size of your matrix (please enter # rows you want): \")\n if (rows == \"q\"): return\n rows = int(rows)\n columns = rows\n #These three lists are for the purpose of statistical gathering, nothing more\n sickAgents = []\n susceptibleAgents = []\n removedAgents = []\n counter = 0\n\n #sets the rows and columns\n numberOfAgents = input(\"Enter the number of agents on the board: \")\n if (numberOfAgents == \"q\"): return\n numberOfAgents = int(numberOfAgents)\n while(numberOfAgents >= columns*rows):\n numberOfAgents = input(\"You are putting too many agents on the board. Try Again: \")\n numberOfAgents = int(numberOfAgents)\n #sets number of agents in the grid\n iterations = input(\"How many iterations should this simulation run: \")\n if (iterations == \"q\"): return\n iterations = int(iterations)\n\n timeBeforeDeath = input(\"How many iterations should a sick agent last for before dying: \")\n if (timeBeforeDeath == \"q\"): return\n timeBeforeDeath = int(timeBeforeDeath)\n nameOfDisease = input(\"What is the name of the disease: \")\n proportionVaccinated = input(\"What percentage of the population is vaccinated? 
\")\n proportionVaccinated = int(proportionVaccinated)\n proportionVaccinated = float(proportionVaccinated/100)\n\n matrix = createMatrix(rows, columns) #creates the grid\n agentIDList = createAgentID(numberOfAgents) #creates a list of id's for each agent\n agentList = []\n print()\n for x in range(len(agentIDList)): #go through list of ids and create a susceptible agent for each id and add it to list\n agentList.append(SusceptibleAgent())\n agentList[x].setAgentId(agentIDList[x]) #set id of agent to the agent id from list\n agentList[x].setAgentLocation(generateRandomX(rows), generateRandomY(columns)) #set the location of each agent randomly\n susceptibleAgents.append(agentList[x])\n print()\n #go through and add each agent to the matrix\n for x in range(len(agentList)):\n matrix = addAgentsToMatrix(matrix, agentList[x])\n displayGrid(matrix, agentList) #prints the matrix with agents on the matrix\n print()\n #randomly generate a sick agent to be patient zero from the list of agents\n #make them an infected agent and put them in the agent list\n sickAgentIndex = random.randint(0, len(agentList) - 1)\n agentList[sickAgentIndex] = createAgent(agentList[sickAgentIndex], \"infected\")\n print(\"The sick agent is \" + agentList[sickAgentIndex].getAgentId())\n sickAgents.append(agentList[sickAgentIndex])\n for x in range(len(susceptibleAgents)):\n if (x == sickAgentIndex):\n susceptibleAgents.remove(susceptibleAgents[x])\n biggestWave = 0\n timeOfBiggestWave = 0\n\n\n #randomly generate 10% of the total agents and set their vaccination status to true\n numberToVaccinate = int(proportionVaccinated * numberOfAgents)\n for x in range(numberToVaccinate):\n vaccinatedAgentIndex = random.randint(0, len(agentList)-1)\n while(agentList[vaccinatedAgentIndex].getHealth() == \"infected\"):\n vaccinatedAgentIndex = random.randint(0, len(agentList)-1)\n while(agentList[vaccinatedAgentIndex].getVaccinationStatus() == True):\n vaccinatedAgentIndex = random.randint(0, len(agentList)-1)\n agentList[vaccinatedAgentIndex].setVaccinationStatus()\n\n #This will store the (x,y) data collected from the simulation - x will measure time and y will\n #measure the changing data\n survivalDataX = []\n survivalDataY = []\n infectedDataX = []\n infectedDataY = []\n removedDataX = []\n removedDataY = []\n precentDiffData = []\n changeInInfectionX = []\n changeInInfectionY = []\n previousIteration = 0\n for i in range(iterations):\n counter += 1;\n #sleep for 2 seconds so you can see each iteration progress. 
Lower this to speed up the iterations or increase it to slow down the iterations.\n time.sleep(2)\n #This is the loop that would simulate agents moving on the grid\n print()\n #get a direction for each agent to move (each agent moves in a random direction, independent of the other agents)\n for x in range(len(agentList)):\n direction = whichWayToMove()\n #go through the list of agents and set their location to the new direction\n agentList[x] = setNewLocations(direction, agentList[x], rows, columns)\n print()\n print()\n matrix = createMatrix(rows, columns) #reinitialize the grid (a.k.a. clear the matrix)\n for x in range(len(agentList)):\n matrix = addAgentsToMatrix(matrix, agentList[x]) #add agents with new positions to the grid\n #go through agent list and check for collisions\n listOfCollisions = locationSame(agentList)\n #if there were any collisions, do this\n if(len(listOfCollisions) != 0):\n #go through list of collisions and see if either agent that collided was infected\n for x in range(len(listOfCollisions)):\n #checks if an agent in the collision was infected\n agent = whichAgentisInfected(listOfCollisions[x][0], listOfCollisions[x][1])\n #if no agent infected, continue through collision list\n if (agent == \"none\"): continue\n #if the first agent was sick and the second was not (a.k.a. susceptible), then make the second agent in the collision infected\n if (agent == \"agent1\" and not isSick(listOfCollisions[x][1])):\n if (listOfCollisions[x][1].getVaccinationStatus()): continue\n #find the agent that collided in the agent list and make that agent infected\n for y in range(len(agentList)):\n if (listOfCollisions[x][1].getAgentId() == agentList[y].getAgentId()):\n agentList[y] = createAgent(agentList[y], \"infected\")\n #if the second agent was sick and the first was not (a.k.a. susceptible), then make the first agent in the collision infected
 elif (agent == \"agent2\" and not isSick(listOfCollisions[x][0])):\n if (listOfCollisions[x][0].getVaccinationStatus()): continue\n #find the agent that collided in the agent list and make that agent infected\n for y in range(len(agentList)):\n if (listOfCollisions[x][0].getAgentId() == agentList[y].getAgentId()):\n agentList[y] = createAgent(agentList[y], \"infected\")\n print()\n tempAgentList = []\n tempAgent = None\n flag = False\n finishedBecauseAllDead = False\n finishedBecauseAllSafe = False\n for x in range(len(agentList)):\n if (len(susceptibleAgents) == 0):\n finishedBecauseAllDead = True\n flag = True\n break\n if (agentList[x].getHealth() == \"infected\" and locationInList(agentList[x].getAgentId(), sickAgents) is not None):\n if (timeBeforeDeath == agentList[x].getTimeOfSickness()):\n tempAgent = agentList[x]\n for y in range(len(sickAgents)):\n if (tempAgent.getAgentId() == sickAgents[y].getAgentId()):\n sickAgents.remove(tempAgent)\n break\n tempAgent = createAgent(tempAgent, \"removed\")\n removedAgents.append(tempAgent)\n else:\n agentList[x].increaseTimeOfSickness()\n elif (agentList[x].getHealth() == \"infected\" and locationInList(agentList[x].getAgentId(), sickAgents) is None):\n sickAgents.append(agentList[x])\n for y in range(len(susceptibleAgents)):\n if (agentList[x].getAgentId() == susceptibleAgents[y].getAgentId()):\n susceptibleAgents.remove(susceptibleAgents[y])\n break\n if (tempAgent is not None):\n if (len(tempAgentList) != 0):\n for x in range(len(tempAgentList)):\n if (tempAgent.getAgentId() == tempAgentList[x].getAgentId()):\n tempAgentList.remove(tempAgentList[x])\n else:\n for i in range(len(agentList)):\n if (agentList[i].getAgentId() != tempAgent.getAgentId()):\n tempAgentList.append(agentList[i])\n\n tempAgent = None\n if (len(tempAgentList) > 0): agentList = tempAgentList\n newSickAgents = []\n for x in range(len(sickAgents)):\n if (sickAgents[x].getTimeOfSickness() < timeBeforeDeath): newSickAgents.append(sickAgents[x])\n agentList = susceptibleAgents + newSickAgents\n if(len(newSickAgents) > biggestWave):\n biggestWave = len(newSickAgents)\n timeOfBiggestWave = counter\n if (len(newSickAgents) == 0):\n finishedBecauseAllSafe = True\n flag = True\n break\n displayGrid(matrix, agentList) #print new grid\n if (flag): break\n print(\"At the end of Time: \" + str(counter) + \", here are the stats: \")\n print(colored(\"--------------------------------------------------------\", 'red'))\n print(\"The number of Susceptible Agents is : \" + str(len(susceptibleAgents)))\n print(\"The number of Infected Agents is : \" + str(len(newSickAgents)))\n if (len(removedAgents) + len(newSickAgents) + len(susceptibleAgents) != numberOfAgents):\n numberOfRemovedAgents = numberOfAgents - len(susceptibleAgents) - len(newSickAgents)\n print(\"The number of Removed Agents is : \" + str(numberOfRemovedAgents))\n else:\n numberOfRemovedAgents = len(removedAgents)\n print(\"The number of Removed Agents is : \" + str(len(removedAgents)))\n survivalDataX.append(counter)\n survivalDataY.append(len(susceptibleAgents))\n infectedDataX.append(counter)\n infectedDataY.append(len(newSickAgents))\n removedDataX.append(counter)\n removedDataY.append(numberOfRemovedAgents)\n changeInInfectionX.append(counter)\n
 if (counter == 1): changeInInfectionY.append(len(newSickAgents))\n else:\n #repeat the previous ratio when the count is unchanged, and also when the\n #previous count was zero, to avoid dividing by zero\n if (previousIteration == len(newSickAgents) or previousIteration == 0):\n valueToAppend = len(changeInInfectionY) - 1\n changeInInfectionY.append(changeInInfectionY[valueToAppend])\n else:\n changeInInfectionY.append(len(newSickAgents)/previousIteration)\n\n previousIteration = len(newSickAgents)\n print()\n print()\n if (finishedBecauseAllDead):\n print(colored(\"**************************************************************\", 'red'))\n print(colored(\" SIMULATION TERMINATED - NO MORE HEALTHY AGENTS REMAINING\", 'red'))\n print(colored(\"**************************************************************\", 'red'))\n if (finishedBecauseAllSafe):\n print(colored(\"**************************************************************\", 'red'))\n print(colored(\" SIMULATION TERMINATED - NO MORE SICK AGENTS REMAINING\", 'red'))\n print(colored(\"**************************************************************\", 'red'))\n\n print()\n print(colored(\"SIMULATION COMPLETED\", 'yellow'))\n print(\"--------------------\")\n print()\n print(\"After \" + str(counter) + \" iterations, the simulator produced the following results:\")\n print()\n print(\"Number of Susceptible: \\t\\t\\t\" + str(len(susceptibleAgents)))\n print(\"Number of Infected: \\t\\t\\t\" + str(len(newSickAgents)))\n if (len(removedAgents) + len(newSickAgents) + len(susceptibleAgents) != numberOfAgents):\n removed = (numberOfAgents - len(susceptibleAgents) - len(newSickAgents))\n print(\"Number of Removed : \\t\\t\\t\" + str(removed))\n else:\n removed = len(removedAgents)\n print(\"Number of Removed : \\t\\t\\t\" + str(len(removedAgents)))\n print()\n if (len(susceptibleAgents) == 0): percentRemoved = 1.00\n else: percentRemoved = (removed+(len(newSickAgents)))/numberOfAgents\n percentSurvived = len(susceptibleAgents)/numberOfAgents\n print(format(percentRemoved * 100, ',.0f') + \"% of the population was killed by \" + nameOfDisease)\n print(format(percentSurvived*100, ',.0f') + \"% of the population survived \" + nameOfDisease)\n print(\"At its peak \" + str(biggestWave) + \" agents were infected by \" + nameOfDisease + \" at time \" + str(timeOfBiggestWave))\n averageRate = 0\n for x in range(len(changeInInfectionY)):\n averageRate += changeInInfectionY[x]\n averageRate = averageRate/len(changeInInfectionY)\n print(\"The average rate of infection is : \" + str(averageRate))\n print()\n print(colored(\"SIMULATION TERMINATED\", 'yellow'))\n print(\"--------------------\")\n print()\n plt.plot(survivalDataX, survivalDataY)\n plt.xlabel(\"Time(t)\")\n plt.ylabel(\"# Susceptible Agents\")\n plt.title(\"Change in Susceptible Population after introduction of \" + nameOfDisease)\n plt.axis([0, counter, 0, numberOfAgents])\n plt.show()\n plt.plot(infectedDataX, infectedDataY)\n plt.xlabel(\"Time(t)\")\n plt.ylabel(\"# Infected Agents\")\n plt.title(\"Change in Infected Population after introduction of \" + nameOfDisease)\n plt.axis([0, counter, 0, numberOfAgents])\n plt.show()\n plt.plot(removedDataX, removedDataY)\n plt.xlabel(\"Time(t)\")\n plt.ylabel(\"# Removed Agents\")\n plt.title(\"Change in Removed Population after introduction of \" + nameOfDisease)\n plt.axis([0, counter, 0, numberOfAgents])\n plt.show()\n plt.plot(changeInInfectionX, changeInInfectionY)\n plt.xlabel(\"Time(t)\")\n plt.ylabel(\"R0\")\n plt.title(\"Change in infection rate over time for \" + nameOfDisease)\n maxRate = 0\n for x in range(len(changeInInfectionY)):\n if (changeInInfectionY[x] > maxRate): maxRate = changeInInfectionY[x]\n plt.axis([0, counter, 0, maxRate])\n plt.show()\n\n
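 #Note on the last plot (an added clarification, not in the original script):\n #the \"R0\" series is the ratio of successive infected counts,\n #newSick(t) / newSick(t-1), rather than a true basic reproduction number;\n #e.g. 6 infected after 4 on the previous step plots as 6/4 = 1.5.\n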
exit = input(\"Press Any Key to Return to the Main Screen \")\n\n\n\n\n\n#Help section\ndef help():\n selection = \" \"\n while(selection != \"q\"):\n selection = input(\"Welcome to the COMP4206 Epidemic Simulator, developed by Ravi Gupta and Shruti Bahl. In this simulator, you can view the spread of infections in an NxN matrix. When you select the run option (2), you will be allowed to enter in the size of the matrix, the number of agents to display, and how many steps the agents should make. The matrix will display and then an agent will be randomly infected. You will then see the matrix after each iteration so you can track their progress. Press q to return to the menu: \")\n print()\n\n\n#object definitions\nclass agent(object):\n def __init__(self,params=None):\n self.id = params\n self.location = (0, 0)\n self.health = None\n self.timeOfSickness = 0\n self.vaccination = False\n def setAgentLocation(self, x, y):\n locationx = x\n locationy = y\n self.location = (locationx, locationy)\n return self.location\n def setAgentId(self, agentID):\n self.id = agentID\n def getAgentId(self):\n return self.id\n def getLocation(self):\n return self.location\n def getHealth(self):\n return self.health\n def setHealth(self, status):\n self.health = status\n def increaseTimeOfSickness(self):\n self.timeOfSickness += 1\n def getTimeOfSickness(self):\n return self.timeOfSickness\n def setVaccinationStatus(self):\n self.vaccination = True\n def getVaccinationStatus(self):\n return self.vaccination\n\nclass SusceptibleAgent(agent):\n def __init__(self,params=None):\n super().__init__()\n self.health = \"susceptible\"\n\nclass InfectedAgent(agent):\n def __init__(self,params=None):\n super().__init__()\n self.health = \"infected\"\n\nclass RemovedAgent(agent):\n def __init__(self,params=None):\n super().__init__()\n self.health = \"removed\"\n\n\n\n#menu\ndef main():\n selection = 0\n while(selection != 3):\n print(\"Welcome to The COMP 4206 Epidemic Simulator!\")\n print(\"Menu\")\n print(\"(1) -- Help\")\n print(\"(2) -- Run\")\n print(\"(3) -- Quit\")\n print()\n selection = input(\"Choose a menu option: \")\n while (selection == \"q\" or selection == \"Q\"):\n selection = input(\"Invalid entry. 
Try again: \")\n selection = int(selection)\n if (selection == 1): help()\n if (selection == 2): runSimulation()\n if (selection == 3): print(\"Goodbye!\")\n\nmain()\n","sub_path":"simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":28732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"12369339","text":"import os\nfrom pygame import image\n\n_sprites = {}\n\n\ndef canonical(path: str,) -> str:\n return path.replace(\"/\", os.sep).replace(\"\\\\\", os.sep)\n\n\ndef get_image(path: str,):\n global _sprites\n sprite = _sprites.get(path)\n if not sprite:\n sprite = image.load(canonical(path))\n _sprites[path] = sprite\n return sprite\n","sub_path":"system/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"426779238","text":"from ua.univer.lesson07.inheritance.vehicle import *\n\n\nclass Vehicles:\n def __init__(self,vehicles =[]):\n self.vehicles = vehicles\n\n def add_vehicle(self, vehicle):\n for vehicle in self.vehicles:\n if isinstance(vehicle, CCar) or isinstance(vehicle, CShip) or isinstance(vehicle, CPlane):\n self.vehicles.append(vehicle)\n return self.vehicles\n\n def get_maxprice(self):\n max = self.vehicles[0][1]\n for max_price in self.vehicles :\n if max_price[1] > max :\n max = max_price[1]\n return max, max_price\n\n def get_minprice(self):\n min = self.vehicles[0][1]\n for min_price in self.vehicles :\n if min_price[1] < min :\n min = min_price[1]\n return min\n\n def get_price_less(self, less_price = 10000, after_year = 2000):\n for item in self.vehicles:\n if item[1] < less_price and item[3] > after_year:\n return item\n\n\n def get_class_objects(self):\n car_list =[]\n plane_list = []\n car_count = 0\n plane_count = 0\n for machine in self.vehicles:\n if isinstance(machine, CCar):\n car_count+=1\n car_list.append(machine)\n if isinstance(machine, CPlane):\n plane_count+=1\n plane_list.append(machine)\n print('the car objects are ', len(car_list))\n print('the plane objects are ', len(plane_list))\n return len(car_list), len(plane_list)\n # print('the plane objects are ')\n # return len(car_list)\n # return car_list\n\n def __repr__(self):\n return f\"{self.vehicles}\"\n\nif __name__ == '__main__':\n # vehicles_list = Vehicles()\n #\n # plane1 = \"12°05'20'\", 6720, 235, 2007, 2100, 59\n # car1 = \"14°08'70'\", 7678, 80, 1995\n # ship1 = \"14°08'70'\", 7678, 80, 1900, 'Cuba', 520\n # plane2 = \"12°05'20'\", 56596, 235, 1958, 2100, 59\n # car2 = \"14°08'70'\", 7378, 80, 1995\n # ship2 = \"14°08'70'\", 7678, 80, 1900, 'Chernomorsk', 520\n #\n # vehicles = [plane1,car1, ship1, plane2, car2, ship2]\n # for each in vehicles:\n # print(each)\n # vehicles_list.add_vehicle(each)\n # print(vehicles_list)\n #\n # print(vehicles_list.get_minprice())\n # print(vehicles_list.get_maxprice())\n # print(vehicles_list.get_price_less())\n\n\n plane11 = CPlane(\"12°05'20'\", 56576, 235, 1958, 2100, 59)\n car11 = CCar(\"14°08'70'\", 7678, 80, 1995)\n ship11 = CShip(\"14°08'70'\", 7678, 80, 1900, 'Cuba', 520)\n plane22 = CPlane(\"12°05'20'\", 56576, 235, 1958, 2100, 59)\n car22 = CCar(\"14°08'70'\", 67678, 90, 1995)\n ship22 = CShip(\"14°08'70'\", 7678, 80, 1900, 'Chernomorsk', 520)\n # Test_Vehicle.test_get_minprice(vehicles.get_minprice())\n\n\n machines_list = Vehicles([])\n machines = [plane11, car11, ship11, plane22, car22, ship22]\n for machine in machines :\n print(machine)\n 
machines_list.add_vehicle(machine)\n print(machines_list)\n # print('the car objects are ')\n print(machines_list.get_class_objects())\n print(machines_list.get_maxprice())\n\n","sub_path":"univer/lesson07/inheritance/veh.py","file_name":"veh.py","file_ext":"py","file_size_in_byte":3153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"566799063","text":"import os\nimport sys\nfrom sqlalchemy.sql import text\nfrom typing import List\nimport sqlalchemy\n\n\"\"\"\n Example\n APILogicServer run --project_name='~/dev/servers/sqlserver-types' --db_url='mssql+pyodbc://sa:posey386!@localhost:1433/SampleDB?driver=ODBC+Driver+17+for+SQL+Server?trusted_connection=no' --extended_builder='*'\n\"\"\"\n\n\ndef log(msg: any) -> None:\n print(msg, file=sys.stderr)\n\n\nlog(\"Extended builder 1.2\")\n\n\nclass DotDict(dict):\n \"\"\" dot.notation access to dictionary attributes \"\"\"\n # thanks: https://stackoverflow.com/questions/2352181/how-to-use-a-dot-to-access-members-of-dictionary/28463329\n __getattr__ = dict.get\n __setattr__ = dict.__setitem__\n __delattr__ = dict.__delitem__\n\n\nclass TvfBuilder(object):\n\n def __init__(self, db_url, project_directory):\n\n self.db_url = db_url\n self.project_directory = project_directory\n\n self.number_of_services = 0\n\n self.tvf_services = []\n ''' TVFs have cols, SCFs do not '''\n\n self.tvf_contents = \"\"\"# coding: utf-8\nfrom sqlalchemy import Boolean, Column, DECIMAL, DateTime, Float, ForeignKey, Integer, LargeBinary, String, Table, Text, UniqueConstraint, text\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.sql.sqltypes import NullType\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom flask_sqlalchemy import SQLAlchemy\nfrom safrs import SAFRSAPI, jsonapi_rpc\nfrom safrs import JABase, DB\n\n########################################################################################################################\n# Classes describing database for SqlAlchemy ORM, initially created by schema introspection.\n#\nfrom safrs import SAFRSBase\n\nimport safrs\ndb = safrs.DB\n\nBase = db.Model\nmetadata = Base.metadata\n\nNullType = db.String # datatype fixup\nTIMESTAMP= db.TIMESTAMP\n\nfrom sqlalchemy.dialects.mysql import *\n\n########################################################################################################################\n\n\"\"\"\n\n def build_tvf_class(self, cols: List[DotDict]):\n\n self.tvf_services.append(cols[0].Function)\n\n self.tvf_contents += f't_{cols[0].Function} = Table( # define result for {cols[0].Function}\\n'\n self.tvf_contents += f'\\t\"{cols[0].Function}\", metadata,\\n'\n col_count = 0\n for each_col in cols:\n self.tvf_contents += f'\\tColumn(\"{each_col.Column}\", '\n if each_col.Data_Type == \"int\":\n self.tvf_contents += f'Integer)'\n elif each_col.Data_Type == \"nvarchar\":\n self.tvf_contents += f'String({each_col.Char_Max_Length}))'\n else: # TODO - support additional data types\n self.tvf_contents += f'String(8000))'\n col_count += 1\n if col_count < len(cols):\n self.tvf_contents += \",\\n\"\n else:\n self.tvf_contents += \")\\n\"\n self.tvf_contents += f'\\n\\n'\n\n def get_os_url(self, url: str) -> str:\n \"\"\" idiotic fix for windows (\\ --> \\\\\\\\) \"\"\"\n return url.replace('\\\\', '\\\\\\\\')\n\n def build_tvf_service(self, args: List[DotDict]):\n if args[0].ObjectName not in self.tvf_services:\n log(f'.. 
Skipping Scalar Value Function: {args[0].ObjectName}')\n else:\n self.tvf_contents += f'class {args[0].ObjectName}(JABase):\\n'\n self.tvf_contents += f'\\t\"\"\"\\n\\t\\tdescription: define service for {args[0].ObjectName}\\n\\t\"\"\"\\n\\n'\n self.tvf_contents += f'\\t_s_type = \"{args[0].ObjectName}\"\\n\\n'\n self.tvf_contents += f\"\\t@staticmethod\\n\"\n self.tvf_contents += f\"\\t@jsonapi_rpc(http_methods=['POST'], valid_jsonapi=False)\\n\"\n\n # def udfEmployeeInLocationWithName(location, Name):\n self.tvf_contents += f\"\\tdef {args[0].ObjectName}(\"\n arg_number = 0\n has_args = args[0].ParameterName is not None\n if has_args:\n for each_arg in args:\n self.tvf_contents += each_arg.ParameterName[1:]\n arg_number += 1\n if arg_number < len(args):\n self.tvf_contents += \", \"\n self.tvf_contents += \"):\\n\"\n self.tvf_contents += f'\\t\\t\"\"\"\\n'\n self.tvf_contents += f\"\\t\\tdescription: expose TVF - {args[0].ObjectName}\\n\"\n self.tvf_contents += f\"\\t\\targs:\\n\"\n if has_args:\n for each_arg in args:\n self.tvf_contents += f'\\t\\t\\t{each_arg.ParameterName[1:]} : value\\n'\n self.tvf_contents += f'\\t\\t\"\"\"\\n'\n\n # sql_query = db.text(\"SELECT * FROM udfEmployeeInLocationWithName(:location, :Name)\")\n self.tvf_contents += f'\\t\\tsql_query = db.text(\"SELECT * FROM {args[0].ObjectName}(' # :arg)\")\\n'\n arg_number = 0\n if has_args:\n for each_arg in args:\n self.tvf_contents += \":\" + each_arg.ParameterName[1:]\n arg_number += 1\n if arg_number < len(args):\n self.tvf_contents += \", \"\n self.tvf_contents += ')\")\\n'\n\n # query_result = db.engine.execute(sql_query, location=location, Name=Name)\n self.tvf_contents += f'\\t\\tquery_result = db.engine.execute(sql_query, ' # arg=arg)\\n'\n arg_number = 0\n if has_args:\n for each_arg in args:\n self.tvf_contents += each_arg.ParameterName[1:] + \"=\" + each_arg.ParameterName[1:]\n arg_number += 1\n if arg_number < len(args):\n self.tvf_contents += \", \"\n self.tvf_contents += \")\\n\"\n self.tvf_contents += f'\\t\\tresult = query_result.fetchall()\\n'\n self.tvf_contents += '\\t\\treturn {\"result\" : list(result)}\\n'\n self.tvf_contents += f'\\n\\n'\n\n def write_tvf_file(self):\n \"\"\" write tvf_contents -> api/tvf.py \"\"\"\n file_name = self.get_os_url(self.project_directory + '/api/tvf.py')\n tvf_file = open(file_name, 'w')\n tvf_file.write(self.tvf_contents)\n tvf_file.close()\n\n def append_expose_services_file(self):\n \"\"\" append import to -> append_expose_services_file \"\"\"\n import_statement = f'\\n\\n from api import tvf\\n'\n import_statement += f' tvf.expose_tvfs(api)\\n'\n file_name = self.get_os_url(self.project_directory + '/api/customize_api.py')\n expose_services_file = open(file_name, 'a')\n expose_services_file.write(import_statement)\n expose_services_file.close()\n\n def run(self):\n \"\"\" call by ApiLogicServer CLI -- scan db_url schema for TVFs, create api/tvf.py\n for each TVF:\n class t_ -- the model\n class -- the service\n\n \"\"\"\n print(f'extended_builder.extended_builder(\"{self.db_url}\", \"{self.project_directory}\"')\n\n cols_sql = \"\" \\\n \"SELECT TABLE_CATALOG AS [Database], TABLE_SCHEMA AS [Schema], TABLE_NAME AS [Function], \" \\\n \"COLUMN_NAME AS [Column], DATA_TYPE AS [Data_Type], CHARACTER_MAXIMUM_LENGTH AS [Char_Max_Length] \" \\\n \"FROM INFORMATION_SCHEMA.ROUTINE_COLUMNS \" \\\n \"WHERE TABLE_NAME IN \" \\\n \"(SELECT ROUTINE_NAME FROM INFORMATION_SCHEMA.ROUTINES WHERE ROUTINE_TYPE = 'FUNCTION' AND DATA_TYPE = 'TABLE') \" \\\n \"ORDER BY TABLE_NAME, 
COLUMN_NAME;\"\n engine = sqlalchemy.create_engine(self.db_url, echo=False) # sqlalchemy sqls...\n cols = []\n current_table_name = \"\"\n with engine.connect() as connection:\n result = connection.execute(text(cols_sql))\n for row_dict in result:\n row = DotDict(row_dict)\n log(f'col row: {row}, database: {row.Database}')\n function_name = row.Function\n if function_name != current_table_name:\n if len(cols) > 0:\n self.number_of_services += 1\n self.build_tvf_class(cols)\n current_table_name = function_name\n cols = []\n cols.append(row)\n\n # connection.close()\n engine.dispose() # fixed some no-result errors\n\n if len(cols) > 0:\n self.number_of_services += 1\n self.build_tvf_class(cols)\n\n args_sql = \"SELECT \" \\\n \"SCHEMA_NAME(SCHEMA_ID) AS [Schema]\" \\\n \",SO.name AS [ObjectName]\" \\\n \",SO.Type_Desc AS [ObjectType (UDF/SP)]\" \\\n \",P.parameter_id AS [ParameterID]\" \\\n \",P.name AS [ParameterName]\" \\\n \",TYPE_NAME(P.user_type_id) AS [ParameterDataType]\" \\\n \",P.max_length AS [ParameterMaxBytes]\" \\\n \",P.is_output AS [IsOutPutParameter]\" \\\n \" FROM sys.objects AS SO\" \\\n \" LEFT OUTER JOIN sys.parameters AS P ON SO.OBJECT_ID = P.OBJECT_ID\" \\\n \" WHERE SO.Type_Desc = 'SQL_INLINE_TABLE_VALUED_FUNCTION'\" \\\n \" OR SO.Type_Desc = 'SQL_TABLE_VALUED_FUNCTION'\" \\\n \" ORDER BY [Schema], SO.name, P.parameter_id\"\n args = []\n current_object_name = \"\"\n\n with engine.connect() as connection:\n result = connection.execute(text(args_sql))\n for row_dict in result:\n row = DotDict(row_dict)\n log(f'arg row: {row}, database: {row.Database}')\n object_name = row.ObjectName\n if object_name != current_object_name:\n if len(args) > 0:\n self.build_tvf_service(args)\n current_object_name = object_name\n args = []\n args.append(row)\n # connection.close()\n if len(args) > 0:\n self.build_tvf_service(args)\n\n self.tvf_contents += f'def expose_tvfs(api):\\n'\n for each_service in self.tvf_services:\n self.tvf_contents += f'\\tapi.expose_object({each_service})\\n'\n self.tvf_contents += f'\\n# {self.number_of_services} services created.\\n'\n\n self.write_tvf_file()\n\n self.append_expose_services_file()\n\n\ndef extended_builder(db_url, project_directory):\n \"\"\" called by ApiLogicServer CLI -- scan db_url schema for TVFs, create api/tvf.py\n for each TVF:\n class t_ -- the model\n class -- the service\n args\n db_url - use this to open the target database, e.g. for meta data\n project_directory - the created project... create / alter files here\n \"\"\"\n log(f'extended_builder.extended_builder(\"{db_url}\", \"{project_directory}\"')\n tvf_builder = TvfBuilder(db_url, project_directory)\n tvf_builder.run()\n","sub_path":"api_logic_server_cli/extended_builder.py","file_name":"extended_builder.py","file_ext":"py","file_size_in_byte":10916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"55996611","text":"\"\"\"vemdr_blade URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.conf.urls import url\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', views.home, name=\"home\"),\n path('masterfile', views.masterfile, name=\"masterfile\"),\n\n path('admin_invite', views.admin_invite, name=\"admin_invite\"), \n path('admin_confirmation', views.admin_confirmation, name=\"admin_confirmation\"), \n\n path('contact_acknowledgement', views.contact_acknowledgement, name=\"contact_acknowledgement\"),\n path('story_acknowledgement', views.story_acknowledgement, name=\"story_acknowledgement\"),\n\n path('mailing_list_signup', views.mailing_list_signup, name=\"mailing_list_signup\"),\n path('sample_session_signup', views.sample_session_signup, name=\"sample_session_signup\"),\n path('bibeats_signup', views.bibeats_signup, name=\"bibeats_signup\"),\n path('webinar_signup', views.webinar_signup, name=\"webinar_signup\"),\n path('ebook_signup', views.ebook_signup, name=\"ebook_signup\"), \n\n path('therapist_listing', views.therapist_listing, name=\"therapist_listing\"),\n path('therapist_delisting', views.therapist_delisting, name=\"therapist_delisting\"), \n\n path('welcome_regular', views.welcome_regular, name=\"welcome_regular\"),\n path('welcome_coaching', views.welcome_coaching, name=\"welcome_coaching\"),\n path('welcome_gift', views.welcome_gift, name=\"welcome_gift\"),\n path('welcome_gift_sender', views.welcome_gift_sender, name=\"welcome_gift_sender\"),\n path('welcome_access_code', views.welcome_access_code, name=\"welcome_access_code\"),\n path('welcome_first_responder', views.welcome_first_responder, name=\"welcome_first_responder\"),\n\n path('coaching_confirmation', views.coaching_confirmation, name=\"coaching_confirmation\"),\n path('coaching_reminder', views.coaching_reminder, name=\"coaching_reminder\"),\n path('coaching_noshow', views.coaching_noshow, name=\"coaching_noshow\"), \n \n path('create_profile', views.create_profile, name=\"create_profile\"),\n path('password_reset', views.password_reset, name=\"password_reset\"),\n path('update_profile', views.update_profile, name=\"update_profile\"),\n path('change_plan', views.change_plan, name=\"change_plan\"),\n path('account_lock', views.account_lock, name=\"account_lock\"),\n\n path('trial_end', views.trial_end, name=\"trial_end\"),\n path('gift_end', views.gift_end, name=\"gift_end\"),\n path('renewal_upcoming', views.renewal_upcoming, name=\"renewal_upcoming\"),\n path('renewal_confirmation', views.renewal_confirmation, name=\"renewal_confirmation\"),\n\n path('cancellation', views.cancellation, name=\"cancellation\"),\n]\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"49120781","text":"#\n# Copyright (c) 2008-2021, Hazelcast, Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n########################################################################\n#\n# Gaussian prediction mechanism.\n#\n# ----------------------------------------------------------------------\n# Input:\n# CSV data with a control command.\n# If CSV begins \"data,\" the next fields are the clickstream key, two timestamps, \n# then 23 true/false values (0==false, 1==true) for clickstream actions.\n# Eg. \"data,neil,123,456,1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1\"\n# If the CSV begins with anything else, it is ignored\n# ----------------------------------------------------------------------\n# Output:\n# CSV data with a control command\n# For input command \"data,\" output is \"data,\" + clickstream key and timestamps,\n# the model version, and finally buy or not-buy prediction (0==not-buy, 1==buy). \n# So for above input the output \"neil,123,456,somethig,1\" for a buy prediction\n# ------------s----------------------------------------------------------\n########################################################################\nimport numpy as numpy\nimport pandas as pandas\nfrom sklearn.model_selection import train_test_split\nimport sklearn.metrics\nfrom sklearn.naive_bayes import GaussianNB\n\n# Replaced by Maven\nversion = \"@maven.build.timestamp@\"\ncol = ['basket_icon_click', 'basket_add_list',\n 'basket_add_detail', 'sort_by', 'image_picker', 'account_page_click',\n 'promo_banner_click', 'detail_wishlist_add', 'list_size_dropdown', \n 'closed_minibasket_click', 'checked_delivery_detail', \n 'checked_returns_detail', 'sign_in', 'saw_checkout', \n 'saw_sizecharts', 'saw_delivery', 'saw_account_upgrade',\n 'saw_homepage', 'device_mobile', 'device_computer', 'device_tablet',\n 'returning_user', 'loc_uk', 'ordered']\n\ndef predict(input_list):\n global version\n global col\n result = []\n\n print('pip freeze!')\n print('pip freeze!')\n print('pip freeze!')\n for entry in input_list:\n values = entry.replace(\", \", \",\").split(\",\")\n if values[0] == \"data\":\n # append values to features\n key = values[1]\n publish = values[2]\n ingest = values[3]\n diagnostic = \"\"\n values = [int(it) for it in values[4:]]\n\n # FIXME\n results = []\n\n print(\"len\", len(col), len(values))\n numpy_array = numpy.array(results)\n df = pandas.DataFrame(numpy_array, columns=col)\n\n correlation = df.corr()['ordered'].tolist()\n to_drop = []\n for i in range(len(correlation)):\n if correlation[i] < 0:\n to_drop.append(col[i])\n\n to_drop.append('ordered')\n predictors = df.drop(to_drop, axis=1)\n targets = df.ordered\n try: \n print(\"A:\")\n X_train, X_test, y_train, y_test = train_test_split(predictors, targets, test_size=.3)\n\n print(\"A1:\")\n classifier=GaussianNB()\n print(\"A2:\")\n classifier=classifier.fit(X_train,y_train)\n\n print(\"B:\")\n predictions=classifier.predict(X_test)\n\n print(\"C:\")\n predictors['propensity'] = classifier.predict_proba(predictors)\n accuracy = 
str(sklearn.metrics.accuracy_score(y_test, predictions))\n\n output = predictors.values.tolist()\n print(\"D:\")\n for val in output:\n val.insert(0, accuracy)\n\n print(\"E:\")\n resultX = [str(val[0]) for val in output]\n\n print(\"F:\", str(resultX))\n #return result\n except: \n print(\"except:\")\n #return[str(1.0) for _ in input_list]\n\n result.append(str(key) + \",\" + str(publish) + \",\" + str(ingest) + \",\" + version + \",\" + str(prediction[0]) + \",\" + diagnostic)\n else:\n raise NotImplementedError('Unexpected control', values[0])\n\n return result\n","sub_path":"retail/clickstream/job-gaussian/src/main/resources/python/gaussian_predict2.py","file_name":"gaussian_predict2.py","file_ext":"py","file_size_in_byte":4669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"382925848","text":"from utilities import *\nimport random\nMCL_data = pickle.load(open('MCL_data11_18_2015v0.9.dict'))\npid = 'MCL041'\none_gene_path = '/scratch/users/bchen45/HLA_prediction/IEDB/test0/human_proteinome_oneline.str'\nonegenestr = pickle.load(open(one_gene_path,'r'))\nlen_one = len(onegenestr)\npath0 = 'secondary_prediction/'\nset_1 = []\nset_2 = []\nset_r = []\nfor type0 in ['MHC1','MHC2']:\n file_random = path0+type0+'random_protein.fasta'\n file_random0 = open(file_random,'w+')\n name0 = type0+'_frag'\n file_MHC1_name = path0+ pid+name0+'.fasta'\n file_out = open(file_MHC1_name,'w+')\n set0 = set(MCL_data[pid][name0])\n for x in set0:\n file_out.write('>pid\\n'+x+'\\n')\n rand0 = random.randint(0,len_one)\n neg0 = onegenestr[rand0:rand0+len(x)]\n file_random0.write('>pid\\n'+neg0+'\\n')\n file_out.close()\n file_random0.close()\n\n\n \n \n \n","sub_path":"scripts_post_MCL_data/make_scratch_input.py","file_name":"make_scratch_input.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"232106974","text":"import numpy as np\n\n\nclass SimilarityCalculator:\n def __init__(self, dataset: np.ndarray, mu=0.5, sigma=0.2, a=10, b=-10):\n self.mu = mu\n self.sigma = sigma\n self.a = a\n self.b = b\n self.dataset = dataset\n\n def similarity_score(self, h1, h2):\n average_response = self._average_response(self.dataset)\n distinctive_score = self._distinctive_score(average_response)\n matched_features = SimilarityCalculator._match_features(h1, h2)\n weighted_distances = SimilarityCalculator._compute_weighted_distances(matched_features, distinctive_score)\n return self._calculate_similarity_from_distances(weighted_distances)\n\n @staticmethod\n def _average_response(dataset):\n x0 = dataset.shape[0] * dataset.shape[1]\n x1 = dataset.shape[2]\n return np.average(dataset.reshape(x0, x1), axis=0)\n\n def _distinctive_score(self, h):\n a = - ((h - self.mu) ** 2) / (2 * self.sigma ** 2)\n return np.exp(a)\n\n @staticmethod\n def _match_features(m1, m2):\n matched_features = []\n for i, mi in enumerate(m1):\n absolute_distance = m2 - mi\n norms = np.linalg.norm(absolute_distance, axis=1)\n min_idx = np.argmin(norms)\n matched_features.append((mi, m2[min_idx]))\n return matched_features\n\n @staticmethod\n def _compute_weighted_distances(feature_matches, distinctive_score):\n def feature_match_weighted_distance(match):\n wd = np.matmul(distinctive_score, (match[0] - match[1]))\n return np.linalg.norm(wd)\n\n return list(map(feature_match_weighted_distance, feature_matches))\n\n def _calculate_similarity_from_distances(self, distances):\n similarities = list(map(lambda 
sk: self.a + self.b * np.log(sk), distances))\n return np.sum(similarities)\n","sub_path":"src/sdav/similarity/SimilarityCalculator.py","file_name":"SimilarityCalculator.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"489926488","text":"from typing import Any, List, Tuple\nimport unittest\n\nimport numpy as np\nimport pandas as pd\nfrom ddt import data, ddt, unpack\n\nfrom data_processing import utils\n\n\n@ddt\nclass UtilsTest(unittest.TestCase):\n\n @data((np.array([0, 0, 3, 2, 1, 0, 3, 3, 3, 3, 2]),\n pd.DataFrame([[3, 5], [0, 3], [2, 2], [1, 1]],\n columns=['labels', 'count']),\n \"test multiple labels out of order\"),\n )\n @unpack\n def test_get_top_labels(self,\n y_train: np.array,\n expected: pd.DataFrame,\n test_description: str\n ) -> None:\n \"\"\"Tests that the labels are ordered in decreasing order\"\"\"\n count_df = utils.get_top_labels(y_train)\n self.assertTrue(count_df.equals(expected), test_description)\n\n @data((np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0],\n [1, 0, 1]]),\n np.array([0, 3, 2, 0, 3, 3]),\n pd.DataFrame([[3, 3], [0, 2], [2, 1]], columns=['labels', 'count']),\n 2, np.array([[0, 0, 0], [0, 0, 1], [0, 1, 1], [1, 0, 0], [1, 0, 1]]),\n np.array([0, 3, 0, 3, 3]),\n \"Remove single element\"),\n )\n @unpack\n def test_get_top_n_labels(self,\n x_train: np.array,\n y_train: np.array,\n label_counts_df: pd.DataFrame,\n n_labels: int,\n expected_x: np.array,\n expected_y: np.array,\n test_description: str\n ) -> None:\n \"\"\"Tests that the labels are ordered in decreasing order\"\"\"\n x, y = utils.get_top_n_labels(x_train, y_train, label_counts_df, n_labels)\n self.assertTrue(np.array_equal(x, expected_x), test_description)\n self.assertTrue(np.array_equal(y, expected_y), test_description)\n\n @data((np.array([[0, 0], [0, 1], [1, 0], [1, 1]]), np.array([0, 0, 1, 2]),\n [1], np.array([[0, 0], [0, 1], [1, 1]]), np.array([0, 0, 2]),\n \"Test removal of single data element\"),\n (np.array([[0, 0], [0, 1], [1, 0], [1, 1]]), np.array([0, 0, 1, 2]),\n [], np.array([[0, 0], [0, 1], [1, 0], [1, 1]]),\n np.array([0, 0, 1, 2]),\n \"Test no filter\"),\n )\n @unpack\n def test_filter_data_based_on_labels(self,\n x: np.array,\n y: np.array,\n labels_to_filter: List[int],\n expected_x: np.array,\n expected_y: np.array,\n test_description: str\n ):\n \"\"\"Tests that the correct labels are removed\"\"\"\n x_filtered, y_filtered = (utils\n .filter_data_based_on_labels(x,\n y,\n labels_to_filter)\n )\n self.assertTrue(np.array_equal(x_filtered, expected_x),\n test_description + \" x\")\n self.assertTrue(np.array_equal(y_filtered, expected_y),\n test_description + \" y\")\n\n @data((np.array([0, 0, 3, 2, 1, 0, 3, 3, 3, 3, 2]),\n pd.DataFrame([[3, 5], [0, 3], [2, 2], [1, 1]],\n columns=['labels', 'count']),\n \"test multiple labels out of order\"),\n )\n @unpack\n def test_get_top_labels(self,\n y_train: np.array,\n expected: pd.DataFrame,\n test_description: str\n ) -> None:\n \"\"\"Tests that the labels are ordered in decreasing order\"\"\"\n count_df = utils.get_top_labels(y_train)\n self.assertTrue(count_df.equals(expected), test_description)\n\n\nif __name__ == \"__main__\":\n unittest.main()","sub_path":"data_processing/utils_test.py","file_name":"utils_test.py","file_ext":"py","file_size_in_byte":4215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} 
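The test file above pins down the expected behaviour of utils.get_top_labels without showing its body; a minimal sketch consistent with those fixtures (assuming only numpy and pandas, as in the test imports; the real implementation may differ) could be:

import numpy as np
import pandas as pd

def get_top_labels(y_train: np.ndarray) -> pd.DataFrame:
    # Count each distinct label and order by decreasing frequency; for the
    # fixture y_train above this yields [[3, 5], [0, 3], [2, 2], [1, 1]].
    labels, counts = np.unique(y_train, return_counts=True)
    df = pd.DataFrame({'labels': labels, 'count': counts})
    return df.sort_values('count', ascending=False).reset_index(drop=True)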
+{"seq_id":"168243715","text":"\"\"\"\nsentry.models.counter\n~~~~~~~~~~~~~~~~~~~~~\n\n:copyright: (c) 2010-2015 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nfrom django.db import connection\n\nfrom sentry.db.models import (\n FlexibleForeignKey, Model, sane_repr, BoundedBigIntegerField\n)\nfrom sentry.utils import db\n\n\nclass Counter(Model):\n \"\"\"\n A ReleaseFile is an association between a Release and a File.\n\n The ident of the file should be sha1(name) and must be unique per release.\n \"\"\"\n __core__ = False\n\n project = FlexibleForeignKey('sentry.Project', unique=True)\n value = BoundedBigIntegerField()\n\n __repr__ = sane_repr('project')\n\n class Meta:\n app_label = 'sentry'\n db_table = 'sentry_projectcounter'\n\n @classmethod\n def increment(cls, project, delta=1):\n \"\"\"Increments a counter. This can never decrement.\"\"\"\n return increment_project_counter(project, delta)\n\n\ndef increment_project_counter(project, delta=1):\n \"\"\"This method primarily exists so that south code can use it.\"\"\"\n if delta <= 0:\n raise ValueError('There is only one way, and that\\'s up.')\n\n cur = connection.cursor()\n try:\n if db.is_postgres():\n cur.execute('''\n select sentry_increment_project_counter(%s, %s)\n ''', [project.id, delta])\n return cur.fetchone()[0]\n elif db.is_sqlite():\n value = cur.execute('''\n insert or ignore into sentry_projectcounter\n (project_id, value) values (%s, 0);\n ''', [project.id])\n value = cur.execute('''\n select value from sentry_projectcounter\n where project_id = %s\n ''', [project.id]).fetchone()[0]\n while 1:\n cur.execute('''\n update sentry_projectcounter\n set value = value + %s\n where project_id = %s;\n ''', [delta, project.id])\n changes = cur.execute('''\n select changes();\n ''').fetchone()[0]\n if changes != 0:\n return value + delta\n elif db.is_mysql():\n cur.execute('''\n insert into sentry_projectcounter\n (project_id, value)\n values (%s, @new_val := %s)\n on duplicate key\n update value = @new_val := value + %s;\n select @new_val;\n ''', [project.id, delta, delta])\n return cur.fetchone()[0]\n else:\n raise AssertionError(\"Not implemented database engine path\")\n finally:\n cur.close()\n","sub_path":"src/sentry/models/counter.py","file_name":"counter.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"419250691","text":"# Project Euler - 001\nimport timeit as t\n\n\n\"\"\"\nNumber of divisors of x less than or equal to n is n//x.\nx = 3, n = 14 => 14//3 = 4 (3,6,9,12) Factoring x out, we get 3*(1,2,3,4).\nIf we want the sum, we can apply the n*(n+1)//2 formula and multiply by x\nin the end.\nSince 15 = 3*5, it's multiples get counted twice, so we subtract them using\nthe inclusion–exclusion principle. 
Finishes instantly.\n\"\"\"\n\n\ndef solution():\n def sum_multiples(x, n):\n mul = n // x\n return x * (mul * (mul + 1) // 2)\n limit = 1000\n a, b, c = (sum_multiples(x, limit - 1) for x in [3, 5, 15])\n return a + b - c\n\n\n# Benchmarks\nprint(\"Timing 1 run.\")\nprint(\"Inclusion–exclusion principle:\",\n t.timeit(solution, number=1), \"seconds\")\nprint(\"Answer:\", solution())\n","sub_path":"001 - 100/P001.py","file_name":"P001.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"166328118","text":"import unittest\n\nfrom lib.linkedlist import LinkedList\nfrom problem_3 import deleteMiddle\n\nclass TestDeleteMiddle(unittest.TestCase):\n\n def test_three_elements(self):\n three_elem = LinkedList()\n three_elem.add(1)\n three_elem.add(2)\n three_elem.add(3)\n node = three_elem.root.next\n deleteMiddle(node)\n\n result = LinkedList()\n result.add(1)\n result.add(3)\n\n self.assertEqual(result, three_elem)\n\n def test_four_elements(self):\n four_elem = LinkedList()\n four_elem.add(1)\n four_elem.add(2)\n four_elem.add(3)\n four_elem.add(4)\n\n node = four_elem.root.next.next\n deleteMiddle(node)\n\n result = LinkedList()\n result.add(1)\n result.add(2)\n result.add(4)\n\n self.assertEqual(result, four_elem)\n\n def test_different_nums(self):\n diff_nums = LinkedList()\n diff_nums.add(3)\n diff_nums.add(4)\n diff_nums.add(2)\n diff_nums.add(1)\n diff_nums.add(7)\n\n node = diff_nums.root.next.next\n deleteMiddle(node)\n\n result = LinkedList()\n result.add(3)\n result.add(4)\n result.add(1)\n result.add(7)\n self.assertEqual(result, diff_nums)\n","sub_path":"Chapter2/test_problem_3.py","file_name":"test_problem_3.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"156948568","text":"\"\"\"\nZoneLoad class - post-process zone load data\nand form it into a pandas dataframe\n\"\"\"\n\ntry:\n import pandas as pd\nexcept ImportError:\n pd = None\n print('pandas is not installed')\n\n\nclass ZoneLoad:\n def __init__(self, load_profile):\n \"\"\"\n Construct zoneload class\n\n :param load_profile: data returned from zone_load api call\n \"\"\"\n # reform the dict\n index_list = list()\n self._cooling_unit = ''\n self._heating_unit = ''\n self._cooling_density_unit = ''\n self._heating_density_unit = ''\n data = list()\n for d_dict in load_profile:\n data_dict = dict()\n if len(d_dict.keys()) == 1:\n continue\n index_list.append(d_dict['zone_name'].upper())\n if self._cooling_unit == '':\n self._cooling_unit = d_dict['cooling_unit']\n if self._heating_unit == '':\n self._heating_unit = d_dict['heating_unit']\n if self._heating_density_unit == '':\n self._heating_density_unit = d_dict['heating_load_density_unit']\n if self._cooling_density_unit == '':\n self._cooling_density_unit = d_dict['cooling_load_density_unit']\n # remove name and units from the dict\n data_dict['heating_load'] = d_dict['heating_load']\n data_dict['heating_peak_load_time'] = d_dict['heating_peak_load_time']\n data_dict['cooling_load'] = d_dict['cooling_load']\n data_dict['cooling_peak_load_time'] = d_dict['cooling_peak_load_time']\n data_dict['heating_load_density'] = d_dict['heating_load_density']\n data_dict['cooling_load_density'] = d_dict['cooling_load_density']\n data.append(data_dict)\n self._df = pd.DataFrame(data, index=index_list)\n\n @property\n def cooling_load_unit(self):\n return self._cooling_unit\n\n @property\n def 
heating_load_unit(self):\n return self._heating_unit\n\n @property\n def cooling_load_density_unit(self):\n return self._cooling_density_unit\n\n @property\n def heating_load_density_unit(self):\n return self._heating_density_unit\n\n def get_df(self):\n \"\"\"get the dataframe\"\"\"\n return self._df\n\n def get_zone_heat_load(self, zone):\n zone_name = zone.upper()\n return self._df.at[zone_name, 'heating_load']\n\n def get_zone_cool_load(self, zone):\n zone_name = zone.upper()\n return self._df.at[zone_name, 'cooling_load']\n\n def get_zone_heat_load_time(self, zone):\n zone_name = zone.upper()\n return self._df.at[zone_name, 'heating_peak_load_time']\n\n def get_zone_cool_load_time(self, zone):\n zone_name = zone.upper()\n return self._df.at[zone_name, 'cooling_peak_load_time']\n","sub_path":"BuildSimHubAPI/postprocess/zone_load.py","file_name":"zone_load.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"400295838","text":"import linecache\nimport os\npath = \"results/\"\nresult = []\nfor root, dir, files in os.walk(path):\n\tfor file in files:\n\t\tprint(\"Get last line of \",file)\n\t\tif file.endswith(\".txt\"):\n\t\t\tfile_path = os.path.join(path,file)\n\t\t\ttarget = linecache.getline(file_path,18)\n\t\t\tprint(\"target:\",target)\n\t\t\tlatency = target.split(\",\")[-1]\n\t\t\tresult.append(int(latency))\n\nprint(\"RESULTs:\",result)\n","sub_path":"c_2.py","file_name":"c_2.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"587852218","text":"from django.urls import path\nfrom . import views\nfrom rest_framework.authtoken.views import obtain_auth_token\n\nurlpatterns = [\n path(\"\", views.index),\n path('create', views.create),\n path(\"login\", obtain_auth_token),\n path(\"signup\", views.api_signup),\n path('store', views.CreateBook.as_view()),\n path('list', views.ListBook.as_view()),\n path('delete/', views.delete),\n path('update/', views.update),\n path('', views.CrudBook.as_view()),\n]\n","sub_path":"store/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"427139389","text":"# -*- coding: utf-8 -*-\n# MegEngine is Licensed under the Apache License, Version 2.0 (the \"License\")\n#\n# Copyright (c) 2014-2020 Megvii Inc. 
All rights reserved.\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nimport numpy as np\nimport pytest\n\nimport megengine as mge\nimport megengine._internal as mgb\nfrom megengine.core import tensor\nfrom megengine.test import assertTensorClose\n\n\ndef test_recoverable():\n a = tensor()\n b = tensor()\n a_np = np.random.random((4, 3)).astype(\"float32\")\n b_np = np.random.random((3, 7)).astype(\"float32\")\n a.set_value(a_np)\n b.set_value(b_np)\n\n # Do some normal computation.\n a2 = a * 2\n ab = a @ b\n\n # Raise a computation error.\n with pytest.raises(mgb.MegBrainError):\n _ = a * b\n\n # Variable a2 and ab should be still usable after error happened.\n assertTensorClose(a2.numpy(), a_np * 2)\n assertTensorClose(ab.numpy(), a_np @ b_np)\n\n # Should allow computation as well.\n ab2 = ab ** 2\n assertTensorClose(ab2.numpy(), (a_np @ b_np) ** 2)\n","sub_path":"python_module/test/unit/core/test_recoverable.py","file_name":"test_recoverable.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"478775228","text":"'''\nCreated on Nov 8, 2012\n\n@author: Wighton\n'''\n\nfrom Planner import *\nfrom Renderer import *\n\nplanner = Planner(500,500)\n\nplanner.loadObstacles(open(\"/Users/Wighton/Documents/Aptana_Workspace/MotionPlanner/obstacles\", \"r\"));\nplanner.loadRobot(open(\"/Users/Wighton/Documents/Aptana_Workspace/MotionPlanner/robot\", \"r\"));\nplanner.getRoadmap()\n\nrenderer = Renderer(700, 700)\nrenderer.addSceneObject(planner)\n\nrenderer.start()","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"385539357","text":"from tareas.configuracion import *\nfrom tareas.excepciones.discapacidad_error import DiscapacidadError\nfrom requests.status_codes import codes\nfrom tareas.api.base_api import BaseApi\n\n\nclass CargaMasivaApi(BaseApi):\n recurso_obtener_registros = \"archivos/%s/registros\"\n recurso_registros_validos = \"registrosValidos\"\n recurso_archivos_validos = \"archivosValidos\"\n recurso_registros_errores = \"registrosErrores\"\n recurso_registros_procesados = \"archivos/registrosEnProceso/%s\"\n recurso_actualizar_discapacidad = \"matriculados\"\n\n def __init__(self):\n super(CargaMasivaApi, self).__init__()\n\n def obtener_archivo_por_id(self, archivo_id):\n url = DOMINIO_CARGA_MASIVA + \\\n self.recurso_obtener_registros % archivo_id\n return self.get(url, timeout=20)\n\n def agregar_registro_error(self, registro_id, error_id):\n url = DOMINIO_CARGA_MASIVA + self.recurso_registros_errores\n registro_error = {\n 'registroId': registro_id,\n 'error': {'id': error_id}\n }\n return self.post(url, json=registro_error)\n\n def guardar_registro_estado(self, registro_id):\n url = DOMINIO_CARGA_MASIVA + self.recurso_registros_validos\n registro_sin_estado = {\n 'registroId': registro_id\n }\n return self.post(url, json=registro_sin_estado)\n\n def guardar_estado_archivo(self, archivo_id):\n url = DOMINIO_CARGA_MASIVA + self.recurso_archivos_validos\n archivo_sin_estado = {\n 'archivoId': archivo_id\n }\n return self.post(url, json=archivo_sin_estado)\n\n def evaluar_estado_archivo(self, archivo_id):\n url = DOMINIO_CARGA_MASIVA + \\\n self.recurso_registros_procesados % 
archivo_id\n return self.get(url)\n\n def actualizar_discapacidad(self, registro_id):\n url = DOMINIO_CARGA_MASIVA + \\\n self.recurso_actualizar_discapacidad\n registro_discapacidad = {\n 'registroId': registro_id\n }\n respuesta = self.put(url, json=registro_discapacidad)\n if(respuesta.status_code == codes.OK):\n return True\n else:\n raise DiscapacidadError(\n ERROR_ID_SERVICIO_DISCAPACIDAD_NO_DISPONIBLE)\n","sub_path":"tareas/api/carga_masiva_api.py","file_name":"carga_masiva_api.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"315168600","text":"import os\r\nimport sys\r\nimport string\r\nimport random\r\nsys.path.insert(1, os.path.join(sys.path[0], '..'))\r\nfrom gva.utils.trace_blocks import TraceBlocks\r\nfrom gva.utils.json import parse, serialize\r\ntry:\r\n from rich import traceback\r\n traceback.install()\r\nexcept ImportError: # pragma: no cover\r\n pass\r\n\r\ndef random_string(length):\r\n return ''.join(random.choice(string.hexdigits) for i in range(length))\r\n\r\n\r\ndef test_hashes():\r\n\r\n data_hashes = []\r\n data_hashes.append(random_string(32))\r\n data_hashes.append(random_string(32))\r\n\r\n tb = TraceBlocks()\r\n tb.add_block(data_hash=data_hashes[0])\r\n tb.add_block(data_hash=data_hashes[1])\r\n blocks = parse(str(tb))\r\n\r\n previous_block = ''\r\n\r\n for index, block in enumerate(blocks):\r\n \r\n if index > 0: # the first block is a seed - it looks different\r\n\r\n # check the data is being written as expected\r\n assert block.get('data_hash') == data_hashes[index - 1]\r\n \r\n # Check the prev hash\r\n rehash = tb.hash(previous_block)\r\n assert rehash == block.get('previous_block_hash')\r\n\r\n # Check the proof - the proof is when the number prepended to the\r\n # previous block's hash and reshashed resultant hash ends with \r\n # either 0 or 5.\r\n reproof = tb.hash(''.join([block.get('proof',''), block.get('previous_block_hash', '')]))\r\n assert reproof[-1] in ['0', '5'], reproof\r\n\r\n previous_block = block\r\n\r\n print(type(serialize(tb.blocks)))\r\n\r\nif __name__ == \"__main__\":\r\n test_hashes()\r\n\r\n print('okay')\r\n","sub_path":"tests/test_tracing.py","file_name":"test_tracing.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"36046575","text":"from imutils.perspective import four_point_transform\nfrom imutils import contours\nimport numpy as np\nimport imutils\nimport cv2\n\n\n\n\n\nANSWER_KEY = {}\noutput={}\nascii_conv ={0:'A',1:'B',2:'C',3:'D',4:'E',5:'F'}\nqc = 0\nfilled_circles_contours = list()\n\n#image = cv2.imread(\"/home/prakash/projects/eduscanner-debug/web/actual.jpg\")\nimage = cv2.imread(\"/home/prakash/Downloads/Scan123.jpg\")\n\nkernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))\n\nimg_erosion = cv2.erode(image, kernel, iterations=1)\nimg_dilation = cv2.dilate(image, kernel, iterations=1)\n\n#print(image.shape())\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\nblurred = cv2.GaussianBlur(gray, (5, 5), 0)\nedged = cv2.Canny(blurred, 75, 200)\ncv2.imwrite('edged.jpg', edged)\n\n_,cnts,hierarchy = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\nprint(len(cnts))\n#cnts = [cnts[idx] for idx, val in enumerate(hierarchy[0]) if val[3] == -1]\n#cnts = cnts[0] if imutils.is_cv2() else cnts[1]\n\n\n#print(len(cnts))\n#print(type(cnts))\n\ndocCnt = []\nif len(cnts) > 0:\n cnts = sorted(cnts, 
key=cv2.contourArea, reverse=True)\n\n for c in cnts:\n peri = cv2.arcLength(c, True)\n approx = cv2.approxPolyDP(c, 0.02 * peri, True)\n\n if len(approx) == 4:\n docCnt.append(approx)\n\ndocCnt=contours.sort_contours(docCnt, method=\"left-to-right\")[0]\ndocCnt = sorted(docCnt,key= lambda docCnt : cv2.contourArea(docCnt),reverse=True)\n\nboxes=docCnt[0:2]\n\nbox = 0\nfor b in boxes:\n #print(box);\n print(b.shape)\n _,_,_,height = cv2.boundingRect(b)\n print(height)\n paper = four_point_transform(image, b.reshape(4,2))\n cv2.imwrite('actual'+str(box)+'.jpg', paper)\n warped = cv2.cvtColor(paper, cv2.COLOR_BGR2GRAY)\n warped = cv2.GaussianBlur(warped, (5, 5), 0)\n #warped = cv2.adaptiveThreshold(warped, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 21, 7)\n cv2.imwrite('warped'+str(box)+'.jpg', warped)\n #otsu = cv2.adaptiveThreshold(warped, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV ,11,2)\n ret3,otsu = cv2.threshold(warped, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\n\n # kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))\n # im = np.zeros(otsu.shape, dtype=np.uint8)\n # im[50:, 50:] = 255\n # otsu = cv2.dilate(im, kernel, iterations=1)\n\n cv2.imwrite('otsu' + str(box) + '.jpg', otsu)\n\n _,cnts,hire = cv2.findContours(otsu.copy(), cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)\n #cnts = [cnts[idx] for idx, val in enumerate(hire[0]) if val[3] !=-1]\n #cnts = cnts[0] if imutils.is_cv2() else cnts[1]\n\n print(len(cnts))\n # cv2.drawContours(paper, cnts, -1, (0, 255, 0), thickness=1)\n # cv2.imwrite('paper'+str(box)+'.jpg',paper)\n #print(type(cnts))\n\n questionCnts = []\n\n for c in cnts:\n (x, y, w, h) = cv2.boundingRect(c)\n\n ar = w / float(h)\n #print(w,h,ar)\n if w >= 70 and h >= 70 and ar >= 0.8 and ar <= 1.2:\n print(w, h, ar)\n\n questionCnts.append(c)\n\n qc=qc+len(questionCnts)\n if (len(questionCnts)<2):\n continue\n print(\"questions found \"+str(qc))\n questionCnts = contours.sort_contours(questionCnts,method=\"top-to-bottom\")[0]\n cv2.drawContours(paper, questionCnts, -1, (0, 0, 255), thickness=1)\n cv2.imwrite('paper'+str(box)+'.jpg',paper)\n pixels = []\n \n for (q, i) in enumerate(np.arange(0, len(questionCnts), 4)):\n cnts = contours.sort_contours(questionCnts[i:i + 4])[0]\n for (j, c) in enumerate(cnts):\n mask = np.zeros(otsu.shape, dtype=\"uint8\")\n cv2.drawContours(mask, [c], -1, 1, -1)\n mask = cv2.bitwise_and(otsu, otsu, mask=mask)\n total = cv2.countNonZero(mask)\n (x, y, w, h) = cv2.boundingRect(c)\n area=w*h\n white_ratio = float(total) / area\n\n if white_ratio >0.65:\n filled_circles_contours.append(c)\n print(cv2.boundingRect(c))\n print(j)\n\n box += 1\n \n\nprint(\"filled bubbles found are \"+str(len(filled_circles_contours)))\nprint(\"questions found \"+str(qc))\ncv2.drawContours(paper, filled_circles_contours, -1, (255, 0, 0), thickness=1)\n#cv2.imwrite('paper'+str(box)+'.jpg',paper)","sub_path":"src/grader2.py","file_name":"grader2.py","file_ext":"py","file_size_in_byte":4227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"240351146","text":"from flask import render_template, flash, redirect, url_for, request\nfrom flask_login import login_user, logout_user, current_user, login_required\nfrom werkzeug.urls import url_parse\nfrom app import app, db\nfrom app.forms import LoginForm,RegistrationForm,VoziloForm,ServisForm,VlasnistvoForm,ChoiceVozilo\nfrom app.forms import MajstorServisPretragaForm,VoziloServisPretragaForm, VozilaPregledForm\nfrom app.models import 
Korisnik,Vozilo,Servis,Vlasnistvo\nfrom app.tables import ResultsVoziloServis, ResultsMajstorServis, ResultVozila\n\n# === Routes for AutoFlask application ===\n\n@app.route('/')\n@app.route('/index')\n@login_required\ndef index():\n \"\"\"\n Ovo je View funkcija za potrebe realizacije aplikativne rute /index.\n \"\"\"\n return render_template('index.html', title='Home')\n\n\n@app.route('/pristup', methods=['GET', 'POST'])\ndef login():\n \"\"\"\n Ovo je View funkcija za potrebe realizacije aplikativne rute /pristup.\n \"\"\"\n if current_user.is_authenticated:\n return redirect(url_for('index'))\n form = LoginForm()\n if form.validate_on_submit():\n user = Korisnik.query.filter_by(korisnik_login=form.username.data).first()\n if user is None or not user.check_password(form.password.data):\n flash('Pogresan korisnicki nalog ili lozinka')\n return redirect(url_for('login'))\n login_user(user, remember=form.remember_me.data)\n next_page = request.args.get('next')\n if not next_page or url_parse(next_page).netloc != '':\n next_page = url_for('index')\n return redirect(next_page)\n return render_template('pristup.html', title='Sign In', form=form)\n\n\n@app.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('index'))\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('login'))\n form = RegistrationForm()\n if form.validate_on_submit():\n user = Korisnik(ime=form.ime.data,prezime=form.prezime.data,adresa_ptt=form.adresa_ptt.data, adresa_mesto=form.adresa_mesto.data, adresa_ulica_broj=form.adresa_ulica_broj.data, korisnik_email=form.korisnik_email.data,id_korisnik_tip=form.id_korisnik_tip.data, korisnik_login=form.korisnik_login.data,korisnik_pass=form.korisnik_pass.data)\n user.set_password(form.korisnik_pass.data)\n db.session.add(user)\n db.session.commit()\n flash('Bravo, upravo ste postali registrovani korisnik!')\n return redirect(url_for('login'))\n return render_template('register.html', title='Register', form=form)\n\n\n@app.route('/vozilo', methods=['GET', 'POST'])\ndef vozilo():\n if not current_user.is_authenticated:\n return redirect(url_for('login'))\n results = []\n\n form = VoziloForm()\n if form.validate_on_submit():\n vozilo = Vozilo(broj_sasije=form.broj_sasije.data,marka=form.marka.data,tip=form.tip.data)\n db.session.add(vozilo)\n db.session.commit()\n flash('Bravo, upravo ste evidentirali vozilo!')\n return redirect(url_for('index'))\n return render_template('vozilo.html', title='Vozilo', form=form)\n\n@app.route('/vozilapregled',methods=['GET','POST'])\ndef vozilapregled():\n search = VozilaPregledForm(request.form)\n if request.method == 'POST':\n return search_results_vozila(search)\n\n return render_template('vozilapregled.html', title='Vozila', form=search)\n\n@app.route('/vozilapregledrezultat',methods=['GET','POST'])\ndef search_results_vozila(search):\n if not current_user.is_authenticated:\n return redirect(url_for('login'))\n results = []\n\n #search_string = search.data['izbor_brsas']\n #results = db.session.execute(\"select id_vozilo, broj_sasije, marka, tip from vozilo where broj_sasije like ':val%';\",{'val':search_string})\n\n trazim = '%{0}%'.format(search.data['izbor_brsas'])\n results = Vozilo.query.filter(Vozilo.broj_sasije.like(trazim))\n\n if not results:\n flash('Nije pronadjen rezultat')\n return redirect(url_for('vozilapregled'))\n else:\n table = ResultVozila(results)\n table.border = True\n return render_template('vozilapregledrezultat.html', 
table=table)\n\n@app.route('/servis', methods=['GET', 'POST'])\ndef servis():\n if not current_user.is_authenticated:\n return redirect(url_for('login'))\n form = ServisForm()\n if form.validate_on_submit():\n servis = Servis(id_vozilo=form.id_vozilo.data,datum=form.datum.data,opis_radova=form.opis_radova.data, iznos_radova=form.iznos_radova.data, id_vlasnik=form.id_vlasnik.data, id_automehanicar=form.id_automehanicar.data)\n db.session.add(servis)\n db.session.commit()\n flash('Bravo, upravo ste evidentirali uradjeni servis na vozilu!')\n return redirect(url_for('index'))\n return render_template('servis.html', title='Servsi', form=form)\n\n\n@app.route('/vlasnistvo', methods=['GET', 'POST'])\ndef vlasnistvo():\n if not current_user.is_authenticated:\n return redirect(url_for('login'))\n form = VlasnistvoForm()\n if form.validate_on_submit():\n vlasnistvo = Vlasnistvo(id_vozilo=form.id_vozilo.data,datum_od=form.datum_od.data,datum_do=form.datum_do.data,id_vlasnik=form.id_vlasnik.data)\n db.session.add(vlasnistvo)\n db.session.commit()\n flash('Bravo, upravo ste evidentirali vlasnistvo nad vozilom!')\n return redirect(url_for('index'))\n return render_template('vlasnistvo.html', title='vlasnistvo', form=form)\n\n@app.route('/voziloservisrezultat')\ndef search_results( search ):\n results = []\n search_string = search.data['izbor']\n\n if search.data['izbor'] != '':\n results = db.session.execute('select vozilo.broj_sasije,servis.datum,servis.opis_radova,servis.iznos_radova\\\n from servis, vozilo\\\n where vozilo.broj_sasije = :val\\\n and vozilo.id_vozilo = servis.id_vozilo;',{'val':search_string})\n\n if not results:\n flash('Nije pronadjen rezultat')\n return redirect(url_for('voziloservis'))\n else:\n # display results\n table = ResultsVoziloServis(results)\n table.border = True\n return render_template('voziloservisrezultat.html', table=table)\n\n\n@app.route('/voziloservis', methods=['GET', 'POST'])\ndef voziloservis():\n search = VoziloServisPretragaForm(request.form)\n if request.method == 'POST':\n return search_results(search)\n\n return render_template('voziloservis.html', title='Servis', form=search)\n\n\n@app.route('/majstorservis', methods=['GET', 'POST'])\ndef majstorservis():\n search1 = MajstorServisPretragaForm(request.form)\n if request.method == 'POST':\n return search_majstorservis_results(search1)\n\n return render_template('majstorservis.html', title='Servis', form=search1)\n\n@app.route('/majstorservisrezultat')\ndef search_majstorservis_results( search ):\n results = []\n search_string = search.data['izbor_majstor']\n\n if search.data['izbor_majstor'] != '':\n results = db.session.execute('select korisnik.ime,korisnik.prezime,vozilo.broj_sasije,servis.datum,servis.opis_radova,servis.iznos_radova\\\n from korisnik, servis, vozilo\\\n where korisnik.ime = :val\\\n and servis.id_automehanicar = korisnik.id_korisnik\\\n and servis.id_vozilo = vozilo.id_vozilo;',{'val':search_string})\n\n if not results:\n flash('Nije pronadjen rezultat!')\n return redirect(url_for('majstorservis'))\n else:\n # display results\n table1 = ResultsMajstorServis(results)\n table1.border = True\n return render_template('majstorservisrezultat.html', table=table1)\n\n@app.route('/graf')\ndef graf():\n results = []\n labels = []\n values = []\n\n results = db.session.execute('select vozilo.broj_sasije,servis.iznos_radova\\\n from servis,vozilo\\\n where servis.id_vozilo = vozilo.id_vozilo\\\n group by vozilo.broj_sasije;')\n\n for row in results:\n labels.append(row[\"broj_sasije\"])\n 
values.append(row[\"iznos_radova\"])\n\n \"\"\"\n labels = [\n 'AA', 'BB'\n ]\n\n values = [\n 967.67, 1190.89, 1079.75, 1349.19,\n 2328.91, 2504.28, 2873.83, 4764.87,\n 4349.29, 6458.30, 9907, 16297\n ]\n \"\"\"\n colors = [\n \"#F7464A\", \"#46BFBD\", \"#FDB45C\", \"#FEDCBA\",\n \"#ABCDEF\", \"#DDDDDD\", \"#ABCABC\", \"#4169E1\",\n \"#C71585\", \"#FF4500\", \"#FEDCBA\", \"#46BFBD\"]\n\n\n line_labels=labels\n line_values=values\n return render_template('graf.html', title='Troskovnik vozlila', max=45000, labels=line_labels, values=line_values)\n","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":8850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"73467089","text":"# -*- coding: utf-8 -*-\n\n#############################\n##### LIBRERIAS #####\n#############################\n\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\nfrom sklearn.model_selection import GridSearchCV, train_test_split\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.gaussian_process import GaussianProcessClassifier\nfrom sklearn.gaussian_process.kernels import RBF\nfrom sklearn.ensemble import RandomForestClassifier\n\nfrom sklearn.decomposition import PCA\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.base import BaseEstimator\n\nimport warnings\n\n# --------------------------------------------------------------------------------------\n# Semilla\nSEED = 150\nnp.random.seed(SEED)\n\n# Clase que funciona como cualquier estimador\nclass ClfSwitcher(BaseEstimator):\n def __init__(\n self,\n estimator = LogisticRegression(),\n ):\n \"\"\"\n A Custom BaseEstimator that can switch between classifiers.\n :param estimator: sklearn object - The classifier\n \"\"\"\n\n self.estimator = estimator\n\n def fit(self, X, y=None, **kwargs):\n self.estimator.fit(X, y)\n return self\n\n def predict(self, X, y=None):\n return self.estimator.predict(X)\n\n def predict_proba(self, X):\n return self.estimator.predict_proba(X)\n\n def score(self, X, y):\n return self.estimator.score(X, y)\n\n# Lectura de los datos de entrenamiento\ndatos = pd.read_csv(\"./datos/OnlineNewsPopularity.csv\", delimiter = ', ', engine = 'python')\n# Quitamos los atributos no predictivos\ndatos = datos.drop(columns=['url', 'timedelta'])\nprint(datos)\n\n# Datos perdidos\ndatos_perdidos = datos.columns[datos.isnull().any()]\nprint(len(datos_perdidos))\ndatos_perdidos = datos.columns[datos.isna().any()]\nprint(len(datos_perdidos))\n\ny = datos.iloc[:, -1]\nX = datos.iloc[:, :-1]\n\ny = y.apply(lambda x: -1.0 if x < 1400 else 1.0)\n\nprint(\"Valor mínimo de las caraterísticas del conjunto de datos: {}\".format(X.values.min()))\nprint(\"Valor máximo de las caraterísticas del conjunto de datos: {}\".format(X.values.max()))\n\n# Vemos si las clases estan bien balanceadas\ny_df = pd.DataFrame(data = y)\nnumero_elementos = []\nclases = [1.0,-1.0]\nfor i in clases:\n numero_elementos.append(y_df['shares'].value_counts()[i])\n\ndf_plot = pd.DataFrame(columns= [\"Clases\", \"Número de ejemplos\"], data =[[c,n] for c, n in zip(clases,numero_elementos)])\nsns.barplot(x=\"Clases\", y =\"Número de ejemplos\", data = df_plot)\nplt.title(\"Número de ejemplos de cada clase en el 
conjunto de datos\")\nplt.show()\ninput(\"\\n--- Pulsar tecla para continuar ---\\n\")\n\nX_train, X_test, y_train, y_test = train_test_split(X,y,test_size = 0.20)\n\n# Preprocesado\npreprocesado = [(\"escalado\", StandardScaler()),\n (\"PCA\", PCA(n_components=0.95))]\n\npreprocesador = Pipeline(preprocesado)\n\n# Mostramos la matriz de correlaciones antes del preprocesado de datos\ndef mostrar_correlaciones(datos):\n f, ax = plt.subplots(figsize=(10, 8))\n corr = datos.corr()\n sns.heatmap(corr,\n mask=np.zeros_like(corr, dtype=np.bool),\n cmap=sns.diverging_palette(220, 10, as_cmap=True),\n square=True,\n ax=ax)\n f.suptitle('Matriz Correlaciones')\n plt.show()\n\nmostrar_correlaciones(X_train)\ninput(\"\\n--- Pulsar tecla para continuar ---\\n\")\n\n# Mostramos la matriz de correlaciones después del preprocesado de datos\ndef muestra_correlaciones_procesados(datos):\n f, ax = plt.subplots(figsize=(10, 8))\n corr = np.corrcoef(datos.T)\n sns.heatmap(corr,\n mask=np.zeros_like(corr, dtype=np.bool),\n cmap=sns.diverging_palette(220, 10, as_cmap=True),\n square=True,\n ax=ax)\n f.suptitle('Matriz Correlaciones')\n plt.show()\n\ndatos_preprocesados = preprocesador.fit_transform(X_train)\nmuestra_correlaciones_procesados(datos_preprocesados)\ninput(\"\\n--- Pulsar tecla para continuar ---\\n\")\n\n# Entrenamiento\n# Añadimos el clasificador ClfSwitcher para evitar errores de compilación\npreprocesado = [(\"escalado\", StandardScaler()),\n (\"PCA\", PCA(n_components=0.95)), ('clf', ClfSwitcher())]\n\npreprocesador = Pipeline(preprocesado)\n\n# Modelos\nmodelos = [\n {'clf': [LogisticRegression(penalty='l2', # Regularización Ridge (L2)\n solver='lbfgs', # Algoritmo a utilizar en el problema de optimización, aunque es el dado por defecto\n max_iter=1000)],\n 'clf__C':[2.0, 1.0, 0.1, 0.01, 0.001]},\n {'clf': [SVC(kernel='rbf', # kernel gausiano\n class_weight=\"balanced\", # clases balanceadas\n random_state=SEED)],\n 'clf__C': [10**a for a in range(-4, 2)]},\n {'clf': [RandomForestClassifier(random_state=SEED,\n class_weight=\"balanced\")],\n 'clf__max_depth': [10, 20, 30, 40, 50],\n 'clf__n_estimators': [50, 100, 150, 200]},\n]\n\n# cross-validation\ngrid = GridSearchCV(preprocesador, modelos, scoring='accuracy', cv=5, n_jobs=-1)\n\ngrid.fit(X_train, y_train)\ndf_cv_results = pd.DataFrame(grid.cv_results_)\n# compression_opts = dict(method='zip', archive_name='results.csv')\n# df_cv_results.to_csv('results.zip', index=False, compression=compression_opts)\n\nclasificador = grid.best_estimator_\n\n# Mostramos el clasificador elegido\nprint(\"Clasificador elegido: {}\".format(clasificador))\ny_predict = clasificador.predict(X_test)\n\n# Matriz de confusion\ncm = confusion_matrix(y_test, y_predict)\ncm = 100*cm.astype(\"float64\")/cm.sum(axis=1)[:,np.newaxis]\nfig = plt.figure()\nax = fig.add_subplot()\ncax = ax.matshow(cm, cmap =\"BuGn\")\nplt.title('Confusion matrix of the classifier')\nfig.colorbar(cax)\nax.set(title=\"Matriz de confusión\",\n xticks=range(2),\n yticks=range(2),\n xlabel=\"Etiqueta real\",\n ylabel=\"Etiqueta predicha\")\n\n# Añadimos los porcentajes a las celdas\nfor i in range(2):\n for j in range(2):\n ax.text(j, i, \"{:.0f}%\".format(cm[i, j]), ha=\"center\", va=\"center\")\n\nplt.show()\ninput(\"\\n--- Pulsar tecla para continuar ---\\n\")\n\n# Resultados\nprint(\"E_in: {}\".format(1 - clasificador.score(X_train, y_train)))\nprint(\"E_test: {}\".format(1 - clasificador.score(X_test, 
y_test)))\n","sub_path":"pfinal.py","file_name":"pfinal.py","file_ext":"py","file_size_in_byte":6359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"176151771","text":"# -*- coding: utf-8 -*-\nimport os\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(here, 'README.rst')).read()\nCHANGES = open(os.path.join(here, 'CHANGES.rst')).read()\n\nsetup(name=\"xpinyin\",\n version='0.5.3',\n description=\"translate chinese hanzi to pinyin by python\",\n long_description=README + '\\n\\n' + CHANGES,\n author=\"Eric Lo\",\n author_email=\"lxneng@gmail.com\",\n url=\"https://github.com/lxneng/xpinyin\",\n packages=find_packages('src'),\n test_suite='xpinyin.tests',\n package_dir={'': 'src'},\n include_package_data=True,\n license=\"MIT License\")\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"354736163","text":"# -*- coding: utf-8 -*-\r\n# __Author__: PanDongLin\r\nfrom django.conf.urls import url\r\nfrom devops import views\r\n\r\n\r\nurlpatterns = [\r\n url(r'^show_message/', views.show_message, name='show_message'),\r\n url(r'^multi_cmd/', views.multi_cmd, name='multi_cmd'),\r\n url(r'^task_center/$', views.task_center, name=\"task_center\"),\r\n url(r'^task_center/result/$', views.get_task_result, name=\"get_task_result\"),\r\n url(r'^code_commit/', views.code_commit, name='code_commit'),\r\n url(r'^code_audit/', views.code_audit, name='code_audit'),\r\n url(r'^code_list/', views.code_list, name='code_list'),\r\n]","sub_path":"mysite/devops/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"507011225","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nInstall PlexAPI\n\"\"\"\nimport re\nfrom distutils.core import setup\nfrom setuptools import find_packages\n\n# Convert markdown readme to rst\ntry:\n from pypandoc import convert\n read_md = lambda f: convert(f, 'rst')\nexcept ImportError:\n print(\"Warn: pypandoc not found, not converting Markdown to RST\")\n read_md = lambda f: open(f, 'r').read()\n\n\n# Fetch the current version\nwith open('plexapi/__init__.py') as handle:\n for line in handle.readlines():\n if line.startswith('VERSION'):\n VERSION = re.findall(\"'([0-9\\.]+?)'\", line)[0]\n\nsetup(\n name='PlexAPI',\n version=VERSION,\n description='Python bindings for the Plex API.',\n author='Michael Shepanski',\n author_email='mjs7231@gmail.com',\n url='https://github.com/mjs7231/plexapi',\n packages=find_packages(),\n install_requires=['requests'],\n long_description=read_md('README.md'),\n keywords=['plex', 'api'],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"96628180","text":"\n\nsuits = ['s', 'h', 'd', 'c']\nhands = ['2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K', 'A']\n\ncards = []\nfor h in hands:\n for s in suits:\n cards.append(h+s)\n\ncard_len = len(cards)\n\nfor i in range(0, card_len):\n for j in range(i+1, card_len):\n for k in range(j+1, card_len):\n 
print(cards[i]+cards[j]+cards[k])\n\n\n","sub_path":"src/all_board.py","file_name":"all_board.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"305789673","text":"#!/usr/bin/python\nimport ctypes\nimport os\nimport random\nimport functools\n\nimport schedule\n\nindex = 0\n\n\ndef change_background(picture_path: str) -> None:\n ctypes.windll.user32.SystemParametersInfoW(20, 0, picture_path, 3)\n\n\ndef get_pictures(dir_path: str) -> list:\n return [os.path.join(root, name)\n for root, dirs, files in os.walk(dir_path, topdown=False)\n for name in files\n if name.endswith('jpg') or name.endswith('png')]\n\n\ndef log(text):\n def decorator(f):\n @functools.wraps(f)\n def wrap(*args, **kwargs):\n p = f(*args, **kwargs)\n print(f'{text}: {p}')\n return p\n\n return wrap\n\n return decorator\n\n\n@log(f'DESKTOP_BG_IMG switch to')\ndef change_background_job(dir_path) -> None:\n if dir_path.__class__.__name__ == 'list':\n dir_path = dir_path[0]\n pictures = get_pictures(dir_path)\n index = random.randint(0, len(pictures) - 1)\n change_background(pictures[index])\n return pictures[index]\n\n\ndef scheduler(job: staticmethod, interval, arg_num, *args) -> None:\n if arg_num <= 0:\n schedule.every(interval).seconds.do(job)\n else:\n schedule.every(interval).seconds.do(job, [args[i] for i in range(arg_num)])\n while True:\n schedule.run_pending()\n\n\nif __name__ == '__main__':\n scheduler(change_background_job, 10, 1, r'C:\\Users\\Aragorn II\\Desktop\\Des', 'hello', 'world')\n","sub_path":"Change.py","file_name":"Change.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"195509691","text":"import json\nimport csv\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.stem import WordNetLemmatizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\nclass LemmaTokenizer(object):\n def __init__(self):\n self.tokenizer = RegexpTokenizer('(?u)\\w\\w+')\n self.wnl = WordNetLemmatizer()\n def __call__(self, doc):\n return([self.wnl.lemmatize(t) for t in self.tokenizer.tokenize(doc)])\n\ndef read_data(fin):\n info_li = []\n\n with open(fin, 'r', newline='', encoding='utf-8') as filereader:\n info_li = list(csv.reader(filereader))\n\n return info_li\n\n# 파일 입력\nfin_movie = 'movies_plot.csv'\nmovie_info_li = read_data(fin_movie)\n\nresult_lines = []\nmovie_plot_li = []\n\nfor movie_info in movie_info_li:\n if movie_info != []:\n try:\n movie_plot = movie_info[2]\n except KeyError:\n print('incomplete json: %s' %(movie_info[0]))\n result_lines.append([movie_info[0], movie_info[1], movie_plot])\n movie_plot_li.append(movie_plot)\nvectorizer2 = TfidfVectorizer(min_df=1, tokenizer=LemmaTokenizer(), stop_words='english')\nX = vectorizer2.fit_transform(movie_plot_li)\n\n# 코사인 유사도\nmovie_sim = cosine_similarity(X)\n\ndef similar_recommend_by_movie_id(movielens_id, m_id):\n movie_index = movielens_id - 1\n\n similar_movies = sorted(list(enumerate(movie_sim[movie_index])),\n key=lambda x: x[1], reverse=True)\n recommended = 1\n for movie_info in similar_movies[1:6]:\n movie_title=movie_info_li[movie_info[0]]\n jsonResult.append({\"m_id\": m_id, \"similar_id\":movie_title[1]})\n recommended += 1\n\n# 비슷한것 찾기 json 파일 생성\ni = 1\njsonResult = []\nfor movie_info in movie_info_li:\n if movie_info != []:\n similar_recommend_by_movie_id(i, movie_info[1])\n 
print(\"%s_json save\"%movie_info[1])\n i += 1\nwith open('output3/%s_movie.json' % (i), 'w', encoding='utf-8') as outfile:\n retJson = json.dumps(jsonResult, indent=4, sort_keys=True, ensure_ascii=False)\n outfile.write(retJson)","sub_path":"etc/simliar/tmdb_total_file.py","file_name":"tmdb_total_file.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"35455233","text":"import random\n\ndef insertion_sort(l):\n \"\"\"\n This is the standard insertion sort which sorts in place\n by moving all nodes over one when inserting\n \"\"\"\n for i in range(1,len(l)):\n new_node = l[i]\n rev_index = i - 1\n # Compare the new node to the previous ones starting at the right\n print(f\"i: {i}\")\n print(l)\n while (rev_index >= 0 and new_node < l[rev_index]):\n print(f\"rev_index: {rev_index}\")\n # if the new node is less, then move the \n # current node one over and check again\n l[rev_index+1] = l[rev_index] \n rev_index -= 1\n # when the new_node is greater than the value at index rev_index\n # place the new_node value to the right of that index\n l[rev_index+1] = new_node\n return l \n \ndef insertion_sort_temparray(l):\n \"\"\"\n This insertion sort uses a temp array but it is\n actually possible (better?) to sort it in place by moving all the nodes\n over when inserting\n \"\"\"\n temp = []\n for i in range(len(l)):\n inserted = False\n print(temp, i)\n # compare to elements in temp and insert\n for j in range(len(temp)):\n print(j, temp[j])\n if l[i] < temp[j]:\n #insert it here\n temp.insert(j, l[i])\n inserted = True\n break\n # if it wasn't inserted put it at the end\n if not inserted:\n temp.append(l[i])\n return temp\n\n\ndef compare_sort(l):\n for i in range(0,len(l)):\n print(l)\n # compare to rest of list\n for j in range(i+1,len(l)):\n if l[i] > l[j]:\n l[i], l[j] = l[j], l[i] #swap\n return l\n\nif __name__ == \"__main__\":\n size = 10\n l = []\n for i in range(size):\n l.append(random.randint(1,100))\n\n print(l)\n l = insertion_sort(l)\n print(l)\n","sub_path":"python_reference_scripts/insertionsort.py","file_name":"insertionsort.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"457150215","text":"# Copyright 2017 The Bazel Authors. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Definitions to create BUILD files for rbe_autoconfig.\"\"\"\n\nload(\n \"//rules/rbe_repo:util.bzl\",\n \"CC_CONFIG_DIR\",\n \"JAVA_CONFIG_DIR\",\n \"PLATFORM_DIR\",\n)\nload(\"//rules/exec_properties:exec_properties.bzl\", \"create_rbe_exec_properties_dict\")\n\n_CC_TOOLCHAIN = \":cc-compiler-k8\"\n\n# Defining a local version of dicts.add in order not to create a dependency on bazel_skylib.\ndef _merge_dicts(*dict_args):\n result = {}\n for dictionary in dict_args:\n if dictionary:\n result.update(dictionary)\n return result\n\ndef create_config_aliases(ctx, toolchain_config_spec_name):\n \"\"\"Produces BUILD files with alias for the C++ and Java toolchain targets.\n\n Java toolchain aliases are only created if configs are exported.\n\n Args:\n ctx: the Bazel context object.\n toolchain_config_spec_name: name of the toolchain config spec\n \"\"\"\n if ctx.attr.create_cc_configs:\n # Create the BUILD file with the alias for the cc_toolchain_suite\n template = ctx.path(Label(\"@bazel_toolchains//rules/rbe_repo:BUILD.cc_alias.tpl\"))\n toolchain = (\"@{toolchain_config_repo}//{config_output_base}/{toolchain_config_spec_name}/bazel_{bazel_version}/{cc_dir}:toolchain\".format(\n toolchain_config_spec_name = toolchain_config_spec_name,\n bazel_version = ctx.attr.bazel_version,\n cc_dir = CC_CONFIG_DIR,\n config_output_base = ctx.attr.toolchain_config_suite_spec[\"output_base\"],\n toolchain_config_repo = ctx.attr.toolchain_config_suite_spec[\"repo_name\"],\n ))\n ctx.template(\n CC_CONFIG_DIR + \"/BUILD\",\n template,\n {\n \"%{toolchain}\": toolchain,\n },\n False,\n )\n if ctx.attr.create_java_configs and ctx.attr.export_configs:\n # Create the BUILD file with the alias for the java_runtime\n template = ctx.path(Label(\"@bazel_toolchains//rules/rbe_repo:BUILD.java_alias.tpl\"))\n java_runtime = (\"@{toolchain_config_repo}//{config_output_base}/{toolchain_config_spec_name}/bazel_{bazel_version}/{java_dir}:jdk\".format(\n toolchain_config_spec_name = toolchain_config_spec_name,\n bazel_version = ctx.attr.bazel_version,\n java_dir = JAVA_CONFIG_DIR,\n config_output_base = ctx.attr.toolchain_config_suite_spec[\"output_base\"],\n toolchain_config_repo = ctx.attr.toolchain_config_suite_spec[\"repo_name\"],\n ))\n ctx.template(\n JAVA_CONFIG_DIR + \"/BUILD\",\n template,\n {\n \"%{java_runtime}\": java_runtime,\n },\n False,\n )\n\ndef create_java_runtime(ctx, java_home):\n \"\"\"Creates a BUILD file with the java_runtime target. 
\n\n Args:\n ctx: the Bazel context object.\n java_home: the selected/resolved location for java_home.\n \"\"\"\n template = ctx.path(Label(\"@bazel_toolchains//rules/rbe_repo:BUILD.java.tpl\"))\n ctx.template(\n JAVA_CONFIG_DIR + \"/BUILD\",\n template,\n {\n \"%{java_home}\": java_home,\n },\n False,\n )\n\ndef create_export_platform(ctx, exec_properties, image_name, name, toolchain_config_spec_name, use_legacy_platform_definition):\n \"\"\"Creates a BUILD file (to be exported to output_base) with the cc_toolchain and platform targets.\n\n Args:\n ctx: the Bazel context object.\n exec_properties: A string->string dict containing execution properties to\n be used when creating the platform. Will be used only when\n use_legacy_platform_definition == False. This dict must not contain\n \"container-image\".\n image_name: the name of the image.\n name: name of rbe_autoconfig repo rule.\n toolchain_config_spec_name: name of the toolchain config spec\n use_legacy_platform_definition: Whether to create a platform with\n remote_execution_properties (legacy) or with exec_properties.\n \"\"\"\n cc_toolchain_target = \"//\" + ctx.attr.toolchain_config_suite_spec[\"output_base\"]\n if toolchain_config_spec_name:\n cc_toolchain_target += \"/\" + toolchain_config_spec_name\n cc_toolchain_target += \"/bazel_\" + ctx.attr.bazel_version\n cc_toolchain_target += \"/cc\" + _CC_TOOLCHAIN\n _create_platform(ctx, exec_properties, image_name, name, cc_toolchain_target, use_legacy_platform_definition)\n\ndef create_external_repo_platform(ctx, exec_properties, image_name, name, use_legacy_platform_definition):\n \"\"\"Creates a BUILD file (to be used with configs in the external repo) with the cc_toolchain and platform targets.\n\n Args:\n ctx: the Bazel context object.\n exec_properties: A string->string dict containing execution properties to\n be used when creating the platform. Will be used only when\n use_legacy_platform_definition == False. This dict must not contain\n \"container-image\".\n image_name: the name of the image.\n name: name of rbe_autoconfig repo rule.\n use_legacy_platform_definition: Whether to create a platform with\n remote_execution_properties (legacy) or with exec_properties.\n \"\"\"\n cc_toolchain_target = \"@\" + ctx.attr.name + \"//\" + CC_CONFIG_DIR + _CC_TOOLCHAIN\n _create_platform(ctx, exec_properties, image_name, name, cc_toolchain_target, use_legacy_platform_definition)\n\ndef create_alias_platform(ctx, exec_properties, image_name, name, toolchain_config_spec_name, use_legacy_platform_definition):\n \"\"\"Creates a BUILD file (pointing to checked in config) with the cc_toolchain and platform targets.\n\n Args:\n ctx: the Bazel context object.\n exec_properties: A string->string dict containing execution properties to\n be used when creating the platform. Will be used only when\n use_legacy_platform_definition == False. 
This dict must not contain\n \"container-image\".\n image_name: the name of the image.\n name: name of rbe_autoconfig repo rule.\n toolchain_config_spec_name: name of the toolchain config spec.\n use_legacy_platform_definition: Whether to create a platform with\n remote_execution_properties (legacy) or with exec_properties.\n \"\"\"\n cc_toolchain_target = (\"@{toolchain_config_repo}//{config_output_base}/{toolchain_config_spec_name}/bazel_{bazel_version}/{cc_dir}{target}\".format(\n toolchain_config_spec_name = toolchain_config_spec_name,\n bazel_version = ctx.attr.bazel_version,\n cc_dir = CC_CONFIG_DIR,\n config_output_base = ctx.attr.toolchain_config_suite_spec[\"output_base\"],\n target = _CC_TOOLCHAIN,\n toolchain_config_repo = ctx.attr.toolchain_config_suite_spec[\"repo_name\"],\n ))\n _create_platform(ctx, exec_properties, image_name, name, cc_toolchain_target, use_legacy_platform_definition)\n\n# Creates a BUILD file with the cc_toolchain and platform targets\ndef _create_platform(ctx, exec_properties, image_name, name, cc_toolchain_target, use_legacy_platform_definition):\n template = ctx.path(Label(\"@bazel_toolchains//rules/rbe_repo:BUILD.platform_legacy.tpl\")) if use_legacy_platform_definition else ctx.path(Label(\"@bazel_toolchains//rules/rbe_repo:BUILD.platform.tpl\"))\n exec_compatible_with = (\"\\\"\" +\n (\"\\\",\\n \\\"\").join(ctx.attr.exec_compatible_with) +\n \"\\\",\")\n target_compatible_with = (\"\\\"\" +\n (\"\\\",\\n \\\"\").join(ctx.attr.target_compatible_with) +\n \"\\\",\")\n\n platform_exec_properties = create_rbe_exec_properties_dict(\n container_image = \"docker://%s\" % image_name,\n os_family = \"Linux\",\n )\n platform_exec_properties = _merge_dicts(platform_exec_properties, exec_properties)\n\n ctx.template(\n PLATFORM_DIR + \"/BUILD\",\n template,\n {\n \"%{cc_toolchain}\": cc_toolchain_target,\n \"%{exec_compatible_with}\": exec_compatible_with,\n \"%{image_name}\": image_name,\n \"%{platform_exec_properties}\": \"%s\" % platform_exec_properties,\n \"%{target_compatible_with}\": target_compatible_with,\n },\n False,\n )\n","sub_path":"rules/rbe_repo/build_gen.bzl","file_name":"build_gen.bzl","file_ext":"bzl","file_size_in_byte":8751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"565838157","text":"class CurrencyPairsConverter:\n PPC = 'ppc'\n USD = 'usd'\n ETH = 'eth'\n RUR = 'rur'\n BTC = 'btc'\n DSH = 'dsh'\n LTC = 'ltc'\n NMC = 'nmc'\n EUR = 'eur'\n CODES = [PPC, USD, ETH, RUR, BTC, DSH, LTC, NMC, EUR]\n ASSOC = {\n PPC: 'Peercoin',\n USD: 'US Dollar',\n ETH: 'Ethereum',\n RUR: 'Ruble',\n BTC: 'Bitcoin',\n DSH: 'Dash',\n LTC: 'Litecoin',\n NMC: 'Namecoin',\n EUR: 'Euro',\n }\n\n def __init__(self):\n pass\n\n @staticmethod\n def build_pair(left, right):\n return left + '_' + right\n\n def readable(self, code):\n return self.ASSOC[code]\n\n def code_pairs_to_readable(self, code_pair):\n chunks = code_pair.split('_')\n if chunks[0] in self.CODES and chunks[1] in self.CODES:\n first = self.readable(chunks[0])\n second = self.readable(chunks[1])\n return first + ' - ' + second\n","sub_path":"components/converter/currency_pairs.py","file_name":"currency_pairs.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"163083452","text":"\n\n\nimport scipy.signal as ssig\nfrom os.path import join\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nbase_folder = r'Z:\\n\\Neuroseeker Probe 
Recordings\\Neuroseeker Chronic Rat 22.1\\2017_06_02\\13_20_21\\Analysis\\Kilosort'\ndata_folder = r'Z:\\n\\Neuroseeker Probe Recordings\\Neuroseeker Chronic Rat 22.1\\2017_06_02\\13_20_21\\Data'\nbinary_data_filename = join(data_folder, r'2017_05_29T13_30_53_Amp_S16_LP3p5KHz_uV.bin')\n\nprobe_info_folder = r'E:\\George\\Python35Projects\\TheMeaningOfBrain\\Layouts\\Probes\\Neuroseeker'\nprb_file = join(probe_info_folder, 'prb.txt')\n\ntime_points = 100\nsampling_frequency = 20000\n\nnumber_of_channels_in_binary_file = 1440\n\n\n\n'''\n# Call to clean the kilosort generated templates\nfrom GUIs.Kilosort import clean_kilosort_templates as clean\n\nclean.cleanup_kilosorted_data(base_folder, number_of_channels_in_binary_file, binary_data_filename, prb_file,\n sampling_frequency=20000)\n'''\n\n\n\n\n\n# Generate Square wave pulse train to recapture Camera TTL information\nnumber_of_channels_in_binary_file = 1440\n\nbinary_data_filename = join(data_folder, r'InBehaviour_2017-06-02T13_20_21_Amp_S16_uV.bin')\npulse_data_trace_filename = r'InBehaviour_2017-06-02T13_20_21_Sync_U16_uV.bin'\n\nraw_data = np.memmap(binary_data_filename, dtype=np.int16, mode='r')\nnumber_of_timepoints_in_raw = int(raw_data.shape[0] / number_of_channels_in_binary_file)\nraw_data = np.reshape(raw_data, (number_of_channels_in_binary_file, number_of_timepoints_in_raw), order='F')\n\npulse_data = np.memmap(join(data_folder, pulse_data_trace_filename), dtype=np.uint16, mode='r')\n\n\ndef plot_both_pulses(pulse_data, pulse_square=None, pulse_freq=None, sampling_frequency = 20000, start_time=0, end_time=3600, step_time=1, time_window=0.5):\n plt.interactive(True)\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n start_time = start_time\n end_time = end_time\n step_time = step_time\n time_window = time_window\n num_of_windows = int((end_time - start_time) / step_time)\n\n for win in range(num_of_windows):\n st = start_time + step_time * win\n et = st + time_window\n stp = int(st * sampling_frequency)\n etp = int(et * sampling_frequency)\n t = np.linspace(st, et, etp - stp)\n if pulse_square is None:\n square = ssig.square(2 * np.pi * pulse_freq * (t + 40/sampling_frequency), duty=1.0-0.0434) / 2 + 0.5 + 65278\n else:\n square = pulse_square[stp:etp]\n\n ax.clear()\n ax.plot(t, square, t, pulse_data[stp:etp])\n\n plt.waitforbuttonpress()\n\n\n\ntop_of_pulse_points = np.argwhere(pulse_data==65279)\nbottom_of_pulse_points = np.argwhere(pulse_data==65278)\nstarting_pulse = bottom_of_pulse_points[0][0]-167\nend_pulse = top_of_pulse_points[-1][0]\ntime_points_in_ttl_train = end_pulse - starting_pulse\ntime_of_frames_train = 4217.07639 # From the csv file\n# sampling_frequency_corrected = time_points_in_ttl_train / time_of_frames_train\n\n\nsampling_frequency_corrected = 19998.5\npulse_freq = 119.6058485 # 119.6058485\nfull_time = pulse_data.shape[0] / sampling_frequency_corrected\nplot_both_pulses(pulse_data, pulse_freq=pulse_freq, sampling_frequency=sampling_frequency_corrected,\n start_time=44.5, end_time=full_time, step_time=20)\n\n\n\n\n\n\n\nt = np.linspace(0, full_time, pulse_data.shape[0])\n\nsquare = ssig.square(2 * np.pi * pulse_freq * (t + 40/sampling_frequency_corrected), duty=1.0-0.0434) / 2 + 0.5 + 65278\nsquare[:starting_pulse] = 65278\nsquare[end_pulse:] = 65278\n\nplot_both_pulses(pulse_data, pulse_square=square, start_time=full_time-60, end_time=full_time+1, step_time=0.5)\n\nstp = int(44 * sampling_frequency_corrected)\netp = int(48 * sampling_frequency_corrected)\nplt.plot(t[stp:etp], square[stp:etp], 
t[stp:etp], pulse_data[stp:etp])\n\nstp = int((full_time - 85) * sampling_frequency_corrected)\netp = int((full_time - 55) * sampling_frequency_corrected)\nplt.plot(t[stp:etp], square[stp:etp], t[stp:etp], pulse_data[stp:etp])\n\nnp.save(join(data_folder, r'corrected_camera_ttl_pulses.npy'), square)\nsquare = np.load(join(data_folder, r'corrected_camera_ttl_pulses.npy'))\n\n\n\n\ntransitions = np.diff(square)\nnum_of_pulses = np.sum(transitions==-1)\n\n\n\n\n\n\nframe_times = np.load(join(data_folder, r'frame_times.npy'))\n\ncsv_frames = np.ones(pulse_data.shape[0])*65278\nframe_times_offseted = frame_times + 15.9348\nfor frame_time in frame_times_offseted:\n csv_frames[int(frame_time*sampling_frequency_corrected)] = 65279\n\nstp = int(44 * sampling_frequency_corrected)\netp = int(48 * sampling_frequency_corrected)\nplt.plot(t[stp:etp], csv_frames[stp:etp], t[stp:etp], pulse_data[stp:etp])\n\nstp = int(4255 * sampling_frequency_corrected)\netp = int(full_time * sampling_frequency_corrected)\nplt.plot(t[stp:etp], csv_frames[stp:etp], t[stp:etp], pulse_data[stp:etp])\n\n\nplot_both_pulses(pulse_data, pulse_square=csv_frames, start_time=0, end_time=full_time+1, step_time=2)","sub_path":"ExperimentSpecificCode/_2017_05_Neuroseeker_Chronic_Rat_22.1/2017_06_02/13_20_21/notebook.py","file_name":"notebook.py","file_ext":"py","file_size_in_byte":4897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"453495034","text":"import tensorflow as tf\n\ncluster_spec = tf.train.ClusterSpec({\n \"ps\": [\n \"localhost:2223\", # /job:ps/task:0\n ],\n \"worker\": [\n \"localhost:2224\", # /job:worker/task:0\n \"localhost:2225\", # /job:worker/task:1\n ]})\nserver = tf.train.Server(cluster_spec, job_name=\"ps\", task_index=0)\nserver.join() # blocks until the server stops (i.e., never)","sub_path":"boom/12-2 ps.py","file_name":"12-2 ps.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"463905575","text":"\"\"\"\nThis script creates voronoi polygons around major metro centers in the US, with\nmodifications of the NYC and Long Island areas to keep them as distinct IPM regions.\n\nTo add additional metro areas for a new region, use the --extra-metro-cbsa-ids flag,\nonce for each additional cbsa_id to include:\n\npython create_voronoi_polygons.py --extra-metro-cbsa-ids 12100 --extra-metro-cbsa-ids 41540\n\"\"\"\n\nfrom typing import List\n\nimport pandas as pd\nimport geopandas as gpd\nimport shapely.ops\nfrom shapely.ops import cascaded_union\nfrom geovoronoi import voronoi_regions_from_coords\nimport typer\n\nfrom site_interconnection_costs import (\n load_ipm_shapefile,\n load_metro_areas_shapefile,\n)\n\nimport logging\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\nhandler = logging.StreamHandler()\nformatter = logging.Formatter(\n # More extensive test-like formatter...\n \"%(asctime)s [%(levelname)8s] %(name)s:%(lineno)s %(message)s\",\n # This is the datetime format string.\n \"%Y-%m-%d %H:%M:%S\",\n)\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n\ndef load_us_outline():\n \"Load a gdf of US states and return the outline of lower-48\"\n us_states = gpd.read_file(\n \"https://eric.clst.org/assets/wiki/uploads/Stuff/gz_2010_us_040_00_5m.json\"\n )\n drop_states = [\"Puerto Rico\", \"Alaska\", \"Hawaii\"]\n us_states = us_states.loc[~(us_states[\"NAME\"].isin(drop_states)), :]\n\n us_outline = 
shapely.ops.unary_union(us_states[\"geometry\"])\n\n return us_outline\n\n\ndef find_largest_cities(\n metro_areas_gdf,\n ipm_gdf,\n min_population=750000,\n max_cities_per_region=None,\n extra_metro_cbsa_ids=[],\n):\n _metro_areas_gdf = metro_areas_gdf.copy()\n _metro_areas_gdf[\"geometry\"] = _metro_areas_gdf[\"center\"]\n # metro_ipm_gdf = gpd.sjoin(ipm_gdf, _metro_areas_gdf, how=\"left\", op=\"intersects\")\n metro_ipm_gdf = gpd.sjoin(ipm_gdf, _metro_areas_gdf, how=\"left\", op=\"contains\")\n\n df_list = []\n # nw_areas[\"latitude\"] = 0\n # nw_areas[\"longitude\"] = 0\n grouped = metro_ipm_gdf.groupby(\"IPM_Region\", as_index=False)\n for _, _df in grouped:\n # n_df = _df.nlargest(5, \"population\")\n n_df = _df.loc[\n (_df[\"population\"] >= min_population)\n | (_df[\"cbsa_id\"].isin(extra_metro_cbsa_ids)),\n :,\n ]\n if max_cities_per_region:\n n_df = n_df.nlargest(max_cities_per_region, \"population\")\n # If there aren't any city that meet population criteria keep the largest city\n if n_df.empty:\n n_df = _df.nlargest(1, \"population\")\n df_list.append(n_df)\n largest_cities = pd.concat(df_list, ignore_index=True)\n\n lats = [center.y for center in largest_cities.center]\n lons = [center.x for center in largest_cities.center]\n\n largest_cities[\"latitude\"] = lats\n largest_cities[\"longitude\"] = lons\n\n return largest_cities\n\n\ndef main(fn: str = \"large_metro_voronoi.geojson\", extra_metro_cbsa_ids: List[str] = []):\n\n logger.info(\"Loading files\")\n us_outline = load_us_outline()\n ipm_gdf = load_ipm_shapefile()\n ipm_gdf[\"convex_hull\"] = ipm_gdf.convex_hull\n # site_locations = load_site_locations()\n metro_gdf = load_metro_areas_shapefile()\n\n logger.info(\"Finding largest metros\")\n if extra_metro_cbsa_ids:\n logger.info(f\"The extra metros {extra_metro_cbsa_ids} will be included\")\n largest_metros = find_largest_cities(\n metro_areas_gdf=metro_gdf,\n ipm_gdf=ipm_gdf,\n min_population=750000,\n extra_metro_cbsa_ids=extra_metro_cbsa_ids,\n )\n\n logger.info(\"Making voronoi polygons\")\n poly_shapes, pts, poly_to_pt_assignments = voronoi_regions_from_coords(\n largest_metros[[\"longitude\", \"latitude\"]].values, us_outline\n )\n\n metro_voronoi = largest_metros.iloc[[x[0] for x in poly_to_pt_assignments], :]\n metro_voronoi[\"metro_id\"] = metro_voronoi[\"cbsa_id\"]\n metro_voronoi.geometry = poly_shapes\n\n logger.info(\"Fixing NYC/Long Island\")\n ny_z_j_poly = ipm_gdf.loc[ipm_gdf[\"IPM_Region\"] == \"NY_Z_J\", \"convex_hull\"].values[\n 0\n ]\n ny_z_k_poly = ipm_gdf.loc[ipm_gdf[\"IPM_Region\"] == \"NY_Z_K\", \"convex_hull\"].values[\n 0\n ]\n ny_z_j_k_poly = cascaded_union([ny_z_j_poly, ny_z_k_poly])\n\n for cbsa_id in metro_voronoi.query(\n \"IPM_Region.isin(['NENG_CT', 'PJM_EMAC']).values\"\n )[\"cbsa_id\"].to_list():\n # print(cbsa_id)\n metro_voronoi.loc[\n metro_voronoi[\"cbsa_id\"] == cbsa_id, \"geometry\"\n ] = metro_voronoi.loc[\n metro_voronoi[\"cbsa_id\"] == cbsa_id, \"geometry\"\n ].difference(\n ny_z_j_k_poly\n )\n\n # Need the unary_union to make geometries valid\n ny_z_j_ipm = shapely.ops.unary_union(\n ipm_gdf.loc[ipm_gdf[\"IPM_Region\"] == \"NY_Z_J\", \"geometry\"].values[0]\n )\n ny_z_k_ipm = shapely.ops.unary_union(\n ipm_gdf.loc[ipm_gdf[\"IPM_Region\"] == \"NY_Z_K\", \"geometry\"].values[0]\n )\n\n # Get a simplified outline of Long Island\n # Start with the zone K convex hull, remove the overlap with zone J IPM region,\n # then take the intersection with the US outline.\n ny_z_k_ipm = 
ny_z_k_poly.difference(ny_z_j_ipm).intersection(us_outline)\n\n # Same with NYC, zone J. Remove the bordering regions (zone K, other IPM regions)\n # from the convex hull, then take intersection with US outline.\n ny_z_j_ipm = (\n ny_z_j_poly.difference(ny_z_k_ipm)\n .difference(\n shapely.ops.unary_union(\n ipm_gdf.query(\"IPM_Region=='PJM_EMAC'\")[\"geometry\"].values[0]\n )\n )\n .difference(\n shapely.ops.unary_union(\n ipm_gdf.query(\"IPM_Region=='NY_Z_G-I'\")[\"geometry\"].values[0])\n )\n .intersection(us_outline)\n )\n\n data_dict = {\n \"IPM_Region\": [\"NY_Z_J\", \"NY_Z_K\"],\n \"state\": [\"NY\", \"NY\"],\n \"metro_id\": [\"NY_Z_J\", \"NY_Z_K\"],\n \"latitude\": [ny_z_j_ipm.centroid.y, ny_z_k_ipm.centroid.y],\n \"longitude\": [ny_z_j_ipm.centroid.x, ny_z_k_ipm.centroid.x],\n }\n\n ny_z_j_k_df = gpd.GeoDataFrame(\n data=data_dict, geometry=[ny_z_j_ipm, ny_z_k_ipm], crs=metro_voronoi.crs\n )\n\n final_metro_voronoi = pd.concat(\n [metro_voronoi, ny_z_j_k_df], ignore_index=True, sort=False\n )\n\n logger.info(\"Writing polygons to file\")\n cols = [\"IPM_Region\", \"geometry\", \"latitude\", \"longitude\", \"metro_id\"]\n final_metro_voronoi[cols].to_file(fn, driver=\"GeoJSON\")\n\n\nif __name__ == \"__main__\":\n typer.run(main)\n","sub_path":"create_clusters/create_voronoi_polygons.py","file_name":"create_voronoi_polygons.py","file_ext":"py","file_size_in_byte":6529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"628690198","text":"# N개의 수가 주어졌을 때, 이를 오름차순으로 정렬하는 프로그램을 작성하시오.\n# 연습하려고 힙 정렬 써봣음\n\nn = int(input())\nans=[]\nfor i in range(n):\n a = int(input())\n ans.append(a)\n\ndef heap_sort(a):\n def down_heap(a,left,right):\n temp = a[left] #루트\n parent = left\n while parent < (right+1) // 2:\n cl = parent*2+1\n cr = cl + 1\n child = cr if cr <= right and a[cr] > a[cl] else cl # 둘 중 큰 값\n # print(a[cr], a[cl])\n if temp >= a[child]:\n break\n a[parent] = a[child]\n parent = child\n a[parent] = temp \n n = len(a)\n for i in range((n-1)//2,-1,-1):\n down_heap(a,i,n-1)\n for i in range(n-1,0,-1):\n a[0], a[i] = a[i], a[0]\n down_heap(a,0,i-1)\n\nheap_sort(ans)\n\nfor i in range(len(ans)):\n print(ans[i])","sub_path":"Python/1주차_정렬,재귀/정글_1_2751.py","file_name":"정글_1_2751.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"459743615","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport enum\nimport math\nfrom .multiagentenv import MultiAgentEnv\nimport random\nimport numpy as np\n\n\nclass Direction(enum.IntEnum):\n NORTH = 0\n SOUTH = 1\n EAST = 2\n WEST = 3\n\n\nclass Pos:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n\nclass Unit:\n def __init__(self, x, y, health_max, n_resources):\n self.pos = Pos(x, y)\n self.health_max = health_max\n self.health = health_max\n self.resources_loaded = np.array([False for _ in range(n_resources)])\n self.loaded = False\n\n\nclass Resource:\n def __init__(self, x, y):\n self.pos = Pos(x, y)\n\n\nclass Building:\n def __init__(self, x, y, health, n_resources):\n self.pos = Pos(x, y)\n self.health = health\n self.max_health = health\n\n self.resources_amount = [0. 
for _ in range(n_resources)]\n\n\nresources_pos = [Pos(1, 5),\n Pos(5, 1),\n Pos(1, 1)]\n\n\n# action_name = {0: 'noop',\n# 1: 'step',\n# 2: 'north',\n# 3: 'south',\n# 4: 'east',\n# 5: 'west',\n# 6: 'attack 0',\n# 7: \"attack 1\",\n# 8: \"gather res 1\",\n# 9: \"put res 1\",\n# 10: \"gather res 2\",\n# 11: \"put res 2\"\n# }\n\naction_name = {0: 'noop',\n 1: 'step',\n 2: 'north',\n 3: 'south',\n 4: 'east',\n 5: 'west',\n 6: 'attack 0',\n 7: \"gather res 1\",\n 8: \"put res 1\",\n 9: \"gather res 2\",\n 10: \"put res 2\"\n }\n\n\nclass GatherDefendEnv(MultiAgentEnv):\n \"\"\"The StarCraft II environment for decentralised multi-agent\n micromanagement scenarios.\n \"\"\"\n\n def __init__(\n self,\n n_agents=10,\n n_enemies=2,\n episode_limit=200,\n move_amount=1,\n continuing_episode=False,\n obs_all_health=True,\n obs_enemy_health=True,\n obs_own_health=True,\n obs_last_action=False,\n obs_pathing_grid=False,\n obs_terrain_height=False,\n obs_instead_of_state=False,\n obs_timestep_number=False,\n obs_resources=False,\n obs_base_resources_amount=False,\n state_last_action=True,\n state_timestep_number=False,\n reward_sparse=False,\n reward_only_positive=True,\n reward_death_value=0.5,\n reward_win=5,\n reward_defeat=0.01,\n reward_pick_up=0.5,\n reward_integrate=20,\n reward_gather=2,\n reward_negative_scale=0.5,\n reward_scale=True,\n reward_scale_rate=40,\n debug=False,\n is_replay=False,\n sight_range=9,\n shoot_range=1,\n map_x=10,\n map_y=10,\n agent_health=10,\n enemy_health=10,\n agent_attack=5,\n enemy_attack=2,\n base_health=150,\n n_resources=2,\n seed=None,\n proficiency=False,\n proficiency_start=0.4,\n proficiency_end=0.9,\n barrack=True\n ):\n # Map arguments\n self.sight_range = sight_range\n self.shoot_range = shoot_range\n\n self.n_agents = n_agents\n self.episode_limit = episode_limit\n self._move_amount = move_amount\n self.n_enemies = n_enemies\n self.n_resources = n_resources\n\n # Observations and state\n self.obs_own_health = obs_own_health\n self.obs_all_health = obs_all_health\n self.obs_enemy_health = obs_enemy_health\n self.obs_instead_of_state = obs_instead_of_state\n self.obs_last_action = obs_last_action\n self.obs_resources = obs_resources\n self.state_last_action = state_last_action\n if self.obs_all_health:\n self.obs_own_health = True\n self.obs_base_resources_amount = obs_base_resources_amount\n\n # Rewards args\n self.reward_sparse = reward_sparse\n self.reward_only_positive = reward_only_positive\n self.reward_negative_scale = reward_negative_scale\n self.reward_death_value = reward_death_value\n self.reward_integrate = reward_integrate\n self.reward_win = reward_win\n self.reward_defeat = reward_defeat\n self.reward_pick_up = reward_pick_up\n self.reward_gather = reward_gather\n self.reward_scale = reward_scale\n self.reward_scale_rate = reward_scale_rate\n\n # Other\n self.continuing_episode = continuing_episode\n # self._seed = seed\n self._seed = random.randint(0, 9999)\n np.random.seed(self._seed)\n self.debug = debug\n self.is_replay = is_replay\n\n # Actions\n self.n_actions_no_attack = 6\n self.n_actions_move = 4\n self.n_actions_resources = 2 * n_resources\n self.n_actions_no_resources = self.n_actions_no_attack + self.n_enemies\n self.n_actions = self.n_actions_no_resources + self.n_actions_resources\n\n # Property\n self.agent_health = agent_health\n self.enemy_health = enemy_health\n self.agent_attack = agent_attack\n self.enemy_attack = enemy_attack\n self.base_health = base_health\n self.barrack_health = base_health\n self.has_barrack = 
barrack\n\n # Resources\n self.base_x = 5\n self.base_y = 5\n self.barrack_x = 7\n self.barrack_y = 7\n self.resources = dict()\n for resources_id in range(self.n_resources):\n resource_pos = resources_pos[resources_id]\n self.resources[resources_id] = Resource(resource_pos.x, resource_pos.y) # TODO: observe base, resources\n self.base = Building(self.base_x, self.base_y, self.base_health, self.n_resources) # TODO: Initialize\n self.barrack = Building(self.barrack_x, self.barrack_y, self.base_health, self.n_resources)\n self.integrated = 0\n self.kill_number = 0\n\n # Map info\n max_kill = self.episode_limit // (self.enemy_health // self.agent_attack) * self.n_enemies\n max_integrate = self.episode_limit / 8\n self.max_reward = (max_kill * (self.reward_death_value + self.enemy_health * self.reward_defeat)\n + self.reward_win\n + max_integrate * self.reward_integrate) * 2\n\n self.agents = {}\n self.enemies = {}\n self._episode_count = 0\n self._episode_steps = 0\n self._total_steps = 0\n # self._obs = None\n self.battles_won = 0\n self.battles_game = 0\n self.timeouts = 0\n self.force_restarts = 0\n self.last_stats = None\n # self.death_tracker_ally = np.zeros(self.n_agents)\n # self.death_tracker_enemy = np.zeros(self.n_enemies)\n self.previous_ally_units = None\n self.previous_enemy_units = None\n self.last_action = np.zeros((self.n_agents, self.n_actions))\n self.map_x = map_x\n self.map_y = map_y\n self.proficiency = proficiency\n self.proficiency_start = proficiency_start\n self.proficiency_max = proficiency_end\n self.proficiency_step = 2 * (proficiency_end - proficiency_start) / (episode_limit / 8)\n\n if self.debug:\n self.action_count = {agent_i: [0 for _ in range(self.n_resources * 2 + 1)] for agent_i in range(self.n_agents)}\n self.reset()\n\n def reset(self):\n \"\"\"Reset the environment. 
Required after each full episode.\n Returns initial observations and states.\n \"\"\"\n self._episode_steps = 0\n self.reset_resources_and_base()\n self.kill_number = 0\n\n # Information kept for counting the reward\n # self.death_tracker_ally = np.zeros(self.n_agents)\n # self.death_tracker_enemy = np.zeros(self.n_enemies)\n self.previous_ally_units = None\n self.previous_enemy_units = None\n\n self.last_action = np.zeros((self.n_agents, self.n_actions))\n self.n_pickup = np.zeros([self.n_agents, self.n_resources])\n\n # self._obs = self._controller.observe()\n self.init_units()\n\n return self.get_obs(), self.get_state()\n\n def reset_resources_and_base(self):\n for resources_id in range(self.n_resources):\n resource_pos = resources_pos[resources_id]\n self.resources[resources_id] = Resource(resource_pos.x, resource_pos.y)\n\n self.base = Building(self.base_x, self.base_y, self.base_health, self.n_resources)\n self.barrack = Building(self.barrack_x, self.barrack_y, self.base_health, self.n_resources)\n\n self.integrated = 0\n\n def init_units(self):\n self.agents = {}\n if self.has_barrack:\n for agent_id in range(self.n_agents):\n self.agents[agent_id] = Unit(random.randint(1, self.barrack_x),\n random.randint(1, self.barrack_y),\n self.agent_health,\n self.n_resources)\n else:\n for agent_id in range(self.n_agents):\n self.agents[agent_id] = Unit(random.randint(1, self.base_x),\n random.randint(1, self.base_y),\n self.agent_health,\n self.n_resources)\n\n self.enemies = {}\n for enemy_id in range(self.n_enemies):\n # self.enemies[enemy_id] = Unit(random.randint(self.base_x + 2, self.map_x),\n # random.randint(self.base_y + 2, self.map_y),\n # self.enemy_health,\n # self.n_resources)\n self.enemies[enemy_id] = Unit(self.map_x,\n self.map_y,\n self.enemy_health,\n self.n_resources)\n\n def ally_step(self, actions):\n attack_reward = 0\n attack_value = [0 for _ in range(self.n_enemies)]\n for agent_id, action in enumerate(actions):\n avail_actions = self.get_avail_agent_actions(agent_id)\n\n assert avail_actions[action] == 1, \\\n \"Agent {} cannot perform action {}\".format(agent_id, action)\n\n unit = self.get_unit_by_id(agent_id)\n if action == 2:\n unit.pos.y += self._move_amount\n elif action == 3:\n unit.pos.y -= self._move_amount\n elif action == 4:\n unit.pos.x += self._move_amount\n elif action == 5:\n unit.pos.x -= self._move_amount\n elif self.n_actions_no_attack <= action < self.n_actions_no_attack + self.n_enemies:\n target_id = action - self.n_actions_no_attack\n attack_value[target_id] += self.agent_attack\n elif action >= self.n_actions_no_resources:\n res_i = (action - self.n_actions_no_resources) // 2\n gather_down = (action - self.n_actions_no_attack - self.n_enemies) % 2\n\n if gather_down:\n assert unit.resources_loaded[res_i], \"Agent {} does not have resource {}\".format(agent_id, res_i)\n\n reward_gather = self.reward_gather\n if res_i == 1:\n if self.base.resources_amount[0] >= self.base.resources_amount[1] / 2:\n reward_gather *= 5\n else:\n reward_gather /= 2 * 2\n else:\n if self.base.resources_amount[0] <= self.base.resources_amount[1] / 2:\n reward_gather *= 5\n else:\n reward_gather /= 2 * 2\n\n self.base.resources_amount[res_i] += 1\n unit.resources_loaded[res_i] = False\n unit.loaded = False\n\n attack_reward += reward_gather\n else:\n reward_pickup = self.reward_pick_up\n if res_i == 1:\n if self.base.resources_amount[0] >= 
self.base.resources_amount[1] / 2:\n reward_pickup *= 5\n else:\n reward_pickup /= 2\n else:\n if self.base.resources_amount[0] <= self.base.resources_amount[1] / 2:\n reward_pickup *= 5\n else:\n reward_pickup /= 2\n\n if self.proficiency:\n gather_prob = self.proficiency_start + self.proficiency_step * self.n_pickup[agent_id][res_i]\n if random.random() < gather_prob:\n assert unit.loaded is False, \"Agent {} is loaded when trying to gather resource {}\".format(agent_id, res_i)\n\n unit.resources_loaded[res_i] = True\n unit.loaded = True\n attack_reward += reward_pickup / gather_prob\n\n self.n_pickup[agent_id][res_i] += 1\n else:\n assert unit.loaded is False, \"Agent {} is loaded when trying to gather resource {}\".format(\n agent_id, res_i)\n\n unit.resources_loaded[res_i] = True\n unit.loaded = True\n attack_reward += reward_pickup\n self.n_pickup[agent_id][res_i] += 1\n\n # Attack\n for enemy_id in range(self.n_enemies):\n if self.enemies[enemy_id].health - attack_value[enemy_id] <= 0:\n attack_reward += self.reward_death_value\n attack_reward += self.reward_defeat * self.enemies[enemy_id].health\n\n # self.enemies[enemy_id] = Unit(random.randint(self.base_x + 2, self.map_x),\n # random.randint(self.base_y + 2, self.map_y),\n # self.enemy_health,\n # self.n_resources)\n self.enemies[enemy_id] = Unit(self.map_x,\n self.map_y,\n self.enemy_health,\n self.n_resources)\n self.kill_number += 1\n else:\n attack_reward += self.reward_defeat * attack_value[enemy_id]\n self.enemies[enemy_id].health -= attack_value[enemy_id]\n\n return attack_reward\n\n def enemy_step(self):\n game_end_code = None\n\n if self.has_barrack:\n for enemy_id, enemy in self.enemies.items():\n if self.can_reach(enemy.pos, self.barrack.pos):\n self.barrack.health -= self.enemy_attack\n else:\n if enemy.pos.x > self.barrack_x:\n enemy.pos.x -= 1\n\n if enemy.pos.y > self.barrack_y:\n enemy.pos.y -= 1\n\n if self.barrack.health <= 0:\n game_end_code = -1\n else:\n for enemy_id, enemy in self.enemies.items():\n if self.can_reach(enemy.pos, self.base.pos):\n self.base.health -= self.enemy_attack\n else:\n if enemy.pos.x > self.base_x:\n enemy.pos.x -= 1\n\n if enemy.pos.y > self.base_y:\n enemy.pos.y -= 1\n\n if self.base.health <= 0:\n game_end_code = -1\n\n return game_end_code\n\n def update_units(self, actions):\n \"\"\"Update units after an environment step.\n This function assumes that self._obs is up-to-date.\n \"\"\"\n attack_reward = self.ally_step(actions)\n game_end_code = self.enemy_step()\n\n return attack_reward, game_end_code\n\n def base_integrate(self):\n number = 0\n while True:\n can_integrate = True\n for res_i in range(self.n_resources):\n if self.base.resources_amount[res_i] <= res_i:\n can_integrate = False\n break\n\n if can_integrate:\n for res_i in range(self.n_resources):\n self.base.resources_amount[res_i] -= (res_i + 1)\n number += 1\n self.integrated += 1\n # print('!!!!!!!!!!!!!')\n else:\n break\n\n return number * self.reward_integrate\n\n def step(self, actions):\n \"\"\"A single environment step. 
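# The proficiency mechanic above in isolation: a pickup succeeds with a
# probability that grows with practice, and a successful reward is divided by
# that probability, so the expected pickup reward is constant across skill
# levels while its variance shrinks as agents practise. start/step below are
# placeholders, not the env's configured values.
import random

def proficient_pickup(n_prev_pickups, base_reward, start=0.2, step=0.05):
    prob = start + step * n_prev_pickups   # can exceed 1.0, as in the env
    if random.random() < prob:
        return base_reward / prob          # compensated success reward
    return 0.0                             # failed attempt, nothing gathered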
Returns reward, terminated, info.\"\"\"\n if self.is_replay:\n positions = []\n for agent_id in range(self.n_agents):\n unit = self.get_unit_by_id(agent_id)\n positions.append([agent_id, unit.pos.x, unit.pos.y, list(unit.resources_loaded)])\n for e_id, e_unit in self.enemies.items():\n positions.append([e_id, e_unit.pos.x, e_unit.pos.y, e_unit.health])\n positions.append(self.base.resources_amount*2)\n # positions.insert(0,self._episode_steps)\n print(positions, \",\")\n\n actions = [int(a) for a in actions]\n\n if self.debug:\n print(\">>>\")\n for agent_id, action_ in enumerate(actions):\n print(agent_id, self.agents[agent_id].pos.x, self.agents[agent_id].pos.y,\n action_name[action_])\n\n if self.n_actions_no_attack <= action_ < self.n_actions_no_attack + self.n_enemies:\n self.action_count[agent_id][0] += 1\n elif action_ == self.n_actions_no_resources:\n self.action_count[agent_id][1] += 1\n elif action_ == self.n_actions_no_resources+1:\n self.action_count[agent_id][2] += 1\n elif action_ == self.n_actions_no_resources+2:\n self.action_count[agent_id][3] += 1\n elif action_ == self.n_actions_no_resources+3:\n self.action_count[agent_id][4] += 1\n\n for enemy in self.enemies.values():\n print(enemy.pos.x, enemy.pos.y)\n\n self.last_action = np.eye(self.n_actions)[np.array(actions)]\n\n # Collect individual actions\n # self._obs = self._controller.observe()\n\n self._total_steps += 1\n self._episode_steps += 1\n\n # Update units\n reward, game_end_code = self.update_units(actions)\n # Update base\n resource_reward = self.base_integrate()\n reward += resource_reward\n\n terminated = False\n info = {\"battle_won\": False}\n\n if game_end_code is not None:\n # Battle is over\n terminated = True\n self.battles_game += 1\n if game_end_code == 1:\n self.battles_won += 1\n info[\"battle_won\"] = True\n if not self.reward_sparse:\n reward += self.reward_win\n else:\n reward = 1\n elif game_end_code == -1:\n if not self.reward_sparse:\n reward += self.reward_defeat\n else:\n reward = -1\n\n elif self._episode_steps >= self.episode_limit:\n # Episode limit reached\n terminated = True\n self.battles_won += 1\n info[\"battle_won\"] = True\n if self.continuing_episode:\n info[\"episode_limit\"] = True\n self.battles_game += 1\n\n if terminated:\n self._episode_count += 1\n info[\"integrated\"] = self.integrated\n for resource_i in range(self.n_resources):\n info[\"remaining_{}\".format(resource_i)] = self.base.resources_amount[resource_i]\n info[\"kill\"] = self.kill_number\n\n if self.is_replay:\n positions = []\n for agent_id in range(self.n_agents):\n unit = self.get_unit_by_id(agent_id)\n positions.append([agent_id, unit.pos.x, unit.pos.y, list(unit.resources_loaded)])\n for e_id, e_unit in self.enemies.items():\n positions.append([e_id, e_unit.pos.x, e_unit.pos.y, e_unit.health])\n positions.append(self.base.resources_amount*2)\n # positions.insert(0,self._episode_steps)\n print(positions, \",\")\n\n if self.debug:\n if info[\"battle_won\"]:\n print(\"win\")\n else:\n print(\"lose\")\n\n for agent_id in range(self.n_agents):\n print('Agent {} attack {} times, hold {} times, gather {}, hold {}, gather{}'.format(\n agent_id, *self.action_count[agent_id]))\n self.action_count = {agent_i: [0 for _ in range(self.n_resources * 2 + 1)] for agent_i in\n range(self.n_agents)}\n print('Kill:', self.kill_number)\n print(\"Gather:\", self.integrated)\n print('Leave:', self.base.resources_amount)\n\n if self.reward_scale:\n reward /= self.max_reward / self.reward_scale_rate\n\n # print(reward)\n\n return 
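# The one-hot trick used in step() above, shown standalone:
# np.eye(n_actions)[actions] builds the whole (n_agents, n_actions) matrix
# with a single fancy-indexing call.
import numpy as np

acts = np.array([2, 5, 0])
one_hot = np.eye(6)[acts]                  # row i is the one-hot of acts[i]
assert (one_hot[0] == np.array([0, 0, 1, 0, 0, 0])).all()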
reward, terminated, info\n\n def get_total_actions(self):\n \"\"\"Returns the total number of actions an agent could ever take.\"\"\"\n return self.n_actions\n\n @staticmethod\n def distance(x1, y1, x2, y2):\n \"\"\"Distance between two points.\"\"\"\n return math.hypot(x2 - x1, y2 - y1)\n\n def unit_shoot_range(self, agent_id):\n \"\"\"Returns the shooting range for an agent.\"\"\"\n return self.shoot_range\n\n def unit_sight_range(self, agent_id):\n \"\"\"Returns the sight range for an agent.\"\"\"\n return self.sight_range\n\n def get_obs_agent(self, agent_id):\n \"\"\"Returns observation for agent_id.\n NOTE: Agents should have access only to their local observations\n during decentralised execution.\n \"\"\"\n unit = self.get_unit_by_id(agent_id)\n\n nf_al = 4\n nf_en = 4\n\n if self.obs_all_health:\n nf_al += 1\n nf_en += 1\n\n if self.obs_enemy_health:\n nf_en += 1\n\n if self.obs_last_action:\n nf_al += self.n_actions\n\n nf_own = 0\n if self.obs_own_health:\n nf_own += 1\n nf_own += 1 + self.n_resources\n\n move_feats_len = self.n_actions_move\n\n move_feats = np.zeros(move_feats_len, dtype=np.float32)\n enemy_feats = np.zeros((self.n_enemies, nf_en), dtype=np.float32)\n ally_feats = np.zeros((self.n_agents - 1, nf_al), dtype=np.float32)\n own_feats = np.zeros(nf_own, dtype=np.float32)\n resources_feats = np.zeros(2*self.n_resources, np.float32)\n if self.obs_base_resources_amount:\n base_feats = np.zeros(3 + self.n_resources, np.float32)\n else:\n base_feats = np.zeros(3, np.float32)\n barrack_feats = np.zeros(3, np.float32)\n\n if unit.health > 0: # otherwise dead, return all zeros\n x = unit.pos.x\n y = unit.pos.y\n sight_range = self.unit_sight_range(agent_id)\n\n # Movement features\n avail_actions = self.get_avail_agent_actions(agent_id)\n for m in range(self.n_actions_move):\n move_feats[m] = avail_actions[m + 2]\n\n ind = self.n_actions_move\n\n # Enemy features\n for e_id, e_unit in self.enemies.items():\n e_x = e_unit.pos.x\n e_y = e_unit.pos.y\n dist = self.distance(x, y, e_x, e_y)\n\n if (\n dist < sight_range and e_unit.health > 0\n ): # visible and alive\n # Sight range > shoot range\n enemy_feats[e_id, 0] = avail_actions[\n self.n_actions_no_attack + e_id\n ] # available\n enemy_feats[e_id, 1] = dist / sight_range # distance\n enemy_feats[e_id, 2] = (e_x - x) / sight_range # relative X\n enemy_feats[e_id, 3] = (e_y - y) / sight_range # relative Y\n\n ind = 4\n if self.obs_all_health or self.obs_enemy_health:\n enemy_feats[e_id, ind] = (\n e_unit.health / e_unit.health_max\n ) # health\n ind += 1\n\n # Ally features\n al_ids = [\n al_id for al_id in range(self.n_agents) if al_id != agent_id\n ]\n for i, al_id in enumerate(al_ids):\n\n al_unit = self.get_unit_by_id(al_id)\n al_x = al_unit.pos.x\n al_y = al_unit.pos.y\n dist = self.distance(x, y, al_x, al_y)\n\n if (dist < sight_range and al_unit.health > 0): # visible and alive\n ally_feats[i, 0] = 1 # visible\n ally_feats[i, 1] = dist / sight_range # distance\n ally_feats[i, 2] = (al_x - x) / sight_range # relative X\n ally_feats[i, 3] = (al_y - y) / sight_range # relative Y\n\n ind = 4\n if self.obs_all_health:\n ally_feats[i, ind] = (\n al_unit.health / al_unit.health_max\n ) # health\n ind += 1\n\n if self.obs_last_action:\n ally_feats[i, ind:] = self.last_action[al_id]\n\n # Own features\n ind = 0\n if self.obs_own_health:\n own_feats[ind] = unit.health / unit.health_max\n ind += 1\n own_feats[ind] = float(unit.loaded)\n ind += 1\n for resource_i in range(self.n_resources):\n own_feats[ind] = 
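# The egocentric feature scheme used throughout get_obs_agent() above, in
# isolation: offsets and distances are divided by the observer's sight range
# so values stay roughly within [-1, 1] regardless of map size, and anything
# out of range is simply left at zero.
import math

def relative_feats(x, y, other_x, other_y, sight_range):
    dist = math.hypot(other_x - x, other_y - y)
    if dist >= sight_range:
        return None                        # invisible: features stay zero
    return (dist / sight_range,            # normalised distance
            (other_x - x) / sight_range,   # relative X
            (other_y - y) / sight_range)   # relative Y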
float(unit.resources_loaded[resource_i])\n ind += 1\n\n x = unit.pos.x\n y = unit.pos.y\n sight_range = self.unit_sight_range(agent_id)\n\n for res_i in range(self.n_resources):\n resources_feats[res_i*2] = (self.resources[res_i].pos.x - x) / sight_range\n resources_feats[res_i*2+1] = (self.resources[res_i].pos.y - y) / sight_range\n\n base_feats[0] = (self.base_x - x) / sight_range\n base_feats[1] = (self.base_y - y) / sight_range\n base_feats[2] = self.base.health / self.base.max_health\n if self.obs_base_resources_amount:\n for res_i in range(self.n_resources):\n base_feats[3 + res_i] = self.base.resources_amount[res_i] / self.episode_limit * 10\n\n if self.has_barrack:\n barrack_feats[0] = (self.barrack_x - x) / sight_range\n barrack_feats[1] = (self.barrack_y - y) / sight_range\n barrack_feats[2] = self.barrack.health / self.barrack.max_health\n\n if self.obs_resources:\n agent_obs = np.concatenate(\n (\n move_feats.flatten(),\n enemy_feats.flatten(),\n ally_feats.flatten(),\n own_feats.flatten(),\n resources_feats.flatten(),\n base_feats.flatten()\n )\n )\n else:\n agent_obs = np.concatenate(\n (\n move_feats.flatten(),\n enemy_feats.flatten(),\n ally_feats.flatten(),\n own_feats.flatten(),\n base_feats.flatten()\n )\n )\n\n if self.has_barrack:\n agent_obs = np.concatenate(\n (\n agent_obs,\n barrack_feats.flatten()\n )\n )\n\n return agent_obs\n\n def get_obs(self):\n \"\"\"Returns all agent observations in a list.\n NOTE: Agents should have access only to their local observations\n during decentralised execution.\n \"\"\"\n agents_obs = [self.get_obs_agent(i) for i in range(self.n_agents)]\n return agents_obs\n\n def get_state(self):\n \"\"\"Returns the global state.\n NOTE: This functon should not be used during decentralised execution.\n \"\"\"\n if self.obs_instead_of_state:\n obs_concat = np.concatenate(self.get_obs(), axis=0).astype(\n np.float32\n )\n return obs_concat\n\n nf_al = 3 + 1 + self.n_resources\n nf_en = 3\n\n ally_state = np.zeros((self.n_agents, nf_al))\n enemy_state = np.zeros((self.n_enemies, nf_en))\n\n center_x = self.map_x / 2\n center_y = self.map_y / 2\n\n for al_id, al_unit in self.agents.items():\n if al_unit.health > 0:\n x = al_unit.pos.x\n y = al_unit.pos.y\n\n ally_state[al_id, 0] = (al_unit.health / al_unit.health_max) # health\n ally_state[al_id, 1] = (x - center_x) / self.map_x # relative X\n ally_state[al_id, 2] = (y - center_y) / self.map_y # relative Y\n ally_state[al_id, 3] = float(al_unit.loaded)\n ind = 4\n for resource_i in range(self.n_resources):\n ally_state[al_id, ind] = float(al_unit.resources_loaded[resource_i])\n ind += 1\n\n for e_id, e_unit in self.enemies.items():\n if e_unit.health > 0:\n x = e_unit.pos.x\n y = e_unit.pos.y\n\n enemy_state[e_id, 0] = (e_unit.health / e_unit.health_max) # health\n enemy_state[e_id, 1] = (x - center_x) / self.map_x # relative X\n enemy_state[e_id, 2] = (y - center_y) / self.map_y # relative Y\n\n ind = 3\n\n state = np.append(ally_state.flatten(), enemy_state.flatten())\n if self.state_last_action:\n state = np.append(state, self.last_action.flatten())\n\n for resource_i in range(self.n_resources):\n state = np.append(state, np.array([(self.resources[resource_i].pos.x-center_x) / self.map_x,\n (self.resources[resource_i].pos.y-center_y) / self.map_y]))\n\n if self.obs_base_resources_amount:\n state = np.append(state, np.array([(self.base_x - center_x) / self.map_x,\n (self.base_y - center_y) / self.map_y,\n self.base.health / self.base.max_health] +\n [ras / self.episode_limit for ras in 
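# Design note on get_state() above: growing the vector with repeated
# np.append() reallocates and copies the whole array on every call. An
# equivalent linear-time pattern collects the blocks in a list and
# concatenates once:
import numpy as np

blocks = [np.zeros(4, np.float32), np.ones(3, np.float32)]  # stand-ins for the ally/enemy/... blocks
state = np.concatenate(blocks)             # one copy instead of one per block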
self.base.resources_amount]))\n else:\n state = np.append(state, np.array([(self.base_x - center_x) / self.map_x,\n (self.base_y - center_y) / self.map_y,\n self.base.health / self.base.max_health]))\n\n if self.has_barrack:\n state = np.append(state, np.array([(self.barrack_x - center_x) / self.map_x,\n (self.barrack_y - center_y) / self.map_y,\n self.barrack.health / self.barrack.max_health]))\n state = state.astype(dtype=np.float32)\n\n return state\n\n def get_obs_size(self):\n \"\"\"Returns the size of the observation.\"\"\"\n nf_al = 4\n nf_en = 4\n\n if self.obs_all_health:\n nf_al += 1\n nf_en += 1\n\n if self.obs_enemy_health:\n nf_en += 1\n\n own_feats = 1 + self.n_resources\n if self.obs_own_health:\n own_feats += 1\n\n if self.obs_last_action:\n nf_al += self.n_actions\n\n move_feats = self.n_actions_move\n\n enemy_feats = self.n_enemies * nf_en\n ally_feats = (self.n_agents - 1) * nf_al\n\n if self.obs_base_resources_amount:\n base_feats = self.n_resources + 3 # TODO: Add n_resources? If so, role can be dynamic. Thus, leave it only for now\n else:\n base_feats = 3\n\n resources_feats = 2 * self.n_resources if self.obs_resources else 0\n barrack_feats = 3 if self.has_barrack else 0\n\n return move_feats + enemy_feats + ally_feats + own_feats + base_feats + resources_feats + barrack_feats\n\n def get_state_size(self):\n \"\"\"Returns the size of the global state.\"\"\"\n if self.obs_instead_of_state:\n return self.get_obs_size() * self.n_agents\n\n nf_al = 3 + 1 + self.n_resources\n nf_en = 3\n\n enemy_state = self.n_enemies * nf_en\n ally_state = self.n_agents * nf_al\n\n size = enemy_state + ally_state\n\n if self.state_last_action:\n size += self.n_agents * self.n_actions\n\n if self.obs_base_resources_amount:\n size += 3 + self.n_resources + 2 * self.n_resources\n else:\n size += 3 + 2 * self.n_resources\n\n if self.has_barrack:\n size += 3\n\n return size\n\n def check_bounds(self, x, y):\n \"\"\"Whether a point is within the map bounds.\"\"\"\n return (1 <= x <= self.map_x and 1 <= y <= self.map_y)\n\n def can_move(self, unit, direction):\n \"\"\"Whether a unit can move in a given direction.\"\"\"\n m = self._move_amount\n\n if direction == Direction.NORTH:\n x, y = int(unit.pos.x), int(unit.pos.y + m)\n elif direction == Direction.SOUTH:\n x, y = int(unit.pos.x), int(unit.pos.y - m)\n elif direction == Direction.EAST:\n x, y = int(unit.pos.x + m), int(unit.pos.y)\n else:\n x, y = int(unit.pos.x - m), int(unit.pos.y)\n\n if self.check_bounds(x, y):\n return True\n\n return False\n\n def can_reach(self, pos1, pos2):\n return ((abs(pos1.x - pos2.x) <= 0) and (abs(pos1.y - pos2.y) <= 0))\n\n def get_avail_agent_actions(self, agent_id):\n \"\"\"Returns the available actions for agent_id.\"\"\"\n unit = self.get_unit_by_id(agent_id)\n if unit.health > 0:\n # cannot choose no-op when alive\n avail_actions = [0] * self.n_actions\n\n # stop should be allowed\n avail_actions[1] = 1\n\n # see if we can move\n if self.can_move(unit, Direction.NORTH):\n avail_actions[2] = 1\n if self.can_move(unit, Direction.SOUTH):\n avail_actions[3] = 1\n if self.can_move(unit, Direction.EAST):\n avail_actions[4] = 1\n if self.can_move(unit, Direction.WEST):\n avail_actions[5] = 1\n\n # Can attack only alive units that are alive in the shooting range\n shoot_range = self.unit_shoot_range(agent_id)\n\n target_items = self.enemies.items()\n\n for t_id, t_unit in target_items:\n if t_unit.health > 0:\n if self.can_reach(unit.pos, t_unit.pos):\n avail_actions[t_id + self.n_actions_no_attack] = 1\n\n 
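# get_obs_size()/get_state_size() must mirror the feature layout hand-built
# in get_obs_agent()/get_state(); a cheap regression test for that invariant
# (assuming the default constructor works, as in __main__ below):
def check_declared_sizes(env):
    obs, state = env.reset()
    assert len(obs[0]) == env.get_obs_size(), "obs layout drifted"
    assert len(state) == env.get_state_size(), "state layout drifted"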
index = self.n_actions_no_attack + self.n_enemies\n\n for res_i in range(self.n_resources):\n if unit.resources_loaded[res_i] and unit.loaded: # Put Down\n if self.can_reach(unit.pos, self.base.pos):\n avail_actions[index + res_i * 2 + 1] = 1\n\n if not unit.loaded: # Gather\n if self.can_reach(unit.pos, self.resources[res_i].pos):\n avail_actions[index + res_i * 2] = 1\n\n return avail_actions\n else:\n # only no-op allowed\n return [1] + [0] * (self.n_actions - 1)\n\n def get_avail_actions(self):\n \"\"\"Returns the available actions of all agents in a list.\"\"\"\n avail_actions = []\n for agent_id in range(self.n_agents):\n avail_agent = self.get_avail_agent_actions(agent_id)\n avail_actions.append(avail_agent)\n return avail_actions\n\n def seed(self):\n \"\"\"Returns the random seed used by the environment.\"\"\"\n return self._seed\n\n def render(self):\n \"\"\"Not implemented.\"\"\"\n pass\n\n def get_unit_by_id(self, a_id):\n \"\"\"Get unit by ID.\"\"\"\n return self.agents[a_id]\n\n def get_stats(self):\n stats = {\n \"battles_won\": self.battles_won,\n \"battles_game\": self.battles_game,\n \"battles_draw\": self.timeouts,\n \"win_rate\": self.battles_won / self.battles_game,\n \"timeouts\": self.timeouts,\n \"restarts\": self.force_restarts,\n }\n return stats\n\n def get_own_feature_size(self):\n return self.get_obs_size()\n\n def close(self):\n return\n\n def save_replay(self):\n return\n\n def get_shield_bits_ally(self):\n return 0\n def get_unit_type_bits(self):\n return 0\n def get_map_size(self):\n return (self.map_x, self.map_y)\n\n def get_health_max(self):\n return [0 for _ in range(self.n_agents)]\n\n def get_shield_max(self):\n return [0 for _ in range(self.n_agents)]\n\n\nif __name__ == '__main__':\n env = GatherDefendEnv()\n env.reset()\n print(env.get_obs_size())\n print(env.get_state_size())\n\n for t in range(150):\n actions = []\n avail_actions = env.get_avail_actions()\n for agent_i in range(10):\n action = 0\n while True:\n action = random.randint(0, 11)\n\n if avail_actions[agent_i][action]:\n break\n\n actions.append(action)\n\n reward, terminate, info = env.step(actions)\n\n print(\">>>\", t)\n print(\"state size:\", env.get_state().shape)\n print(\"obs size:\", env.get_obs_agent(0).shape)\n print(\"reward:\", reward)\n print(env.base.pos.x, env.base.pos.y, env.resources[0].pos.x, env.resources[0].pos.y)\n for i in range(10):\n print(env.agents[i].pos.x, env.agents[i].pos.y, env.agents[i].resources_loaded)\n\n for i in range(2):\n print(env.enemies[i].pos.x, env.enemies[i].pos.y, env.enemies[i].health)\n\n print(\"base health:\", env.base.health)\n print(env.base.resources_amount)\n\n print('\\n\\n\\n\\n')\n\n if terminate:\n break","sub_path":"src/envs/gather_and_defend.py","file_name":"gather_and_defend.py","file_ext":"py","file_size_in_byte":38918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"68153328","text":"import os\nimport json\nimport sys, getopt\n# os.system(\"kill -9 $(ps -aux | grep RP_sync.py | awk '{print $2}')\")\n# os.system(\"kill -9 $(ps -aux | grep modified_filter_twoCameras.py | awk '{print $2}')\")\n# os.system(\"kill -9 $(ps -aux | grep axis_cameras_single_cam_v2_copy.py | awk '{print $2}')\")\n\nwith open(\"./room_information.json\") as f:\n\tinfo = json.load(f)\n\ntry:\n\topts, args = getopt.getopt(sys.argv[1:], \"R:\")\nexcept getopt.GetoptError:\n\tprint(\"Error\")\n\tsys.exit()\n\nfor opt, arg in opts:\n\tif opt in (\"-R\", \"roomnum\"):\n\t\troomnum = int(arg)\n\nfor ip 
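# The __main__ block above draws random legal actions by rejection sampling
# against the availability mask; drawing directly from the mask's nonzero
# indices does the same without the retry loop:
import numpy as np

def sample_valid_action(avail):            # avail: 0/1 list over all actions
    valid_ids = np.flatnonzero(np.array(avail))
    return int(np.random.choice(valid_ids))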
in info[roomnum - 1]['thermal']:\n\tprint(\"ssh \" + ip['thermal_ip'] + \" python3 end.py\")\n\tos.system(\"ssh \" + ip['thermal_ip'] + \" python3 end.py &\")","sub_path":"COSSY-stop.py","file_name":"COSSY-stop.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"109821557","text":"import pygame\nfrom Scenes import Scene\nfrom Shared.GameConstants import GameConstants\n\n\nclass MenuScene(Scene):\n\n def __init__(self, game):\n super(MenuScene, self).__init__(game)\n\n self.addText(\"F1 - Start Game\", x=300, y=400, size=30)\n self.addText(\"F2 - High Scores\", x=300, y=440, size=30)\n self.addText(\"F3 - Quit\", x=300, y=480, size=30)\n\n sprite = pygame.image.load(GameConstants.SPRITE_MENU).convert_alpha()\n self.__menuSprite = pygame.transform.smoothscale(sprite, GameConstants.SCREEN_SIZE)\n\n def render(self):\n self.getGame().screen.blit(self.__menuSprite, (0, 0))\n super(MenuScene, self).render()\n\n def handleEvents(self, events):\n super(MenuScene, self).handleEvents(events)\n\n for event in events:\n if event.type == pygame.QUIT:\n exit()\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE: # fixed: was event.type, which never equals a key code\n exit()\n if event.key == pygame.K_F1:\n self.getGame().changeScene(GameConstants.PLAYING_SCENE)\n if event.key == pygame.K_F2:\n self.getGame().changeScene(GameConstants.HIGHSCORE_SCENE)\n if event.key == pygame.K_F3:\n exit()\n","sub_path":"Scenes/MenuScene.py","file_name":"MenuScene.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"569922855","text":"import json\n\n\nclass Player:\n\n \"\"\"\n Class representing a Player\n\n Fields:\n name: str value for a player name\n symbol: str value for the player's board symbol\n \"\"\"\n\n X: int = 1\n O: int = -1\n\n with open('constants.json') as f:\n __constants = json.load(f)\n\n def __init__(self, symbol: str, name: str) -> None:\n\n \"\"\"\n Initializes a new instance with the values of the symbol and name parameters as its instance variables\n\n Args:\n symbol - represents a character of 'X' or 'O' value\n name - represents a value for the player's nickname\n\n Raises:\n ValueError if the value of the symbol parameter is not 'X' or 'O'\n \"\"\"\n\n self.name: str = name\n\n if symbol.lower() == 'x':\n self.symbol: int = self.X\n\n elif symbol.lower() == 'o':\n self.symbol: int = self.O\n\n else:\n raise ValueError(\"Unrecognized symbol value\")\n\n def return_optimal_move(self, board) -> tuple:\n\n while True:\n try:\n move = input(str(Player.__constants['error_player_move']).format(self.name))\n move = tuple([int(x) for x in move.split(' ')])\n return move[0], move[1], None\n\n except:\n print(Player.__constants['error_invalid_move'])\n","sub_path":"src/board/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"591452411","text":"import os\nimport random\nimport shutil\nfrom os import listdir, getcwd\n\ndef split(trainval_percent=0.1,train_percent = 0.9,xml_file_path='xml' ,txt_save_path='ImageSets'):\n total_xml = os.listdir(xml_file_path)\n num = len(total_xml)\n list = range(num)\n tv = int(num * trainval_percent)\n tr = int(tv * train_percent)\n trainval = random.sample(list, tv) # randomly draw tv items from the full index list\n train = random.sample(trainval, tr)\n if not os.path.exists(txt_save_path):\n os.makedirs(txt_save_path)\n 
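# COSSY-stop.py earlier in this chunk parses `-R <roomnum>` with getopt and
# silently proceeds if -R is absent; an argparse equivalent (flag name taken
# from the script) validates and converts in one step:
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-R', dest='roomnum', type=int, required=True,
                    help='1-based room number whose cameras should be stopped')
# parser.parse_args(['-R', '3']).roomnum == 3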
ftrainval = open(txt_save_path+'/trainval.txt', 'w')\n ftest = open(txt_save_path+'/test.txt', 'w')\n ftrain = open(txt_save_path+'/train.txt', 'w')\n fval = open(txt_save_path+'/val.txt', 'w')\n for i in list:\n name = total_xml[i][:-4] + '\\n'\n if i in trainval:\n ftrainval.write(name)\n if i in train:\n ftest.write(name)\n else:\n fval.write(name)\n else:\n ftrain.write(name)\n ftrainval.close()\n ftrain.close()\n fval.close()\n ftest.close()\n\n sets = ['train', 'trainval']\n wd = getcwd()\n print(wd)\n for image_set in sets:\n image_ids = open('ImageSets/%s.txt' % (image_set)).read().strip().split()\n # print(image_ids)\n image_list_file = open('images_%s.txt' % (image_set), 'w')\n labels_list_file = open('labels_%s.txt' % (image_set), 'w')\n for image_id in image_ids:\n image_list_file.write('%s.png\\n' % (image_id))\n labels_list_file.write('%s.xml\\n' % (image_id))\n image_list_file.close()\n labels_list_file.close()\n\ndef copy_file(new_path,path_txt,search_path):# arg 1: destination folder for the copied files; arg 2: the train/val list txt built in the previous step; arg 3: folder to search\n if not os.path.exists(new_path):\n os.makedirs(new_path)\n with open(path_txt, 'r') as lines:\n filenames_to_copy = set(line.rstrip() for line in lines)\n # print('filenames_to_copy:',filenames_to_copy)\n # print(len(filenames_to_copy))\n for root, _, filenames in os.walk(search_path):\n # print('root',root)\n # print(_)\n # print(filenames)\n for filename in filenames:\n if filename in filenames_to_copy:\n shutil.copy(os.path.join(root, filename), new_path)\n\nif __name__ == '__main__':\n split()\n # search for the files named in the split lists and copy them into the new YOLO-style layout\n copy_file('./images_data/train/','./images_train.txt','./images')\n copy_file('./images_data/val/','./images_trainval.txt','./images')\n copy_file('./labels_data/train/','./labels_train.txt','./xml')\n copy_file('./labels_data/val/','./labels_trainval.txt','./xml')\n\n\n","sub_path":"split_data.py","file_name":"split_data.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"648342421","text":"import logging\nimport random\nimport time\n\nfrom thespian.actors import ActorExitRequest, ActorSystem\n\nfrom thor.actors import DirectoryServer\nfrom thor.clerk import Clerk\n\nKEYSPACE = 1000\n\n\ndef spawn(sys_base, app_id):\n asys = ActorSystem(sys_base)\n clerk = asys.createActor(Clerk, globalName=\"clerk-%d\" % app_id)\n asys.ask(clerk,\n Clerk.View(\n asys.createActor(\n DirectoryServer, globalName=\"directory-server\"),\n KEYSPACE,\n ))\n\n oids = set()\n while len(oids) != 10:\n key_ = random.randint(1, KEYSPACE)\n if key_ in oids:\n continue\n oids.add(key_)\n\n success = False\n while not success:\n time.sleep(0.5)\n trx = asys.ask(clerk, Clerk.Read(list(oids)))\n if trx is False:\n continue\n mods = random.sample(oids, 5)\n for mod in mods:\n trx.write_set[mod] = app_id\n logging.debug(\"Clients initialized\")\n success = asys.ask(clerk, Clerk.Commit(trx))\n print(success)\n\n asys.tell(clerk, ActorExitRequest())\n\n\nif __name__ == \"__main__\":\n spawn(\"multiprocTCPBase\", 0)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"39849989","text":"from django.utils import timezone\nfrom .models import Asset, SMU, PM, Workorder, ModelWorkorder, AssetFilter, SMUFilter, Utilisation, UtilisationFilter, WorkorderFilter, PMFilter, ModelWorkorderFilter\nfrom django.contrib.auth.decorators import 
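# Bookkeeping of split() above, reproduced on toy data: with the defaults
# (trainval_percent=0.1, train_percent=0.9) test.txt receives ~9% of the
# names, val.txt ~1%, and train.txt the remaining ~90%; note that the inner
# `train` sample is the one written to test.txt.
import random

ids = range(100)
trainval = set(random.sample(ids, 10))            # 10% candidate pool
test = set(random.sample(sorted(trainval), 9))    # 90% of the pool -> test.txt
val = trainval - test                             # remainder      -> val.txt
train = set(ids) - trainval                       # other 90%      -> train.txt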
login_required\nfrom django.shortcuts import redirect, render, get_object_or_404\nfrom .forms import AssetForm, SMUForm, PMForm, WorkorderForm, AssetStatusForm, AssetLocationForm, WorkorderStatusForm, \\\n AssetAddForm, WorkorderCommentForm, WorkorderEditForm, WorkorderCompleteForm, ModelWorkorderForm, UtilisationForm\nfrom django_tables2 import RequestConfig\nfrom .tables import AssetTable, SMUTable, WorkorderTable, PMTable, ModelWorkorderTable, UtilisationTable, WorkorderAssetTable, PMAssetTable, SMUDashboardTable\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.models import User\nimport json\nfrom django.http import HttpResponse\nimport datetime\n\n\n\"\"\"\n Home Module\n\n\n\"\"\"\n\n\n@login_required(login_url=\"login/\")\ndef home(request):\n workorders_as = Workorder.objects.filter(status='AS').count()\n workorders_sc = Workorder.objects.filter(status='SC').count()\n workorders_ap = Workorder.objects.filter(status='AP').count()\n\n data = {\"AS\": workorders_as, \"SC\": workorders_sc, \"AP\": workorders_ap}\n json_string = json.dumps(data)\n\n readings_filtered = SMU.objects.filter(reading_date__lte=datetime.date.today())\n table_smu = SMUDashboardTable(readings_filtered)\n RequestConfig(request).configure(table_smu)\n\n return render(request, 'maintenance/home.html', {'workorders_as': workorders_as, 'workorders_sc': workorders_sc, 'workorders_ap': workorders_ap, 'data': json_string, 'table_smu': table_smu})\n\n\n\"\"\"\n Assets Module\n\n\n\"\"\"\n\n\n@login_required(login_url=\"login/\")\ndef assets(request):\n assets_filtered = AssetFilter(request.GET, queryset=Asset.objects.all())\n table = AssetTable(assets_filtered.qs)\n RequestConfig(request).configure(table)\n return render(request, 'maintenance/assets.html', {'table': table, 'filter': assets_filtered})\n\n\n@login_required(login_url=\"login/\")\ndef asset_view(request, pk):\n asset = get_object_or_404(Asset, pk=pk)\n\n time = datetime.datetime.now()\n time_date = datetime.date(time.year, time.month, time.day)\n overdue = 0\n\n workorders_filtered = Workorder.objects.filter(asset_id=asset.id, status__in=['AC', 'AS']).order_by('start_date')\n\n if workorders_filtered.count() > 0:\n for wo in workorders_filtered:\n if wo.start_date < time_date:\n overdue += 1\n else:\n overdue = 0\n\n if len(workorders_filtered) > 0:\n table_wo = WorkorderAssetTable(workorders_filtered)\n RequestConfig(request).configure(table_wo)\n else:\n table_wo = \"None\"\n\n pms_filtered = PM.objects.filter(asset_id=asset.id)\n\n if len(pms_filtered) > 0:\n table_pm = PMAssetTable(pms_filtered)\n RequestConfig(request).configure(table_pm)\n else:\n table_pm = \"None\"\n\n return render(request, 'maintenance/asset_view.html', {'asset': asset, 'table_wo': table_wo, 'table_pm': table_pm, 'overdue': overdue})\n\n\n@login_required(login_url=\"login/\")\ndef asset_edit(request, pk):\n asset = get_object_or_404(Asset, pk=pk)\n if request.method == \"POST\":\n edit_form = AssetForm(request.POST, instance=asset)\n if edit_form.is_valid():\n asset = edit_form.save(commit=False)\n asset.user = request.user\n asset.last_mod = timezone.now()\n asset.save()\n return redirect('asset_view', pk)\n else:\n edit_form = AssetForm(instance=asset)\n return render(request, 'maintenance/asset_edit.html', {'form': edit_form, 'asset': asset})\n\n\n@login_required(login_url=\"login/\")\ndef asset_remove(request, pk):\n asset = get_object_or_404(Asset, pk=pk)\n asset.delete()\n return redirect('assets')\n\n\n@login_required(login_url=\"login/\")\ndef 
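# Every list view in this module repeats one pattern: a django-filter
# FilterSet over the full queryset, a django-tables2 Table over the filtered
# rows, and RequestConfig for sorting/pagination. The pattern factored out
# (render and RequestConfig are already imported at the top of the module):
def filtered_table_view(request, model, filterset_cls, table_cls, template):
    filterset = filterset_cls(request.GET, queryset=model.objects.all())
    table = table_cls(filterset.qs)
    RequestConfig(request).configure(table)
    return render(request, template, {'table': table, 'filter': filterset})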
asset_add(request):\n if request.method == \"POST\":\n form = AssetAddForm(request.POST)\n if form.is_valid():\n asset = form.save(commit=False)\n asset.user = request.user\n asset.last_mod = timezone.now()\n asset.created_date = timezone.now()\n asset.save()\n return redirect('assets')\n else:\n form = AssetAddForm()\n return render(request, 'maintenance/asset_add.html', {'form': form})\n\n\n@login_required(login_url=\"login/\")\ndef asset_change_status(request, pk):\n asset = get_object_or_404(Asset, pk=pk)\n if request.method == \"POST\":\n status_form = AssetStatusForm(request.POST, instance=asset)\n if status_form.is_valid():\n asset = status_form.save(commit=False)\n asset.user = request.user\n asset.last_mod = timezone.now()\n asset.save()\n return redirect('asset_view', pk)\n else:\n status_form = AssetStatusForm(instance=asset)\n return render(request, 'maintenance/asset_change_status.html', {'form': status_form, 'asset': asset})\n\n\n@login_required(login_url=\"login/\")\ndef asset_change_location(request, pk):\n asset = get_object_or_404(Asset, pk=pk)\n if request.method == \"POST\":\n location_form = AssetLocationForm(request.POST, instance=asset)\n if location_form.is_valid():\n asset = location_form.save(commit=False)\n asset.user = request.user\n asset.last_mod = timezone.now()\n asset.save()\n return redirect('asset_view', pk)\n else:\n location_form = AssetLocationForm(instance=asset)\n return render(request, 'maintenance/asset_change_location.html', {'form': location_form, 'asset': asset})\n\n\n\"\"\"\n Work Orders Module\n\n\n\"\"\"\n\n\n@login_required(login_url=\"login/\")\ndef workorders(request):\n workorders_filtered = WorkorderFilter(request.GET, queryset=Workorder.objects.all())\n table = WorkorderTable(workorders_filtered.qs)\n RequestConfig(request).configure(table)\n return render(request, 'maintenance/workorders.html', {'table': table, 'filter': workorders_filtered})\n\n\n@login_required(login_url=\"login/\")\ndef workorder_view(request, asset_pk, workorder_pk):\n workorder = get_object_or_404(Workorder, pk=workorder_pk)\n asset = get_object_or_404(Asset, pk=asset_pk)\n return render(request, 'maintenance/workorder_view.html', {'workorder': workorder, 'asset': asset})\n\n\n@login_required(login_url=\"login/\")\ndef workorder_edit(request, asset_pk, workorder_pk):\n workorder = get_object_or_404(Workorder, pk=workorder_pk)\n asset = get_object_or_404(Asset, pk=asset_pk)\n if request.method == \"POST\":\n edit_form = WorkorderEditForm(request.POST, instance=workorder)\n if edit_form.is_valid():\n workorder = edit_form.save(commit=False)\n workorder.save()\n return redirect('workorder_view', asset_pk, workorder_pk)\n else:\n edit_form = WorkorderEditForm(instance=workorder)\n return render(request, 'maintenance/workorder_edit.html', {'form': edit_form, 'asset': asset, 'workorder': workorder})\n\n\n@login_required(login_url=\"login/\")\ndef workorder_comment(request, asset_pk, workorder_pk):\n workorder = get_object_or_404(Workorder, pk=workorder_pk)\n asset = get_object_or_404(Asset, pk=asset_pk)\n if request.method == \"POST\":\n comment_form = WorkorderCommentForm(request.POST, instance=workorder)\n if comment_form.is_valid():\n workorder = comment_form.save(commit=False)\n workorder.save()\n return redirect('workorder_view', asset_pk, workorder_pk)\n else:\n comment_form = WorkorderCommentForm(instance=workorder)\n return render(request, 'maintenance/workorder_comments.html', {'form': comment_form, 'asset': asset, 'workorder': 
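# asset_add / asset_change_status / asset_change_location above all stamp
# audit fields between validation and persistence; save(commit=False) opens
# that window. The shared shape, factored out (timezone is imported at the
# top of the module):
def save_with_audit(form, user):
    obj = form.save(commit=False)   # build the instance without touching the DB
    obj.user = user                 # who changed it
    obj.last_mod = timezone.now()   # when
    obj.save()                      # one INSERT/UPDATE persists everything
    return obj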
workorder})\n\n\n@login_required(login_url=\"login/\")\ndef workorder_add(request, asset_pk=None):\n if asset_pk is not None:\n asset_default = get_object_or_404(Asset, pk=asset_pk)\n else:\n asset_default = None\n\n if request.method == \"POST\":\n asset = get_object_or_404(Asset, num=request.POST.get(\"input-asset\"))\n form = WorkorderForm(request.POST)\n if form.is_valid():\n workorder = form.save(commit=False)\n workorder.asset = asset\n workorder.user = request.user\n workorder.created_date = timezone.now()\n workorder.save()\n if asset_default is not None:\n return redirect('asset_view', asset_default.pk)\n else:\n return redirect('workorders')\n else:\n form = WorkorderForm()\n return render(request, 'maintenance/workorder_add.html', {'form': form, 'asset_default': asset_default})\n\n\n@login_required(login_url=\"login/\")\ndef workorder_change_status(request, asset_pk, workorder_pk):\n workorder = get_object_or_404(Workorder, pk=workorder_pk)\n asset = get_object_or_404(Asset, pk=asset_pk)\n if request.method == \"POST\":\n status_form = WorkorderStatusForm(request.POST, instance=workorder)\n if status_form.is_valid():\n workorder = status_form.save(commit=False)\n workorder.last_mod = timezone.now()\n workorder.save()\n return redirect('workorder_view', asset_pk, workorder_pk)\n else:\n status_form = WorkorderStatusForm(instance=workorder)\n return render(request, 'maintenance/workorder_change_status.html', {'form': status_form, 'workorder': workorder,\n 'asset': asset})\n\n\n@login_required(login_url=\"login/\")\ndef workorder_complete(request, asset_pk, workorder_pk):\n workorder = get_object_or_404(Workorder, pk=workorder_pk)\n asset = get_object_or_404(Asset, pk=asset_pk)\n\n if workorder.pm is not None:\n pm = get_object_or_404(PM, pk=workorder.pm.pk)\n\n if request.method == \"POST\":\n complete_form = WorkorderCompleteForm(request.POST, instance=workorder)\n if complete_form.is_valid():\n workorder = complete_form.save(commit=False)\n workorder.last_mod = timezone.now()\n\n if complete_form['act_end_date'].value() != \"\":\n workorder.status = 'CO'\n\n workorder.save()\n\n if workorder.pm is not None:\n pm.ld_date = complete_form.cleaned_data['act_start_date']\n pm.ld_smu = complete_form.cleaned_data['reading']\n pm.save()\n\n return redirect('workorder_view', asset_pk, workorder_pk)\n else:\n complete_form = WorkorderCompleteForm(instance=workorder)\n return render(request, 'maintenance/workorder_complete.html', {'form': complete_form, 'workorder': workorder,\n 'asset': asset})\n\n\n\"\"\"\n Model Work Orders Module\n\n\n\"\"\"\n\n\n@login_required(login_url=\"login/\")\ndef model_workorders(request):\n model_workorders_filtered = ModelWorkorderFilter(request.GET, queryset=ModelWorkorder.objects.all())\n table = ModelWorkorderTable(model_workorders_filtered.qs)\n RequestConfig(request).configure(table)\n return render(request, 'maintenance/model_workorders.html', {'table': table, 'filter': model_workorders_filtered})\n\n\n@login_required(login_url=\"login/\")\ndef model_workorder_add(request):\n if request.method == \"POST\":\n form = ModelWorkorderForm(request.POST)\n if form.is_valid():\n workorder = form.save(commit=False)\n workorder.created_date = timezone.now()\n workorder.save()\n return redirect('model_workorders')\n else:\n form = ModelWorkorderForm()\n return render(request, 'maintenance/model_workorder_add.html', {'form': form})\n\n\n@login_required(login_url=\"login/\")\ndef model_workorder_view(request, workorder_pk):\n workorder = 
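# Note on workorder_complete() above: the status only flips to 'CO' when an
# actual end date was entered (an empty act_end_date means "saved, not yet
# finished"), and for PM-generated work orders the completion date and meter
# reading are copied back onto the PM (pm.ld_date / pm.ld_smu) so the next
# due date can be forecast from the last-done point.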
get_object_or_404(ModelWorkorder, pk=workorder_pk)\n return render(request, 'maintenance/model_workorder_view.html', {'workorder': workorder})\n\n\n@login_required(login_url=\"login/\")\ndef model_workorder_edit(request, workorder_pk):\n workorder = get_object_or_404(ModelWorkorder, pk=workorder_pk)\n if request.method == \"POST\":\n edit_form = ModelWorkorderForm(request.POST, instance=workorder)\n if edit_form.is_valid():\n workorder = edit_form.save(commit=False)\n workorder.save()\n return redirect('model_workorder_view', workorder_pk)\n else:\n edit_form = ModelWorkorderForm(instance=workorder)\n return render(request, 'maintenance/model_workorder_edit.html', {'form': edit_form, 'workorder': workorder})\n\n\n\"\"\"\n Preventative Maintenance Module\n\n\n\"\"\"\n\n\n@login_required(login_url=\"login/\")\ndef pms(request):\n pms_filtered = PMFilter(request.GET, queryset=PM.objects.all())\n table = PMTable(pms_filtered.qs)\n RequestConfig(request).configure(table)\n return render(request, 'maintenance/pms.html', {'table': table, 'filter': pms_filtered})\n\n\n@login_required(login_url=\"login/\")\ndef pm_view(request, asset_pk, pm_pk):\n pm = get_object_or_404(PM, pk=pm_pk)\n asset = get_object_or_404(Asset, pk=asset_pk)\n return render(request, 'maintenance/pm_view.html', {'pm': pm, 'asset': asset})\n\n\n@login_required(login_url=\"login/\")\ndef pm_add(request):\n if request.method == \"POST\":\n asset = get_object_or_404(Asset, num=request.POST.get(\"input-asset\"))\n model_wo = get_object_or_404(ModelWorkorder, id=request.POST.get(\"input-model-wo\"))\n form = PMForm(request.POST)\n if form.is_valid():\n pm = form.save(commit=False)\n pm.asset = asset\n pm.model_wo = model_wo\n pm.save()\n return redirect('pms')\n else:\n form = PMForm()\n return render(request, 'maintenance/pm_add.html', {'form': form})\n\n\n@login_required(login_url=\"login/\")\ndef pm_edit(request, asset_pk, pm_pk):\n pm = get_object_or_404(PM, id=pm_pk)\n asset = get_object_or_404(Asset, pk=asset_pk)\n model_wo_default = get_object_or_404(ModelWorkorder, pk=pm.model_wo.pk)\n if request.method == \"POST\":\n model_wo = get_object_or_404(ModelWorkorder, id=request.POST.get(\"input-model-wo\"))\n form = PMForm(request.POST, instance=pm)\n if form.is_valid():\n pm = form.save(commit=False)\n pm.model_wo = model_wo\n pm.save()\n return redirect('pm_view', asset_pk, pm_pk)\n else:\n form = PMForm(instance=pm)\n return render(request, 'maintenance/pm_edit.html', {'form': form, 'pm': pm, 'asset': asset, 'model_wo_default': model_wo_default})\n\n\n@login_required(login_url=\"login/\")\ndef pm_generate_wo(request, asset_pk, pm_pk):\n pm = get_object_or_404(PM, id=pm_pk)\n asset = get_object_or_404(Asset, pk=asset_pk)\n\n workorder = Workorder()\n workorder.user = request.user\n workorder.asset = asset\n workorder.desc = pm.model_wo.desc\n workorder.workorder_type = pm.model_wo.workorder_type\n workorder.status = pm.model_wo.status\n workorder.comments = pm.model_wo.comments\n workorder.start_date = pm.get_fc_date\n workorder.pm = pm\n workorder.origin = \"PM Schedule\"\n workorder.save()\n\n return redirect('pm_view', asset_pk, pm_pk)\n\n\n@login_required(login_url=\"login/\")\ndef pm_remove(request, pk):\n pm = get_object_or_404(PM, id=pk)\n pm.delete()\n return redirect('pms')\n\n\n\"\"\"\n Meter Readings Module\n\n\n\"\"\"\n\n\n@login_required(login_url=\"login/\")\ndef readings(request):\n readings_filtered = SMUFilter(request.GET, queryset=SMU.objects.all())\n table = SMUTable(readings_filtered.qs)\n 
RequestConfig(request).configure(table)\n return render(request, 'maintenance/readings.html', {'table': table, 'filter': readings_filtered})\n\n\n@login_required(login_url=\"login/\")\ndef reading_add(request, asset_pk=None):\n if asset_pk is not None:\n asset_default = get_object_or_404(Asset, pk=asset_pk)\n else:\n asset_default = None\n\n if request.method == \"POST\":\n asset = get_object_or_404(Asset, num=request.POST.get(\"input-asset\"))\n form = SMUForm(request.POST)\n if form.is_valid():\n reading = form.save(commit=False)\n reading.created_date = timezone.now()\n reading.asset = asset\n reading.save()\n if asset_default is not None:\n return redirect('asset_view', asset_default.pk)\n else:\n return redirect('readings')\n else:\n form = SMUForm()\n return render(request, 'maintenance/reading_add.html', {'form': form, 'asset_default': asset_default})\n\n\n@login_required(login_url=\"login/\")\ndef reading_view(request, asset_pk, reading_pk):\n reading = get_object_or_404(SMU, pk=reading_pk)\n asset = get_object_or_404(Asset, pk=asset_pk)\n return render(request, 'maintenance/reading_view.html', {'reading': reading, 'asset': asset})\n\n\n@login_required(login_url=\"login/\")\ndef reading_edit(request, asset_pk, reading_pk):\n reading = get_object_or_404(SMU, pk=reading_pk)\n asset = get_object_or_404(Asset, pk=asset_pk)\n if request.method == \"POST\":\n form = SMUForm(request.POST, instance=reading)\n if form.is_valid():\n reading = form.save(commit=False)\n reading.save()\n return redirect('reading_view', asset_pk, reading_pk)\n else:\n form = SMUForm(instance=reading)\n return render(request, 'maintenance/reading_edit.html', {'form': form, 'reading': reading, 'asset': asset})\n\n\n@login_required(login_url=\"login/\")\ndef reading_remove(request, pk):\n reading = get_object_or_404(SMU, id=pk)\n reading.delete()\n return redirect('readings')\n\n\n\"\"\"\n Utilisation Module\n\n\n\"\"\"\n\n\n@login_required(login_url=\"login/\")\ndef utilisations(request):\n utilisations_filtered = UtilisationFilter(request.GET, queryset=Utilisation.objects.all())\n table = UtilisationTable(utilisations_filtered.qs)\n RequestConfig(request).configure(table)\n return render(request, 'maintenance/utilisations.html', {'table': table, 'filter': utilisations_filtered})\n\n\n@login_required(login_url=\"login/\")\ndef utilisation_view(request, asset_pk, util_pk):\n utilisation = get_object_or_404(Utilisation, pk=util_pk)\n asset = get_object_or_404(Asset, pk=asset_pk)\n return render(request, 'maintenance/utilisation_view.html', {'utilisation': utilisation, 'asset': asset})\n\n\n@login_required(login_url=\"login/\")\ndef utilisation_add(request, asset_pk=''):\n if asset_pk != '':\n asset_default = get_object_or_404(Asset, pk=asset_pk)\n else:\n asset_default = ''\n\n if request.method == \"POST\":\n asset = get_object_or_404(Asset, num=request.POST.get(\"input-asset\"))\n form = UtilisationForm(request.POST)\n if form.is_valid():\n utilisation = form.save(commit=False)\n utilisation.asset = asset\n utilisation.save()\n return redirect('utilisations')\n else:\n form = UtilisationForm()\n return render(request, 'maintenance/utilisation_add.html', {'form': form, 'asset_def': asset_default})\n\n\n@login_required(login_url=\"login/\")\ndef utilisation_edit(request, asset_pk, utilisation_pk):\n utilisation = get_object_or_404(Utilisation, pk=utilisation_pk)\n asset = get_object_or_404(Asset, pk=asset_pk)\n if request.method == \"POST\":\n form = UtilisationForm(request.POST, instance=utilisation)\n if 
form.is_valid():\n utilisation = form.save(commit=False)\n utilisation.save()\n return redirect('utilisation_view', asset_pk, utilisation_pk)\n else:\n form = UtilisationForm(instance=utilisation)\n return render(request, 'maintenance/utilisation_edit.html', {'form': form, 'utilisation': utilisation, 'asset': asset})\n\n\n@login_required(login_url=\"login/\")\ndef utilisation_remove(request, pk):\n utilisation = get_object_or_404(Utilisation, id=pk)\n utilisation.delete()\n return redirect('utilisations')\n\n\n\"\"\"\n Reporting Module\n\n\n\"\"\"\n\n\n# To be built\n\n\n\"\"\"\n Administration Module\n\n\n\"\"\"\n\n\n@login_required(login_url=\"login/\")\ndef reports(request):\n return render(request, 'maintenance/reporting.html')\n\n\n@login_required(login_url=\"login/\")\ndef account(request):\n return render(request, 'maintenance/account.html')\n\n\n@login_required(login_url=\"login/\")\ndef change_password(request):\n if request.method == \"POST\":\n curr = request.POST.get(\"curr-pass\")\n new = request.POST.get(\"new-pass\")\n conf = request.POST.get(\"conf-pass\")\n\n if new == conf:\n user = User.objects.get(id__exact=request.user.id)\n user.set_password(new)\n user.save()\n\n form = \"Password changed successfully.\"\n else:\n form = \"New and confirmed passwords do not match. Please try again.\"\n else:\n form = \"\"\n return render(request, 'maintenance/account_change_password.html', {'form': form})\n\n\n@login_required(login_url=\"login/\")\ndef signout_view(request):\n logout(request)\n return redirect('home')\n\n\n\"\"\"\n Autocomplete Module\n\n\n\"\"\"\n\n\nASSET_TYPE = [\n (1, \"LV\", \"LV | Light Vehicle\"),\n (2, \"EX\", \"EX | Excavator\"),\n (3, \"DZ\", \"DZ | Track Dozer\"),\n (4, \"MG\", \"MG | Motor Grader\"),\n]\nASSET_STATUS = [\n (1, \"AC\", \"AC | Active\"),\n (2, \"ID\", \"ID | Idle\"),\n]\nASSET_LOCATION = [\n (1, \"BRIS\", \"BRIS | Brisbane\"),\n (2, \"SYDN\", \"SYDN | Sydney\"),\n (3, \"MELB\", \"MELB | Melbourne\"),\n (4, \"SING\", \"SING | Singapore\"),\n (5, \"ADEL\", \"ADEL | Adelaide\"),\n (6, \"PAPU\", \"PAPU | Papua New Guinea\"),\n]\nASSET_OWNER = [\n (1, \"OWNED\", \"OWNED | Owned\"),\n (2, \"LEASED\", \"LEASED | Leased\"),\n (3, \"HIRED\", \"HIRED | Externally Hired\"),\n]\nASSET_METER_TYPE = [\n (1, \"HR\", \"HR | Hours\"),\n (2, \"KM\", \"KM | Kilometres\"),\n]\nWORK_ORDER_TYPE = [\n (1, \"REPAIR\", \"REPAIR | Corrective Repair\"),\n (2, \"SERVICE\", \"SERVICE | Scheduled Service\"),\n (3, \"COMPONENT\", \"COMPONENT | Component Change Out\"),\n]\nWORK_ORDER_STATUS = [\n (1, \"AS\", \"AS | Awaiting Scheduling\"),\n (2, \"SC\", \"SC | Scheduled\"),\n (3, \"AP\", \"AP | Awaiting Parts\"),\n #(4, \"CO\", \"CO | Completed\"),\n]\n\n\ndef asset_suggest(request):\n if request.is_ajax():\n q = request.GET.get('term', '')\n assets_suggest = Asset.objects.filter(num__icontains=q)[:10]\n results = []\n for asset in assets_suggest:\n asset_json = {}\n asset_json['id'] = asset.id\n asset_json['label'] = asset.num + \" | \" + asset.desc\n asset_json['value'] = asset.num\n results.append(asset_json)\n data = json.dumps(results)\n else:\n data = 'fail'\n mimetype = 'application/json'\n return HttpResponse(data, mimetype)\n\n\ndef model_wo_suggest(request):\n if request.is_ajax():\n q = request.GET.get('term', '')\n model_wos_suggest = ModelWorkorder.objects.filter(id__icontains=q)[:10]\n results = []\n for model_wo in model_wos_suggest:\n model_wo_json = {}\n model_wo_json['id'] = model_wo.id\n model_wo_json['label'] = model_wo.id + \" | \" + model_wo.desc\n 
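# asset_suggest()/model_wo_suggest() above emit the payload shape jQuery UI's
# autocomplete widget expects: a JSON list of {id, label, value} objects,
# where `label` fills the dropdown and `value` lands in the input box.
# Illustrative payload (values are made up):
import json

payload = json.dumps([{'id': 1, 'label': 'A100 | Excavator', 'value': 'A100'}])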
model_wo_json['value'] = model_wo.id\n results.append(model_wo_json)\n data = json.dumps(results)\n else:\n data = 'fail'\n mimetype = 'application/json'\n return HttpResponse(data, mimetype)\n\n\ndef autocomplete(request):\n if request.is_ajax():\n field = request.GET[\"field\"]\n q = request.GET.get('term', '')\n suggestions = []\n\n if field == \"ASSET_TYPE\":\n field = ASSET_TYPE\n elif field == \"ASSET_STATUS\":\n field = ASSET_STATUS\n elif field == \"ASSET_LOCATION\":\n field = ASSET_LOCATION\n elif field == \"ASSET_OWNER\":\n field = ASSET_OWNER\n elif field == \"ASSET_METER_TYPE\":\n field = ASSET_METER_TYPE\n elif field == \"WORK_ORDER_TYPE\":\n field = WORK_ORDER_TYPE\n elif field == \"WORK_ORDER_STATUS\":\n field = WORK_ORDER_STATUS\n\n for i in range(0, len(field)):\n if q.upper() in field[i][2].upper():\n suggestions.append(field[i])\n\n results = []\n for suggestion in suggestions:\n suggestion_json = {}\n suggestion_json['id'] = suggestion[0]\n suggestion_json['label'] = suggestion[2]\n suggestion_json['value'] = suggestion[1]\n results.append(suggestion_json)\n data = json.dumps(results)\n\n else:\n data = 'fail'\n mimetype = 'application/json'\n return HttpResponse(data, mimetype)\n","sub_path":"maintenance_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":24505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"354313022","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef second_task():\n print(df[['DEPT', 'K', 'MAG(XM,FM)', 'RMS']].describe())\n\n\ndef third_task():\n plt.hist(df.DEPT, color='r')\n plt.xlabel('Depth,km')\n plt.ylabel('N')\n plt.savefig('images/third_task.png')\n plt.show()\n\n\ndef fourth_task():\n plt.hist(df.K, color='g', bins=15)\n plt.xlabel('K')\n plt.ylabel('N')\n plt.savefig('images/fourth_task.png')\n plt.show()\n\n\ndef fifth_task():\n plt.rcParams['figure.figsize'] = (15, 3)\n plt.plot(df.ORIGIN, df.RMS, 'o-')\n plt.xlabel('Datetime')\n plt.ylabel('RMS')\n plt.rcParams['figure.figsize'] = (15, 3)\n plt.savefig('images/fifth_task.png')\n plt.show()\n\n\ndef sixth_task():\n plt.plot(df['LONG E'], df['LAT N'], 'go', markersize=15)\n plt.savefig('images/sixth_task.png')\n plt.show()\n\n\ndef seventh_task():\n df.plot.scatter('MAG(XM,FM)', 'K', color='r')\n # a•n + b∑x = ∑y\n # a∑x + b∑x2 = ∑y•x\n\n # where x is Magnitude and x is 'K'\n n = df['MAG(XM,FM)'].count()\n sum_of_x = df['MAG(XM,FM)'].sum()\n sum_of_y = df['K'].sum()\n multiplication_x = (df['MAG(XM,FM)'] * df['MAG(XM,FM)']).sum()\n multiplication_xy = (df['MAG(XM,FM)'] * df['K']).sum()\n\n # with Kramer's Method\n d = n * multiplication_x - sum_of_x * sum_of_x\n d1 = sum_of_y * multiplication_x - multiplication_xy * sum_of_x\n d2 = n * multiplication_xy - sum_of_x * sum_of_y\n\n a = d1 / d\n b = d2 / d\n\n plt.title(f'K={a:.{2}f} + {b:.{2}f}*mag')\n plt.xlabel('Magnitude')\n plt.plot(df['MAG(XM,FM)'], df['MAG(XM,FM)'] * b + a, linestyle='--', linewidth=5)\n plt.savefig('images/seventh_task.png')\n plt.show()\n\n\nif __name__ == '__main__':\n file_with_data = \"cat2010.xlsx\"\n df = pd.read_excel(file_with_data)\n second_task()\n third_task()\n fourth_task()\n sixth_task()\n seventh_task()\n fifth_task()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"7779776","text":"#\n# CS1010S --- Programming Methodology\n#\n# Mission 2 - 3D 
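# seventh_task() above fits K = a + b*mag by solving the two normal equations
# with Cramer's rule (the second "x" in its comment should read "y", i.e. K).
# numpy's least-squares polyfit returns the same coefficients, which makes a
# handy cross-check on synthetic data:
import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
y = np.array([2.1, 3.9, 6.2, 7.8])
b, a = np.polyfit(x, y, 1)                         # slope, intercept
n, sx, sy = len(x), x.sum(), y.sum()
sxx, sxy = (x * x).sum(), (x * y).sum()
d = n * sxx - sx * sx
a_cramer = (sy * sxx - sxy * sx) / d
b_cramer = (n * sxy - sx * sy) / d
assert np.allclose([a, b], [a_cramer, b_cramer])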
Contest\n#\n# Note that written answers are commented out to allow us to run your\n# code easily while grading your problem set.\n\nfrom runes import *\n\n########\n# Task #\n########\n\n# You may submit up to three entries. Please update your entry number below.\n\n# Entry 1 of 3\n# ============\n# Write your function here. It should return a rune.\ndef create_tile(n=10, pic=black_bb):\n pic = scale_independent(1, 1/6, black_bb)\n res = translate(0, 0.42, pic)\n y = 0.32\n for i in range(9):\n res = overlay_frac(2/9, translate(0,y,pic), res)\n y -= 0.1\n return res\n\ndef fifty_shades_of_gray(tile, n=5):\n if n == 1:\n return tile\n return stack(beside(tile, tile), fifty_shades_of_gray(stackn(n-1, tile), n-1))\n \nhollusion(fifty_shades_of_gray(create_tile()))\n\n# Entry 2 of 3\n# ============\n# Write your function here. It should return a rune.\ndef get_peak(n=25, pic=circle_bb):\n res = pic\n for i in range(2, n+1):\n layer = scale((n+1-i)/n, pic)\n res = overlay_frac(1/i, layer, res)\n return res\n\ndef abstract_art(peak):\n res = peak\n for i in range(5):\n x,y = uniform(-1,1), uniform(-1,1)\n layer = translate(x, y, peak)\n res = overlay_frac(1/8, layer, res)\n return res\n\npeak = get_peak()\nanaglyph(abstract_art(peak))\n\n\n# Use one of the following methods to display your rune:\n# stereogram()\n# anaglyph()\n# hollusion()\n","sub_path":"Contest 2.3 3D Runes/contest02.3-template.py","file_name":"contest02.3-template.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"227677659","text":"# Copyright (c) 2015, Alphamonak Solutions Ltd.\n\nfrom __future__ import unicode_literals\n\nimport redapp, unittest\n\nfrom redapp.model.delete_doc import delete_doc\n\ntest_records = redapp.get_test_records('User')\n\nclass TestUser(unittest.TestCase):\n\tdef test_delete(self):\n\t\tredapp.get_doc(\"User\", \"test@example.com\").add_roles(\"_Test Role 2\")\n\t\tself.assertRaises(redapp.LinkExistsError, delete_doc, \"Role\", \"_Test Role 2\")\n\t\tredapp.db.sql(\"\"\"delete from tabUserRole where role='_Test Role 2'\"\"\")\n\t\tdelete_doc(\"Role\",\"_Test Role 2\")\n\n\t\tif redapp.db.exists(\"User\", \"_test@example.com\"):\n\t\t\tdelete_doc(\"User\", \"_test@example.com\")\n\n\t\tuser = redapp.copy_doc(test_records[1])\n\t\tuser.email = \"_test@example.com\"\n\t\tuser.insert()\n\n\t\tredapp.get_doc({\"doctype\": \"ToDo\", \"description\": \"_Test\"}).insert()\n\n\t\tdelete_doc(\"User\", \"_test@example.com\")\n\n\t\tself.assertTrue(not redapp.db.sql(\"\"\"select * from `tabToDo` where owner=%s\"\"\",\n\t\t\t(\"_test@example.com\",)))\n\n\t\tfrom redapp.core.doctype.role.test_role import test_records as role_records\n\t\tredapp.copy_doc(role_records[1]).insert()\n\n\tdef test_get_value(self):\n\t\tself.assertEquals(redapp.db.get_value(\"User\", \"test@example.com\"), \"test@example.com\")\n\t\tself.assertEquals(redapp.db.get_value(\"User\", {\"email\":\"test@example.com\"}), \"test@example.com\")\n\t\tself.assertEquals(redapp.db.get_value(\"User\", {\"email\":\"test@example.com\"}, \"email\"), \"test@example.com\")\n\t\tself.assertEquals(redapp.db.get_value(\"User\", {\"email\":\"test@example.com\"}, [\"first_name\", \"email\"]),\n\t\t\t(\"_Test\", \"test@example.com\"))\n\t\tself.assertEquals(redapp.db.get_value(\"User\",\n\t\t\t{\"email\":\"test@example.com\", \"first_name\": \"_Test\"},\n\t\t\t[\"first_name\", \"email\"]),\n\t\t\t\t(\"_Test\", \"test@example.com\"))\n\n\t\ttest_user = 
redapp.db.sql(\"select * from tabUser where name='test@example.com'\",\n\t\t\tas_dict=True)[0]\n\t\tself.assertEquals(redapp.db.get_value(\"User\", {\"email\":\"test@example.com\"}, \"*\", as_dict=True),\n\t\t\ttest_user)\n\n\t\tself.assertEquals(redapp.db.get_value(\"User\", \"xxxtest@example.com\"), None)\n\n\t\tredapp.db.set_value(\"Website Settings\", \"Website Settings\", \"_test\", \"_test_val\")\n\t\tself.assertEquals(redapp.db.get_value(\"Website Settings\", None, \"_test\"), \"_test_val\")\n\t\tself.assertEquals(redapp.db.get_value(\"Website Settings\", \"Website Settings\", \"_test\"), \"_test_val\")\n\n\tdef test_high_permlevel_validations(self):\n\t\tuser = redapp.get_meta(\"User\")\n\t\tself.assertTrue(\"user_roles\" in [d.fieldname for d in user.get_high_permlevel_fields()])\n\n\t\tme = redapp.get_doc(\"User\", \"testperm@example.com\")\n\t\tme.remove_roles(\"System Manager\")\n\n\t\tredapp.set_user(\"testperm@example.com\")\n\n\t\tme = redapp.get_doc(\"User\", \"testperm@example.com\")\n\t\tme.add_roles(\"System Manager\")\n\n\t\tself.assertTrue(\"System Manager\" not in [d.role for d in me.get(\"user_roles\")])\n\n\t\tredapp.set_user(\"Administrator\")\n\n\t\tme = redapp.get_doc(\"User\", \"testperm@example.com\")\n\t\tme.add_roles(\"System Manager\")\n\n\t\tself.assertTrue(\"System Manager\" in [d.role for d in me.get(\"user_roles\")])\n","sub_path":"redapp/core/doctype/user/test_user.py","file_name":"test_user.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"93022432","text":"from rest_framework import serializers\n\nfrom . import models\n\n\nclass FieldTypeSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for FieldType model.\"\"\"\n class Meta:\n model = models.FieldType\n fields = (\n 'id',\n 'name',\n 'slug',\n 'data_type',\n 'help_text',\n 'risk',\n 'display_order',\n 'enum_options',\n )\n read_only_fields = ('id', 'slug', 'risk')\n\n\nclass RiskTypeSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for RiskType model.\"\"\"\n fields = FieldTypeSerializer(many=True)\n\n class Meta:\n model = models.RiskType\n fields = ('id', 'name', 'slug', 'description', 'fields')\n\n def create(self, validated_data):\n fields_data = validated_data.pop('fields')\n risk, rcreated = models.RiskType.objects.get_or_create(\n **validated_data\n )\n risk.bulk_add_fields(fields_data)\n return risk","sub_path":"backend/risks/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"468974221","text":"from django.shortcuts import render, redirect\nfrom .models import *\nfrom .forms import SearchFrom\n\n# Create your views here.\ndef leaderboard(request):\n\tplayers = ArenaLB33.objects.all().order_by('-rating')\n\treturn render(request, 'leaderboard.html', { 'players' : players })\n\n\ndef search(request, name):\n\tprint(\"search string : {0}\".format(name))\n\tplayers = ArenaLB33.objects.get(name=name)\n\treturn render(request, 'search.html', { 'players': players })\n\n\ndef searchPage(request):\n\tif request.method == 'POST':\n\t\tname = request.POST['name']\n\t\tprint('search for {0}'.format(name))\n\t\ttry:\n\t\t\tplayers_3v3 = ArenaLB33.objects.filter(name__icontains=name)\n\t\t\tprint('player 3v3 count : {0}'.format(len(players_3v3)))\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\n\t\ttry:\n\t\t\tplayers_rbg = 
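# RiskTypeSerializer.create() above is the usual DRF recipe for a writable
# nested relation: pop the nested list out of validated_data before creating
# the parent, then hand the child dicts to a model-level helper. Skeleton of
# the recipe (Parent/bulk_add_children are placeholders):
#
# def create(self, validated_data):
#     children = validated_data.pop('fields')          # nested payload
#     parent, _ = Parent.objects.get_or_create(**validated_data)
#     parent.bulk_add_children(children)               # model helper persists them
#     return parent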
ArenaLBrbg.objects.filter(name__icontains=name)\n\t\t\tprint('player rbg count : {0}'.format(len(players_rbg)))\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\n\t\treturn render(request, 'search.html', { 'players_3v3': players_3v3, 'players_rbg':players_rbg })\n\t\t\n\telse:\n\t\treturn render(request, 'searchPage.html')","sub_path":"WebService/arena/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"495652896","text":"# 07-list&gen.py\n# Created by Alexcai at 2018/5/12\n\n\n\"\"\" 列表推导式\n[x for x in range(1, 10) if 条件]\n生成一个推导公式 每次取值时,根据推导计算后生成\n\"\"\"\n# 生成 包含 1~100 的列表\n\narr = []\ni = 1\nwhile i <= 100:\n arr.append(i)\n i += 1\nprint(arr)\narr.clear()\n\nfor i in range(1, 100):\n arr.append(i)\nprint(\"#\" * 50)\nprint(arr)\n\na = range(10, 20, 3)\nprint(a)\nfor i in a:\n print(i)\n# 列表生成式\nb = [i for i in range(0, 23)]\nprint(b)\nc = [\"dell\" for _ in range(0, 10)]\nprint(c)\n\nd = [i for i in range(9) if i % 3 == 0]\nprint(d)\n\n\"\"\" 集合\n{元素} : 集合中的元素都不会重复(重复添加没有效果)\n\"\"\"\n\nj = {11, 22, 33, 11, 44}\nprint(j)\n\n# 列表去重\n\nt = [1, 32, 44, 1, 5, 32]\nb = []\nfor i in t:\n if i not in b:\n b.append(i)\nprint(b)\n\n# 使用集合对数组进行去重 : set转换-> list 转换\ndf = set(t)\nda = list(df)\nprint(da)\n\n\n\n\n","sub_path":"PythonStudy/01-Day/src/main/07-list&gen.py","file_name":"07-list&gen.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"282161851","text":"# ../core/cfg/defaults.py\r\n\r\n'''\r\n$Rev$\r\n$LastChangedBy$\r\n$LastChangedDate$\r\n'''\r\n\r\n# =============================================================================\r\n# >> IMPORTS\r\n# =============================================================================\r\n# EventScripts Imports\r\nfrom es import ServerVar\r\n\r\n\r\n# =============================================================================\r\n# >> CLASSES\r\n# =============================================================================\r\nclass _CvarDefaults(dict):\r\n '''Class that stores cvars with their default value'''\r\n\r\n def clear(self):\r\n '''Resets all cvars in the dictionary and then clears itself'''\r\n\r\n # Loop through all cvars in the dictionary\r\n for cvar in self:\r\n\r\n # Set the cvar to its default value\r\n ServerVar(cvar).set(self[cvar])\r\n\r\n # Remove the notify flag from the cvar\r\n ServerVar(cvar).removeFlag('notify')\r\n\r\n # Clear the dictionary\r\n super(_CvarDefaults, self).clear()\r\n\r\n# Get the CvarDefaults instance\r\nCvarDefaults = _CvarDefaults()\r\n","sub_path":"cstrike/addons/eventscripts/gungame51/core/cfg/defaults.py","file_name":"defaults.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"290834494","text":"# -*- coding: utf-8 -*-\nimport os\nimport argparse\nimport sys\nimport csv\nfrom os import listdir\nfrom os.path import isfile, join\nDATA_SCIENCE_PKG_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nsys.path.append(DATA_SCIENCE_PKG_PATH)\nDATA_SCIENCE_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nfrom datascience.utils.cele_utils import * # noqa E402\nfrom datascience.services.email_classifier import * # noqa E402\n\n\nclass EmailTrainsetGenerator:\n #\n # EmailClassificationRunner loads a set of email text file\n # run the 
classifier and generate an output file in out_dir\n #\n def __init__(self, args):\n self.logger = logging.getLogger('EmailTrainsetGenerator')\n cwd = os.path.dirname(os.path.abspath(__file__))\n self.in_dir = os.path.join(cwd, args.in_dir)\n self.out_dir = os.path.join(cwd, args.out_dir)\n if not os.path.exists(self.out_dir):\n os.makedirs(self.out_dir)\n self.classifier = EmailClassifier()\n\n def process(self):\n all_email_files = [join(self.in_dir, fn) for fn in listdir(self.in_dir) if isfile(join(self.in_dir, fn))]\n training_file = self.out_dir + '/' + 'email_training.csv'\n training_cnt = 0\n with open(training_file, 'wb') as f:\n writer = csv.writer(f)\n title_row = [\"L_POS\", \"L_REV\", \"L_LATER\", \"L_NEG\", \"L_STOP\"] + sorted(EMAIL_SCORING_FEATURES)\n writer.writerow(title_row)\n for f_name in all_email_files:\n if is_in_text_old(f_name, ['e_neg_', 'e_Neg_', 'e_pos_', 'e_Pos_', 'e_rev_', 'e_Rev_']):\n if is_in_text_old(f_name, ['_pos_', '_Pos_']):\n # email_class = EMAIL_POSITIVE\n class_label = [1, 0, 0, 0, 0]\n elif is_in_text_old(f_name, ['_rev_', '_Rev_']):\n # email_class = EMAIL_REVIEW\n class_label = [0, 1, 0, 0, 0]\n elif is_in_text_old(f_name, ['_neg_later_', '_Neg_Later_']):\n # email_class = EMAIL_NEG_LATER\n class_label = [0, 0, 1, 1, 0]\n elif is_in_text_old(f_name, ['_neg_stop_', '_Neg_Stop_']):\n # email_class = EMAIL_NEG_STOP\n class_label = [0, 0, 0, 1, 1]\n elif is_in_text_old(f_name, ['_neg_', '_Neg_']):\n # email_class = EMAIL_NEGATIVE\n class_label = [0, 0, 0, 1, 0]\n else:\n continue\n\n lines = []\n with open(f_name) as rf:\n for i, line in enumerate(rf):\n if line:\n lines.append(line)\n if not lines:\n continue\n self.logger.info(\"Processing \" + f_name + \" ......\")\n feature_row = self.classifier.predicting_features(lines)\n training_cnt += 1\n writer.writerow(class_label + feature_row)\n self.logger.info(\"Done.\")\n self.logger.info(\"A total \" + str(training_cnt) + \" examples captured.\")\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--in_dir', default='../data/in/email_all', help=\"Path to read email content files\")\n parser.add_argument('--out_dir', default='../data/training/email_all', help=\"Path to write result file\")\n return parser.parse_args()\n\n\ndef main():\n logging.basicConfig(level=logging.INFO)\n logging.getLogger(__name__).setLevel(logging.INFO)\n\n args = parse_args()\n gen = EmailTrainsetGenerator(args)\n gen.process()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"datascience/scripts/email_trainset_gen.py","file_name":"email_trainset_gen.py","file_ext":"py","file_size_in_byte":3820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"306525313","text":"from tkinter import *\n\ndef convert_func():\n pound = float(e1_var.get()) * 2.20462\n gram = float(e1_var.get()) * 1000\n ounce = float(e1_var.get()) * 35.274\n t1_pound.delete(1.0,END)\n t1_pound.insert(END,pound)\n t1_gram.delete(1.0,END)\n t1_gram.insert(END,gram)\n t1_ounce.delete(1.0,END)\n t1_ounce.insert(END,ounce)\n\n\nw1 = Tk()\n#Labels for all entry and text widgets\nl1_var = StringVar()\nl1 = Label(w1,textvariable= l1_var)\nl1.grid(row=0,column=1)\nl1_var.set(\"Kilograms\")\n\nl1 = Label(w1,text = \"Pounds\")\nl1.grid(row=1,column=1)\n\nl2 = Label(w1,text = \"Grams\")\nl2.grid(row=1,column=2)\n\nl3 = Label(w1,text= \"Ounces\")\nl3.grid(row=1, column = 3)\n\n\n\n#Entry widget for KG\ne1_var = StringVar()\ne1 = 
Entry(w1,textvariable=e1_var)\ne1.grid(row=0,column=2)\n\n#Button widget to perform conversion\n\nb1 = Button(w1,text=\"Convert\",command=convert_func)\nb1.grid(row = 0 , column = 3)\n\n#All text widgets to display converted values\nt1_pound = Text(w1,height =1, width=10)\nt1_pound.grid(row=2, column=1)\n\nt1_gram = Text(w1,height =1, width = 10)\nt1_gram.grid(row =2, column = 2)\n\nt1_ounce = Text(w1,height=1,width = 10)\nt1_ounce.grid(row=2,column = 3)\n\nw1.mainloop()\n","sub_path":"Tkinter/KGConversions.py","file_name":"KGConversions.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"312505498","text":"\"\"\" A convenient holder for horizon detection steps:\n    - creating dataset with desired properties\n    - training a model\n    - making an inference on selected data\n    - evaluating predictions\n    - and more\n\"\"\"\n#pylint: disable=import-error, no-name-in-module, wrong-import-position, protected-access\nimport os\nimport gc\nimport logging\nimport random\nfrom time import perf_counter\nfrom copy import copy\nfrom glob import glob\nimport psutil\n\nimport numpy as np\nimport torch\n\nfrom tqdm.auto import tqdm\n\nfrom ...batchflow import Pipeline, FilesIndex\nfrom ...batchflow import B, V, C, D, P, R\nfrom ...batchflow.models.torch import EncoderDecoder\n\nfrom ..cubeset import SeismicCubeset, Horizon\nfrom ..metrics import HorizonMetrics\nfrom ..plotters import plot_loss, plot_image\n\n\n\nclass BaseController:\n    \"\"\" Provides interface for train, inference and quality assessment for the task of horizon detection.\n\n    Parameters\n    ----------\n    batch_size : int\n        Size of batches for train and inference.\n    crop_shape : tuple of 3 ints\n        Size of sampled crops for train and inference.\n    model_config : dict\n        Neural network architecture.\n    model_path : str\n        Path for pre-trained model.\n    device : str or int\n        Device specification.\n    show_plots : bool\n        Whether to show plots to the current output stream.\n    save_dir : str\n        Path to save images, logs, and other data.\n    logger : None or callable\n        If None, then logger is created inside `save_dir`.\n        If callable, then it is used directly to log messages.\n    bar : bool\n        Whether to show progress bars for training and inference.\n    \"\"\"\n    #pylint: disable=unused-argument, logging-fstring-interpolation, no-member, too-many-public-methods\n    #pylint: disable=access-member-before-definition, attribute-defined-outside-init\n    def __init__(self, batch_size=64, crop_shape=(1, 256, 256),\n                 model_config=None, model_path=None, device=None,\n                 show_plots=False, save_dir=None, logger=None, bar=True):\n        for key, value in locals().items():\n            if key != 'self':\n                setattr(self, key, value)\n\n        self.targets, self.predictions = None, None\n        self.model_pipeline = None\n        self.make_logger()\n\n    # Utility functions\n    def make_pbar(self, iterator, ncols=800, **kwargs):\n        \"\"\" Wrap supplied iterator with progress bar. 
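Returns the iterator unchanged when `self.bar` is False. 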
\"\"\"\n if self.bar:\n return tqdm(iterator, total=len(iterator), ncols=ncols, **kwargs)\n return iterator\n\n def make_save_path(self, *postfix):\n \"\"\" Create nested path from provided strings; create corresponding directories.\n\n If `save_dir` attribute is None, then None is returned: that is used as signal to omit saving\n of, for example, metric map images, etc.\n \"\"\"\n if self.save_dir is not None:\n path = os.path.join(self.save_dir, *postfix[:-1])\n os.makedirs(path, exist_ok=True)\n return os.path.join(self.save_dir, *postfix)\n return None\n\n def make_logger(self):\n \"\"\" Create logger inside `save_dir`.\n\n Note that logging is important.\n \"\"\"\n #pylint: disable=access-member-before-definition\n if self.logger is None and self.save_dir is not None:\n handler = logging.FileHandler(self.make_save_path('controller.log'), mode='w')\n handler.setFormatter(logging.Formatter('%(asctime)s %(message)s'))\n\n logger = logging.getLogger('controller_logger')\n logger.setLevel(logging.INFO)\n logger.addHandler(handler)\n self.logger = logger.info\n\n def log(self, msg):\n \"\"\" Log supplied message. \"\"\"\n if self.logger is not None:\n process = psutil.Process(os.getpid())\n uss = process.memory_full_info().uss / (1024 ** 3)\n self.logger(f'{self.__class__.__name__} ::: {uss:2.4f} ::: {msg}')\n\n # Dataset creation: geometries, labels, grids, samplers\n def make_dataset(self, cube_paths, horizon_paths=None):\n \"\"\" Create an instance of :class:`.SeismicCubeset` with cubes and horizons.\n\n Parameters\n ----------\n cube_paths : sequence or str\n Cube path(s) to load into dataset.\n horizon_paths : dict or str\n Horizons for each cube. Either a mapping from cube name to paths, or path only (if only one cube is used).\n\n Logs\n ----\n Inferred cubes and horizons for them.\n\n Returns\n -------\n Instance of dataset.\n \"\"\"\n cube_paths = cube_paths if isinstance(cube_paths, (tuple, list)) else [cube_paths]\n\n dsi = FilesIndex(path=cube_paths, no_ext=True)\n dataset = SeismicCubeset(dsi)\n\n dataset.load_geometries()\n\n if horizon_paths:\n if isinstance(horizon_paths, str):\n horizon_paths = {dataset.indices[0]: glob(horizon_paths)}\n dataset.create_labels(horizon_paths)\n\n msg = '\\n'\n for idx in dataset.indices:\n msg += f'{idx}\\n'\n for hor in dataset.labels[idx]:\n msg += f' {hor.name}'\n self.log(f'Created dataset ::: {msg}')\n return dataset\n\n def make_dataset_from_horizon(self, horizon):\n \"\"\" Create an instance of :class:`.SeismicCubeset` from a given horizon.\n\n Parameters\n ----------\n horizon : instance of :class:`.Horizon`\n Horizon for the inferred cube.\n \"\"\"\n cube_path = horizon.geometry.path\n\n dsi = FilesIndex(path=[cube_path], no_ext=True)\n dataset = SeismicCubeset(dsi)\n dataset.geometries[dataset.indices[0]] = horizon.geometry\n dataset.labels[dataset.indices[0]] = [horizon]\n\n self.log(f'Created dataset from horizon {horizon.name}')\n return dataset\n\n\n def make_grid(self, dataset, frequencies, **kwargs):\n \"\"\" Create a grid, based on quality map, for each of the cubes in supplied `dataset`.\n Works inplace.\n\n Parameters\n ----------\n dataset : :class:`.SeismicCubeset`\n Dataset with cubes.\n frequencies : sequence of ints\n List of frequencies, corresponding to `easy` and `hard` places in the cube.\n kwargs : dict\n Other arguments, passed directly in quality grid creation function.\n\n Logs\n ----\n Grid coverage: ratio of the number of points inside the grid to the total number of non-bad traces in cube.\n\n Plots\n 
-----\n Map with quality grid.\n \"\"\"\n grid_coverages = []\n for idx in dataset.indices:\n geometry = dataset.geometries[idx]\n geometry.make_quality_grid(frequencies, **kwargs)\n plot_image(\n geometry.quality_grid, title='quality grid',\n cmap='Reds', interpolation='bilinear', show=self.show_plots,\n savepath=self.make_save_path(f'quality_grid_{idx}.png')\n )\n\n grid_coverage = (np.nansum(geometry.quality_grid) /\n (np.prod(geometry.cube_shape[:2]) - np.nansum(geometry.zero_traces)))\n self.log(f'Created grid on {idx}; coverage is: {grid_coverage}')\n grid_coverages.append(grid_coverage)\n return grid_coverages\n\n\n def make_sampler(self, dataset, bins=None, use_grid=False, grid_src='quality_grid', side_view=False, **kwargs):\n \"\"\" Create sampler. Works inplace.\n\n Plots\n -----\n Maps with examples of sampled slices of `crop_shape` size, both normalized and not.\n \"\"\"\n if use_grid:\n grid = getattr(dataset.geometries[0], grid_src) if isinstance(grid_src, str) else grid_src\n else:\n grid = None\n\n dataset.create_sampler(quality_grid=grid, bins=bins)\n dataset.modify_sampler('train_sampler', finish=True, **kwargs)\n dataset.train_sampler(random.randint(0, 100000))\n self.log('Created sampler')\n\n # Cleanup\n dataset.sampler = None\n\n for i, idx in enumerate(dataset.indices):\n dataset.show_slices(\n src_sampler='train_sampler', normalize=False, shape=self.crop_shape,\n idx=i, adaptive_slices=use_grid, grid_src=grid_src, side_view=side_view,\n cmap='Reds', interpolation='bilinear', show=self.show_plots, figsize=(15, 15),\n savepath=self.make_save_path(f'slices_{idx}.png')\n )\n\n dataset.show_slices(\n src_sampler='train_sampler', normalize=True, shape=self.crop_shape,\n idx=i, adaptive_slices=use_grid, grid_src=grid_src, side_view=side_view,\n cmap='Reds', interpolation='bilinear', show=self.show_plots, figsize=(15, 15),\n savepath=self.make_save_path(f'slices_n_{idx}.png')\n )\n\n # Train model on a created dataset\n def train(self, dataset, model_config=None, device=None, n_iters=300, prefetch=1,\n use_grid=False, grid_src='quality_grid', side_view=False,\n width=3, batch_size_multiplier=1, rebatch_threshold=0.00, **kwargs):\n \"\"\" Train model for horizon detection.\n If `model_path` was supplied during instance initialization, model is loaded instead.\n\n In order to change architecture of the model, pass different `model_config` to the instance initialization.\n In order to change training procedure, re-define :meth:`.get_train_template`.\n\n Parameters\n ----------\n n_iters : int\n Number of iterations to train for.\n use_grid : bool\n Whether to sample crops only from `quality_grid`.\n side_view : bool or float\n If False, then has no effect.\n If float, then probability of crop being sampled along `x` axis instead of regular `i`-axis sampling.\n If True, then the same as 0.5.\n\n Logs\n ----\n Start of training; end of training; average loss at the last 50 iterations.\n\n Plots\n -----\n Graph of loss over iterations.\n \"\"\"\n model_config = model_config or self.model_config\n device = device or self.device\n\n # Prepare parameters\n self.log('Train started')\n pipeline_config = {\n 'model_config': {**model_config, 'device': device},\n 'crop_shape': self.crop_shape,\n 'adaptive_slices': use_grid, 'grid_src': grid_src,\n 'side_view': side_view,\n 'width': width,\n 'rebatch_threshold': rebatch_threshold,\n **kwargs\n }\n\n # Test batch: get statistics and time separately\n bs = self.batch_size\n self.batch_size = int(self.batch_size * batch_size_multiplier)\n 
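# A single warm-up batch is drawn with the (possibly enlarged) batch size so that\n        # the actual batch size, cache usage and memory footprint can be logged before\n        # the real run; the original batch size is restored a few lines below.\n        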
model_pipeline = (self.get_train_template(**kwargs) << pipeline_config) << dataset\n        batch = model_pipeline.next_batch(D('size'))\n        self.log(f'Used batch size is: {self.batch_size}; actual batch size is: {len(batch)}')\n        self.log(f'Cache sizes: {[item.cache_size for item in dataset.geometries.values()]}')\n        self.log(f'Cache lengths: {[item.cache_length for item in dataset.geometries.values()]}')\n        self.batch_size = bs\n\n        # Run training procedure\n        start_time = perf_counter()\n        self.log(f'Prefetch is: {prefetch}')\n        model_pipeline.run(D('size'), n_iters=n_iters + np.random.randint(100),\n                           bar={'bar': 'n' if self.bar else False, 'monitors': 'loss_history'},\n                           prefetch=prefetch)\n        plot_loss(model_pipeline.v('loss_history'), show=self.show_plots,\n                  savepath=self.make_save_path('model_loss.png'))\n        self.train_time = perf_counter() - start_time\n\n        # Log stats and store model\n        self.model_pipeline = model_pipeline\n        last_loss = np.mean(model_pipeline.v('loss_history')[-50:])\n        self.log(f'Train finished in {self.train_time:4.1f}; last loss is {last_loss:4.4f}')\n        self.log(f'Cache sizes: {[item.cache_size for item in dataset.geometries.values()]}')\n        self.log(f'Cache lengths: {[item.cache_length for item in dataset.geometries.values()]}')\n\n        # Cleanup\n        torch.cuda.empty_cache()\n        self.model_pipeline.reset('variables')\n        batch.images, batch.masks = None, None\n        for item in dataset.geometries.values():\n            item.reset_cache()\n        return last_loss\n\n    def load_model(self, path=None):\n        \"\"\" Load pre-trained model from disk. \"\"\"\n        path = path or self.model_path\n        raise NotImplementedError('Yet to be implemented!')\n\n    # Inference on a chosen set of data\n    def inference(self, dataset, version=1, orientation='i', overlap_factor=2, heights_range=None,\n                  batch_size_multiplier=1, **kwargs):\n        \"\"\" Make inference with trained/loaded model on supplied dataset.\n        Works by splitting the cube into `crop_shape` chunks, making a prediction for each of them,\n        then aggregating into one horizon.\n\n        Parameters\n        ----------\n        version : int\n            How to do splitting:\n            If 0, then cube is split into chunks of `crop_shape` size,\n            model is used to create predictions for each of them,\n            then chunks are aggregated into huge 3D array, from which the horizon surface is extracted.\n            This approach is fast but very memory intensive: it is advised to use it only on small (<10GB) cubes.\n\n            If 1, then cube is split into `big` chunks, each of them is split again into `crop_shape` pieces,\n            model is used to create predictions for the latter,\n            which are aggregated into 3D array of `big` chunks size, from which the horizon surfaces are extracted.\n            At last, all of the horizons are merged into one.\n            This approach is a tad slower, yet allows for finer memory control by controlling how big `big` chunks are.\n            Additional parameters are:\n            chunk_size : int\n                Size of `big` chunks along smallest dimension.\n            chunk_overlap : float\n                Overlap percentage of successive chunks. Must be in 0, 1 range.\n\n        orientation : {'i', 'x', 'ix'}\n            Orientation of the inference:\n            If 'i', then cube is split into inline-oriented slices.\n            If 'x', then cube is split into crossline-oriented slices.\n            If 'ix', then both of the previous approaches are applied, and results are merged.\n        overlap_factor : number\n            Overlapping ratio of successive crops. 
Can be seen as `how many crops would cross every given point`.\n        heights_range : None or sequence of two ints\n            If None, then heights are inferred: from minimum of heights of all horizons in dataset to the maximum.\n            If a sequence of two ints, the heights to run inference on.\n\n        Logs\n        ----\n        Inference start along with its parameters; inference end along with the number of predicted horizons,\n        total amount of predicted points and size of the biggest horizon.\n        \"\"\"\n        self.log(f'Starting {orientation} inference_{version} with overlap of {overlap_factor}')\n        self.targets = dataset.labels[0]\n        method = getattr(self, f'inference_{version}')\n\n        bs = self.batch_size\n        self.batch_size = int(self.batch_size * batch_size_multiplier)\n\n        start_time = perf_counter()\n        if len(orientation) == 1:\n            horizons = method(dataset, orientation=orientation, overlap_factor=overlap_factor,\n                              heights_range=heights_range, **kwargs)\n        else:\n            horizons_i = method(dataset, orientation='i', overlap_factor=overlap_factor,\n                                heights_range=heights_range, **kwargs)\n            self.log('Done i-inference')\n\n            horizons_x = method(dataset, orientation='x', overlap_factor=overlap_factor,\n                                heights_range=heights_range, **kwargs)\n            self.log('Done x-inference')\n\n            horizons = Horizon.merge_list(horizons_i + horizons_x, minsize=1000)\n        self.inference_time = perf_counter() - start_time\n        self.log(f'Inference done in {self.inference_time:4.1f}')\n\n        # Log some results\n        if horizons:\n            horizons.sort(key=len, reverse=True)\n            self.log(f'Num of predicted horizons: {len(horizons)}')\n            self.log(f'Total number of points in all of the horizons {sum(len(item) for item in horizons)}')\n            self.log(f'Len max: {len(horizons[0])}')\n        else:\n            self.log('Zero horizons were predicted; possible problems..?')\n\n        self.predictions = horizons\n        self.batch_size = bs\n        torch.cuda.empty_cache()\n\n    def make_inference_ranges(self, dataset, heights_range):\n        \"\"\" Ranges of inference. \"\"\"\n        geometry = dataset.geometries[0]\n        spatial_ranges = [[0, item-1] for item in geometry.cube_shape[:2]]\n        if heights_range is None:\n            if self.targets:\n                min_height = max(0,\n                                 min(horizon.h_min for horizon in self.targets) - self.crop_shape[2]//2)\n                max_height = min(geometry.depth,\n                                 max(horizon.h_max for horizon in self.targets) + self.crop_shape[2]//2)\n                heights_range = [min_height, max_height]\n            else:\n                heights_range = [0, geometry.depth-1]\n        return spatial_ranges, heights_range\n\n    def make_inference_config(self, orientation):\n        \"\"\" Parameters depending on orientation. \"\"\"\n        config = {'model_pipeline': self.model_pipeline}\n        if orientation == 'i':\n            crop_shape_grid = self.crop_shape\n            config['side_view'] = False\n            config['order'] = (0, 1, 2)\n        else:\n            crop_shape_grid = np.array(self.crop_shape)[[1, 0, 2]]\n            config['side_view'] = 1.0\n            config['order'] = (1, 0, 2)\n        return config, crop_shape_grid\n\n\n    def inference_0(self, dataset, heights_range=None, orientation='i', overlap_factor=2,\n                    filtering_matrix=None, filter_threshold=0, prefetch=1, **kwargs):\n        \"\"\" Inference on chunks, assemble into massive 3D array, extract horizon surface. 
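Fast, but the whole prediction volume is materialized at once, so `inference_1` is preferable on large cubes. 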
\"\"\"\n _ = kwargs\n spatial_ranges, heights_range = self.make_inference_ranges(dataset, heights_range)\n config, crop_shape_grid = self.make_inference_config(orientation)\n\n # Actual inference\n horizons = self._inference_chunk(dataset=dataset, ranges=(*spatial_ranges, heights_range),\n pipeline_config=config, crop_shape=crop_shape_grid,\n overlap_factor=overlap_factor, filtering_matrix=filtering_matrix,\n filter_threshold=filter_threshold, prefetch=prefetch)\n\n # Log memory usage info and clean up\n self.log(f'Cache sizes: {[item.cache_size for item in dataset.geometries.values()]}')\n self.log(f'Cache lengths: {[item.cache_length for item in dataset.geometries.values()]}')\n total_length = dataset.grid_info['length']\n total_unfiltered_length = dataset.grid_info['unfiltered_length']\n self.log(f'Inferenced total of {total_length} out of {total_unfiltered_length} crops possible')\n\n for item in dataset.geometries.values():\n item.reset_cache()\n gc.collect()\n return horizons\n\n def inference_1(self, dataset, heights_range=None, orientation='i', overlap_factor=2, prefetch=1,\n chunk_size=100, chunk_overlap=0.2, filtering_matrix=None, filter_threshold=0, **kwargs):\n \"\"\" Split area for inference into `big` chunks, inference on each of them, merge results. \"\"\"\n _ = kwargs\n geometry = dataset.geometries[0]\n spatial_ranges, heights_range = self.make_inference_ranges(dataset, heights_range)\n config, crop_shape_grid = self.make_inference_config(orientation)\n\n # Actual inference\n axis = np.argmin(crop_shape_grid[:2])\n iterator = range(spatial_ranges[axis][0], spatial_ranges[axis][1], int(chunk_size*(1 - chunk_overlap)))\n self.log(f'Starting chunk {orientation} inference with {len(iterator)} chunks ' +\n f'over {spatial_ranges}, {heights_range}')\n\n horizons = []\n total_length, total_unfiltered_length = 0, 0\n for chunk in self.make_pbar(iterator, desc=f'Inference on {geometry.name}| {orientation}'):\n current_spatial_ranges = copy(spatial_ranges)\n current_spatial_ranges[axis] = [chunk, min(chunk + chunk_size, spatial_ranges[axis][-1])]\n\n chunk_horizons = self._inference_chunk(dataset=dataset, ranges=(*current_spatial_ranges, heights_range),\n pipeline_config=config, crop_shape=crop_shape_grid,\n overlap_factor=overlap_factor, filtering_matrix=filtering_matrix,\n filter_threshold=filter_threshold, prefetch=prefetch)\n horizons.extend(chunk_horizons)\n\n total_length += dataset.grid_info['length']\n total_unfiltered_length += dataset.grid_info['unfiltered_length']\n\n # Log and cleanup\n self.log(f'Cache sizes: {[item.cache_size for item in dataset.geometries.values()]}')\n self.log(f'Cache lengths: {[item.cache_length for item in dataset.geometries.values()]}')\n self.log(f'Inferenced total of {total_length} out of {total_unfiltered_length} crops possible')\n for item in dataset.geometries.values():\n item.reset_cache()\n gc.collect()\n\n return Horizon.merge_list(horizons, mean_threshold=5.5, adjacency=3, minsize=500)\n\n\n def _inference_chunk(self, dataset, ranges, pipeline_config, crop_shape,\n overlap_factor, filtering_matrix, filter_threshold, prefetch):\n \"\"\" Inference on a chunk of cube, parametrized by `ranges`. 
\"\"\"\n dataset.make_grid(dataset.indices[0], crop_shape,\n *ranges,\n batch_size=self.batch_size,\n overlap_factor=overlap_factor,\n filtering_matrix=filtering_matrix,\n filter_threshold=filter_threshold)\n\n inference_pipeline = (self.get_inference_template() << pipeline_config) << dataset\n\n predicted_crops = []\n for _ in range(dataset.grid_iters):\n batch = inference_pipeline.next_batch(D('size'))\n predicted_crops.extend(item for item in batch.predictions)\n\n # Assemble crops together in accordance to the created grid\n assembled_pred = dataset.assemble_crops(predicted_crops, order=pipeline_config.get('order'))\n\n # Extract Horizon instances\n chunk_horizons = Horizon.from_mask(assembled_pred, dataset.grid_info, threshold=0.5, minsize=50)\n\n # Cleanup\n inference_pipeline.reset('variables')\n inference_pipeline = None\n gc.collect()\n return chunk_horizons\n\n\n def evaluate(self, n=5, add_prefix=False, dump=False, supports=50, name=''):\n \"\"\" Assess quality of predictions, created by :meth:`.inference`, against targets and seismic data.\n\n Parameters\n ----------\n n : int\n Number of the best horizons to evaluate.\n add_prefix : bool\n Whether to add add prefix to created images and other files.\n dump : bool\n Whether to store horizons on disk.\n supports : int\n Number of support traces for metric computation.\n\n Logs\n ----\n Basic stats like coverage, size, number of holes.\n If targets are provided, adds `window_rate` and mean difference.\n\n Plots\n -----\n Maps of computed metrics: correlation, local correlation.\n If targets are provided, also l1 differences.\n \"\"\"\n #pylint: disable=cell-var-from-loop, invalid-name, protected-access\n results = []\n for i in range(n):\n info = {}\n horizon = self.predictions[i]\n horizon._horizon_metrics = None\n hm = HorizonMetrics((horizon, self.targets))\n prefix = [horizon.geometry.short_name, f'{i}_horizon'] if add_prefix else []\n\n # Basic demo: depth map and properties\n horizon.show(show=self.show_plots,\n savepath=self.make_save_path(*prefix, name + 'horizon_img.png'))\n\n with open(self.make_save_path(*prefix, name + 'self_results.txt'), 'w') as result_txt:\n horizon.evaluate(compute_metric=False, printer=lambda msg: print(msg, file=result_txt))\n\n # Metric maps\n corrs = hm.evaluate(\n 'support_corrs',\n supports=supports,\n plot=True, show=self.show_plots,\n savepath=self.make_save_path(*prefix, name + 'corrs.png')\n )\n\n phase = hm.evaluate(\n 'instantaneous_phase',\n plot=True, show=self.show_plots,\n savepath=self.make_save_path(*prefix, name + 'instantaneous_phase.png')\n )\n\n perturbed_mean, perturbed_max = hm.evaluate(\n 'perturbed',\n plot=True, show=self.show_plots, device='gpu',\n savepath=self.make_save_path(*prefix, name + 'perturbed.png')\n )\n\n # Compare to targets\n if self.targets:\n _, oinfo = hm.evaluate('find_best_match', agg=None)\n info = {**info, **oinfo}\n\n with open(self.make_save_path(*prefix, name + 'results.txt'), 'w') as result_txt:\n hm.evaluate(\n 'compare', agg=None, hist=False,\n plot=True, show=self.show_plots,\n printer=lambda msg: print(msg, file=result_txt),\n savepath=self.make_save_path(*prefix, name + 'l1.png')\n )\n self.log(f'horizon {i}: wr {info[\"window_rate\"]}, mean {info[\"mean\"]}')\n\n # Save surface to disk\n if dump:\n dump_name = name + '_' if name else ''\n dump_name += f'{i}_' if n > 1 else ''\n dump_name += horizon.name or 'predicted'\n horizon.dump(path=self.make_save_path(*prefix, dump_name), add_height=False)\n\n info['corrs'] = np.nanmean(corrs)\n 
info['phase'] = np.nanmean(np.abs(phase))\n info['perturbed_mean'] = np.nanmean(perturbed_mean)\n info['perturbed_max'] = np.nanmean(perturbed_max)\n results.append((info))\n\n self.log(f'horizon {i}: len {len(horizon)}, cov {horizon.coverage:4.4}, '\n f'corrs {info[\"corrs\"]:4.4}, phase {info[\"phase\"]:4.4}, depth {horizon.h_mean}')\n\n return results\n\n # Pipelines\n def load_pipeline(self, dynamic_factor=1, dynamic_low=None, dynamic_high=None, **kwargs):\n \"\"\" Define data loading pipeline.\n\n Following parameters are fetched from pipeline config: `adaptive_slices`, 'grid_src' and `rebatch_threshold`.\n \"\"\"\n _ = kwargs\n self.log(f'Generating data with dynamic factor of {dynamic_factor}')\n return (\n Pipeline()\n .init_variable('shape', None)\n .call(generate_shape, shape=C('crop_shape'),\n dynamic_factor=dynamic_factor, dynamic_low=dynamic_low, dynamic_high=dynamic_high,\n save_to=V('shape'))\n .make_locations(points=D('train_sampler')(self.batch_size),\n shape=V('shape'),\n side_view=C('side_view', default=False),\n adaptive_slices=C('adaptive_slices'),\n grid_src=C('grid_src', default='quality_grid'))\n\n .create_masks(dst='masks', width=C('width', default=3))\n .mask_rebatch(src='masks', threshold=C('rebatch_threshold', default=0.1))\n .load_cubes(dst='images')\n .adaptive_reshape(src=['images', 'masks'], shape=V('shape'))\n .normalize(mode='q', src='images')\n )\n\n def augmentation_pipeline(self, **kwargs):\n \"\"\" Define augmentation pipeline. \"\"\"\n _ = kwargs\n return (\n Pipeline()\n .transpose(src=['images', 'masks'], order=(1, 2, 0))\n .flip(axis=1, src=['images', 'masks'], seed=P(R('uniform', 0, 1)), p=0.3)\n .additive_noise(scale=0.005, src='images', dst='images', p=0.3)\n .rotate(angle=P(R('uniform', -15, 15)),\n src=['images', 'masks'], p=0.3)\n .scale_2d(scale=P(R('uniform', 0.85, 1.15)),\n src=['images', 'masks'], p=0.3)\n .elastic_transform(alpha=P(R('uniform', 35, 45)), sigma=P(R('uniform', 4, 4.5)),\n src=['images', 'masks'], p=0.2)\n .transpose(src=['images', 'masks'], order=(2, 0, 1))\n )\n\n def train_pipeline(self, **kwargs):\n \"\"\" Define model initialization and model training pipeline.\n\n Following parameters are fetched from pipeline config: `model_config`.\n \"\"\"\n _ = kwargs\n return (\n Pipeline()\n .init_variable('loss_history', [])\n .init_model('dynamic', EncoderDecoder, 'model', C('model_config'))\n\n .train_model('model',\n fetches='loss',\n images=B('images'),\n masks=B('masks'),\n save_to=V('loss_history', mode='a'))\n )\n\n def get_train_template(self, **kwargs):\n \"\"\" Define the whole training procedure pipeline including data loading, augmentation and model training. 
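The three stages are separate pipelines composed with `+`, so each one can be re-defined independently. 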
\"\"\"\n return (\n self.load_pipeline(**kwargs) +\n self.augmentation_pipeline(**kwargs) +\n self.train_pipeline(**kwargs)\n )\n\n\n def get_inference_template(self):\n \"\"\" Defines inference procedure.\n\n Following parameters are fetched from pipeline config: `model_pipeline`, `crop_shape`, `side_view` and `order`.\n \"\"\"\n inference_template = (\n Pipeline()\n # Initialize everything\n .import_model('model', C('model_pipeline'))\n\n # Load data\n .make_locations(points=D('grid_gen')(), shape=self.crop_shape,\n side_view=C('side_view', default=False))\n .load_cubes(dst='images')\n .adaptive_reshape(src='images', shape=self.crop_shape)\n .normalize(mode='q', src='images')\n\n # Predict with model, then aggregate\n .predict_model('model',\n B('images'),\n fetches='predictions',\n save_to=B('predictions'))\n )\n return inference_template\n\n\ndef generate_shape(_, shape, dynamic_factor=1, dynamic_low=None, dynamic_high=None):\n \"\"\" Dynamically generate shape of a crop to get. \"\"\"\n dynamic_low = dynamic_low or dynamic_factor\n dynamic_high = dynamic_high or dynamic_factor\n\n i, x, h = shape\n x_ = np.random.randint(x // dynamic_low, x * dynamic_high + 1)\n h_ = np.random.randint(h // dynamic_low, h * dynamic_high + 1)\n return (i, x_, h_)\n","sub_path":"seismiqb/src/controllers/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":31503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"422413697","text":"from objects.objects import Office, LivingSpace\nfrom random import choice\n\noffices = {'available': [], 'unavailable': []}\nliving_spaces = {'available': [], 'unavailable': []}\nrooms = []\nunallocated_persons = []\n\n# # Regarding allocations\nstaff_allocations = [] # Stores data(dict) fo randomly allocated office for staff\nfellow_allocations = [] # Stores data(dict) fo randomly allocated livingspace & office for fellow\nunallocated_persons = [] # Stores names for persons not allocated rooms due to capacity or choice for livingspaces\n\n\ndef create_room(room_type, room_name):\n # Creates room in Dojo && Creates Multiple rooms\n\n for single_room_name in room_name:\n if room_type.strip().lower() not in ['office', 'livingspace']:\n print('TypeError for room_type %s' % room_type.title())\n return 'TypeError for room_type'\n if len(single_room_name.strip()) == 0:\n print('Invalid room name %s' % room_type.title())\n return 'Invalid room name'\n if room_type.strip().lower() == 'office':\n if single_room_name in offices['available'] or single_room_name in offices['unavailable']:\n print('%s %s already exists' % (room_type.title(), single_room_name.title()))\n return 'duplicate'\n new_office = Office(single_room_name)\n offices['available'].append(new_office.name)\n rooms.append(new_office)\n print('%s has been created as %s' % (single_room_name.title(), room_type.title()))\n\n\n elif room_type.strip().lower() == 'livingspace':\n if single_room_name in living_spaces['available'] or single_room_name in living_spaces['unavailable']:\n print('%s %s already exists' % (room_type.title(), single_room_name.title()))\n return 'duplicate'\n new_livingspace = LivingSpace(single_room_name)\n living_spaces['available'].append(new_livingspace.name)\n rooms.append(new_livingspace)\n print('%s has been created as %s' %(single_room_name.title(), room_type.title()))\n\n\ndef add_person(person_name, person_type, wants_accommodation='n'):\n # random allocation\n # only a fellow can be allocated a living space\n # a staff can only be 
allocated an office\n if wants_accommodation is None:\n wants_accommodation = 'n'\n if person_type.strip().lower() not in ['staff', 'fellow']:\n print('Invalid Person Type')\n return 'Invalid Person Type'\n\n p_i = person_name.split()\n if not str(p_i[0]).isalpha() or not str(p_i[1]).isalpha():\n if isinstance(person_name, int):\n return ''\n else:\n print('Non-Alphabetical names added %s' % person_name)\n return 'Non-Alphabetical names added'\n if len(offices['available']) == 0 and len(living_spaces['available']) == 0:\n print('There are no rooms in the system.')\n\n if wants_accommodation.strip().lower() != 'y' and wants_accommodation.strip().lower() != 'n':\n print('Wants accommodation not Y or N')\n return 'Wants accommodation not Y or N'\n if person_type.strip().lower() == 'staff':\n staff_allocation = dict()\n staff_allocation[person_name] = choice(offices['available'])\n\n staff_allocations.append(staff_allocation)\n for room in rooms:\n if room.name == staff_allocation[person_name]:\n if room.capacity > 0:\n room.capacity -= 1\n room.occupants.append(person_name)\n else:\n offices['available'].remove(room.name)\n offices['unavailable'].append(room.name)\n unallocated_persons.append(person_name)\n print('%s %s has been successfully added \\n' % (person_type.title(), person_name.title()))\n person_office = staff_allocation[person_name]\n\n print('%s has been allocated the office %s \\n' % (person_name.title(), person_office.title()))\n\n elif person_type.strip().lower() == 'fellow':\n\n fellow_allocation = dict()\n fellow_allocation['name'] = person_name\n fellow_allocation['office'] = choice(offices['available'])\n fellow_allocations.append(fellow_allocation)\n for room in rooms:\n if room.name == fellow_allocation['office']:\n if room.capacity > 0:\n room.capacity -= 1\n room.occupants.append(person_name)\n else:\n offices['available'].remove(room.name)\n offices['unavailable'].append(room.name)\n unallocated_persons.append(person_name)\n person_office = fellow_allocation['office']\n print('%s %s has been successfully added \\n' % (person_type.title(), person_name.title()))\n print('%s has been allocated the office %s \\n' % (person_name.title(), person_office.title()))\n if wants_accommodation == 'y':\n if len(living_spaces['available']) == 0:\n print('Sorry there are no remaining livingspaces')\n unallocated_persons.append(person_name)\n else:\n fellow_allocation['living_space'] = choice(living_spaces['available'])\n for room in rooms:\n if room.name == fellow_allocation['living_space']:\n if room.capacity > 0:\n room.capacity -= 1\n room.occupants.append(person_name)\n else:\n living_spaces['available'].remove(room.name)\n living_spaces['unavailable'].append(room.name)\n unallocated_persons.append(person_name)\n fellow_allocations[-1] = fellow_allocation\n print('%s has been allocated the livingspace %s \\n' % (person_name.title(), fellow_allocation['living_space'].title()))\n\n\ndef print_room(room_name):\n # This function gets a room name as an argument and proceeds\n # to display the results on the occupants of the room if any\n if len(rooms) == 0:\n print('THERE ARE NO ROOMS IN THE SYSTEM YET. \\n')\n return 'No rooms exist at the moment.'\n all_rooms = []\n for room in rooms:\n all_rooms.append(room.name)\n if room_name not in all_rooms:\n print('The room %s does not exist on our system. 
\\n' % room_name.title())\n return 'Room does not exist.'\n\n for room in rooms:\n if room.name == room_name:\n print('ROOM NAME:%s(%s) \\n' % (room_name, room.type))\n print('=' * 20)\n if room.occupants:\n for occupant in room.occupants:\n print(occupant)\n else:\n print('This room is empty. \\n')\n return False\n\n\ndef print_allocations(filename):\n if len(rooms) == 0:\n print('THERE ARE NO ROOMS IN THE SYSTEM. \\n ')\n return 'Error. No rooms within system.'\n output_text = ''\n for room in rooms:\n output_text += '__' * 10\n output_text += '\\n'\n output_text += room.name.upper() + \" \" + room.type.upper()\n output_text += '\\n'\n output_text += '__' * 10\n output_text += '\\n'\n if room.occupants:\n for occupant in room.occupants:\n output_text += occupant.upper()\n output_text += '\\n'\n else:\n output_text += 'There are no people in %s yet.' % room.name.upper()\n output_text += '\\n'\n if filename:\n file = open(filename + '.txt', 'w')\n file.write(output_text)\n file.close()\n print('Printed to %s.txt' % filename.title())\n return 'Printed to file'\n else:\n print(output_text)\n return 'Printed to screen \\n'\n\n\ndef print_unallocated(filename):\n # '''\n # After Max capacity has been recorded in a particular\n # room, the person is thereafter appended to a the unallocated\n # persons list.\n # '''\n output_text = ''\n if not unallocated_persons:\n print('There are no unallocated people as of now. \\n')\n return 'No unallocated people as per now.'\n else:\n if filename is None:\n print('UNALLOCATED PEOPLE.')\n for unallocated in unallocated_persons:\n output_text += unallocated\n return 'Some people are unallocated. \\n'\n else:\n file = open(filename + '.txt', 'w')\n output_text += \"UNALLOCATED PEOPLE.\\n\"\n output_text += '\\n'\n for unallocated in unallocated_persons:\n output_text += unallocated.title()\n output_text += '\\n'\n file.write(output_text)\n file.close()\n print('Print out made to %s.txt \\n' % filename.title())\n\n\ndef load_people(file_name):\n fullname = None\n person_type = None\n wants_alloc = None\n \"\"\"Add people to rooms from a txt file\"\"\"\n with open(file_name + '.txt', 'r') as my_file:\n people = my_file.readlines()\n print_room(\"Started loading....\")\n for p in people:\n p = p.split()\n if len(p) > 0:\n if len(p) < 4:\n fullname = p[0].title() + ' ' + p[1].title()\n person_type = p[2]\n else:\n fullname = p[0].title() + ' ' + p[1].title()\n person_type = p[2]\n wants_alloc = p[3].lower()\n add_person(fullname, person_type, wants_alloc)\n print('DONE \\n')\n\n\ndef reallocate_person():\n pass\n","sub_path":"logic.py","file_name":"logic.py","file_ext":"py","file_size_in_byte":9595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"487159900","text":"# python3\n#homework for week 1\n\ndef solution(NumberList):\n Sorted = sorted(NumberList)\n ans = Sorted[-2]*Sorted[-1]\n return ans\n\n\nif __name__ == \"__main__\":\n num = int(input())\n numberList=[]\n listType = input()\n numberList = [int(x) for x in listType.split()]\n print(solution(numberList))","sub_path":"Coursera-Data-Structure/DataStructure1_warmUp/max_pairwise_productSelf.py","file_name":"max_pairwise_productSelf.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"235946822","text":"import json\nimport csv\nclass Customer:\n def __init__(self, name, membership_type):\n self.name = name\n self.membership_type = membership_type\n 
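# echo each new customer so the bulk creation below is visible on the console\n        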
print(name, membership_type)\n    def upgrade_membership(self, new_membership):\n        self.membership_type = new_membership\n\ncustomers = []\nmembership_types = [\"Bronze\", \"Silver\", \"Gold\", \"Platinum\", \"Diamond\"]\nfrom random import randrange\nnames = json.load(open('first-names.json'))\nfor name in names:\n    customers.append(Customer(name, membership_types[randrange(len(membership_types))]))\n\nwith open('person.csv', 'w', newline='') as file:\n    writer = csv.writer(file)\n    for customer in customers:\n        writer.writerow([customer.name, customer.membership_type])\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"406053683","text":"#!/usr/bin/python\nimport os\nimport time\nfrom daemon import runner\n\nclass App():\n    def __init__(self):\n        self.root = os.path.abspath(os.path.dirname(__file__))\n        #Root directory\n        self.run_dir = os.path.join(self.root, \"run\")\n        self.stdin_path = '/dev/null'\n        #File logs\n        self.stdout_path = os.path.join(self.run_dir, 'stdout.txt') #this file is the output of the daemon\n        self.stderr_path = os.path.join(self.run_dir, 'stderr.txt') #If you have an error\n        self.pidfile_path = os.path.join(self.run_dir,'test.pid') #Daemon Process ID\n        self.pidfile_timeout = 5\n    def run(self):\n        while True:\n            print(\"Hi, I'm a daemon... Created in Python....\")\n            time.sleep(10)\n\napp = App()\ndaemon_runner = runner.DaemonRunner(app)\ndaemon_runner.do_action()\n","sub_path":"deamon.py","file_name":"deamon.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"230326263","text":"from rest_framework import serializers\nfrom django.contrib.auth.models import User\nfrom APIsApp.models import Route\n\n\nclass RouteSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = Route\n        fields = [\n            \"id\",\n            \"longitude_start\",\n            \"latitude_start\",\n            \"longitude_end\",\n            \"latitude_end\",\n            \"distance\",\n            \"coordinates_json\",\n            \"created_at\",\n        ]\n        extra_kwargs = {\n            \"created_at\": {\"read_only\": True}\n        }\n","sub_path":"TripPlanner - Project/APIsApp/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"389994194","text":"# -*- coding: utf-8 -*-\nimport os\nimport csv\n\ndef csvToList(csvFile):\n\twith open(csvFile, encoding=\"utf-8\") as f_csv:\n\t\treader = csv.reader(f_csv)\n\t\tafuckList = []\n\t\tfor row in reader:\n\t\t\tafuckList.append(row)\n\t\treturn afuckList\n\ndef export(fromList, toList, newFileName):\n\twith open(newFileName, \"w\", newline=\"\", encoding=\"utf-8\") as new_csv:\n\t\twriter = csv.writer(new_csv)\n\t\tfor row, cRow in zip(toList, fromList):\n\t\t\trow[2] = cRow[2]\n\t\t\twriter.writerow(row)\n\nfor item in os.listdir('.'):\n\tif (item.endswith('NameDB.csv') and not(item.startswith('SOR'))):\n\t\t# fileName = item\n\t\tnewname = (\"SOR Names - \" + item)\n\t\t\n\t\ttry:\n\t\t\tfromTable = csvToList(item)\n\t\t\ttoTable = csvToList(newname)\n\t\t\texport(fromTable, toTable, newname)\n\t\t\tprint(newname, 'overwritten.')\n\t\texcept 
FileNotFoundError:\n\t\t\tprint(\"Cannot overwrite without the source file (╯‵□′)╯︵┻━┻\")","sub_path":"im_export/translationIMPORT.py","file_name":"translationIMPORT.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"244320423","text":"__author__ = 'Cherry'\nfrom kivy.app import App\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.floatlayout import FloatLayout\nimport random\n\n\nclass MainApp(BoxLayout):\n    def change_color(self):\n        color = [random.random() for i in range(3)] + [1]\n        lbl1 = self.ids.lbl1\n        lbl1.color = color\n\n\nclass TestApp(App):\n    def build(self):\n        return MainApp()\n\n\nif __name__ == '__main__':\n    TestApp().run()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"532822947","text":"from django.conf.urls import url\nfrom django.contrib.auth.views import login, logout\n\nfrom .views import ProfileView, RegisterFormView, UserActivationView\n\nurlpatterns = [\n    # accounts/...\n    url(r'login/$', login, kwargs={'template_name': 'users/login.html',\n                                   'redirect_authenticated_user': True}, name='login'),\n\n    url(r'^logout/$', logout, name='logout'),  # uses the built-in logout view\n    url(r'^profile/(?P<username>[\\w.@+-]+)/$', ProfileView.as_view(), name='profile'),\n    url(r'^signup/$', RegisterFormView.as_view(), name='register'),\n    url(r'^activate/(?P<code>.+)$', UserActivationView.as_view(), name='activation'),\n]","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"123930457","text":"import pathlib\nimport random\nimport math\nimport os\n\nfrom typing import List, Optional, Tuple\n\n\nCell = Tuple[int, int]\nCells = List[int]\nGrid = List[Cells]\n\n\nclass GameOfLife:\n\n    def __init__(\n        self,\n        size: Tuple[int, int],\n        randomize: bool = True,\n        max_generations: Optional[float] = float('inf'),\n    ) -> None:\n        # Size of the cell grid\n        self.rows, self.cols = size\n        # Previous generation of cells\n        self.prev_generation = self.create_grid()\n\n        # Current generation of cells\n        self.curr_generation = self.create_grid(randomize=randomize)\n\n        # Maximum number of generations\n        if max_generations:\n            self.max_generations = max_generations\n\n        # Current generation number\n        # FIXED from self.generations\n        self.n_generation = 1\n\n    def create_grid(self, randomize: bool = False) -> Grid:\n        \"\"\"\n        Create the list of cells.\n        A cell is considered alive if its value is 1; otherwise the cell\n        is considered dead, i.e. its value is 0.\n        Parameters\n        ----------\n        randomize : bool\n            If true, a matrix is created in which every cell is equally likely\n            to be alive or dead; otherwise all cells are created dead.\n        Returns\n        ----------\n        out : Grid\n            Matrix of cells of size `cols` x `rows`.\n        \"\"\"\n\n        if randomize:\n            grid = [[random.choice([0, 1]) for i in range(\n                self.cols)] for j in range(self.rows)]\n        else:\n            grid = [[0 for i in range(self.cols)]\n                    for j in range(self.rows)]\n\n        return grid\n\n    def get_neighbours(self, cell: Cell) -> Cells:\n        \"\"\"\n        Return the list of neighbouring cells for the cell `cell`.\n        Neighbours are the cells adjacent horizontally, vertically and\n        diagonally, i.e. in every direction.\n        Parameters\n        ----------\n        cell : Cell\n            The cell whose list of neighbours is requested. 
The cell\n            is represented by a tuple with its coordinates on the game field.\n        Returns\n        ----------\n        out : Cells\n            List of neighbouring cells.\n        \"\"\"\n        row, col = cell\n        neighbours_arr = []\n\n        # -┙ bottom right border\n        if (row + 1 < self.rows) and (col + 1 < self.cols):\n            neighbours_arr.append(self.curr_generation[row + 1][col + 1])\n        # *| right border\n        if (row + 1 < self.rows):\n            neighbours_arr.append(self.curr_generation[row + 1][col])\n\n        # ┍- top left border\n        if (row - 1 >= 0) and (col - 1 >= 0):\n            neighbours_arr.append(self.curr_generation[row - 1][col - 1])\n        # |* left border\n        if (row - 1 >= 0):\n            neighbours_arr.append(self.curr_generation[row - 1][col])\n\n        # -┐ top right border\n        if (row + 1 < self.rows) and (col - 1 >= 0):\n            neighbours_arr.append(self.curr_generation[row + 1][col - 1])\n        # ^^ top border\n        if (col - 1 >= 0):\n            neighbours_arr.append(self.curr_generation[row][col - 1])\n\n        # └- bottom left border\n        if (row - 1 >= 0) and (col + 1 < self.cols):\n            neighbours_arr.append(self.curr_generation[row - 1][col + 1])\n        # __ bottom border\n        if (col + 1 < self.cols):\n            neighbours_arr.append(self.curr_generation[row][col + 1])\n\n        return neighbours_arr\n\n    def get_next_generation(self) -> Grid:\n        \"\"\"\n        Compute the next generation of cells.\n        Returns\n        ----------\n        out : Grid\n            The new generation of cells.\n        \"\"\"\n\n        # Create empty grid\n        next_generation = self.create_grid()\n\n        for row in range(self.rows):\n            for col in range(self.cols):\n                neighbours_count = sum(self.get_neighbours((row, col)))\n\n                # Determine if cell stays from previous grid\n                if (neighbours_count >= 2) and (neighbours_count <= 3) and self.curr_generation[row][col]:\n                    next_generation[row][col] = 1\n                # Determine if new cell appears\n                elif neighbours_count == 3:\n                    next_generation[row][col] = 1\n                else:\n                    next_generation[row][col] = 0\n\n        return next_generation\n\n    def step(self) -> None:\n        \"\"\"\n        Perform one step of the game.\n        \"\"\"\n        self.prev_generation = self.curr_generation.copy()\n        self.curr_generation = self.get_next_generation()\n        self.n_generation += 1\n\n    @property\n    # FIXED from is_max_generations_exceeded\n    def is_max_generations_exceed(self) -> bool:\n        \"\"\"\n        Whether the current number of generations has exceeded the maximum allowed.\n        \"\"\"\n        if self.max_generations:\n            return self.n_generation >= self.max_generations\n        else:\n            return False\n\n    # Basically just a shortcut for creating readonly properties\n    # is_changing = property(is_changing)\n    @property\n    def is_changing(self) -> bool:\n        \"\"\"\n        Whether the state of the cells changed since the previous step.\n        \"\"\"\n        return self.curr_generation != self.prev_generation\n\n    @staticmethod\n    def from_file(filename: pathlib.Path) -> 'GameOfLife':\n        \"\"\"\n        Read the state of the cells from the given file.\n        \"\"\"\n        file = [c for c in open(filename).read() if c in '01\\n']\n\n        grid = [[]] # type: List[List]\n        j = 0\n        # Split number rows into array of numbers, forming 2D matrix\n        for i in range(len(file) - 1):\n            if file[i] != '\\n':\n                number = [int(file[i])]\n                grid[j].extend(number)\n            else:\n                grid.append([])\n                j += 1\n        rows = len(grid)\n        cols = len(grid[0])\n        life = GameOfLife((rows, cols))\n        life.curr_generation = grid\n\n        return life\n\n    def save(self, filename: pathlib.Path) -> None:\n        \"\"\"\n        Save the current state of the cells to the given file.\n        \"\"\"\n\n        file = open(filename, 'w+')\n        for row in range(self.rows):\n            for col in range(self.cols):\n                number = str(self.curr_generation[row][col])\n                file.write(number)\n            file.write('\\n')\n\n        
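# close the handle explicitly; wrapping the writes in a 'with open(...)' block would guarantee this automatically\n        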
file.close()\n","sub_path":"homework03/life.py","file_name":"life.py","file_ext":"py","file_size_in_byte":7230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"137733221","text":"#!/usr/bin/env python3\n\nfrom random import randrange\n\ndef main( ):\n\n numOfWins = ( int( input( \"How many times do you wish to win: \" ) ) )\n sumAttempts = ( 0 )\n avgAttempts = ( 0 )\n count = ( 0 )\n winningArray = []\n userArray = []\n\n for i in range( 0, numOfWins, 1 ): \n count += ( 1 )\n numOfAttempts = ( 0 )\n\n while True:\n isWin = ( True )\n userNumbers = ( \"\" )\n winningNumbers = ( \"\" )\n numOfAttempts += ( 1 )\n\n for i in range( 0, 3, 1 ):\n winningNumbers += ( str( randrange( 0, 10, 1 ) ) )\n winningArray = ( winningNumbers.split( ' ' ) )\n\n for i in range( 0, 3, 1 ):\n userNumbers += ( str( randrange( 0, 10, 1 ) ) )\n userArray = ( userNumbers.split( ' ' ) )\n\n for x in winningArray:\n for y in userArray:\n if( y not in x ):\n isWin = ( False )\n\n if( isWin ):\n break\n\n print( \"Win #:\", ( count ), \"\\tAttempts:\", numOfAttempts, \"\\tWinning #\\'s:\", userNumbers, sep = ( \" \" ) )\n sumAttempts += ( numOfAttempts ) \n\n avgAttempts = ( sumAttempts / ( count ) )\n\n print( \"\\nSum Attempts: \", sumAttempts )\n print( \"Average Attempts: \", avgAttempts )\n\nif( __name__ == ( \"__main__\" ) ):\n main( )\n","sub_path":"Daily3/daily3.py","file_name":"daily3.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"165698125","text":"# Copyright (c) 2014 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Shows details of builds which did not meet our SLO.\n\nSee template at templates/build_details.html for sortable table.\n\"\"\"\nimport jinja2\nimport json\nimport os\nimport webapp2\n\nfrom google.appengine.ext import ndb\n\n\nJINJA_ENVIRONMENT = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__),\n 'templates')),\n extensions=['jinja2.ext.autoescape'],\n autoescape=True)\n\n\nclass BuildDetailsHandler(webapp2.RequestHandler):\n\n def get(self, key):\n stats = ndb.Key(urlsafe=key).get()\n builds = [{\n 'tree': stat.tree,\n 'master': stat.master,\n 'builder': stat.builder,\n 'buildnumber': stat.buildnumber,\n 'buildtime': stat.buildtime,\n 'result': stat.result,\n 'revision': stat.revision,\n } for stat in stats.slo_offenders]\n template = JINJA_ENVIRONMENT.get_template('build_details.html')\n self.response.write(template.render({\n 'builds': json.dumps(builds),\n }))\n","sub_path":"appengine/trooper_o_matic/appengine_module/trooper_o_matic/build_details.py","file_name":"build_details.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"186876145","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport socket\nimport time\nimport ftplib\nfrom utils import util\nfrom utils import fileoperation\nfrom framework.log import getlogger\nfrom ftplib import FTP\n\nclass FtpClient:\n def __init__(self):\n self.ftp = FTP()\n self.log = getlogger()\n self.connected = False\n self.logined = False\n self.strIp = \"\"\n self.strPort = \"\"\n self.username = \"\"\n self.password = \"\"\n self.retryInterval = 3\n self.cwd = \"\"\n self.timeout = 30\n #self.ftp.set_debuglevel(2)\n\n\n 
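# Typical usage (hypothetical host and credentials, for illustration only):\n    #   client = FtpClient()\n    #   if client.init('192.0.2.10', 21, 'user', 'secret', retries=3):\n    #       if client.changedir('/data'):\n    #           client.download_ex('/tmp/sample.dat', 'sample.dat')\n    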
+{"seq_id":"165698125","text":"# Copyright (c) 2014 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Shows details of builds which did not meet our SLO.\n\nSee template at templates/build_details.html for sortable table.\n\"\"\"\nimport jinja2\nimport json\nimport os\nimport webapp2\n\nfrom google.appengine.ext import ndb\n\n\nJINJA_ENVIRONMENT = jinja2.Environment(\n    loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__),\n                                                'templates')),\n    extensions=['jinja2.ext.autoescape'],\n    autoescape=True)\n\n\nclass BuildDetailsHandler(webapp2.RequestHandler):\n\n  def get(self, key):\n    stats = ndb.Key(urlsafe=key).get()\n    builds = [{\n        'tree': stat.tree,\n        'master': stat.master,\n        'builder': stat.builder,\n        'buildnumber': stat.buildnumber,\n        'buildtime': stat.buildtime,\n        'result': stat.result,\n        'revision': stat.revision,\n    } for stat in stats.slo_offenders]\n    template = JINJA_ENVIRONMENT.get_template('build_details.html')\n    self.response.write(template.render({\n        'builds': json.dumps(builds),\n    }))\n","sub_path":"appengine/trooper_o_matic/appengine_module/trooper_o_matic/build_details.py","file_name":"build_details.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"186876145","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport socket\nimport time\nimport ftplib\nfrom utils import util\nfrom utils import fileoperation\nfrom framework.log import getlogger\nfrom ftplib import FTP\n\nclass FtpClient:\n    def __init__(self):\n        self.ftp = FTP()\n        self.log = getlogger()\n        self.connected = False\n        self.logined = False\n        self.strIp = \"\"\n        self.strPort = \"\"\n        self.username = \"\"\n        self.password = \"\"\n        self.retryInterval = 3\n        self.cwd = \"\"\n        self.timeout = 30\n        #self.ftp.set_debuglevel(2)\n\n\n    def connect(self,strIp,strPort):\n        if self.connected:\n            return True\n        try:\n            socket.setdefaulttimeout(self.timeout)\n            self.ftp.connect(strIp,strPort)\n            self.log.info('connect to %s:%s OK',strIp,strPort)\n            self.connected = True\n            return True\n        except:\n            self.connected = False\n            self.logined = False\n            self.log.error('connect to %s:%s failed for:%s',strIp,strPort,util.getExceptInfo())\n            return False\n\n\n\n    def login(self,username,password):\n        if self.logined:\n            return True\n\n        if not self.connected:\n            self.log.warning(\"ftp connection has not been created yet\")\n            return False\n        try:\n            self.ftp.login(username,password)\n            self.log.info('user:%s password:%s ok',username,password)\n            self.logined = True\n            self.cwd = \"\"\n            return True\n        except:\n            self.log.error('user:%s password:%s failed for %s',username,password,util.getExceptInfo())\n            return False\n\n    def checkStatus(self):\n        if self.logined:\n            return True\n        return False\n\n    def changedir(self,dataDir):\n        if not self.checkStatus():\n            self.log.error(\"ftp client has not logged in yet\")\n            return False\n\n        if self.cwd == dataDir:\n            return True\n\n        try:\n            #print self.ftp.getwelcome()\n            self.ftp.cwd(dataDir)\n            self.log.info('change dir:%s ok' ,dataDir)\n            self.cwd = dataDir\n            return True\n        except:\n            self.log.error('change dir:%s failed for:%s' ,dataDir,util.getExceptInfo())\n            return False\n\n\n\n\n    # called when you want to resume the ftp object\n    def reinit(self,retries):\n        return self.init(self.strIp,self.strPort,self.username,self.password,retries)\n\n\n\n    # called after the constructor; init will not change the remote dir, which is an application action\n    # this version does not validate its arguments\n    def init(self,ip,port,username,password,retries):\n        for retry in xrange(retries):\n            if not self.connect(ip,port):\n                time.sleep(self.retryInterval)\n            else:\n                break\n\n        if not self.connected:\n            self.log.warning('connect %s:%s retries:%d' ,ip,port,retries)\n            return False\n        self.strIp = ip\n        self.strPort = port\n        self.username = username\n        self.password = password\n        if not self.logined:\n            return self.login(username,password)\n        #self.log.log(logging.WARNING,\"processName:%s ftp changedir(%s) failed give up current task\",self.runCtx.name,self.taskDesc.remotedir)\n        else:\n            return True\n\n\n\n    def rename(self, fromname, toname):\n        if not self.checkStatus():\n            self.log.warning('Ftp client has not logged in yet')\n            return False\n        try:\n            self.ftp.rename(fromname, toname)\n            return True\n        except:\n            self.log.error('ftp rename from %s to %s failed for:%s' ,fromname,toname,util.getExceptInfo())\n            return False\n\n\n    def checkDownloadFile(self,localfile,remotefile):\n        remotefileSize = self.getSize(remotefile)\n        localfileSize = fileoperation.fileSize(localfile)\n        if remotefileSize == localfileSize:\n            return True\n        else:\n            self.log.warning(\"ftp remotefile:%s(size:%d) localfile:%s(size:%d)\",remotefile,remotefileSize,localfile,localfileSize)\n            return False\n\n    #filename must be a full path\n    #Return values have different meanings:\n    # 0 ---- ok\n    # 1 ---- net error\n    # 2 ---- io error\n    # 3 ---- ftp permanent error\n    # 4 ---- ftp temporary error\n    # 5 ---- other error\n    def download(self, localfile, remotefile):\n        self.log.info(\"ftp try to download:%s\",remotefile)\n        iRet = 0\n        if not self.checkStatus():\n            iRet = 1\n            self.log.error(\"ftp client has not logged in yet\")\n            return iRet\n\n        file_handler = None\n        try:\n            file_handler = open(localfile, 'wb')\n            self.ftp.retrbinary('RETR %s'%(remotefile), file_handler.write)\n            file_handler.close()\n            self.log.info('ftp download:%s success',remotefile)\n            return iRet\n            #self.ftp.set_debuglevel(0)\n        except socket.error:\n            self.log.error(\"ftp download:%s error %s\",remotefile,util.getExceptInfo())\n            iRet = 1\n        except IOError:\n            self.log.error(\"source file:%s load error %s\",remotefile,util.getExceptInfo())\n            iRet = 2\n        except ftplib.error_perm:\n            self.log.error(\"ftp download:%s for permanent error\",remotefile)\n            iRet = 3\n        except ftplib.error_temp:\n            self.log.error(\"ftp download:%s for temporary error\",remotefile)\n            iRet = 4\n        except:\n            self.log.error(\"ftp download:%s for error:%s\",remotefile,util.getExceptInfo())\n            iRet = 5\n\n        finally:\n            if file_handler != None: file_handler.close()\n            return iRet\n\n    def download_ex(self,localfile,remotefile):\n        iRet = self.download(localfile,remotefile)\n        if iRet == 0:\n            if not self.checkDownloadFile(localfile,remotefile):\n                fileoperation.removeFile(localfile)\n                return False\n            return True\n        else:\n            fileoperation.removeFile(localfile)\n            # a temporary ftp error is treated as non-fatal here\n            if iRet == 4:\n                return True\n            else:\n                return False\n\n    #filename must be a full path\n    #Return values have different meanings:\n    # 0 ---- ok\n    # 1 ---- net error\n    # 2 ---- io error\n    # 3 ---- ftp permanent error\n    # 4 ---- ftp temporary error\n    # 5 ---- unknown error\n    def upload(self,filename,interPostfix=None):\n        bufsize = 1024\n        iRet = 0\n        fp = None\n        if not self.checkStatus():\n            iRet = 1\n            self.log.error(\"ftp client has not logged in yet\")\n            return iRet\n\n        try:\n            self.ftp.set_pasv(False)\n            fp = open(filename,\"rb\")\n            srcFileName = os.path.basename(filename)\n            #upFileName = \"%s.%s\" % (srcFileName,interPostfix)\n            #self.ftp.storbinary('STOR %s' % upFileName,fp,bufsize)\n            self.ftp.storbinary('STOR %s' % (srcFileName),fp,bufsize)\n            #self.ftp.rename(upFileName,srcFileName)\n            self.log.info('ftp upload:%s success',filename)\n            return iRet\n            #self.ftp.set_debuglevel(0)\n        except socket.error:\n            self.log.error(\"ftp upload:%s error %s\",filename,util.getExceptInfo())\n            iRet = 1\n        except IOError:\n            self.log.error(\"source file:%s load error %s\",filename,util.getExceptInfo())\n            iRet = 2\n        except ftplib.error_perm:\n            self.log.error(\"ftp upload:%s for permanent error %s \",filename,util.getExceptInfo())\n            iRet = 3\n        except ftplib.error_temp:\n            self.log.error(\"ftp upload:%s for temporary error %s\",filename,util.getExceptInfo())\n            iRet = 4\n        except:\n            self.log.error(\"ftp upload:%s for other error %s\",filename,util.getExceptInfo())\n            iRet = 5\n        finally:\n            if fp != None: fp.close()\n            return iRet\n\n    # in a future version the retval meanings may change\n    # retval format: operRes, needRetryOrNot\n    # False, True ------ needs retry\n    # True, *     ------ no retry needed\n    def upload_ex(self,filename):\n        iRet = self.upload(filename)\n        if iRet == 0:\n            return True,False\n        elif iRet == 1:\n            self.quit()\n            return False,True\n        elif iRet == 4:\n            return False,True\n        else:\n            return False,False\n\n\n    def getfiles(self,dir=None):\n        files = []\n        if not self.checkStatus():\n            self.log.error(\"ftp client has not logged in yet\")\n            return files\n        try:\n            #self.changedir(dir)\n            self.log.info(\"current path:%s\",self.ftp.pwd())\n            files = files + self.ftp.nlst()\n        except:\n            self.log.error(\"Get remote files in the directory:%s failed for:%s\",dir,util.getExceptInfo())\n        return files\n\n\n    def getSize(self,filename):\n        if not self.checkStatus():\n            self.log.error(\"ftp client has not logged in yet\")\n            return -1\n        try:\n            sizeInfo = self.ftp.size(filename)\n            return sizeInfo\n        except:\n            self.log.error(\"Get remote files(%s) size failed for:%s\",filename,util.getExceptInfo())\n            return -1\n\n    def getlist(self,dir):\n        lists = None\n        if not self.checkStatus():\n            self.log.error(\"ftp client has not logged in yet\")\n            return lists\n        try:\n            self.ftp.cwd(dir)\n            lists = []\n            # collect the LIST lines via a callback instead of letting dir() print them\n            self.ftp.dir(lists.append)\n            return lists\n        except:\n            self.log.error(\"Get remote files and folders in the directory:%s failed for %s\",dir,util.getExceptInfo())\n            return lists\n\n    def deletefile(self,filepath):\n        if not self.checkStatus():\n            self.log.error(\"ftp client has not logged in yet\")\n            return False\n        try:\n            self.ftp.delete(filepath)\n            return True\n        except:\n            self.log.error(\"Delete file:%s failed for:%s\",filepath,util.getExceptInfo())\n            return False\n\n    def deletedir(self, dirpath):\n        if not self.checkStatus():\n            self.log.error(\"ftp client has not logged in yet\")\n            return False\n        try:\n            self.ftp.rmd(dirpath)\n            return True\n        except:\n            self.log.error(\"Delete folder:%s failed for:%s\",dirpath,util.getExceptInfo())\n            return False\n\n    def quit(self):\n        iRet = 0\n        if not self.checkStatus():\n            iRet = 1\n            self.log.error(\"ftp client has not logged in yet\")\n            return iRet\n        self.log.debug(\"ftp quit\")\n        try:\n            self.ftp.quit()\n        # catch the more specific errors first; ftplib.all_errors would otherwise swallow them\n        except socket.error:\n            iRet = 2\n            self.log.error(\"net error %s\",util.getExceptInfo())\n        except IOError:\n            iRet = 3\n            self.log.error(\"IO error %s\",util.getExceptInfo())\n        except ftplib.all_errors:\n            iRet = 1\n            self.log.error(\"ftp related error %s\",util.getExceptInfo())\n        finally:\n            self.connected = False\n            self.logined = False\n            return iRet\n\n\n\n\n#if __name__==\"__main__\":\n'''\n    ftpCli = FtpClient()\n    ftpCli.connect(\"121.32.136.197\",\"21\")\n    ftpCli.login('zhengs','123456')\n    #ftpCli.changedir('result')\n    files = ftpCli.getfiles('result')\n    print files\n    for item in files:\n        ftpCli.download(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\python\\\\run\"+item,item)\n    ftpCli.quit()\n'''\n","sub_path":"loadhdfs/lib/ftpclient.py","file_name":"ftpclient.py","file_ext":"py","file_size_in_byte":11711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"79383251","text":"import socket\nimport sys\n\nclass Client:\n    def __init__(self):\n        # Create a TCP/IP socket\n        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n\n    def connect(self, address = None):\n        # Connect the socket to the port where the server is listening\n        if address == None:\n            server_address = ('localhost', 10000)\n        else:\n            server_address = (address, 10000)\n\n        print('connecting to {} port {}'.format(*server_address))\n        try:\n            self.sock.connect(server_address)\n        except Exception as e:\n            print(e)\n\n\n    def sendMessage(self, message):\n        try:\n            self.sock.sendall(message)\n            print(\"Message sent.\")\n        except Exception as e:\n            print(e)\n\n    def recv(self):\n        # Accumulate received chunks as bytes (starting from None would raise TypeError on +=)\n        totalData = b''\n\n        # While there is data to receive.\n        while True:\n            data = self.sock.recv(16)\n\n            if not data:\n                break\n\n            totalData += data\n\n        return totalData\n\n    def disconnect(self):\n        print(\"Closing socket.\")\n        self.sock.close()\n\n\nclass Server:\n    def __init__(self):\n        # Create a TCP/IP socket.\n        self.sock = socket.socket()\n\n        # Bind the socket to the port 10000.\n        server_address = ('',10000)\n        print(\"Starting up on {} port {}\".format(*server_address))\n        self.sock.bind(server_address)\n\n        print(\"Listening for connections\")\n        self.sock.listen(1)\n\n        self.conn, self.client_address = self.sock.accept()\n        #print(self.connection, self.client_address)\n\n    def sendMessage(self,message):\n        if message:\n            self.conn.sendall(message)\n            print(\"Data sent!\")\n\n\n    def recv(self):\n        totalData = b''\n\n        # While there is data to receive.\n        while True:\n            data = self.conn.recv(2048)\n            if not data:\n                break\n            totalData += data\n\n        return totalData\n\n    def disconnect(self):\n        self.conn.close()\n\n","sub_path":"clientserver.py","file_name":"clientserver.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
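For orientation, a plausible call sequence for the FtpClient class above; the host, port, credentials, and paths are made-up placeholders, and only methods defined in the class are used:

cli = FtpClient()
# init() retries connect() with a sleep between attempts, then logs in.
if cli.init("ftp.example.com", "21", "user", "secret", 3):
    if cli.changedir("/data/incoming"):
        for name in cli.getfiles():
            # download_ex() verifies the remote size and removes a partial local copy on mismatch
            cli.download_ex("/tmp/" + name, name)
cli.quit()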
+{"seq_id":"313103795","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/1/28 18:01\n# @Author : xxx\n# @Email : xxx@admin.com\n# @File : person.py\n# @Software: PyCharm\nimport pickle\nfrom conf.settings import course_info\nclass Person:\n    def show_courses(self):  # view all available courses\n        course_gen = self.pickle_load(course_info)\n        for index,course in enumerate(course_gen,1):\n            print('%s、%s' % (index, course))\n\n    @staticmethod\n    def pickle_load(file_name):\n        with open(file_name, 'rb') as f:\n            while True:\n                try:\n                    obj = pickle.load(f)\n                    yield obj\n                except EOFError:\n                    break\n","sub_path":"day28/elective_course2/core/person.py","file_name":"person.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"381174157","text":"#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n# MakeStandardInputs.py ///////////////////////////////////////////////////////////\n#----------------------------------------------------------------------------------\n# Author(s): Reyer Band, Johan S. Bonilla, Brendan Regnary ////////////////////////\n# This program makes standardized inputs //////////////////////////////////////////\n#----------------------------------------------------------------------------------\n\n# modules\nimport numpy\nimport h5py\n# get stuff from modules\nfrom sklearn import preprocessing\nfrom sklearn.externals import joblib\nimport json\nimport argparse, os\n\nlistOfSamples = [\"b\",\"Higgs\",\"QCD\",\"Top\",\"W\",\"Z\"]\nsetTypes = [\"\",\"train\",\"validation\",\"test\"]\n\n\n#==================================================================================\n# Standardize BES Vars /////////////////////////////////////////////////////////////////\n#==================================================================================\ndef standardizeBESTVars(fileDir, outDir, sampleTypes = [\"QCD\",\"Higgs\",\"Top\",\"W\",\"Z\",\"b\"], setTypes = [\"\"], suffix = \"\"):\n    # put BES variables in data frames\n    for mySet in setTypes:\n        jetBESDF = {}\n        for mySample in sampleTypes:\n            print(\"Getting\", mySample, mySet)\n            filePath = fileDir+mySample+\"Sample_BESTinputs\"\n            if not mySet == \"\":\n                filePath = filePath + \"_\" + mySet\n            if suffix == \"\":\n                filePath = filePath + \".h5\"\n            else:\n                filePath = filePath + \"_\" + suffix + \".h5\"\n            myF = h5py.File(filePath,\"r\")\n            jetBESDF[mySample] = myF['BES_vars'][()]\n            print(type(jetBESDF[mySample]), jetBESDF[mySample].shape)\n            myF.close()\n            print(\"Got\", mySample, mySet)\n\n        print(\"Accessed BES variables for\", mySet)\n\n        allBESinputs = numpy.concatenate([jetBESDF[mySample] for mySample in sampleTypes])\n        print(\"Shape allBESinputs\", allBESinputs.shape)\n        scaler = preprocessing.StandardScaler().fit(allBESinputs)\n\n        with open('ScalerParameters_'+mySet+'.txt', 'w') as outputFile:\n            for mean,var in zip(scaler.mean_, scaler.var_):\n                outputFile.write('{},{}\\n'.format(mean, var))\n\n        print(\"JetBESDF\", jetBESDF.keys())\n        for mySample in sampleTypes:\n            jetBESDF[mySample] = scaler.transform(jetBESDF[mySample])\n            print(\"Transformed\", mySample)\n            #if infParticle == 'H' : infParticle = 'Higgs'\n
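The standardization step above fits a single StandardScaler on the concatenation of every sample's feature array and then transforms each sample with the shared mean and variance. A toy-sized sketch of the same pattern (the array shapes are invented for illustration):

import numpy
from sklearn import preprocessing

a = numpy.random.rand(5, 3)        # two fake "samples" with 3 features each
b = numpy.random.rand(8, 3) * 10
scaler = preprocessing.StandardScaler().fit(numpy.concatenate([a, b]))
a_std, b_std = scaler.transform(a), scaler.transform(b)
# the per-feature parameters written to ScalerParameters_*.txt above
print(list(zip(scaler.mean_, scaler.var_)))

            #if 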
infParticle == 'T' : infParticle = 'Top'\n #if infParticle == 'B' : infParticle = 'b'\n outFilePath = outDir+mySample+\"Sample_BESTinputs\"\n if not mySet == \"\":\n outFilePath = outFilePath + \"_\" + mySet\n if not suffix == \"\":\n outFilePath = outFilePath + \"_\" + suffix\n outFilePath = outFilePath + \"_standardized.h5\"\n outF = h5py.File(outFilePath, \"w\")\n print(\"Creating Standarized Dataset for \", mySample, len(jetBESDF[mySample]))\n outF.create_dataset('BES_vars', data=jetBESDF[mySample], compression='lzf')\n\n inFilePath = fileDir+mySample+\"Sample_BESTinputs\"\n if not mySet == \"\":\n inFilePath = inFilePath + \"_\" + mySet\n if not suffix == \"\":\n inFilePath = inFilePath + \"_\" + suffix\n inFilePath = inFilePath + \".h5\"\n inF = h5py.File(inFilePath, \"r\")\n #Copy the images to the new file\n #Treat QCD separately because of dumb labeling scheme I introduced\n for myFrame in ['HiggsFrame_images','TopFrame_images','ZFrame_images','WFrame_images']:\n print(\"Copying\", myFrame)\n outF.create_dataset(myFrame, data=inF[myFrame], compression='lzf')\n inF.close()\n outF.close()\n print(\"Done creating\", outFilePath)\n print(\"Finished making datasets for\", mySet)\n\n# Main function should take in arguments and call the functions you want\nif __name__ == \"__main__\":\n \n # Take in arguments\n parser = argparse.ArgumentParser(description='Parse user command-line arguments to execute format conversion to prepare for training.')\n parser.add_argument('-s', '--samples',\n dest='samples',\n help=' Which (comma separated) samples to process. Examples: 1) --all; 2) W,Z,b',\n required=True)\n parser.add_argument('-hd','--h5Dir',\n dest='h5Dir',\n default=\"~/nobackup/h5samples/\")\n parser.add_argument('-o','--outDir',\n dest='outDir',\n default=\"~/nobackup/h5samples/\")\n parser.add_argument('-sf','--suffix',\n dest='suffix',\n default=\"\")\n parser.add_argument('-st','--setType',\n dest='setType',\n help=' Which (comma separated) sets to process. 
Examples: 1) all; 2) train,validation,test',\n required=True)\n args = parser.parse_args()\n if not args.samples == \"all\": listOfSamples = args.samples.split(',')\n if not args.setType == \"all\": setTypes = args.setType.split(',')\n\n # Make directories you need\n if not os.path.isdir(args.h5Dir): print(args.h5Dir, \"does not exist\")\n\n standardizeBESTVars(args.h5Dir, args.outDir, listOfSamples, setTypes, args.suffix)\n \n ## Plot total pT distributions\n \n print(\"Done\")\n\n","sub_path":"training/MakeStandardInputs.py","file_name":"MakeStandardInputs.py","file_ext":"py","file_size_in_byte":5432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"499173772","text":"\"\"\"\nGiven an array nums, we call (i, j) an important reverse pair if i < j and nums[i] > 2*nums[j].\n\nYou need to return the number of important reverse pairs in the given array.\n\"\"\"\nclass Solution(object):\n def reversePairs(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n # Brute Force TLE\n res = 0\n for i in range(len(nums) - 1):\n for j in range(i + 1, len(nums)):\n if nums[i] > 2 * nums[j]:\n res += 1\n\n return res\n\n def reversePairs2(self, nums):\n '''\n Count \"important reverse pairs\" while doing mergesort:\n When we're doing mergesort, original index of elements in left part (smaller side), i, must less than those in right part, j.\n Simply compare nums[i] and 2*nums[j] and sum them up.\n '''\n if len(nums) <= 1:\n return 0\n count = [0]\n\n def merge(nums):\n if len(nums) <= 1: return nums\n\n left, right = merge(nums[:len(nums) // 2]), merge(nums[len(nums) // 2:])\n L = R = 0\n\n while L < len(left) and R < len(right):\n if left[L] <= 2 * right[R]:\n L += 1\n else:\n count[0] += len(left) - L\n R += 1\n return sorted(left + right) # those partial lists induced during mergesort here are generated by sorted()\n\n merge(nums)\n return count[0]\n\n\n\n# Anther solution is Fenwick Tree\n\nnums = [1,3,2,3,1]\nnums = [2,4,3,5,1]\nprint(Solution().reversePairs2(nums))\n","sub_path":"493RevPairs.py","file_name":"493RevPairs.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"67476625","text":"from src import config\nimport transformers\nimport torch.nn as nn\n\nclass BertBaseUncased(nn.Module):\n def __init__(self,n_classes):\n super(BertBaseUncased, self).__init__()\n self.bert = transformers.BertModel.from_pretrained(config.BERT_PATH)\n self.bert_drop = nn.Dropout(0.3)\n self.out = nn.Linear(self.bert.config.hidden_size,n_classes)\n\n def forward(self,input_ids,attention_mask):\n _, pooled_output = self.bert(\n input_ids = input_ids,\n attention_mask = attention_mask\n )\n\n output = self.bert_drop(pooled_output)\n return self.out(output)\n\n\n\n\n","sub_path":"Machine_Learning_Projects/NLP/Sentiment_Classification_with_BERT/src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"147101080","text":"import os\r\nimport sys\r\nConfigDirectory = \"Config\"\r\nDirectorys = [\"Framework\", \"Support\"]\r\nModules = [\"Application\", \"Communication\", \"Control\", \"Input\", \"Output\"]\r\n\r\n\r\ndef FrameworkInit():\r\n project_path = FetchProjectPath()\r\n CheckProjectIntegrity(project_path)\r\n ProjectPathAppend(project_path)\r\n FrameworkGlobalConstantInit(project_path)\r\n\r\ndef FetchProjectPath():\r\n 
return os.path.dirname(os.path.realpath(__file__))\r\n\r\ndef CheckProjectIntegrity(path):\r\n for directory_name in Directorys:\r\n directory = '%s%s%s' % (path, os.sep, directory_name)\r\n if not os.path.exists(directory):\r\n raise Exception(\r\n 'Project Integrity check Failed: Missing %s directory.' % directory_name)\r\n for module_name in Modules:\r\n module = '%s%sFramework%s%s' % (path, os.sep, os.sep, module_name)\r\n if not os.path.exists(module):\r\n raise Exception('Project Integrity check Failed: Missing %s module.' % module_name)\r\n\r\ndef ProjectPathAppend(project_path):\r\n for directory_name in Directorys:\r\n directory = '%s%s%s' % (project_path, os.sep, directory_name)\r\n for dirpath, dirnames, filenames in os.walk(directory):\r\n if dirpath not in sys.path:\r\n sys.path.append(dirpath)\r\n\r\ndef ConfigDirectorySet(config_dir_name):\r\n global ConfigDirectory\r\n import FrameworkSupport\r\n config_dir_path = \"%s%s%s\" % (FrameworkSupport.PROJECT_PATH, os.sep, config_dir_name)\r\n if config_dir_path not in sys.path:\r\n sys.path.append(config_dir_path)\r\n ConfigDirectory = config_dir_name\r\n\r\ndef FrameworkGlobalConstantInit(project_path):\r\n import inspect\r\n import FrameworkSupport\r\n FrameworkSupport.EXECUTE_FILE = inspect.getframeinfo(\r\n inspect.currentframe().f_back.f_back.f_back)[0]\r\n FrameworkSupport.PID = os.getpid()\r\n FrameworkSupport.PROJECT_PATH = project_path\r\n FrameworkSupport.PYTHON_PREFIX = os.path.dirname(sys.executable)\r\n FrameworkSupport.PYTHON_EXECUTOR = sys.executable\r\n\r\ndef FrameworkStart(load_plugins):\r\n import Framework\r\n import FrameworkSupport\r\n ALPSDebug.alps_print(\"WSDT Framework v%s Start\" % Framework.VERSION)\r\n ALPSDebug.alps_print(ALPSDebug.LEVEL.DEBUG,\r\n'''Debug Information:\r\n PID = %d\r\n EXECUTE_FILE = \"%s\"\r\n CONFIG_DIR_NAME = \"%s\"\r\n DEBUG_LEVEL = %s\r\n''' % (FrameworkSupport.PID,\r\n FrameworkSupport.EXECUTE_FILE,\r\n ConfigDirectory,\r\n ALPSDebug.LEVEL.reverse_map[ALPSDebug.Debug_Setting.Debug_Print_Level]))\r\n\r\n framework_thread = ALPSThread(threadfunc=getattr(Framework, '__ALPSMODULE__').api_initialize,\r\n threadname='Wireless Tool Framework thread')\r\n initialize_done = ALPSThread.allocate_event()\r\n framework_thread.start(plugins=load_plugins, initialize_done=initialize_done)\r\n if initialize_done.wait(30) and framework_thread.thread_func_ret:\r\n for plugin in load_plugins:\r\n ALPSDebug.alps_error(\"Plugin[%s] is not found\" % plugin)\r\n ALPSDebug.alps_print(\"WSDT Framework initialized done\")\r\n\r\ndef hideConsoleWindow():\r\n import ctypes\r\n whnd = getattr(ctypes.windll.kernel32, \"GetConsoleWindow\")()\r\n if whnd != 0:\r\n ctypes.windll.user32.ShowWindow(whnd, 0)\r\n getattr(ctypes.windll.kernel32, \"CloseHandle\")(whnd)\r\n\r\ndef logFileRecordStart(path):\r\n ALPSDebug.EnableLogFile(path)\r\n\r\ndef logSocketStart(type_str, ip, port):\r\n ALPSDebug.Debug_Log_Socket_Type = type_str\r\n ALPSDebug.EnableSocketLog(ip, port)\r\n\r\ndef start(load_plugins,\r\n hideConsole=False,\r\n logSocketOutput=None,\r\n logSocketType='UDP',\r\n logFileOutput=None,\r\n configDirecotryName=ConfigDirectory):\r\n\r\n ConfigDirectorySet(configDirecotryName)\r\n\r\n if hideConsole:\r\n if os_system == OS_WINDOWS:\r\n hideConsoleWindow()\r\n else:\r\n ALPSDebug.alps_print(ALPSDebug.LEVEL.ERROR,\r\n \"The hideConsole parameter can be only used in Windows\")\r\n\r\n logFileRecordStart(logFileOutput)\r\n\r\n if logSocketOutput:\r\n ip_addr, port = logSocketOutput.split(\":\")\r\n 
logSocketStart(logSocketType, ip_addr, int(port))\r\n\r\n FrameworkStart(load_plugins)\r\n\r\nFrameworkInit()\r\nfrom ALPSCommon import *\r\n","sub_path":"WLAN Software Development Test Tool/WSDT/FrameworkStarter.py","file_name":"FrameworkStarter.py","file_ext":"py","file_size_in_byte":4330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"90294055","text":"from django.contrib import admin\nfrom django.conf.urls import patterns, url, include\nfrom storeys.views import StoreysView\nfrom storeys.utils import urlref\nfrom django.views.generic.base import TemplateView\n\n\n\nurlpatterns = patterns(\n 'test',\n url(r'^admin/', include(admin.site.urls)),\n url(r'^test_success_1/', include('test_dir.additional_app.urls')),\n url(r'^additional_app2/', include('test_dir.additional_app2.urls')),\n url(r'^test_success_2_(?P[0-9]+)$',\n StoreysView.as_view(\n template_name='storeys_urls_js/main.html',\n prerender_content='receipts/actions.htm'\n ),\n name='receipts-index-view'\n ),\n url(r'^test_3_(?P[0-9]+)$',\n StoreysView.as_view(\n template_name='storeys_urls_js/main.html',\n prerender_content='receipts/actions.htm'\n ),\n name='test_exclude'\n ),\n)\n\n\nnon_exported_urlpatterns = (\n urlref(module_name='admin.site.urls'),\n urlref(module_name='test_dir.additional_app2.urls'),\n urlref(name='test_exclude')\n)\n","sub_path":"test_dir/storeys/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"484139659","text":"# -*- coding: utf-8 -*-\n\n\nfrom unittest import TestCase\nfrom effectivepython.chapter2.function import *\n\n\nclass TestDivide(TestCase):\n def test_divide(self):\n self.assertRaises(ValueError, lambda: divide(2, 0))\n try:\n result = divide(5, 2)\n except ValueError:\n print(\"Invalid inputs\")\n else:\n print(\"Result is %.1f\" % result)\n\n def test_sort_priority(self):\n numbers = [8, 3, 1, 2, 5, 4, 7, 6]\n group = {2, 3, 5, 7}\n result = sort_priority(numbers, group)\n self.assertEquals(result, [2, 3, 5, 7, 1, 4, 6, 8])\n self.assertEquals(sort_priority2(numbers, group), True)\n\n def test_index_words(self):\n address = 'Four score and seven years ago ...'\n self.assertEquals(index_words(address)[:3], [0, 5, 11])\n result = index_words_iter(address)\n self.assertEquals(next(result), 0)\n self.assertEquals(next(result), 5)\n\n def test_normalize(self):\n visits = [15, 35, 80]\n percentages = normalize(visits)\n self.assertEquals([11.538461538461538, 26.923076923076923, 61.53846153846154], percentages)\n\n def test_read_visits(self):\n data_path = 'data.txt'\n result = read_visits(data_path)\n self.assertEquals([15, 35, 80], list(result))\n\n\n def test_normalize_copy(self):\n visits = [15, 35, 80]\n percentages = normalize_copy(visits)\n print(percentages)\n\n def test_normalize_func(self):\n data_path = 'data.txt'\n result = norvalize_func(lambda: read_visits(data_path))\n print(result)\n\n def test_norvalize_defensive(self):\n visits = [15, 35, 80]\n self.assertRaises(TypeError, lambda: normalize_defensive(iter(visits)))","sub_path":"test/chapter2/test_function.py","file_name":"test_function.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"395978065","text":"\n\nclass BaseUtil:\n\n def __init__(self):\n self.language = 'en'\n self._errors = []\n self._util_name = ''\n self._form = None\n self._user = 
None\n self._message_info = {}\n self.__form = None\n self._object = None\n\n #\n # HELPER METHODS\n #\n\n def return_list(self, l):\n if len(l) > 0:\n return l\n return []\n\n def remove_notifications(self, obj):\n if self._user:\n if self._user.groups.filter(name='staff').exists():\n obj.notified = False\n obj.tab_notified = ''\n obj.save()\n elif self._user.groups.filter(name='client').exists():\n obj.staff_notified = False\n obj.staff_tab_notified = ''\n obj.save()\n\n #\n # ERROR METHODS\n #\n\n def get_errors(self):\n return self._errors\n\n def get_error_message(self):\n msg = ''\n for x in self._errors:\n msg += x + '\\n'\n return msg\n\n def add_error(self, err):\n msg = self._util_name + ': ' + err\n self._errors.append(msg)\n\n def add_form_errors(self, form=None):\n if form:\n f = form\n else:\n f = self._form\n for key, value in f.errors.items():\n for x in value:\n msg = key + ': ' + x\n self.add_error(msg)\n self.save_form(f)\n\n def add_error_list(self, l):\n for x in l:\n if x not in self._errors:\n self._errors.append(x)\n\n def has_errors(self):\n return len(self._errors) > 0\n\n #\n # FORM METHODS\n #\n\n def validate_form(self):\n if self._form:\n if self._form.is_valid():\n self._object = self._form.save()\n return True\n else:\n self.add_form_errors()\n return False\n self.add_error('Form is empty.')\n return False\n\n def save_form(self, form=None):\n self.__form = (form if form else self._form)\n\n def get_form(self):\n return self.__form\n\n def get_form_errors(self):\n if self.__form:\n return self.__form.errors\n return {}\n\n\n\n\n\n\n","sub_path":"home/util_base.py","file_name":"util_base.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"624195176","text":"#!/usr/bin/env python\n# coding=utf-8\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\n# sopel imports\nimport sopel.module\n\n\n# imports for system and OS access, directories\nimport os\nimport sys\n\n# imports based on THIS file\nmoduledir = os.path.dirname(__file__)\nshareddir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.append(shareddir)\nfrom BotShared import *\nimport re\n\n\ncomdict = {\n \"author\": \"dysonparkes\",\n \"contributors\": [],\n \"description\": \"A tool for converting weights\",\n 'privs': [],\n \"example\": \".weight 24 kg\",\n \"exampleresponse\": \"Instigator: 24.00kg = 52 pounds 14.58 ounces\",\n }\n\n\n\"\"\"\nBased on the default units module.\n\"\"\"\n\n\nfind_mass = re.compile(r'([0-9]*\\.?[0-9]*)[ ]*(lb|lbm|pound[s]?|ounce|oz|(?:kilo|)gram(?:me|)[s]?|[k]?g)', re.IGNORECASE)\n\n\n@sopel.module.commands('weight', 'mass')\ndef mainfunction(bot, trigger):\n \"\"\"Check to see if the module is enabled.\"\"\"\n botcom = bot_module_prerun(bot, trigger)\n if not botcom.modulerun:\n return\n\n if not botcom.multiruns:\n execute_main(bot, trigger, botcom)\n else:\n # IF \"&&\" is in the full input, it is treated as multiple commands, and is split\n commands_array = spicemanip.main(botcom.triggerargsarray, \"split_&&\")\n if commands_array == []:\n commands_array = [[]]\n for command_split_partial in commands_array:\n botcom.triggerargsarray = spicemanip.main(command_split_partial, 'create')\n execute_main(bot, trigger, botcom)\n\n botdict_save(bot)\n\n\ndef execute_main(bot, trigger, botcom):\n \"\"\"Convert mass.\"\"\"\n try:\n source = find_mass.match(trigger.group(2)).groups()\n except (AttributeError, TypeError):\n 
bot.reply(\"That's not a valid mass unit.\")\n return NOLIMIT\n unit = source[1].lower()\n numeric = float(source[0])\n metric = 0\n if unit in (\"gram\", \"grams\", \"gramme\", \"grammes\", \"g\"):\n metric = numeric\n elif unit in (\"kilogram\", \"kilograms\", \"kilogramme\", \"kilogrammes\", \"kg\"):\n metric = numeric * 1000\n elif unit in (\"lb\", \"lbm\", \"pound\", \"pounds\"):\n metric = numeric * 453.59237\n elif unit in (\"oz\", \"ounce\"):\n metric = numeric * 28.35\n\n if metric >= 1000:\n metric_part = '{:.2f}kg'.format(metric / 1000)\n else:\n metric_part = '{:.2f}g'.format(metric)\n\n ounce = metric * .035274\n pound = int(ounce) // 16\n ounce = ounce - (pound * 16)\n\n if pound > 1:\n stupid_part = '{} pounds'.format(pound)\n if ounce > 0.01:\n stupid_part += ' {:.2f} ounces'.format(ounce)\n else:\n stupid_part = '{:.2f} oz'.format(ounce)\n\n bot.reply('{} = {}'.format(metric_part, stupid_part))\n","sub_path":"Modules/Tools/SpiceBot/Weight.py","file_name":"Weight.py","file_ext":"py","file_size_in_byte":2809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"456922670","text":"from functools import partial\n\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom solarforecastarbiter.metrics import deterministic\n\n\n@pytest.fixture\ndef error_func_deadband():\n return partial(deterministic.error_deadband, deadband=0.05)\n\n\n@pytest.mark.parametrize('deadband,expected', [\n (0., [1, 0, 0, 0, 0]),\n (0.1, [1, 0, 0, 0, 1]),\n (1., [1, 0, 1, 0, 1]),\n])\ndef test_deadband_mask(deadband, expected):\n obs = np.array([0, 0, 1, 0, 1.])\n fx = np.array([0, 1, 0, 1.05, 0.95])\n expected = np.array(expected, dtype=bool)\n out = deterministic.deadband_mask(obs, fx, deadband)\n assert_allclose(out, expected)\n\n\ndef test_error():\n obs = np.array([2, 1, 0.])\n fx = np.array([1, 2, 0.])\n expected = np.array([-1, 1, 0.])\n out = deterministic.error(obs, fx)\n assert_allclose(out, expected)\n\n\n@pytest.mark.parametrize('deadband,expected', [\n (0., [0, 1, -1, 1.05, -0.05]),\n (0.1, [0, 1, -1, 1.05, 0]),\n (1., [0, 1, 0, 1.05, 0]),\n])\ndef test_error_deadband(deadband, expected):\n obs = np.array([0, 0, 1, 0, 1.])\n fx = np.array([0, 1, 0, 1.05, 0.95])\n expected = np.array(expected)\n out = deterministic.error_deadband(obs, fx, deadband)\n assert_allclose(out, expected)\n\n\n@pytest.mark.parametrize(\"obs,fx,value\", [\n (np.array([0, 1, 2]), np.array([0, 1, 2]), 0.0),\n (np.array([0, 1, 2]), np.array([0, 1, 1]), 1 / 3),\n (np.array([0, 1, 2]), np.array([0, 1, 3]), 1 / 3),\n])\ndef test_mae(obs, fx, value):\n mae = deterministic.mean_absolute(obs, fx)\n assert mae == value\n\n\n@pytest.mark.parametrize(\"obs,fx,value\", [\n (np.array([0, 1, 2]), np.array([0, 1, 2]), 0.0),\n (np.array([0, 1, 2]), np.array([1, 0, 2]), 0.0),\n (np.array([0, 1, 2]), np.array([1, 2, 3]), 1.0),\n (np.array([0, 1, 2]), np.array([1, 3, 4]), (1 + 2 + 2) / 3),\n (np.array([5, 5, 5]), np.array([4, 4, 4]), -1.0),\n (np.array([5, 5, 5]), np.array([4, 3, 3]), -(1 + 2 + 2) / 3),\n])\ndef test_mbe(obs, fx, value):\n mbe = deterministic.mean_bias(obs, fx)\n assert mbe == value\n\n\n@pytest.mark.parametrize(\"obs,fx,value\", [\n (np.array([0, 1]), np.array([0, 1]), 0.0),\n (np.array([0, 1]), np.array([1, 2]), 1.0),\n (np.array([1, 2]), np.array([0, 1]), 1.0),\n])\ndef test_rmse(obs, fx, value):\n rmse = deterministic.root_mean_square(obs, fx)\n assert rmse == value\n\n\n@pytest.mark.parametrize(\"obs,fx,value\", [\n (np.array([1, 1]), 
np.array([2, 2]), 100.0),\n (np.array([2, 2]), np.array([3, 3]), 50.0),\n (np.array([1, 2]), np.array([1, 2]), 0.0),\n])\ndef test_mape(obs, fx, value):\n mape = deterministic.mean_absolute_percentage(obs, fx)\n assert mape == value\n\n\n@pytest.mark.parametrize(\"obs,fx,norm,value\", [\n (np.array([0, 1, 2]), np.array([0, 1, 2]), 55, 0.0),\n (np.array([0, 1, 2]), np.array([0, 1, 1]), 20, 1 / 3 / 20 * 100),\n])\ndef test_nmae(obs, fx, norm, value):\n nmae = deterministic.normalized_mean_absolute(obs, fx, norm)\n assert nmae == value\n\n\n@pytest.mark.parametrize(\"obs,fx,norm,value\", [\n (np.array([0, 1, 2]), np.array([0, 1, 2]), 55, 0.0),\n (np.array([0, 1, 2]), np.array([1, 0, 2]), 20, 0.0),\n (np.array([0, 1, 2]), np.array([1, 3, 4]), 7, (1 + 2 + 2) / 3 / 7 * 100),\n (np.array([5, 5, 5]), np.array([4, 4, 4]), 2, -1.0 / 2 * 100),\n (np.array([5, 5, 5]), np.array([4, 3, 3]), 2, -(1 + 2 + 2) / 3 / 2 * 100),\n])\ndef test_nmbe(obs, fx, norm, value):\n nmbe = deterministic.normalized_mean_bias(obs, fx, norm)\n assert nmbe == value\n\n\n@pytest.mark.parametrize(\"obs,fx,norm,value\", [\n (np.array([0, 1, 2]), np.array([0, 1, 2]), 1.0, 0.0),\n (np.array([0, 1, 2]), np.array([0, 1, 2]), 55.0, 0.0),\n (np.array([0, 1]), np.array([1, 2]), 1.0, 100.0),\n (np.array([0, 1]), np.array([1, 2]), 100.0, 1.0),\n])\ndef test_nrmse(obs, fx, norm, value):\n nrmse = deterministic.normalized_root_mean_square(obs, fx, norm)\n assert nrmse == value\n\n\n@pytest.mark.parametrize(\"obs,fx,ref,value\", [\n (np.array([0, 1]), np.array([0, 2]), np.array([0, 1]), np.NINF),\n (np.array([0, 1]), np.array([0, 1]), np.array([0, 1]), 0.0),\n (np.array([0, 1]), np.array([0, 2]), np.array([0, 2]), 0.0),\n (np.array([0, 1]), np.array([0, 2]), np.array([0, 3]), 0.5),\n])\ndef test_skill(obs, fx, ref, value):\n s = deterministic.forecast_skill(obs, fx, ref)\n assert s == value\n\n\n@pytest.mark.parametrize(\"obs,fx,value\", [\n (np.array([0, 1]), np.array([0, 1]), 1.0),\n (np.array([1, 2]), np.array([-1, -2]), -1.0),\n])\ndef test_r(obs, fx, value):\n r = deterministic.pearson_correlation_coeff(obs, fx)\n assert r == value\n\n\n@pytest.mark.parametrize(\"obs,fx\", [\n # len(obs) < 2 or len(fx) < 2\n (np.array([0]), np.array([1])),\n\n # len(obs) != len(fx)\n (np.array([0, 1, 2]), np.array([0, 1, 2, 3])),\n (np.array([2, 3, 4]), np.array([2, 3, 5, 6])),\n])\ndef test_r_nan(obs, fx):\n r = deterministic.pearson_correlation_coeff(obs, fx)\n assert np.isnan(r)\n\n\n@pytest.mark.parametrize(\"obs,fx,value\", [\n (np.array([0, 1]), np.array([0, 1]), 1.0),\n (np.array([1, 2, 3]), np.array([2, 2, 2]), 0.0),\n])\ndef test_r2(obs, fx, value):\n r2 = deterministic.coeff_determination(obs, fx)\n assert pytest.approx(r2) == value\n\n\n@pytest.mark.parametrize(\"obs,fx,value\", [\n (np.array([0, 1]), np.array([0, 1]), 0.0),\n (np.array([0, 2]), np.array([0, 4]), 1.0),\n (np.array([0, 2]), np.array([0, 6]), 2.0),\n])\ndef test_crmse(obs, fx, value):\n crmse = deterministic.centered_root_mean_square(obs, fx)\n assert crmse == value\n\n\n@pytest.mark.parametrize(\"obs,fx,value\", [\n ([0, 1], [0, 1], 0.0),\n ([1, 2], [1, 2], 0.0),\n ([0, 1], [0, 2], 0.5),\n ([0, 1, 2], [0, 0, 2], 1.0 / 3.0),\n])\ndef test_ksi(obs, fx, value):\n ksi = deterministic.kolmogorov_smirnov_integral(obs, fx)\n assert pytest.approx(ksi) == value\n\n\n@pytest.mark.parametrize(\"obs,fx,value\", [\n ([0, 1], [0, 1], 0.0),\n ([1, 2], [1, 2], 0.0),\n ([0, 1, 2], [0, 0, 2], 1 / 3 / (1.63 / np.sqrt(3) * 2) * 100),\n])\ndef test_ksi_norm(obs, fx, value):\n ksi = 
deterministic.kolmogorov_smirnov_integral(\n obs, fx, normed=True\n )\n assert pytest.approx(ksi) == value\n\n\n@pytest.mark.parametrize(\"obs,fx,value\", [\n ([0, 1], [0, 1], 0.0),\n ([1, 2], [1, 2], 0.0),\n ([0, 1, 2, 3, 4], [0, 0, 0, 0, 0], 0.8 - 1.63 / np.sqrt(5)),\n])\ndef test_over(obs, fx, value):\n ov = deterministic.over(obs, fx)\n assert ov == value\n\n\n@pytest.mark.parametrize(\"obs,fx,value\", [\n (np.array([0, 1]), np.array([0, 1]), 0.0),\n (np.array([1, 2]), np.array([1, 2]), 0.0),\n (\n np.array([0, 1, 2]),\n np.array([0, 0, 2]),\n 1/4 * (1/3 + 0 + 2 * np.sqrt(1/3))\n ),\n])\ndef test_cpi(obs, fx, value):\n cpi = deterministic.combined_performance_index(obs, fx)\n assert pytest.approx(cpi) == value\n\n\n@pytest.fixture\ndef deadband_obs_fx():\n obs = np.array([1, 2, 3, 4])\n # 2.1 and 3.8 are outside the 5% deadband on some platforms due to\n # floating point arithmetic errors\n fx = np.array([2, 2.09, 2, 3.81])\n return obs, fx\n\n\n@pytest.mark.parametrize('func,expect,expect_deadband,args', [\n (deterministic.mean_absolute, 0.57, 0.5, []),\n (deterministic.mean_bias, -0.025, 0., []),\n (deterministic.root_mean_square,\n 0.7148776119029046, 0.7071067811865476, []),\n (deterministic.mean_absolute_percentage,\n 35.64583333333333, 33.33333333333333, []),\n (deterministic.normalized_mean_absolute, 5.7, 5.0, [10.]),\n (deterministic.normalized_mean_bias, -0.25, 0., [10.]),\n (deterministic.normalized_root_mean_square,\n 7.148776119029046, 7.071067811865476, [10.]),\n]\n)\ndef test_deadband(func, error_func_deadband, deadband_obs_fx, expect,\n expect_deadband, args):\n obs, fx = deadband_obs_fx\n out = func(obs, fx, *args)\n out_deadband = func(obs, fx, *args, error_fnc=error_func_deadband)\n assert_allclose(out, expect)\n assert_allclose(out_deadband, expect_deadband)\n","sub_path":"solarforecastarbiter/metrics/tests/test_deterministic.py","file_name":"test_deterministic.py","file_ext":"py","file_size_in_byte":7901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"104908023","text":"import os\nfrom flask import Flask\nfrom flask import json\nfrom flask import request\nfrom github import Github\nfrom github import GithubObject\nfrom github import Label\n\n\napp = Flask(__name__)\nyoda = Github(\"glamyoda\", \"blue1289\").get_user()\n\ndef getRepo(reponame):\n for repo in yoda.get_repos():\n if repo.name == reponame:\n return repo\n\n return None\n\ndef options():\n pass\n\n@app.after_request\ndef after_request(response):\n response.headers.add('Access-Control-Allow-Origin', '*')\n response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization,Access-Control-Allow-Origin,Access-Control-Allow-Headers')\n response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE')\n return response\n\n@app.route('/repos', methods = ['GET'])\ndef repos():\n return json.dumps([repo.name for repo in yoda.get_repos()])\n\n@app.route('/issues/', methods = ['GET'])\ndef issues(reponame):\n repo = getRepo(reponame)\n return json.dumps([issue.title for issue in repo.get_issues()]) if repo != None else \"Nenhuma issue para o repo \" + reponame\n\n@app.route('/issue/', methods = ['POST'])\ndef issue(reponame):\n repo = getRepo(reponame)\n\n return repo.create_issue(\n title = request.form['title'],\n body = request.form['body'],\n labels = [str(lbl) for lbl in request.form['labels'].split(',')]).title\n\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 5000))\n app.debug = True\n app.run(host = '0.0.0.0', 
port = port)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"222123383","text":"from erdos.data_stream import DataStream\n\n\nclass RayInputDataStream(DataStream):\n def __init__(self, actor_handle, data_stream):\n super(RayInputDataStream, self).__init__(\n data_type=data_stream.data_type,\n name=data_stream.name,\n labels=data_stream.labels,\n callbacks=data_stream.callbacks,\n completion_callbacks=data_stream.completion_callbacks,\n uid=data_stream.uid)\n self._actor_handle = actor_handle\n\n def setup(self):\n for on_msg_callback in self.callbacks:\n self._actor_handle.register_callback.remote(\n self.uid, on_msg_callback)\n \n for on_watermark_callback in self.completion_callbacks:\n self._actor_handle.register_completion_callback.remote(\n self.uid, on_watermark_callback)\n","sub_path":"erdos/ray/ray_input_data_stream.py","file_name":"ray_input_data_stream.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"436092493","text":"from argparse import ArgumentParser,Action\n\nknown_drivers = ['local','s3']\nclass DriverAction(Action):\n def __call__(self, parser, namespace, values, option_string=None):\n driver, destination = values\n if driver.lower() not in known_drivers:\n parser.error(\"Unknown driver. Available drivers are 'local'\\\n and 'S3'\")\n namespace.driver = driver.lower()\n namespace.destination = destination\n\n\n\ndef create_parser():\n parser = ArgumentParser()\n parser.add_argument('url',help=\"URL of the PostgreSQL database to backup\")\n parser.add_argument('--driver',\n help=\"How and where to store the backup\",\n nargs=2,\n action=DriverAction,\n required=True)\n return parser\n\n","sub_path":"src/pgbackup/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"57559447","text":"import sqlite3\nimport sys\n\nfrom PyQt5 import uic\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QTableWidgetItem\n\nfrom addEditCoffeeForm_py import AddEditCoffeeForm\n\n\nclass Window(QMainWindow):\n def __init__(self):\n super().__init__()\n uic.loadUi(\"main.ui\", self)\n self.refresh()\n self.initUI()\n\n def refresh(self):\n self.con = sqlite3.connect(\"coffee.sqlite\")\n self.cur = self.con.cursor()\n self.data = list(self.cur.execute(\"SELECT * FROM coffee\").fetchall())\n self.table.setRowCount(len(self.data))\n for i in range(len(self.data)):\n for j in range(len(self.data[i])):\n self.table.setItem(i, j, QTableWidgetItem(str(self.data[i][j])))\n self.con.close()\n\n def initUI(self):\n self.add.clicked.connect(self.do)\n self.change.clicked.connect(self.do)\n\n def do(self):\n button = self.sender().text()\n data = None\n if button == \"Изменить\":\n data = self.data[self.table.currentRow()]\n edit = AddEditCoffeeForm(data)\n edit.show()\n edit.exec()\n self.refresh()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = Window()\n ex.show()\n sys.exit(app.exec())\n","sub_path":"main_py.py","file_name":"main_py.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"401297899","text":"def sixmultssamedigits(n):\n string_rep = sorted(str(n))\n return all(sorted(str(n * mult)) == string_rep for mult in range(2, 
7))\n\nnum = 1\n\nwhile True:\n if sixmultssamedigits(num):\n print(num)\n break\n else:\n num += 1\n\n# This answer should be obvious for anyone who has ever typed 1/7\n","sub_path":"Problems 051 - 100/Problem 052.py","file_name":"Problem 052.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"560595376","text":"from inky import InkyPHAT\nfrom PIL import Image, ImageFont, ImageDraw\nfrom font_fredoka_one import FredokaOne\n\n# get inky display variable\ninky_display = InkyPHAT(\"yellow\")\n\n# set inky display border (YELLOW/BLACK/WHITE)\ninky_display.set_border(inky_display.YELLOW)\n\nimg = Image.new(\"P\", (inky_display.WIDTH, inky_display.HEIGHT))\ndraw = ImageDraw.Draw(img)\n\n# decide font and size\nfont = ImageFont.truetype(FredokaOne, 32)\n\n# message to write\nmessage = \"Hello, World!\"\n\n# get width and height of the message to write\nw, h = font.getsize(message)\n\n# The x and y variables will tell the draw.text() function where to place the top left corner of our text\nx = (inky_display.WIDTH / 2) - (w / 2)\ny = (inky_display.HEIGHT / 2) - (h / 2)\n\n# draw message at a starting point, with decided font and colour\ndraw.text((x, y), message, inky_display.YELLOW, font)\n\ninky_display.set_image(img)\ninky_display.show()\n","sub_path":"examples/myTests/test_1.py","file_name":"test_1.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"227587190","text":"import csv\nimport numpy as np\n\nwith open('trainLabels.csv', 'rb') as csvTrainLabels:\n\ttrainLabels = csv.reader(csvTrainLabels, delimiter=',')\n\tlabels = []\n\tfor row in trainLabels:\n\t\tlabels.append(int(row[0]))\n\t# print labels\n\nwith open('trainFeatures.csv', 'rb') as csvTrainFeatures:\n\ttrainFeatures = csv.reader(csvTrainFeatures, delimiter=',')\n\ti = 0\n\tdic = {}\n\tfor row in trainFeatures:\n\t\tdic[tuple(row)] = labels[i]\n\t\ti += 1\n\n# with open('digitsOutput1.csv', 'wb') as csv1:\n# \twriter1 = csv.writer(csv1, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n# \twith open('digitsOutput2.csv', 'wb') as csv2:\n# \t\twriter2 = csv.writer(csv2, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n# \t\twith open('digitsOutput5.csv', 'wb') as csv5:\n# \t\t\twriter5 = csv.writer(csv5, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n# \t\t\twith open('digitsOutput10.csv', 'wb') as csv10:\n# \t\t\t\twriter10 = csv.writer(csv10, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n# \t\t\t\twith open('digitsOutput25.csv', 'wb') as csv25:\n# \t\t\t\t\twriter25 = csv.writer(csv25, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)\nwith open('digitsOutput.csv', 'wb') as csvW:\n\twriter = csv.writer(csvW, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n\twith open('testFeatures.csv', 'rb') as csvVal:\n\t\tvalFeatures = csv.reader(csvVal, delimiter=',')\n\t\titeration = 0\n\t\tfor valRow in valFeatures:\n\t\t\ta = np.array(map(float, valRow))\n\t\t\t# lst: first element = Euclidean distance, second element = corresponding digit\n\t\t\tlst = []\n\t\t\twith open('trainFeatures.csv', 'rb') as csvTrain:\n\t\t\t\ttrainFeatures = csv.reader(csvTrain, delimiter=',')\n\t\t\t\tfor trainRow in trainFeatures:\n\t\t\t\t\tb = np.array(map(float, trainRow))\n\t\t\t\t\tcurr_distance = float(sum(np.sqrt((a-b) * (a-b))))\n\t\t\t\t\tif len(lst) < 25:\n\t\t\t\t\t\tlst.append((curr_distance, 
dic[tuple(trainRow)]))\n\t\t\t\t\t\tlst = sorted(lst, key=lambda x: x[0])\n\t\t\t\t\telif curr_distance < lst[len(lst)-1][0]:\n\t\t\t\t\t\tlst.pop()\n\t\t\t\t\t\tlst.append((curr_distance, dic[tuple(trainRow)]))\n\t\t\t\t\t\tlst = sorted(lst, key=lambda x: x[0])\n\n\t\t\t# k = 1, 2\n\t\t\tguess = lst[0][1]\n\t\t\tprint(\"k=1 Iteration \" + str(iteration) + \": \" + str(guess))\n\t\t\twriter.writerow([guess])\n\n\t\t\t# writer1.writerow([guess])\n\t\t\t# writer2.writerow([guess])\n\n\t\t\t# # k = 5,10,25\n\t\t\t# for k in [5,10,25]:\n\t\t\t# \tvalues = [[0.0,0,i] for i in range(10)]\n\t\t\t# \tfor item in lst[:k]:\n\t\t\t# \t\tdist = item[0]\n\t\t\t# \t\tdigit = item[1]\n\t\t\t# \t\tvalues[digit][0] += dist\n\t\t\t# \t\tvalues[digit][1] += 1\n\t\t\t# \tfor i in values:\n\t\t\t# \t\tavg_dist = i[0]\n\t\t\t# \t\tnum = i[1]\n\t\t\t# \t\tif num != 0:\n\t\t\t# \t\t\ti[0] = avg_dist / num\n\t\t\t# \t# print values\n\t\t\t# \tmax_occurrences = 0\n\t\t\t# \ttie = []\n\t\t\t# \tfor i in values:\n\t\t\t# \t\tif i[1] > max_occurrences:\n\t\t\t# \t\t\ttie = [i]\n\t\t\t# \t\t\tmax_occurrences = i[1]\n\t\t\t# \t\telif i[1] == max_occurrences:\n\t\t\t# \t\t\ttie.append(i)\n\t\t\t# \ttie = sorted(tie, key=lambda x: x[0])\n\t\t\t# \t# print tie\n\t\t\t# \tguess = tie[0][2]\n\t\t\t# \tif k == 5:\n\t\t\t# \t\tprint(\"k=\" + str(k) + \" Iteration \" + str(iteration) + \": \" + str(guess))\n\t\t\t# \t# elif k==10 or k==25:\n\t\t\t# \t# \tprint(\"k=\" + str(k) + \" Iteration \" + str(iteration) + \": \" + str(guess))\n\t\t\t# \tif k == 5:\n\t\t\t# \t\twriter5.writerow([guess])\n\t\t\t# \telif k == 10:\n\t\t\t# \t\twriter10.writerow([guess])\n\t\t\t# \telif k == 25:\n\t\t\t# \t\twriter25.writerow([guess])\n\t\t\titeration += 1","sub_path":"knearest.py","file_name":"knearest.py","file_ext":"py","file_size_in_byte":3289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"432950015","text":"#!/usr/bin/python\n\n#-------------------------------------------------------------------------------\n#run from the command line using './extractSAS output_file_name'\n#extracts the reactivity components, temperatures, power, flow, etc for each printed \n#timestep. uses matlab to plot data in peak channel. \n#user is required to input which is the peak channel.\n#-------------------------------------------------------------------------------\n\n#####\n#user input\n#####\n\nchannelNums = [4] #[1,2,3,4] #channel numbers of to be plotted\nrhoLimits = '[]' #range of reactivity to be plotted, ($), no spaces allowed, leave as '[]' if you want code to decide\nshortTimeLimit = 500 #range of time to be plotted in short time scale plots, (s)\nIHXintermediateSide = 13 #element number of intermediate side of IHX (tube side)\nIHXpump = 2 #element number of intermediate pump\nprecursorDecayConstants = [1.3377E-2, 3.1026E-2, 1.1763E-1, 3.0917E-1, 8.8605E-1, 2.9416E0]\n#topOfActiveCore = '0.81280' #string with height of top of active core. 
enter exactly as printed in SAS output\ntopOfActiveCore = '1.06680'\n#matlabExe = '/Applications/MATLAB_R2014b.app/bin/matlab' #for running locally\nmatlabExe = 'matlab' #for running on savio\n\n\n#####\n#imports\n#####\n\nfrom os import chdir\nfrom os import getcwd\nfrom os import mkdir\nfrom os import remove\nfrom subprocess import Popen\nfrom sys import argv\n\nfrom matlabPlotCommands import matlabPlotCommands\nimport modules\n\n\n#####\n#do extraction for each channel\n#####\n\n#make folders for global plots (power, rho, precursors)\nmkdir('globalPlots')\nmkdir('globalFigs')\n\nfor channel in channelNums:\n\n print('channel '+str(channel)+' extraction:\\n')\n\n #####\n #initialize stuff\n #####\n \n print('initializing stuff...\\n')\n \n #reactivity parameters\n rhoStep = []\n rhoTime = [] #[s]\n power = [] #normalized\n decayPower = [] #fraction of normalized total power\n fissionPower = [] #fraction of normalized total power\n netReactivity = [] #[$]\n CRDL = [] #[$]\n radExpansion = [] #[$]\n doppler = [] #[$]\n fuelAxialExpansion = [] #[$]\n cladAxialExpansion = [] #[$]\n coolant = [] #[$]\n structureAxialExpansion = [] #[$]\n controlSystem = [] #[$]\n \n #primary loop parameters\n tempStep = [] #time steps in temperature table\n tempTime = [] #times in temperature tables [s]\n saturation = [] #[K]\n fuelPeak = [] #[K]\n cladPeak = [] #[K]\n coolantPeak = [] #[K]\n flowRate = [] #normalized\n coolantInlet = [] #[K]\n coolantOutlet = [] #[K]\n fuelAve = [] #[K]\n cladAve = [] #[K]\n topActiveCoreTemp = [] #[K]\n \n #intermediate loop parameters\n IHXintermediateInlet = [] #[K]\n IHXintermediateOutlet = [] #[K]\n IHXflow = [] #[normalized]\n \n #delayed neutron precursor decay rates\n group1 = []\n group2 = []\n group3 = []\n group4 = []\n group5 = []\n group6 = []\n \n #put all entries into tables\n rhoTab = [rhoStep, rhoTime, power, decayPower, fissionPower, netReactivity, CRDL, radExpansion, doppler, fuelAxialExpansion, cladAxialExpansion, coolant, structureAxialExpansion, controlSystem]\n primaryTab = [tempStep, tempTime, saturation, fuelPeak, cladPeak, coolantPeak, flowRate, coolantInlet, coolantOutlet, fuelAve, cladAve, topActiveCoreTemp]\n intermediateTab = [tempStep, tempTime, IHXintermediateInlet, IHXintermediateOutlet, IHXflow]\n precursorTab = [tempTime, group1, group2, group3, group4, group5, group6]\n \n \n #####\n #open file and read\n #####\n \n print('reading from SAS output file...\\n')\n \n outputFile = str(argv[-1])\n \n fs = open(outputFile, 'r')\n \n tempTableFlag = 0\n for line in fs:\n try:\n if line[0:3] == ' ++': #get reactivity at steps\n rhoTab = modules.getStepReactivity(line, rhoStep, rhoTime, power, decayPower, fissionPower, netReactivity, CRDL, radExpansion, doppler, fuelAxialExpansion, cladAxialExpansion, coolant, structureAxialExpansion, controlSystem)\n elif line[0:34] == ' MAIN TIME STEP': #get times at steps\n [tempStep, tempTime] = modules.tempStepTime(line, tempStep, tempTime)\n elif line[0:24] == ' FINISHED NULL TRANSIENT': #alter primaryTab to remove null transient info\n [tempStep, tempTime, IHXflow, IHXintermediateInlet, IHXintermediateOutlet] = modules.removeSteadyState(tempStep, tempTime, IHXflow, IHXintermediateInlet, IHXintermediateOutlet)\n elif line.split()[0] == 'MAXIMUM' and line.split()[1] == 'TEMPERATURES': #if at table of max temps, go through following lines to find peak channel info\n nextLine = fs.next()\n chanFlag = 0\n while chanFlag == 0:\n if nextLine[19:20] == str(channel): #if peak channel, save info\n [saturation, 
fuelPeak, cladPeak, coolantPeak, chanFlag] = modules.channelPeakValues(nextLine, saturation, fuelPeak, cladPeak, coolantPeak)\n else: #if peak channel not on this line, skip to next\n nextLine = fs.next()\n elif line.split()[0] == '***' and line.split()[1] == 'TRANSIENT' and int(line.split()[-2]) == channel: #if spot for transient normalized flow\n nextLine = fs.next()\n flowRate.append(float(nextLine.split()[-1]))\n inletFlag = 0\n while inletFlag == 0: #get inlet, outlet, and top of active core temps\n if nextLine.split()[0] == 'VESSEL' and nextLine.split()[1] == 'OUTLET': #get outlet temp\n nextLine = fs.next()\n coolantOutlet.append(float(nextLine[15:23]))\n elif nextLine.split()[0] == topOfActiveCore: #get temp at top of active core\n topActiveCoreTemp.append(float(nextLine[15:23]))\n nextLine = fs.next()\n elif nextLine.split()[0] == '0.00000': #get inlet temp\n coolantInlet.append(float(nextLine[15:23]))\n inletFlag = 1\n else:\n nextLine = fs.next()\n fuelFlag = 0\n while fuelFlag == 0:\n if nextLine[0:39] == ' INNER MIDPOINT OUTER': #get average fuel temp\n nextLine = fs.next()\n nextLine = fs.next() #skip two lines\n fuelNodeMidHeight = []\n fuelNodeAveTemp = []\n cladNodeAveTemp = []\n fuelNodeFlag = 0\n while fuelNodeFlag == 0:\n if nextLine == '\\n': #if blank line, table is over\n fuelNodeFlag = 1\n else: #read in values\n [fuelNodeMidHeight, fuelNodeAveTemp, cladNodeAveTemp] = modules.nodeTemps(nextLine, fuelNodeMidHeight, fuelNodeAveTemp, cladNodeAveTemp)\n nextLine = fs.next()\n [fuelNodeMidHeight, fuelNodeAveTemp, cladNodeAveTemp] = modules.reverseNodeOrder(fuelNodeMidHeight, fuelNodeAveTemp, cladNodeAveTemp)\n #find average temperature of fuel/clad by volume-weighted average\n [fuelAve, cladAve] = modules.aveFuelCladTemp(fuelNodeMidHeight, fuelNodeAveTemp, cladNodeAveTemp, fuelAve, cladAve)\n fuelFlag = 1\n else: #move to next line to find table\n nextLine = fs.next()\n elif line[0:36] == ' PUMPS': #get intermediate loop flow rate from pump info\n nextLine = fs.next() #skip a line\n if nextLine == ' PUMP FLOW HEAD SPEED PUMP TORQUE MOTOR TORQUE HYDRAULIC EFFICIENCY': #if its first instance, skip it\n pass\n else: #if not first instance, record normalized flow\n nextLine = fs.next() #skip line\n IHXflow.append(float(nextLine.split()[7]))\n elif line[0:20] == ' IHX TEMPERATURES, K' and int(line.split()[-1]) == IHXintermediateSide: #get temps at inlet and outlet of IHX intermediate side (tube side)\n nextLine = fs.next() #skip 4 lines\n nextLine = fs.next()\n nextLine = fs.next()\n nextLine = fs.next()\n IHXintermediateInlet.append(float(nextLine.split()[4]))\n IHXnodeFlag = 0\n while IHXnodeFlag == 0: #go through table until reaching end\n previousLine = nextLine\n nextLine = fs.next()\n if nextLine == '\\n': #if its empty, the previous line has outlet coolant temp\n IHXintermediateOutlet.append(float(previousLine.split()[2]))\n IHXnodeFlag = 1\n elif line[0:40] == ' NUMBER CONCENTRATION': #get delayed neutron precursor concentrations and multiply by decay constant\n i = 1 #iterate for group\n for group in precursorTab[1:]:\n nextLine = fs.next()\n if nextLine[27:28] == '-': #if value is negative, set it to zero\n group.append(0.0)\n else:\n group.append(float(nextLine.split()[1][0:7]+'E'+nextLine.split()[1][8:])*precursorDecayConstants[i-1])\n i = i + 1\n else: #not of interest\n pass\n except (KeyError, ValueError, IndexError): #if the line is shit\n pass\n \n fs.close()\n \n #alter table to include SS temps (approximating SS by values at first step)\n [flowRate, 
coolantInlet, coolantOutlet, fuelAve, cladAve, precursorTab, topActiveCoreTemp] = modules.addSteadyStateValues(flowRate, coolantInlet, coolantOutlet, fuelAve, cladAve, precursorTab, topActiveCoreTemp)\n \n #find min and max rho components if not specified by user\n if rhoLimits == '[]':\n rhoLimits = modules.findRhoLimits(rhoTab)\n\n #alter tables if only part of the info was printed out/read in (i.e. if SAS printed out saturation temp but aborted before printing out coolant peak temp)\n primaryTab = modules.correctPrimaryTab(primaryTab)\n precursorTab = modules.correctPrecursorTab(precursorTab)\n\n \n #####\n #print to temporary file\n #####\n \n print('printing temporary files...\\n')\n \n #make new directory and move into it\n mkdir('chan'+str(channel))\n chdir('./chan'+str(channel))\n\n #make tmp\n runDir = getcwd()\n fr = open('rho.txt', 'w')\n fp = open('temp.txt', 'w')\n fi = open('intermediate.txt', 'w')\n fpr = open('precursor.txt', 'w')\n \n #print reactivity tables\n modules.printReactivityTables(fr, rhoTab)\n fr.close()\n \n #print primary tables\n modules.printPrimaryTables(fp, primaryTab)\n fp.close()\n \n #print intermediate tables\n modules.printIntermediateTables(fi, intermediateTab)\n fi.close()\n \n #print precursor tables\n modules.printPrecursorTables(fpr, precursorTab)\n fpr.close()\n \n \n #####\n #make matlab produce plots\n #####\n \n #matlab commands, #write out matlab commands here with no spaces, end with the quit command\n command = matlabPlotCommands(runDir, shortTimeLimit, rhoLimits, matlabExe)\n \n print('plotting...')\n \n plotRun = Popen(command)\n plotRun.wait()\n \n print('plotting complete...\\n')\n \n #####\n #clean up\n #####\n \n print('cleaning up...\\n')\n \n #delete temporary files\n modules.deleteTmpFiles()\n\n #move power, rho, and precursor plots out of channel-specific folders, as they are global to whole core\n if channel == channelNums[0]: #if this is first channel in list, move plots\n modules.moveGlobalPlots()\n else: #if not first channel in list, just delete files, they are duplicate\n modules.deleteGlobalPlots()\n\n #move out of channel directory\n chdir('../')\n\nprint('data extraction complete')\n\n#print max temp\nfm = open('../../../max.txt', 'a')\nfm.write(str(max(primaryTab[5]))+'\\n')\nfm.close()\n\n#print asmptotic temp\nfa = open('../../../asymptotic.txt', 'a')\nfa.write(str(max(primaryTab[-1]))+'\\n')\nfa.close()\n\n#####\n#delete mini.out\n#####\n\nremove('mini.out')\n","sub_path":"extractSAS.py","file_name":"extractSAS.py","file_ext":"py","file_size_in_byte":12454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"99538119","text":"import numpy as np\nimport astropy.io.fits as fits\nimport astropy.wcs as pw\nimport time, re\nimport univ\n\nclass FrameParameters:\n \"\"\"\n class holder of the frame parameters.\n \"\"\"\n def __init__(self):\n pass\n\ndef poet_dataread(event, type=0, log=None):\n \"\"\"\n This function reads a set of IRAC AORS, (or IRAC Subarray AORS),\n sorting by dither position, if any.\n\n Parameters:\n ----------\n event : An event object. \n type : integer\n Specifies the type of data to read. 
\n 0 = data, 1 = precalibration data, 2 = postcalibration data.\n log : A logedit object that keeps the log.\n\n Outputs:\n -------\n data : [maxnimpos, nx, ny, npos] float array containing the data \n frames, sorted into dither positions.\n head : header data\n uncd : [maxnimpos, nx, ny, npos] float array, uncertainties\n bdmskd: [maxnimpos, nx, ny, npos] int array, per-pixel data flag\n nimpos: array-like\n array containing the number of frames in each position.\n fp: FrameParameters object containing [npos, maxnimpos] double arrays \n of per-frame parameters.\n\n Example:\n -------\n\n Modification History:\n --------------------\n Written by:\tJoseph Harrington, Cornell.\n 2005-09-16 jh@oobleck.astro.cornell.edu\n 2005-10-26 jh Fixed frame times.\n 2005-10-27\tjh Moved calculation of some constants out of the\n\t\t routine. Filled in header. Corrected some\n\t\t array datatypes.\n 2005-11-25\tjh Converted to using FP array.\n 2006-01-04\tjh Header tweak.\n 2006-03-20 jh Added zodi, ism, cib header values.\n 2007-03-07 khorning Adapted program to use for non-subarray data\n 2007-07-15 jh Made nimpos be a long, not integer, array.\n 2010-08-24 patricio Converted to python.\n 2010-10-27 patricio Comments added.\n 2014-08-13 garland switched the pyfits package to astropy.io.fits\n\t zabblleon@gmail.com \n 2017-06-20 zacchaeus Fixed None comparisons.\n 2018-01-10 zacchaeus Updated to Python 3\n zaccysc@gmail.com\n \"\"\"\n # General variables\n dpref = event.dpref # data directory prefix\n expadj = event.expadj # id number of first image\n ndcenum = event.ndcenum # number of dcenum\n npos = event.npos # number of positions\n nnod = event.nnod # number of nodding positions\n #fpref = event.fpref # file names prefix\n pipev = event.pipev # spitzer pipeline version\n bcddir = event.inst.bcddir # directory containing bcd files\n bcdsuf = event.inst.bcdsuf # bcd file suffix\n buncsuf = event.inst.buncsuf # uncertainties file suffix\n #bdmsksuf = event.inst.bdmsksuf # badpixelmask file suffix\n brmsksuf = event.inst.brmsksuf # badpixelmask file suffix\n if not event.nomask:\n masksuf = event.masksuf # badpixelmask file suffix\n nx = event.nx # \n ny = event.ny # \n nz = event.nz # number of subarrays in datafile\n nh = event.nh # \n framtime = event.framtime # \n \n # AORs/cal AORs variables\n aorname = event.aorname[np.where(event.aortype==type)]\n if type == 2: # Post calibration:\n naor = event.postnaor # number of AORs\n nexpid = event.postnexpid \n maxnimpos = int(event.postmaxnimpos)\n nmcyc = event.postnmcyc\n bcdlist = event.postbcdfiles # List of files to read\n elif type == 1: # Preflash:\n naor = event.prenaor\n nexpid = event.prenexpid \n maxnimpos = int(event.premaxnimpos)\n nmcyc = event.prenmcyc\n bcdlist = event.prebcdfiles\n elif type == 0: # Event:\n naor = event.naor\n nexpid = event.nexpid\n maxnimpos = int(event.maxnimpos)\n nmcyc = event.nmcyc\n nscyc = event.nscyc\n bcdlist = event.bcdfiles\n\n # Allocate space for returned arrays\n headerdtype = 'S'+str(nh*81)\n head = np.zeros( (maxnimpos // nz, npos), dtype=headerdtype)\n data = np.zeros( (maxnimpos, ny, nx, npos), dtype=float)\n uncd = np.zeros( (maxnimpos, ny, nx, npos), dtype=float)\n bdmskd = np.zeros( (maxnimpos, ny, nx, npos), dtype=int)\n brmskd = np.zeros( (maxnimpos, ny, nx, npos), dtype=int)\n\n # Allocate space for the frame parameters\n fp = FrameParameters()\n fpsize = np.zeros((npos, maxnimpos))\n fp.frmobs = np.copy(fpsize) # sequential frame number\n fp.pos = np.copy(fpsize) # position number\n fp.aor
= np.copy(fpsize) # sequential AOR number\n fp.expid = np.copy(fpsize) # EXPosure ID\n fp.dce = np.copy(fpsize) # Data Collection Event\n fp.subarn = np.copy(fpsize) # subarray frame number\n fp.time = np.copy(fpsize) # frame mid-time, seconds J2000.0\n fp.zodi = np.copy(fpsize) # zodiacal light estimate, see header comment\n fp.ism = np.copy(fpsize) # interstellar medium estimate,see head comment\n fp.cib = np.copy(fpsize) # cosmic infrared background,see header comment\n fp.afpat2b = np.copy(fpsize) # temperatures, K, see header comment\n fp.afpat2e = np.copy(fpsize) \n fp.ashtempe = np.copy(fpsize) \n fp.atctempe = np.copy(fpsize) \n fp.acetempe = np.copy(fpsize) \n fp.apdtempe = np.copy(fpsize) \n fp.acatmp1e = np.copy(fpsize) \n fp.acatmp2e = np.copy(fpsize)\n fp.acatmp3e = np.copy(fpsize) \n fp.acatmp4e = np.copy(fpsize) \n fp.acatmp5e = np.copy(fpsize) \n fp.acatmp6e = np.copy(fpsize) \n fp.acatmp7e = np.copy(fpsize) \n fp.acatmp8e = np.copy(fpsize)\n fp.avrstucc = np.copy(fpsize) # volatages, Volts, see header comments\n fp.avrstbeg = np.copy(fpsize)\n fp.avdetc = np.copy(fpsize)\n fp.avdetbeg = np.copy(fpsize)\n fp.avgg1beg = np.copy(fpsize)\n fp.avdducc = np.copy(fpsize)\n fp.avddubeg = np.copy(fpsize)\n fp.avggclc = np.copy(fpsize)\n fp.avggcbeg = np.copy(fpsize)\n fp.ahtribeg = np.copy(fpsize) # heater current (uA) at start of integration\n fp.ahtrvbeg = np.copy(fpsize) # heater voltage (V) at start of integration\n # mips frame parameters\n fp.cmd_t_24 = np.copy(fpsize)\n fp.ad24tmpa = np.copy(fpsize)\n fp.ad24tmpb = np.copy(fpsize)\n fp.acsmmtmp = np.copy(fpsize)\n fp.aceboxtm = np.copy(fpsize)\n fp.pxscl2 = np.copy(fpsize)\n fp.pxscl1 = np.copy(fpsize)\n \n fp.heady = np.copy(fpsize) \n fp.headx = np.copy(fpsize) \n fp.filename = np.zeros((npos, maxnimpos), dtype='S150')\n\n nimpos = np.zeros(npos, np.long)\n\n # conveniences\n salist = np.arange(nz)\n sadind = np.arange(nz, dtype=np.double)\n\n # position of the star\n sky = [[event.ra*180./np.pi, event.dec*180./np.pi]]\n\n # dictionary to get position in MIPS\n mirind = {1929.:0, 2149.5:1, 1907.5:2, 2128.:3,\n 1886.:4, 2106.5:5, 1864.5:6}\n\n # Write to log first line\n title=[\"\\nEvent data:\\n\", \"\\nPreflash data:\\n\", \"\\nPost-calibration data:\\n\"]\n if log is not None:\n log.writelog(title[type] + ' aor expid dcenum pos')\n else:\n print(title[type] + ' aor expid dcenum pos')\n\n # pattern to find expid dcenum \n pattern = re.compile(\"_([0-9]{4})_([0-9]{4})_\")\n\n\n # Obtain data\n for aor in np.arange(naor):\n dir = dpref + aorname[aor] + bcddir\n bcd = bcdlist[aor]\n\n for i in np.arange(len(bcd)):\n # Read data\n try:\n dataf, bcdhead = fits.getdata(dir + bcd[i], header=True)\n except: # If a file doesn't exist, skip to next file.\n log.writelog(dir + bcd[i] + \" File not found!\")\n continue\n\n try: # Read uncertainity and mask files\n # Replace suffix in bcd file to get the corresponding file.\n uncfile = re.sub(bcdsuf, buncsuf, dir + bcd[i])\n uncf = fits.getdata(uncfile)\n mskfile = re.sub(bcdsuf, masksuf, dir + bcd[i])\n bdmskf = fits.getdata(mskfile)\n except:\n pass\n\n try: # Mips\n brmskfile = re.sub(bcdsuf, brmsksuf, dir + bcd[i])\n brmskf = fits.getdata(brmskfile)\n except:\n brmskf = -np.ones((nz, ny, nx), np.long)\n\n # Obtain expid and dcenum\n index = pattern.search(bcd[i])\n expid = int(index.group(1))\n dcenum = int(index.group(2))\n\n # Find dither position\n try:\n pos = bcdhead['DITHPOS'] - 1\n except: \n pos = 0 # No dither position in stare data\n if event.inst.name == 'irs':\n pos = 
expid % npos\n elif event.inst.name == 'mips':\n nod = expid % nnod\n pos = nod * nscyc + mirind[bcdhead['CSM_PRED']]\n\n be = nimpos[pos] # begining\n en = nimpos[pos] + nz # end\n\n # Store data\n data [be:en, :, :, pos] = dataf.reshape( (nz,ny,nx))\n uncd [be:en, :, :, pos] = uncf.reshape( (nz,ny,nx))\n \n if not event.nomask:\n bdmskd[be:en, :, :, pos] = bdmskf.reshape((nz,ny,nx))\n brmskd[be:en, :, :, pos] = brmskf.reshape((nz,ny,nx))\n else: # If no masks supplied, set to 1 everywhere\n bdmskd[be:en, :, :, pos] = 1\n brmskd[be:en, :, :, pos] = 1\n # All the single numbers per frame that we care about\n fp.frmobs[pos, be:en] = np.sum(nimpos) + salist\n fp.pos [pos, be:en] = pos\n fp.aor [pos, be:en] = aor\n fp.expid [pos, be:en] = expid\n fp.dce [pos, be:en] = dcenum\n fp.subarn[pos, be:en] = salist\n # ccampo 2011/3/18: changed to UTC from SCLK to avoid timing inconsistencies\n fp.time [pos, be:en] = bcdhead['UTCS_OBS'] + framtime*(sadind+0.5)\n\n # Header info to read out\n keys = ['ZODY_EST',\n 'ISM_EST' ,\n 'CIB_EST' ,\n 'AFPAT2B' ,\n 'AFPAT2E' ,\n 'ASHTEMPE',\n 'ATCTEMPE',\n 'ACETEMPE',\n 'APDTEMPE',\n 'ACATMP1E',\n 'ACATMP2E',\n 'ACATMP3E',\n 'ACATMP4E',\n 'ACATMP5E',\n 'ACATMP6E',\n 'ACATMP7E',\n 'ACATMP8E',\n 'AVRSTUCC',\n 'AVRSTBEG',\n 'AVDETC' ,\n 'AVDETBEG',\n 'AVGG1BEG',\n 'AVDDUCC' ,\n 'AVDDUBEG',\n 'AVGGCLC' ,\n 'AVGGCBEG',\n 'AHTRIBEG',\n 'AHTRVBEG']\n\n # Arrays to fill in (same order as keys)\n headarrs = [fp.zodi ,\n fp.ism ,\n fp.cib ,\n fp.afpat2b ,\n fp.afpat2e ,\n fp.ashtempe,\n fp.atctempe,\n fp.acetempe,\n fp.apdtempe,\n fp.acatmp1e,\n fp.acatmp2e,\n fp.acatmp3e,\n fp.acatmp4e,\n fp.acatmp5e,\n fp.acatmp6e,\n fp.acatmp7e,\n fp.acatmp8e,\n fp.avrstucc,\n fp.avrstbeg,\n fp.avdetc ,\n fp.avdetbeg,\n fp.avgg1beg,\n fp.avdducc ,\n fp.avddubeg,\n fp.avggclc ,\n fp.avggcbeg,\n fp.ahtribeg,\n fp.ahtrvbeg]\n\n # Read in header info. Sometimes keys are missing, hence\n # the try/except\n for k in range(len(keys)):\n try:\n headarrs[k][pos, be:en] = bcdhead[keys[k]]\n except:\n pass\n\n try:\n fp.pxscl2[pos, be:en] = np.abs(bcdhead['PXSCAL2'])\n fp.pxscl1[pos, be:en] = np.abs(bcdhead['PXSCAL1'])\n fp.acatmp5e[pos, be:en] = bcdhead['CMD_T_24']\n fp.acatmp6e[pos, be:en] = bcdhead['AD24TMPA']\n fp.acatmp6e[pos, be:en] = bcdhead['AD24TMPB']\n fp.acatmp5e[pos, be:en] = bcdhead['ACSMMTMP']\n fp.acatmp6e[pos, be:en] = bcdhead['ACEBOXTM'] + 273.0\n except:\n pass\n\n # Store filename\n fp.filename[pos, be:en] = dir + bcd[i]\n\n # Store header\n head[np.int(nimpos[pos] / nz), pos] = np.str(bcdhead)\n\n # Header position of the star:\n bcdhead[\"NAXIS\"] = 2\n wcs = pw.WCS(bcdhead, naxis=2)\n pix = wcs.wcs_world2pix(sky,0)\n fp.headx[pos, be:en] = pix[0][0]\n fp.heady[pos, be:en] = pix[0][1]\n\n # Print to log and screen:\n if log is not None:\n log.writelog('%4d'%aor + '%7d'%expid + '%7d'%dcenum + '%7d'%pos)\n else:\n print('%4d'%aor + '%7d'%expid + '%7d'%dcenum + '%7d'%pos)\n\n nimpos[pos] += nz\n\n # frame tags in fp\n\n # where there exist data\n fp.exist = np.zeros((npos, maxnimpos), np.long)\n for pos in np.arange(npos):\n fp.exist[pos, 0:nimpos[pos]] = 1\n \n fp.im = np.copy(fpsize) # Frame within position\n for pos in np.arange(npos):\n fp.im[pos, 0:nimpos[pos]] = np.arange(nimpos[pos], dtype=np.double)\n\n if event.inst.name != 'mips':\n fp.cycpos = np.trunc(fp.frmobs / (npos * nmcyc * nz)) # Cycle number\n fp.visobs = np.trunc(fp.frmobs / (nmcyc * nz))# Visit number within obs. 
set\n fp.frmvis = fp.im % (nmcyc * nz) # Frame within visit\n\n else:\n fp.cycpos = np.trunc(fp.frmobs / (2*ndcenum)) # Cycle number\n fp.visobs = np.trunc(fp.frmobs / ndcenum) # Visit number within obs. set\n fp.frmvis = np.trunc(fp.frmobs % ndcenum) # Frame within visit\n\n # Image scale:\n for pos in np.arange(npos):\n last = nimpos[pos]\n if np.all(fp.pxscl1[pos, 0:last] == fp.pxscl1[pos, 0]):\n event.posscl[1, pos] = np.abs(fp.pxscl1[pos, 0])\n if np.all(fp.pxscl2[pos, 0:last] == fp.pxscl2[pos, 0]):\n event.posscl[0, pos] = np.abs(fp.pxscl2[pos, 0])\n\n # Update event:\n if type == 0:\n event.data = data\n event.uncd = uncd\n event.bdmskd = bdmskd\n event.brmskd = brmskd\n event.head = head\n event.fp = fp\n event.nimpos = nimpos\n elif type == 1:\n event.predata = data\n event.preuncd = uncd\n event.prebdmskd = bdmskd\n event.prebrmskd = brmskd\n event.prehead = head\n event.prefp = fp\n event.prenimpos = nimpos\n elif type == 2:\n event.postdata = data\n event.postuncd = uncd\n event.postbdmskd = bdmskd\n event.postbrmskd = brmskd\n event.posthead = head\n event.postfp = fp\n event.postnimpos = nimpos\n\n event.fp.filename = event.fp.filename.astype(np.unicode_)\n\n return\n","sub_path":"lib/pdataread.py","file_name":"pdataread.py","file_ext":"py","file_size_in_byte":14243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"73483329","text":"############################################################\n# Given a docs file containing abstracts for results from the 30 topics in topics2017.xml and qrels files and docs,\n# does cross-validation of the given LeToR configuration.\n#\n# Lowell Milliken\n############################################################\nimport random\nimport learning_to_rank as l2r\nimport pickle\nimport os\n\nfrom learning_to_rank import load_indriscores\n\ncv_dir = 'cv_files'\n# m - meta\n# sd - splitdrugs\n# f - filter\n# t - target\n# jd - journal disease\n# tl - text length\n# is - indri scores\nfeatures_template = 's_{}_known_features_{}'\nunknown_template = 's_{}_{}_unknown_features_{}'\nmodel_name = cv_dir + os.sep + '{}_{}_model_{}'\nscore_filename = cv_dir + os.sep + '{}_{}_model_{}scores'\n\n\ndef gen_cv_sets():\n cv_sets = [1]*3 + [2]*3 + [3]*3 + [4]*3 + [5]*3 + [6]*3 + [7]*3 + [8]*3 + [9]*3 + [10]*3\n random.seed()\n random.shuffle(cv_sets)\n return cv_sets\n\n\n# ListNet rparams = {'-lr': 0.1, '-epoch': 3000}\ndef do_cv(unknown_docs_filename='topics2017_m_as_ex_tr_nd_ft_nsh_prf-2-20-0.5-0.5_large_gfix_alldocs.txt',\n meta=False, splitdrugs=False, metric='P@10', program='RankLib', filtered=False, targetproxy=False, dist=5,\n journaldisease=False, textlen=True, indriscore=False, otherscore=False, ranker='ListNet', rparams=None,\n kscorefile='topics2017_m_as_ex_tr_nd_ft_nsh_prf-2-10-0.5-0.8_basescores_large_gfix_run.txt',\n scorefile='topics2017_m_as_ex_tr_nd_ft_nsh_prf-2-10-0.5-0.8_ob-topics2017_m_as_ex_tr_nd_ft_nsh_prf-2-20-0.5-0.5_large_gfix_large_gfix_run.txt',\n fixparts=True, normscores=False, phraseterms=False, intval=True, termfile=None, termkeyfile=None, nodrugs=False):\n \"\"\"Creates a ranked list output file in TREC format doing training and cross validation for LeToR.\n\n :param unknown_docs_filename: name of file containing abstracts from the current Retrieval stage run\n :param meta: Boolean. Use metamap CUIs or not. 
Requires unknown_docs_filename + '.meta' file containing CUIs for each abstract.\n :param splitdrugs: split drugs into multiple features?\n :param metric: metric to train on. See RankLib help for options.\n :param program: Program to do LeToR with. Default RankLib.\n :param filtered: Filter CUIs to use with meta option. Requires either fterms.pickle or terms_filtered.pickle (for phraseterms) file.\n :param targetproxy: Use proximity to the work 'target' as a feature.\n :param dist: distance threshold for 'target' proximity\n :param journaldisease: Use disease presence in journal name as a feature.\n :param textlen: Use abtract length as a feature.\n :param indriscore: Use the indri score as a feature. Requires Indri scores for the qrel documents called unknown_docs_filename[:-11] + basescores_run.txt and a Indri results file called unknown_docs_filename[:-11] + run.txt\n :param otherscore: Use tf-idf and bm25 scores as a feature. Requires 'qrel_tfidfbase_run.txt' and 'qrel_bm25base_run.txt' as well as unknown_docs_filename[:-11] + 'tfidfbase_run.txt' and unknown_docs_filename[:-11] + 'bm25base_run.txt'\n :param ranker: LeToR ranker to use. See RankLib help for options.\n :param rparams: LeToR parameters in a dictionary. See RankLib help for options. Parameter name including leading '-' is key and parameter value is value.\n :param kscorefile: Alternate score file for use as a feature. This should be scores for the known qrels for training.\n :param scorefile: Alternate score file for use as a feature. This should be scores for the unknown documents for testing.\n :param fixparts: Boolean. Fixed cross-valiation partitions if True.\n :param normscores: Boolean. If True, Indri scores are normalized by (score - minscore)/(maxscore - minscore). Using the '-norm' in rparams with a norm type is preferred. See RankLib help.\n :param phraseterms: Boolean. Use only metamapped CUI terms from original terms that are not unigrams.\n :param intval: Boolean. Use RankLib internal validation. True preferred.\n :param termfile: Explicit set of CUI terms to use. A list in a pickle file.\n :param termkeyfile: Keys for mapping terms in the term file to features. Dict in a pickle file. Key = term. Value = term number (which maps to a feature number).\n :param nodrugs: Boolean. 
If True, do not use any drug information as a feature.\n \"\"\"\n unknown_base = unknown_docs_filename[:-11]\n parastr = 'n'\n\n if meta:\n parastr += '_m'\n if splitdrugs:\n parastr += '_sd'\n if nodrugs:\n parastr += '_nd'\n if filtered:\n parastr += '_f'\n if targetproxy:\n parastr += '_t{}'.format(dist)\n if journaldisease:\n parastr += '_jd'\n if textlen:\n parastr += '_tl'\n if indriscore:\n parastr += '_is'\n if otherscore:\n parastr += '_os'\n if normscores:\n parastr += '_ns'\n\n if scorefile:\n parastr += '_sf'\n if phraseterms:\n parastr += '_pt'\n if not intval:\n parastr += '_nov'\n\n topics = l2r.load_topics(distance=dist)\n\n filteredstr = '_filtered'\n if termfile is None:\n if not phraseterms:\n if filtered:\n termfile = 'terms{}.pickle'.format(filteredstr)\n termkeyfile = 'term_keys{}.pickle'.format(filteredstr)\n else:\n termfile = 'terms{}.pickle'.format('')\n termkeyfile = 'term_keys{}.pickle'.format('')\n else:\n termfile = 'fterms.pickle'\n termkeyfile = 'fterms_keys.pickle'\n else:\n parastr += '_' + termfile[:-11]\n\n if not os.path.exists(termfile):\n if not filtered:\n meta_docs = l2r.load_docs('qrel_docs.txt.meta')\n else:\n meta_docs = l2r.load_docs('qrel_docs.txt.meta.filtered5')\n\n l2r.save_terms(meta_docs, filtered)\n\n with open(termfile, 'rb') as infile:\n terms = pickle.load(infile)\n with open(termkeyfile, 'rb') as infile:\n term_keys = pickle.load(infile)\n\n meta_docs = None\n unknown_meta_docs = None\n\n if indriscore:\n basescores = load_indriscores(unknown_base + 'basescores_run.txt', normscores)\n unknownscores = load_indriscores(unknown_base + 'run.txt', normscores)\n else:\n basescores = None\n unknownscores = None\n\n if otherscore:\n basetfidfscores = load_indriscores('qrel_tfidfbase_run.txt', normscores)\n basebm25scores = load_indriscores('qrel_bm25base_run.txt', normscores)\n\n unknownitftdfscores = load_indriscores(unknown_base + 'tfidfbase_run.txt', normscores)\n unknownbm25scores = load_indriscores(unknown_base + 'bm25base_run.txt', normscores)\n else:\n basetfidfscores = None\n basebm25scores = None\n\n unknownitftdfscores = None\n unknownbm25scores = None\n\n if scorefile:\n kprecscores = load_indriscores(kscorefile, normscores)\n precscores = load_indriscores(scorefile, normscores)\n else:\n kprecscores = None\n precscores = None\n\n train_all = cv_dir + os.sep + features_template.format(parastr, 'all')\n test_all = cv_dir + os.sep + unknown_template.format(unknown_base, parastr, 'all')\n if filtered:\n train_all += filteredstr\n test_all += filteredstr\n # if not os.path.exists(train_all) or indriscore:\n known_docs = l2r.load_docs()\n if meta:\n if not filtered:\n meta_docs = l2r.load_docs('qrel_docs.txt.meta')\n else:\n meta_docs = l2r.load_docs('qrel_docs.txt.meta.filtered5')\n\n l2r.save_all_features(topics, known_docs, train_all, known=True, metadocs=meta_docs, terms=terms,\n term_keys=term_keys, splitdrugs=splitdrugs, targetproxy=targetproxy, journaldisease=journaldisease,\n textlen=textlen, scores=basescores, tfidfscores=basetfidfscores, bm25scores=basebm25scores,\n precscores=kprecscores, nodrugs=nodrugs)\n # if not os.path.exists(test_all):\n unknown_docs = l2r.load_docs(unknown_docs_filename)\n if meta:\n unknown_meta_docs = l2r.load_docs(unknown_docs_filename + '.meta')\n l2r.save_all_features(topics, unknown_docs, test_all, known=False, metadocs=unknown_meta_docs, terms=terms,\n term_keys=term_keys, splitdrugs=splitdrugs, targetproxy=targetproxy, journaldisease=journaldisease,\n textlen=textlen, scores=unknownscores, 
tfidfscores=unknownitftdfscores, bm25scores=unknownbm25scores,\n precscores=precscores, nodrugs=nodrugs)\n\n cv_file = cv_dir + os.sep + 'cv_sets.txt'\n if fixparts and os.path.exists(cv_file):\n cv_sets = []\n with open(cv_file, 'r') as cvsetfile:\n for line in cvsetfile:\n cv_sets.append(int(line.strip()))\n else:\n cv_sets = gen_cv_sets()\n with open(cv_file, 'w') as cvsetfile:\n for i in cv_sets:\n cvsetfile.write('{}\\n'.format(i))\n\n all_qnos = list(range(1, 31))\n qscores = {}\n pmids = {}\n for i in range(1, 11):\n model_file = model_name.format(parastr, ranker, i)\n train_filename = cv_dir + os.sep + features_template.format(parastr, i)\n test_filename = cv_dir + os.sep + unknown_template.format(unknown_base, parastr, i)\n training_set = [str(x) for x in all_qnos if cv_sets[x-1] != i]\n test_set = [str(x) for x in all_qnos if cv_sets[x-1] == i]\n\n filter_file(train_all, train_filename, training_set)\n filter_file(test_all, test_filename, test_set)\n\n # if not os.path.exists(model_file) or indriscore:\n l2r.train_model(train_filename, model_file, ranker=l2r.rankers[ranker], metric=metric, program=program, params=rparams, validation=intval)\n\n l2r.predict(model_file, test_filename, score_filename.format(parastr, ranker, i), metric=metric, program=program, params=rparams)\n\n if program == 'RankLib':\n qscores.update(l2r.load_rankings(score_filename.format(parastr, ranker, i)))\n pmids.update(l2r.load_pmids_from_features(test_filename))\n elif program == 'Quickrank':\n qpmids = l2r.load_pmids_from_features(test_filename)\n qscores.update(l2r.load_quickrank_scores(qpmids, score_filename.format(parastr, ranker, i)))\n pmids.update(qpmids)\n\n runfilename = unknown_base + 'tvs_L2R_{}_{}_{}_run.txt'.format(ranker, metric, parastr)\n l2r.save_reranked(qscores, pmids, runfilename)\n\n return runfilename\n\n\n# create a new file containing only the docs whose query number is in filter_\ndef filter_file(infilename, outfilename, filter_):\n count = 0\n print('')\n with open(infilename, 'r') as infile, open(outfilename, 'w') as outfile:\n for line in infile:\n count += 1\n print('\\rFiltering on {}'.format(count), end='')\n qno = line.split()[1].split(':')[1]\n if qno in filter_:\n outfile.write(line)\n","sub_path":"validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":10926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"347250443","text":"from flask import (\n\tBlueprint, flash, render_template, request\n)\n\nfrom app.db import get_db\n\nbp = Blueprint('math', __name__)\n\n# @bp.route('/', methods=['GET'])\n# def home():\n# \treturn render_template('index.html')\n\n@bp.route('/', methods=['GET'])\ndef game():\n\treturn render_template('game.html')\n\n@bp.route('/result', methods=['POST'])\ndef result():\n\tscore = request.form['score']\n\tdb = get_db()\n\tif not score:\n\t\tflash('Score is required.')\n\telse:\n\t\tdb.execute('INSERT INTO scores (score) VALUES (?)',\n\t\t\t(score,)\n\t\t)\n\t\tdb.commit()\n\n\ttotal = tuple(db.execute('SELECT COUNT(*) FROM scores').fetchone())[0]\n\tbeat = tuple(db.execute('SELECT COUNT(*) FROM scores WHERE score <= ?', (score,)).fetchone())[0]\n\tpercentile = (beat * 100.0) / total\n\treturn "%.1f" % percentile\n\n@bp.route('/results', methods=['GET'])\ndef results():\n\trows = []\n\tdb = get_db()\n\tfor row in db.execute('SELECT score, COUNT(*) as count FROM scores GROUP BY score ORDER BY score'):\n\t\tprint(row.keys())\n\t\tprint(tuple(row))\n\t\trows.append(tuple(row))\n\treturn str(
rows","sub_path":"app/math.py","file_name":"math.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"325643979","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='bags',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('comes_with_clubs', models.BooleanField()),\n ('club_set', models.CharField(max_length=40, null=True)),\n ],\n ),\n migrations.CreateModel(\n name='balls',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('package_size', models.DecimalField(max_digits=4, decimal_places=0)),\n ],\n ),\n migrations.CreateModel(\n name='carts',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('motor', models.CharField(max_length=40)),\n ('speed', models.CharField(max_length=40)),\n ('roof', models.BooleanField()),\n ('capacity', models.DecimalField(max_digits=4, decimal_places=0)),\n ],\n ),\n migrations.CreateModel(\n name='clothing',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('customizable', models.BooleanField()),\n ],\n ),\n migrations.CreateModel(\n name='clubs',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('customizable', models.BooleanField()),\n ('hand', models.CharField(max_length=10)),\n ('club_type', models.CharField(max_length=40)),\n ('grip', models.CharField(max_length=40)),\n ],\n ),\n migrations.CreateModel(\n name='order',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('ship_to_shipping_address', models.BooleanField()),\n ('shipping_address', models.CharField(max_length=40, null=True)),\n ('postal_code', models.CharField(max_length=10, null=True)),\n ('city', models.CharField(max_length=40, null=True)),\n ('country', models.CharField(default=b'Canada', max_length=40, null=True)),\n ('province', models.CharField(max_length=40, null=True)),\n ('tax_rate', models.DecimalField(default=0.15, max_digits=4, decimal_places=2)),\n ('customer', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),\n ],\n ),\n migrations.CreateModel(\n name='order_line',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('quantity', models.DecimalField(max_digits=4, decimal_places=0)),\n ('order', models.ForeignKey(to='products.order')),\n ],\n ),\n migrations.CreateModel(\n name='product',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('price', models.DecimalField(max_digits=4, decimal_places=2)),\n ('model_name', models.CharField(max_length=40)),\n ('brand', models.CharField(max_length=40)),\n ('decription', models.TextField(max_length=999)),\n ('photo', models.ImageField(upload_to=b'')),\n ('color', models.CharField(max_length=40, null=True)),\n ('number_in_stock', models.DecimalField(max_digits=4, decimal_places=0)),\n ],\n ),\n migrations.AddField(\n model_name='order_line',\n 
name='product',\n field=models.ForeignKey(to='products.product'),\n ),\n ]\n","sub_path":"products/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":4298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"518019406","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\n\nclass ViralstoriesSpider(scrapy.Spider):\n name = 'viralstories'\n allowed_domains = ['viralstories.in']\n start_urls = ['http://viralstories.in/']\n\n\n def parse(self, response):\n for div in response.css('article a::text').getall()[2::4]:\n yield {\n 'headline': div\n }\n \n try:\n older = response.css('.pagination a::attr(href)').getall()[0]\n except:\n older = None\n \n if older is not None: \n yield response.follow(url=older, callback=self.parse)","sub_path":"ViralStories/ViralStories/spiders/viralstories.py","file_name":"viralstories.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"607356061","text":"# coding: utf-8\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport time\nimport numpy as np\nimport pandas as pd\nimport re\nimport matplotlib.pyplot as plt\n\n##############################\n# 指定した複数銘柄の基本情報を取得する\n##############################\ndef get_basic_infos(codes):\n \"\"\" 指定した複数銘柄の基本情報を取得する。\n \n Args:\n codes (dict) : 証券コードと名称のディクショナリ\n (ex){'JR東日本':9020, 'JR西日本': 9021}\n Returns:\n DataFrame : 取得した情報を格納したDataFrame\n \"\"\"\n \n basic_df = None\n for name in codes.keys():\n \n code = codes[name]\n basic_info = get_basic_info(code)\n \n # ディクショナリからSeriesを生成\n sr = pd.Series(basic_info.values(), index=basic_info.keys(), name=name)\n\n if basic_df is None:\n basic_df = pd.DataFrame([sr])\n else:\n basic_df = basic_df.append(sr)\n \n # 1秒ディレイ\n time.sleep(1)\n \n return basic_df\n\n##############################\n# 指定した銘柄の基本情報を取得する\n##############################\ndef get_basic_info(code):\n \"\"\" 指定した銘柄の基本情報を取得する。\n \n Args:\n code (int) : 証券コード\n\n Returns:\n dict: 取得した情報\n \"\"\"\n # 指定URLのHTMLデータを取得\n url = \"https://minkabu.jp/stock/{0:d}\".format(code)\n html = requests.get(url)\n \n # BeautifulSoupのHTMLパーサーを生成\n soup = BeautifulSoup(html.content, \"html.parser\")\n \n # データ格納用のディクショナリを準備\n basic_info = {}\n \n # 全
<li>要素を抽出\n li_all = soup.find_all('li')\n \n for li in li_all:\n \n # <li>要素内の<dt>要素を抽出\n dt = li.find('dt')\n if dt is None:\n # <dt>要素がなければ処理不要\n continue\n \n # <li>要素内の<dd>要素を抽出\n dd = li.find('dd')\n \n # <dt>要素と<dd>
    要素から文字列を取得\n key = dt.text\n value = dd.text\n \n # ディクショナリに格納\n basic_info[key] = value\n \n return basic_info\n \n##############################\n# DataFrameから単位を削る。\n##############################\ndef trim_unit_from_dataframe(df):\n \"\"\" DataFrameから単位を削る。\n \n Args:\n df (DataFrame) : データフレーム\n\n Returns:\n DataFrame : 単位削除後のDataFrame\n \"\"\"\n \n # 単位を削除する関数\n def trim_unit(x):\n \n # 単位=円を削除\n yen_re = re.search(r\"(\\d{1,3}(,\\d{3})*\\.\\d+)円\", x)\n if yen_re:\n value = yen_re.group(1)\n value = value.replace(',', '')\n return np.float64(value)\n \n # 単位=%を削除\n per_re = re.search(r\"(\\d+\\.\\d+)%\", x)\n if per_re:\n value = per_re.group(1)\n return np.float64(value)\n \n # 単位=株を削除\n st_re = re.search(r\"(\\d{1,3}(,\\d{3})*)株\", x)\n if st_re:\n value = st_re.group(1)\n value = value.replace(',', '')\n return np.int64(value)\n \n # 単位=倍を削除\n times_re = re.search(r\"(\\d+\\.\\d+)倍\", x)\n if times_re:\n value = times_re.group(1)\n return np.float64(value)\n \n # 単位=百万円を削除\n million_yen_re = re.search(r\"(\\d{1,3}(,\\d{3})*)百万円\", x)\n if million_yen_re:\n value = million_yen_re.group(1)\n value = value.replace(',', '')\n value = np.int64(value) * 1000000\n return value\n \n # 単位=千株を削除\n thousand_st_re = re.search(r\"(\\d{1,3}(,\\d{3})*)千株\", x)\n if thousand_st_re:\n value = thousand_st_re.group(1)\n value = value.replace(',', '')\n value = np.int64(value) * 1000\n return value\n \n return x\n \n # 各列に対して、trim_unitを適用する\n new_df = df.copy()\n for col in df.columns:\n new_df[col] = df[col].map(lambda v : trim_unit(v))\n\n return new_df\n\n##############################\n# 複数銘柄の基本情報を整形する\n##############################\ndef reshape_basic_info(df):\n \"\"\" 複数銘柄の基本情報を整形する。\n \n Args:\n df (DataFrame) : 複数銘柄の基本情報が格納されたデータフレーム\n\n Returns:\n DataFrame : 整形後のDataFrame\n \"\"\"\n \n # DataFrameから単位を削る。\n new_df = trim_unit_from_dataframe(df)\n\n # 統計量(平均値と標準偏差)を算出する。\n statistics = pd.DataFrame({'平均値': new_df.mean(), '標準偏差': new_df.std()})\n\n # 各銘柄のデータと統計量を結合する。\n new_df = new_df.append(statistics.T)\n \n # 出来高,時価総額,発行済株数の単位を変換する。\n new_df['出来高'] = new_df['出来高'] / 1.0e+3\n new_df['時価総額'] = new_df['時価総額'] / 1.0e+12\n new_df['発行済株数'] = new_df['発行済株数'] / 1.0e+6\n new_df = new_df.rename(columns={\n '出来高' : '出来高(千株)', \n '時価総額' : '時価総額(兆円)',\n '発行済株数' : '発行済株数(百万株)', \n })\n \n # 不要な列を削除する。\n new_df = new_df.drop(columns=['始値', '高値', '安値', '単元株数', '購入金額'])\n \n return new_df\n \n##############################\n# 複数銘柄の基本情報を可視化する\n##############################\ndef visualize_basic_info(df, columns, filepath):\n \"\"\" 複数銘柄の基本情報を整形する。\n \n Args:\n df (DataFrame) : 複数銘柄の基本情報が格納されたデータフレーム\n columns (list) : 可視化する列名のリスト\n filepath(string) : 可視化したグラフを保存するファイルパス\n \n Returns:\n \"\"\"\n \n # FigureとAxesを取得\n fig = plt.figure(figsize=(9.0, 5.4))\n ax = fig.add_subplot(1,1,1)\n \n # データ数を取得\n num_data = df.shape[0] # 銘柄の数\n num_column = len(columns) # 可視化する列の数\n \n # 棒グラフを横並びで表示するためのパラメータ\n width = 0.8 / num_column # 棒グラフの幅\n xpos = np.arange(num_data) # X軸上の位置\n \n # 指定した列数分ループ\n for i in range(num_column):\n \n col = columns[i]\n x = xpos + width * i\n y = df[col]\n \n # 棒グラフを表示\n ax.bar(x, y, width=width, align='center')\n \n # X軸の目盛位置を調整し、銘柄名を表示\n labels = df.index.values\n offset = width / 2 * (num_column - 1)\n ax.set(xticks=xpos + offset, xticklabels=labels)\n \n # 補助線を描画\n ax.grid(axis='y', color='gray', ls='--')\n \n # 凡例を表示\n ax.legend(columns)\n \n # 不要な余白を削る\n plt.tight_layout()\n \n # グラフを表示\n #fig.show()\n fig.savefig(filepath)\n \n # グラフを閉じる\n plt.close()\n 
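\n\n# Usage sketch (hypothetical, shown as comments only): the ticker dict follows the\n# get_basic_infos docstring example above, the column names follow the columns that\n# reshape_basic_info renames, and the output path 'summary.png' is arbitrary.\n# basic_df = get_basic_infos({'JR東日本': 9020, 'JR西日本': 9021})\n# summary = reshape_basic_info(basic_df)\n# visualize_basic_info(summary, ['時価総額(兆円)', '発行済株数(百万株)'], 'summary.png')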
\n","sub_path":"01.stock_investment/02.industry_analysis/stinfo/company_info.py","file_name":"company_info.py","file_ext":"py","file_size_in_byte":7318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"90160903","text":"import solution2\n\n\"\"\"\nFor example, if your list is the following:\n\npbga (66)\nxhth (57)\nebii (61)\nhavc (66)\nktlj (57)\nfwft (72) -> ktlj, cntj, xhth\nqoyq (66)\npadx (45) -> pbga, havc, qoyq\ntknk (41) -> ugml, padx, fwft\njptl (61)\nugml (68) -> gyxo, ebii, jptl\ngyxo (61)\ncntj (57)\n\n...then you would be able to recreate the structure of the towers that looks like this:\n\n gyxo\n /\n ugml - ebii\n / \\\n | jptl\n |\n | pbga\n / /\ntknk --- padx - havc\n \\ \\\n | qoyq\n |\n | ktlj\n \\ /\n fwft - cntj\n \\\n xhth\nIn this example, tknk is at the bottom of the tower\n\"\"\"\n\ndef test_solve():\n testdata = (\n \"pbga (66)\\n\",\n \"xhth (57)\\n\",\n \"ebii (61)\\n\",\n \"havc (66)\\n\",\n \"ktlj (57)\\n\",\n \"fwft (72) -> ktlj, cntj, xhth\\n\",\n \"qoyq (66)\\n\",\n \"padx (45) -> pbga, havc, qoyq\\n\",\n \"tknk (41) -> ugml, padx, fwft\\n\",\n \"jptl (61)\\n\",\n \"ugml (68) -> gyxo, ebii, jptl\\n\",\n \"gyxo (61)\\n\",\n \"cntj (57)\\n\",)\n assert solution2.solve(testdata) == 60\n\n","sub_path":"2017/07/solution2_test.py","file_name":"solution2_test.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"358929408","text":"import numpy as np\nfrom scipy.optimize import curve_fit\nimport functions as func\nimport matplotlib.pyplot as plt\n\n\ndef batch_analysis(beads, data_lines, headers, time, diff_magnet, force, title, file_name, analysis_path):\n # constants\n\n kBT = 4.114 # (pN nm) - Boltzmann factor\n Lc = 1500 # contour length (bp)\n p = 50 # persistence length (nm)\n S = 1000 # stretch modulus (pN)\n L = Lc * 0.34 # contour length (nm)\n x0 = 0 # offset (nm)\n\n for bead in range(0, beads):\n\n print(\"Processing bead \" + str(bead) + \" of \" + str(beads))\n\n # load the data\n Z = []\n for x in data_lines:\n Z.append(float(x.split()[headers.index('Z' + str(bead) + ' (um)')]))\n\n # calculate drift for the individual bead\n slope = func.calc_drift_self(data_lines, headers, time, bead)\n\n # correcting drift\n Z_drift = []\n for n, t in enumerate(time):\n Z_drift.append(Z[n] - (slope / 1000) * t)\n Z = np.array(Z_drift)\n\n # split the data in pull/release-curve\n f_pull = []\n f_release = []\n z_pull = []\n z_release = []\n time_pull=[]\n time_release=[]\n\n trigger = [] # from what data point does the pulling trace start\n\n # if the differential of the magnet is positive -> pull, else -> release ('factor' since 0 does not work)\n for n, i in enumerate(diff_magnet):\n factor = max(diff_magnet / 1000)\n if i < -factor:\n trigger.append(n)\n f_pull.append(force[n])\n z_pull.append(Z[n])\n time_pull.append(time[n])\n if i > factor:\n f_release.append(force[n])\n z_release.append(Z[n])\n time_release.append(time[n])\n\n # wlc for reference\n wlc = []\n for f in f_pull:\n wlc.append(func.WLC(f, p, L, S, x0))\n\n # select data\n select_f = []\n select_z = []\n for n, f in enumerate(f_pull):\n if 20 < f < 30:\n select_f.append(f)\n select_z.append(Z[n + min(trigger)])\n\n # initial guesses\n x_init = 1\n\n # fit the WLC in fashion (x,y) - only fit offset, fix everything else\n popt, pcov = curve_fit(lambda f, x0: func.WLC(f, p, L, S, x0), select_f, select_z, p0=(x_init))\n std = np.sqrt(np.diag(pcov)) # 
returns the standard deviation\n\n x_fit = popt[0]\n\n z_pull -= x_fit # subtract fitted offset from data\n z_release -= x_fit # subtract fitted offset from data\n select_z -= x_fit\n\n a = np.percentile(z_pull, 1)\n dZ = \"{0:.3f}\".format(a - np.percentile(z_pull, 99))\n\n # plotting + saving\n\n # marker_size = 10\n #\n # plt.subplot(2, 1, 1)\n #\n # plt.title(str(title) + \" / \" + str(file_name) + \" / bead \" + str(bead) + \" (dZ = \" + str(dZ) + \" nm)\")\n # plt.scatter(time_pull, z_pull-a, facecolor='None', edgecolors=\"darkgreen\", s=marker_size)\n # plt.scatter(time_release, z_release-a, facecolor='None', edgecolors=\"darkgrey\", s=marker_size)\n # plt.ylim(0, 0.75)\n # plt.xlabel(\"Time (s)\")\n # plt.ylabel(\"Extension ($\\mu$m)\")\n #\n # plt.subplot(2, 2, 3)\n #\n # plt.plot(wlc, f_pull, color='black', zorder=100)\n # plt.scatter(z_pull, f_pull, facecolor='None', edgecolors=\"darkgreen\", s = marker_size)\n # plt.ylabel(\"Force (pN)\")\n # plt.xlabel(\"Extension ($\\mu$m)\")\n # plt.xlim(0, 0.75)\n #\n # plt.subplot(2, 2, 4)\n #\n # # plt.plot(wlc, f_pull, color='black', zorder=100)\n # plt.scatter(z_release, f_release, facecolor='None', edgecolors=\"darkgrey\", s = marker_size)\n # plt.ylabel(\"Force (pN)\")\n # plt.xlabel(\"Extension ($\\mu$m)\")\n # plt.xlim(0, 0.75)\n #\n # plt.savefig(analysis_path + \"dZ_\" + str(dZ) + \"_\" + str(title) + \"_\" + str(file_name) + \"_bead\" + str(bead) + '_subplot.png', dpi=300, bbox_inches='tight')\n #\n # plt.close()\n\n return\n","sub_path":"Chromatin/batch_analysis.py","file_name":"batch_analysis.py","file_ext":"py","file_size_in_byte":4100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"439990798","text":"from pyspark.sql import SQLContext, Window\nfrom pyspark import SparkConf, SparkContext\nimport argparse\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description = 'remove line breaks from a CSV file'\n )\n\n parser.add_argument(\n 'in_reviews_file',\n type=str,\n help='The input reviews file'\n )\n\n parser.add_argument(\n 'in_id_file',\n type=str,\n help='The input Business IDs file'\n )\n\n args = parser.parse_args()\n\n in_csv_file1 = args.in_reviews_file\n in_id_file1 = args.in_id_file\n\n in_csv_file2 = in_csv_file1.split('/')\n in_id_file2 = in_id_file1.split('/')\n \n in_csv_file3 = in_csv_file2[len(in_csv_file2) - 1]\n in_id_file3 = in_id_file2[len(in_id_file2) - 1]\n\n conf = SparkConf().setMaster(\"local\")\n sc = SparkContext(conf = conf)\n\n sqlContext = SQLContext(sc)\n\n In_review = sqlContext.read.csv(\"/Temp/{}\".format(in_csv_file3) , header=True, inferSchema=True)\n In_Subset = sqlContext.read.csv(\"/Temp/{}\".format(in_id_file3) , header=False, inferSchema=True)\n\n In_review.createOrReplaceTempView(\"In_review\")\n In_Subset.createOrReplaceTempView(\"In_Subset\")\n\n Out_DF = sqlContext.sql(\"\"\"select bse.*\n from In_review as bse\n inner join In_Subset as sub on trim(upper(bse.business_id)) = trim(upper(sub._c0))\n \"\"\")\n\n\n #Out_DF.createOrReplaceTempView(\"Out_DF\")\n #sqlContext.sql(\"\"\"select count(*) from Out_DF\"\"\").show()\n #sqlContext.sql(\"\"\"select count(*) from In_review\"\"\").show()\n\n Out_DF.coalesce(1).write.mode('overwrite').csv('/Temp/Output' , header=True , escape=\"\\\\\", quote=\"'\", encoding = 'UTF-8',sep=\"|\")\n \n 
sc.stop()\n","sub_path":"Spark-Subset/Review_Subset.py","file_name":"Review_Subset.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"447065694","text":"#!/usr/bin/env python\n\nimport argparse\nimport fastgpu\nimport os\nimport time\nimport sys\n\nfrom fastgpu import fastgpu_globals\nfastgpu_globals.set_should_disable_nas(True)\n\nINSTANCE_TYPE = 'ecs.gn6v-c8g1.2xlarge' # V100\n#INSTANCE_TYPE = 'ecs.gn6v-c8g1.16xlarge'\n#INSTANCE_TYPE = 'ecs.gn5-c8g1.14xlarge'\nNUM_GPUS = 1\nIMAGE_TYPE = 'aiacc'\nCONDA_ENVS = [\n \"mxnet_1.4.1_cu10.0_py36\",\n \"mxnet_1.4.1_cu10.1_py36\",\n \"mxnet_1.5.0_cu10.0_py36\",\n \"mxnet_1.5.0_cu10.1_py36\",\n \"mxnet_1.6.0_cu10.1_py36\",\n \"mxnet_1.6.0_cu10.2_py36\",\n \"mxnet_1.7.0_cu10.0_py36\",\n \"mxnet_1.7.0_cu10.1_py36\",\n \"mxnet_1.7.0_cu10.2_py36\",\n \"mxnet_1.9.0_cu10.1_py36\",\n \"mxnet_1.9.0_cu10.2_py36\",\n \"mxnet_1.9.0_cu11.0_py36\"\n]\n\nfastgpu.set_backend('aliyun')\nparser = argparse.ArgumentParser()\nparser.add_argument('--name', type=str, default='perseus-faster-rcnn',\n help=\"name of the current run, used for machine naming and tensorboard visualization\")\nparser.add_argument('--machines', type=int, default=1,\n help=\"how many machines to use\")\nargs = parser.parse_args()\n\ndef main():\n start_time = time.time()\n # 1. Create infrastructure\n supported_regions = ['cn-huhehaote', 'cn-zhangjiakou', 'cn-shanghai', 'cn-hangzhou', 'cn-beijing']\n assert fastgpu.get_region() in supported_regions, f\"required AMI {IMAGE_NAME} has only been made available in regions {supported_regions}, but your current region is {fastgpu.get_region()} (set $ALYUN_DEFAULT_REGION)\"\n \n fastgpu_globals.set_should_disable_nas(True)\n\n job = fastgpu.make_job(name=args.name,\n run_name=f\"{args.name}-{args.machines}\",\n #image_name='aiacc-dlimg-centos7:1.3.0.post3',\n num_tasks=args.machines,\n instance_type=INSTANCE_TYPE,\n spot=True,\n disable_nas=True,\n image_type=IMAGE_TYPE\n )\n # 2. Upload perseus faster-rcnn code.\n job.upload('gluon-cv')\n job.run('conda activate mxnet_1.9.0_cu11.0_py36')\n\n # 2.5(alternative) install nccl-2.9.6\n # job.run(\"sudo apt update\")\n # job.run(\"sudo apt install -y software-properties-common\")\n # job.run(\"wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-ubuntu2004.pin\")\n # job.run(\"sudo mv cuda-ubuntu2004.pin /etc/apt/preferences.d/cuda-repository-pin-600\")\n # job.run(\"sudo apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/3bf863cc.pub\")\n # job.run('sudo add-apt-repository \"deb http://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/ /\"')\n # job.run(\"sudo apt-get update\")\n # job.run(\"sudo apt install -y libnccl2=2.9.6-1+cuda11.0 libnccl-dev=2.9.6-1+cuda11.0\")\n\n\n # 2.5(old) nccl install\n job.run(\"wget https://ali-perseus-release.oss-cn-huhehaote.aliyuncs.com/AIACC-Dev/zijian/nccl_2.9.6-1%2Bcuda11.0_x86_64.txz -O nccl_2.9.6-1+cuda11.0_x86_64.txz\")\n job.run(\"tar -Jxvf nccl_2.9.6-1+cuda11.0_x86_64.txz\")\n job.run(\"cp -f nccl_2.9.6-1+cuda11.0_x86_64/include/*.h /usr/local/cuda/include/\")\n job.run(\"cp -f nccl_2.9.6-1+cuda11.0_x86_64/lib/libnccl* /usr/local/cuda/lib64/\")\n \n # 3. Download pretrain model and dataset.\n job.run('if [ ! 
-d /root/mscoco ];then mkdir /root/mscoco;fi')\n job.run('cd /root/mscoco && wget -c -t 10 http://public-ai-datasets.oss-cn-huhehaote.aliyuncs.com/coco2017/annotations/annotations_trainval2017.zip')\n job.run('wget -c -t 10 http://public-ai-datasets.oss-cn-huhehaote.aliyuncs.com/coco2017/zips/train2017.zip')\n job.run('wget -c -t 10 http://public-ai-datasets.oss-cn-huhehaote.aliyuncs.com/coco2017/zips/test2017.zip')\n job.run('wget -c -t 10 http://public-ai-datasets.oss-cn-huhehaote.aliyuncs.com/coco2017/zips/val2017.zip')\n\n job.run('mkdir -p /root/.mxnet/models')\n job.run('cd /root/.mxnet/models && wget -c -t 10 http://public-ai-datasets.oss-cn-huhehaote.aliyuncs.com/pretrain_model/resnet50_v1b-0ecdba34.params')\n\n # 4. install requirements.\n job.run('chmod -R 744 /root/gluon-cv/')\n job.run('cd /root/gluon-cv/')\n job.run('pip install -r requirements.txt')\n \n job.run('python mscoco.py')\n\n # 5. Run the training job.\n hosts = [task.ip + f':{NUM_GPUS}' for task in job.tasks]\n host_str = ','.join(hosts)\n\n mpi_cmd = ['mpirun --allow-run-as-root',\n f'-np {args.machines * NUM_GPUS}',\n f'--npernode {NUM_GPUS}',\n f'--host {host_str}',\n '--bind-to none',\n '-x NCCL_DEBUG=INFO',\n '-x PATH',\n '-x LD_LIBRARY_PATH',]\n\n insightface_cmd = './train-perseus.sh'\n \n cmd = mpi_cmd \n cmd = \" \".join(cmd) + \" \" + insightface_cmd\n job.tasks[0].run(f'echo {cmd} > {job.logdir}/task-cmd')\n job.tasks[0].run(cmd)\n print(f\"Logging to {job.logdir}\")\n\n eclapse_time = time.time() - start_time\n print(f'training deploy time is: {eclapse_time} s.')\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"mxnet/faster-rcnn/train_faster_rcnn.py","file_name":"train_faster_rcnn.py","file_ext":"py","file_size_in_byte":4951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"15777300","text":"import pymongo\n\n\n\ndef csv_commit(\n DB: pymongo.MongoClient,\n reports: list\n ) -> None:\n '''\n Create and update the csv_commit charts\n '''\n\n for report in reports['capacity']:\n result = {\n 'cluster' : report['cluster'],\n 'type' : 'csv_commit',\n 'commit' : [],\n 'volumes' : []\n }\n\n for csv in report['CSV']:\n result['commit'].append(csv['LUN Commit (%)'])\n result['volumes'].append(csv['CSV Name'])\n \n DB['compute_charts'].find_one_and_update(\n {\n 'cluster' : report['cluster'],\n 'type' : 'csv_commit'\n },\n {\n '$set' : result,\n },\n upsert=True\n )\n\n\ndef cpu_commit(\n DB: pymongo.MongoClient,\n reports: list\n ) -> None:\n '''\n Create and update the cpu_commit charts\n '''\n\n for report in reports['capacity']:\n result = {\n 'cluster' : report['cluster'],\n 'type' : 'cpu_commit',\n 'commit' : [],\n 'host' : []\n }\n\n for cpu in report['Processor']:\n result['commit'].append(cpu['CPU Oversubscription (%)'])\n result['host'].append(cpu['Host Name'])\n \n DB['compute_charts'].find_one_and_update(\n {\n 'cluster' : report['cluster'],\n 'type' : 'cpu_commit'\n },\n {\n '$set' : result,\n },\n upsert=True\n )\n\n\ndef ram_commit(\n DB: pymongo.MongoClient,\n reports: list\n ) -> None:\n '''\n Create and update the ram_commit charts\n '''\n\n \n ram_commit = {}\n for report in reports['guest']:\n ram = {}\n for vm in report['VM Summary']:\n try:\n ram[vm['Current Host']] += float(vm['RAM (GB)'])\n except KeyError:\n ram[vm['Current Host']] = float(vm['RAM (GB)'])\n ram_commit[report['cluster']] = ram\n \n for report in reports['capacity']:\n cluster = report['cluster']\n result = {\n 'cluster' : cluster,\n 'type' : 'ram_commit',\n 
'commit' : [],\n 'host' : []\n }\n total = float(report['Memory'][0]['Total Memory (GB)'])\n nodes = len(ram_commit[cluster])\n node_ram = total / nodes\n for node in ram_commit[cluster]:\n node_commit = ram_commit[cluster][node]\n node_ram_commt_percentage = round((node_ram/node_commit) * 100)\n result['commit'].append(node_ram_commt_percentage)\n result['host'].append(node)\n\n DB['compute_charts'].find_one_and_update(\n {\n 'cluster' : report['cluster'],\n 'type' : 'ram_commit'\n },\n {\n '$set' : result,\n },\n upsert=True\n )\n\n\n\ndef storage_tree(\n DB: pymongo.MongoClient,\n reports: dict\n ) -> None:\n '''\n Create and update the storage tree chart\n '''\n\n clusters = {}\n\n for report in reports['capacity']:\n clusters[report['cluster']] = {\n 'size' : 0\n }\n for csv in report['CSV']:\n csv_size = round(float(csv['Total Size (GB)']))\n clusters[report['cluster']]['size'] += csv_size\n clusters[report['cluster']][csv['CSV Name']] = {}\n clusters[report['cluster']][csv['CSV Name']]['size'] = csv_size\n\n for report in reports['guest']:\n for vm in report['VM Summary']:\n vm_csv = vm['Storage Path'].split('\\\\')[2]\n clusters[report['cluster']][vm_csv][vm['VM Name']] = {}\n try:\n clusters[report['cluster']][vm_csv][vm['VM Name']]['size'] = round(float(vm['Max Total Disk Size (GB)']))\n except ValueError:\n clusters[report['cluster']][vm_csv][vm['VM Name']]['size'] = 0\n for disk in report['VM Disk Detail']:\n disk_csv = disk['Disk Path'].split('\\\\')[2]\n disk_csv = disk_csv.upper()\n disk_name = disk['Disk Path'].split('\\\\')[-1]\n disk_name = disk_name.lower()\n try:\n clusters[report['cluster']][disk_csv][disk['VM Name']][disk_name] = {}\n clusters[report['cluster']][disk_csv][disk['VM Name']][disk_name]['size'] = round(float(disk['Maximum Disk Size (GB)']))\n except KeyError:\n pass\n\n for cluster in clusters:\n cluster_tree = {\n 'name' : cluster,\n 'size' : clusters[cluster]['size'],\n 'children' : []\n }\n for csv in clusters[cluster]:\n if csv == 'size':\n continue\n csv_tree = {\n 'name' : csv,\n 'size' : clusters[cluster][csv]['size'],\n 'children' : []\n }\n\n for vm in clusters[cluster][csv]:\n if vm == 'size':\n continue\n vm_tree = {\n 'name' : vm,\n 'size' : clusters[cluster][csv][vm]['size'],\n 'children' : []\n }\n\n for disk in clusters[cluster][csv][vm]:\n if disk == 'size':\n continue\n disk_tree = {\n 'name' : disk,\n 'size' : clusters[cluster][csv][vm][disk]['size'],\n 'value' : clusters[cluster][csv][vm][disk]['size']\n }\n vm_tree['children'].append(disk_tree)\n csv_tree['children'].append(vm_tree)\n cluster_tree['children'].append(csv_tree)\n\n DB['compute_charts'].find_one_and_update(\n {\n 'cluster' : cluster,\n 'type' : 'storage_tree'\n },\n {\n '$set' : cluster_tree,\n },\n upsert=True\n )","sub_path":"backend/mm_compute/src/charts.py","file_name":"charts.py","file_ext":"py","file_size_in_byte":6015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"499400499","text":"__author__ = \"Teeraphat Kullanankanjana\"\r\n__version__ = \"Prototype 0.0.0\"\r\n\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nfrom tkinter import messagebox as msg\r\nfrom datetime import date\r\n\r\n\r\ndef on_click():\r\n try:\r\n room_number, room_price, elect_price, water_price, service, year = e1.get(), e2.get(), e3.get(), e4.get(), e5.get(), e6.get()\r\n month_name, internet_number = combo1.get(), combo2.get()\r\n if int(room_number) < 0 or int(room_price) < 0 or int(elect_price) < 0 or int(water_price) < 0 or 
\\\r\n int(service) < 0 or int(year) < 0 or int(internet_number) < 0 or month_name not in month_list_TH:\r\n msg.showerror(\"แจ้งเตือน\", \"บันทึกรายการล้มเหลว\\nการป้อนค่าไม่ถูกต้อง\\nกรุณาลองใหม่\")\r\n else:\r\n file_name = \"ใบแจ้งหนี้เดือน\" + str(month_name) + str(year) + \".txt\"\r\n fo = open(str(file_name), \"w+\", encoding=\"utf-8\")\r\n fo.write(\"\\t\\t\\tใบแจ้งหนี้/ใบเสร็จรับเงิน\\nหมายเลขห้อง: \" + str(room_number) + \"\\t\\t\\t\\t\\t\"+\"ยอดชำระเดือน: \"+str(month_name)+\" \"+str(year)+\"\\nลงวันที่บันทึก: \"+str(date.today().strftime(\"%b-%d-%Y\"))+\"\\n\")\r\n fo.write(\"____________________\\n\")\r\n fo.write(\"รายการที่ต้องชำระ\\n\")\r\n fo.write(\"ลำดับที่\\t\\t\\tรายการ\\t\\t\\t\\t\\t\\tราคา(บาท)\\n\")\r\n fo.write(\"1\\t\\t\\t\\tค่าห้อง\"+\"\\t\\t\\t\\t\\t\\t\"+str(room_price)+\"\\n\")\r\n fo.write(\"2\\t\\t\\t\\tค่าไฟฟ้า\"+\"\\t\\t\\t\\t\\t\\t\"+str(elect_price)+\"\\n\")\r\n fo.write(\"3\\t\\t\\t\\tค่าไฟฟ้า\"+\"\\t\\t\\t\\t\\t\\t\"+str(water_price)+\"\\n\")\r\n fo.write(\"4\\t\\t\\t\\tค่าบริการ\"+\"\\t\\t\\t\\t\\t\\t\"+str(water_price)+\"\\n\")\r\n fo.write(\"5\\t\\t\\t\\tค่าอินเตอร์เน็ต\"+\"\\t\\t\\t\\t\\t\"+str(int(internet_number)*250)+\"\\n\")\r\n fo.write(\"____________________\\nรวมทั้งสิ้น \"+str(int(room_price)+int(elect_price)+int(water_price)+int(service)+int(internet_number)*250)+\" บาท\\n\")\r\n fo.close()\r\n msg.showinfo(\"แจ้งเตือน\", \"บันทึกรายการเสร็จสิ้น\")\r\n except(ValueError or TypeError):\r\n msg.showerror(\"พบข้อผิดพลาด\", \"บันทึกรายการล้มเหลว\\nการป้อนค่าไม่ถูกต้อง\\nกรุณาลองใหม่\")\r\n\r\n\r\nroot = Tk()\r\nroot.title(\"Recorder\")\r\nroot.resizable(width=FALSE, height=FALSE)\r\nmonth_list_TH = (\"มกราคม\", \"กุมภาพันธ์\", \"มีนาคม\", \"เมษายน\", \"พฤษภาคม\", \"มิถุนายน\",\r\n \"กรกฎาคม\", \"สิงหาคม\", \"กันยายน\", \"ตุลาคม\", \"พฤศจิกายน\", \"ธันวาคม\")\r\nnumber_internet_list = (\"0\", \"1\", \"2\", \"3\", \"4\",\r\n \"5\", \"6\", \"7\", \"8\", \"9\", \"10\")\r\nL1 = LabelFrame(root).grid()\r\nL2 = LabelFrame(root).grid()\r\nL3 = LabelFrame(root).grid()\r\nLabel(L1, text=\"\\nข้อมูลทั่วไป\").grid(row=0, sticky=W)\r\nLabel(L1, text=\"หมายเลขห้อง\").grid(row=1, sticky=W)\r\nLabel(L1, text=\"กรุณาเลือกเดือนที่ชำระ\").grid(row=2, sticky=W)\r\nLabel(L1, text=\"ปีที่ต้องชำระ\").grid(row=3, sticky=W)\r\nLabel(L2, text=\"\\nรายการค่าชำระ\").grid(row=4, sticky=W)\r\nLabel(L2, text=\"1.ค่าห้อง(บาท)\").grid(row=5, sticky=W)\r\nLabel(L2, text=\"2.ค่าไฟฟ้า(บาท)\").grid(row=6, sticky=W)\r\nLabel(L2, text=\"3.ค่าน้ำ(บาท)\").grid(row=7, sticky=W)\r\nLabel(L2, text=\"4.ค่าบริการ(บาท)\").grid(row=8, sticky=W)\r\nLabel(L2, text=\"5.ค่าอินเตอร์เน็ต(ใบ)\").grid(row=9, sticky=W)\r\n\r\ne1 = Entry(L1)\r\ne1.grid(row=1, column=1) # room number\r\ne6 = Entry(L1)\r\ne6.grid(row=3, column=1) # year\r\ne2 = Entry(L2)\r\ne2.grid(row=5, column=1) # room price\r\ne3 = Entry(L2)\r\ne3.grid(row=6, column=1) # elect price\r\ne4 = Entry(L2)\r\ne4.grid(row=7, column=1) # water price\r\ne5 = Entry(L2)\r\ne5.grid(row=8, column=1) # service\r\n\r\ncombo1 = ttk.Combobox(L1, textvariable=\"month_list_TH\", width=17)\r\ncombo1[\"values\"] = month_list_TH\r\ncombo1.grid(row=2, column=1)\r\n\r\ncombo2 = ttk.Combobox(L1, textvariable=number_internet_list, width=17)\r\ncombo2[\"values\"] = number_internet_list\r\ncombo2.grid(row=9, column=1)\r\n\r\nb1 = Button(L3, text=\"\\nบันทึกรายการ\\n\", command=on_click).grid(row=10)\r\nroot.mainloop()\r\n","sub_path":"[GUI]Apartment Record.py","file_name":"[GUI]Apartment 
Record.py","file_ext":"py","file_size_in_byte":4927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"395828479","text":"from PIL import Image\n\n\ndef copyImage(inputImage, imageWidth, imageHeight):\n copyImageOutput = Image.new('RGB', (imageWidth, imageHeight), 'white')\n\n for i in range(imageWidth):\n for j in range(imageHeight):\n pixelColors = inputImage.getpixel((i, j))\n copyImageOutput.putpixel((i, j), pixelColors)\n\n copyImageOutput.save(\"copy.png\")\n\n\n\ndef main():\n # Change the path in Line 6 to the path of the image you want to use as input \n # for Windows users the path specify the path as \"c:\\\\users\\\\alark1\\\\Pictures\\\\usfca.png\"\n inputImage = Image.open('usfca_logo.png')\n imageWidth, imageHeight = inputImage.size\n initialAnswer = 0\n while initialAnswer >= 0 and initialAnswer <=10:\n print(\"\"\" What would you like to do?\n 1. Copy image\n 2. Flip the image Vertically\n 3. Flip the image Horizontally\n 4. Brighten Image\n 5. Darken Image\n 6. Scroll Image Horizontally\n 7. Scroll Image Vertically\n 8. Grey Scale Image\n 9. Rotate\n 10. Swap Corners\n \"\"\")\n initialAnswer=int(input(\"Enter the corresponding number:\"))\n if initialAnswer == 1:\n copyImage(inputImage, imageWidth, imageHeight)\n img = Image.open(\"copy.png\")\n img.show()\n\n if initialAnswer ==2:\n img = Image.open('vflip.png')\n img.show()\n if initialAnswer == 3:\n img = Image.open('hflip.png')\n img.show()\n if initialAnswer== 4:\n amount = 0\n print(\"Enter a number between 0 and 1. Higher is brighter.\")\n amount = float(input(\"Here:\"))\n lighten(inputImage,imageWidth,imageHeight,amount)\n img = Image.open(\"light.png\")\n img.show()\n if initialAnswer == 5:\n amount = 0\n print(\"Enter a number between 0 and 1. 
Higher is darker \")\n amount = float(input(\"Here:\"))\n darken(inputImage,imageWidth,imageHeight,amount)\n img = Image.open(\"darken.png\")\n img.show()\n if initialAnswer == 6:\n amount = 0\n print(\"Enter a number of pixels to scroll.\")\n numpixels = int(input(\"Here:\"))\n scrollHorizontal(inputImage,imageWidth,imageHeight,numpixels)\n img = Image.open(\"scrollhorizontal.png\")\n img.show()\n if initialAnswer == 7:\n amount = 0\n numpixels = int(input(\"Here:\"))\n scrollVertical(inputImage, imageWidth, imageHeight, numpixels)\n img = Image.open(\"scrollvertical.png\")\n img.show()\n if initialAnswer == 8:\n\n greyscale(inputImage,imageWidth,imageHeight)\n img = Image.open(\"greyscale.png\")\n img.show()\n if initialAnswer == 9:\n\n rotate(inputImage,imageWidth,imageHeight)\n img = Image.open(\"rotate.png\")\n img.show()\n if initialAnswer == 10:\n\n swapCorners(inputImage,imageWidth,imageHeight)\n img = Image.open(\"swapcorners.png\")\n img.show()\n\n\n\n else:\n\n print(\"\"\"\n\n >>>>Enter a number 1 through 10<<<<\n\n \"\"\")\n main()\n\n\n# Creates a copy of an image given the image variable, its width, and height\ndef copyImage(inputImage, imageWidth, imageHeight):\n copyImageOutput = Image.new('RGB', (imageWidth, imageHeight), 'white')\n\n for i in range(imageWidth):\n for j in range(imageHeight):\n pixelColors = inputImage.getpixel((i, j))\n copyImageOutput.putpixel((i, j), pixelColors)\n\n copyImageOutput.save(\"copy.png\")\n\n#Flips the image horizontally\ndef flipHorizontal(inputImage,imageWidth,imageHeight):\n flipHorizontalOutput = Image.new('RGB', (imageWidth, imageHeight), 'white')\n for j in range(imageHeight):\n for i in range(imageWidth):\n pixelcoordinate = inputImage.getpixel((i,j))\n flipHorizontalOutput.putpixel(((imageWidth-i)-1,j),pixelcoordinate)\n\n\n flipHorizontalOutput.save(\"hflip.png\")\n\n\n#Flips the image vertically\ndef flipVertical(inputImage,imageWidth,imageHeight):\n flipverticalOutput = Image.new('RGB', (imageWidth, imageHeight), 'white')\n for j in range(imageHeight):\n for i in range(imageWidth):\n pixelcoordinate = inputImage.getpixel((i,j))\n flipverticalOutput.putpixel((i,(imageHeight-j)-1),pixelcoordinate)\n\n\n flipverticalOutput.save(\"vflip.png\")\n\n\n#lightens the image\ndef lighten(inputImage,imageWidth,imageHeight,amount):\n lightenImageOutput = Image.new('RGB',(imageWidth,imageHeight),'white')\n for j in range(imageHeight):\n for i in range(imageWidth):\n pixel = inputImage.getpixel((i,j))\n red = pixel[0]\n green = pixel[1]\n blue= pixel[2]\n newred = (1-amount)* red + amount *255\n newgreen =(1-amount)* green + amount *255\n newblue= (1-amount)* blue + amount *255\n newpixel = (int(newred),int(newgreen), int(newblue))\n\n lightenImageOutput.putpixel((i,j), newpixel)\n\n lightenImageOutput.save(\"light.png\")\n\n\ndef darken(inputImage, imageWidth, imageHeight, amount):\n darkenImageOutput= Image.new('RGB',(imageWidth,imageHeight),'white')\n for j in range(imageHeight):\n for i in range(imageWidth):\n pixel = inputImage.getpixel((i,j))\n red = pixel[0]\n green = pixel[1]\n blue= pixel[2]\n newred = (1-amount)* red\n newgreen =(1-amount)* green\n newblue= (1-amount)* blue\n newpixel = (int(newred),int(newgreen), int(newblue))\n\n darkenImageOutput.putpixel((i,j), newpixel)\n\n darkenImageOutput.save(\"darken.png\")\n \n\n\n\n\ndef scrollHorizontal(inputImage, imageWidth, imageHeight, numpixels):\n scrollHOutput = Image.new('RGB', (imageWidth, imageHeight), 'white')\n\n \n #part B of the image\n for i in 
range(numpixels,imageWidth):\n for j in range(0,imageHeight):\n\n pixelColors = inputImage.getpixel((i, j))\n scrollHOutput.putpixel((i-numpixels, j), pixelColors)\n #part A of the image\n for i in range(0,numpixels):\n\n for j in range(0,imageHeight):\n\n pixelColors = inputImage.getpixel((i, j))\n scrollHOutput.putpixel((imageWidth-numpixels+i, j), pixelColors)\n\n\n\n scrollHOutput.save(\"scrollhorizontal.png\")\n\ndef scrollVertical(inputImage, imageWidth, imageHeight, numpixels):\n scrollVOutput = Image.new('RGB', (imageWidth, imageHeight), 'white')\n\n \n #part B of the image\n for i in range(numpixels,imageHeight):\n for j in range(0,imageWidth):\n\n pixelColors = inputImage.getpixel((j,i))\n scrollVOutput.putpixel((j, i-numpixels), pixelColors)\n\n #part A of the image\n for i in range(0,numpixels):\n for j in range(0,imageWidth):\n\n pixelColors = inputImage.getpixel((j, i))\n scrollVOutput.putpixel((j,(i+(imageHeight-numpixels))), pixelColors)\n\n\n\n scrollVOutput.save(\"scrollvertical.png\")\n\n\ndef greyscale(inputImage, imageWidth, imageHeight):\n greyscaleOutput= Image.new('RGB',(imageWidth,imageHeight),'white')\n for j in range(imageHeight):\n for i in range(imageWidth):\n pixel = inputImage.getpixel((i,j))\n red = pixel[0]\n green = pixel[1]\n blue = pixel[2]\n newred = red *.3\n newgreen = green * .59\n newblue = blue* .11\n greypixel = (int(newred)+int(newgreen)+int(newblue))\n newpixel = (int(greypixel),int(greypixel),int(greypixel))\n \n greyscaleOutput.putpixel((i,j),newpixel)\n\n greyscaleOutput.save(\"greyscale.png\")\n\n\ndef swapCorners(inputImage,imageWidth,imageHeight):\n swapCornersOutput= Image.new('RGB',(imageWidth,imageHeight),'white')\n\n for i in range(imageWidth):\n for j in range(imageHeight):\n pixelColors= inputImage.getpixel((i,j))\n\n cut_height = imageHeight//2\n cut_width = imageWidth//2\n\n if j< cut_height:\n new_height = j + cut_height\n if j >= cut_height:\n new_height = j - cut_height\n if i < cut_width: \n new_width = i+ cut_width\n elif i>= cut_width:\n new_width = i - cut_width\n\n swapCornersOutput.putpixel((new_width,new_height), pixelColors)\n\n \n\n swapCornersOutput.save(\"swapcorners.png\")\n\n\ndef rotate(inputImage, imageWidth, imageHeight):\n newWidth = imageHeight\n newHeight= imageWidth\n\n rotateOutput = Image.new('RGB',(imageHeight,imageWidth),'white')\n\n for i in range(newWidth):\n for j in range(newHeight):\n pixelColors = inputImage.getpixel((j,i))\n rotateOutput.putpixel((i,newHeight -1 -j), pixelColors)\n\n rotateOutput.save(\"rotate.png\")\nmain()\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"project2/part2/project2part2.py","file_name":"project2part2.py","file_ext":"py","file_size_in_byte":8982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"231258205","text":"from tkinter import *\r\nfrom random import choices\r\nfrom operator import itemgetter\r\nfrom functools import partial\r\n\r\nrootCol = \"lightgoldenrodyellow\"\r\nroot = Tk()\r\nroot.geometry(\"500x400\")\r\nroot.title(\"Mastermind Game\")\r\nroot.resizable(False,False)\r\nroot.configure(bg=rootCol)\r\n\r\n# Constants\r\npossibleColours = [\"Blue\", \"White\", \"Orange\", \"Green\", \"Red\", \"Yellow\"]\r\nfontSize = 20\r\nfontType = \"Arial\"\r\nfontInfo = (fontType, fontSize)\r\nbgHex = \"#ADBEC9\" # Background hex colour\r\nfolder = \"Mastermind_Assets\"\r\nleaderboardFile = f\"{folder}/leaderboard.txt\"\r\nrows = 12\r\nrulesString = f'''Mastermind Rules\r\n - Computer generates a random 4 colour code (colours 
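The two-loop scroll functions above split the image into "part A" and "part B"; the same wrap-around can be written in one pass with modular arithmetic. A sketch under the same assumptions (PIL RGB image, per-pixel access as in the menu program):

from PIL import Image

def scroll_horizontal(input_image, num_pixels):
    # Single-pass wrap-around: source column i lands at (i - num_pixels) mod width,
    # which covers both halves of the original two-loop version.
    width, height = input_image.size
    output = Image.new('RGB', (width, height), 'white')
    for i in range(width):
        for j in range(height):
            output.putpixel(((i - num_pixels) % width, j),
                            input_image.getpixel((i, j)))
    return output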
can repeat).\r\n - The player chooses four code pegs per attempt to try and crack the code.\r\n - If a colour is correct, but in the wrong place, a white peg will be placed into the small holes.\r\n - If a colour is correct, and in the right place, a black peg will be placed into the small holes.\r\n - If duplicate colours are guessed, the max amount of pegs placed (for that colour) correspond to the amount of times that colour appears in the code.\r\n - If the player is unable to do this within {rows} tries, they lose.\r\n - If they're able to crack the code, they win!\r\n'''\r\n\r\nloseString = '''\r\nUnfortunately, %s, you lost!\r\nThe correct solution was:\r\n'''\r\n\r\nwinString = '''\r\nCongratulations, %s, you won!\r\nIt took you %d %s!\r\n''' \r\n\r\nimages = {\r\n \"PegHole\": PhotoImage(file=f\"{folder}/pegHole.png\"),\r\n \"MiniHole\": PhotoImage(file=f\"{folder}/feedbackPegHole.png\"),\r\n \"MiniHole_Black\": PhotoImage(file=f\"{folder}/feedbackPegHole_B.png\"),\r\n \"MiniHole_White\": PhotoImage(file=f\"{folder}/feedbackPegHole_W.png\"),\r\n \"Blue_Peg\": PhotoImage(file=f\"{folder}/BluePeg.png\"),\r\n \"White_Peg\": PhotoImage(file=f\"{folder}/WhitePeg.png\"),\r\n \"Orange_Peg\": PhotoImage(file=f\"{folder}/OrangePeg.png\"),\r\n \"Green_Peg\": PhotoImage(file=f\"{folder}/GreenPeg.png\"),\r\n \"Red_Peg\": PhotoImage(file=f\"{folder}/RedPeg.png\"),\r\n \"Yellow_Peg\": PhotoImage(file=f\"{folder}/YellowPeg.png\"),\r\n \"PegHole_Blue\": PhotoImage(file=f\"{folder}/pegHole_Blue.png\"),\r\n \"PegHole_Red\": PhotoImage(file=f\"{folder}/pegHole_Red.png\"),\r\n \"PegHole_Orange\": PhotoImage(file=f\"{folder}/pegHole_Orange.png\"),\r\n \"PegHole_White\": PhotoImage(file=f\"{folder}/pegHole_White.png\"),\r\n \"PegHole_Yellow\": PhotoImage(file=f\"{folder}/pegHole_Yellow.png\"),\r\n \"PegHole_Green\": PhotoImage(file=f\"{folder}/pegHole_Green.png\") \r\n }\r\n\r\n\r\n# These change during the code (variables)\r\ncurrRow = 0\r\nplayerName = StringVar()\r\nrowList = []\r\npegList = []\r\nsolutionList = []\r\ncorrectCode = choices(possibleColours, k=4) # generate code\r\n\r\nclass PegHole(): \r\n def __init__(self, position, colour, parent):\r\n self.colour = colour\r\n if self.colour != None:\r\n self.image = images[f\"PegHole_{colour}\"]\r\n backGround = rootCol\r\n else:\r\n self.image = images[\"PegHole\"]\r\n backGround = bgHex\r\n self.label = Label(parent, image=self.image, bg=backGround)\r\n if self.colour == None:\r\n self.label.bind(\"<1>\", lambda e: self.removeColour())\r\n self.label.place(relx=position/5)\r\n \r\n def changeColour(self, colour):\r\n self.colour = colour\r\n self.image = images[f\"PegHole_{colour}\"]\r\n self.label[\"image\"] = self.image\r\n\r\n def removeColour(self):\r\n self.image = images[\"PegHole\"]\r\n self.label[\"image\"] = self.image\r\n self.colour = None\r\n checkButton.place_forget()\r\n\r\ndef placePNEntries(): # place PlayerName entries\r\n playerNameLabel.place(relx=0.5, rely=0.4, anchor=\"s\")\r\n playerNameInput.place(relx=0.5, rely=0.5, anchor=CENTER)\r\n startButton.place(relx=0.5, rely=0.65, anchor=\"n\")\r\n rulesButton.place(relx=0.1, rely=0.8)\r\n # hide rule-related widgets\r\n backButton.place_forget()\r\n rulesLabel.place_forget()\r\n \r\n \r\ndef hidePNEntries(showRules): # hides playerName widgets, can show rule widgets\r\n # hide PlayerName Entries\r\n playerNameLabel.place_forget()\r\n playerNameInput.place_forget()\r\n startButton.place_forget()\r\n rulesButton.place_forget()\r\n if showRules:\r\n # show rule-related widgets\r\n 
backButton.place(relx=0.1, rely=0.8)\r\n rulesLabel.place(relx=0.5, rely=0.4, anchor = CENTER)\r\n\r\ndef pegMoving(pegImage, event):\r\n duplicateHolder[\"image\"] = pegImage\r\n x, y = root.winfo_pointerxy()\r\n x -= root.winfo_rootx()\r\n y -= root.winfo_rooty()\r\n duplicateHolder.place(x=x,y=y, anchor=CENTER)\r\n \r\ndef pegDropped(colour, event):\r\n # find the widget under the cursor\r\n duplicateHolder.place_forget()\r\n x,y = event.widget.winfo_pointerxy()\r\n target = event.widget.winfo_containing(x,y)\r\n try:\r\n # fixed row input\r\n x, y = int(target.winfo_x()/60), currRow\r\n except: pass\r\n\r\n # not correct widget, so change last one in row\r\n if target.winfo_width() != 64:\r\n y = currRow\r\n x = -2\r\n # get next empty spot\r\n for i, obj in enumerate(rowList[y]):\r\n if type(obj) == list:\r\n break\r\n elif not obj.colour:\r\n x = i\r\n break\r\n\r\n # update row colours\r\n rowList[y][x].changeColour(colour)\r\n currCode = []\r\n for obj in rowList[y]:\r\n if type(obj) == list or not obj.colour:\r\n break\r\n else:\r\n currCode.append(obj.colour)\r\n\r\n # check whether they're able to validate their code\r\n if len(currCode) == 4:\r\n checkButton.place(relx=0.02, rely=0.83)\r\n checkButton[\"command\"] = lambda: validateCode(currCode)\r\n else:\r\n checkButton.place_forget()\r\n \r\ndef validateCode(currCode):\r\n global currRow\r\n checkButton.place_forget()\r\n for obj in rowList[currRow]:\r\n if type(obj) != list:\r\n obj.label.unbind(\"<1>\")\r\n currRow += 1\r\n if currCode == correctCode:\r\n # won\r\n gameOver(True)\r\n elif currRow >= rows:\r\n # lost\r\n gameOver(False)\r\n else:\r\n # still playing\r\n ''' feedback format\r\n \"R\" = Correct colour, wrong place (White)\r\n \"P\" = Correct Colour, Right Place (Black)\r\n otherwise Wrong colour, wrong place (no image change)\r\n '''\r\n feedback = []\r\n remCorrectCode = [] # remaining correct code\r\n appeared = {}\r\n for index, colour in enumerate(currCode):\r\n # Check whether both correct\r\n if colour == correctCode[index]:\r\n feedback.append(\"P\")\r\n else: # else, collect incorrectly placed colours\r\n try:\r\n appeared[colour] += 1\r\n except:\r\n appeared[colour] = 1\r\n remCorrectCode.append(correctCode[index])\r\n \r\n # check incorrectly placed colours for correct colours\r\n for colour in appeared:\r\n if appeared[colour] > remCorrectCode.count(colour): # too many appearances\r\n appeared[colour] = remCorrectCode.count(colour)\r\n # if >0 appearances of that colour, then append it to feedback\r\n for i in range(appeared[colour]):\r\n feedback.append(\"R\")\r\n\r\n # visually show feedback\r\n container = rowList[currRow-1][-1]\r\n # variable used in favour of enumerate() index, to avoid having gaps in the feedback\r\n for n, v in enumerate(feedback):\r\n if v == \"P\":\r\n # change Black\r\n container[n][\"image\"] = images[\"MiniHole_Black\"]\r\n else:\r\n container[n][\"image\"] = images[\"MiniHole_White\"] \r\n \r\ndef resetRow():\r\n for obj in rowList[currRow]:\r\n if type(obj) != list:\r\n obj.removeColour()\r\n\r\ndef newGame():\r\n global correctCode, currRow\r\n # Re-adjust window\r\n root.geometry(\"500x730\")\r\n board.place(relx = .98, rely = 0.5, anchor=\"e\")\r\n pegFrame.place(relx=0.02, rely=0.02)\r\n resetButton.place(relx=0.02, rely=0.73)\r\n newGameButton.place_forget()\r\n solutionFrame.place_forget()\r\n gameOverLabel.place_forget()\r\n leaderboardButton.place_forget()\r\n # Reset variables\r\n correctCode = choices(possibleColours, k=4)\r\n currRow = 0\r\n # Update 
solution for game over\r\n for i, obj in enumerate(solutionList):\r\n obj.changeColour(correctCode[j])\r\n\r\n # Reset board\r\n for row in rowList:\r\n for obj in row:\r\n if type(obj) != list:\r\n obj.removeColour()\r\n else: # list\r\n for hole in obj:\r\n hole[\"image\"] = images[\"MiniHole\"]\r\n\r\ndef hideLeaderboard():\r\n hideLeaderboardButton.place_forget()\r\n leaderboardLabel.place_forget()\r\n newGameButton.place(relx=0.1, rely=0.8)\r\n gameOverLabel.place(relx=0.5, rely=0.4, anchor = CENTER)\r\n leaderboardButton.place(relx=0.9, rely=0.9, anchor=\"se\")\r\n solutionFrame.place(relx=0.5, rely=0.7, anchor=CENTER)\r\n \r\ndef showLeaderboard(text):\r\n leaderboardButton.place_forget()\r\n newGameButton.place_forget()\r\n gameOverLabel.place_forget()\r\n solutionFrame.place_forget()\r\n hideLeaderboardButton.place(relx=0.1, rely=0.8)\r\n leaderboardLabel.place(relx=0.5, rely=0.4, anchor = CENTER)\r\n leaderboardLabel[\"text\"] = text\r\n \r\ndef gameOver(won):\r\n data = {}\r\n if won: # they won!\r\n if currRow == 1:\r\n gameOverLabel[\"text\"] = winString % (playerName, 1, \"try\")\r\n else:\r\n gameOverLabel[\"text\"] = winString % (playerName, currRow, \"tries\")\r\n with open(leaderboardFile, \"a\") as aFile:\r\n aFile.write(f\"{playerName}; {currRow}\\n\")\r\n else: # they lost! :(\r\n gameOverLabel[\"text\"] = loseString % playerName\r\n \r\n board.place_forget()\r\n pegFrame.place_forget()\r\n resetButton.place_forget()\r\n root.geometry(\"500x400\")\r\n newGameButton.place(relx=0.1, rely=0.8)\r\n gameOverLabel.place(relx=0.5, rely=0.4, anchor = CENTER)\r\n leaderboardButton.place(relx=0.9, rely=0.9, anchor=\"se\")\r\n solutionFrame.place(relx=0.5, rely=0.7, anchor=CENTER)\r\n '''\r\n Question asks for the 3 players with the least tries\r\n Not the 3 lowest tries and who got them\r\n So only record the least tries for each player\r\n '''\r\n with open(leaderboardFile)as aFile:\r\n for line in aFile:\r\n fields = line.rstrip(\"\\n\").split(\"; \")\r\n name = fields[0]\r\n try:\r\n if data[name] > fields[-1]: # old value took more tries\r\n data[name] = fields[-1]\r\n except: # no prior data\r\n data[name] = fields[-1]\r\n\r\n # turn dictionary into list of tuples\r\n data = list(data.items())\r\n leaderboardString = \"Leaderboard:\\n\"\r\n # get 3 lowest, or as many as possible (if <3)\r\n # if 3>data then use data, else use 3\r\n for i in range((3>len(data) and len(data)) or 3):\r\n # get tuple with lowest tries\r\n tup = min(data, key=itemgetter(1))\r\n tries = \"tries\"\r\n if tup[0] == 1:\r\n tries = \"try\" \r\n leaderboardString += f\"{tup[0]}: {tup[1]} {tries}\\n\"\r\n data.remove(tup)\r\n # when click show leaderboard, show leaderboard (obviously)\r\n leaderboardButton[\"command\"] = lambda: showLeaderboard(leaderboardString)\r\n \r\n# Initalise entry for entering playerName\r\nplayerNameLabel = Label(root, height=3, font=fontInfo, bg=rootCol, text=\"Enter your name: \")\r\nplayerNameInput = Text(root, font=(fontType, 50), width=10, height=1, wrap=None)\r\nstartButton = Button(root, text=\"START\", font=fontInfo, command = lambda: playerName.set(playerNameInput.get(\"1.0\", \"end-1c\")))\r\n# Initialise rule-related widgets\r\nrulesButton = Button(root, text=\"Rules\", font=fontInfo, command = lambda: hidePNEntries(True))\r\nbackButton = Button(root, text=\"Back\", font=fontInfo, command = placePNEntries)\r\nrulesLabel = Label(root, font=(fontType, 13), bg=rootCol, text=rulesString, wraplength=400, justify=LEFT)\r\n# Show playerName-related 
widgets\r\nplacePNEntries()\r\n# Wait for button press\r\nstartButton.wait_variable(playerName)\r\n# Hide all previously created widgets.\r\nhidePNEntries(False)\r\n\r\n# Validate playerName input\r\nif playerName.get().isalpha() and len(playerName.get()) >= 3:\r\n playerName = playerName.get()\r\nelse:\r\n playerName = \"Player\"\r\n\r\n# Create board\r\nroot.geometry(\"500x730\")\r\nboard = Canvas(root, width= 300, height=720, bg=bgHex)\r\nboard.place(relx = .98, rely = 0.5, anchor=\"e\")\r\n \r\n'''\r\nrowList layout:\r\n\r\nrowList = [\r\n row [pegHole, pegHole, pegHole, pegHole, feedBackPegs [pegHole, pegHole, pegHole, pegHole]],\r\n row [pegHole, pegHole, pegHole, pegHole, feedBackPegs [pegHole, pegHole, pegHole, pegHole]],\r\n row ...\r\n\r\n hence:\r\n rowList[-1] = last row\r\n rowList[0-12] = rows\r\n \r\n rowList[-1][0-4] = peg holes\r\n rowList[-1][-1] = feedback peg container\r\n rowList[-1][-1][0-4] = feedback peg holes\r\n\r\n in the following code, \"-1\"s can usually be replaced by the for loop variable names (e.g. i, j etc)\r\n However, -1 is used to make it easier to understand.\r\n]\r\n'''\r\n\r\n# Create invisible solution frame (for game over)\r\nsolutionFrame = Frame(root ,width=300, height=720/rows, bg=rootCol)\r\nfor j in range(4):\r\n solutionList.append(PegHole(j, correctCode[j], solutionFrame))\r\n \r\n# Create canvas board\r\nfor i in range(rows):\r\n # Create row container\r\n frame = Frame(board, width = 300, height=720/rows, bg=bgHex)\r\n frame.place(relx = 0.5, rely=i/rows, anchor=\"n\")\r\n rowList.append([])\r\n # create main row objects\r\n for j in range(4):\r\n rowList[-1].append(PegHole(j, None, frame))\r\n # create feedback pin container\r\n pegContainer = Frame(frame, width= 300/5, height=780/rows, bg=bgHex)\r\n pegContainer.place(relx=4/5)\r\n rowList[-1].append([])\r\n # create feedback pin holes\r\n for j in range(2):\r\n for k in range(2):\r\n rowList[-1][-1].append(Label(pegContainer, bg=bgHex, image=images[\"MiniHole\"]))\r\n rowList[-1][-1][-1].place(relx=k/2,rely=j/2)\r\n \r\n# Create pegs\r\npegFrame = Frame(root, width=69, height=(len(possibleColours)+1)*74, bg=rootCol) # pegs = 68x74 pixels\r\npegFrame.place(relx=0.02, rely=0.02)\r\n\r\nfor i, v in enumerate(possibleColours):\r\n pegImage = images[f\"{v}_Peg\"]\r\n pegList.append(Label(pegFrame, image=pegImage, bg=rootCol))\r\n pegList[-1].place(rely=i/6)\r\n pegList[-1].bind(\"\", partial(pegDropped, v))\r\n pegList[-1].bind(\"\", partial(pegMoving, pegImage))\r\n\r\n# Make buttons\r\ncheckButton = Button(root, text=\"CHECK\", font=fontInfo)\r\nresetButton = Button(root, text=\"RESET\", font=fontInfo, command=resetRow)\r\nresetButton.place(relx=0.02, rely=0.73)\r\nnewGameButton = Button(root, text=\"NEW GAME\", font=fontInfo, command=newGame)\r\nleaderboardButton = Button(root, text=\"SHOW LEADERBOARD\", font=(fontType,12))\r\nhideLeaderboardButton = Button(root, text=\"BACK\", font=fontInfo, command=hideLeaderboard)\r\n# Make labels\r\ngameOverLabel = Label(root, font=fontInfo, bg=rootCol, text=rulesString, wraplength=400, justify=LEFT)\r\nleaderboardLabel = Label(root, font=fontInfo, bg=rootCol, text=rulesString, wraplength=400, justify=LEFT)\r\nduplicateHolder = Label(root, bg=rootCol)\r\n","sub_path":"Mastermind.py","file_name":"Mastermind.py","file_ext":"py","file_size_in_byte":15296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"148402293","text":"# import os,sys\n# PROGRAM_DIR = 
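The manual black/white peg bookkeeping in validateCode above can be expressed compactly with collections.Counter: black pegs are exact positional matches, and white pegs are the multiset overlap of colours minus the blacks. (Note also that newGame refreshes the solution pegs with `correctCode[j]` although its loop variable is `i`.) A minimal sketch:

from collections import Counter

def feedback(guess, code):
    # black: right colour in the right position
    black = sum(g == c for g, c in zip(guess, code))
    # white: colour matches ignoring position (multiset intersection), minus blacks
    overlap = sum((Counter(guess) & Counter(code)).values())
    return black, overlap - black

# feedback(['Red', 'Blue', 'Blue', 'Green'],
#          ['Red', 'Green', 'Blue', 'Blue']) -> (2, 2)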
os.path.dirname(os.path.abspath(sys.argv[0]))\n\n# sys.path.append(PROGRAM_DIR+ \"\\\\windows\\\\ffmpeg.exe\")\n# print(sys.path)\nfrom pyAudioAnalysis import audioBasicIO\nfrom pyAudioAnalysis import ShortTermFeatures\nimport matplotlib.pyplot as plt\nfrom numpy import fft\nimport numpy as np\nimport pyspark\nimport time\nimport matplotlib.pyplot as plt\nimport math\nimport numpy as np\nfrom scipy.spatial.distance import euclidean\n\nfrom fastdtw import fastdtw\nDEBUG = True\n\n\ndef cov_fft(image_link): # name\n [Fs, x] = audioBasicIO.read_audio_file(image_link)\n x = audioBasicIO.stereo_to_mono(x)\n F, f_names = ShortTermFeatures.feature_extraction(\n x, Fs, 0.050*Fs, 0.025*Fs)\n# for k in F:\n# norm = np.linalg.norm(k)\n# k = k/norm\n if DEBUG and False:\n for k in range(33):\n plt.subplot(2, 1, 1)\n plt.plot(F[k, :], label=str(k))\n plt.xlabel('Frame no')\n plt.ylabel(f_names[k])\n\n plt.show()\n F = F / np.linalg.norm(F, axis = 1, keepdims = True)\n\n return F, fft.fftn(F)\n\n\ndef cal_ifft(arr, ffty, maxcount=5):\n resarr = []\n for item in arr:\n ifftres = np.array([np.real(fft.irfft(p))\n for p in item * np.conj(ffty)])\n ifftres = ifftres / np.linalg.norm(ifftres, axis = 1, keepdims = True)\n # from scipy.signal import savgol_filter\n # ifftres = savgol_filter(ifftres, 51, 2)\n\n # box = np.ones(20)/20\n # ifftres = np.array([np.convolve(item, box, mode='full') for item in ifftres])\n if DEBUG:\n # for item in ifftres.copy():\n # plt.plot(item)\n # plt.show()\n plt.plot(ifftres.T)\n plt.show()\n zscore = []\n maxidx = np.argmax(ifftres, axis=1)\n for idx, row in zip(maxidx, ifftres):\n zscore.append([idx, (idx - row.mean(axis=0)) / row.std(axis=0)])\n\n print(sorted(zscore, key=lambda p: p[0]), ifftres.shape)\n cost = ifftres/np.max(ifftres)\n resarr.append(cost)\n return resarr[:maxcount]\n\ndef cov_dtw(image_link): # name\n [Fs, x] = audioBasicIO.read_audio_file(image_link)\n x = audioBasicIO.stereo_to_mono(x)\n F, f_names = ShortTermFeatures.feature_extraction(\n x, Fs, 0.050*Fs, 0.025*Fs)\n\n if DEBUG and False:\n for k in range(33):\n plt.subplot(2, 1, 1)\n plt.plot(F[k, :], label=str(k))\n plt.xlabel('Frame no')\n plt.ylabel(f_names[k])\n\n plt.show()\n F = F / np.linalg.norm(F, axis=1, keepdims=True)\n\n return F\n\n\nif '__main__' == __name__:\n start_time = time.time()\n F, fff = cov_fft('back.mp3')\n #F = np.random.randn(100, 1)\n print(F.shape, fff.shape)\n plt.plot(F.T)\n plt.show()\n\n #V, ffv = cov_fft('iyah.mp3')\n # print(V.shape, V.shape)\n\n #V = V[:,720:1000].copy()\n V = F[:, 999:2000].copy()\n if F.shape[1] > V.shape[1]:\n V = np.concatenate(\n (V,np.zeros((F.shape[0], F.shape[1]-V.shape[1]))), axis=1) #np.random.rand(F.shape[0], F.shape[1]-V.shape[1])), axis=1)\n fff = fft.fftn(F)\n vvv =fft.fftn(V)\n else:\n F = np.concatenate(\n (F, np.zeros((F.shape[0], F.shape[1]-V.shape[1]))), axis=1)\n fff = fft.fftn(F)\n vvv =fft.fftn(V)\n plt.plot(V.T)\n plt.show()\n print(\"---{}s seconds---\".format(time.time()-start_time))\n cal_ifft([fff], vvv)\n print(\"---{}s seconds---\".format(time.time()-start_time))\n\n distance, path = fastdtw(F.T, V.T, dist=euclidean)\n print(distance)\n plt.plot(path)\n plt.show()","sub_path":"featureextraction.py","file_name":"featureextraction.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"11048113","text":"\"\"\" email sending \n\"\"\"\n\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom 
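cal_ifft above is a frequency-domain circular cross-correlation: multiply one spectrum by the conjugate of the other, inverse-transform, and read the best lag off the argmax. A minimal 1-D sketch of that identity, assuming equal-length real inputs:

import numpy as np

def circular_xcorr(a, b):
    # Correlation theorem: IFFT(FFT(a) * conj(FFT(b))) is the circular
    # cross-correlation of a with b; argmax gives the best circular lag.
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    spec = np.fft.rfft(a) * np.conj(np.fft.rfft(b))
    return np.fft.irfft(spec, n=len(a))

# lag = int(np.argmax(circular_xcorr(signal, shifted_signal)))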
email.mime.base import MIMEBase\nfrom email import encoders\nimport smtplib\n\n\ndef email(FROM,\n TO,\n subject=\"\",\n text=\"\",\n html=\"\",\n SMTP='127.0.0.1',\n LOGIN=[],\n sender=\"\",\n replyto=\"\",\n attachments={}):\n \"\"\"send a multipart plain text (or html) message, using given SMTP\n - Optional LOGIN (ie SMTP validation) must give (,)\n - allows for a list of recipients in TO: each gets a separate email, ie bcc\n\n - attachment expects a dictionary of {filename:content}\n \"\"\"\n if not (FROM and TO and SMTP):\n # print \"EMAIL DISABLED >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\"\n return # email is disabled or invalid, so do nothing\n # set up our message\n root = MIMEMultipart('related')\n root['Subject'] = subject\n if sender:\n root['From'] = '\"%s\" <%s>' % (sender, FROM)\n else:\n root['From'] = FROM\n if replyto:\n root['Reply-To'] = replyto\n if isinstance(TO, str):\n TO = [TO]\n root.preamble = 'This is a multi-part message in MIME format.'\n # add our alternative versions\n alt = MIMEMultipart('alternative')\n root.attach(alt)\n if html:\n alt.attach(MIMEText(html, 'html'))\n else:\n alt.attach(MIMEText(text))\n\n # include attachments\n for filename, content in list(attachments.items()):\n part = MIMEBase('application', 'octet-stream')\n part.set_payload(content)\n encoders.encode_base64(part)\n part.add_header('Content-Disposition',\n 'attachment; filename=%s' % filename)\n root.attach(part)\n\n # send our message(s)\n try:\n smtp = smtplib.SMTP()\n smtp.connect(SMTP)\n if LOGIN:\n smtp.login(*LOGIN)\n for t in TO:\n try:\n root['To'] = t\n smtp.sendmail(FROM, t, root.as_string())\n # print \"SENT: FROM=\",FROM,' TO=',t,' ROOT=', root.as_string()\n del root[\n 'To'] # need to del this, as the message class __setitem__ appends rather than replaces\n except:\n print(\"SENDMAIL REFUSAL: FROM=\", FROM, ' TO=', t, ' ROOT=',\n root.as_string())\n smtp.quit()\n except:\n print(\"SMTP CONNECT ERROR: FROM=\", FROM, ' TO=', TO, ' ROOT=',\n root.as_string())\n","sub_path":"evoke/lib/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"462850239","text":"import json\nimport os\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\nimport data_loader\nfrom model import resnet\nfrom util import util\n\n# For updating learning rate\ndef update_lr(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\n# get params from config file\nconfig = util.read_config()\nconfig_train = config['train_param']\n\ntotal_epochs = config_train['total_epochs']\nlearning_rate = config_train['learning_rate']\nbatch_size = config_train['batch_size']\n\n# set model\nresnet = resnet.resnet50(num_classes=6)\nresnet.cuda()\n\n# set loss function and optimizer\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(resnet.parameters(), lr=learning_rate)\n\nlr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=10)\n\n# 학습과 테스트에 필요한 데이터 셋 객체를 만든다.\n# 그리고 데이터 셋 객체를 활용해 데이터 로더 객체를 만든다.\ntrain_data = data_loader.CustomDataset(is_train=True)\ntest_data = data_loader.CustomDataset(is_train=False)\n\ntrain_loader = torch.utils.data.DataLoader(dataset=train_data,\n batch_size=batch_size,\n shuffle=True,\n drop_last=True)\n\ntest_loader = torch.utils.data.DataLoader(dataset=test_data,\n batch_size=batch_size,\n shuffle=False)\n\ncurr_lr = learning_rate\ntotal_iter = 
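A hedged usage example for the email() helper above; the addresses, SMTP host, credentials and attachment content are placeholders. Each address in TO receives its own copy of the message (effectively bcc), and attachments is a {filename: content} mapping.

# assumes the helper above is importable from evoke.lib.email
email(
    FROM='noreply@example.com',
    TO=['alice@example.com', 'bob@example.com'],   # one message per recipient
    subject='Monthly report',
    text='Plain-text body, used when no html= is given',
    SMTP='127.0.0.1',
    LOGIN=['smtp_user', 'smtp_password'],          # optional (user, password) pair
    attachments={'report.csv': b'id,total\n1,42\n'},
)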
int(train_data.__len__() / batch_size)\n\nfor epoch in range(total_epochs):\n resnet.train()\n for i, sample in enumerate(train_loader):\n images = Variable(sample['image'].cuda())\n labels = Variable(sample['label'].cuda())\n labels = labels.squeeze()\n\n optimizer.zero_grad()\n outputs = resnet(images)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n if (i + 1) % 20 == 0:\n print(\"Epoch [%d/%d], Iter [%d/%d] Loss: %.4f\" % (epoch + 1, total_epochs,\n (i + 1), total_iter, loss.item()))\n\n # Decay learning rate\n if (epoch + 1) % 20 == 0:\n curr_lr /= 3\n update_lr(optimizer, curr_lr)\n\n if (epoch + 1) % 5 != 0:\n continue\n\n # Test\n correct = 0\n total = 0\n resnet.eval()\n for i, sample in enumerate(test_loader):\n images = Variable(sample['image'].cuda())\n labels = sample['label']\n sq_label = labels.squeeze()\n\n outputs = resnet(images)\n _, predicted = torch.max(outputs.data, 1)\n total += sq_label.size(0)\n correct += (predicted.cpu() == sq_label).sum()\n print('Accuracy of the model on the sample_dir images: %d%%\\n' % (100 * correct / total))\n\n if (epoch + 1) % 10 == 0:\n checkpoint_dir = os.path.abspath('./checkpoint/')\n model_name = '%s_%d.pth' % ('scratch_v2', (epoch + 1))\n save_path = os.path.join(checkpoint_dir, model_name)\n torch.save(resnet.state_dict(), save_path)\n","sub_path":"face_classifier/train_scratch.py","file_name":"train_scratch.py","file_ext":"py","file_size_in_byte":3138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"94962188","text":"import random\r\n\r\ndef rockPaperScissors():\r\n programSelect = (random.choice([\"rock\", \"paper\", \"scissors\"]))\r\n playerChoice = input(\"Rock, paper, scissors? \").lower()\r\n\r\n if programSelect == playerChoice:\r\n restart = input(\"Draw, play again? [y or n]\").lower()\r\n playAgain(restart)\r\n elif (programSelect == \"rock\" and playerChoice == \"scissors\") or (programSelect == \"scissors\" and playerChoice == \"paper\") or (programSelect == \"paper\" and playerChoice == \"rock\"):\r\n restart = input(\"You lose! Play again? [y or n]\").lower()\r\n playAgain(restart)\r\n elif (playerChoice == \"rock\" and programSelect == \"scissors\") or (playerChoice == \"scissors\" and programSelect == \"paper\") or (playerChoice == \"paper\" and programSelect == \"rock\"):\r\n restart = input(\"You win! Play again? [y or n]\").lower()\r\n playAgain(restart)\r\n else:\r\n restart = input(\"Computer *facepalms: Play again? [y or n]\").lower()\r\n playAgain(restart)\r\n\r\ndef playAgain(restart):\r\n if restart != \"y\" and restart != \"n\":\r\n print(\"Oy-vey. 
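One thing to watch in the training script above: lr_scheduler (ReduceLROnPlateau) is constructed but never stepped, so only the manual `curr_lr /= 3` decay ever takes effect. A sketch of both options, assuming a per-epoch validation loss is available:

# inside the epoch loop, after evaluation (val_loss assumed computed):
# lr_scheduler.step(val_loss)   # ReduceLROnPlateau expects the monitored metric

# equivalent manual decay, matching the script's divide-by-3 every 20 epochs:
def decay_lr(optimizer, factor=3.0):
    for param_group in optimizer.param_groups:
        param_group['lr'] /= factor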
Let's just play again (--_)\")\r\n elif restart == \"n\":\r\n exit()\r\n\r\n#main\r\ngameRunning = True\r\nwhile gameRunning:\r\n rockPaperScissors()\r\n","sub_path":"RockPaperScissors.py","file_name":"RockPaperScissors.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"118692222","text":"class Solution(object):\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n nums1 = sorted(nums)\n i, j = 0, len(nums1)-1 \n while(i target):\n j -= 1\n elif(temp < target):\n i += 1\n elif(temp==target):\n # print(\"index1=%d, index2=%d\"%(i+1,j+1))\n a = nums.index(nums1[i])\n if(nums1[i]==nums1[j]):\n b = nums.index(nums1[j], a+1)\n else:\n b = nums.index(nums1[j])\n return [min(a,b)+1,max(a,b)+1]\n \n\ns = Solution()\nnums=[0,3,2,4,0]\nprint(s.twoSum(nums,5)) ","sub_path":"ok_p_1.py","file_name":"ok_p_1.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"653506697","text":"from django.shortcuts import render\n\n# Create your views here.\nimport logging\n\n\nfrom base import resp\nfrom base.common.decorators import check_params_not_null\nfrom base.common.param_utils import get_id_list\nfrom base.views import BaseAPIView\nfrom nmis.hospitals.permissions import HospitalStaffPermission\nfrom nmis.notices.models import Notice, UserNotice\nfrom nmis.notices.serializers import UserNoticeSerializer\nfrom utils import times\n\nlogger = logging.getLogger(__name__)\n\n\nclass NoticeListView(BaseAPIView):\n permission_classes = (HospitalStaffPermission,)\n\n def get(self, req):\n \"\"\"\n 获取消息列表(筛选调教:已读/未读)\n is_read: false: 未读, true: 已读 None: 全部\n \"\"\"\n self.check_object_any_permissions(req, None)\n staff = req.user.get_profile()\n is_read = req.GET.get('is_read', '').strip()\n if is_read:\n if is_read not in ('False', 'True'):\n return resp.failed('is_read参数异常')\n query_set = UserNoticeSerializer.setup_eager_loading(\n UserNotice.objects.filter(staff=staff, is_read=is_read, is_delete=False).order_by('-created_time')\n )\n else:\n query_set = UserNoticeSerializer.setup_eager_loading(\n UserNotice.objects.filter(staff=staff, is_delete=False).order_by('-created_time')\n )\n return self.get_pages(query_set, results_name='notices')\n\n\nclass NoticeReadOrDeleteView(BaseAPIView):\n\n permission_classes = (HospitalStaffPermission, )\n\n @check_params_not_null(['notice_ids', 'op_type'])\n def put(self, req):\n \"\"\"\n 读取消息/删除消息(标记单个/多条消息为删除状态,标记单个/多条消息为已读状态)\n type: 操作类型(删除操作:DE、读取操作:RE)必传字段\n notice_ids: 消息ids集合字符串,如:\"1,2,3\" 必传字段\n \"\"\"\n staff = req.user.get_profile()\n self.check_object_permissions(req, staff)\n\n notice_ids = get_id_list(req.data.get('notice_ids', '').strip())\n\n operation_type = req.data.get('op_type', '').strip()\n if operation_type not in ('RE', 'DE'):\n return resp.failed('不合法的操作类型数据')\n\n user_notices = UserNotice.objects.filter(notice_id__in=notice_ids, staff=staff)\n if not len(notice_ids) == len(user_notices):\n return resp.failed('检查是否存在不匹配的消息')\n try:\n if operation_type == 'RE':\n user_notices = user_notices.filter(is_read=False)\n if not user_notices:\n return resp.failed('当前页不存在未读消息')\n user_notices.update(is_read=True, read_time=times.now())\n else:\n # 选中的消息存在未读的情况下未考虑,直接标记成删除状态,如需考虑,后续改进\n user_notices.update(is_delete=True, delete_time=times.now())\n return resp.ok('操作成功')\n except Exception as e:\n 
logger.exception(e)\n return resp.failed('操作失败')\n\n\nclass NoticeReadOrDeleteAllView(BaseAPIView):\n\n permission_classes = (HospitalStaffPermission, )\n\n @check_params_not_null(['op_type'])\n def put(self, req):\n \"\"\"\n op_type: 操作类型(全部标为已读(RE): 所有未读消息标记为已读, 删除全部已读(DE): 所以已读消息标记为删除)\n 根据操作类型把消息标记为相对应的状态\n \"\"\"\n staff = req.user.get_profile()\n self.check_object_permissions(req, staff)\n\n operation_type = req.data.get('op_type', '').strip()\n if operation_type not in ('ARE', 'ADE'):\n return resp.failed('不合法的操作类型数据')\n\n try:\n if operation_type == 'ARE':\n query_set = UserNotice.objects.filter(staff=staff, is_read=False, is_delete=False)\n if not query_set:\n return resp.failed('不存在未读消息')\n query_set.update(is_read=True, read_time=times.now())\n else:\n query_set = UserNotice.objects.filter(staff=staff, is_read=True, is_delete=False)\n if not query_set:\n return resp.failed('当前用户不存在可删除的消息')\n query_set.update(is_delete=True, delete_time=times.now())\n return resp.ok('操作成功')\n except Exception as e:\n logger.info(e)\n return resp.failed('操作失败')\n\n","sub_path":"apps/nmis/notices/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"168608951","text":"def multi_dict(dicts):\n\tval_final = 1\n\tfor key in dicts:\n\t\tval_final = val_final * dicts[key] \n\treturn val_final\n\n\nprint(multi_dict({1:2,2:3,3:55,4:75,5:100}))\t\n\n#https://www.programiz.com/python-programming/methods/dictionary/values","sub_path":"tech_tests/dict drills/py_dict_11.py","file_name":"py_dict_11.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"487887824","text":"#begin\nimport sys\nfrom OriginalScanner import Token\nfrom OriginalScanner import Escaner\nfrom OriginalScanner import Posicion\n\nclass Parser( object ):\n#!constantes\n T= True\n x= False\n distancia_minima_de_error = 2\n\n#!declarations\n def __init__( self ):\n self.escaner= None\n self.token= None \n self.lookahead_token= None \n self.escaner_generado = False\n self.string_del_token = '' \n self.tokens_literales= '-none-' \n self.error_de_distancia= Parser.distancia_minima_de_error\n\n def obtener_posicion_del_parser( self ):\n return self.lookahead_token.tipo_token, self.lookahead_token.columna_token\n\n def sincronizar_errores( self, error_numeral ):\n if self.error_de_distancia >= Parser.distancia_minima_de_error:\n print(\"errores de sync\")\n\n self.error_de_distancia = 0\n\n def error_semantico( self, mensaje ):\n if self.error_de_distancia >= Parser.distancia_minima_de_error:\n print(\"Errores semanticos\")\n\n self.error_de_distancia = 0\n\n def mensaje_de_aviso( self, mensaje ):\n if self.error_de_distancia >= Parser.distancia_minima_de_error:\n print(\"advertir errores\")\n\n self.error_de_distancia = 0\n\n def logro_entrar_el_mensaje( self ):\n print(\"contador de errores\")\n\n def string_lexico( self ):\n return self.token.token_valor\n\n def string_look_ahead( self ):\n return self.lookahead_token.token_valor\n\n def Get( self ):\n while True:\n self.token = self.lookahead_token\n self.lookahead_token = self.escaner.Escanear( )\n if self.lookahead_token.tipo_token <= Parser.maxT:\n self.error_de_distancia += 1\n break\n#!pragmas\n self.lookahead_token = self.token\n\n def Expect( self, i ):\n if self.lookahead_token.tipo_token == i:\n self.Get( )\n else:\n self.sincronizar_errores(i)\n\n def Marcar_inicio( 
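multi_dict above folds a dictionary's values into their product; functools.reduce expresses the same fold directly:

from functools import reduce
import operator

def multi_dict(dicts):
    # product of all values; 1 is the multiplicative identity for an empty dict
    return reduce(operator.mul, dicts.values(), 1)

# multi_dict({1: 2, 2: 3, 3: 55, 4: 75, 5: 100}) -> 2475000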
self,i):\n return self.set[i][self.lookahead_token.tipo_token]\n\n def Esperar_Bajo( self, n, follow ):\n if self.lookahead_token.tipo_token == n:\n self.Get( )\n else:\n self.sincronizar_errores( n )\n while not self.Marcar_inicio(follow):\n self.Get( )\n\n def separador_bajo( self, n, syFollow, repFollow ):\n a = [ False for i in xrange( Parser.maxT+1 ) ]\n if self.lookahead_token.tipo_token == n:\n self.Get( )\n return True\n elif self.Marcar_inicio(repFollow):\n return False\n else:\n for i in xrange( Parser.maxT ):\n a[i] = self.set[syFollow][i] or self.set[repFollow][i] or self.set[0][i]\n self.sincronizar_errores( n )\n while not a[self.lookahead_token.tipo_token]:\n self.Get( )\n return self.Marcar_inicio( syFollow )\n\n#!productions\n\n def Parsear( self, escaner ):\n self.escaner = escaner\n self.lookahead_token = Token( )\n self.lookahead_token.token_valor = u''\n self.Get( )\n \n#!parseRoot\n\n set = [\n#!initialization\n ]\n\n mensaje_de_error = {\n#!errors\n }\n archivo_seleccionado = open(\"grammar_values.txt\", \"r+\")\n archivo_seleccionado = archivo_seleccionado.read()\n x = archivo_seleccionado.split(\",\")\n #print(\"Arreglo\", x)\n reglas = open(\"reglas.txt\", \"r+\")\n reglas = reglas.read()\n follows = open(\"follows.txt\", \"r+\")\n follows = follows.read()\n firsts = open(\"firsts.txt\", \"r+\")\n firsts = firsts.read()\n n = -1 \n for i in x:\n n = n + 1 \n #get_index = (archivo_seleccionado.index(i))\n print(\"VALUE: \" + str(n) + \" \" + i)\n \n \n print(\"-----------------------------------------\")\n reglas = reglas.replace(\"\\n\", \" \")\n reglas = reglas.replace(\"\\t\", \" \")\n print(\"REGLAS\", reglas)\n print(\"-----------------------------------------\")\n follows = follows.replace(\"\\n\", \" \")\n follows = follows.replace(\"\\t\", \" \")\n print(\"FOLLOW\", follows)\n print(\"-----------------------------------------\")\n firsts = firsts.replace(\"\\n\", \" \")\n firsts = firsts.replace(\"\\t\", \" \")\n print(\"FIRSTS\", firsts)\n \n \n \n\n\n\n","sub_path":"Parser.py","file_name":"Parser.py","file_ext":"py","file_size_in_byte":4096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"470053056","text":"n=85\nm=(85**2)\ne=n/3\nif n%2==0:\n\tprint('Chetnoe')\nif n%2!=0:\n\tprint('Ne chotn')\nif (m>1000):\n\tprint('Yes')\nif (m<1000):\n\tprint('No')\nif e==True:\n\tprint('Da')\nelse:\n\tprint('No')\n\n\t\n\n","sub_path":"pr 5.py","file_name":"pr 5.py","file_ext":"py","file_size_in_byte":181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"54388243","text":"from django.test import TestCase\n\nfrom .models import Task\nfrom django.utils import timezone\n\n# Create your tests here.\nclass ToDoModelTests(TestCase):\n def test_text_field(self):\n text_field = \"Task #1\"\n task = Task(task_text=text_field)\n self.assertEqual(task.task_text, text_field)\n \n def test_date_field(self):\n date = timezone.now()\n task = Task(task_text=\"#\", pub_date=date)\n self.assertEqual(task.pub_date, date)\n ","sub_path":"django_project/to_do/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"339039228","text":"import pytest\n\nfrom .base_integration_test import BaseIntegrationTest\nfrom tests.sampleresponse.disbursement import disbursement_response\nfrom tests.sampleresponse.disbursement import disbursement_banks_response\n\n\nclass 
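In the short arithmetic script above, `e = n/3` is float division (85/3 ≈ 28.33) and `e == True` compares against 1, so the 'Da' branch fires only when n equals 3; a divisibility test wants the modulo operator:

n = 85
print('Da' if n % 3 == 0 else 'No')   # 85 % 3 == 1, so this prints 'No'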
TestDisbursement(BaseIntegrationTest):\n @pytest.fixture(scope=\"class\")\n def Disbursement(self, xendit_instance):\n return xendit_instance.Disbursement\n\n @pytest.fixture(scope=\"class\")\n def disbursement_data(self, Disbursement):\n disbursement = Disbursement.create(\n external_id=\"demo_1475459775872\",\n bank_code=\"BCA\",\n account_holder_name=\"Bob Jones\",\n account_number=\"1231242311\",\n description=\"Reimbursement for shoes\",\n amount=17000,\n )\n return disbursement\n\n def test_create_disbursement_return_correct_keys(self, disbursement_data):\n disbursement = disbursement_data\n self.assert_returned_object_has_same_key_as_sample_response(\n disbursement, disbursement_response()\n )\n\n def test_get_disbursement_by_id_return_correct_keys(\n self, Disbursement, disbursement_data\n ):\n disbursement = disbursement_data\n\n disbursement = Disbursement.get(id=disbursement.id)\n self.assert_returned_object_has_same_key_as_sample_response(\n disbursement, disbursement_response()\n )\n\n def test_get_disbursement_by_external_id_return_correct_keys(self, Disbursement):\n disbursement = Disbursement.get_by_ext_id(external_id=\"demo_1475459775872\")\n self.assert_returned_object_has_same_key_as_sample_response(\n disbursement[0], disbursement_response()\n )\n\n def test_get_disbursement_banks_return_correct_keys(self, Disbursement):\n disbursement_banks = Disbursement.get_available_banks()\n self.assert_returned_object_has_same_key_as_sample_response(\n disbursement_banks[0], disbursement_banks_response()[0]\n )\n","sub_path":"tests/integration/test_disbursement.py","file_name":"test_disbursement.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"372009584","text":"# This function takes a string of characters and returns a list of two numbers\n# in which the first number is the number of capital letters per line\n# and the second is the number of lowercase letters per line.\n\n\ndef rec(string):\n \"\"\"\n\n :param string: str, wAt’rh7rJjoa\n :return: list, [2, 8]\n\n :param string: int, 57679\n :return: list, [0, 0]\n\n \"\"\"\n if len(string) == 0:\n return [0, 0]\n res = rec(string[1:])\n if 96 < ord(string[0]) < 123:\n return [res[0], res[1] + 1]\n elif 64 < ord(string[0]) < 91:\n return [res[0] + 1, res[1]]\n return res\n\n\nprint(rec(input()))\n","sub_path":"n10.py","file_name":"n10.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"83494880","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 28 10:49:38 2018\n\n@author: Shubham\n\"\"\"\nimport re\nimport operator\n\nclass Solution:\n \n def mostCommonWord(self, paragraph, banned):\n paragraph = re.sub(r'[^\\w\\s]','',paragraph)\n paragraph.lower()\n word_list = paragraph.split()\n freq_dict = {i:word_list.count(i) for i in set(word_list)}\n freq_dict_sorted = sorted(freq_dict.items(), key = operator.itemgetter(1), reverse = True)\n for ele in freq_dict_sorted:\n if ele[0] in banned:\n continue\n ans = ele[0]\n return ans\n \nif __name__ == \"__main__\":\n \n sol = Solution()\n str1 = \"Bob hit a ball, the hit BALL flew far after it was hit.\"\n banned = [\"hit\"]\n ans = sol.mostCommonWord(str1, banned)\n print 
(ans)","sub_path":"leetcode/Python/819_most_common_word.py","file_name":"819_most_common_word.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"280861519","text":"import sys\nimport os\nimport time\nimport random\nimport pyautogui as pg\npg.PAUSE = 1\n\nfrom pyscreeze import ImageNotFoundException\n\ndef handler(func, *args):\n return func(*args)\n\ndef hoge():\n print(\"hoge\")\n\ndef ckick(locate):\n print(\"click\", locate)\n return pg.click(locate)\n\ndef wait(locate):\n print(\"wait\", locate)\n pg.click(locate)\n time.sleep(61 * 60)\n # time.sleep(5)\n pg.press('esc')\n\ndef sttup(locate):\n print(\"click\", locate)\n pg.click(locate)\n time.sleep(3 * 60)\n\ndef crash(locate):\n print(\"\\ndetect crash\\n\")\n locate = pg.locateCenterOnScreen(\"crash_ok.png\", confidence=0.90)\n click(locate)\n\nd = {\n \"sttup\": {\n \"pic\": \"sttup.png\",\n \"func\": sttup,\n },\n \"creative_button\": {\n \"pic\": \"creative_button.png\",\n \"func\": ckick,\n },\n \"play\": {\n \"pic\": \"play.png\",\n \"func\": ckick,\n },\n \"play2\": {\n \"pic\": \"play2.png\",\n \"func\": ckick,\n },\n \"pickel\": {\n \"pic\": \"pickel.png\",\n \"func\": wait,\n },\n \"leave\": {\n \"pic\": \"leave.png\",\n \"func\": ckick,\n },\n \"leave_from_creative\": {\n \"pic\": \"leave_from_creative.png\",\n \"func\": ckick,\n },\n \"leave_from_creative_red\": {\n \"pic\": \"leave_from_creative_red.png\",\n \"func\": ckick,\n },\n \"kakutoku\": {\n \"pic\": \"kakutoku.png\",\n \"func\": ckick,\n },\n \"kakutoku2\": {\n \"pic\": \"kakutoku2.png\",\n \"func\": ckick,\n },\n \"tojiru\": {\n \"pic\": \"tojiru.png\",\n \"func\": ckick,\n },\n}\n\nwhile True:\n locate = None\n for x in d.keys():\n locate = pg.locateCenterOnScreen(d[x][\"pic\"], confidence=0.90)\n if not locate is None:\n print(\">>> \", x, locate)\n handler(d[x][\"func\"], locate)\n break\n # return x, locate\n if locate is None:\n print(\"targets not found\")\n time.sleep(2)\n\n pg.moveTo(1, 1, duration=random.random()+0.1)\n","sub_path":"f_nite_auto.py","file_name":"f_nite_auto.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"471620965","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport re\nimport sys\nimport requests\n\nisPy2 = sys.version_info < (3, 0)\n\n\nclass ComicSite(object):\n encoding = \"utf-8\"\n header = {\n \"User-Agent\": \"Mozilla/5.0 Gecko/2010 Firefox/5\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n \"Accept-Language\": \"en-us,en;q=0.5\",\n \"Accept-Encoding\": \"deflate\",\n }\n root = os.path.expanduser(\"~\")\n os.chdir(root)\n\n def openTrackList(self):\n p = \"%s-tracklist.txt\" % self.path()\n if not os.path.exists(p):\n open(p, 'a').close()\n if sys.platform == 'darwin':\n os.system('open %s' % p)\n elif sys.platform == 'win32':\n os.system(p)\n\n def trackList(self):\n p = \"%s-tracklist.txt\" % self.path()\n if os.path.exists(p):\n return map(str.strip, open(p).readlines())\n else:\n return []\n\n def execJs(self, s):\n import subprocess\n import tempfile\n fd, path = tempfile.mkstemp(suffix='.js')\n f = os.fdopen(fd, 'w')\n if sys.platform == 'win32':\n s = s.replace('print', 'WScript.Echo')\n f.write(s)\n f.close()\n if sys.platform == 'darwin':\n jsc_cmd = (\"/System/Library/Frameworks/JavaScriptCore.framework/\"\n \"Versions/Current/Resources/jsc \" + path)\n ret = 
subprocess.check_output(jsc_cmd, shell=True).decode('utf-8')\n elif sys.platform == 'win32':\n ret = ''.join(os.popen(\"cscript %s\" % path).readlines()[3:]).strip()\n os.remove(path)\n return ret\n\n def path(self):\n return self.__class__.__name__\n\n def urlopen(self, url, opts=None):\n for i in range(10):\n try:\n headers = self.header.copy()\n if opts:\n headers.update(opts)\n r = requests.get(url, headers=headers, timeout=5)\n r.encoding = self.encoding\n return r.text\n except:\n import traceback\n print('??', i, url)\n print(traceback.format_exc())\n import time\n time.sleep(.5)\n pass\n\n def untag(self, s):\n return re.sub(\"<.*?>\", \"\", s)\n\n def chdir(self, p):\n if isPy2 and not type(p) == unicode:\n p = p.decode('utf-8')\n try:\n os.mkdir(p)\n os.chdir(p)\n return True\n except:\n os.chdir(p)\n return False\n\n def getPic(self, url, opts={}):\n if os.path.exists('/usr/local/bin/wget'):\n for x in range(3):\n if os.system(\n '/usr/local/bin/wget %s -c \"%s\"' % (\n ' '.join(map(lambda i: \"%s %s\" % i, opts.items())), url)) == 0:\n break\n else:\n headers = None\n if '--referer' in opts:\n headers = {'Referer': opts['--referer']}\n open(url.rsplit('/', 1)[-1].split('?', 1)[0], 'wb').write(self.urlopen(url, headers))\n\n def comicPath(self, i):\n return\n\n def toUrl(self, url):\n return\n\n def getCid(self, url):\n return\n\n def getTitle(self, page):\n return\n\n def getVolumnsUrl(self, url, page, skip=0):\n return\n\n def getVolumn(self, url, force=True):\n return\n\n def getAll(self, url, skip=0, force=True):\n self.chdir(self.path())\n url = self.toUrl(url)\n page = self.urlopen(url)\n volumns = self.getVolumnsUrl(url, page, skip)\n cid = self.getCid(url)\n title = self.getTitle(page)\n self.chdir(title)\n\n ch = []\n for urlparam in volumns:\n c = self.getVolumn(urlparam, force)\n if c:\n ch.append(c)\n\n os.chdir('../..')\n\n for c in ch:\n yield (title, cid, c)\n\n def notify(self, updateList):\n import pkgutil\n if not pkgutil.find_loader('pync'):\n return\n try:\n import pync\n t = ' '.join(['%s-%s' % (n[0].split()[0], n[2]) for n in updateList])\n cmd = 'open %s -a /Applications/Simple\\ Comic.app/' % ' '.join(\n map(self.comicPath, updateList)\n )\n if isPy2:\n t = t.encode('utf8')\n cmd = cmd.encode('utf8')\n pync.Notifier.notify(t, title=self.__class__.__name__, sound='ping', execute=cmd)\n except:\n import traceback\n traceback.print_exc()\n\n def getUpdate(self):\n updateList = [comic for i in self.trackList() for comic in self.getAll(i, -2, False)]\n if updateList:\n self.notify(updateList)\n\n\ndef main():\n import comic8\n import dm5\n comic8.comic8().getUpdate()\n dm5.dm5().getUpdate()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/pycomics/ComicSite.py","file_name":"ComicSite.py","file_ext":"py","file_size_in_byte":4921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"575451424","text":"import requests\nfrom bs4 import BeautifulSoup\nimport json\n\n\nBASE_URL = \"https://www.ihbristol.com\"\nEXPRESSIONS_URL = \"https://www.ihbristol.com/useful-english-expressions\"\n\n\ndef buildData():\n blocks = []\n r = requests.get(EXPRESSIONS_URL)\n if r.status_code == 404 or r.text is None:\n print(\"\")\n raise Exception(f\"No content at {EXPRESSIONS_URL}\")\n\n html = BeautifulSoup(r.text, \"html.parser\")\n block_views = html.find_all(\"section\", \"block-views\")\n for block_view in block_views:\n level = block_view.find(\"h2\").text\n rows = block_view.find_all(\"h3\")\n for row in rows:\n 
header = row.find(\"a\").text\n existing_block_i = [\n i for i, b in enumerate(blocks) if b[\"header\"] == header\n ]\n\n if len(existing_block_i) > 0:\n index = existing_block_i[0]\n blocks[index][\"levels\"] = blocks[index][\"levels\"] + [level]\n continue\n\n href = f\"\"\"{BASE_URL}{row.find(\"a\")[\"href\"]}\"\"\"\n sub_page = requests.get(href)\n if sub_page.status_code == 404 or sub_page.text is None:\n print(\"\")\n raise Exception(f\"No content at sub page: {href}\")\n\n sub_html = BeautifulSoup(sub_page.text, \"html.parser\")\n sumary = sub_html.select_one(\"div[property='content:encoded'] > p\").text\n\n print(\"\")\n print(sumary)\n\n expressions = []\n expression_blocks = sub_html.select(\n \".node-useful-expressions > div:nth-child(2) > div:nth-child(1) li\"\n )\n for idx, expression_block in enumerate(expression_blocks):\n expressions.append(f\"\"\"{idx + 1}. {expression_block.text}\"\"\")\n\n howtouses = []\n howtouse_blocks = sub_html.select(\n \".node-useful-expressions > div:nth-child(2) > div:nth-child(2) li\"\n )\n for howtouse_block in howtouse_blocks:\n howtouses.append(howtouse_block.text)\n\n blocks.append(\n {\n \"levels\": [level],\n \"header\": header,\n \"sumary\": sumary,\n \"expressions\": expressions,\n \"howtouses\": howtouses,\n }\n )\n\n for block in blocks:\n search_level_st = f\"{' '.join(block['levels'])} {block['header']}\".lower()\n search_level_nd = f\"{block['sumary']}\".lower()\n search_level_rd = (\n f\"{' '.join(block['expressions'])} {' '.join(block['howtouses'])}\".lower()\n )\n block[\"search_level_st\"] = \" \".join(search_level_st.split())\n block[\"search_level_nd\"] = \" \".join(search_level_nd.split())\n block[\"search_level_rd\"] = \" \".join(search_level_rd.split())\n\n print(blocks)\n with open(\"../app/src/data.json\", \"w\") as f:\n json.dump(blocks, f)\n\n\ndef process():\n try:\n buildData()\n except Exception as e:\n print(\"Exception!!\")\n print(\"\")\n print(e)\n print(\"\")\n print(\"Build data failed!!\")\n\n\nprocess()","sub_path":"buildDataSource/build_data_source.py","file_name":"build_data_source.py","file_ext":"py","file_size_in_byte":3121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"590426399","text":"#While loop\n\n# a structure in python that allows us to loop through and execute a block of code multiple times \n\ni = 1 \nwhile i <= 10: \n print(i)\n i += 1\nprint(\"Done with loop\")\n\n\n#build a guessing game \n\nsecret_word = \"giraffe\"\nguess = \"\"\nguess_count = 0 \nguess_limit = 3 \nout_of_guesses = False\n\nwhile guess != secret_word and not(out_of_guesses):\n if guess_count < guess_limit:\n guess = input(\"Enter guess: \")\n guess_count += 1\n else:\n out_of_guesses = True\n\nif out_of_guesses: \n print(\"Out of Guesses, YOU LOSE!\")\nelse:\n print(\"YOU WIN\")\n\n\n","sub_path":"notes_and_examples/l_while_loop.py","file_name":"l_while_loop.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"364929614","text":"from flask import Flask, render_template, request\nimport datetime, os, math\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_script import Manager\nfrom flask_migrate import Migrate, MigrateCommand\nfrom sqlalchemy import or_\n\napp = Flask(__name__)\n\n# 指定数据库的配置信息,连接到flaskDB的数据库上\napp.config['SQLALCHEMY_DATABASE_URI'] = \"mysql+pymysql://root:123456@127.0.0.1:3306/flaskDB\"\n# 指定信号追踪\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n# 
\napp.config['DEBUG'] = True\n\n# Auto-commit after insert/update/delete operations\napp.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True\n\n# Create the SQLAlchemy instance - db, used for database operations\ndb = SQLAlchemy(app)\n\n# Create a Manager instance and tell it which app to manage\nmanager = Manager(app)\n# Create the Migrate object and associate it with the app and db\nmigrate = Migrate(app, db)\n# Register the data-migration subcommand with the manager\nmanager.add_command('db', MigrateCommand)\n\n\n# Entity class Users, mapped to the users table in the database\n# Field id: primary key, auto-increment\n# Field username: string of length 80, not null, unique, indexed\n# Field age: integer, nullable\n# Field email: string of length 120, unique\nclass Users(db.Model):\n    __tablename__ = \"users\"\n\n    id = db.Column(\n        db.Integer, primary_key=True\n    )\n\n    username = db.Column(\n        db.String(80),  # length 80\n        nullable=False,  # not null\n        unique=True,  # unique\n        index=True,  # indexed\n    )\n\n    age = db.Column(\n        db.Integer,\n        nullable=True  # nullable\n    )\n\n    email = db.Column(\n        db.String(120),\n        unique=True\n    )\n\n    # Extra field isActive, defaulting to True\n    isActive = db.Column(db.Boolean, default=True)\n\n    def __repr__(self):\n        return \"<Users %s>\" % self.username\n\n\n# Student entity class\nclass Student(db.Model):\n    __tablename__ = \"student\"\n    id = db.Column(db.Integer, primary_key=True)\n    sname = db.Column(db.String(30), nullable=False)\n    sage = db.Column(db.Integer, nullable=False)\n    isActive = db.Column(db.Boolean, default=True)\n\n\n# Teacher entity class\nclass Teacher(db.Model):\n    __tablename__ = \"teacher\"\n    id = db.Column(db.Integer, primary_key=True)\n    tname = db.Column(db.String(30), nullable=False)\n    tage = db.Column(db.Integer, nullable=True)\n\n\n# Course entity class\nclass Course(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    cname = db.Column(db.String(30), nullable=False)\n\n\n# db.drop_all()\n# Effect: drops all table structures\n# db.drop_all()\n\n\n# db.create_all()\n# Effect: generates a table for every entity class\n# Precondition: a table is only generated if it does not already exist\n# db.create_all()\n\ndef generate_timestr():\n    \"\"\"\n    Build a timestamp string from the current system date and time\n    :return: string made of year month day hour minute second microsecond\n    \"\"\"\n    s = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S%f\")\n    return s\n\n\ndef getext(filename):\n    \"\"\"\n    Return the file extension for the given file name\n    :param filename: the file name passed in\n    :return: the file extension\n    \"\"\"\n    ext = filename.split('.')[-1]\n    return ext\n\n\n@app.route('/01-file', methods=['GET', 'POST'])\ndef file_views():\n    if request.method == 'GET':\n        return render_template('01-file.html')\n    else:\n        uname = request.form['uname']\n        if request.files:\n            f = request.files['uimg']\n            # Upload using the original file name directly\n            # f.save('static/'+f.filename)\n\n            # Use a timestamp as the file name : timestamp.extension\n\n            # Get the system time\n            ftime = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S%f\")\n            # Get the file extension\n            ext = f.filename.split('.')[-1]\n            # Join ftime and ext together\n            filename = ftime + '.' + ext\n            # Save the file under the relative static directory\n            # f.save('static/'+filename)\n\n            # Resolve the absolute path of static from this file's location\n            basedir = os.path.dirname(__file__)\n            # Build the full save path\n            upload_path = os.path.join(basedir, 'static', filename)\n            f.save(upload_path)\n        return \"Data processed successfully\"\n\n\n@app.route('/02-release', methods=['GET', 'POST'])\ndef release():\n    if request.method == 'GET':\n        return render_template('02-release.html')\n    else:\n        title = request.form['title']\n        type = request.form['type']\n        content = request.form['content']\n        print(\"Title: %s, type: %s, content: %s\" % (title, type, content))\n\n        if request.files:\n            file = request.files['pic']\n            ftime = generate_timestr()\n            ext = getext(file.filename)\n            filename = ftime + '.' 
+ ext\n            base_dir = os.path.dirname(__file__)\n            upload_path = os.path.join(base_dir, 'static/upload', filename)\n            file.save(upload_path)\n            print(\"Upload path: \" + upload_path)\n        return \"Blog post published successfully\"\n\n\n@app.route('/03-add')\ndef add_views():\n    user = Users()\n    user.username = '老魏'\n    user.age = 30\n    user.email = \"laowei@163.com\"\n\n    db.session.add(user)\n    # db.session.commit()\n    return \"Data added successfully\"\n\n\n@app.route('/04-register', methods=['GET', 'POST'])\ndef register():\n    if request.method == 'GET':\n        return render_template('04-register.html')\n    else:\n        # Receive the data from the front end\n        username = request.form['username']\n        age = request.form['age']\n        email = request.form['email']\n        isActive = False\n        if 'isActive' in request.form:\n            isActive = True\n        # Create a Users object and assign the values\n        user = Users()\n        user.username = username\n        user.email = email\n        user.age = age\n        user.isActive = isActive\n\n        # Save the Users object back to the database\n        db.session.add(user)\n        return \"Data added successfully\"\n\n\n@app.route('/05-query')\ndef query_views():\n    # 1. Test the db.session.query() method\n    # query = db.session.query(Users)\n    # print(query)\n    # print(\"type:\", type(query))\n\n    # 2. Query all rows in the users table\n    # users = db.session.query(Users).all()\n    # for user in users:\n    #     # user is one Users object\n    #     print(\"id:%s, name:%s, age:%s, email:%s, active:%s\" % (user.id,user.username,user.age,user.email,user.isActive))\n\n    # 3. Query the first row in the users table and print it\n    user = db.session.query(Users).first()\n    print(\"id:%s, name:%s, age:%s, email:%s, active:%s\" % (user.id, user.username, user.age, user.email, user.isActive))\n    # 4. Count how many rows the users table contains\n    count = db.session.query(Users).count()\n    print(\"The users table contains %d rows\" % count)\n\n    return \"Query succeeded\"\n\n\n@app.route('/06-filter')\ndef filter_views():\n    # 1. Test the filter method and its return value\n    # 1. Query Users whose age is greater than 30\n    # result = db.session.query(Users).filter(Users.age>30)\n    # print(result)\n    # print(\"type:\",type(result))\n\n    # 2. Query users whose email contains \"ao\"\n    # users = db.session.query(Users).filter(\n    #     Users.email.like(\"%ao%\")\n    # ).all()\n    # print(users)\n\n    # 3. Use filter_by to query users whose age equals 30\n    users = db.session.query(Users).filter_by(age=30).all()\n    print(users)\n    return \"Filter query succeeded\"\n\n\n@app.route('/07-query', methods=['GET', 'POST'])\ndef query07_views():\n    if request.method == 'GET':\n        users = db.session.query(Users).all()\n        return render_template('07-query.html', users=users)\n    else:\n        kw = request.form['kw']\n        users = db.session.query(Users).filter(\n            or_(\n                Users.username.like('%' + kw + '%'),\n                Users.email.like('%' + kw + '%')\n            )\n        ).all()\n        return render_template('07-query.html', users=users, kw=kw)\n\n\n@app.route('/08-page')\ndef page_views():\n    # 1. Records per page - pageSize\n    pageSize = 2\n    # 2. The page currently requested - page\n    # Read the page parameter from the request; default to 1 when absent\n    page = int(request.args.get('page',1))\n\n    # Query the data for page number page\n    # Skip (page-1)*pageSize rows, then take the next pageSize rows\n    # ost: the number of rows to skip, computed from page and pageSize\n    ost = (page - 1) * pageSize\n\n    # Fetch the matching rows using pageSize and ost\n    users = db.session.query(Users).limit(pageSize).offset(ost).all()\n\n    # Compute the last page number\n    # from pageSize and the total record count\n    totalCount = db.session.query(Users).count()\n    lastPage = math.ceil(totalCount / pageSize)\n\n    # Compute the previous page number\n    # If page > 1 the previous page is page-1, otherwise it is 1; store it in prevPage\n    prevPage = 1\n    if page > 1:\n        prevPage = page - 1\n\n    # Compute the next page number\n    # If page < lastPage the next page is page+1, otherwise it is lastPage; store it in nextPage\n    nextPage = lastPage\n    if page < lastPage:\n        nextPage = page + 1\n\n    return render_template('08-page.html', users=users, prevPage=prevPage, nextPage=nextPage, lastPage=lastPage)\n\n\nif __name__ == \"__main__\":\n    # app.run(debug=True)\n    
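# running through the Manager also exposes the 'db' migration subcommand registered above\n    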
manager.run()\n","sub_path":"NOTE/12_Flask/day06/FlaskDemo04/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":9350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"617023131","text":"# !/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\"\"\"\nERP+\n\"\"\"\n__author__ = 'António Anacleto'\n__credits__ = []\n__version__ = \"1.0\"\n__maintainer__ = \"António Anacleto\"\n__status__ = \"Development\"\n__model_name__ = 'xml_anexo_reg_fornecedor_m106.XMLAnexoRegFornecedorM106'\nimport auth, base_models\nfrom orm import *\nfrom form import *\n\n\nclass XMLAnexoRegFornecedorM106(Model, View):\n    def __init__(self, **kargs):\n        # later: also filter here between dates and for a single journal or period, etc.\n        Model.__init__(self, **kargs)\n        self.__name__ = 'xml_anexo_reg_fornecedor_m106'\n        self.__title__ = 'Anexos Reg. Fornecedores - MOD 106'\n        self.__model_name__ = __model_name__\n        self.__list_edit_mode__ = 'edit'\n        self.__get_options__ = ['nome']\n\n        \n        self.__workflow_auth__ = {\n            'Visualizar':['All'],\n            'Gerar':['All'],\n            'Rascunho':['All'],\n            'Confirmar':['All'],\n            'full_access':['All']\n            }  \n\n        self.__auth__ = {\n            'read':['All'],\n            'write':['All'],\n            'create':['All'],\n            'delete':['All'],\n            'full_access':['All']\n        }\n\n        \n        self.nome = string_field(view_order = 1, name = 'Nome do documento', size = 60, args = 'readonly')\n\n        self.xml_modelo_106 = parent_field(view_order = 2, name = 'Modelo 106', hidden=True, model_name = 'xml_modelo_106.XMLModelo106',nolabel=True, onlist = False)  \n\n        self.nif_entidade = string_field(view_order=4, name='Nif', size=45, args = 'readonly')\n\n        self.ano = string_field(view_order = 5, name ='Ano', size=45, args = 'readonly')\n\n        self.mes = string_field(view_order = 6, name ='Mês', size=45, args = 'readonly')\n\n        self.area_fiscal = string_field(view_order = 7, name = 'Área Fiscal', size = 50, args = 'readonly')\n\n        # annex line items\n        self.xml_linha_anexo_reg_fornecedor_m106 = list_field(view_order = 8, name = 'Linhas', condition = \"xml_anexo_reg_fornecedor_m106='{id}'\", model_name = 'xml_linha_anexo_reg_fornecedor_m106.LinhaAnexoRegFornecedor', list_edit_mode = 'inline', onlist = False)\n        \n        self.data_entrega = string_field(view_order=9, name ='Data Entrega', args = 'readonly')\n        \n        self.total_factura = string_field(view_order = 10, name = 'Total Facturas', size = 45,args = 'readonly')\n        \n        self.total_base_incidencia = string_field(view_order = 11, name = 'Total Incidência', size = 45, args = 'readonly')\n\n        self.total_suportado = string_field(view_order = 12, name = 'Total Suportado', size = 45, args = 'readonly')  \n\n        self.total_dedutivel = string_field(view_order = 13, name = 'Total Dedutivel', size = 45, args = 'readonly')\n\n        self.estado = info_field(view_order = 14, name ='Estado', default='Rascunho', args = 'readonly')\n\n        self.xml_gerado = text_field(view_order = 15,name='Conteudo XML Gerado', size=100, args='readonly', onlist=False)","sub_path":"core/objs/xml_anexo_reg_fornecedor_m106.py","file_name":"xml_anexo_reg_fornecedor_m106.py","file_ext":"py","file_size_in_byte":2998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"630715893","text":"import numpy as 
np\n\ndata=np.loadtxt(\"RoeEntrFix_cfl0.1.dat\")\nroe1_entrfix_xc_arr=data[:,0]\nroe1_entrfix_rho_arr=data[:,1]\nroe1_entrfix_u_arr=data[:,2]\nroe1_entrfix_p_arr=data[:,3]\n\ndata=np.loadtxt(\"AUSM_cfl0.1.dat\")\nausm1_xc_arr=data[:,0]\nausm1_rho_arr=data[:,1]\nausm1_u_arr=data[:,2]\nausm1_p_arr=data[:,3]\n\nimport matplotlib.pyplot as plt\nplt.style.use('sjc')\n\nroe1_noentrfix_label=\"Roe,NoEntropyFix,CFL=0.1\"\nroe3_noentrfix_label=\"Roe,NoEntropyFix,CFL=0.3\"\nroe9_noentrfix_label=\"Roe,NoEntropyFix,CFL=0.9\"\nroe1_entrfix_label=\"Roe,EntropyFix,CFL=0.1\"\nroe3_entrfix_label=\"Roe,EntropyFix,CFL=0.3\"\nroe9_entrfix_label=\"Roe,EntropyFix,CFL=0.9\"\nausm1_label=\"AUSM,CFL=0.1\"\nausm3_label=\"AUSM,CFL=0.3\"\n\nds_step=4\nds_longshort_1=[ds_step*2,ds_step,ds_step*4,ds_step]\nds_step=2\nds_longshort_2=[ds_step*2,ds_step,ds_step*4,ds_step]\nds_step=4\nds_shortlong_1=[ds_step,ds_step*2,ds_step,ds_step*2]\nds_step=2\nds_shortlong_2=[ds_step,ds_step*2,ds_step,ds_step*2]\n\nxlim=[-1.0,1.0]\nylim=[-0.05,1.0]\n\n# Compare AUSM against Roe-EntropyFix\nfig=plt.figure()\nax=fig.gca()\nax.plot(ausm1_xc_arr,ausm1_rho_arr,'-',label=ausm1_label)\nax.plot(roe1_entrfix_xc_arr,roe1_entrfix_rho_arr,':',label=roe1_entrfix_label)\nax.set_xlabel(\"X\")\nax.set_ylabel(r\"$\\rho$\")\nax.legend()\nax.set_xlim(xlim)\nax.set_ylim(ylim)\nplt.savefig(\"AUSMRoe_rho.png\")\n\nfig=plt.figure()\nax=fig.gca()\nax.plot(ausm1_xc_arr,ausm1_u_arr,'-',label=ausm1_label)\nax.plot(roe1_entrfix_xc_arr,roe1_entrfix_u_arr,':',label=roe1_entrfix_label)\nax.set_xlabel(\"X\")\nax.set_ylabel(\"u\")\nax.legend()\nax.set_xlim(xlim)\nax.set_ylim(ylim)\nplt.savefig(\"AUSMRoe_u.png\")\n\nfig=plt.figure()\nax=fig.gca()\nax.plot(ausm1_xc_arr,ausm1_p_arr,'-',label=ausm1_label)\nax.plot(roe1_entrfix_xc_arr,roe1_entrfix_p_arr,':',label=roe1_entrfix_label)\nax.set_xlabel(\"X\")\nax.set_ylabel(\"p\")\nax.legend()\nax.set_xlim(xlim)\nax.set_ylim(ylim)\nplt.savefig(\"AUSMRoe_p.png\")\n\n","sub_path":"cmp_results.py","file_name":"cmp_results.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"611902565","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as f:\n long_description = f.read()\n\nsetuptools.setup(\n name=\"redditmirror\",\n version=\"1.0.0\",\n author=\"Sam McCreery\",\n author_email=\"4602020+mccreery@users.noreply.github.com\",\n description=\"Takes posts from your Reddit saved tab and x-posts them to a subreddit of your choice.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n license=\"MIT\",\n url=\"https://github.com/mccreery/reddit-mirror\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Operating System :: OS Independent\",\n \"License :: OSI Approved :: MIT License\"\n ],\n install_requires=\"praw>=6.0\"\n)\n","sub_path":"pypi_install_script/redditmirror-1.0.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"455800714","text":"import pymysql\nfrom config import db_config,args_curl,args_ping,users\nfrom flask_login import UserMixin\nfrom . 
import db,login_manager\n\n\nclass User(UserMixin,db.Model):\n __tablename__ = 'users'\n user_id = db.Column(db.Integer,primary_key=True)\n user_name = db.Column(db.String(64),unique=True,index=True)\n user_passwd = db.Column(db.String(256),index=True)\n user_admin = db.Column(db.Integer)\n @property\n def id(self):\n return self.user_id\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\n\ndef data_query(tables):\n con = pymysql.connect(**db_config)\n cursor = con.cursor()\n if tables == \"users\":\n try:\n sql = \"\"\"select * from users\"\"\"\n cursor.execute(sql)\n except:\n print(\"There is no table named users\")\n con.rollback()\n row_users = cursor.fetchall()\n user_list=list()\n for i in range(len(row_users)):\n user_dict = dict()\n for j in range(len(users)):\n user_dict[users[j]]=row_users[i][j]\n user_list.append(user_dict)\n return user_list\n elif tables == \"args_ping\":\n try:\n sql = \"\"\"select * from args_ping\"\"\"\n cursor.execute(sql)\n except:\n print(\"There is no table named args_ping\")\n con.rollback()\n row_ping = cursor.fetchall()\n # con.close()\n ping_list = list()\n for i in range(len(row_ping)):\n ping_dict = dict()\n for j in range(len(args_ping)):\n if j==0:\n continue\n ping_dict[args_ping[j]] = row_ping[i][j]\n ping_list.append(ping_dict)\n return ping_list\n\n elif tables == \"args_curl\":\n try:\n sql = \"\"\"select * from args_curl \"\"\"\n cursor.execute(sql)\n except:\n print(\"There is no table named args_curl\")\n con.rollback()\n row_curl = cursor.fetchall()\n # con.close()\n curl_list = []\n for i in range(len(row_curl)):\n curl_dict = {}\n for j in range(len(args_curl)):\n curl_dict[args_curl[j]] = row_curl[i][j]\n curl_list.append(curl_dict)\n return curl_list\n\n\ndef insert_tables(tables,*kwargs):\n con = pymysql.connect(**db_config)\n cursor = con.cursor()\n print (tables)\n if tables == \"args_curl\":\n try:\n sql = \"INSERT INTO args_curl VALUES ('%d','%d','%s','%d')\" % (kwargs[0],kwargs[1],kwargs[2],kwargs[3])\n cursor.execute(sql)\n con.commit()\n except:\n con.rollback()\n elif tables == \"args_ping\":\n try:\n sql = \"\"\"INSERT INTO args_ping VALUES(\"%s\",\"%s\",\"%s\",\"%s\",\"%s\",\"%s\")\"\"\" % (kwargs[0],kwargs[1],kwargs[2],kwargs[3],kwargs[4],kwargs[5])\n cursor.execute(sql)\n con.commit()\n except:\n con.rollback()\n\n\n\n\n\n\n","sub_path":"app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"45086285","text":"from PyQt5 import QtWidgets as qtw\nfrom PyQt5 import QtCore as qtc\n\nimport arrowArc\nimport misc\n\n# base class for data storage object for sequence feature\n\nclass Feature(qtw.QGraphicsPathItem):\n\t'''Base abstract class for storing features and all related items including\n\tQTreeWidgetItem, QGraphicsPathItem, and others.'''\n\n\t# class level storage container and variables\n\tall_features = []\n\tradius = 200\n\n\tdef __init__(self,feature,track,gb,label=False):\n\t\tsuper().__init__()\n\n\t\tstart = -(feature.location.start.position + 1) / len(gb) * 360\n\t\tend = -feature.location.end.position / len(gb) * 360\n\t\tif feature.strand == 1:\n\t\t\tnewCustomPath = arrowArc.arrowArc(self.radius,self.radius,\n\t\t\t\t\tself.radius,track,.1,start,end,label=label)\n\t\telif feature.strand == -1:\n\t\t\tnewCustomPath = arrowArc.arrowArc(self.radius,self.radius,\n\t\t\t\t\tself.radius,track,.1,end,start,label=label)\n\n\t\tself.args = 
newCustomPath.__init__.args\n\t\tself.setPath(newCustomPath)\n\n\tdef createTreeItem(self,feature):\n\t\tpass\n\n\tdef createTreeLabel(self,feature):\n\t\tpass\n\n\tdef createGraphicsItem(self,feature):\n\t\t'''Creates QTreeWidgetItem, inserts into docWindow.treeNodes. Input is\n\t\tSeqFeature object.'''\n\n\t\tdef create(feature,track=0,gb=None,label=False):\n\t\t\t'''Helper function for abstraction.'''\n\t\t\tstart = -(feature.location.start.position + 1) / len(gb) * 360\n\t\t\tend = -feature.location.end.position / len(gb) * 360\n\t\t\tif feature.strand == 1:\n\t\t\t\tnewCustomPath = arrowArc.arrowArc(self.radius,self.radius,\n\t\t\t\t\t\tself.radius,track,.1,start,end,label=label)\n\t\t\telif feature.strand == -1:\n\t\t\t\tnewCustomPath = arrowArc.arrowArc(self.radius,self.radius,\n\t\t\t\t\t\tself.radius,track,.1,end,start,label=label)\n\t\t\t#print('init args: {}'.format(newCustomPath.__init__.args))\n\t\t\tnewItem = qtw.QGraphicsPathItem()\n\t\t\tnewItem.args = newCustomPath.__init__.args\n\t\t\tnewItem.setPath(newCustomPath)\n\t\t\treturn(newItem)\n\n\t\t@misc.debug\n\t\tdef getColliders(graphicsItem):\n\t\t\t'''Input variable is QGraphicsPathItem.'''\n\t\t\tassert isinstance(graphicsItem,qtw.QGraphicsItem)\n\t\t\tcolliders = graphicsItem.collidingItems()\n\t\t\treturn(colliders)\n\n\t\tdef arrange(objs):\n\t\t\t'''Input is list of QPainterPaths'''\n\t\t\t\n\n\t\tnew = create(feature,0,self.doc_window.gb)\n\t\tprint('new: {}'.format(new))\n\t\tself.doc_window.scene.addItem(new)\n\t\tcolliders = getColliders(new)\n\t\tprint('colliders: {}'.format(colliders))\n\t\tfeat_objects = [x for x in self.all_features if x.graphics_path\n\t\t\t\t\tin colliders]\n\t\tprint('list: {}'.format(feat_objects))\n\t\tfeat_objects.append(new)\n\t\t#feat_objects.sort(key = lambda s:len(s.seq_feature),reverse=True)\n\n\t\tif len(colliders) != 0:\n\t\t\tarrange(feat_objects)\n\n\t\treturn(new)\n","sub_path":"Feature.py","file_name":"Feature.py","file_ext":"py","file_size_in_byte":2627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"136593666","text":"import datetime\nimport tushare as ts\nimport pymysql\nimport baostock as bs\nimport pandas as pd\nimport time\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec# split subplots\nimport numpy as np\nfrom scipy import signal # filtering etc.\nimport matplotlib.dates as mdates # date handling\nimport mpl_finance as mpf\nimport talib\nfrom matplotlib.pylab import date2num\nimport matplotlib.ticker as ticker\nfrom sympy import *\nimport os\nfrom strategy import *\nfrom myOperator2 import *\n\ndef myRead_csv(_code):\n    code=_code\n    filename3='G:\\\\stockData\\\\myData\\\\'+code\n    try:\n        df = pd.read_csv(filename3,encoding='utf_8_sig')# index_col=False keeps the first column out of the index ,converters={'trade_date':str}\n        pass\n    except Exception as err:\n        print('myRead_csv err', err)\n    return df\ndef myPlot(_code):# plot a single candlestick chart\n    code=_code\n    stock= myRead_csv(code)\n    start = time.perf_counter()# start timing\n    ### plotting starts\n    spaceDays=10 # tick spacing in days\n    # stock=stock[stock['date']>'2016-01-01']# select the backtest cutoff date\n    stock =stock.reset_index(drop=True)# drop the old index and renumber from 0\n    length=len(stock)\n    quotes = []\n    for row in range(length):\n        sdate_plt = stock.index.values[row] # extract a single index value \n        sopen = stock.loc[row,'open']\n        shigh = stock.loc[row,'high']\n        slow = stock.loc[row,'low']\n        sclose = stock.loc[row,'close']\n        datas = (sdate_plt,sopen,shigh,slow,sclose) # arrange the data as candlestick_ohlc expects\n        quotes.append(datas)\n    x_ticks = [i[0] for i in quotes]# take the first column out of the list of tuples \n\n    
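# --- figure setup and main candlestick panel ---\n    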
plt.rcParams['font.sans-serif']=['SimHei'] # render Chinese labels correctly \n    plt.rcParams['axes.unicode_minus']=False # render minus signs correctly \n    fig = plt.figure(figsize=(20,12), dpi=100,facecolor=\"white\") # create the figure object\n    gs = gridspec.GridSpec(1, 1, left=0.05, bottom=0.12, right=0.98, top=0.96, wspace=None, hspace=0.2)#, height_ratios=[3.5,1,1,1]\n    graph_KAV = fig.add_subplot(gs[0,:])\n\n    # add a grid\n    graph_KAV.grid(True) \n    graph_KAV.set_title(code)\n    mpf.candlestick_ohlc(graph_KAV,quotes,width=0.5,colorup='r',colordown='green') # red candles for up days, green for down; candle width 0.5\n\n    graph_KAV.plot(x_ticks,stock['MA5'],'b')\n    # graph_KAV.plot(x_ticks,stock['MA10'] ,'y')# plot the 5/10 golden cross\n    graph_KAV.plot(x_ticks,stock['MA120'],'k')\n    graph_KAV.plot(x_ticks,stock['middleBoll'],'m--')# new lines can be added this way\n    # graph_KAV.plot(x_ticks,stock['upBoll'],'r')\n    # graph_KAV.plot(x_ticks,stock['downBoll'],'g')\n    # graph_KAV.plot(x_ticks, stock['buyBoll'],'y.',markersize=11,label = \"buyBoll\")# mark buy points\n    # graph_KAV.plot(x_ticks, stock['sellBoll'],'k.',markersize=11)# mark sell points ,label = \"sellBoll\"\n    graph_KAV.plot(x_ticks,stock['buyBIAS120'] ,'y+',markersize=18,label = \"buyBIAS120\")# plot points where the bias ratio is large\n    graph_KAV.plot(x_ticks,stock['sellBIAS120'] ,'k+',markersize=18)# plot points where the bias ratio is large ,label = \"sellBIAS120\"\n    graph_KAV.plot(x_ticks, stock['turtleBuy'],'y*',markersize=9,label = \"turtleBuy\")\n    graph_KAV.plot(x_ticks, stock['turtleSell'],'k*',markersize=9)# ,label = \"turtleSell\"\n    graph_KAV.plot(x_ticks, stock['bandBuy'],'y^',markersize=9,label = \"bandBuy\")\n    graph_KAV.plot(x_ticks, stock['bandSell'],'kv',markersize=9)# ,label = \"peak\"\n    # draw KDJ on the same chart\n    b=stock['low'].min()# shift below the minimum value\n    b=0.618 *b # golden ratio\n    coef=(stock.loc[stock.index.max(),'close'])# scaling factor\n    coef=0.1 *coef\n    # graph_KAV.plot(np.arange(0, length), coef*stock['K']+b, 'y', label='K')  # K\n    # graph_KAV.plot(np.arange(0, length), coef*stock['D']+b, 'c-', label='D')  # D\n    # graph_KAV.plot(np.arange(0, length), coef*stock['J']+b, 'm-', label='J')  # J\n    # graph_KAV.axhline(y=coef*0.8+b, color='r', linestyle='-')# overbought line\n    # graph_KAV.axhline(y=coef*0.2+b, color='g', linestyle='-')# oversold line\n    # draw MACD on the same chart\n    # normalize into (-1,1)\n    macd_dif_max=stock['macd_dif'].max()\n    macd_dif_min=stock['macd_dif'].min()\n    stock['macd_dif'] = -1 + 2 / (macd_dif_max - macd_dif_min) * (stock['macd_dif'] - macd_dif_min)\n    macd_dea_max=stock['macd_dea'].max()\n    macd_dea_min=stock['macd_dea'].min()\n    stock['macd_dea'] = -1 + 2 / (macd_dea_max - macd_dea_min) * (stock['macd_dea'] - macd_dea_min)\n    macd_bar_max=stock['macd_bar'].max()\n    macd_bar_min=stock['macd_bar'].min()\n    stock['macd_bar'] = -1 + 2 / (macd_bar_max - macd_bar_min) * (stock['macd_bar'] - macd_bar_min)\n    \n    b=stock['low'].min()# shift below the minimum value\n    b=0.382 *b\n    coef=(stock.loc[stock.index.max(),'close'])# scaling factor\n    coef=0.0618 *coef\n    # graph_KAV.plot(np.arange(0, length), coef*stock['macd_dif']+b, 'red', label='macd_dif')  # dif\n    # graph_KAV.plot(np.arange(0, length), coef*stock['macd_dea']+b, 'blue', label='macd_dea')  # dea\n    # stock['bar_red'] = np.where(stock['macd_bar'] > 0, stock['macd_bar'], 0)# bar chart for BAR>0\n    # stock['bar_green'] = np.where(stock['macd_bar'] < 0, stock['macd_bar'], 0)# bar chart for BAR<0\n    # graph_KAV.bar(np.arange(0, length), coef*stock['bar_red'], bottom=b, facecolor='red')# shift the zero axis up; bottom=b\n    # graph_KAV.bar(np.arange(0, length), coef*stock['bar_green'], bottom=b,facecolor='green')\n    ###### visual check of actual trade points: start ##########    \n    filename3='G:\\\\stockData\\\\tradeInfo\\\\'+code[:6]+'.csv'\n    ex=os.path.isfile(filename3) # does the file exist\n    if ex==True:\n        try:\n            df3=pd.read_csv(filename3,encoding='utf_8_sig')#\n            df3=df3[(df3['bandBuy']>0) | 
(df3['bandSell']>0)]\n            stock['trade']=stock[stock['date'].isin(list(df3['date']))]['close']\n            graph_KAV.plot(x_ticks,stock['trade'] ,'c+',markersize=15,label = \"trade\")# show the actual buy/sell points\n        except Exception as err:\n            print('read_csv err', err)\n    ## end of actual-trade visualization ########    \n    # graph_KAV.set_xlim(0,length)  # set the x-axis range\n    graph_KAV.legend(loc='best', shadow=True, fontsize='10') # legend for the main chart  \n    # build the x-axis tick labels\n    date_tickers=stock.date.values# dates\n    def format_date(x,pos=None):# callback: map the integer x tick back to a y-m-d date\n        if x<0 or x>len(date_tickers)-1:\n            return ''\n        return date_tickers[int(x)]\n    graph_KAV.xaxis.set_major_locator(ticker.MultipleLocator(spaceDays))# original tick spacing was 6\n    graph_KAV.xaxis.set_major_formatter(ticker.FuncFormatter(format_date))# reapply the date format on the x axis\n\n    for label in graph_KAV.xaxis.get_ticklabels():\n        label.set_rotation(45)\n        label.set_fontsize(11)  # set the label font size\n\n    plt.legend()\n    plt.show()\n########\ndef everyDayOperatorT(_date):## merge all trade data; buy/sell info is stored per stock, so it has to be merged\n    path1='G:\\\\stockData\\\\tradeInfo\\\\'\n    path2='G:\\\\stockData\\\\test\\\\'\n    files= os.listdir(path1) # list all file names in the folder\n    for code in files: # iterate over the files\n        print(code+' start')\n        filename=path1+code   \n        try:    \n            df=pd.read_csv(filename)#,encoding='utf_8_sig'\n            df=df.drop_duplicates()# drop duplicate rows\n        except Exception as err:\n            print(filename+'read_csv err', err)\n        # df=df.tail(1)\n        df=df[(df['date']==_date) & (df['buy']> 0)]\n        if len(df)==0:\n            continue\n        ex=os.path.isfile(path2+_date+'.csv') # if the file exists append to it, otherwise create it\n        if ex==False :  \n            df.to_csv(path2+_date+'.csv',index=None,encoding='utf_8_sig')# create the file and save the data  \n        else :\n            df.to_csv(path2+_date+'.csv',mode='a',index=None,header=False,encoding='utf_8_sig')# append the data to the end of the file  \n    print(_date+':everydayList finish')\n# everyDayOperatorT('2020-12-08')\n\ndef buy_sellResult(df,buyName):# summarize backtest buy/sell statistics\n    df=df[(df['buy']>0) | (df['sell']>0)]# keep only trade rows\n    df =df.reset_index(drop=True)# drop the old index and renumber from 0  \n    df['profit_curve'] = df['profit'].cumsum()# equity curve\n    allTimes = len(df[(df['sell']>0)])  \n    if allTimes==0:\n        return\n    df.loc[0,'allTimes']= allTimes # use the sell count as the total trade count\n    winTimes= len(df[(df['profit']>0)])\n    df.loc[0,'winTimes']= winTimes # number of winning trades\n    df.loc[0,'winRate']= winTimes / allTimes# win rate\n    df.loc[0,'allMoney']= df[df['sell']>0]['money'].sum()# total profit amount\n    df.loc[0,'allProfits']= df['profit'].sum()# total profit amount\n    df.loc[0,'Risk']= (df['profit'].sum() / allTimes) / 800 # Risk: profit/loss ratio; 800 = 200000 * 0.4%, i.e. total capital * risk factor\n    q=df['days'].sum()\n    df.loc[0,'daysMean']= (df['days'].sum() / allTimes)# average holding days\n    df.loc[0,'maxWin']= df['profit'].max()\n    df.loc[0,'maxLose']= df['profit'].min()\n    tempMax = -9999999\n    maxReturn = -9999999# max drawdown\n    for row in range(len(df)):#\n        if df.loc[row,'profit_curve'] > tempMax:\n            tempMax = df.loc[row,'profit_curve']\n        if df.loc[row,'profit_curve'] <= tempMax:\n            t= (tempMax - df.loc[row,'profit_curve'])\n            if t > maxReturn:\n                maxReturn = t\n    df.loc[0,'maxReturn']= maxReturn # max drawdown\n    code =(df.loc[0,'ts_code'])\n    filename='G:\\\\stockData\\\\tradeInfo\\\\'+ code + '_'+buyName+'.csv'\n    df.to_csv(filename,index=None,encoding='utf_8_sig')# create the file and save the data\n    # df=df.loc[0,:] # selecting a single row returns a Series by default\n    df=df.loc[[0]] # this returns a DataFrame instead\n    df=df[['ts_code','allTimes','winTimes','winRate','allMoney','allProfits','Risk','daysMean','maxWin','maxLose','maxReturn']]\n    # df= df[df['allTimes']>0]\n    filename = 'G:\\\\stockData\\\\test\\\\'+buyName+'.csv'\n    ex=os.path.isfile(filename) # if the file exists append to it, otherwise create it\n    if ex==False:  \n        df.to_csv(filename,index=None,encoding='utf_8_sig')# create the file and save the data  \n    else :\n        pass\n        df.to_csv(filename,mode='a',index=None,header=False,encoding='utf_8_sig')# append the data to the end of the file\n    # df1 = 
pd.read_csv(filename,encoding='utf_8_sig')# index_col=0 would reuse the first column as the index\n    # df1=df.drop_duplicates(['ts_code']) # drop duplicate rows\n    # df1.to_csv(filename,index=None,encoding='utf_8_sig')# create the file and save the data\n\n\ndef myFor():# show all backtest data under the myData folder\n    # path1='G:\\\\stockData\\\\myData\\\\'\n    path1='G:\\\\stockData\\\\tradeInfo\\\\'\n    files= os.listdir(path1) # list all file names in the folder\n    files=[(x[:6] + '.csv') for x in files]\n    files=list(set(files))# set removes duplicates\n    # files=['000629.csv']\n    for code in files: # iterate over the files\n        myPlot(code)\n# myFor()\n\ndef myFor1():# show finished backtest results that have trade data\n    # everyDayOperatorT('test') # merge per-stock buy/sell data; merging no longer needed - 20201119\n    filename='G:\\\\stockData\\\\test\\\\120up&all_macd金叉死叉2.csv'\n    try:\n        df=pd.read_csv(filename,encoding='utf_8_sig')# create the file and save the data\n    except Exception as err:\n        print('myFor1 read_csv err', err)  \n    codeList=list(df['ts_code'])\n    for code in codeList: # iterate over the codes\n        code=code[:6]+'.csv'\n        myPlot(code)\nmyFor1()\n\ndef myFor3(filename):# backtest statistics: per-stock 30-year win rate, trade count, profit/loss ratio R and total profit; sort by R and total profit and plot the equity curve\n    start = time.perf_counter()# start timing\n    print('count start....')\n    path1='G:\\\\stockData\\\\myData\\\\'\n    filename='allStockList'  # allStockList sz50s zz500s hs300s\n    codeList= stockList(filename) # get all codes in the target stock pool\n    for code in codeList: # iterate over the codes\n        try:\n            code= code+'.csv'\n            print(code + ' count start....')\n            df= myRead_csv(code)\n            df1=df.copy()\n            #band\n            df= df[['ts_code','date','ATR','top_level','buttom_level','bandBuy','bandSell','band_num','band_money','band_tax','band_profit','band_days']]\n            df= df.rename(columns={'bandBuy': 'buy','bandSell': 'sell','band_num': 'num','band_money': 'money','band_tax': 'tax','band_profit': 'profit','band_days': 'days'})# selectively rename columns\n            buyName='120up&all_macd金叉死叉2'\n            buy_sellResult(df,buyName)#band\n            #21 turtle\n            df1= df1[['ts_code','date','ATR','turtleBuy','turtleSell','turtle_num','turtle_money','turtle_tax','turtle_profit','turtle_days']]\n            df1= df1.rename(columns={'turtleBuy': 'buy','turtleSell': 'sell','turtle_num': 'num','turtle_money': 'money','turtle_tax': 'tax','turtle_profit': 'profit','turtle_days': 'days'})# selectively rename columns\n            buyName='120up&allStockList_55天唐奇安突破0.618'\n            # buy_sellResult(df1,buyName)\n            # myPlot(code)# show the candlestick chart\n        except Exception as err:\n            print(code +'read_csv err', err)\n            continue\n    print('...count end...')\n    elapsed = time.perf_counter() - start\n    print(\"all Time used:\",elapsed)  \nmyFor3('1')\n# everyDayOperatorT('2020-12-11')\n#\ndef chooseNewHigh1(beforeDays):# leading-stock selection: update the daily list of all-time-high stocks\n    start = time.perf_counter()# start timing\n    print('...choose start....')\n    filename='allStockList'  \n    codeList= stockList(filename) # get all codes in the target stock pool\n    for code in codeList: # iterate over the codes\n        path1='G:\\\\stockData\\\\originData\\\\'+ code[:6] +'.csv'\n        try:\n            df=pd.read_csv(path1,encoding='utf_8_sig')# create the file and save the data\n        except Exception as err:\n            print(code +'read_csv err', err)\n            continue\n        if (len(df) < 100) | (df['ts_code'].str.contains('ST')[0]):\n            continue\n        today=datetime.datetime.now().strftime('%Y%m%d')\n        # today='20201126'\n        filename1 = 'G:\\\\stockData\\\\newHighPool\\\\'+ today +'.csv'\n        ex=os.path.isfile(filename1) # if the file exists append to it, otherwise create it\n        d=str(df.iloc[-1]['trade_date'])\n        if d== today :\n            # length=len(df)\n            m=df['high'].max()\n            for row in range(-beforeDays,-1):\n                t=df.iloc[row]['high']\n                if t >= m:\n                    df=df.loc[[0]]\n                    df=df[['ts_code']]\n                    if ex==False:  \n                        df.to_csv(filename1,index=None,encoding='utf_8_sig')# create the file and save the data  \n                    else :\n                        df.to_csv(filename1,mode='a',index=None,header=False,encoding='utf_8_sig')# append the data to the end of the file\n                    break\n        else:\n            continue\n        if ex==True:\n            df=pd.read_csv(filename1,encoding='utf_8_sig')#\n            
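# also export today's pool as a tab-separated .txt copy\n            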
f='G:\\\\stockData\\\\newHighPool\\\\'+ today +'.txt'\n            df.to_csv(f, sep='\\t', index=False)\n    # print('choose end.')\n    elapsed = time.perf_counter() - start\n    print(\"all Time used:\",elapsed)\n# chooseNewHigh1(5)\n# ","sub_path":"showTestData.py","file_name":"showTestData.py","file_ext":"py","file_size_in_byte":15120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"273970400","text":"# coding=utf-8\n\n\"\"\"\nSource files and annotated source files.\n\"\"\"\n\nimport os\n\nimport fragments\n\nclass SourceFile(object):\n    \"\"\"\n    A source file seen as a sequence of lines. The source file may contain\n    a list of errors.\n    \"\"\"\n    def __init__(self, fileName):\n        if not os.path.isfile(fileName):\n            raise Exception('File \"%s\" not found' \\\n                            % fileName)\n\n        self.fileName = fileName\n        \"\"\" The filename as given when creating the source file\"\"\"\n\n        self.name = \\\n            os.path.splitext(os.path.basename(self.fileName))[0]\n        \"\"\" The short file name without the extension \"\"\"\n\n        f = open(fileName, 'r')\n        self.sourceLines = tuple(f.read().splitlines())\n        \"\"\" The list of lines of the source file\"\"\"\n        f.close()\n\n        self.errors = []\n        \"\"\" The list of errors \"\"\"\n\n    def addError(self, sourceError):\n        self.errors.append(sourceError)\n\n    def clearErrors(self):\n        self.errors = []\n\n    def __repr__(self):\n        return ('SourceFile(%s)'%self.fileName)\n\n\n\nclass AnnotatedSourceFile(SourceFile):\n    \"\"\"\n    A source file with annotated fragments. The source can be viewed\n    both as a flat sequence of line or as a fragment trees.\n    The annotation markers can be defined when building the source file.\n    \"\"\"\n    def __init__(self, fileName,\n                 openingMark = r'--oo<< *(?P<name>[^ \\n]+) *$',\n                 closingMark = r'--oo>> *$',\n                 hereMark = r'--oo== *(?P<name>[^ \\n]+) *$'):\n        \"\"\"\n        Create an annotated source file. The marks have to be provided\n        as regular expressions, sometimes with an optional\n        named group carrying the mark's name. That is a regexp group like\n        (?P<name> ... ). 
This part will be extracted and will\n        constitute the name of the mark.\n        :param fileName: the file name\n        :type fileName: str\n        :param openingMark: The opening mark with ?P<name> group\n        :type openingMark: str\n        :param closingMark: The closing mark\n        :type closingMark: str\n        :param hereMark: The here mark with ?P<name> group\n        :type hereMark: str\n        :return: AnnotatedSourceFile\n        :rtype: AnnotatedSourceFile\n        \"\"\"\n\n        super(AnnotatedSourceFile,self).__init__(fileName)\n        self.openingMark = openingMark\n        self.closingMark = closingMark\n        self.hereMark = hereMark\n\n        fragmenter = fragments.RegexpFragmenter(\n            self.sourceLines,\n            openingMark, closingMark, hereMark,\n            mainValue = self, firstPosition = 1)\n\n        self.fragment = fragmenter.fragment\n        \"\"\" The root fragment according to the given mark \"\"\"\n\n    def __repr__(self):\n        return ('AnnotatedSourceFile(%s)'%self.fileName)\n\n\n\n\n","sub_path":"pyuseocl/utils/sources.py","file_name":"sources.py","file_ext":"py","file_size_in_byte":2823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"439732544","text":"'''\nAuthor : Oguzhan Gencoglu\nContact : oguzhan.gencoglu@tut.fi\nCreated : 18.07.2015\nLatest Version : 18.07.2015\n'''\n\nimport numpy as np\n\ndef unique_rows(mat):\n    # Return unique rows of a numpy 2D array  \n\n    b = np.ascontiguousarray(mat).view(np.dtype((np.void, mat.dtype.itemsize * mat.shape[1])))\n    _, idx = np.unique(b, return_index=True)\n    unique_rows = mat[idx]\n    \n    return(unique_rows)","sub_path":"Python/Other/unique_rows.py","file_name":"unique_rows.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"297597398","text":"import CoolProp.CoolProp as CP\n\nclass Node(object):\n\n    \"define internal properties at node\"\n    def __init__(self):\n        self.fluid = 'R134a'\n        self.p = None\n        self.t = None\n        self.h = None\n        self.s = None\n        self.x = None\n\n    \"given two properties at node, calculate the rest\"\n    def pt(self):\n        self.h = CP.PropsSI('H','P',self.p,'T',self.t,self.fluid)\n        self.s = CP.PropsSI('S','P',self.p,'T',self.t,self.fluid)\n        self.x = CP.PropsSI('Q','P',self.p,'T',self.t,self.fluid)\n    \n    def ps(self):\n        self.h = CP.PropsSI('H','P',self.p,'S',self.s,self.fluid)\n        self.x = CP.PropsSI('Q','P',self.p,'S',self.s,self.fluid)\n        self.t = CP.PropsSI('T','P',self.p,'S',self.s,self.fluid)\n    \n    def ph(self):\n        self.t = CP.PropsSI('T','P',self.p,'H',self.h,self.fluid)\n        self.s = CP.PropsSI('S','P',self.p,'H',self.h,self.fluid)\n        self.x = CP.PropsSI('Q','P',self.p,'H',self.h,self.fluid)\n    def px(self):\n        self.h = CP.PropsSI('H','P',self.p,'Q',self.x,self.fluid)\n        self.s = CP.PropsSI('S','P',self.p,'Q',self.x,self.fluid)\n        self.t = CP.PropsSI('T','P',self.p,'Q',self.x,self.fluid)\n\n    def tx(self):\n        self.h = CP.PropsSI('H','T',self.t,'Q',self.x,self.fluid)\n        self.s = CP.PropsSI('S','T',self.t,'Q',self.x,self.fluid)\n        self.p = CP.PropsSI('P','T',self.t,'Q',self.x,self.fluid)\n\n    def th(self):\n        \"use T to find hg,hf, then use h to find x, hence actually tx\"\n        \"this is for throttle, only works in vapour dome\"\n        hg = CP.PropsSI('H','T',self.t,'Q',1.0,self.fluid)\n        hf = CP.PropsSI('H','T',self.t,'Q',0.0,self.fluid)\n        self.x = (self.h - hf)/(hg - hf)\n        self.s = CP.PropsSI('S','T',self.t,'Q',self.x,self.fluid)\n        self.p = CP.PropsSI('P','T',self.t,'Q',self.x,self.fluid)\n\n\n\n\n\n    
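# Usage sketch (editor's note; assumes CoolProp is installed and the state is valid):\n    #   n = Node(); n.p = 3e5; n.t = 280.0; n.pt()  # fills n.h, n.s and n.x\n    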
","sub_path":"original/Nodes.py","file_name":"Nodes.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"586250605","text":"# coding=utf8\n\n# Copyright 2018 JDCLOUD.COM\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# NOTE: This class is auto generated by the jdcloud code generator program.\n\n\nclass VmInfo(object):\n\n def __init__(self, id=None, region=None, az=None, name=None, hostName=None, imageType=None, instanceType=None, description=None, subnetId=None, tags=None, cloudID=None, keyNames=None, elasticIpAddress=None, privateIpAddress=None, status=None, createdTime=None, imageId=None, securityGroupIds=None):\n \"\"\"\n :param id: (Optional) 资源ID,如果为空,则执行创建操作,否则执行修改操作\n :param region: (Optional) 可用区,根据各云平台规范填写\n :param az: (Optional) 云主机所属的可用区\n :param name: (Optional) 云主机名称\n :param hostName: (Optional) 云主机\n :param imageType: (Optional) \n :param instanceType: (Optional) \n :param description: (Optional) 云主机描述\n :param subnetId: (Optional) 子网ID\n :param tags: (Optional) \n :param cloudID: (Optional) 所属云提供商ID\n :param keyNames: (Optional) 密钥对名称,jd当前只支持传入一个\n :param elasticIpAddress: (Optional) 主网卡主IP绑定弹性IP的地址\n :param privateIpAddress: (Optional) 私有ip地址\n :param status: (Optional) 云主机状态\n :param createdTime: (Optional) 创建时间\n :param imageId: (Optional) 镜像ID\n :param securityGroupIds: (Optional) 安全组ID\n \"\"\"\n\n self.id = id\n self.region = region\n self.az = az\n self.name = name\n self.hostName = hostName\n self.imageType = imageType\n self.instanceType = instanceType\n self.description = description\n self.subnetId = subnetId\n self.tags = tags\n self.cloudID = cloudID\n self.keyNames = keyNames\n self.elasticIpAddress = elasticIpAddress\n self.privateIpAddress = privateIpAddress\n self.status = status\n self.createdTime = createdTime\n self.imageId = imageId\n self.securityGroupIds = securityGroupIds\n","sub_path":"jdcloud_sdk/services/jdfusion/models/VmInfo.py","file_name":"VmInfo.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"552891690","text":"'''\n@author: wei,xiang\n'''\n\nimport fs_wrapper\nimport settings.common as SC\nfrom case_utility import *\nfrom logging_wrapper import log_test_case, save_fail_log, print_report_line\nfrom test_case_base import TestCaseBase\nfrom qrd_shared.case import *\nimport re\n\n\n\nclass test_suit_cmcc_devci_phone_case7(TestCaseBase): \n '''\n\n @see: L{TestCaseBase }\n '''\n\n \n #def exe_command(self,pipe, command=\"\"):\n # ret_val = \"\"\n #print \"exec command:\", command\n\n #if no command arg, it means reading output only\n # if ( command != \"\" ):\n # pipe.stdin.write(command)\n # pipe.stdin.flush()\n # pipe.stdout.readline() #this will read the command itself\n #print \"command itself:\", str\n\n #this will read the command output\n # ch = pipe.stdout.readline(1)\n # one_line = ''\n # while ( re.match(r'^[$#]', ch, re.M|re.I) == 
None ):\n #print \"ch:\", ch\n # if ( ch != '\\r'): one_line = one_line + ch #\\n is for newline\n #add the line\n # if ( ch == '\\n' ):\n # ret_val = ret_val + one_line\n # one_line = \"\"\n # ch = pipe.stdout.readline(1)\n\n #print \"command output:\", ret_val\n # return ret_val\n \n \n def test_case_main(self, case_results):\n \n incommon.record_video()\n # self.adb_pipe = subprocess.Popen('adb shell', stdin=subprocess.PIPE, stdout=subprocess.PIPE, bufsize=1)\n # while ( re.match( r'^[$#]', self.adb_pipe.stdout.readline(1), re.M|re.I) == None ):\n # pass\n # pid = self.exe_command(self.adb_pipe,'screenrecord --verbose /storage/sdcard0/Record.mp4 &\\n') \n # apid=pid.strip('\\r\\n').split(' ')\n # self.dog = apid[1]\n \n \n global case_flag ,case_flag_slot1,case_flag_slot2, TAG\n case_flag_slot1=False\n case_flag_slot2=False\n case_flag = False\n TAG = \"Dev-ci cases: Phone \"\n log_test_framework(TAG, self.name + \" -Start\")\n \n \n \"\"\"\n \n cases contnets you need to add\n \n \n \"\"\"\n #modified by c_yazli\n #send_key(KEYCODE_POWER)\n #sleep(1)\n #send_key(KEYCODE_POWER)\n #sleep(1)\n #click_textview_by_text('EMERGENCY CALL')\n start_activity('com.android.settings','.Settings')\n settings.set_default_voice(1)\n send_key(KEY_BACK)\n send_key(KEY_BACK)\n sleep(2)\n start_activity(\"com.android.phone\", \"com.android.phone.EmergencyDialer\")\n sleep(1)\n if search_text('Emergency Dialer'):\n phone.dial(\"911\")\n sleep(1)\n func1=lambda:search_text('Emergency number', searchFlag=TEXT_CONTAINS) and search_text('Mobile',searchFlag=TEXT_CONTAINS)\n sleep(1)\n if wait_for_fun(func1, True, 30):\n case_flag_slot1=True \n log_test_framework(TAG, \"Dial emergency call from card 1 successfully \")\n else:\n log_test_framework(TAG, \"Dial emergency call from card 1 unsuccessfully\")\n if search_view_by_desc('End'):\n click_imageview_by_desc('End')\n \n \n start_activity('com.android.settings','.Settings')\n settings.set_default_voice(2)\n send_key(KEY_BACK)\n send_key(KEY_BACK)\n sleep(2)\n start_activity(\"com.android.phone\", \"com.android.phone.EmergencyDialer\") \n sleep(1)\n if search_text('Emergency Dialer'): \n phone.dial('911')\n sleep(2)\n func2=lambda:search_text('Emergency number', searchFlag=TEXT_CONTAINS) and search_text('Unicom',searchFlag=TEXT_CONTAINS)\n sleep(2)\n if wait_for_fun(func2, True, 30):\n case_flag_slot2=True \n log_test_framework(TAG, \"Dial emergency call from card 2 successfully \")\n else:\n log_test_framework(TAG, \"Dial emergency call from card 2 unsuccessfully\")\n if search_view_by_desc('End'):\n click_imageview_by_desc('End')\n #click_button_by_id(\"digits\")\n #entertext_edittext_by_id(\"digits\", \"911\")\n case_flag=case_flag_slot1 and case_flag_slot2\n \n \n \n #Note: I do not actually place an emergency call as false emergency calls are illegal; I simply enter the emergency call mode for the purposes of the test\n \n \n if case_flag:\n qsst_log_case_status(STATUS_SUCCESS, \"\" , SEVERITY_HIGH)\n else:\n qsst_log_case_status(STATUS_FAILED, \"\", SEVERITY_HIGH)\n \n case_results.append((self.case_config_map[fs_wrapper.CASE_NAME_ATTR], case_flag))\n \n \n def test_case_end(self):\n '''\n record the case result\n\n '''\n log_test_case(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], TAG + ' : end')\n if can_continue() and case_flag == True:\n # shutdown()\n log_test_case(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], TAG + ': case pass')\n print_report_line(self.case_config_map[fs_wrapper.CASE_NAME_ATTR] + TAG + ' : \\tpass')\n else:\n 
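# case failed: record the result and preserve the failure log\n            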
log_test_case(self.case_config_map[fs_wrapper.CASE_NAME_ATTR], TAG + ' : case fail')\n print_report_line(self.case_config_map[fs_wrapper.CASE_NAME_ATTR] + TAG + ' : \\tfail')\n save_fail_log()\n \n \n # self.exe_command(self.adb_pipe,'kill -2 %s\\n'%self.dog)\n #time.sleep(3)\n # os.system('adb pull /storage/sdcard0/Record.mp4 C:/1/1.mp4')\n #self.adb_pipe.kill()\n incommon.stop_video_record(\"phonecase7\")","sub_path":"Source/QSST/Config/data/L/test_env/test_suit_cmcc_devci_phone/test_suit_cmcc_devci_phone_case7.py","file_name":"test_suit_cmcc_devci_phone_case7.py","file_ext":"py","file_size_in_byte":5696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"150422998","text":"# Django settings for sview project.\n\n#DEBUG = True\ntry:\n from local_settings import *\nexcept ImportError:\n raise Exception('no local_settings found!')\n\nTEMPLATE_DEBUG = DEBUG\n\nimport os\nROOT_PATH = os.path.abspath(os.path.dirname(__file__))\n\n#MOVED TO LOCAL SETTINGS\n# ADMINS = (\n# # ('Your Name', 'your_email@example.com'),\n# )\n\n#MOVED TO LOCAL SETTINGS\n#DOMAIN = \"supplierview.com\"\n\nPROJECT_NAME = \"SupplierView\"\n\nMANAGERS = ADMINS\n\n#MOVED TO LOCAL SETTINGS\n# DATABASES = {\n# 'default': {\n# 'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.\n# 'NAME': 'sview_dev', # Or path to database file if using sqlite3.\n# 'USER': 'sview', # Not used with sqlite3.\n# 'PASSWORD': '5a15a', # Not used with sqlite3.\n# 'HOST': 'localhost', # Set to empty string for localhost. Not used with sqlite3.\n# 'PORT': '', # Set to empty string for default. Not used with sqlite3.\n# }\n# }\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# On Unix systems, a value of None will cause Django to use the same\n# timezone as the operating system.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = 'America/Chicago'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale\nUSE_L10N = True\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/home/media/media.lawrence.com/media/\"\nMEDIA_ROOT = os.path.join(ROOT_PATH, 'user_media')\n\n# URL that handles the media served from MEDIA_ROOT. 
Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\n##OVERRIDE IN LOCAL SETTINGS \n#MEDIA_URL = \"http://devmedia.supplierview.com\"\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = os.path.abspath(os.path.dirname(__file__)) + '/user_media/'\nUSER_MEDIA_PATH = os.path.join(ROOT_PATH, 'user_media')\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\n##OVERRIDE IN LOCAL SETTINGS \n#STATIC_URL = 'http://devstatic.supplierview.com/media/'\n\n# URL prefix for admin static files -- CSS, JavaScript and images.\n# Make sure to use a trailing slash.\n# Examples: \"http://foo.com/static/admin/\", \"/static/admin/\".\n##OVERRIDE IN LOCAL SETTINGS \n#ADMIN_MEDIA_PREFIX = 'http://devstatic.supplierview.com'\n\n# Additional locations of static files\nSTATICFILES_DIRS = [\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n\n]\nif DEBUG:\n STATICFILES_DIRS.append( os.path.join( ROOT_PATH, \"media/\" ) )\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n# 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 'zd=#_5%6bthx=8bs-@v#c-%4185cq1g&ah4@8-f*&01&sd53-5'\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n# 'django.template.loaders.eggs.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n #'userena.middleware.UserenaLocaleMiddleware',\n 'pagination.middleware.PaginationMiddleware',\n)\nif DEBUG == False:\n MIDDLEWARE_CLASSES += (\n 'sview.supplierviewmiddleware.SupplierViewMiddleware',\n)\n\nROOT_URLCONF = 'sview.urls'\n\nTEMPLATE_DIRS = (\n # Put strings here, like \"/home/html/django_templates\" or \"C:/www/django/templates\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n os.path.join(ROOT_PATH, \"templates\"),\n)\nTEMPLATE_CONTEXT_PROCESSORS = (\n \"django.contrib.auth.context_processors.auth\",\n \"django.core.context_processors.debug\",\n \"django.core.context_processors.i18n\",\n \"django.core.context_processors.media\",\n \"django.core.context_processors.static\",\n \"django.contrib.messages.context_processors.messages\",\n \"django.core.context_processors.request\",\n)\n\nACCOUNT_ACTIVATION_DAYS = 7\nDEFAULT_FROM_EMAIL = \"signup@%s\" % DOMAIN\n\nAUTHENTICATION_BACKENDS = (\n #'userena.backends.UserenaAuthenticationBackend',\n #'guardian.backends.ObjectPermissionBackend',\n 'django.contrib.auth.backends.ModelBackend',\n)\nAUTH_PROFILE_MODULE = \"profile.UserProfile\" \n#ANONYMOUS_USER_ID = -1\n#MEDIA_PATH = 
os.path.join(ROOT_PATH, \"user_media/mugshots\")\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n # Uncomment the next line to enable the admin:\n 'django.contrib.admin',\n # Uncomment the next line to enable admin documentation:\n # 'django.contrib.admindocs',\n \"django_extensions\",\n \"taggit\", \n \"south\",\n #\"userena\", \n #\"guardian\",\n #\"easy_thumbnails\",\n \"cachetree\",\n\n 'indexer',\n 'paging',\n \"sentry\", \n \"raven.contrib.django\",\n \"djcelery\",\n \"haystack\",\n \"pagination\",\n \"sview.registration\",\n \"sview.location\",\n \"sview.supplier\",\n \"sview.survey\",\n \"sview.profile\",\n \"sview.company\",\n \"sview.feeds\",\n \"sview.home\",\n)\n\nif DEBUG:\n INSTALLED_APPS += (\n \"debug_toolbar\",\n )\n MIDDLEWARE_CLASSES += ( \n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n )\n INTERNAL_IPS = ('127.0.0.1',)\n DEBUG_TOOLBAR_PANELS = (\n 'debug_toolbar.panels.version.VersionDebugPanel',\n 'debug_toolbar.panels.timer.TimerDebugPanel',\n 'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',\n 'debug_toolbar.panels.headers.HeaderDebugPanel',\n 'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',\n 'debug_toolbar.panels.template.TemplateDebugPanel',\n 'debug_toolbar.panels.sql.SQLDebugPanel',\n 'debug_toolbar.panels.signals.SignalDebugPanel',\n 'debug_toolbar.panels.logger.LoggingPanel',\n )\n def custom_show_toolbar(request):\n return True # Always show toolbar, for example purposes only.\n\n DEBUG_TOOLBAR_CONFIG = {\n 'INTERCEPT_REDIRECTS': False,\n 'HIDE_DJANGO_SQL': False,\n 'SHOW_TOOLBAR_CALLBACK': custom_show_toolbar,\n 'TAG': 'div',\n }\n\n# A sample logging configuration. 
The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(pathname)s:L%(lineno)d %(message)s'\n #'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n },\n },\n 'root': {\n 'level':'INFO', # CHANGE TO DEFAULT_LEVEL\n 'handlers':['file', ],\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'class': 'django.utils.log.AdminEmailHandler',\n 'formatter': 'verbose'\n },\n 'file': {\n 'level': 'INFO',\n 'class': 'logging.FileHandler',\n 'formatter': 'verbose',\n 'filename': os.path.join(ROOT_PATH, 'debug_log')\n },\n 'sentry': {\n 'level': 'DEBUG',\n 'class': 'raven.contrib.django.handlers.SentryHandler',\n 'formatter': 'verbose'\n },\n },\n 'loggers': {\n 'sentry': {\n 'handlers': ['sentry', 'mail_admins'],\n 'level': 'DEBUG',\n 'propagate': True,\n }\n }\n}\nif DEBUG:\n LOGGING['loggers']['sentry']['handlers'] = ['sentry', 'file',]\n LOGGING['loggers']['sentry']['level'] = 'DEBUG'\n\n#CELERY CONFIGURATION\nimport djcelery\ndjcelery.setup_loader()\nBROKER_TRANSPORT = \"redis\"\nBROKER_HOST = \"localhost\" # Maps to redis host.\nBROKER_PORT = 6379 # Maps to redis port.\n\n##OVERRIDE IN LOCAL SETTINGS \n#BROKER_VHOST = \"7\" # Maps to database number.\n\nfrom datetime import timedelta\nfrom celery.schedules import crontab\nCELERYBEAT_SCHEDULE = {\n \"fetch-google-news-every-30-mins\": {\n \"task\": \"sview.supplier.tasks.fetch_news\",\n \"schedule\": crontab(minute=\"*/30\"),\n },\n \"runs-every-10-minutes\": {\n \"task\": \"sview.supplier.tasks.fetch_feeds\",\n \"schedule\": crontab(minute=\"*/10\"),\n \"kwargs\": {'google': False}\n },\n \"commits-to-solr-every-10-minutes\": {\n \"task\": \"sview.supplier.tasks.commit_solr\",\n \"schedule\": crontab(minute=\"*/5\"),\n },\n \"update_index_every_hour\": {\n \"task\": \"sview.supplier.tasks.update_index\",\n \"schedule\": crontab(hour=\"*\", minute=1),\n \"kwargs\": {'google': False}\n },\n \"runs-every-night\": {\n \"task\": \"sview.supplier.tasks.nightly_feeds\",\n \"schedule\": crontab(hour=4, minute=0),\n \"kwargs\": {'google': False}\n },\n \"runs-every-night-cleanupregistration\": {\n \"task\": \"sview.profile.tasks.cleanupregistration\",\n \"schedule\": crontab(minute=35, hour=3),\n },\n}\n\n##OVERRIDE IN LOCAL SETTINGS \n##REDIS CACHE SETTINGS##\n# CACHES = {\n# 'default': {\n# 'BACKEND': 'redis_cache.RedisCache',\n# 'LOCATION': '127.0.0.1:6379',\n# 'OPTIONS': { # optional\n# 'DB': 6,\n# },\n# }\n# }\n\n\nSESSION_ENGINE = \"django.contrib.sessions.backends.cached_db\"\n\nLINKEDIN_API_KEY = \"ieztrvk7d24a\" \nLINKEDIN_API_SECRET = \"ikR46qrstSFEkI0g\" \nDEFAULT_LINKEDIN_OAUTH_TOKEN = \"59c29412-cbe3-41ce-a98d-78e90b63a36a\"\nDEFAULT_LINKEDIN_OAUTH_SECRET = \"a64432a0-3f18-453b-ae60-def4116300b5\"\n\n#MOVED TO LOCAL_SETTINGS\n#COMPANY ONLY REGISTATION\n# BAD_DOMAINS = ['aim.com', 'aol.com', 'email.com', 'gmail.com',\n# 'googlemail.com', 'hotmail.com', 'hushmail.com',\n# 'msn.com', 'mail.ru', 'mailinator.com', 'live.com',\n# 'yahoo.com']\n\n##DEFINED IN LOCAL SETTINGS\n# HAYSTACK_CONNECTIONS = {\n# 'default': {\n# # For Solr:\n# 'ENGINE': 'haystack.backends.solr_backend.SolrEngine',\n# 'URL': 'http://localhost:8080/solr/core1/', #this is for a solr multicore setup - 
change accordingly\n# 'TIMEOUT': 60 * 5,\n# },\n# }\n\n#YAHOO PIPE FOR COMPANY INFO\nYAHOO_PIPE_URL = \"http://pipes.yahoo.com/pipes/pipe.run?_id=30338e76805ca1c98525b00f7520415d&company=%s&_render=json&_callback=Cn\"\n\n\n##Google apps webmaster email\nGOOGLE_APPS_WEBMASTER_EMAIL = \"webmaster@supplierview.com\"\nGOOGLE_APPS_WEBMASTER_PASSWORD = \"M*yPnU2N\"\n\n\n","sub_path":"sview/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":12017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"18845163","text":"import numpy as np\nimport warnings\n\nfrom cvxopt import solvers, matrix\n\n\ndef manifold_simcap_analysis(XtotT, n_rep, seed=0):\n '''\n Computes the simulation capacity of the given data\n\n Args:\n XtotT: Sequence of 2D arrays of shape (N, P_i) where N is the dimensionality\n of the space, and P_i is the number of sampled points for the i_th manifold.\n n_rep: Number of random draws to try at each feature dimension\n seed: Random seed\n\n Returns:\n asim: Simulation capacity\n P: Number of objects in XtotT\n Nc0: Number of features to separate with 0.5 chance\n N_vec: Values of N used in bisection search\n p_vec: Fraction of separable trials at each value of N\n '''\n # Get the number of objects and the total number of features\n P, N = len(XtotT), XtotT[0].shape[0]\n # Concatenate all the data and compute the global mean\n Xori = np.concatenate(XtotT, axis=1)\n global_mean = np.mean(Xori, axis=1, keepdims=True)\n # Subtract the global mean\n Xtot0 = [x - global_mean for x in XtotT]\n # Find the number of features for separability with 0.5 chance\n Nc, N_vec, p_vec = bisection_Nc_general(Xtot0, n_rep, 2, N, 0.05, seed)\n # Check if there was a solution, if so interpolate betweeen the boundaries for the capacity\n if Nc is np.nan:\n asim0 = np.nan\n Nc0 = np.nan\n else:\n # Find the boundary points\n below = np.max([i for i, p in enumerate(p_vec) if p < 0.5])\n above = np.min([i for i, p in enumerate(p_vec) if p >= 0.5])\n bounds = [below, above]\n # Interpolate between the bounds for the number of features where p=0.5\n N_vals = [N_vec[i] for i in bounds]\n p_vals = [p_vec[i] for i in bounds]\n Nc0 = np.interp(0.5, p_vals, N_vals)\n asim0 = P/Nc0\n return asim0, P, Nc0, N_vec, p_vec\n\n\ndef bisection_Nc_general(Xtot, n_rep, Nmin, Nmax, p_tol, seed, verbose=False):\n '''\n Performs a bisection search for the number of features such that the probability that the data\n is linearly separable is 0.5. 
Implements the flag_n = 2 case from the original matlab code.\n\n Args:\n Xtot: Sequence of 2D arrays of shape (N, P_i) where N is the dimensionality\n of the space, and P_i is the number of sampled points for the i_th manifold.\n n_rep: Number of random draws to try at each feature number N\n Nmin: Minimum number of features to try\n Nmax: Maximum number of features to try\n p_tol: Tolerance on the separation probability; the search stops once the\n estimated probability is within p_tol of 0.5\n seed: Random seed\n\n Returns:\n Ncur: Number of features at the end of the bisection search\n Nall_vec: Every value of N tried during the search\n pall_vec: Computed separation probability at each value of N\n '''\n # Get the number of input objects\n P = len(Xtot)\n # Configure the separability check\n def create_f_pdiff(Xtot, n_rep, seed):\n def f_pdiff(N):\n return compute_sep_Nc_general(Xtot, N, n_rep=n_rep, seed=seed) - 0.5\n return f_pdiff\n f_pdiff = create_f_pdiff(Xtot, n_rep, seed)\n # Initialize the bisection search\n fmin = f_pdiff(Nmin)\n fmax = f_pdiff(Nmax)\n pmin_vec = [fmin + 0.5]\n pmax_vec = [fmax + 0.5]\n Nmin_vec = [Nmin]\n Nmax_vec = [Nmax]\n Ncur = int((Nmin + Nmax)/2 + 0.5)\n\n # Check that there is something to search over\n if pmax_vec[0] == 0:\n warnings.warn(\"Maximum N gives zero separability. Need more neurons.\")\n Ncur = np.nan\n Nall_vec = np.nan\n pall_vec = np.nan\n\n # Check that the target value is between the max and the min\n if fmin * fmax > 0:\n warnings.warn(\"Wrong choice of Nmin and Nmax\")\n Ncur = np.nan\n Nall_vec = np.nan\n pall_vec = np.nan\n\n # If there is something to search over, do the bisection search\n if Ncur is not np.nan:\n # Check separability at this N\n fcur = f_pdiff(Ncur)\n # Set up ending conditions for the search\n err = np.abs(fcur)\n kk = 0\n dN = 1000\n # Search for the target value of Ncur\n Ncur_vec = []\n pcur_vec = []\n while err > p_tol and dN > 1 and kk < 100:\n kk += 1\n if verbose:\n print(\"{}th bisection run, P={}, Ncur={}, Nmin={}, pmin={}, Nmax={}, pmax={}\".format(kk, P, Ncur, Nmin, fmin + 0.5, Nmax, fmax + 0.5))\n # Check that the target value is between the max and the current N value\n # Adjust the bounds of the search appropriately\n if fmin * fcur < 0:\n Nmax = Ncur\n fmax = fcur\n else:\n Nmin = Ncur\n fmin = fcur\n # Store results of this step\n pmin_vec.append(fmin + 0.5)\n pmax_vec.append(fmax + 0.5)\n Nmin_vec.append(Nmin)\n Nmax_vec.append(Nmax)\n # Get the next N to check\n Ncur = int((Nmin + Nmax)/2 + 0.5)\n fcur = f_pdiff(Ncur)\n err = np.abs(fcur)\n if verbose:\n print(\"err={}, p_tol={}\".format(err, p_tol))\n dN = Nmax - Nmin\n Ncur_vec.append(Ncur)\n pcur_vec.append(fcur + 0.5)\n\n # Get the final quantities\n combined_quantities = [(n, pcur_vec[i]) for i, n in enumerate(Ncur_vec)]\n combined_quantities += [(n, pmin_vec[i]) for i, n in enumerate(Nmin_vec)]\n combined_quantities += [(n, pmax_vec[i]) for i, n in enumerate(Nmax_vec)]\n sorted_quantities = sorted(combined_quantities, key=lambda x: x[0])\n Nall_vec = [q[0] for q in sorted_quantities]\n pall_vec = [q[1] for q in sorted_quantities]\n return Ncur, Nall_vec, pall_vec\n\n\ndef compute_sep_Nc_general(Xtot, N_cur, n_rep, seed, reduced=False):\n '''\n Computes the separability of the input data using N_cur features. 
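A hypothetical call such as compute_sep_Nc_general(Xtot, N_cur=16, n_rep=10, seed=0) would return the fraction of random dichotomies found to be linearly separable. 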
Only implements the\n flag_n = 2 case from the original matlab code.\n\n Args:\n Xtot: Sequence of 2D arrays of shape (N, P_i) where N is the dimensionality\n of the space, and P_i is the number of sampled points for the i_th manifold.\n N_cur: Number of features to use when checking linear separability\n n_rep: Number of random label assignments to try\n seed: Random seed\n reduced: Optionally use a smaller number of repetitions for large numbers of features.\n\n Returns\n p_conv: Fraction of the n_rep runs that were separable\n '''\n # Set the random seed\n np.random.seed(seed=seed)\n # Get the number of manifolds and dimensionality of data\n P, N = len(Xtot), Xtot[0].shape[0]\n # Use a smaller number of runs if the current number of features is high\n if N_cur > 1500 and reduced:\n n_rep = 5\n # Pick P/2 random objects to assign a positive label to for each repetition\n indpAll = [np.random.choice(range(P), size=P//2, replace=False) for i in range(n_rep)]\n # For each repetition, compute the separability of the randomly labeled data\n sep_vec = []\n for i in range(n_rep):\n # Create the label array\n indp = indpAll[i]\n labels00 = - np.ones((P))\n labels00[indp] = 1\n # Create a (normalized) random projection from N dimensions to N_cur dimensions\n try:\n W = np.random.randn(N, N_cur)\n W = W / np.sqrt(np.sum(np.square(W), axis=0, keepdims=True))\n # Project the data for each manifold into the lower dimensional space\n Xsub = [np.matmul(W.T, X) for X in Xtot]\n # Check separability in this subspace\n sep0, w0, bias0, margin0 = check_data_separability_general(Xsub, labels00)\n sep_vec.append(sep0)\n except ValueError as e:\n warnings.warn('Could not find solution')\n sep_vec.append(False)\n p_conv = np.mean(sep_vec)\n return p_conv\n\n\ndef check_data_separability_general(X, labels):\n '''\n Checks if a dichotomy of X given by labels is linearly separable.\n\n Args:\n X: Sequence of 2D arrays of shape (N, P_i) where N is the dimensionality\n of the space, and P_i is the number of sampled points for the i_th manifold.\n labels: Labels (+1 or -1). 
Should be a 1D array of shape (P) where P is number of manifolds.\n\n Returns:\n sep: Whether or not the dichotomy is linearly separable\n w: Weights of the optimal hyperplane\n bias: Bias for the separating plane\n margin: Size of margin\n '''\n # Get the indices of the positive and negative labels\n pos = [i for i, l in enumerate(labels) if l == 1]\n neg = [i for i, l in enumerate(labels) if l == -1]\n # Get the number of classes and feature dimension\n P, N = len(X), X[0].shape[0]\n # Combine the data and labels\n X_tot = np.concatenate(X, axis=1)\n y_tot = np.concatenate([labels[i] * np.ones(x.shape[1]) for i, x in enumerate(X)])\n y_tot = y_tot.reshape(1, -1)\n assert X_tot.shape[1] == y_tot.shape[1]\n\n # Initialize weights and biases to zero\n w_ini = np.zeros((N, 1))\n bias_ini = 0\n # Set margin to zero\n kappa = 0\n # Set tolerance for solver\n tolerance = 1e-8\n # Find the optimal hyperplane\n sep, w, margin, flag, u, bias = find_svm_sep_primal_wb(X_tot, y_tot, w_ini, kappa=kappa, tolerance=tolerance, flag_wb=1)\n return sep, w, bias, margin\n\n\ndef find_svm_sep_primal_wb(X, y, w_ini, kappa=0, tolerance=1e-8, flag_wb=1):\n '''\n Finds the optimal separating hyperplane for data X given the dichotomy specified by y.\n The plane is defined by the vector w and is found by minimizing\n 1/2 * w.T * w\n Subject to the constraint\n y * (x.T * w + b) >= 1\n For all data points, and an optional bias b.\n\n Args:\n X: Data matrix of shape (N, M) where N is the number of features, and M is the number of data points.\n y: Matrix of shape (1, M) containing the label for each of the M data points. Labels must be +1 or -1\n flag_wb: Option to include a bias. Uses a bias if set to 1.\n\n Returns:\n sep: Whether or not the dichotomy is linearly separable\n w: Weights of the optimal hyperplane\n margin: Size of margin\n flag: Not used.\n u: Unnormalized weights of the optimal hyperplane\n bias: Bias for the separating plane\n '''\n # Configure the solver\n solvers.options['show_progress'] = False\n solvers.options['maxiters'] = 1000000\n solvers.options['feastol'] = tolerance\n solvers.options['abstol'] = tolerance\n solvers.options['reltol'] = tolerance\n\n # Get the shape of X\n M, N = X.shape[1], X.shape[0]\n # Verify there are the right number of labels and that they are +/- 1\n assert M == y.shape[1]\n assert np.all(np.abs(y) == 1)\n\n # Optionally add a constant component to X, otherwise plane is constrained to pass through the origin\n if flag_wb == 1:\n offset = np.ones((1, M))\n else:\n offset = np.zeros((1, M))\n Xb = np.concatenate([X, offset], axis=0)\n\n # Construct the input to the solver\n # Want to minimize 1/2 * w.T * P * w subject to the constraint that -y * X.T * w <= -1\n # P ignores the component of w that corresponds to offset, the constraint does not.\n\n # P should be identity with the final component set to zero\n P = np.identity(N + 1)\n P[-1, -1] = 0\n P = matrix(P)\n\n # q should be zero, (no term like q.T * w)\n q = np.zeros(N + 1)\n q = matrix(q)\n\n # Specify the constraint. 
Ab is -y * X.T, bb is a vector of -1s\n Ab = - y * Xb # (N+1, M)\n Ab = matrix(Ab.T) # (M, N+1)\n bb = - np.ones(M)\n bb = matrix(bb)\n\n # Solve using cvxopt\n output = solvers.qp(P, q, Ab, bb)\n ub = np.array(output['x'])\n # Separate the bias\n u = ub[0:-1, 0]\n b = ub[-1, 0]\n # Normalize the outputs\n u_norm = np.linalg.norm(u)\n b /= u_norm\n w = u/u_norm\n # Compute the margin\n Pr = (np.matmul(w.T, X) + b)/np.linalg.norm(w.T)\n margin = np.min(y * Pr )\n # Check separability\n separable = np.all(np.sign(Pr) == y)\n return separable, w, margin, 1, u, b\n\n\ndef run_mftma_simcap(layer_data, par=10):\n mfmta_data_ = {'mftma_results': []}\n # run mftma on all layers and hierarchies\n mftmas_cell = []\n for hier_id, activ_hier in enumerate(layer_data):\n data_ = {'a_sim': [],'P': [],'Nc0': [],'N_vec': [],\n 'p_vec': [],\n 'layer': [],'n_hier_class': [],'hierarchy': hier_id}\n a_sim = []\n P = []\n Nc0 = []\n N_vec = []\n p_vec = []\n for k, X in activ_hier.items():\n data_['layer'] = k\n data_['n_hier_class'] = len(X)\n # Analyze each layer's activations\n try:\n a_sim0, P0, Nc00, N_vec0, p_vec0 = manifold_simcap_analysis(X, par)\n # Report the values for this layer\n print(\"a_sim: {:.4f}, P: {}, Nc0: {:.4f}, N_vec: {}\".format(a_sim0, P0, Nc00, N_vec0))\n except Exception:\n\n a_sim0 = np.nan\n P0 = np.nan\n Nc00 = np.nan\n N_vec0 = np.nan\n p_vec0 = np.nan\n # Store for later\n a_sim.append(a_sim0)\n P.append(P0)\n Nc0.append(Nc00)\n N_vec.append(N_vec0)\n p_vec.append(p_vec0)\n # combine the results\n data_['a_sim'] = a_sim\n data_['P'] = P\n data_['Nc0'] = Nc0\n data_['N_vec'] = N_vec\n data_['p_vec'] = p_vec\n mftmas_cell.append(data_)\n return mftmas_cell","sub_path":"utils/capacity_utils.py","file_name":"capacity_utils.py","file_ext":"py","file_size_in_byte":13472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"139358930","text":"#!/home/dmer/.pyenv/versions/env3/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\n'''\r\n---------------------------------------------------------------------------\r\nFile Name: image_match_brute.py\r\nDescription: \r\n\t\t\t Goal: brute-force matching\r\nVariables: None\r\nAuthor: \r\nChange Activity: First coding on 2018/6/8\r\n---------------------------------------------------------------------------\r\n'''\r\nimport os\r\nimport math\r\nimport sys\r\nimport timeit\r\nimport numpy as np\r\nimport pandas as pd\r\nimport cv2\r\n\r\nsys.path.append(\"/home/dmer/models/pub\")\r\nimport mysql_conn as ms\r\n\r\nsrc_folder = \"/data/image_query/news_video_company/website/comdir\"\r\nlib_folder = \"/data/image_file/creative_raw/creative/download/20180601/image/jrtt\"\r\nrst_folder = \"/data/image_file/siftmatch_rst\"\r\npm_t = 0.6\r\nnm_t = 3\r\n\r\nif __name__ == '__main__':\r\n\r\n sift = cv2.xfeatures2d.SIFT_create()\r\n for fnsrc in os.listdir(src_folder):\r\n if fnsrc.split('.')[-1].upper() in (\"JPG\", \"JPEG\", \"PNG\", \"BMP\", \"GIF\"):\r\n fns = src_folder + '/' + fnsrc\r\n print('searching for: ', fns)\r\n img1 = cv2.imdecode(np.fromfile(fns, dtype=np.uint8), -1)\r\n kp1, des1 = sift.detectAndCompute(img1, None)\r\n\r\n for fnlib in os.listdir(lib_folder):\r\n if fnlib.split('.')[-1].upper() in (\"JPG\", \"JPEG\", \"PNG\", \"BMP\", \"GIF\"):\r\n fng = lib_folder + '/' + fnlib\r\n img2 = cv2.imdecode(np.fromfile(fng, dtype=np.uint8), -1)\r\n kp2, des2 = sift.detectAndCompute(img2, None)\r\n minkpnum = min(len(kp1), len(kp2))\r\n if minkpnum > 20: \r\n\t # Brute-force matcher: it takes two parameters, the distance metric (L2 by default, or L1) and whether to cross-check matches (False by default)\r\n\t bf = 
cv2.BFMatcher()\r\n\t matches = bf.knnMatch(des1, des2, k=2)\r\n\t # print(type(matches))\r\n\t # print(matches)\r\n\t # print(len(matches))\r\n\t # exit()\r\n\t if len(matches) * nm_t < minkpnum:\r\n\t \tcontinue\r\n\r\n\t # cv2.drawMatchesKnn expects list of lists as matches.\r\n\t # OpenCV 3.0 provides the drawMatchesKnn function\r\n\t # Apply ratio test\r\n\t # Ratio test: for each descriptor A take the closest match B and the second-closest C; the pair is kept\r\n\t # only when B/C is below the threshold (0.75 in the classic test), since matches are assumed one-to-one and an ideal true match has distance 0\r\n\t good = []\r\n\t for m, n in matches:\r\n\t if m.distance < pm_t * n.distance:\r\n\t print('ddist: ', m.distance/n.distance)\r\n\t good.append([m])\r\n\r\n\t if len(good) * nm_t > minkpnum: # match acceptance threshold\r\n\t print('lengood: %d, minkpnum: %d ' % (len(good), minkpnum))\r\n\t print('matched: ', fns, '---', fng)\r\n\t img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)\r\n\t cv2.imencode('.jpeg', img3)[1].tofile(rst_folder + '/' + fnsrc + '---' + fnlib)\r\n","sub_path":"image_retrieval_sift_nmslib/image_match_brute_1by1.py","file_name":"image_match_brute_1by1.py","file_ext":"py","file_size_in_byte":3286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"388860728","text":"from requests import post as http_post\n\nfrom opwen_email_server.constants.cloudflare import DNS_URL\nfrom opwen_email_server.constants.sendgrid import MX_RECORD\nfrom opwen_email_server.utils.log import LogMixin\n\n\nclass SetupCloudflareMxRecords(LogMixin):\n def __init__(self, user: str, key: str, zone: str) -> None:\n self._user = user\n self._key = key\n self._zone = zone\n\n def __call__(self, domain: str) -> None:\n if not self._key:\n self.log_warning('No key, skipping MX setup for %s', domain)\n return\n\n client_name = domain.split('.')[0]\n\n http_post(\n url=DNS_URL.format(self._zone),\n json={\n 'type': 'MX',\n 'content': MX_RECORD,\n 'proxied': False,\n 'priority': 1,\n 'name': client_name,\n },\n headers={\n 'X-Auth-Key': self._key,\n 'X-Auth-Email': self._user,\n }\n ).raise_for_status()\n\n self.log_debug('Set up mx records for %s', domain)\n","sub_path":"opwen_email_server/services/cloudflare.py","file_name":"cloudflare.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"103519024","text":"from crum import get_current_user\nfrom django.db import models\nfrom django.contrib.auth.models import User\n\n\nfrom .mod import audi\n# Create your models here.\n\nclass Autor(models.Model):\n nombre = models.CharField(max_length=100)\n telefono = models.PositiveBigIntegerField()\n def __str__(self):\n return self.nombre\n\n\nclass Canciones(models.Model):\n titulo = models.CharField(max_length=100)\n fechalanzamiento = models.DateTimeField(auto_now_add=True, null=True, blank=True)\n url = models.URLField(unique=True, null=True, blank=True)\n imagen = models.ImageField(upload_to='Caratulas', null=True, blank=True)\n\n\n def __str__(self):\n return self.titulo\n\n\nclass AutorCancion(models.Model):\n genero = [\n ('ROCK', 'ROCK'),\n ('POP', 'POP'),\n ('ALTERNATIVA', 'ALTERNATIVA'),\n ]\n autor = models.ForeignKey(Autor, on_delete=models.CASCADE, default='Undefinided')\n cancion = models.ForeignKey(Canciones, on_delete=models.CASCADE)\n genero = models.CharField(max_length=12, choices=genero, null=True, blank=True)\n def __str__(self):\n return '{} de {}'.format(self.cancion.titulo, self.autor.nombre)\n\n\n\n\nclass Lista(audi):\n cancionL = models.ForeignKey(AutorCancion, 
on_delete=models.CASCADE)\n\n def save(self, force_insert=False, force_update=False, using=None,\n update_fields=None):\n\n user = get_current_user()\n if user is not None:\n self.usuariocreatedor = user\n\n super(Lista, self).save(force_insert, force_update, using, update_fields)\n\n","sub_path":"musiquita/Musica/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"386081224","text":"##########################\n# PREAMBLE #\n##########################\n\nimport os\nimport sys\nscript_dir = os.path.dirname(os.path.abspath(__file__))\nsys.path.insert(0, os.path.join(script_dir, '..'))\n\n# Limit the number of threads\nfrom util import limit_threads, set_seed, create_plots, store_history,\\\n TimeHistory, threshold_plots, save_img\nlimit_threads()\n\n# Try to generate the results as reproducible as possible\nset_seed(42)\n\n\n##########################\n# IMPORTS #\n##########################\n\nimport random\nimport numpy as np\nimport keras\nimport math\nimport time\nimport tensorflow as tf\nfrom data_manipulation import load_data, crop_data, merge_data_without_overlap,\\\n check_crops, crop_data_with_overlap,\\\n merge_data_with_overlap, check_binary_masks\nfrom data_3D_generators import VoxelDataGenerator\nfrom unet_3d import U_Net_3D\nfrom metrics import jaccard_index, jaccard_index_numpy, voc_calculation,\\\n DET_calculation\nfrom itertools import chain\nfrom skimage.io import imread, imshow, imread_collection, concatenate_images\nfrom skimage.morphology import label\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom keras.models import load_model\nfrom PIL import Image\nfrom tqdm import tqdm\nfrom smooth_tiled_predictions import predict_img_with_smooth_windowing, \\\n predict_img_with_overlap\nfrom skimage.segmentation import clear_border\n\n\n##########################\n# ARGS VERIFICATION #\n##########################\n\n# Take arguments\ngpu_selected = str(sys.argv[1]) \njob_id = str(sys.argv[2]) \ntest_id = str(sys.argv[3]) \njob_file = job_id + '_' + test_id \nbase_work_dir = str(sys.argv[4])\nlog_dir = os.path.join(base_work_dir, 'logs', job_id)\n\n# Checks\nprint(\"job_id : {}\".format(job_id))\nprint(\"GPU selected : {}\".format(gpu_selected))\nprint(\"Python : {}\".format(sys.version.split('\\n')[0]))\nprint(\"Numpy : {}\".format(np.__version__))\nprint(\"Keras : {}\".format(keras.__version__))\nprint(\"Tensorflow : {}\".format(tf.__version__))\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = gpu_selected\n\n# Control variables \ncrops_made = False\n\n# Working dir\nos.chdir(base_work_dir)\n\n\n########################## \n# EXPERIMENT VARIABLES #\n##########################\n\n### Dataset variables\n# Main dataset data/mask paths\ndata_base_path = 'data'\ntrain_path = os.path.join(data_base_path, 'train', 'x')\ntrain_mask_path = os.path.join(data_base_path, 'train', 'y')\ntest_path = os.path.join(data_base_path, 'test', 'x')\ntest_mask_path = os.path.join(data_base_path, 'test', 'y')\n# Percentage of the training data used as validation \nperc_used_as_val = 0.1\n\n\n### Dataset shape\n# Note: train and test dimensions must be the same when training the network and\n# making the predictions. 
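For instance, training on (1024, 768, 1) images requires the test images to be prepared with that same shape. 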
Be sure to take care of this if you are not going to\n# use \"crop_data()\" with the arg force_shape, as this function resolves the \n# problem by always creating crops of the same dimension\nimg_train_shape = (1024, 768, 1)\nimg_test_shape = (1024, 768, 1)\n\n\n### 3D volume variables\n# Shape of the 3D subvolumes \nimg_3d_desired_shape = (80, 256, 256, 1)\n# Flag to use all the images to create the 3D subvolumes. If it is False random\n# subvolumes from the whole data will be generated instead.\nuse_all_volume = True\n\n\n### Normalization\n# Flag to normalize the data dividing by the mean pixel value\nnormalize_data = False \n# Force the normalization value to the given number instead of the mean pixel \n# value\nnorm_value_forced = -1 \n\n\n### Data augmentation (DA) variables\n# Flag to activate DA\nda = False\n# Flag to shuffle the training data on every epoch \nshuffle_train_data_each_epoch = False\n# Flag to shuffle the validation data on every epoch\nshuffle_val_data_each_epoch = False\n# Shift range to apply to the subvolumes \nshift_range = 0.0\n# Range of rotation to the subvolumes\nrotation_range = 0\n# Flag to make flips on the subvolumes \nflips = False\n# Flag to extract random subvolumes during the DA. Not compatible with \n# 'use_all_volume' as it forces the data preparation into subvolumes\nrandom_subvolumes_in_DA = False\n\n\n### Load previously generated model weights\n# Flag to load the weights of a previous training instead of training \n# the network again\nload_previous_weights = True\n# ID of the previous experiment to load the weights from \nprevious_job_weights = job_id\n# Flag to activate fine tuning\nfine_tunning = False\n# ID of the previous weights to load for fine tuning \nfine_tunning_weigths = \"232\"\n# Prefix of the files where the weights are stored/loaded from\nweight_files_prefix = 'model.fibsem_'\n# Name of the folder where weights files will be stored/loaded from. This folder \n# must be located inside the directory pointed by \"base_work_dir\" variable. If\n# there is no such directory, it will be created for the first time\nh5_dir = 'h5_files'\n\n\n### Experiment main parameters\n# Batch size value\nbatch_size_value = 1\n# Optimizer to use. Possible values: \"sgd\" or \"adam\"\noptimizer = \"sgd\"\n# Learning rate used by the optimization method\nlearning_rate_value = 0.001\n# Number of epochs to train the network\nepochs_value = 360\n# Number of epochs without improvement after which training stops\npatience = 50 \n# Flag to activate the creation of a chart showing the loss and metrics at \n# different binarization threshold values, from 0.1 to 1. Useful to check a \n# correct threshold value (normally 0.5)\nmake_threshold_plots = False\n# Define time callback \ntime_callback = TimeHistory()\n\n\n### Network architecture specific parameters\n# Number of channels in the first initial layer of the network\nnum_init_channels = 16\n# Flag to activate Spatial Dropout instead of using the \"normal\" dropout layer\nspatial_dropout = False\n# Fixed value to make the dropout. 
Ignored if the value is zero\nfixed_dropout_value = 0.0 \n\n\n### Post-processing\n# Flag to activate the post-processing (Smoooth and Z-filtering)\npost_process = True\n\n\n### Paths of the results \n# Directory where predicted images of the segmentation will be stored\nresult_dir = os.path.join('results', 'results_' + job_id, job_file)\n# Directory where binarized predicted images will be stored\nresult_bin_dir = os.path.join(result_dir, 'binarized')\n# Directory where predicted images will be stored\nresult_no_bin_dir = os.path.join(result_dir, 'no_binarized')\n# Directory where binarized predicted images with 50% of overlap will be stored\nresult_bin_dir_50ov = os.path.join(result_dir, 'binarized_50ov')\n# Folder where the smoothed images will be stored\nsmooth_dir = os.path.join(result_dir, 'smooth')\n# Folder where the images with the z-filter applied will be stored\nzfil_dir = os.path.join(result_dir, 'zfil')\n# Folder where the images with smoothing and z-filter applied will be stored\nsmoo_zfil_dir = os.path.join(result_dir, 'smoo_zfil')\n# Name of the folder where the charts of the loss and metrics values while \n# training the network will be shown. This folder will be created under the\n# folder pointed by \"base_work_dir\" variable \nchar_dir = 'charts'\n\n\n#####################\n# SANITY CHECKS #\n#####################\n\nprint(\"#####################\\n# SANITY CHECKS #\\n#####################\")\n\ncheck_binary_masks(train_mask_path)\ncheck_binary_masks(test_mask_path)\n\n\n########################## \n# LOAD DATA # \n##########################\n\nprint(\"##################\\n# LOAD DATA #\\n##################\\n\")\n\nX_train, Y_train, \\\nX_val, Y_val, \\\nX_test, Y_test, \\\nnorm_value, _ = load_data(\n train_path, train_mask_path, test_path, test_mask_path, img_train_shape, \n img_test_shape, val_split=perc_used_as_val, shuffle_val=False,\n make_crops=False, prepare_subvolumes=use_all_volume, \n subvol_shape=img_3d_desired_shape)\n\n# Normalize the data\nif normalize_data == True:\n if norm_value_forced != -1: \n print(\"Forced normalization value to {}\".format(norm_value_forced))\n norm_value = norm_value_forced\n else:\n print(\"Normalization value calculated: {}\".format(norm_value))\n X_train -= int(norm_value)\n X_val -= int(norm_value)\n X_test -= int(norm_value)\n \n\n##########################\n# DATA AUGMENTATION #\n##########################\n\nprint(\"##################\\n# DATA AUG #\\n##################\\n\")\n\ntrain_generator = VoxelDataGenerator(\n X_train, Y_train, random_subvolumes_in_DA=random_subvolumes_in_DA, \n shuffle_each_epoch=shuffle_train_data_each_epoch, batch_size=batch_size_value, \n da=da, flip=flips, shift_range=shift_range, rotation_range=rotation_range)\n\nval_generator = VoxelDataGenerator(\n X_val, Y_val, random_subvolumes_in_DA=False, \n shuffle_each_epoch=shuffle_val_data_each_epoch, batch_size=batch_size_value, \n da=False) \n \n\n##########################\n# BUILD THE NETWORK #\n##########################\n\nprint(\"###################\\n# TRAIN PROCESS #\\n###################\\n\")\n\nprint(\"Creating the network . . 
.\")\nmodel = U_Net_3D(img_3d_desired_shape, numInitChannels=num_init_channels, \n spatial_dropout=spatial_dropout,\n fixed_dropout=fixed_dropout_value,\n optimizer=optimizer, lr=learning_rate_value)\n\nmodel.summary()\n\nif load_previous_weights == False:\n earlystopper = EarlyStopping(patience=patience, verbose=1, \n restore_best_weights=True)\n \n if not os.path.exists(h5_dir): \n os.makedirs(h5_dir)\n checkpointer = ModelCheckpoint(\n os.path.join(h5_dir, weight_files_prefix + job_file + '.h5'),\n verbose=1, save_best_only=True)\n \n if fine_tunning == True: \n h5_file=os.path.join(h5_dir, weight_files_prefix + fine_tunning_weigths \n + '_' + test_id + '.h5') \n print(\"Fine-tunning: loading model weights from h5_file: {}\".format(h5_file)) \n model.load_weights(h5_file) \n \n results = model.fit_generator(\n train_generator, validation_data=val_generator,\n validation_steps=math.ceil(len(X_val)/batch_size_value),\n steps_per_epoch=math.ceil(len(X_train)/batch_size_value),\n epochs=epochs_value, \n callbacks=[earlystopper, checkpointer, time_callback])\nelse:\n h5_file=os.path.join(h5_dir, weight_files_prefix + previous_job_weights \n + '_' + test_id + '.h5')\n print(\"Loading model weights from h5_file: {}\".format(h5_file))\n model.load_weights(h5_file)\n\n\n#####################\n# INFERENCE #\n#####################\n\nprint(\"##################\\n# INFERENCE #\\n##################\\n\")\n\n# Evaluate to obtain the loss value and the Jaccard index\nprint(\"Evaluating test data . . .\")\nscore = model.evaluate(X_test, Y_test, batch_size=batch_size_value, verbose=1)\njac_per_subvolume = score[1]\n\n# Predict on test\nprint(\"Making the predictions on test data . . .\")\npreds_test = model.predict(X_test, batch_size=batch_size_value, verbose=1)\n\n# Threshold images\nbin_preds_test = (preds_test > 0.5).astype(np.uint8)\n\nprint(\"Saving predicted images . . .\")\n#reconstruct the images \n#save_img(Y=bin_preds_test, mask_dir=result_bin_dir, prefix=\"test_out_bin\")\n#save_img(Y=preds_test, mask_dir=result_no_bin_dir, prefix=\"test_out_no_bin\")\n\nprint(\"Calculate metrics . . 
.\")\n# Per image without overlap\nscore[1] = jaccard_index_numpy(Y_test, bin_preds_test)\nvoc = voc_calculation(Y_test, bin_preds_test, score[1])\n#det = DET_calculation(Y_test, bin_preds_test, det_eval_ge_path,\n# det_eval_path, det_bin, n_dig, job_id)\ndet = -1\n\n# 50% overlap\njac_per_img_50ov = -1\nvoc_per_img_50ov = -1\ndet_per_img_50ov = -1\n\n \n####################\n# POST-PROCESING #\n####################\n\nprint(\"##################\\n# POST-PROCESING #\\n##################\\n\") \n\nprint(\"1) SMOOTH\")\n# not implemented\nprint(\"2) Z-FILTERING\")\n# not implemented\nprint(\"Finish post-processing\") \n\n\n####################################\n# PRINT AND SAVE SCORES OBTAINED #\n####################################\n\nif load_previous_weights == False:\n print(\"Epoch average time: {}\".format(np.mean(time_callback.times)))\n print(\"Epoch number: {}\".format(len(results.history['val_loss'])))\n print(\"Train time (s): {}\".format(np.sum(time_callback.times)))\n print(\"Train loss: {}\".format(np.min(results.history['loss'])))\n print(\"Train jaccard_index: {}\"\\\n .format(np.max(results.history['jaccard_index'])))\n print(\"Validation loss: {}\".format(np.min(results.history['val_loss'])))\n print(\"Validation jaccard_index: {}\"\\\n .format(np.max(results.history['val_jaccard_index'])))\n\nprint(\"Test loss: \".format(score[0]))\nprint(\"Test jaccard_index (per subvolume): {}\".format(jac_per_subvolume))\nprint(\"Test jaccard_index (per image without overlap): {}\".format(score[1]))\nprint(\"Test jaccard_index (per image with 50% overlap): {}\".format(jac_per_img_50ov))\nprint(\"VOC (per image without overlap): {}\".format(voc))\nprint(\"VOC (per image with 50% overlap): {}\".format(voc_per_img_50ov))\nprint(\"DET (per image without overlap): {}\".format(det))\nprint(\"DET (per image with 50% overlap): {}\".format(det_per_img_50ov))\n \nif load_previous_weights == False:\n smooth_score = -1 if 'smooth_score' not in globals() else smooth_score\n smooth_voc = -1 if 'smooth_voc' not in globals() else smooth_voc\n smooth_det = -1 if 'smooth_det' not in globals() else smooth_det\n zfil_score = -1 if 'zfil_score' not in globals() else zfil_score\n zfil_voc = -1 if 'zfil_voc' not in globals() else zfil_voc\n zfil_det = -1 if 'zfil_det' not in globals() else zfil_det\n smo_zfil_score = -1 if 'smo_zfil_score' not in globals() else smo_zfil_score\n smo_zfil_voc = -1 if 'smo_zfil_voc' not in globals() else smo_zfil_voc\n smo_zfil_det = -1 if 'smo_zfil_det' not in globals() else smo_zfil_det\n jac_per_subvolume = -1 if 'jac_per_subvolume' not in globals() else jac_per_subvolume\n\n store_history(\n results, jac_per_subvolume, score, jac_per_img_50ov, voc, \n voc_per_img_50ov, det, det_per_img_50ov, time_callback, log_dir,\n job_file, smooth_score, smooth_voc, smooth_det, zfil_score, zfil_voc, \n zfil_det, smo_zfil_score, smo_zfil_voc, smo_zfil_det)\n\n create_plots(results, job_id, test_id, char_dir)\n\nprint(\"FINISHED JOB {} !!\".format(job_file))\n","sub_path":"cheng_2017/tf_2.0_code/templates/3d_template.py","file_name":"3d_template.py","file_ext":"py","file_size_in_byte":15166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"599513079","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\n\nplt.subplots_adjust(left=0.25, bottom=0.25)\n\"\"\"\nx = np.arange(0.0, 1.0, 
0.1)\na0 = 5\nb0 = 1\ny = a0 * x + b0\nz = np.zeros(10)\n\"\"\"\na0 = 5\nb0 = 1\n\nf, a, b, c = 3, 1, 1, 1\n\nt = np.linspace(0, np.pi, f)\ng = np.linspace(0, 2*np.pi, f)\n\nth, ph = np.meshgrid(t, g)\nr = 0.2\nX, Y, Z = a*np.sin(th)*np.cos(ph), b*np.sin(th)*np.sin(ph), c*np.cos(th)\n\n# Plot the sampled surface points as one 3D line so the slider callback can update it\nl, = ax.plot(X.ravel(), Y.ravel(), Z.ravel())\n\n# Set size of Axes\nax.set_xlim(-10, 10)\nax.set_ylim(-10, 10)\n\n# Place Sliders on Graph\nax_a = plt.axes([0.25, 0.1, 0.65, 0.03])\nax_b = plt.axes([0.25, 0.15, 0.65, 0.03])\n\n# Create Sliders & Determine Range\nsa = Slider(ax_a, 'a', 0, 10.0, valinit=a0)\nsb = Slider(ax_b, 'b', 0, 10.0, valinit=b0)\n\n\ndef update(val):\n a = sa.val\n b = sb.val\n # Recompute the ellipsoid points with the slider-controlled semi-axes\n l.set_data(a*(np.sin(th)*np.cos(ph)).ravel(), b*(np.sin(th)*np.sin(ph)).ravel())\n l.set_3d_properties((c*np.cos(th)).ravel())\n fig.canvas.draw_idle()\n\nsa.on_changed(update)\nsb.on_changed(update)\n\nplt.show()","sub_path":"CG/lab3/ialjwd.py","file_name":"ialjwd.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"244369723","text":"import numpy as np\nfrom collections import Counter\nimport re\nimport json\n\"\"\"\nATTENTION: Use the following dictionaries to get the correct index for each\n amino acid when accessing any type of matrix or array provided as\n parameters. Further, use those indices when generating or returning\n any matrices or arrays. Failure to do so will most likely result in\n not passing the tests.\nEXAMPLE: To access the substitution frequency from alanine 'A' to proline 'P'\n in the bg_matrix use bg_matrix[AA_TO_INT['A'], AA_TO_INT['P']].\n\"\"\"\nALPHABET = 'ACDEFGHIKLMNPQRSTVWY-'\nAA_TO_INT = {aa: index for index, aa in enumerate(ALPHABET)}\nINT_TO_AA = {index: aa for index, aa in enumerate(ALPHABET)}\nGAP_INDEX = AA_TO_INT['-']\npattern = re.compile(ALPHABET)\n#config = json.loads(open('./pssm_test.json').read())\n\nclass MSA:\n\n def __init__(self, sequences):\n \"\"\"\n Initialize the MSA class with the provided list of sequences. Check the\n sequences for correctness. 
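For example (hypothetical): MSA(['ACDE-', 'ACDEF']) is accepted, whereas MSA(['ACDE', 'AC']) or MSA([]) raises TypeError. 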
Pre-calculate any statistics you seem fit.\n\n :param sequences: List containing the MSA sequences.\n \"\"\"\n list_does_contain = True if all(bool(re.match('^[ACDEFGHIKLMNPQRSTVWY-]+$', item)) for item in sequences) else False\n all_same_length = True if len(sequences) > 0 and all(len(l) == len(next(iter(sequences))) for l in sequences) else False\n has_item = True if len(sequences) > 0 else False\n \n if not has_item or not all_same_length or not list_does_contain or sequences==None :\n raise TypeError('Invalid MSA')\n else:\n self.sequences = sequences \n self.num_seqs, self.msa_length = self.get_size()\n self.frequencies = self.freq_count()\n self.ungapped_seq_length = len(self.get_primary_sequence())\n self.ungapped_pri_seq_positions= list(i for i,x in enumerate(self.sequences[0]) if x != '-')\n self.weighted_freq = self.get_weighted_freq()\n\n self.p = 0.05\n self.pssm_matrix = np.zeros((self.ungapped_seq_length, len(ALPHABET)-1), dtype=np.float64)\n self.freq = self.frequencies[self.ungapped_pri_seq_positions, :len(ALPHABET)-1]\n self.gaps = self.frequencies[self.ungapped_pri_seq_positions, len(ALPHABET)-1].reshape(self.ungapped_seq_length,1)\n self.alpha = self.get_number_of_observations()-1\n def freq_count(self):\n frequencies = np.zeros((self.msa_length, len(ALPHABET)), dtype=np.float64)\n for s in self.sequences:\n for i,j in enumerate(s):\n frequencies[i][AA_TO_INT[j]] += 1\n return frequencies\n\n def get_weighted_freq(self):\n weighted_freq = np.zeros((self.msa_length, 21), dtype=np.float64)\n curr_seq = 0\n weights = self.get_sequence_weights()\n for s in self.sequences:\n for i,j in enumerate(s):\n weighted_freq[i][AA_TO_INT[j]] += weights[curr_seq]\n if i+1 == self.msa_length:\n curr_seq += 1\n return weighted_freq \n # def get_pseudo_freq(self, bg_matrix):\n # pseudo_freq = np.zeros((self.msa_length, 21), dtype=np.float64)\n # curr_seq = 0\n # pseudo_counts = (self.freq/self.p).dot(bg_matrix)\n # for s in self.sequences:\n # for i,j in enumerate(s):\n # pseudo_freq[i][AA_TO_INT[j]] += pseudo_counts[curr_seq]\n # if i+1 == self.msa_length:\n # curr_seq += 1\n # return weighted_freq \n def calc_pssm(self, p, freq):\n pssm_matrix = np.zeros((self.ungapped_seq_length, len(ALPHABET)-1), dtype=np.float64)\n normalized_f = freq/np.sum(freq, axis=1, keepdims=True)\n pssm_matrix = 2*np.log2(normalized_f/p)\n pssm_matrix[np.where(pssm_matrix == 0.0) ] = -20\n return np.rint(pssm_matrix).astype(np.int64)\n def get_weighted_pssm(self):\n p = 0.05\n freq = self.weighted_freq[self.ungapped_pri_seq_positions, :len(ALPHABET)-1]\n normalized_f = freq/np.sum(freq, axis=1, keepdims=True)\n pssm_matrix = 2*np.log2(normalized_f/p)\n pssm_matrix[np.where(normalized_f == 0.0) ] = -20\n\n return np.rint(pssm_matrix).astype(np.int64)\n\n def get_pssm_with_background(self, bg_matrix): \n pssm_matrix = np.zeros((self.ungapped_seq_length, len(ALPHABET)-1), dtype=np.float64) \n back_freq = np.sum(bg_matrix, axis=0).reshape(1,20)\n aligned_freq = self.frequencies[self.ungapped_pri_seq_positions, :len(ALPHABET)-1]\n normalized_f = aligned_freq/np.sum(aligned_freq, axis=1, keepdims=True)\n pssm_matrix = 2*np.log2(normalized_f/back_freq)\n pssm_matrix[np.where(normalized_f == 0.0) ] = -20\n return np.rint(pssm_matrix).astype(np.int64)\n\n def get_basic_pssm(self):\n pssm_matrix = np.zeros((self.ungapped_seq_length, len(ALPHABET)-1), dtype=np.float64) \n p = 0.05\n aligned_freq = self.frequencies[self.ungapped_pri_seq_positions, :len(ALPHABET)-1]\n normalized_f = aligned_freq/np.sum(aligned_freq, axis=1, 
keepdims=True)\n normalized_f[np.where(normalized_f == 0.0) ] = (2**-10)*p \n pssm_matrix = 2*np.log2(normalized_f/p)\n return np.rint(pssm_matrix).astype(np.int64)\n\n# pssm_matrix looks the same as in the tests, but the test fails with \"Initialization failed\"; maybe the error is in how get_pssm calls this.\n def get_pssm_with_distr_gap(self):\n pssm_matrix = np.zeros((self.ungapped_seq_length, len(ALPHABET)-1), dtype=np.float64)\n freq = self.frequencies[self.ungapped_pri_seq_positions, :len(ALPHABET)-1]\n gaps = self.frequencies[self.ungapped_pri_seq_positions, len(ALPHABET)-1].reshape(self.ungapped_seq_length,1)\n p = 0.05\n freq += gaps.dot(p)\n normalized_f = freq/np.sum(freq, axis=1, keepdims=True)\n pssm_matrix = 2*np.log2(normalized_f/p)\n pssm_matrix[np.where(normalized_f == 0.0) ] = -20\n pssm_matrix = np.rint(pssm_matrix).astype(np.int64)\n return pssm_matrix\n def get_pssm_with_background_w_gaps(self, bg_matrix):\n pssm_matrix = np.zeros((self.ungapped_seq_length, len(ALPHABET)-1), dtype=np.float64)\n freq = self.frequencies[self.ungapped_pri_seq_positions, :len(ALPHABET)-1]\n gaps = self.frequencies[self.ungapped_pri_seq_positions, len(ALPHABET)-1].reshape(self.ungapped_seq_length,1)\n back_freq = np.sum(bg_matrix, axis=0).reshape(1,20)\n freq += gaps.dot(back_freq)\n normalized_f = freq/np.sum(freq, axis=1, keepdims=True)\n pssm_matrix = 2*np.log2(normalized_f/back_freq)\n pssm_matrix[np.where(normalized_f == 0.0) ] = -20\n pssm_matrix = np.rint(pssm_matrix).astype(np.int64)\n return pssm_matrix\n def get_weighted_pssm_with_background(self, bg_matrix):\n back_freq = np.sum(bg_matrix, axis=0).reshape(1,20)\n freq = self.weighted_freq[self.ungapped_pri_seq_positions, :len(ALPHABET)-1]\n f = freq/np.sum(freq, axis=1, keepdims=True)\n pssm_matrix = 2*np.log2(f/back_freq)\n pssm_matrix[np.where(f == 0.0) ] = -20\n \n return np.rint(pssm_matrix).astype(np.int64)\n def get_weighted_pssm_with_background_distr(self, bg_matrix):\n back_freq = np.sum(bg_matrix, axis=0).reshape(1,20)\n freq = self.weighted_freq[self.ungapped_pri_seq_positions, :len(ALPHABET)-1]\n gaps = self.weighted_freq[self.ungapped_pri_seq_positions, len(ALPHABET)-1].reshape(self.ungapped_seq_length,1)\n freq += gaps.dot(back_freq)\n f = freq/np.sum(freq, axis=1, keepdims=True)\n pssm_matrix = 2*np.log2(f/back_freq)\n pssm_matrix[np.where(f == 0.0) ] = -20\n return np.rint(pssm_matrix).astype(np.int64)\n def get_pssm_with_pseudocounts(self, bg_matrix, beta):\n p = 0.05\n freq = self.frequencies[self.ungapped_pri_seq_positions, :len(ALPHABET)-1]\n pseudo_counts = (freq/p).dot(bg_matrix)\n alpha = self.get_number_of_observations()-1\n freq = (alpha*freq+beta*pseudo_counts)/(alpha+beta)\n f = freq/np.sum(freq, axis=1, keepdims=True)\n pssm_matrix = 2*np.log2(f/p)\n pssm_matrix[np.where(f == 0.0) ] = -20\n return np.rint(pssm_matrix).astype(np.int64)\n def get_pssm_with_pseudocounts_with_gap_bg(self, bg_matrix, beta):\n back_freq = np.sum(bg_matrix, axis=0).reshape(1,20)\n freq = self.frequencies[self.ungapped_pri_seq_positions, :len(ALPHABET)-1]\n gaps = self.frequencies[self.ungapped_pri_seq_positions, len(ALPHABET)-1].reshape(self.ungapped_seq_length,1)\n freq += gaps.dot(back_freq)\n pseudo_counts = (freq/back_freq).dot(bg_matrix)\n alpha = self.get_number_of_observations()-1\n freq = (alpha*freq+beta*pseudo_counts)/(alpha+beta)\n f = freq/np.sum(freq, axis=1, keepdims=True)\n pssm_matrix = 2*np.log2(f/back_freq)\n pssm_matrix[np.where(f == 0.0) ] = -20\n return np.rint(pssm_matrix).astype(np.int64)\n def 
get_pssm_with_weighted_distr_bg_pseudocounts(self, bg_matrix, beta):\n back_freq = np.sum(bg_matrix, axis=0).reshape(1,20)\n freq = self.weighted_freq[self.ungapped_pri_seq_positions, :len(ALPHABET)-1]\n gaps = self.weighted_freq[self.ungapped_pri_seq_positions, len(ALPHABET)-1].reshape(self.ungapped_seq_length,1)\n freq += gaps.dot(back_freq)\n pseudo_counts = (freq/back_freq).dot(bg_matrix)\n alpha = self.get_number_of_observations()-1\n freq = (alpha*freq+beta*pseudo_counts)/(alpha+beta)\n f = freq/np.sum(freq, axis=1, keepdims=True)\n pssm_matrix = 2*np.log2(f/back_freq)\n pssm_matrix[np.where(f == 0.0) ] = -20\n return np.rint(pssm_matrix).astype(np.int64)\n def get_pssm_with_weighted_bg_pseudocounts(self, bg_matrix, beta):\n back_freq = np.sum(bg_matrix, axis=0).reshape(1,20)\n freq = self.weighted_freq[self.ungapped_pri_seq_positions, :len(ALPHABET)-1]\n pseudo_counts = (freq/back_freq).dot(bg_matrix)\n alpha = self.get_number_of_observations()-1\n freq = (alpha*freq+beta*pseudo_counts)/(alpha+beta)\n f = freq/np.sum(freq, axis=1, keepdims=True)\n pssm_matrix = 2*np.log2(f/back_freq)\n pssm_matrix[np.where(f == 0.0) ] = -20\n return np.rint(pssm_matrix).astype(np.int64)\n def get_pssm(self, *, bg_matrix=None, beta=10, use_sequence_weights=False,\n redistribute_gaps=False, add_pseudocounts=False):\n \"\"\"\n Return a PSSM for the underlying MSA. Use the appropriate refinements \n according to the parameters. If no bg_matrix is specified, use uniform \n background frequencies.\n Every row in the resulting PSSM corresponds to a non-gap position in \n the primary sequence of the MSA (i.e. the first one).\n Every column in the PSSM corresponds to one of the 20 amino acids.\n Values that would be -inf must be replaced by -20 in the final PSSM.\n Before casting to dtype=numpy.int64, round all values to the nearest\n integer (do not just FLOOR all values).\n\n :param bg_matrix: Amino acid pair frequencies as numpy array (20, 20).\n Access the matrix using the indices from AA_TO_INT.\n :param beta: Beta value (float) used to weight the pseudocounts \n against the observed amino acids in the MSA.\n :param use_sequence_weights: Calculate and apply sequence weights.\n :param redistribute_gaps: Redistribute the gaps according to the \n background frequencies.\n :param add_pseudocounts: Calculate and add pseudocounts according \n to the background frequencies.\n\n :return: PSSM as numpy array of shape (L x 20, dtype=numpy.int64).\n L = ungapped length of the primary sequence.\n \"\"\"\n if self.sequences != None:\n pssm = np.zeros((self.ungapped_seq_length, len(ALPHABET)-1), np.int64)\n if bg_matrix and use_sequence_weights and redistribute_gaps and add_pseudocounts:\n pssm = self.get_pssm_with_weighted_distr_bg_pseudocounts(bg_matrix,beta)\n elif bg_matrix and redistribute_gaps and not use_sequence_weights and not add_pseudocounts:\n pssm = self.get_pssm_with_background_w_gaps(bg_matrix)\n elif bg_matrix and not redistribute_gaps and not use_sequence_weights and not add_pseudocounts:\n pssm = self.get_pssm_with_background(bg_matrix)\n elif bg_matrix and use_sequence_weights and not add_pseudocounts and not redistribute_gaps:\n pssm = self.get_weighted_pssm_with_background(bg_matrix)\n elif bg_matrix and use_sequence_weights and not add_pseudocounts and redistribute_gaps:\n pssm = self.get_weighted_pssm_with_background_distr(bg_matrix)\n elif not bg_matrix and add_pseudocounts and not use_sequence_weights and not redistribute_gaps:\n pssm = self.get_pssm_with_pseudocounts(bg_matrix, beta)\n elif 
bg_matrix and add_pseudocounts and use_sequence_weights and not redistribute_gaps:\n pssm = self.get_pssm_with_weighted_bg_pseudocounts(bg_matrix, beta)\n elif bg_matrix and add_pseudocounts and not use_sequence_weights and redistribute_gaps:\n pssm = self.get_pssm_with_pseudocounts_with_gap_bg(bg_matrix, beta)\n elif not bg_matrix and redistribute_gaps and not add_pseudocounts and not use_sequence_weights:\n pssm = self.get_pssm_with_distr_gap()\n elif not bg_matrix and not redistribute_gaps and not add_pseudocounts and use_sequence_weights:\n pssm = self.get_weighted_pssm()\n else:\n pssm = self.get_basic_pssm()\n return pssm\n\n\n # if bg_matrix:\n # back_freq = np.sum(bg_matrix, axis=0).reshape(1,20),\n # self.p = back_freq\n # if redistribute_gaps:\n # self.freq += self.gaps.dot(self.p)\n # if add_pseudocounts:\n # pseudo_counts = (self.freq/self.p).dot(bg_matrix)\n # self.freq = (self.alpha * self.freq + beta * pseudo_counts)/(self.alpha+beta)\n # if use_sequence_weights:\n # self.freq = self.weighted_freq[self.ungapped_pri_seq_positions, :len(ALPHABET)-1]\n # self.gaps = self.weighted_freq[self.ungapped_pri_seq_positions, len(ALPHABET)-1].reshape(self.ungapped_seq_length,1)\n # f = self.freq/np.sum(self.freq, axis=1, keepdims=True)\n # self.pssm_matrix = 2*np.log2(f/self.p)\n # self.pssm_matrix[np.where(f == 0.0) ] = -20\n # return np.rint(self.pssm_matrix).astype(np.int64)\n\n\n # if bg_matrix:\n # back_freq = np.sum(bg_matrix, axis=0).reshape(1,20),\n # self.p = back_freq\n # if redistribute_gaps:\n # self.freq += self.gaps.dot(self.p)\n # if add_pseudocounts:\n # pseudo_counts = (self.freq/self.p).dot(bg_matrix)\n # self.freq = (self.alpha * self.freq + beta * pseudo_counts)/(self.alpha+beta)\n # if use_sequence_weights:\n # self.freq = self.weighted_freq[self.ungapped_pri_seq_positions, :len(ALPHABET)-1]\n # self.gaps = self.weighted_freq[self.ungapped_pri_seq_positions, len(ALPHABET)-1].reshape(self.ungapped_seq_length,1)\n # self.freq += self.gaps.dot(self.p)\n # f = self.freq/np.sum(self.freq, axis=1, keepdims=True)\n # self.pssm_matrix = 2*np.log2(f/self.p)\n # self.pssm_matrix[np.where(f == 0.0) ] = -20\n # print(self.pssm_matrix)\n # return np.rint(self.pssm_matrix).astype(np.int64)\n def get_size(self):\n \"\"\"\n Return the number of sequences in the MSA and the MSA length, i.e.\n the number of columns in the MSA. This includes gaps.\n\n :return: Tuple of two integers. First element is the number of\n sequences in the MSA, second element is the MSA length.\n \"\"\"\n num_seqs = len(self.sequences)\n msa_length = len(self.sequences[0])\n return (num_seqs, msa_length)\n\n def get_primary_sequence(self):\n \"\"\"\n Return the primary sequence of the MSA. In this exercise, the primary\n sequence is always the first sequence of the MSA. 
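For a primary sequence 'AC-DE', the expected return value is 'ACDE'. 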
The returned \n sequence must NOT include gap characters.\n\n :return: String containing the ungapped primary sequence.\n \"\"\"\n return self.sequences[0].replace('-', '')\n\n def get_sequence_weights(self):\n \"\"\"\n Return the calculated sequence weights for all sequences in the MSA.\n The order of weights in the array must be equal to the order of the\n sequences in the MSA.\n\n :return: Numpy array (dtype=numpy.float64) containing the weights for\n all sequences in the MSA.\n \"\"\"\n weights = np.zeros(self.num_seqs)\n curr_seq = 0\n r = np.count_nonzero(self.frequencies, axis = 1)\n W = np.zeros((self.msa_length, self.num_seqs), dtype=np.float64)\n weights = np.zeros(self.num_seqs)\n for s in self.sequences:\n for i,j in enumerate(s):\n W[i][curr_seq] = 1.0/(self.frequencies[i][AA_TO_INT[j]]*r[i])\n if i+1 == self.msa_length:\n curr_seq += 1\n weights = np.sum(W[r > 1], axis = 0)\n return weights.astype(np.float64)\n\n def get_number_of_observations(self):\n \"\"\"\n Return the estimated number of independent observations in the MSA.\n\n :return: Estimate of independent observation (dtype=numpy.float64).\n \"\"\"\n r = np.count_nonzero(self.frequencies, axis = 1) \n num_obs = sum(r)/self.msa_length\n return num_obs.astype(np.float64)\n \n\n# pssm = MSA(config[\"msa_sequences\"]).get_pssm_with_pseudocounts_with_gap(config[\"bg_matrix\"],beta=10)\n# print(pssm)\n# # print(len(config[\"pssm_07\"]))\n# print(np.array_equal(pssm, config[\"pssm_08\"]))\n\n# pssm = MSA(config[\"msa_sequences\"]).get_pssm_with_pseudocounts(config[\"bg_matrix\"],beta=10)\n# print(pssm)\n# # print(len(config[\"pssm_07\"]))\n# print(np.array_equal(pssm, config[\"pssm_07\"]))","sub_path":"codechecker/repos/4/collected_files/pssm/ga62toz.py","file_name":"ga62toz.py","file_ext":"py","file_size_in_byte":18291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"293913140","text":"#from firebase import firebase\nfrom flask import Response, Flask, request, render_template\nimport requests\nimport sqlite3\nimport json\nfrom datetime import datetime\n\napp = Flask(__name__)\n\npalabraRecibida = \"\"\nmodo = \"\"\n\n@app.route('/restart')\ndef create_table():\n conn=sqlite3.connect('database.db')\n c=conn.cursor()\n c.execute('''DROP TABLE PALABRAS;''')\n c.execute('''CREATE TABLE PALABRAS (id INTEGER PRIMARY KEY,TEXTO TEXT, MODO TEXT, FECHAHORA TEXT)''')\n conn.close()\n global palabraRecibida\n global modo\n palabraRecibida = \"\"\n modo = \"\"\n return \"Tabla PALABRAS reiniciada\"\n\n@app.route('/nuevaPalabra')\ndef nuevaPalabra():\n return render_template('nuevaPalabra.html')\n\n@app.route('/nuevaPalabra/Mostrar')\ndef nuevaPalabraMostrar():\n now = datetime.now() # current date and time\n date_time = now.strftime(\"%m/%d/%Y, %H:%M:%S\")\n global modo\n modo = \"MOSTRAR\"\n palabra = request.args['palabra']\n conn = sqlite3.connect(\"database.db\")\n c = conn.cursor()\n c.execute('''INSERT INTO PALABRAS(TEXTO,MODO,FECHAHORA) VALUES(?,?,?)''', (palabra,modo,date_time))\n conn.commit()\n conn.close()\n global palabraRecibida\n palabraRecibida = palabra\n return \"[\"+palabra+\"] GUARDADA CORRECTAMENTE, MODO: [\"+modo+\"]\"\n\n\n@app.route('/nuevaPalabra/saludo')\ndef nuevaPalabraSaludo():\n now = datetime.now() # current date and time\n date_time = now.strftime(\"%m/%d/%Y, %H:%M:%S\")\n global modo\n modo = \"SALUDO\"\n palabra = \"HOLA GRUPO 8\"\n conn = sqlite3.connect(\"database.db\")\n c = conn.cursor()\n c.execute('''INSERT INTO PALABRAS(TEXTO,MODO,FECHAHORA) 
VALUES(?,?,?)''', (palabra,modo,date_time))\n conn.commit()\n conn.close()\n global palabraRecibida\n palabraRecibida = palabra\n return \"[\"+palabra+\"] GUARDADA CORRECTAMENTE, MODO: [\"+modo+\"]\"\n\n\n@app.route('/nuevaPalabra/Aprendizaje')\ndef nuevaPalabraAprendizaje():\n now = datetime.now() # current date and time\n date_time = now.strftime(\"%m/%d/%Y, %H:%M:%S\")\n global modo\n modo = \"APRENDER\"\n palabra = request.args['palabra']\n conn = sqlite3.connect(\"database.db\")\n c = conn.cursor()\n c.execute('''INSERT INTO PALABRAS(TEXTO,MODO,FECHAHORA) VALUES(?,?,?)''', (palabra,modo,date_time))\n conn.commit()\n conn.close()\n global palabraRecibida\n palabraRecibida = palabra\n return \"[\"+palabra+\"] GUARDADA CORRECTAMENTE, MODO: [\"+modo+\"]\"\n\n@app.route('/')\ndef ListarPalabras():\n conn=sqlite3.connect('database.db')\n c=conn.cursor()\n c.execute('''SELECT * FROM PALABRAS;''')\n var = \"\"\n all_rows = c.fetchall()\n #opciones = '{ \"name\":\"John\", \"age\":30, \"city\":\"New York\"}'\n listaPalabras = []\n for row in all_rows:\n # row[0] returns the first column in the query (name), row[1] returns email column.\n var += ('{0} : {1}, {2}\\n'.format(row[0], row[1], row[2]))\n #opciones.append(4);\n palabra = [str(row[0]), str(row[1]), str(row[2]), str(row[3])]\n #listaPalabras = palabra\n listaPalabras.append(palabra)\n conn.close()\n #return Response(\"{\\\"a\\\":\\\"b\\\"}\", status=200, mimetype='application/json')\n #y = json.loads(opciones)\n y=json.dumps(listaPalabras)\n #return y\n return render_template('listarPalabras.html', result = listaPalabras)\n\n@app.route('/verificarBuffer')\ndef verificarBuffer():\n global palabraRecibida\n if palabraRecibida != \"\":\n auxPalabraRecibida = palabraRecibida\n palabraRecibida = \"\"\n return Response(\"{\\\"MODO\\\":\\\"\"+modo+\"\\\",\\\"PALABRA\\\":\\\"\"+auxPalabraRecibida+\"\\\"}\", status=200, mimetype='application/json')\n else:\n return \"\";","sub_path":"Servidor/FlaskApp.py","file_name":"FlaskApp.py","file_ext":"py","file_size_in_byte":3625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"312051225","text":"# -*- coding: utf-8 -*-\nimport click\n\n\nclick.disable_unicode_literals_warning = True\n\n\n@click.group('virtualenv')\ndef main():\n pass\n\n\n@main.command()\n@click.argument('name')\n@click.option('requirements', '-r', multiple=True)\n@click.option('editables', '-e', multiple=True)\n@click.option('packages', '-i', multiple=True)\n@click.option('--python', default='python2')\n@click.option('--force', is_flag=True)\ndef create(name, requirements, editables, packages, python, force):\n \"\"\"\n Create a virtualenv, caching when possible.\n \"\"\"\n from zerotk.zops import Console\n\n venv = _create_venv(name, python)\n if force:\n venv.force_create()\n else:\n venv.open_or_create()\n venv.install('virtualenvwrapper')\n\n for i_requirement in requirements:\n Console.item('REQUIREMENT: {}'.format(i_requirement))\n Console.output(venv.requirement(i_requirement, force=True, upgrade=True))\n\n for i_editable in editables:\n Console.item('EDITABLE: {}'.format(i_editable))\n Console.output(venv.editable(i_editable, force=True, upgrade=True))\n\n for i_package in packages:\n Console.item('PACKAGE: {}'.format(i_package))\n Console.output(venv.install(i_package, force=True, upgrade=True))\n\n\ndef _create_venv(name, python):\n from virtualenvapi.manage import VirtualEnvironment\n import os\n\n def workon_home(*args):\n try:\n result = os.environ['WORKON_HOME']\n 
except KeyError:\n raise RuntimeError('Environment variables WORKON_HOME not found.')\n else:\n os.makedirs(result, exist_ok=True)\n return os.path.join(result, *args)\n\n venv_path = workon_home(name)\n result = VirtualEnvironment(venv_path, python=python)\n return result\n","sub_path":"zops/virtualenv/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"386627367","text":"\n\"\"\"\nstorage.py: support for using Banana as if it were pickle\n\nThis includes functions for serializing to and from strings, instead of a\nnetwork socket. It also has support for serializing 'unsafe' objects,\nspecifically classes, modules, functions, and instances of arbitrary classes.\nThese are 'unsafe' because to recreate the object on the deserializing end,\nwe must be willing to execute code of the sender's choosing (i.e. the\nconstructor of whatever package.module.class names they send us). It is\nunwise to do this unless you are willing to allow your internal state to be\ncompromised by the author of the serialized data you're unpacking.\n\nThis functionality is isolated here because it is never used for data coming\nover network connections.\n\"\"\"\n\nimport sys\nimport io\nimport types\nimport inspect\nimport operator as O\n\nimport pickle\nfrom pickle import whichmodule # used by FunctionSlicer\n\nfrom twisted.internet.defer import Deferred\nfrom twisted.python import reflect\n\nfrom foolscap import slicer, banana, tokens\nfrom foolscap.slicer import BaseSlicer\nfrom foolscap.slicers.root import ScopedRootSlicer, ScopedRootUnslicer\n\nfrom .tokens import BananaError, Violation\n\n\n#ClassType = getattr(types, 'ClassType', type)\nInstanceType = getattr(types, 'InstanceType', object)\n\n\nUnsafeUnslicerRegistry = {}\n\n\n################## Slicers for \"unsafe\" things\n\n# Extended types, not generally safe. 
The UnsafeRootSlicer checks for these\n# with a separate table.\n\nclass InstanceSlicer(BaseSlicer):\n opentype = (b'instance',)\n trackReferences = True\n\n pickle_protocol = pickle.DEFAULT_PROTOCOL\n ordered_state = False # @note: used for tests\n\n def __init__(self, obj):\n assert not issubclass(type(obj), type), (type(obj), obj) # @see: pickle\n# if issubclass(type(obj), type):\n# raise Violation('Instance expected', type(obj), obj)\n super().__init__(obj)\n\n def sliceBody(self, streamable, banana):\n # @see: Pickle.save\n\n type_obj = type(self.obj)\n\n reduce = getattr(self.obj, '__reduce_ex__', None)\n\n if reduce is not None:\n try:\n rv = reduce(self.pickle_protocol)\n except TypeError as exc:\n raise Violation(str(exc))\n else:\n reduce = getattr(self.obj, '__reduce__', None)\n if reduce is None:\n raise BananaError('Can\\'t pickle {!r} object: {!r}'.format(type_obj, self.obj))\n rv = reduce()\n\n if isinstance(rv, str):\n raise NotImplementedError(self.obj, rv)\n\n if not isinstance(rv, tuple):\n raise BananaError('{!r} must return string or tuple'.format(reduce))\n\n rv_len = len(rv)\n\n if rv_len < 2 or 5 < rv_len:\n raise BananaError('Tuple {!r} returned by {!r} must have two to five elements'\\\n .format(rv, reduce))\n\n# def unpack_rv(func, args, state=None, listitems=None, dictitems=None):\n# return func, args, state, listitems, dictitems\n# func, args, state, listitems, dictitems = unpack_rv()\n\n # @see: Pickle.save_reduce\n\n func, args, *rv_rest = rv\n state, listitems, dictitems = tuple(rv_rest) + (None,) * (5 - rv_len)\n\n assert state is None or type(state) is dict, (type(state), state)\n\n if listitems is not None:\n raise NotImplementedError('listitems', self.obj, rv)\n\n if dictitems is not None:\n raise NotImplementedError('dictitems', self.obj, rv)\n\n func_name = getattr(func, '__name__', '')\n\n if 4 <= self.pickle_protocol and func_name == '__newobj_ex__':\n cls, args, kwargs = args\n\n if not hasattr(cls, '__new__'):\n raise BananaError('args[0] from __newobj_ex__ args has no __new__', cls, args)\n\n if cls is not type_obj:\n raise BananaError('args[0] from __newobj_ex__ args has the wrong class', cls, args)\n\n yield 4\n yield cls\n yield tuple(args)\n yield kwargs\n # NEWOBJ_EX\n\n elif 2 <= self.pickle_protocol and func_name == '__newobj__':\n cls, *args = args\n\n if not hasattr(cls, '__new__'):\n raise BananaError('args[0] from __newobj__ args has no __new__', cls, args)\n\n if cls is not type_obj:\n raise BananaError('args[0] from __newobj__ args has the wrong class', cls, args)\n\n yield 2\n yield cls\n yield tuple(args)\n # NEWOBJ\n\n else:\n yield 0\n yield func\n yield args\n # REDUCE\n\n if state:\n if self.ordered_state:\n state_items = sorted(state.items(), key=O.itemgetter(0))\n else:\n state_items = state.items()\n\n for key, value in state_items:\n yield key\n yield value\n\n # BUILD\n\n # @todo: listitems & dictitems\n\n def describe(self):\n return '<{}>'.format(type(self.obj).__name__)\n\n\nclass InstanceUnslicer(slicer.BaseUnslicer):\n # this is an unsafe unslicer: an attacker could induce you to create\n # instances of arbitrary classes with arbitrary attributes: VERY\n # DANGEROUS!\n\n opentype = (b'instance',)\n unslicerRegistry = UnsafeUnslicerRegistry\n\n pickle_protocol = None\n reduce_func = None\n reduce_args = None\n new_cls = None\n new_args = None\n new_kwargs = None\n state = None\n state_key = None\n listitems = None # @xxx: not implemented\n dictitems = None # @xxx: not implemented\n\n num_unreferenceable_children = 0\n 
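# num_unreferenceable_children counts state values that are still pending as Deferreds\n # (see receiveChild below); when it drops back to zero, the deferred attribute below is\n # fired so the deferred setstate in receiveClose can run.\n 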
all_children_are_referenceable_defer = None\n\n # danger: instances are mutable containers. If an attribute value is not\n # yet available, __dict__ will hold a Deferred until it is. Other\n # objects might be created and use our object before this is fixed.\n # TODO: address this. Note that InstanceUnslicers aren't used in PB\n # (where we have pb.Referenceable and pb.Copyable which have schema\n # constraints and could have different restrictions like not being\n # allowed to participate in reference loops).\n\n def start(self, count):\n self.count = count\n self.deferred = Deferred()\n self.protocol.setObject(count, self.deferred)\n\n def checkToken(self, typebyte, size):\n if self.pickle_protocol is None:\n if typebyte != tokens.INT:\n raise BananaError('InstanceUnslicer `pickle_protocol` token must be INT, got 0x{:x}'.format(ord(typebyte)))\n\n elif self.pickle_protocol == 4:\n # @todo: more tests\n\n if self.new_cls is None:\n if typebyte != tokens.OPEN:\n raise BananaError('InstanceUnslicer `new_cls` token must be OPEN, got 0x{:x}'.format(ord(typebyte)))\n elif self.new_args is None:\n if typebyte != tokens.OPEN:\n raise BananaError('InstanceUnslicer `new_args` token must be OPEN, got 0x{:x}'.format(ord(typebyte)))\n elif self.new_kwargs is None:\n if typebyte != tokens.OPEN:\n raise BananaError('InstanceUnslicer `new_kwargs` token must be OPEN, got 0x{:x}'.format(ord(typebyte)))\n elif self.state_key is None:\n if typebyte not in (tokens.STRING, tokens.SVOCAB):\n raise BananaError('InstanceUnslicer `state_key` token must be STRING or SVOCAB, got 0x{:x}'.format(ord(typebyte)))\n\n elif self.pickle_protocol == 2:\n # @todo: more tests\n\n if self.new_cls is None:\n if typebyte != tokens.OPEN:\n raise BananaError('InstanceUnslicer `new_cls` token must be OPEN, got 0x{:x}'.format(ord(typebyte)))\n elif self.new_args is None:\n if typebyte != tokens.OPEN:\n raise BananaError('InstanceUnslicer `new_args` token must be OPEN, got 0x{:x}'.format(ord(typebyte)))\n elif self.state_key is None:\n if typebyte not in (tokens.STRING, tokens.SVOCAB):\n raise BananaError('InstanceUnslicer `state_key` token must be STRING or SVOCAB, got 0x{:x}'.format(ord(typebyte)))\n\n elif self.pickle_protocol == 0:\n # @todo: more tests\n\n if self.reduce_func is None:\n if typebyte != tokens.OPEN:\n raise BananaError('InstanceUnslicer `reduce_func` token must be OPEN, got 0x{:x}'.format(ord(typebyte)))\n elif self.reduce_args is None:\n if typebyte != tokens.OPEN:\n raise BananaError('InstanceUnslicer `reduce_args` token must be OPEN, got 0x{:x}'.format(ord(typebyte)))\n elif self.state_key is None:\n if typebyte not in (tokens.STRING, tokens.SVOCAB):\n raise BananaError('InstanceUnslicer `state_key` token must be STRING or SVOCAB, got 0x{:x}'.format(ord(typebyte)))\n\n else:\n raise BananaError('Unknown `pickle_protocol`', self.pickle_protocol)\n\n def receiveChild(self, obj, ready_deferred=None):\n assert ready_deferred is None\n\n # @todo: (?) 
finite state machine\n\n#        print('--receiveChild--', self, obj)\n\n        if self.state is None:\n            if isinstance(obj, Deferred):\n                raise NotImplementedError\n\n            if self.pickle_protocol is None:\n                self.pickle_protocol = obj\n\n            elif self.pickle_protocol == 4:\n                if self.new_cls is None: self.new_cls = obj\n                elif self.new_args is None: self.new_args = obj\n                elif self.new_kwargs is None: self.new_kwargs = obj\n                elif self.state is None: self.state = {}\n                else: raise BananaError('Unexpected child', obj)\n\n            elif self.pickle_protocol == 2:\n                if self.new_cls is None: self.new_cls = obj\n                elif self.new_args is None: self.new_args = obj\n                elif self.state is None: self.state = {}\n                else: raise BananaError('Unexpected child', obj)\n\n            elif self.pickle_protocol == 0:\n                if self.reduce_func is None: self.reduce_func = obj\n                elif self.reduce_args is None: self.reduce_args = obj\n                elif self.state is None: self.state = {}\n                else: raise BananaError('Unexpected child', obj)\n\n            else:\n                raise BananaError('Unknown `pickle_protocol`', self.pickle_protocol)\n\n        if self.state is not None:\n            if self.state_key is None:\n                if isinstance(obj, Deferred):\n                    raise NotImplementedError\n\n                if obj in self.state:\n                    raise BananaError('Duplicate attribute name \"{}\"'.format(obj))\n\n                self.state_key = obj\n\n            else:\n                if isinstance(obj, Deferred):\n                    def setstate(value, key):\n                        self.state[key] = value\n\n                        self.num_unreferenceable_children -= 1\n\n                        if not self.num_unreferenceable_children and self.all_children_are_referenceable_defer:\n                            self.all_children_are_referenceable_defer.callback(None)\n\n                    self.num_unreferenceable_children += 1\n\n                    obj.addCallback(setstate, self.state_key)\n\n                else:\n                    self.state[self.state_key] = obj\n\n                del self.state_key\n\n    def receiveClose(self):\n        # you could attempt to do some value-checking here, but there would\n        # probably still be holes\n\n        if self.pickle_protocol == 4:\n            obj = self.new_cls.__new__(self.new_cls, *self.new_args, **self.new_kwargs)\n\n        elif self.pickle_protocol == 2:\n            obj = self.new_cls.__new__(self.new_cls, *self.new_args)\n\n        elif self.pickle_protocol == 0:\n            obj = self.reduce_func(*self.reduce_args)\n\n        else:\n            raise BananaError('Unknown `pickle_protocol`', self.pickle_protocol)\n\n#        print('--receiveClose--', self, obj)\n\n        def setstate():\n            setstate = getattr(obj, '__setstate__', None)\n\n            if setstate is not None:\n                setstate(self.state)\n\n            else:\n                # @todo: state is tuple = slotstate\n\n                #slotstate = None\n                #if isinstance(state, tuple) and len(state) == 2:\n                #    state, slotstate = state\n                #if state:\n\n                obj_dict = obj.__dict__\n\n                intern = sys.intern\n\n                for key, value in self.state.items():\n                    if type(key) is str:\n                        obj_dict[intern(key)] = value\n                    else:\n                        obj_dict[key] = value\n\n                #if slotstate:\n                #    for key, value in slotstate.items():\n                #        setattr(obj, key, value)\n\n#        print('--receiveClose-setstate--', self, vars(obj))\n\n        if self.num_unreferenceable_children:\n            # @xxx: I don't like any of this\n            self.all_children_are_referenceable_defer = Deferred()\n            self.all_children_are_referenceable_defer.addCallback(lambda _: setstate())\n            # @todo: (?) addErrback\n\n        elif self.state:\n            setstate()\n\n        self.protocol.setObject(self.count, obj)\n        self.deferred.callback(obj)\n\n        return obj, None\n\n    def describe(self):\n        if self.reduce_args:\n            cls = self.reduce_args[0]\n        elif self.new_cls:\n            cls = self.new_cls\n        else:\n            return '<instance>'\n        return '<{}>'.format(cls.__name__)\n\n\nclass ModuleSlicer(slicer.BaseSlicer):\n    opentype = (b'module',)\n    trackReferences = True\n\n    def sliceBody(self, streamable, banana):\n        yield self.obj.__name__\n\n\nclass ClassSlicer(slicer.BaseSlicer):\n    opentype = (b'class',)\n    trackReferences = True\n\n    def sliceBody(self, streamable, banana):\n        yield reflect.qual(self.obj)\n\n\nclass MethodSlicer(slicer.BaseSlicer):\n    opentype = (b'method',)\n    trackReferences = True\n\n    def sliceBody(self, streamable, banana):\n        if self.obj.__self__ is None:\n            yield self.obj.__func__.__qualname__\n        else:\n            yield self.obj.__func__.__name__\n            yield self.obj.__self__\n#            yield self.obj.__class__\n\n\nclass FunctionSlicer(slicer.BaseSlicer):\n    opentype = (b'function',)\n    trackReferences = True\n\n    def sliceBody(self, streamable, banana):\n#        name = self.obj.__name__\n#        fullname = str(whichmodule(self.obj, self.obj.__name__)) + '.' + name\n        fullname = self.obj.__module__ + '.' + self.obj.__qualname__\n        yield fullname\n\n\nUnsafeSlicerTable = {\n    types.ModuleType: ModuleSlicer,\n#    InstanceType: InstanceSlicer,\n#    ClassType : ClassSlicer,\n    InstanceType: None,\n    type : ClassSlicer,\n    types.MethodType : MethodSlicer,\n    types.FunctionType: FunctionSlicer,\n    #types.TypeType: NewstyleClassSlicer,\n    # ???: NewstyleInstanceSlicer, # pickle uses obj.__reduce__ to help\n    # http://docs.python.org/lib/node68.html\n}\n\n\n# the root slicer for storage is exactly like the regular root slicer\nclass StorageRootSlicer(ScopedRootSlicer):\n    pass\n\n\n# but the \"unsafe\" one (which handles instances and stuff) uses its own table\nclass UnsafeStorageRootSlicer(StorageRootSlicer):\n    slicerTable = UnsafeSlicerTable\n\n    def slicerForObject(self, obj):\n        try:\n            slicer = super().slicerForObject(obj)\n        except Violation:\n            # @xxx: InstanceType\n            if InstanceType not in self.slicerTable:\n                raise\n#            if not inspect.isclass(type(obj)):\n            if issubclass(type(obj), type):\n                raise\n            slicer = InstanceSlicer(obj)\n        return slicer\n\n\n################## Unslicers for \"unsafe\" things\n\n\nclass Dummy:\n    def __repr__(self):\n        return '<Dummy %s>' % self.__dict__\n\n    def __eq__(self, other):\n        if type(other) is type(self):\n            return self.__dict__ == other.__dict__\n        return NotImplemented\n\n    def __lt__(self, other):\n        if type(other) is type(self):\n            return self.__dict__ < other.__dict__\n        return NotImplemented\n\n\nclass ModuleUnslicer(slicer.LeafUnslicer):\n    opentype = (b'module',)\n    unslicerRegistry = UnsafeUnslicerRegistry\n\n    finished = False\n\n    def checkToken(self, typebyte, size):\n        if typebyte not in (tokens.STRING, tokens.SVOCAB):\n            raise BananaError(\"ModuleUnslicer only accepts STRINGs\")\n\n    def receiveChild(self, obj, ready_deferred=None):\n        assert not isinstance(obj, Deferred)\n        assert ready_deferred is None\n        if self.finished:\n            raise BananaError(\"ModuleUnslicer only accepts one string\")\n        self.finished = True\n        # TODO: taste here!\n        mod = __import__(obj, {}, {}, \"x\")\n        self.mod = mod\n\n    def receiveClose(self):\n        if not self.finished:\n            raise BananaError(\"ModuleUnslicer requires a string\")\n        return self.mod, None\n\n\nclass ClassUnslicer(slicer.LeafUnslicer):\n    opentype = (b'class',)\n    unslicerRegistry = UnsafeUnslicerRegistry\n\n    finished = False\n\n    def checkToken(self, typebyte, size):\n        if typebyte not in (tokens.STRING, tokens.SVOCAB):\n            raise BananaError(\"ClassUnslicer only accepts STRINGs\")\n\n    def receiveChild(self, obj, ready_deferred=None):\n        assert not isinstance(obj, Deferred)\n        assert ready_deferred is None\n        if self.finished:\n            raise BananaError(\"ClassUnslicer only accepts one string\")\n        self.finished = True\n        # TODO: taste here!\n        self.klass = reflect.namedObject(obj)\n\n    def receiveClose(self):\n        if not self.finished:\n            raise BananaError(\"ClassUnslicer requires a string\")\n        return self.klass, None\n\n\nclass MethodUnslicer(slicer.BaseUnslicer):\n    opentype = (b'method',)\n    unslicerRegistry = UnsafeUnslicerRegistry\n\n    state = 0\n    im_func = None\n    im_self = None\n#    im_class = None\n\n    # @xxx: [bw] lots of quick and ill-considered edits\n\n    # self.state:\n    # 0: expecting a string with the method name\n    # 1: expecting an instance (or None for unbound methods)\n    # 2: expecting a class\n\n    def checkToken(self, typebyte, size):\n        if self.state == 0:\n            if typebyte not in (tokens.STRING, tokens.SVOCAB):\n                raise BananaError('MethodUnslicer methodname must be a STRING')\n\n        elif self.state == 1:\n            if typebyte != tokens.OPEN:\n                raise BananaError('MethodUnslicer instance must be OPEN')\n\n#        elif self.state == 2:\n#            if typebyte != tokens.OPEN:\n#                raise BananaError('MethodUnslicer class must be an OPEN')\n\n    def doOpen(self, opentype):\n        # check the opentype\n        if self.state == 1:\n            if opentype[0] not in (b'instance', b'none'):\n                raise BananaError('MethodUnslicer instance must be instance or None')\n\n#        elif self.state == 2:\n#            if opentype[0] != b'class':\n#                raise BananaError('MethodUnslicer class must be a class')\n\n        unslicer = self.open(opentype)\n        # TODO: apply constraint\n        return unslicer\n\n    def receiveChild(self, obj, ready_deferred=None):\n        assert not isinstance(obj, Deferred)\n        assert ready_deferred is None\n\n        if self.state == 0:\n            self.im_func = obj\n\n        elif self.state == 1:\n#            assert type(obj) in (InstanceType, type(None))\n            assert obj is None or not inspect.isclass(obj), type(obj)\n            self.im_self = obj\n\n#        elif self.state == 2:\n#            assert type(obj) == ClassType # TODO: new-style classes?\n#            assert inspect.isclass(obj), type(obj)\n#            assert self.im_self is None or isinstance(self.im_self, obj), (self.im_self, obj)\n#            self.im_class = obj\n\n        else:\n            raise BananaError('MethodUnslicer only accepts three objects')\n\n        self.state += 1\n\n    def receiveClose(self):\n#        if self.state != 3:\n#            raise BananaError('MethodUnslicer requires three objects')\n\n        if self.im_self is None:\n            # getattr gives us an unbound method\n#            meth = getattr(self.im_class, self.im_func)\n            meth = reflect.namedAny(self.im_func)\n            return meth, None\n\n        # TODO: late-available instances\n        #if isinstance(self.im_self, NotKnown):\n        #    im = _InstanceMethod(self.im_name, self.im_self, self.im_class)\n        #    return im\n\n#        meth = vars(self.im_class)[self.im_func]\n#        meth = vars(type(self.im_self))[self.im_func]\n#        meth = meth.__get__(self.im_self)\n        meth = getattr(self.im_self, self.im_func)\n        return meth, None\n\n\nclass FunctionUnslicer(slicer.LeafUnslicer):\n    opentype = (b'function',)\n    unslicerRegistry = UnsafeUnslicerRegistry\n\n    finished = False\n\n    def checkToken(self, typebyte, size):\n        if typebyte not in (tokens.STRING, tokens.SVOCAB):\n            raise BananaError(\"FunctionUnslicer only accepts STRINGs\")\n\n    def receiveChild(self, obj, ready_deferred=None):\n        assert not isinstance(obj, Deferred)\n        assert ready_deferred is None\n\n        if self.finished:\n            raise BananaError(\"FunctionUnslicer only accepts one string\")\n\n        self.finished = True\n        # TODO: taste here!\n        self.func = reflect.namedAny(obj)\n\n    def receiveClose(self):\n        if not self.finished:\n            raise BananaError(\"FunctionUnslicer requires a string\")\n        return self.func, None\n\n\n# the root unslicer for storage is just like the regular one, but hands\n# received objects to the StorageBanana\nclass StorageRootUnslicer(ScopedRootUnslicer):\n    def receiveChild(self, obj, ready_deferred):\n        self.protocol.receiveChild(obj, ready_deferred)\n\n\n# but the \"unsafe\" one has its own tables\nclass UnsafeStorageRootUnslicer(StorageRootUnslicer):\n    # This version tracks references for the entire lifetime of the\n    # protocol. It is most appropriate for single-use purposes, such as a\n    # replacement for Pickle.\n    topRegistries = [slicer.UnslicerRegistry, slicer.BananaUnslicerRegistry, UnsafeUnslicerRegistry]\n    openRegistries = [slicer.UnslicerRegistry, UnsafeUnslicerRegistry]\n\n\nclass StorageBanana(banana.Banana):\n    object = None\n    violation = None\n    disconnectReason = None\n    slicerClass = StorageRootSlicer\n    unslicerClass = StorageRootUnslicer\n\n    def prepare(self):\n        self.d = Deferred()\n        return self.d\n\n    def receiveChild(self, obj, ready_deferred):\n        if ready_deferred:\n            ready_deferred.addBoth(self.d.callback)\n            self.d.addCallback(lambda res: obj)\n        else:\n            self.d.callback(obj)\n        del self.d\n\n    def receivedObject(self, obj):\n        self.object = obj\n\n    def sendError(self, msg):\n        pass\n\n    def reportViolation(self, fail):\n        self.violation = fail\n        # @todo: [bw] ??? may hang (it seemed to be caused by bytes/str), see git:3a30fbd5 test_serialize (apparently Serialize.test_copyable)\n        # but some tests break with this code, for example: L{foolscap.test.test_banana.DecodeTest.test_failed_dict3}\n        #self.d.errback(fail) # -or- fail.raiseException()\n\n    def reportReceiveError(self, fail):\n        self.disconnectReason = fail\n        fail.raiseException()\n\n\nclass SerializerTransport:\n    def __init__(self, sio):\n        self.sio = sio\n\n    def write(self, data):\n        self.sio.write(data)\n\n    def loseConnection(self, why='ignored'):\n        pass\n\n\ndef serialize(obj, outstream=None, root_class=StorageRootSlicer, banana=None):\n    \"\"\"Serialize an object graph into a sequence of bytes. 
Returns a Deferred\n that fires with the sequence of bytes.\"\"\"\n\n if banana is not None:\n b = banana\n else:\n b = StorageBanana()\n b.slicerClass = root_class\n\n if outstream is None:\n sio = io.BytesIO()\n else:\n sio = outstream\n\n b.transport = SerializerTransport(sio)\n b.connectionMade()\n\n d = b.send(obj)\n\n def _report_error(res):\n if b.disconnectReason:\n return b.disconnectReason\n if b.violation:\n return b.violation\n return res\n\n d.addCallback(_report_error)\n\n if outstream is None:\n d.addCallback(lambda res: sio.getvalue())\n else:\n d.addCallback(lambda res: outstream)\n\n return d\n\n\ndef unserialize(data, banana=None, root_class=StorageRootUnslicer):\n \"\"\"Unserialize a sequence of bytes back into an object graph.\"\"\"\n\n if type(data) is not bytes:\n raise TypeError(type(data))\n\n if banana:\n b = banana\n else:\n b = StorageBanana()\n b.unslicerClass = root_class\n\n b.connectionMade()\n d = b.prepare() # this will fire with the unserialized object\n\n b.dataReceived(data)\n\n def _report_error(res):\n if b.disconnectReason:\n return b.disconnectReason\n if b.violation:\n return b.violation\n return res # return the unserialized object\n\n return d.addCallback(_report_error)\n","sub_path":"src/foolscap/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":25470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"449157345","text":"# test_integration_summation_2d\n#\n# Copyright (C) 2013 Diamond Light Source\n#\n# Author: Luis Fuentes-Montero (Luiso)\n#\n# This code is distributed under the BSD license, a copy of which is\n# included in the root directory of this package.\n\nfrom __future__ import absolute_import, division\nfrom __future__ import print_function\n\ndef run(i, imp):\n from random import randint\n from dials.array_family import flex\n\n #building a reflection table\n num_ref = 5\n ref_table = flex.reflection_table()\n\n shoebox = flex.shoebox(num_ref)\n ref_table['shoebox'] = shoebox\n\n intensity = flex.double(num_ref)\n ref_table['intensity.sum.value'] = intensity\n\n intensity_var = flex.double(num_ref)\n ref_table['intensity.sum.variance'] = intensity_var\n\n iterate = ref_table['shoebox']\n i_to_compare = []\n\n # bulding the shoebox with a desired content\n # which is a reflection with noise included\n\n n = 0\n for arr in iterate:\n img = flex.double(flex.grid(3, 3, 3))\n bkg = flex.double(flex.grid(3, 3, 3))\n msk = flex.int(flex.grid(3, 3, 3))\n for row in range(3):\n for col in range(3):\n for fra in range(3):\n img[row, col, fra] = row + col + fra + n * 9 + randint(0, i)\n bkg[row, col, fra] = 0.0\n msk[row, col, fra] = 3\n n += 1\n msk[1, 1, 1] = 5\n tmp_i = n * n * n * 3\n i_to_compare.append(tmp_i)\n img[1, 1, 1] += tmp_i\n\n arr.data = img[:, :, :]\n arr.background = bkg[:, :, :]\n arr.mask = msk[:, :, :]\n\n # calling the functions that we need to test\n # first select the algorithm for background calculation\n\n if imp == \"inclined\":\n print(\"testing inclined_background_subtractor\")\n from dials.algorithms.background.inclined_background_subtractor \\\n import layering_and_background_plane\n layering_and_background_plane(ref_table)\n elif imp == \"flat\":\n print(\"testing flat_background_subtractor\")\n from dials.algorithms.background.flat_background_subtractor \\\n import layering_and_background_avg\n layering_and_background_avg(ref_table)\n elif imp == \"curved\":\n print(\"testing curved_background_subtractor\")\n from 
dials.algorithms.background.curved_background_subtractor \\\n import layering_and_background_modl\n layering_and_background_modl(ref_table)\n\n # no matter which algorithm was used for background calculation\n # the integration summation must remain compatible\n\n from dials.algorithms.integration.summation2d \\\n import flex_2d_layering_n_integrating\n flex_2d_layering_n_integrating(ref_table)\n\n # comparing results\n\n result = \"OK\"\n resl_its = ref_table['intensity.sum.value']\n resl_var = ref_table['intensity.sum.variance']\n for n_its in range(len(resl_its)):\n if resl_its[n_its] <= i_to_compare[n_its] + i and \\\n resl_its[n_its] >= i_to_compare[n_its] - i and \\\n resl_var[n_its] > resl_its[n_its]:\n print(\"Ok \", n_its)\n else:\n print(\"Wrong num\", n_its)\n\n print(\"i =\", i)\n print(\"resl_its[n_its] =\", resl_its[n_its])\n print(\"i_to_compare[n_its] =\", i_to_compare[n_its])\n print(\"resl_var[n_its] =\", resl_var[n_its])\n\n result = \"wrong\"\n raise RuntimeError('wrong result')\n return result\n\n\nif __name__ == '__main__':\n for i in range(5):\n res1 = run(i, \"flat\")\n print(res1)\n res2 = run(i, \"inclined\")\n print(res2)\n res3 = run(i, \"curved\")\n print(res3)\n","sub_path":"test/algorithms/integration/test_integration_summation_2d.py","file_name":"test_integration_summation_2d.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"534591176","text":"#!/usr/bin/env python3\n\nimport pickle\nimport base64\nimport sys\n\nclass beet:\n def __init__(self, name):\n self.name = name\n\nprint(\"welcome to my beet reciever! i'm on a quest to find the best beets in the world\\nsend me your beet when ready\")\npickled_beet = base64.b64decode(raw_input())\nbeet = pickle.loads(pickled_beet)\nprint(\"thanks for your beet! \" + str(beet.name) + \" sounds like it is delicious!\")\nsys.exit(0)\n\n\n","sub_path":"online_ctfs/kaizen_2016/picked_beets/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"625042378","text":"import time\nimport json\nimport datetime\nimport requests\n\nfrom urllib.request import urlopen\nimport re\nfrom datetime import datetime, date\nfrom collections import OrderedDict\nfrom jira import JIRA\n\noptions = {\n 'server': 'https://issues.apache.org/jira/'\n}\n\n#project_name = \"Lucene - Core\"\n#project_name = \"Struts 2\"\n#project_name = \"Oozie\"\n#project_name = \"Ignite\"\n#project_name = \"Pig\"\n#project_name = \"Apache NiFi\"\n#project_name = \"Apache Storm\"\n#project_name = \"Tajo\"\n#project_name = \"Zeppelin\"\nproject_name = \"ZooKeeper\"\n\n###########################################################\n# Find the list of isues #\n\njira = JIRA(options)\nsess_get = jira._session.get\nprojects = jira.projects()\n\nfor p in projects:\n # print(p.name)\n if p.name == project_name:\n project = p\n print(project.name)\n break\n\nissues = []\n\nkeepCrawling = True\ni = 0\nwhile keepCrawling:\n tmp = jira.search_issues('project=' + project.key + ' AND status in (Resolved, Closed) AND resolution=Fixed',\n startAt=i, maxResults=50) #should set to 50 which is Jira's limitation for 1 request. 
For testing purpose, can set to 5\n print('.', end=\"\")\n if (len(tmp) > 0):\n issues.extend(tmp)\n i = i + 50\n keepCrawling = True #temporal limitation for testing, should set to True for real running\n else:\n keepCrawling = False\n\nprint('Total number of issues: ' + str(len(issues)))\n\n###########################################################\n# Download isues #\n\nstoreIssues = []\n\ntimeFormat = \"%Y-%m-%dT%H:%M:%S.000+0000\"\n\nfor issue in issues:\n try:\n print('.', end=\"\")\n exportedData = OrderedDict([])\n\n try:\n affectversion = {'affect': issue.fields.versions[0].name}\n except Exception:\n affectversion = {'affect': \"\"}\n\n # print(issue.fields.versions)\n try:\n output = \"\"\n for s in issue.fields.versions:\n output = output + s.name + \",\"\n # print(output)\n allaffectversion = {'all_affect': output[:-1]}\n except Exception:\n allaffectversion = {'all_affect': \"\"}\n\n try:\n fixversion = {'fix': issue.fields.fixVersions[0].name}\n except Exception:\n fixversion = {'fix': \"\"}\n\n try:\n output = \"\"\n for s in issue.fields.fixVersions:\n output = output + s.name + \",\"\n # print(output)\n allfixversion = {'all_fix': output[:-1]}\n except Exception:\n allfixversion = {'all_fix': \"\"}\n\n priority = {'priority': issue.fields.priority.name}\n resolvedDate = datetime.strptime(issue.fields.resolutiondate, timeFormat)\n createdDate = datetime.strptime(issue.fields.created, timeFormat)\n fixdays = {'time': (resolvedDate - createdDate).seconds}\n issue_type = {'type': issue.fields.issuetype.name}\n issue_id = {'issue_id': issue.key}\n\n # print(affectversion)\n # print(fixversion)\n # print(priority)\n # print(fixdays)\n exportedData.update(issue_id)\n exportedData.update(affectversion)\n exportedData.update(fixversion)\n exportedData.update(priority)\n exportedData.update(issue_type)\n exportedData.update(fixdays)\n exportedData.update(allaffectversion)\n exportedData.update(allfixversion)\n\n DEV_STATUS = 'https://issues.apache.org/jira/rest/dev-status/1.0'\n _issue = 'issue/detail?issueId=%s' % issue.id\n _args = 'applicationType=fecru&dataType=repository&_=%s' % int(time.time())\n req_url = '%s/%s&%s' % (DEV_STATUS, _issue, _args)\n response = sess_get(req_url)\n raw_data = json.loads(response.content.decode('utf-8'))\n # print(issue)\n # print(issue.key)\n # print(raw_data)\n try:\n hasCommit = True\n commits = raw_data['detail'][0]['repositories'][0]['commits']\n # storeIssues.append(response.content.decode('utf-8'))\n except IndexError:\n hasCommit = False\n if hasCommit:\n commitList = []\n for commit in commits:\n # print(req)\n # print(issue.id)\n patches = []\n # print('%s\\n%s\\n\\n' % (req['displayId'], req['files']))\n for file in commit['files']:\n patches.append({'filename': file['path']})\n commitList.append({'files': patches})\n # print(patches)\n exportedData.update({'commits': commitList})\n storeIssues.append(exportedData)\n hasCommit = False\n # if doesn't has commit, then find by pull request\n if not hasCommit:\n DEV_STATUS = 'https://issues.apache.org/jira/secure/AjaxIssueAction!default.jspa?'\n _issue = 'issueKey=%s' % issue.id\n _args = '&_=%s' % int(time.time())\n #_args = 'applicationType=github&dataType=pullrequest&_=%s' % int(time.time())\n req_url = '%s%s&%s' % (DEV_STATUS, _issue, _args)\n response = sess_get(req_url)\n raw_data = json.loads(response.content.decode('utf-8'))\n\n\n # find by regular expression\n pull_request = re.compile('https:\\/\\/github.com\\/apache\\/'+project_name.lower()+'\\/pull\\/[0-9]*')\n matched = 
pull_request.findall(raw_data['panels']['leftPanels'][3]['html'])\n if matched is not None and len(matched) != 0:\n pull_requests = set(matched);\n commitList = []\n for link in pull_requests:\n githubLink = str(link).replace('https://github.com/', 'https://api.github.com/repos/').replace('pull', 'pulls')+'/files' # + '?access_token=b1077655202a74c42d8ee5145c154b14a7db07e9';\n print(githubLink)\n related_files = requests.get(githubLink).json();\n patches = []\n for file in related_files:\n print(file['filename'])\n patches.append({'filename': file['filename']})\n commitList.append({'files': patches})\n # print(patches)\n exportedData.update({'commits': commitList})\n storeIssues.append(exportedData)\n else:\n hasPullRequest = False\n if not hasCommit and not hasPullRequest:\n # try to look for patch file\n # find by regular expression\n patch_file = re.compile('https:\\/\\/issues.apache.org\\/jira\\/secure\\/attachment\\/[0-9]*\\/[^.]+[.]*[0-9]*.patch')\n matched = patch_file.findall(raw_data['panels']['leftPanels'][3]['html'])\n java_file_pattern = re.compile('[^ ]+\\.java');\n if matched is not None:\n commitList = []\n for patch in set(matched):\n patches = []\n content = requests.get(patch).content\n # print(content.read())\n java_files = java_file_pattern.findall(str(content))\n for f in set(java_files):\n patches.append({'filename': f})\n commitList.append({'files': patches})\n exportedData.update({'commits': commitList})\n storeIssues.append(exportedData)\n # Done with collecting\n\n\n\n except:\n print(issue)\n\nwith open(project_name + '_data.json', 'w') as outfile:\n json.dump(storeIssues, outfile)\n\n print(issues)\n\n # issue = jira.issue('JRA-9')\n # print(issue.fields.project.key) # 'JRA'\n # print(issue.fields.issuetype.name) # 'New Feature'\n # print(issue.fields.reporter.displayName) # 'Mike Cannon-Brookes [Atlassian]'","sub_path":"issue_extractor_for_Apache.py","file_name":"issue_extractor_for_Apache.py","file_ext":"py","file_size_in_byte":7949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"620479491","text":"import argparse\nimport os\nimport json\n\nimport torch\nimport torch.utils.data as data\n\nfrom model import Glow\nimport matplotlib.pyplot as plt\nimport ipdb\nfrom recon_mnist import run_recon_evolution\nimport utils\ndevice = 'cpu' if (not torch.cuda.is_available()) else 'cuda:0'\n\nimport numpy as np\nfrom anomaly import load_ood_data\nfrom train import check_dataset, generate_from_noise\nimport math\nfrom collections import OrderedDict\nfrom datasets import preprocess\nfrom torchvision import transforms\n\nc, h, w = 3,32,32\nn_bins = 2**8\nchw = c * h * w\nbpd_correction = -math.log(n_bins) / (math.log(2.))\n\ndef compute_percent_nans_infs(x):\n x = x.view(x.size(0), -1)\n n, d = x.shape\n nx_nans = ((x!=x).sum(-1) > 0 ).sum()\n n_nans = (x!=x).sum()\n nx_infs = ((x==np.inf).sum(-1) > 0).sum()\n n_infs = (x==np.inf).sum()\n return (n_nans + n_infs).float() / float(n*d), (nx_nans + nx_infs).float() / float(n)\n\n\ndef compute_jac_cn(x, model):\n dic = utils.computeSVDjacobian(x, model, compute_inverse=False)\n D_for, jac = dic['D_for'], dic['jac_for']\n cn = float(D_for.max()/ D_for.min())\n return cn, jac\n\ndef run_analysis(x, model, recon_path):\n p_pxs, p_ims = compute_percent_nans_infs(x)\n \n # Note: CN is computed only for the 1st sample\n cn, jac = compute_jac_cn(x, model)\n _, numerical_logdet = np.linalg.slogdet(jac)\n \n with torch.no_grad():\n _, bpd, _, (_, analytic_logdet) = model.forward(x, 
None, return_details=True, correction=False)\n # Subtract the conditional gaussian likelihood from the split layers\n analytic_logdet = analytic_logdet - torch.stack([split._last_logdet for split in model.flow.splits]).sum(0)\n # The above forward pass was run w/o correction\n data_bpd = bpd.mean().item() - bpd_correction \n\n with torch.no_grad():\n data_pad = run_recon_evolution(model, \n x, \n recon_path)\n return p_pxs.item(), p_ims.item(), cn, np.abs(numerical_logdet-analytic_logdet[0].item()), data_bpd, data_pad.item()\n\ndef one_to_three_channels(x):\n if x.shape[0] == 1:\n x = x.repeat(3,1,1)\n return x \n\ndef main(dataset, dataroot, download, augment, n_workers, eval_batch_size, output_dir,db, glow_path,ckpt_name):\n\n \n (image_shape, num_classes, train_dataset, test_dataset) = check_dataset(dataset, dataroot, augment, download)\n\n test_loader = data.DataLoader(test_dataset, batch_size=eval_batch_size,\n shuffle=False, num_workers=n_workers,\n drop_last=False)\n\n x = test_loader.__iter__().__next__()[0].to(device)\n\n # OOD data\n ood_distributions = ['gaussian']\n # ood_distributions = ['gaussian', 'rademacher', 'texture3', 'svhn','tinyimagenet','lsun']\n tr = transforms.Compose([])\n tr.transforms.append(transforms.ToPILImage()) \n tr.transforms.append(transforms.Resize((32,32)))\n tr.transforms.append(transforms.ToTensor())\n tr.transforms.append(one_to_three_channels)\n tr.transforms.append(preprocess)\n ood_tensors = [(out_name, torch.stack([tr(x) for x in load_ood_data({\n 'name': out_name,\n 'ood_scale': 1,\n 'n_anom': eval_batch_size,\n })]).to(device)\n ) for out_name in ood_distributions]\n if 'sd' in glow_path:\n with open(os.path.join(os.path.dirname(glow_path), 'hparams.json'), 'r') as f:\n model_kwargs = json.load(f)\n model = Glow(\n (32, 32, 3), \n model_kwargs['hidden_channels'], \n model_kwargs['K'], \n model_kwargs['L'], \n model_kwargs['actnorm_scale'],\n model_kwargs['flow_permutation'], \n model_kwargs['flow_coupling'], \n model_kwargs['LU_decomposed'], \n 10,\n model_kwargs['learn_top'], \n model_kwargs['y_condition'],\n model_kwargs['logittransform'],\n model_kwargs['sn'],\n model_kwargs['affine_eps'],\n model_kwargs['no_actnorm'],\n model_kwargs['affine_scale_eps'], \n model_kwargs['actnorm_max_scale'], \n model_kwargs['no_conv_actnorm'],\n model_kwargs['affine_max_scale'],\n model_kwargs['actnorm_eps'],\n model_kwargs['no_split']\n )\n model.load_state_dict(torch.load(glow_path))\n model.set_actnorm_init()\n else:\n model = torch.load(glow_path)\n model = model.to(device)\n model.eval()\n\n with torch.no_grad():\n samples = generate_from_noise(model, eval_batch_size,clamp=False, guard_nans=False)\n stats = OrderedDict()\n for name, x in [('data',x), ('samples',samples)] + ood_tensors:\n p_pxs, p_ims, cn, dlogdet, bpd, pad = run_analysis(x, model, os.path.join(output_dir, f'recon_{ckpt_name}_{name}.jpeg'))\n \n stats[f\"{name}-percent-pixels-nans\"] = p_pxs\n stats[f\"{name}-percent-imgs-nans\"] = p_ims\n stats[f\"{name}-cn\"] = cn\n stats[f\"{name}-dlogdet\"] = dlogdet\n stats[f\"{name}-bpd\"] = bpd\n stats[f\"{name}-recon-err\"] = pad\n \n with open(os.path.join(output_dir, f'results_{ckpt_name}.json'), 'w') as fp:\n json.dump(stats, fp, indent=4)\n\n\ndef makedirs(dirname):\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--dataset', type=str,\n default='cifar10', choices=['cifar10', 'svhn', 'mnist'],\n help='Type of the dataset to be used.')\n 
parser.add_argument('--dataroot',\n type=str, default='/scratch/gobi2/wangkuan/data',\n help='path to dataset')\n parser.add_argument('--download', default=True)\n parser.add_argument('--no_augment', action='store_false',\n dest='augment', help='Augment training data')\n parser.add_argument('--n_workers',\n type=int, default=6,\n help='number of data loading workers')\n parser.add_argument('--eval_batch_size',\n type=int, default=512,\n help='batch size used during evaluation')\n parser.add_argument('--db', type=int, default=0)\n parser.add_argument('--glow_path', type=str, default='')\n\n args = parser.parse_args()\n kwargs = vars(args)\n\n # Create output_dir \n base_dir = os.path.dirname(args.glow_path)\n args.output_dir = os.path.join(base_dir, 'analyze')\n args.ckpt_name = os.path.basename(args.glow_path).split('.')[0]\n\n\n makedirs(args.dataroot)\n makedirs(args.output_dir)\n \n with open(os.path.join(args.output_dir, f'hparams_{args.ckpt_name}.json'), 'w') as fp:\n json.dump(kwargs, fp, sort_keys=True, indent=4)\n\n log_file = os.path.join(args.output_dir, f'log_{args.ckpt_name}.txt')\n log = open(log_file, 'w')\n _print = print\n def print(*content):\n _print(*content)\n _print(*content, file=log)\n log.flush()\n\n main(**kwargs)\n log.close()\n","sub_path":"analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":7315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"492567263","text":"import numpy as np\nimport tensorflow as tf\n\nfrom tf_agents.environments.tf_py_environment import TFPyEnvironment\n\nfrom bellman.environments.initial_state_distribution_model import (\n DeterministicInitialStateModel,\n)\nfrom bellman.environments.reward_model import RewardModel\nfrom bellman.environments.termination_model import TerminationModel\n\n\nclass CRWRewardModel(RewardModel):\n \"\"\"\n Reward function for the controlled random walk environment, based on cost_per_buffer.\n Information from the environment is neeeded.\n \"\"\"\n def __init__(self, observation_spec: tf.TensorSpec, action_spec: tf.TensorSpec, env: TFPyEnvironment):\n self.cost_per_buffer = env.cost_per_buffer\n super().__init__(observation_spec, action_spec)\n\n def _step_reward(\n self, observation: tf.Tensor, action: tf.Tensor, next_observation: tf.Tensor\n ) -> tf.Tensor:\n cost = np.dot(self.cost_per_buffer.transpose(), observation)\n reward = - float(cost)\n return tf.cast(reward, self._reward_spec.dtype)\n\n\nclass CRWInitialStateModel(DeterministicInitialStateModel):\n \"\"\"\n Initial state model for the the controlled random walk environment.\n Information from the environment is neeeded.\n \"\"\"\n\n def __init__(self, env: TFPyEnvironment):\n self.initial_state = env.state_initialiser.get_initial_state()\n super().__init__(state=self.initial_state)\n","sub_path":"src/snc/agents/rl/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"446522127","text":"import App\n\nclass TargetListPerparer:\n def __init__(self, pAttackGroup):\n self.pAttackers = pAttackGroup\n\n def SetAttackedCondition(self, pCondition):\n self.pAttackedCondition = pCondition\n\n def GetNextUpdateTime(self):\n return 5.0\n\n def Update(self, dEndTime):\n import MissionLib\n pFriendlies = MissionLib.GetFriendlyGroup()\n\n # Update the list of attackers from the condition.\n pScript = self.pAttackedCondition.GetConditionScript()\n lsAttackers = 
pScript.GetTargetList()\n\n        if lsAttackers:\n            for sAttacker in lsAttackers:\n                # If this attacker is a Friendly object, don't add it to the\n                # list of targets.\n##                if pFriendlies and pFriendlies.IsNameInGroup(sAttacker):\n##                    continue\n\n                try:\n                    fShieldDamage = pScript.dfShieldDamage[sAttacker]\n                except KeyError:\n                    fShieldDamage = 0.0\n\n                try:\n                    fHullDamage = pScript.dfDamageDamage[sAttacker]\n                except KeyError:\n                    fHullDamage = 0.0\n\n                fPriority = fShieldDamage + fHullDamage\n                self.pAttackers[sAttacker] = { \"Priority\" : fPriority }\n\n        return App.PreprocessingAI.PS_NORMAL\n\n\n\ndef CreateAI(pShip):\n    pAttackGroup = App.ObjectGroupWithInfo()\n    pAttackGroup[pShip.GetName()] = { \"Priority\" : -1000.0 }\n\n    #########################################\n    # Creating CompoundAI Attack at (120, 106)\n    import AI.Compound.BasicAttack\n    pAttack = AI.Compound.BasicAttack.CreateAI(pShip, pAttackGroup, AggressivePulseWeapons = 1, SmartPhasers = 1, UseCloaking = 1, WarpOutBeforeDying = 1)\n    # Done creating CompoundAI Attack\n    #########################################\n    #########################################\n    # Creating PreprocessingAI PrepTargetList at (118, 154)\n    ## Setup:\n    pTargetPrep = TargetListPerparer(pAttackGroup)\n    ## The PreprocessingAI:\n    pPrepTargetList = App.PreprocessingAI_Create(pShip, \"PrepTargetList\")\n    pPrepTargetList.SetInterruptable(1)\n    pPrepTargetList.SetPreprocessingMethod(pTargetPrep, \"Update\")\n    pPrepTargetList.SetContainedAI(pAttack)\n    # Done creating PreprocessingAI PrepTargetList\n    #########################################\n    #########################################\n    # Creating ConditionalAI DefendeeAttacked at (117, 201)\n    ## Conditions:\n    #### Condition Attacked\n    pAttacked = App.ConditionScript_Create(\"Conditions.ConditionAttacked\", \"ConditionAttacked\", pShip.GetName(), 0.0001, 0.0001, 45)\n    ## Evaluation function:\n    def EvalFunc(bAttacked):\n        ACTIVE = App.ArtificialIntelligence.US_ACTIVE\n        DORMANT = App.ArtificialIntelligence.US_DORMANT\n        DONE = App.ArtificialIntelligence.US_DONE\n        if bAttacked:\n            return ACTIVE\n        return DORMANT\n    ## The ConditionalAI:\n    pDefendeeAttacked = App.ConditionalAI_Create(pShip, \"DefendeeAttacked\")\n    pDefendeeAttacked.SetInterruptable(1)\n    pDefendeeAttacked.SetContainedAI(pPrepTargetList)\n    pDefendeeAttacked.AddCondition(pAttacked)\n    pDefendeeAttacked.SetEvaluationFunction(EvalFunc)\n    # Done creating ConditionalAI DefendeeAttacked\n    #########################################\n    pTargetPrep.SetAttackedCondition(pAttacked)\n    return pDefendeeAttacked\n","sub_path":"scripts/AI/Compound/Defend3.py","file_name":"Defend3.py","file_ext":"py","file_size_in_byte":3936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"182021724","text":"import numpy as np\nimport math\n# Reference: Numerical example to understand Expectation-Maximization -- http://ai.stanford.edu/~chuongdo/papers/em_tutorial.pdf\n# What is the expectation maximization algorithm? (PDF) -- http://stats.stackexchange.com/questions/72774/numerical-example-to-understand-expectation-maximization\n\ndef logp(n):\n    pi = 1.0/n\n    return math.log(pi)\n\ndef xplog(x, p): # compute the conditional (cross) entropy H(x;p)\n    n = np.sum(x)\n    r = logp(n)\n    for xi in x:\n        r -= logp(xi)\n    return r + np.dot(x, np.log(p))\n\ndef EM():\n# 1st: Coin B, {HTTTHHTHTH}, 5H,5T\n# 2nd: Coin A, {HHHHTHHHHH}, 9H,1T\n# 3rd: Coin A, {HTHHHHHTHH}, 8H,2T\n# 4th: Coin B, {HTHTTTHHTT}, 4H,6T\n# 5th: Coin A, {THHHTHHHTH}, 7H,3T\n# so, from MLE: pA(heads) = 0.80 and pB(heads)=0.45\n    e = [ [5,5], [9,1], [8,2], [4,6], [7,3] ]\n    pA = [0.6, 0.4]\n    pB = [0.5, 0.5]\n    delta = 9.9999\n    for _ in range(1000):\n        print(\"pA={} pB={} delta={}\".format(pA, pB, delta))\n        sumA=[0,0]\n        sumB=[0,0]\n        for ei in e:\n            lA = xplog(ei, pA)\n            lB = xplog(ei, pB)\n            a = np.exp(lA)\n            b = np.exp(lB)\n            wA = a/(a+b)\n            wB = b/(a+b)\n            eA = np.multiply(wA, ei)\n            eB = np.multiply(wB, ei)\n            sumA = np.add(sumA, eA)\n            sumB = np.add(sumB, eB)\n\n        npA = np.multiply(sumA, 1.0/np.sum(sumA))\n        npB = np.multiply(sumB, 1.0/np.sum(sumB))\n        dA = np.subtract(npA, pA)\n        dB = np.subtract(npB, pB)\n        delta = np.max([dA, dB])\n        if delta < 0.001: break\n        pA = npA\n        pB = npB\n\nEM()\n","sub_path":"python/10-machineLearning/em/em.py","file_name":"em.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"188741205","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport sys\nsys.path.append(\"../analyse/\")\nimport calculations as cc\n\ndef fake_deg_to_mm(xvals):\n    degrees = xvals * 1.2/1000.\n    return 1000.*degrees*np.pi/180.\n\ndef xscale(xvals, labda):\n    \"\"\" makes xvalues into distance in units of labda\n    \"\"\"\n    return xvals * 1.22 * 50/3. * labda\n\ndef airy_model_input_u(u, shift):\n    \"\"\" takes the input u for (J1(u)/u)**2,\n    independent of what u consists of,\n    and a horizontal shift 'shift'\n    \"\"\"\n    # remove also pi so tops are 1.22 apart but still correct\n    return (2*cc.jn(1,u*np.pi-shift) / (u*np.pi-shift))**2\n\nmodel = airy_model_input_u\nu = np.arange(-10.,10.,0.01)\ndist = xscale(u, 0.077)\n\nx_qcl, y_qcl, z_qcl = np.loadtxt('qcl_central_x_f1.csv', delimiter=',', skiprows=1, unpack=True)\nx_qcl, y_qcl, z_qcl = cc.shifttozero((x_qcl, y_qcl, z_qcl))\n\n\nplt.figure(figsize=(12,8))\ncc.plotsimple(fake_deg_to_mm(x_qcl), z_qcl/z_qcl.max(), 'blue', 'QCL', linewidth=2, linestyle='-')\ncc.plotsimple(dist, model(u, -0.3), 'red', r'theory, 0.077 mm', linewidth=2, linestyle='--')\nplt.axis([-5,5,0,1])\n# plt.legend(bbox_to_anchor=(0.4, 0.9), bbox_transform=plt.gcf().transFigure)\nplt.xlabel('Distance [mm]')\nplt.ylabel('Normalized Intensity [a.u.]')\nplt.tight_layout()\n# plt.savefig('qcl_airy.pdf')\nplt.show()","sub_path":"metingen_willemjan/qcl_plots.py","file_name":"qcl_plots.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"225807691","text":"#\n# @lc app=leetcode.cn id=46 lang=python\n#\n# [46] Permutations\n#\n\n# @lc code=start\nclass Solution(object):\n    def permute(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: List[List[int]]\n        \"\"\"\n        res = []\n        self.dfs(nums, [], res)\n        return res\n\n    # Solution_1 -- backtracking\n    def dfs(self, nums, l, ans):\n        if not nums:\n            ans.append(l)\n            return\n        for i in range(len(nums)):\n            self.dfs(nums[:i] + nums[i+1:], l + [nums[i]], ans)\n# @lc code=end\n\n","sub_path":"Week03/46.全排列.py","file_name":"46.全排列.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"592324611","text":"from flask import Flask, request, abort\nfrom infer import infer_qa\nimport logging\nlogging.basicConfig(format='%(levelname)s :: %(asctime)s :: %(message)s', level=logging.DEBUG)\n\napp = Flask(__name__)\n@app.route('/query')\ndef query():\n    logging.debug(\"Inside query function\")\n    try:\n        query = request.args.get('query', '')\n        candidate = request.args.get('candidate','')\n        answer, probability = infer_qa(query, candidate)\n        return {\"answer\":answer, \"probability\":probability}\n    except Exception as e:\n        logging.debug(e)\n        abort(400)\n","sub_path":"services/question_answering_backend/src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"305773281","text":"import math\r\nimport pandas as pd\r\nfrom matplotlib import pyplot as plt\r\nimport matplotlib.colors as colors\r\n\r\n# Plot settings\r\nvoc_colors = dict(EK='#0173b2', RP='#de8f05', ED='#029e73', MS='#d55e00', N='#cc78bc', K='#0173b2', P='#de8f05',\r\n                  D='#029e73', S='#d55e00')\r\npvp_colors = {'Open PvP': '#0173b2', 'Retro Hardcore PvP': '#de8f05', 'Retro Open PvP': '#029e73',\r\n              'Optional PvP': '#d55e00', 'Hardcore PvP': '#cc78bc'}\r\ncolor_dict = {'red': ((0.0, 0.0, 0.0), # no red at 0\r\n                      (0.5, 1.0, 1.0), # all channels set to 1.0 at 0.5 to create white\r\n                      (1.0, 0.8, 0.8)), # set to 0.8 so its not too bright at 1\r\n\r\n              'green': ((0.0, 0.8, 0.8), # set to 0.8 so its not too bright at 0\r\n                        (0.5, 1.0, 1.0), # all channels set to 1.0 at 0.5 to create white\r\n                        (1.0, 0.0, 0.0)), # no green at 1\r\n\r\n              'blue': ((0.0, 0.0, 0.0), # no blue at 
0\r\n (0.5, 1.0, 1.0), # all channels set to 1.0 at 0.5 to create white\r\n (1.0, 0.0, 0.0)) # no blue at 1\r\n }\r\n\r\n# Import scraped auction data and Tibia World data\r\ncomplete_auction_dataframe = pd.read_pickle('last_scrape.pkl')\r\nauction_dataframe = complete_auction_dataframe[~complete_auction_dataframe.duplicated(subset='Id', keep=False)]\r\nworlds_dataframe = pd.read_pickle('tibia_game_worlds.pkl')\r\n\r\n# Divide dataframe: successful and failed auctions\r\nwon_auctions = auction_dataframe[auction_dataframe.Type.eq(\"W\")]\r\nsuccessful_auctions = won_auctions[won_auctions.Status.ne(\"cancelled\")]\r\nfailed_auctions = auction_dataframe[auction_dataframe.Type.eq(\"M\")]\r\n\r\n# Calculate totals\r\ntotal_transactions = len(auction_dataframe)\r\nsuccessful_transactions = len(successful_auctions)\r\nfailed_transactions = len(failed_auctions)\r\nsuccess_ratio = successful_transactions / total_transactions\r\nfail_ratio = failed_transactions / total_transactions\r\ntotal_value = successful_auctions['Bid'].sum()\r\naverage_sale = total_value / successful_transactions\r\n\r\n# Calculate taxes\r\nauction_tax = total_transactions * 50\r\nsale_tax = [math.floor(tax) for tax in list(successful_auctions['Bid'] * 0.12)]\r\nsale_tax_total = sum(sale_tax)\r\ntotal_taxes = auction_tax + sale_tax_total\r\n\r\n# Data by vocation\r\nvocations = [('EK', 'K'), ('RP', 'P'), ('ED', 'D'), ('MS', 'S'), ('N',)]\r\nvoc_keys = list((map(lambda k: k[0], vocations)))\r\nvoc_columns = ['AvgLevel', 'AvgBid', 'Count']\r\nvocation_totals = pd.DataFrame(columns=voc_columns, index=voc_keys)\r\nfor vocation in vocations:\r\n vocation_dataframe = successful_auctions[successful_auctions.Vocation.isin(vocation)]\r\n voc_avg_level = vocation_dataframe['Level'].mean()\r\n voc_avg_bid = vocation_dataframe['Bid'].mean()\r\n voc_count = len(vocation_dataframe)\r\n vocation_totals.loc[vocation[0]] = (voc_avg_level, voc_avg_bid, voc_count)\r\n\r\n# Print summary to file\r\nwith open('totals_output.txt', 'w') as out_file:\r\n out_file.write(\"[code][quote]\")\r\n out_file.write(f\"\\nConcluded auctions: {total_transactions:,}\")\r\n out_file.write(f\"\\nSuccessful auctions: {successful_transactions:,} ({success_ratio * 100:.2f}%)\")\r\n out_file.write(f\"\\nFailed auctions: {failed_transactions:,} ({fail_ratio * 100:.2f}%)\")\r\n for voc_tuple in vocation_totals.iterrows():\r\n voc_count = voc_tuple[1]['Count']\r\n out_file.write(f\"\\n{voc_tuple[0]}s traded: {voc_count:,} ({100 * voc_count / successful_transactions:.2f}%)\")\r\n out_file.write(f\"\\nSuccessful auctions total {total_value:,} Tibia Coins.\")\r\n out_file.write(f\"\\nAuction taxes total {auction_tax:,} Tibia Coins.\")\r\n out_file.write(f\"\\nSale taxes total {sale_tax_total:,} Tibia Coins.\")\r\n out_file.write(f\"\\nTotal taxes: {total_taxes:,} Tibia Coins\")\r\n out_file.write(\"[code][quote]\")\r\n\r\n# Pie plot: auctions by vocation\r\n_, txts, autotxts = plt.pie(vocation_totals['Count'], labels=vocation_totals.index, wedgeprops={'edgecolor': 'black'},\r\n autopct=lambda pct: \"{:.2f}%\\n({:d})\".format(pct, int(pct * successful_transactions / 100)))\r\nplt.style.use(\"seaborn-colorblind\")\r\nplt.title(\"Successful auctions by vocation\", fontname=\"Cambria\", size=20)\r\nplt.setp(autotxts, fontname=\"Cambria\", size=15)\r\nplt.setp(txts, fontname=\"Cambria\", size=20)\r\nplt.show()\r\n\r\n# Scatter plot: bid values by level for each vocation\r\nfig, axs = plt.subplots(2, 2, sharey=True, sharex=True)\r\nfor index, vocation in 
enumerate(vocations[:-1]):\r\n vocation_dataframe = successful_auctions[successful_auctions.Vocation.isin(vocation)]\r\n failed_dataframe = failed_auctions[failed_auctions.Vocation.isin(vocation)]\r\n voc_color = voc_colors[vocation[0]]\r\n b = \"0\" + bin(index)[2:]\r\n bin_loc = b[-2:]\r\n i_idx = int(bin_loc[0])\r\n j_idx = int(bin_loc[1])\r\n axs[i_idx, j_idx].scatter(x=vocation_dataframe['Level'], y=vocation_dataframe['Bid'], edgecolor='black',\r\n color=voc_color, label=vocation[0])\r\n axs[i_idx, j_idx].scatter(x=failed_dataframe['Level'], y=failed_dataframe['Bid'], edgecolor=voc_color,\r\n color='none', label='(FAILED)', alpha=0.2)\r\n axs[i_idx, j_idx].legend(loc='upper left')\r\n axs[i_idx, j_idx].grid(which='both')\r\n axs[i_idx, j_idx].set_xlim(left=0)\r\nplt.ylim(0, 1.1*successful_auctions['Bid'].max())\r\nplt.xlim(0, 1.1*successful_auctions['Level'].max())\r\n#plt.xlabel('Level', size=20)\r\n#plt.ylabel('Bid', size=20)\r\nplt.suptitle('Level (X) versus Winning Bids (Y) for each Vocation', size=20)\r\nplt.show()\r\n\r\n# Histogram: name lengths\r\nname_lengths = list((map(lambda name: len(name), auction_dataframe['Name'])))\r\nnl_bins = range(0, max(name_lengths)+2)\r\nnl_hist = plt.hist(name_lengths, bins=nl_bins, color='#0173b2', edgecolor='black', alpha=0.5)\r\noffset = max(nl_hist[0])/50\r\nfor idx in range(0, len(nl_bins)-1):\r\n string = str(int(nl_hist[0][idx])) + \" (\" + str(int(nl_hist[1][idx])) + \")\"\r\n plt.text(nl_hist[1][idx], nl_hist[0][idx]+offset, string, size=8)\r\nplt.xlim(0, max(name_lengths)+1)\r\nplt.xlabel(\"Character Name Length\", size=25)\r\nplt.ylabel(\"Number of Auctions\", size=25)\r\nplt.show()\r\n\r\n# Bubble plot: world, avg level, avg bid, transaction count, pvp type\r\nworlds = list(worlds_dataframe.index)\r\nfor world in worlds:\r\n world_dataframe = auction_dataframe[auction_dataframe['World'].eq(world)]\r\n s_auctions = world_dataframe[world_dataframe['Type'].eq('W')]\r\n s_count = len(s_auctions)\r\n s_avg_level = s_auctions['Level'].mean()\r\n s_avg_bid = s_auctions['Bid'].mean()\r\n world_type = worlds_dataframe.loc[world]['Type']\r\n color = pvp_colors[world_type]\r\n plt.scatter(s_avg_level, s_avg_bid, s=s_count, alpha=0.5, color=color, edgecolor='black')\r\n plt.annotate(world, (s_avg_level, s_avg_bid), size=6)\r\nworld_types = pd.unique(worlds_dataframe['Type'])\r\nfor world_type in world_types:\r\n color = pvp_colors[world_type]\r\n plt.scatter(-100, -100, s=100, label=world_type, color=color, alpha=0.50, edgecolor='black')\r\nplt.legend(frameon=True, title='World Types', loc='upper left', fontsize=20)\r\nplt.xlabel('Average level', size=30)\r\nplt.ylabel('Average Bid', size=30)\r\nplt.xlim(80)\r\nplt.ylim(600)\r\nplt.grid(which='both')\r\nplt.xticks(range(80, 320, 20))\r\nplt.yticks(range(600, 5000, 200))\r\nplt.show()\r\n\r\n\r\nduplicated_chars_dataframe = successful_auctions[successful_auctions.duplicated(subset='Name', keep=False)]\r\nduplicated_char_names = set(duplicated_chars_dataframe['Name'])\r\nduplicated_count = len(duplicated_char_names)\r\nfirst_check = duplicated_chars_dataframe.duplicated(subset='Name', keep='first')\r\nfirst_entry = duplicated_chars_dataframe[~first_check]\r\nsecond_onwards_first = duplicated_chars_dataframe[first_check]\r\nsecond_entry = second_onwards_first[~second_onwards_first.duplicated(subset='Name', keep='first')]\r\n\r\nfirst_bid_avg = first_entry['Bid'].mean()\r\nsecond_bid_avg = second_entry['Bid'].mean()\r\n\r\ngreen_to_red = colors.LinearSegmentedColormap('G2R', color_dict)\r\nprofit = 
[]\r\nlevel = []\r\nfor char_name in duplicated_char_names:\r\n char_dataframe = duplicated_chars_dataframe[duplicated_chars_dataframe['Name'].eq(char_name)]\r\n char_level = char_dataframe['Level'].iloc[0]\r\n first_bid = char_dataframe['Bid'].iloc[0]\r\n second_bid = char_dataframe['Bid'].iloc[1]\r\n char_profit = second_bid - first_bid\r\n if char_profit > 0:\r\n point_color = 'green'\r\n else:\r\n point_color = 'red'\r\n profit.append(char_profit)\r\n level.append(char_level)\r\n plt.scatter(char_level, char_profit, color=point_color, edgecolor='black')\r\nmax_profit = max(profit)\r\nmax_loss = min(profit)\r\nmin_level = min(level)\r\nmax_level = max(level)\r\nx_step = 20\r\nx_range = range(0, max_level+x_step, x_step)\r\ny_step = 200\r\ny_range = range(200*round((max_loss-y_step)/200), max_profit+y_step, y_step)\r\nplt.yticks(y_range)\r\nplt.xticks(x_range)\r\nplt.xlim(0, max_level+x_step)\r\nplt.grid(which='both')\r\nplt.xlabel('Character level', size=20)\r\nplt.ylabel('Resale profit (taxes disregarded)', size=20)\r\nplt.title('Characters Auctioned Twice', size=20)\r\n#plt.scatter(level, profit, c=profit, cmap=green_to_red, edgecolor='black')\r\nplt.show()\r\n\r\n\r\n#name_lengths = list((map(lambda name: len(name), auction_dataframe['Name'])))\r\n#nl_bins = range(0, max(name_lengths)+2)\r\n#nl_hist = plt.hist(name_lengths, bins=nl_bins, color='#0173b2', edgecolor='black', alpha=0.5)\r\n#offset = max(nl_hist[0])/50\r\n#for idx in range(0, len(nl_bins)-1):\r\n# string = str(int(nl_hist[0][idx])) + \" (\" + str(int(nl_hist[1][idx])) + \")\"\r\n# plt.text(nl_hist[1][idx], nl_hist[0][idx]+offset, string, size=8)\r\n#plt.xlim(0, max(name_lengths)+1)\r\n#plt.xlabel(\"Character Name Length\", size=25)\r\n#plt.ylabel(\"Number of Auctions\", size=25)\r\n#plt.show()","sub_path":"bazaar_report.py","file_name":"bazaar_report.py","file_ext":"py","file_size_in_byte":9758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"217061404","text":"import unittest\n\nfrom unittest.mock import patch\n\nfrom apps.jira.models import JiraModel, Board, Sprint, Issue\nfrom tests.helpers import MockResponse, MockSession\n\nBOARD_MOCK_OBJ = {\n 'id': 123,\n 'self': 'https://example.org',\n 'name': 'MockBoard',\n 'type': 'Scrum',\n 'location': {'projectKey': 'AB'}\n}\n\nSPRINT_MOCK_OBJ = {\n 'id': 1,\n 'self': 'https://example.org',\n 'state': 'active',\n 'name': 'mock sprint',\n 'originBoardId': 1\n}\n\nISSUE_MOCK_OBJ = {\n 'id': 1,\n 'self': 'https://example.org',\n 'key': 'AB-123',\n 'fields': {\n 'summary': 'MockSummary'\n }\n}\n\n\nclass TestJiraModel(unittest.TestCase):\n def test_incorrect_obj_raises_key_error(self):\n obj = {}\n self.assertRaises(KeyError, JiraModel, obj, MockSession(200, {}))\n\n def test_incorrect_session_raises_value_error(self):\n obj = {'id': 1, 'self': 'url'}\n session = {}\n self.assertRaises(ValueError, JiraModel, obj, session)\n\n\nclass TestBoard(TestJiraModel):\n @patch(\n 'apps.jira.models.get_paginated_results',\n return_value=[SPRINT_MOCK_OBJ]\n )\n def test_get_sprints(self, mock_response):\n obj = BOARD_MOCK_OBJ\n board = Board(obj, MockSession(200, {}))\n sprints = board.sprints()\n self.assertIsInstance(sprints[0], Sprint)\n\n @patch(\n 'apps.jira.models.get_paginated_results',\n return_value=[SPRINT_MOCK_OBJ]\n )\n def test_get_active_sprints(self, mock_response):\n obj = BOARD_MOCK_OBJ\n board = Board(obj, MockSession(200, {}))\n sprints = board.sprints(active=True)\n self.assertIsInstance(sprints[0], 
Sprint)\n\n\nclass TestSprint(TestJiraModel):\n @patch(\n 'apps.jira.models.get_paginated_results',\n return_value=[ISSUE_MOCK_OBJ]\n )\n def test_get_issues(self, mock_response):\n obj = SPRINT_MOCK_OBJ\n sprint = Sprint(obj, MockSession(200, {}))\n issues = sprint.issues()\n self.assertIsInstance(issues[0], Issue)\n\n\nclass TestIssue(TestJiraModel):\n def setUp(self):\n obj = {\n 'id': 1337,\n 'self': 'https://example.org/1337',\n 'key': 'AB-123',\n 'fields': {'summary': 'Test summary', 'fixVersions': []}\n }\n\n self.issue = Issue(obj, MockSession(400, {}))\n self.obj = obj\n\n @patch('apps.jira.models.get_response', return_value=MockResponse(201))\n def test_comment(self, mock_response):\n response = self.issue.comment('Test comment')\n self.assertTrue(response)\n\n @patch('apps.jira.models.get_response', return_value=MockResponse(201))\n def test_add_version(self, mock_response):\n response = self.issue.add_version('Test version')\n self.assertTrue(response)\n\n def test_add_version_already_has_version(self):\n issue = Issue(self.obj, MockSession(400, {}))\n issue.fix_versions.append({'name': 'Existing version'})\n self.assertFalse(issue.add_version('Test version'))\n","sub_path":"tests/apps/jira/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":2991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"306118532","text":"from flor.constants import *\nfrom flor import stateful, utils\nfrom flor.skipblock.skip_block import SkipBlock\nfrom flor.writer import Writer\n\nimport sys\nimport functools\n\ndef partition(iterator, partition_id, num_partitions):\n if stateful.MODE is EXEC:\n # This method is pass through on exec\n return iterator\n assert partition_id >= 0 and partition_id < num_partitions\n partition_id = int(partition_id)\n SkipBlock.parallel = True\n\n pretraining = stateful.pretraining\n iterations_count = len(iterator)\n period = stateful.period\n\n psl = Writer.partitioned_store_load\n if len(psl) > iterations_count:\n # This is true when Train & Eval loop share the same looper (see Rnn Translator)\n assert len(psl) % iterations_count == 0\n # We will stitch adjacents together\n new_group_size = int(len(psl) / iterations_count)\n new_psl = []\n current_group = None\n for i,each in enumerate(psl):\n if i % new_group_size == 0:\n new_psl.append(current_group)\n current_group = []\n current_group += each\n new_psl.append(current_group)\n assert new_psl.pop(0) is None\n assert len(new_psl) == iterations_count\n Writer.partitioned_store_load = new_psl\n del psl\n\n\n\n stateful.iterations_count = iterations_count\n\n epoch_partitions = utils.get_partitions(len(iterator), num_partitions, pretraining, period)\n\n our_epochs = epoch_partitions[partition_id]\n if not our_epochs:\n sys.exit(0)\n\n predecessor_id = our_epochs[0] - 1\n if predecessor_id >= 0 and stateful.PRED_INIT_MODE is WEAK:\n Writer.store_load = functools.reduce(lambda x,y: x+y, Writer.partitioned_store_load[predecessor_id:])\n # In case of STRONG init mode, just leave store_load as it is, it already has\n # What it needs to start from 0. 
It doesn't need to start at some k.\n\n if stateful.PRED_INIT_MODE is WEAK:\n predecessor_epochs = [predecessor_id,] if predecessor_id >= 0 else []\n else:\n predecessor_epochs = range(predecessor_id + 1)\n\n for pred in predecessor_epochs:\n print(f\"Initializing epoch {pred}\")\n yield iterator[pred]\n\n import flor\n flor.SKIP = False\n\n for epoch in our_epochs:\n print(f\"Executing epoch {epoch}\")\n yield iterator[epoch]\n\n\n\n\n","sub_path":"flor/parallelizer/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"406609767","text":"from irSerial import Serial\nfrom time import time\n\nser = Serial()\ntry:\n ser.open()\n if ser.isOpen():\n t = time()\n while True:\n frame = ser.getIrFrame()\n print(f'\\n{time() - t}, shape={frame.shape}, type={type(frame[0][0])}')\n t = time()\n print('-' * 64)\n for y in range(24):\n for x in range(32):\n b = frame[y][x]\n if b > 80:\n print('@', end=' ')\n else:\n print(' ', end=' ')\n print()\n else:\n print(\"open serial port error\")\nfinally:\n ser.close()\n","sub_path":"windows_pyserial/simpleShow.py","file_name":"simpleShow.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"258069868","text":"from flask import Blueprint\nfrom app import jsonify\nfrom app.models import Branch\nfrom app.utils import decorators\n\napi = Blueprint(\"branch_api\", __name__, url_prefix=\"/api/branch\")\n\n\n@api.route(\"/list\", methods=[\"GET\"])\n@decorators.login_required\ndef list():\n branches = [branch.serialize() for branch in Branch.query.all()]\n data = dict(status=\"success\", branches=branches)\n return jsonify(data), 200\n\n\n@api.route(\"/<int:branchid>\", methods=[\"GET\"])\n@decorators.login_required\ndef get_branch(branchid):\n branch = Branch.query.get(branchid)\n if branch:\n data = dict(status=\"success\", branch=branch.serialize())\n else:\n data = dict(status=\"fail\", message=\"No such Branch found\")\n return jsonify(data), 200\n","sub_path":"app/api/branch.py","file_name":"branch.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"604307452","text":"# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (nested_scopes, generators, division, absolute_import, with_statement,\n print_function, unicode_literals)\n\nimport functools\nimport os\nfrom contextlib import contextmanager\nfrom zipfile import ZIP_DEFLATED, ZIP_STORED\n\nfrom twitter.common.dirutil import safe_mkdir\n\nfrom pants.base.build_environment import get_buildroot\nfrom pants.base.exceptions import TaskError\nfrom pants.fs.fs import safe_filename\nfrom pants.java.jar import Manifest, open_jar\nfrom pants.targets.scala_library import ScalaLibrary\nfrom pants.tasks.task import Task\nfrom pants.tasks.javadoc_gen import javadoc\nfrom pants.tasks.scaladoc_gen import scaladoc\n\n\ndef is_java_library(target):\n return target.has_sources('.java')\n\n\ndef is_scala_library(target):\n return target.has_sources('.scala')\n\n\ndef is_jvm_library(target):\n return is_java_library(target) or is_scala_library(target)\n\n\ndef jarname(target, extension='.jar'):\n # TODO(John Sirois): incorporate version\n _, id_, _ = target.get_artifact_info()\n # Cap jar names quite a bit lower than 
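# utils.get_partitions is imported in the flor parallelizer above but not
# shown here. A plausible minimal contiguous partitioner with the same role,
# ignoring the pretraining and period arguments, is sketched below; this is
# an illustrative assumption, not flor's actual implementation:
def get_partitions_sketch(num_epochs, num_partitions):
    base, extra = divmod(num_epochs, num_partitions)
    partitions, start = [], 0
    for p in range(num_partitions):
        size = base + (1 if p < extra else 0)  # spread the remainder evenly
        partitions.append(list(range(start, start + size)))
        start += size
    return partitions

# Example: 10 epochs split over 3 workers.
assert get_partitions_sketch(10, 3) == [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]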
the standard fs limit of 255 characters since these\n # artifacts will often be used outside pants and those uses may manipulate (expand) the jar\n # filenames blindly.\n return safe_filename(id_, extension, max_length=200)\n\n\ndef _abs_and_relative_sources(target):\n abs_source_root = os.path.join(get_buildroot(), target.target_base)\n for source in target.sources_relative_to_source_root():\n yield os.path.join(abs_source_root, source), source\n\n\nclass JarCreate(Task):\n \"\"\"Jars jvm libraries and optionally their sources and their docs.\"\"\"\n\n @classmethod\n def setup_parser(cls, option_group, args, mkflag):\n option_group.add_option(mkflag('compressed'), mkflag('compressed', negate=True),\n dest='jar_create_compressed', default=True,\n action='callback', callback=mkflag.set_bool,\n help='[%default] Create compressed jars.')\n\n option_group.add_option(mkflag('transitive'), mkflag('transitive', negate=True),\n dest='jar_create_transitive', default=True,\n action='callback', callback=mkflag.set_bool,\n help='[%default] Create jars for the transitive closure of internal '\n 'targets reachable from the roots specified on the command line.')\n\n option_group.add_option(mkflag('classes'), mkflag('classes', negate=True),\n dest='jar_create_classes', default=True,\n action='callback', callback=mkflag.set_bool,\n help='[%default] Create class jars.')\n option_group.add_option(mkflag('sources'), mkflag('sources', negate=True),\n dest='jar_create_sources', default=False,\n action='callback', callback=mkflag.set_bool,\n help='[%default] Create source jars.')\n #TODO tdesai: Think about a better way to set defaults per goal basis.\n javadoc_defaults = True if option_group.title.split(':')[0] == 'publish' else False\n option_group.add_option(mkflag('javadoc'), mkflag('javadoc', negate=True),\n dest='jar_create_javadoc',\n default=javadoc_defaults,\n action='callback', callback=mkflag.set_bool,\n help='[%default] Create javadoc jars.')\n\n def __init__(self, context, workdir):\n super(JarCreate, self).__init__(context, workdir)\n\n options = context.options\n products = context.products\n\n self.transitive = options.jar_create_transitive\n self.compression = ZIP_DEFLATED if options.jar_create_compressed else ZIP_STORED\n\n self.jar_classes = options.jar_create_classes or products.isrequired('jars')\n if self.jar_classes:\n products.require_data('classes_by_target')\n products.require_data('resources_by_target')\n\n definitely_create_javadoc = options.jar_create_javadoc or products.isrequired('javadoc_jars')\n definitely_dont_create_javadoc = options.jar_create_javadoc is False\n create_javadoc = options.jar_create_javadoc\n if definitely_create_javadoc and definitely_dont_create_javadoc:\n self.context.log.warn('javadoc jars are required but you have requested they not be created, '\n 'creating anyway')\n self.jar_javadoc = (True if definitely_create_javadoc else\n False if definitely_dont_create_javadoc else\n create_javadoc)\n if self.jar_javadoc:\n products.require(javadoc.product_type)\n products.require(scaladoc.product_type)\n\n self.jar_sources = products.isrequired('source_jars') or options.jar_create_sources\n\n self._jars = {}\n\n def execute(self, targets):\n safe_mkdir(self.workdir)\n\n def jar_targets(predicate):\n return filter(predicate, (targets if self.transitive else self.context.target_roots))\n\n def add_genjar(typename, target, name):\n self.context.products.get(typename).add(target, self.workdir).append(name)\n\n # TODO(Tejal Desai) pantsbuild/pants/65: Avoid creating 2 
jars with java sources for\n # scala_library with java_sources. Currently publish fails fast if scala_library owning\n # java sources pointed by java_library target also provides an artifact. However, jar_create\n # ends up creating 2 jars one scala and other java both including the java_sources.\n if self.jar_classes:\n self._jar(jar_targets(is_jvm_library), functools.partial(add_genjar, 'jars'))\n\n if self.jar_sources:\n self.sourcejar(jar_targets(is_jvm_library), functools.partial(add_genjar, 'source_jars'))\n\n if self.jar_javadoc:\n javadoc_add_genjar = functools.partial(add_genjar, 'javadoc_jars')\n self.javadocjar(jar_targets(is_java_library),\n self.context.products.get(javadoc.product_type),\n javadoc_add_genjar)\n self.javadocjar(jar_targets(is_scala_library),\n self.context.products.get(scaladoc.product_type),\n javadoc_add_genjar)\n\n @contextmanager\n def create_jar(self, target, path):\n existing = self._jars.setdefault(path, target)\n if target != existing:\n raise TaskError('Duplicate name: target %s tried to write %s already mapped to target %s' % (\n target, path, existing\n ))\n self._jars[path] = target\n with open_jar(path, 'w', compression=self.compression) as jar:\n yield jar\n\n def _jar(self, jvm_targets, add_genjar):\n classes_by_target = self.context.products.get_data('classes_by_target')\n resources_by_target = self.context.products.get_data('resources_by_target')\n\n for target in jvm_targets:\n target_classes = classes_by_target.get(target)\n\n target_resources = []\n if target.has_resources:\n target_resources.extend(resources_by_target.get(r) for r in target.resources)\n\n if target_classes or target_resources:\n jar_name = jarname(target)\n add_genjar(target, jar_name)\n jar_path = os.path.join(self.workdir, jar_name)\n with self.create_jar(target, jar_path) as jarfile:\n def add_to_jar(target_products):\n if target_products:\n for root, products in target_products.rel_paths():\n for prod in products:\n jarfile.write(os.path.join(root, prod), prod)\n add_to_jar(target_classes)\n for resources_target in target_resources:\n add_to_jar(resources_target)\n if target.is_java_agent:\n self.write_agent_manifest(target, jarfile)\n\n def sourcejar(self, jvm_targets, add_genjar):\n for target in jvm_targets:\n jar_name = jarname(target, '-sources.jar')\n add_genjar(target, jar_name)\n jar_path = os.path.join(self.workdir, jar_name)\n with self.create_jar(target, jar_path) as jar:\n for abs_source, rel_source in _abs_and_relative_sources(target):\n jar.write(abs_source, rel_source)\n\n # TODO(Tejal Desai): pantsbuild/pants/65 Remove java_sources attribute for ScalaLibrary\n if isinstance(target, ScalaLibrary):\n for java_source_target in target.java_sources:\n for abs_source, rel_source in _abs_and_relative_sources(java_source_target):\n jar.write(abs_source, rel_source)\n\n if target.has_resources:\n for resource_target in target.resources:\n for abs_source, rel_source in _abs_and_relative_sources(resource_target):\n jar.write(abs_source, rel_source)\n\n def javadocjar(self, java_targets, genmap, add_genjar):\n for target in java_targets:\n generated = genmap.get(target)\n if generated:\n jar_name = jarname(target, '-javadoc.jar')\n add_genjar(target, jar_name)\n jar_path = os.path.join(self.workdir, jar_name)\n with self.create_jar(target, jar_path) as jar:\n for basedir, javadocfiles in generated.items():\n for javadocfile in javadocfiles:\n jar.write(os.path.join(basedir, javadocfile), javadocfile)\n\n def write_agent_manifest(self, agent, jarfile):\n # TODO(John 
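# create_jar above guards against two different targets writing to the same
# jar path by recording the first claimant. The same pattern, reduced to a
# small reusable registry for illustration only (not pants code):
from contextlib import contextmanager

class PathRegistry:
    """Raise if two different owners claim the same output path."""
    def __init__(self):
        self._owners = {}

    @contextmanager
    def claim(self, owner, path):
        existing = self._owners.setdefault(path, owner)
        if existing != owner:
            raise ValueError('Duplicate name: %r tried to write %r already mapped to %r'
                             % (owner, path, existing))
        yield path

# Usage sketch:
# registry = PathRegistry()
# with registry.claim('target-a', 'dist/a.jar') as path:
#     ...  # write the jar; a second claim on 'dist/a.jar' by target-b raises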
Sirois): refactor an agent model to support 'Boot-Class-Path' properly.\n manifest = Manifest()\n manifest.addentry(Manifest.MANIFEST_VERSION, '1.0')\n if agent.premain:\n manifest.addentry('Premain-Class', agent.premain)\n if agent.agent_class:\n manifest.addentry('Agent-Class', agent.agent_class)\n if agent.can_redefine:\n manifest.addentry('Can-Redefine-Classes', 'true')\n if agent.can_retransform:\n manifest.addentry('Can-Retransform-Classes', 'true')\n if agent.can_set_native_method_prefix:\n manifest.addentry('Can-Set-Native-Method-Prefix', 'true')\n jarfile.writestr(Manifest.PATH, manifest.contents())\n","sub_path":"src/python/pants/tasks/jar_create.py","file_name":"jar_create.py","file_ext":"py","file_size_in_byte":9988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"58235539","text":"from tkinter import *\nfrom tkinter import filedialog\nimport cv2\n\n\nroot = Tk()\nroot.title(\"VIA\")\nroot.geometry(\"710x394\")\nroot.resizable(width = False, height = False)\n\n# Here you must specify the path where you saved 'VIA.png', i.e. (file=\"path where you saved VIA.png\")\n\nphotoImage = PhotoImage(file=\"C:/Users/albta/PycharmProjects/ProjectVeinScanner/VIA.png\")\nHead = Label(root, image=photoImage)\nHead.place(x = 5, y = 8)\n\n# Here you can change (initialdir=\"the directory or folder to open when the fetch button is clicked\")\n\ndef button_fetch():\n root.filename = filedialog.askopenfilename(initialdir=\"/Users/albta/OneDrive/Desktop/InputVIA\", title=\"FETCH\",\n filetypes=((\"JPEG\", \"*.jpeg\"),(\"PNG\", \"*.png\"), (\"All files\", \"*.*\")))\n button_2 = Button(root, text=\"OUTPUT\", padx=30, pady=8, fg=\"white\", bg=\"black\", borderwidth=0.1, command=button_output)\n button_2.place(x=300, y=330)\n\ndef button_output():\n\n imgg = cv2.imread(root.filename, 0)\n img1 = cv2.GaussianBlur(imgg, (5, 5), 0)\n clahe = cv2.createCLAHE(clipLimit=5)\n resultclahe = clahe.apply(img1)\n\n thresh4 = cv2.adaptiveThreshold(img1, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 35, 4)\n edges = cv2.Canny(thresh4, 100, 200)\n Out = cv2.hconcat([thresh4, edges, resultclahe])\n cv2.imshow(\"Output\", Out)\n\n\n\n#define button\nbutton_1 = Button(root, text = \"FETCH \",padx =33, pady =8.5, fg = \"white\", bg = \"black\", borderwidth = 0.1, command = button_fetch)\n\n#put on screen\nbutton_1.place(x = 300, y = 280)\n\nroot.mainloop()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"VIA (1).py","file_name":"VIA (1).py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"562829212","text":"import praw\n\nr = praw.Reddit(user_agent='AskReddit NTLK parser by u/abelincolncodes '\n 'https://github.com/WhiteAbeLincoln/reddit-ntlk')\n\n\ndef get_comments(sub_id, more_comments=True):\n submission = r.get_submission(submission_id=sub_id)\n if more_comments:\n submission.replace_more_comments()\n all_comments = submission.comments\n return [x.body for x in all_comments if type(x) == praw.objects.Comment]\n","sub_path":"reddit/reddit.py","file_name":"reddit.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"86690380","text":"'''\n- Leetcode problem: 55\n\n- Difficulty: Medium\n\n- Brief problem description:\n\nGiven an array of non-negative integers, you are initially positioned at the first index of the array.\n\nEach 
element in the array represents your maximum jump length at that position.\n\nDetermine if you are able to reach the last index.\n\n\n\nExample 1:\n\nInput: nums = [2,3,1,1,4]\nOutput: true\nExplanation: Jump 1 step from index 0 to 1, then 3 steps to the last index.\nExample 2:\n\nInput: nums = [3,2,1,0,4]\nOutput: false\nExplanation: You will always arrive at index 3 no matter what. Its maximum jump length is 0, which makes it impossible\nto reach the last index.\n\n\nConstraints:\n\n1 <= nums.length <= 3 * 10^4\n0 <= nums[i] <= 10^5\n\n- Solution Summary:\n\nDP with memoization, reducing time from O(n**2) to O(n)\n\n- Used Resources:\n\n--- Bo Zhou\n'''\n\nfrom typing import List\n\n\nclass Solution:\n def canJump(self, nums: List[int]) -> bool:\n if len(nums) < 2:\n return True\n\n dp = [False for i in range(len(nums))]\n\n dp[-1] = True\n\n lastTruePos = len(nums) - 1\n for i in range(len(nums) - 2, -1, -1):\n if i + nums[i] >= lastTruePos:\n dp[i] = True\n lastTruePos = i\n\n return dp[0]\n\n\nif __name__ == \"__main__\":\n solution = Solution()\n testList = [2,3,1,1,4]\n print(solution.canJump(testList))","sub_path":"p55_Jump_Game.py","file_name":"p55_Jump_Game.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"232489664","text":"# -*- coding:UTF-8 -*-\nimport time\nimport os\nimport pymysql\nfrom settings import *\nimport platform\nfrom lib.getLogging import *\nfilename = os.path.basename(__file__)\nlogging = Logger(filename).getlog()\n\nclass requestSQL():\n # init\n def __init__(self):\n self.conn = pymysql.connect(\n host=ip,\n port=int(port),\n user=user,\n passwd=password,\n db=database,\n charset='utf8'\n )\n self.cursor = self.conn.cursor()\n\n # Must be passed a complete SQL statement\n def sql_exe(self, sql):\n self.cursor.execute(sql)\n\n # INSERT INTO table_name (col1, col2, ...) 
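# The DP above runs in O(n) time and O(n) space. The same answer can be
# computed greedily in O(n) time and O(1) space by tracking the furthest
# index reachable so far (an alternative sketch, not the original solution):
from typing import List

def can_jump_greedy(nums: List[int]) -> bool:
    furthest = 0
    for i, step in enumerate(nums):
        if i > furthest:  # index i can never be reached
            return False
        furthest = max(furthest, i + step)
    return True

assert can_jump_greedy([2, 3, 1, 1, 4]) is True
assert can_jump_greedy([3, 2, 1, 0, 4]) is False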
VALUES (val1, val2, ...)\n def insert(self, tbName, field, values):\n insSql = \"insert into %s(%s)values %s\" % (tbName, field, values)\n return self.execute(insSql)\n\n # select(self, table, columns, where)\n def select(self, tbName, field='*', where=''):\n if where:\n where = \" where \"+where\n selSql = \"select %s from %s %s\" % (field, tbName, where)\n return self.execute(selSql)\n\n # UPDATE table_name SET column_name = new_value WHERE column_name = some_value\n def update(self, keyValues, tbName, where):\n setValue = ''\n for k,v in keyValues.items():\n setValue += '`%s`=\"%s\",' % (k, v)\n if where:\n where = \" where \"+where\n updateSql = \"update %s set %s %s\" % (tbName, setValue[:-1], where)\n return self.execute(updateSql)\n\n # DELETE FROM table_name WHERE column_name = value\n def delete(self,tbName, where):\n if where:\n where = \" where \"+where\n delSql = \"delete from %s %s\" % (tbName,where)\n return self.execute(delSql)\n\n # execute\n def execute(self, sql):\n try:\n if sql.find('select') != -1:\n self.cursor.execute(sql)\n return self.cursor.fetchall()\n elif sql.find('insert') != -1 or sql.find('update') != -1 or sql.find('delete') != -1:\n self.cursor.execute(sql)\n self.conn.commit()\n return True\n else:\n return False\n except Exception as e:\n print(str(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())) + '--' + str(e))\n return False\n\n # __del__\n def __del__(self):\n self.cursor.close()\n self.conn.close()\n\n\n def excute_select(self,sql:str,fetch:str)->list:\n '''\n\n :param sql: the SELECT statement to execute\n :param fetch: which cursor fetch to use ('fetchone', 'fetchmany' or 'fetchall')\n :return: the fetched rows, or None for an unknown fetch mode\n '''\n\n self.cursor.execute(sql)\n if fetch == 'fetchone':\n return self.cursor.fetchone()\n elif fetch == 'fetchmany':\n return self.cursor.fetchmany()\n elif fetch == 'fetchall':\n return self.cursor.fetchall()\n else:\n return None\n\n\nif __name__ == \"__main__\":\n # requestSQL = requestSQL()\n # re = requestSQL.sql_exe('''select * FROM mrm_type where mrm_type_id = 'basy';''')\n requestSQL = requestSQL()\n re = requestSQL.excute_select('''select * FROM mrm_type where mrm_type_id = 'basy';''','fetchone')\n print(re)\n # print(requestSQL.cursor.fetchone)\n # pass","sub_path":"lib/get_sql.py","file_name":"get_sql.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"518140844","text":"import sys\nimport json\nimport os\n\npath=sys.argv[1]\nlibname=sys.argv[2]\nprint(path)\nf=open(path,'r')\njf=json.loads(f.read())\nos.system('mkdir libs')\nos.system('mkdir libs/' + libname)\nversions=jf['versions']\nprint(versions)\nprint('=====================')\nfor v in versions:\n print('Installing ' + libname + ' ' + v)\n os.system('mkdir libs/' + libname + '/' + v)\n cmd='npm install ' + libname + '@' + v + ''\n print(cmd)\n os.system(cmd)\n os.system('mv node_modules/' + libname + '/*' + ' libs/' + libname + '/' + v + '/')\n os.system('rm -rf node_modules/')\n\nprint('The lib has been installed. 
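# The helper class above builds SQL by string interpolation, which is open
# to SQL injection. pymysql supports parameter binding through the second
# argument of cursor.execute(); a safer query might look like the sketch
# below (identifiers such as table names still cannot be bound and must be
# trusted). This is illustrative, not part of the original module:
import pymysql

def select_by_id(conn, table, type_id):
    sql = "SELECT * FROM " + table + " WHERE mrm_type_id = %s"
    with conn.cursor() as cursor:
        cursor.execute(sql, (type_id,))  # the value is escaped by the driver
        return cursor.fetchall()

# Usage sketch, reusing the connection settings from the class above:
# conn = pymysql.connect(host=ip, port=int(port), user=user,
#                        passwd=password, db=database, charset='utf8')
# rows = select_by_id(conn, 'mrm_type', 'basy')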
Now importing lib info to DB')","sub_path":"npmFetch.py","file_name":"npmFetch.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"160962741","text":"\"\"\"Should be obvious.\"\"\"\n\nimport discord\nfrom discord.ext import commands\nfrom cogs.utils import utils, checks\n\n\nclass Test:\n\n def __init__(self, bot):\n self.bot = bot\n self.db = bot.db\n\n @commands.group(name='test')\n async def test(self, ctx, *, arg):\n pass\n\n @test.group(name='t1')\n async def t1(self, ctx, *, arg):\n \"\"\"Test\"\"\"\n pass\n\n @t1.command(name='t1_1')\n async def t1_1(self, ctx, *, arg):\n \"\"\"T1 subcommand\"\"\"\n pass\n\n @test.command(name='t2')\n async def t2(self, ctx):\n \"\"\"Supposed to print shit\"\"\"\n print(dir(ctx))\n print()\n print(dir(ctx.command))\n\n @checks.sudo()\n @commands.command(name='countdown', hidden=True)\n async def countdown(self, ctx, seconds: int):\n \"\"\"Counts down from <seconds>\n\n [p]countdown <seconds>\"\"\"\n from asyncio import sleep\n if seconds > 600:\n await ctx.send(\"{}, I cannot count down for any time longer than 600 seconds\".format(ctx.message.author.mention))\n return\n else:\n em = discord.Embed(title=\"countdown\", description=str(seconds))\n count = await ctx.send(embed=em)\n await sleep(1)\n for i in list(range(seconds))[::-1]:\n em = discord.Embed(title=\"countdown\", description=i)\n await count.edit(embed=em)\n await sleep(1)\n await count.delete()\n\n # import math\n # def _hex(r: int, g: int, b: int):\n # return (r * 0x10000) + (g * 0x100) + (b)\n #\n # c = [0, 255, 0]\n # h = _hex(*c)\n # s = 10\n # sr = math.floor((255 / s))\n # for i in range(10):\n # global c\n # c = [c[0] + sr, c[1] - sr, 0]\n # h = _hex(*c)\n # print(hex(h))\n\n @commands.command(name='pagtest', hidden=True)\n async def pagtest(self, ctx):\n value = \"\"\"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\nUt enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.\nDuis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.\nExcepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\nSed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo.\nNemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt.\nNeque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem.\nUt enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur?\nQuis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur?\"\"\"\n pag = utils.paginate(value)\n em = discord.Embed(title='Pagination Test:', color=discord.Colour.green())\n c = 1\n for i in pag:\n em.add_field(name='Field {}'.format(c), value=i)\n c += 1\n await ctx.send(embed=em)\n\n @checks.sudo()\n @commands.command(name='redtest', enabled=False, hidden=True)\n async def redtest(self, ctx, *, message: 
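# utils.paginate is not shown in this cog. Discord caps embed field values
# at 1024 characters, so a minimal paginator consistent with how pagtest
# consumes it could simply chunk on that limit. This is an assumption about
# its behaviour, not the project's actual implementation:
def paginate_sketch(text, limit=1024):
    """Yield consecutive chunks of `text`, each no longer than `limit`."""
    for start in range(0, len(text), limit):
        yield text[start:start + limit]

# Example:
assert list(paginate_sketch('abcdef', limit=4)) == ['abcd', 'ef']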
str):\n '''Test docstr'''\n self.db.hset('redtest', ctx.message.id, message)\n\n @checks.sudo()\n @commands.command()\n async def embtest(self, ctx):\n d = {\n \"content\": \"this `supports` __a__ **subset** *of* ~~markdown~~ 😃 ```js\\nfunction foo(bar) {\\n console.log(bar);\\n}\\n\\nfoo(1);```\",\n \"embed\": {\n \"title\": \"title ~~(did you know you can have markdown here too?)~~\",\n \"description\": \"this supports [named links](https://discordapp.com) on top of the previously shown subset of markdown. ```\\nyes, even code blocks```\",\n \"url\": \"https://discordapp.com\",\n \"color\": 4830089,\n \"footer\": {\n \"icon_url\": \"https://cdn.discordapp.com/embed/avatars/0.png\",\n \"text\": \"footer text\"\n },\n \"thumbnail\": {\n \"url\": \"https://cdn.discordapp.com/embed/avatars/0.png\"\n },\n \"image\": {\n \"url\": \"https://cdn.discordapp.com/embed/avatars/0.png\"\n },\n \"author\": {\n \"name\": \"author name\",\n \"url\": \"https://discordapp.com\",\n \"icon_url\": \"https://cdn.discordapp.com/embed/avatars/0.png\"\n },\n \"fields\": [\n {\n \"name\": \"🤔\",\n \"value\": \"some of these properties have certain limits...\"\n },\n {\n \"name\": \"😱\",\n \"value\": \"try exceeding some of them!\"\n },\n {\n \"name\": \"🙄\",\n \"value\": \"an informative error should show up, and this view will remain as-is until all issues are fixed\"\n },\n {\n \"name\": \"<:thonkang:219069250692841473>\",\n \"value\": \"these last two\",\n \"inline\": True\n },\n {\n \"name\": \"<:thonkang:219069250692841473>\",\n \"value\": \"are inline fields\",\n \"inline\": True\n }\n ]\n }\n }\n emb = discord.Embed.from_data(d['embed'])\n await ctx.send(d['content'], embed=emb)\n\n @commands.command()\n async def c4test(self, ctx):\n await ctx.send(embed=discord.Embed(description=\":one::two::three::four::five::six::seven:\"))\n\n\ndef setup(bot):\n bot.add_cog(Test(bot))","sub_path":"cogs/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"180358197","text":"dict = {\"aatrox\": {\"name\": \"Aatrox\", \"file\": \"aatrox\", \"champion_id\": \"266\", \"internal_name\": \"aatrox\"},\n \"ahri\": {\"name\": \"Ahri\", \"file\": \"ahri\", \"champion_id\": \"103\", \"internal_name\": \"ahri\"},\n \"akali\": {\"name\": \"Akali\", \"file\": \"akali\", \"champion_id\": \"84\", \"internal_name\": \"akali\"},\n \"alistar\": {\"name\": \"Alistar\", \"file\": \"alistar\", \"champion_id\": \"12\", \"internal_name\": \"alistar\"},\n \"amumu\": {\"name\": \"Amumu\", \"file\": \"amumu\", \"champion_id\": \"32\", \"internal_name\": \"amumu\"},\n \"anivia\": {\"name\": \"Anivia\", \"file\": \"anivia\", \"champion_id\": \"34\", \"internal_name\": \"anivia\"},\n \"annie\": {\"name\": \"Annie\", \"file\": \"annie\", \"champion_id\": \"1\", \"internal_name\": \"annie\"},\n \"ashe\": {\"name\": \"Ashe\", \"file\": \"ashe\", \"champion_id\": \"22\", \"internal_name\": \"ashe\"},\n \"azir\": {\"name\": \"Azir\", \"file\": \"azir\", \"champion_id\": \"268\", \"internal_name\": \"azir\"},\n \"blitzcrank\": {\"name\": \"Blitzcrank\", \"file\": \"blitzcrank\", \"champion_id\": \"53\", \"internal_name\": \"blitzcrank\"},\n \"brand\": {\"name\": \"Brand\", \"file\": \"brand\", \"champion_id\": \"63\", \"internal_name\": \"brand\"},\n \"braum\": {\"name\": \"Braum\", \"file\": \"braum\", \"champion_id\": \"201\", \"internal_name\": \"braum\"},\n \"caitlyn\": {\"name\": \"Caitlyn\", \"file\": \"caitlyn\", 
\"champion_id\": \"51\", \"internal_name\": \"caitlyn\"},\n \"cassiopeia\": {\"name\": \"Cassiopeia\", \"file\": \"cassiopeia\", \"champion_id\": \"69\", \"internal_name\": \"cassiopeia\"},\n \"cho'gath\": {\"name\": \"Cho'Gath\", \"file\": \"chogath\", \"champion_id\": \"31\", \"internal_name\": \"cho'gath\"},\n \"corki\": {\"name\": \"Corki\", \"file\": \"corki\", \"champion_id\": \"42\", \"internal_name\": \"corki\"},\n \"darius\": {\"name\": \"Darius\", \"file\": \"darius\", \"champion_id\": \"122\", \"internal_name\": \"darius\"},\n \"diana\": {\"name\": \"Diana\", \"file\": \"diana\", \"champion_id\": \"131\", \"internal_name\": \"diana\"},\n \"dr. mundo\": {\"name\": \"Dr. Mundo\", \"file\": \"drmundo\", \"champion_id\": \"36\", \"internal_name\": \"dr. mundo\"},\n \"draven\": {\"name\": \"Draven\", \"file\": \"draven\", \"champion_id\": \"119\", \"internal_name\": \"draven\"},\n \"elise\": {\"name\": \"Elise\", \"file\": \"elise\", \"champion_id\": \"60\", \"internal_name\": \"elise\"},\n \"evelynn\": {\"name\": \"Evelynn\", \"file\": \"evelynn\", \"champion_id\": \"28\", \"internal_name\": \"evelynn\"},\n \"ezreal\": {\"name\": \"Ezreal\", \"file\": \"ezreal\", \"champion_id\": \"81\", \"internal_name\": \"ezreal\"},\n \"fiddlesticks\": {\"name\": \"Fiddlesticks\", \"file\": \"fiddlesticks\", \"champion_id\": \"9\",\n \"internal_name\": \"fiddlesticks\"},\n \"fiora\": {\"name\": \"Fiora\", \"file\": \"fiora\", \"champion_id\": \"114\", \"internal_name\": \"fiora\"},\n \"fizz\": {\"name\": \"Fizz\", \"file\": \"fizz\", \"champion_id\": \"105\", \"internal_name\": \"fizz\"},\n \"galio\": {\"name\": \"Galio\", \"file\": \"galio\", \"champion_id\": \"3\", \"internal_name\": \"galio\"},\n \"gangplank\": {\"name\": \"Gangplank\", \"file\": \"gangplank\", \"champion_id\": \"41\", \"internal_name\": \"gangplank\"},\n \"garen\": {\"name\": \"Garen\", \"file\": \"garen\", \"champion_id\": \"86\", \"internal_name\": \"garen\"},\n \"gnar\": {\"name\": \"Gnar\", \"file\": \"gnar\", \"champion_id\": \"150\", \"internal_name\": \"gnar\"},\n \"gragas\": {\"name\": \"Gragas\", \"file\": \"gragas\", \"champion_id\": \"79\", \"internal_name\": \"gragas\"},\n \"graves\": {\"name\": \"Graves\", \"file\": \"graves\", \"champion_id\": \"104\", \"internal_name\": \"graves\"},\n \"hecarim\": {\"name\": \"Hecarim\", \"file\": \"hecarim\", \"champion_id\": \"120\", \"internal_name\": \"hecarim\"},\n \"heimerdinger\": {\"name\": \"Heimerdinger\", \"file\": \"heimerdinger\", \"champion_id\": \"74\",\n \"internal_name\": \"heimerdinger\"},\n \"irelia\": {\"name\": \"Irelia\", \"file\": \"irelia\", \"champion_id\": \"39\", \"internal_name\": \"irelia\"},\n \"janna\": {\"name\": \"Janna\", \"file\": \"janna\", \"champion_id\": \"40\", \"internal_name\": \"janna\"},\n \"jarvan iv\": {\"name\": \"Jarvan IV\", \"file\": \"jarvaniv\", \"champion_id\": \"59\", \"internal_name\": \"jarvan iv\"},\n \"jax\": {\"name\": \"Jax\", \"file\": \"jax\", \"champion_id\": \"24\", \"internal_name\": \"jax\"},\n \"jayce\": {\"name\": \"Jayce\", \"file\": \"jayce\", \"champion_id\": \"126\", \"internal_name\": \"jayce\"},\n \"jinx\": {\"name\": \"Jinx\", \"file\": \"jinx\", \"champion_id\": \"222\", \"internal_name\": \"jinx\"},\n \"karma\": {\"name\": \"Karma\", \"file\": \"karma\", \"champion_id\": \"43\", \"internal_name\": \"karma\"},\n \"karthus\": {\"name\": \"Karthus\", \"file\": \"karthus\", \"champion_id\": \"30\", \"internal_name\": \"karthus\"},\n \"kassadin\": {\"name\": \"Kassadin\", \"file\": \"kassadin\", 
\"champion_id\": \"38\", \"internal_name\": \"kassadin\"},\n \"katarina\": {\"name\": \"Katarina\", \"file\": \"katarina\", \"champion_id\": \"55\", \"internal_name\": \"katarina\"},\n \"kayle\": {\"name\": \"Kayle\", \"file\": \"kayle\", \"champion_id\": \"10\", \"internal_name\": \"kayle\"},\n \"kennen\": {\"name\": \"Kennen\", \"file\": \"kennen\", \"champion_id\": \"85\", \"internal_name\": \"kennen\"},\n \"kha'zix\": {\"name\": \"Kha'Zix\", \"file\": \"khazix\", \"champion_id\": \"121\", \"internal_name\": \"kha'zix\"},\n \"kog'maw\": {\"name\": \"Kog'Maw\", \"file\": \"kogmaw\", \"champion_id\": \"96\", \"internal_name\": \"kog'maw\"},\n \"leblanc\": {\"name\": \"LeBlanc\", \"file\": \"leblanc\", \"champion_id\": \"7\", \"internal_name\": \"leblanc\"},\n \"lee sin\": {\"name\": \"Lee Sin\", \"file\": \"leesin\", \"champion_id\": \"64\", \"internal_name\": \"lee sin\"},\n \"leona\": {\"name\": \"Leona\", \"file\": \"leona\", \"champion_id\": \"89\", \"internal_name\": \"leona\"},\n \"lissandra\": {\"name\": \"Lissandra\", \"file\": \"lissandra\", \"champion_id\": \"127\", \"internal_name\": \"lissandra\"},\n \"lucian\": {\"name\": \"Lucian\", \"file\": \"lucian\", \"champion_id\": \"236\", \"internal_name\": \"lucian\"},\n \"lulu\": {\"name\": \"Lulu\", \"file\": \"lulu\", \"champion_id\": \"117\", \"internal_name\": \"lulu\"},\n \"lux\": {\"name\": \"Lux\", \"file\": \"lux\", \"champion_id\": \"99\", \"internal_name\": \"lux\"},\n \"malphite\": {\"name\": \"Malphite\", \"file\": \"malphite\", \"champion_id\": \"54\", \"internal_name\": \"malphite\"},\n \"malzahar\": {\"name\": \"Malzahar\", \"file\": \"malzahar\", \"champion_id\": \"90\", \"internal_name\": \"malzahar\"},\n \"maokai\": {\"name\": \"Maokai\", \"file\": \"maokai\", \"champion_id\": \"57\", \"internal_name\": \"maokai\"},\n \"master yi\": {\"name\": \"Master Yi\", \"file\": \"masteryi\", \"champion_id\": \"11\", \"internal_name\": \"master yi\"},\n \"miss fortune\": {\"name\": \"Miss Fortune\", \"file\": \"missfortune\", \"champion_id\": \"21\",\n \"internal_name\": \"miss fortune\"},\n \"mordekaiser\": {\"name\": \"Mordekaiser\", \"file\": \"mordekaiser\", \"champion_id\": \"82\",\n \"internal_name\": \"mordekaiser\"},\n \"morgana\": {\"name\": \"Morgana\", \"file\": \"morgana\", \"champion_id\": \"25\", \"internal_name\": \"morgana\"},\n \"nami\": {\"name\": \"Nami\", \"file\": \"nami\", \"champion_id\": \"267\", \"internal_name\": \"nami\"},\n \"nasus\": {\"name\": \"Nasus\", \"file\": \"nasus\", \"champion_id\": \"75\", \"internal_name\": \"nasus\"},\n \"nautilus\": {\"name\": \"Nautilus\", \"file\": \"nautilus\", \"champion_id\": \"111\", \"internal_name\": \"nautilus\"},\n \"nidalee\": {\"name\": \"Nidalee\", \"file\": \"nidalee\", \"champion_id\": \"76\", \"internal_name\": \"nidalee\"},\n \"nocturne\": {\"name\": \"Nocturne\", \"file\": \"nocturne\", \"champion_id\": \"56\", \"internal_name\": \"nocturne\"},\n \"nunu\": {\"name\": \"Nunu\", \"file\": \"nunu\", \"champion_id\": \"20\", \"internal_name\": \"nunu\"},\n \"olaf\": {\"name\": \"Olaf\", \"file\": \"olaf\", \"champion_id\": \"2\", \"internal_name\": \"olaf\"},\n \"orianna\": {\"name\": \"Orianna\", \"file\": \"orianna\", \"champion_id\": \"61\", \"internal_name\": \"orianna\"},\n \"pantheon\": {\"name\": \"Pantheon\", \"file\": \"pantheon\", \"champion_id\": \"80\", \"internal_name\": \"pantheon\"},\n \"poppy\": {\"name\": \"Poppy\", \"file\": \"poppy\", \"champion_id\": \"78\", \"internal_name\": \"poppy\"},\n \"quinn\": {\"name\": \"Quinn\", \"file\": 
\"quinn\", \"champion_id\": \"133\", \"internal_name\": \"quinn\"},\n \"rammus\": {\"name\": \"Rammus\", \"file\": \"rammus\", \"champion_id\": \"33\", \"internal_name\": \"rammus\"},\n \"renekton\": {\"name\": \"Renekton\", \"file\": \"renekton\", \"champion_id\": \"58\", \"internal_name\": \"renekton\"},\n \"rengar\": {\"name\": \"Rengar\", \"file\": \"rengar\", \"champion_id\": \"107\", \"internal_name\": \"rengar\"},\n \"riven\": {\"name\": \"Riven\", \"file\": \"riven\", \"champion_id\": \"92\", \"internal_name\": \"riven\"},\n \"rumble\": {\"name\": \"Rumble\", \"file\": \"rumble\", \"champion_id\": \"68\", \"internal_name\": \"rumble\"},\n \"ryze\": {\"name\": \"Ryze\", \"file\": \"ryze\", \"champion_id\": \"13\", \"internal_name\": \"ryze\"},\n \"sejuani\": {\"name\": \"Sejuani\", \"file\": \"sejuani\", \"champion_id\": \"113\", \"internal_name\": \"sejuani\"},\n \"shaco\": {\"name\": \"Shaco\", \"file\": \"shaco\", \"champion_id\": \"35\", \"internal_name\": \"shaco\"},\n \"shen\": {\"name\": \"Shen\", \"file\": \"shen\", \"champion_id\": \"98\", \"internal_name\": \"shen\"},\n \"shyvana\": {\"name\": \"Shyvana\", \"file\": \"shyvana\", \"champion_id\": \"102\", \"internal_name\": \"shyvana\"},\n \"singed\": {\"name\": \"Singed\", \"file\": \"singed\", \"champion_id\": \"27\", \"internal_name\": \"singed\"},\n \"sion\": {\"name\": \"Sion\", \"file\": \"sion\", \"champion_id\": \"14\", \"internal_name\": \"sion\"},\n \"sivir\": {\"name\": \"Sivir\", \"file\": \"sivir\", \"champion_id\": \"15\", \"internal_name\": \"sivir\"},\n \"skarner\": {\"name\": \"Skarner\", \"file\": \"skarner\", \"champion_id\": \"72\", \"internal_name\": \"skarner\"},\n \"sona\": {\"name\": \"Sona\", \"file\": \"sona\", \"champion_id\": \"37\", \"internal_name\": \"sona\"},\n \"soraka\": {\"name\": \"Soraka\", \"file\": \"soraka\", \"champion_id\": \"16\", \"internal_name\": \"soraka\"},\n \"swain\": {\"name\": \"Swain\", \"file\": \"swain\", \"champion_id\": \"50\", \"internal_name\": \"swain\"},\n \"syndra\": {\"name\": \"Syndra\", \"file\": \"syndra\", \"champion_id\": \"134\", \"internal_name\": \"syndra\"},\n \"talon\": {\"name\": \"Talon\", \"file\": \"talon\", \"champion_id\": \"91\", \"internal_name\": \"talon\"},\n \"taric\": {\"name\": \"Taric\", \"file\": \"taric\", \"champion_id\": \"44\", \"internal_name\": \"taric\"},\n \"teemo\": {\"name\": \"Teemo\", \"file\": \"teemo\", \"champion_id\": \"17\", \"internal_name\": \"teemo\"},\n \"thresh\": {\"name\": \"Thresh\", \"file\": \"thresh\", \"champion_id\": \"412\", \"internal_name\": \"thresh\"},\n \"tristana\": {\"name\": \"Tristana\", \"file\": \"tristana\", \"champion_id\": \"18\", \"internal_name\": \"tristana\"},\n \"trundle\": {\"name\": \"Trundle\", \"file\": \"trundle\", \"champion_id\": \"48\", \"internal_name\": \"trundle\"},\n \"tryndamere\": {\"name\": \"Tryndamere\", \"file\": \"tryndamere\", \"champion_id\": \"23\", \"internal_name\": \"tryndamere\"},\n \"twisted fate\": {\"name\": \"Twisted Fate\", \"file\": \"twistedfate\", \"champion_id\": \"4\",\n \"internal_name\": \"twisted fate\"},\n \"twitch\": {\"name\": \"Twitch\", \"file\": \"twitch\", \"champion_id\": \"29\", \"internal_name\": \"twitch\"},\n \"udyr\": {\"name\": \"Udyr\", \"file\": \"udyr\", \"champion_id\": \"77\", \"internal_name\": \"udyr\"},\n \"urgot\": {\"name\": \"Urgot\", \"file\": \"urgot\", \"champion_id\": \"6\", \"internal_name\": \"urgot\"},\n \"varus\": {\"name\": \"Varus\", \"file\": \"varus\", \"champion_id\": \"110\", \"internal_name\": \"varus\"},\n 
\"vayne\": {\"name\": \"Vayne\", \"file\": \"vayne\", \"champion_id\": \"67\", \"internal_name\": \"vayne\"},\n \"veigar\": {\"name\": \"Veigar\", \"file\": \"veigar\", \"champion_id\": \"45\", \"internal_name\": \"veigar\"},\n \"vel'koz\": {\"name\": \"Vel'Koz\", \"file\": \"velkoz\", \"champion_id\": \"161\", \"internal_name\": \"vel'koz\"},\n \"vi\": {\"name\": \"Vi\", \"file\": \"vi\", \"champion_id\": \"254\", \"internal_name\": \"vi\"},\n \"viktor\": {\"name\": \"Viktor\", \"file\": \"viktor\", \"champion_id\": \"112\", \"internal_name\": \"viktor\"},\n \"vladimir\": {\"name\": \"Vladimir\", \"file\": \"vladimir\", \"champion_id\": \"8\", \"internal_name\": \"vladimir\"},\n \"volibear\": {\"name\": \"Volibear\", \"file\": \"volibear\", \"champion_id\": \"106\", \"internal_name\": \"volibear\"},\n \"warwick\": {\"name\": \"Warwick\", \"file\": \"warwick\", \"champion_id\": \"19\", \"internal_name\": \"warwick\"},\n \"wukong\": {\"name\": \"Wukong\", \"file\": \"monkeyking\", \"champion_id\": \"62\", \"internal_name\": \"wukong\"},\n \"xerath\": {\"name\": \"Xerath\", \"file\": \"xerath\", \"champion_id\": \"101\", \"internal_name\": \"xerath\"},\n \"xin zhao\": {\"name\": \"Xin Zhao\", \"file\": \"xinzhao\", \"champion_id\": \"5\", \"internal_name\": \"xin zhao\"},\n \"yasuo\": {\"name\": \"Yasuo\", \"file\": \"yasuo\", \"champion_id\": \"157\", \"internal_name\": \"yasuo\"},\n \"yorick\": {\"name\": \"Yorick\", \"file\": \"yorick\", \"champion_id\": \"83\", \"internal_name\": \"yorick\"},\n \"zac\": {\"name\": \"Zac\", \"file\": \"zac\", \"champion_id\": \"154\", \"internal_name\": \"zac\"},\n \"zed\": {\"name\": \"Zed\", \"file\": \"zed\", \"champion_id\": \"238\", \"internal_name\": \"zed\"},\n \"ziggs\": {\"name\": \"Ziggs\", \"file\": \"ziggs\", \"champion_id\": \"115\", \"internal_name\": \"ziggs\"},\n \"zilean\": {\"name\": \"Zilean\", \"file\": \"zilean\", \"champion_id\": \"26\", \"internal_name\": \"zilean\"},\n \"zyra\": {\"name\": \"Zyra\", \"file\": \"zyra\", \"champion_id\": \"143\", \"internal_name\": \"zyra\"}};\n\ndef getDict():\n return dict\n\ndef getChampionInfo(name):\n return dict[name.lower()]","sub_path":"lol-data-collection/ChampionDictionary.py","file_name":"ChampionDictionary.py","file_ext":"py","file_size_in_byte":12902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"112916206","text":"\nclass Object(object):\n\n \"\"\"\n \"\"\"\n\n def __init__(self,attributes = None,lazy = False,default_backend = None):\n if not attributes:\n attributes = {}\n self.__dict__['_attributes'] = attributes\n self.__dict__['embed'] = False\n self._default_backend = default_backend\n\n if not 'pk' in attributes:\n self.pk = None\n\n if not lazy:\n self.initialize()\n else:\n self._lazy = True\n\n\n def initialize(self):\n pass\n\n def __getattribute__(self,key):\n try:\n lazy = super(Object,self).__getattribute__('_lazy')\n except AttributeError:\n lazy = False\n if lazy:\n self._lazy = False\n\n if not 'pk' in self._attributes or not self._attributes['pk']:\n raise AttributeError(\"No primary key given!\")\n if not self._default_backend:\n raise AttributeError(\"No backend for lazy loading given!\")\n obj = self._default_backend.get(self.__class__,{'pk':self._attributes['pk']})\n self._attributes = obj.attributes\n self.initialize()\n\n return super(Object,self).__getattribute__(key)\n\n def __getattr__(self,key):\n try:\n super(Object,self).__getattr__(key)\n except AttributeError:\n return self._attributes[key]\n\n def 
__setattr__(self,key,value):\n if key.startswith('_'):\n return super(Object,self).__setattr__(key,value)\n else:\n self._attributes[key] = value\n\n def __delattr__(self,key):\n if key.startswith('_'):\n return super(Object,self).__delattr__(key)\n elif key in self._attributes:\n del self._attributes[key]\n\n @property\n def attributes(self):\n return self._attributes\n\n def save(self,backend = None):\n if not backend:\n if not self._default_backend:\n raise AttributeError(\"No default backend defined!\")\n return self._default_backend.save(self)\n return backend.save(self)\n\n def delete(self,backend = None):\n if not backend:\n if not self._default_backend:\n raise AttributeError(\"No default backend defined!\")\n return self._default_backend.delete(self)\n backend.delete(self)\n\n def __copy__(self):\n # pass the attribute dict positionally; __init__ takes a single 'attributes' argument\n d = self.__class__(self.attributes.copy())\n return d\n\n def __deepcopy__(self,memo):\n import copy  # local import; the stdlib 'copy' module is not imported at the top of this file\n d = self.__class__(copy.deepcopy(self.attributes,memo))\n return d\n\n def __ne__(self,other):\n return not self.__eq__(other)\n \n def __eq__(self,other):\n if id(self) == id(other):\n return True\n if type(self) != type(other):\n return False\n if self.pk == other.pk:\n return True\n if self.attributes == other.attributes:\n return True\n return False\n\n def _represent(self,n = 3):\n\n if n < 0:\n return self.__class__.__name__+\"({...})\"\n\n def truncate_dict(d,n = n):\n\n if isinstance(d,dict):\n return dict([(key,truncate_dict(value,n-1)) for key,value in d.items()])\n elif isinstance(d,list) or isinstance(d,set):\n return [truncate_dict(v,n-1) for v in d]\n elif isinstance(d,Object):\n return d._represent(n-1)\n else:\n return d\n\n return self.__class__.__name__+\"(\"+str(truncate_dict(self._attributes))+\")\"\n\n __str__ = __repr__ = _represent\n","sub_path":"blitzdb/object.py","file_name":"object.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"628758264","text":"\"\"\"\nThis module contains the Model Forms for the accounts app.\n\nUserForm = Used for update_profile (View), ProfileForm = Used (for fields not defined in UserForm) in update_profile (View)\nUserRegistrationForm = Used in user_registration (view), MyUsersForm = Used in user_registration (view)\n\n\"\"\"\n\nfrom django import forms\nfrom .models import User, UserProfile, MyUsers\n\n\nclass UserForm(forms.ModelForm):\n \"\"\"\n UserForm uses default Django User fields\n \"\"\"\n class Meta:\n model = User\n fields = ['first_name', 'last_name', 'email']\n\n\nclass UserProfileForm(forms.ModelForm):\n \"\"\"\n UserProfileForm defines the custom fields for the user Profile\n \"\"\"\n class Meta:\n model = UserProfile\n fields = ['dob', 'nickname']\n\n\nclass MyUsersForm(forms.ModelForm):\n \"\"\"\n MyUsersForm for the project admin to identify who created the User\n \"\"\"\n\n class Meta:\n model = MyUsers\n fields = ['created_by', ]\n\n def clean_created_by(self):\n try:\n created_by = self.cleaned_data.get('created_by')\n return created_by\n except User.DoesNotExist:\n raise forms.ValidationError('The project admin does not exist')\n\n\nclass UserRegistrationForm(forms.ModelForm):\n \"\"\"\n UserRegistrationForm for User Creation\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n\n super(UserRegistrationForm, self).__init__(*args, **kwargs)\n # Making email field required for user registration form\n self.fields['email'].required = True\n\n class Meta:\n model = User\n fields = ['username', 'password', 'email']\n\n def 
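# The Object class above implements lazy loading by intercepting
# __getattribute__: the first real attribute access on a lazy instance
# fetches the full record by primary key. The core of that pattern,
# stripped of the backend details (an illustrative sketch, not blitzdb):
class LazyRecord:
    def __init__(self, pk, loader):
        self.__dict__['_loaded'] = False
        self.__dict__['_pk'] = pk
        self.__dict__['_loader'] = loader  # callable: pk -> attribute dict
        self.__dict__['_attrs'] = {}

    def __getattr__(self, key):
        # only called for attributes not found normally, so the load
        # happens at most once, on first access
        if not self.__dict__['_loaded']:
            self.__dict__['_attrs'] = self.__dict__['_loader'](self.__dict__['_pk'])
            self.__dict__['_loaded'] = True
        try:
            return self.__dict__['_attrs'][key]
        except KeyError:
            raise AttributeError(key)

# Example: the loader runs only when an attribute is first read.
record = LazyRecord(1, lambda pk: {'pk': pk, 'name': 'loaded'})
assert record.name == 'loaded'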
clean_username(self):\n \"\"\"\n Check the new username versus the existing username in the database and throws a validation error\n if it matches, else return's the cleaned username (new_username)\n \"\"\"\n\n new_username = self.cleaned_data.get('username')\n try:\n existing_username = User.objects.get(username__iexact=new_username) # Remember it is a User object\n except User.DoesNotExist:\n return new_username\n raise forms.ValidationError('The username %(value)s already exists. Please try another one',\n params={'value': existing_username.username}, code='username exists')\n\n def clean_email(self):\n \"\"\"\n Check the new email versus the existing email in the database and throws a validation error\n if it matches, else return's the cleaned email (new_email)\n \"\"\"\n\n new_email = self.cleaned_data.get('email')\n try:\n existing_user = User.objects.get(email__exact=new_email) # exact query for the email address\n existing_email = existing_user.email\n except User.DoesNotExist:\n return new_email\n raise forms.ValidationError('The email %(value)s address is already registered with us',\n params={'value': existing_email}, code='email exists')\n","sub_path":"accounts/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"329557805","text":"\"\"\"Utilities for transforming public scenes into COGs\"\"\"\n\nfrom rf.utils.io import s3_bucket_and_key_from_url\n\nimport boto3\nimport rasterio\n\nimport logging\nfrom multiprocessing import cpu_count, Pool\nimport os\nimport subprocess\n\nDATA_BUCKET = os.getenv(\"DATA_BUCKET\")\n\ns3client = boto3.client(\"s3\")\nlogger = logging.getLogger(__name__)\n\n\ndef georeference_file(file_path):\n logger.info(\"Georeferencing %s\", file_path)\n with rasterio.open(file_path) as ds:\n width = ds.width\n height = ds.height\n\n output_dir, source_filename = os.path.split(file_path)\n translated_tiff = os.path.join(\n output_dir, \"{}-referenced.tif\".format(source_filename.split(\".\")[0])\n )\n translate_command = [\n \"gdal_translate\",\n \"-a_ullr\",\n \"0\",\n str(height),\n str(width),\n \"0\",\n \"-a_srs\",\n \"epsg:3857\",\n file_path,\n translated_tiff,\n ]\n logger.debug(\"Running translate command: %s\", translate_command)\n subprocess.check_call(translate_command)\n return translated_tiff\n\n\ndef convert_to_cog(tif_path, local_dir):\n logger.info(\"Converting %s to a cog\", tif_path)\n with rasterio.open(tif_path) as src:\n has_64_bit = rasterio.dtypes.float64 in src.dtypes\n out_path = os.path.join(local_dir, \"cog.tif\")\n cog_command = [\n \"gdal_translate\",\n tif_path,\n \"-co\",\n \"TILING_SCHEME=GoogleMapsCompatible\",\n \"-co\",\n \"COMPRESS=DEFLATE\",\n \"-co\",\n \"BIGTIFF=IF_SAFER\",\n *([\"-co\", \"PREDICTOR=2\"] if not has_64_bit else []),\n \"-of\",\n \"COG\",\n out_path,\n ]\n subprocess.check_call(cog_command)\n return out_path\n","sub_path":"app-tasks/rf/src/rf/utils/cog.py","file_name":"cog.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"343883336","text":"##This file must be copied to /\nfrom flask import Flask, jsonify, request, render_template, abort\n# from Src.utils import ClassificationModelBuilder_short # Uncomment only if you want to re-train your model\nfrom Src.api.predict import predict_api\nfrom jinja2 import TemplateNotFound\n#This next line is added to run in Colab\nfrom flask_ngrok 
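# The clean_username and clean_email validators above use get() plus a
# DoesNotExist handler to test uniqueness. Django's exists() expresses the
# same check without fetching the row; a minimal sketch, assuming the
# default django.contrib.auth User model rather than this app's import:
from django import forms

def validate_unique_username(new_username):
    from django.contrib.auth.models import User  # assumption: default User model
    if User.objects.filter(username__iexact=new_username).exists():
        raise forms.ValidationError(
            'The username %(value)s already exists. Please try another one',
            params={'value': new_username}, code='username exists')
    return new_username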
import run_with_ngrok\n\napplication = Flask(__name__ , template_folder='./Src/templates')\napplication.register_blueprint(predict_api, url_prefix='/em-prende-classification-model')\n#Next line added to run in Colab\nrun_with_ngrok(application)\n\n\n# Loading home page\n@application.route('/', defaults={'page': 'index'})\n@application.route('/')\ndef show(page):\n\n try:\n print('home route')\n return render_template(f'{page}.html', app_name='Em-prende: Classification Problem')\n\n except TemplateNotFound:\n abort(404)\n\n\n# Handling 400 Error\n@application.errorhandler(400)\ndef bad_request(error=None):\n\n message = {\n 'status': 400,\n 'message': 'Bad Request: ' + request.url + '--> Please check your data payload...',\n }\n resp = jsonify(message)\n resp.status_code = 400\n \n return resp\n\n# run application\nif __name__ == \"__main__\":\n application.run()\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"609288835","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport sys, os, json, re, codecs\nimport buildtools.localeTools as localeTools\n\ndef updateLocales(sourceDir, targetDir, localeMap, removed, imported):\n for source, target in localeMap.iteritems():\n targetFile = os.path.join(targetDir, target, 'messages.json')\n hasSource = os.path.exists(os.path.join(sourceDir, source))\n if hasSource and os.path.exists(os.path.join(sourceDir, source, '.incomplete')):\n hasSource = False\n if not hasSource and not os.path.exists(targetFile):\n continue\n\n data = {}\n if os.path.exists(targetFile):\n file = codecs.open(targetFile, 'rb', encoding='utf-8')\n data = json.load(file)\n file.close()\n\n for entry in removed:\n if entry in data:\n del data[entry]\n\n if hasSource:\n for entry in imported:\n fileName, stringID = entry.split(' ', 1)\n sourceFile = os.path.join(sourceDir, source, fileName)\n try:\n sourceData = localeTools.readFile(sourceFile)\n if stringID in sourceData:\n key = re.sub(r'\\..*', '', fileName) + '_' + re.sub(r'\\W', '_', stringID)\n data[key] = {'message': sourceData[stringID]}\n except:\n pass\n\n sourceFile = os.path.join(sourceDir, source, 'meta.properties')\n try:\n sourceData = localeTools.readFile(sourceFile)\n if 'name' in sourceData:\n data['name'] = {'message': sourceData['name'] + ' (Beta)'}\n except:\n pass\n\n try:\n os.makedirs(os.path.dirname(targetFile))\n except:\n pass\n file = codecs.open(targetFile, 'wb', encoding='utf-8')\n json.dump(data, file, ensure_ascii=False, sort_keys=True, indent=2)\n print >>file\n file.close()\n\nif __name__ == '__main__':\n sourceDir = os.path.join('..', 'adblockplus', 'chrome', 'locale')\n targetDir = os.path.join('_locales')\n localeMap = {\n 'ar': 'ar',\n 'bg': 'bg',\n 'ca': 'ca',\n 'cs': 'cs',\n 'da': 'da',\n 'de': 'de',\n 'el': 'el',\n 'en-US': 'en',\n 'en-GB': 'en_GB',\n 'es-ES': 'es',\n 'es-AR': 'es_419',\n 'et': 'et',\n 'fi': 'fi',\n# '': 'fil', ???\n 'fr': 'fr',\n 'he': 'he',\n 'hi-IN': 'hi',\n 'hr': 'hr',\n 'hu': 'hu',\n 'id': 'id',\n 'it': 'it',\n 'ja': 'ja',\n 'ko': 'ko',\n 'lt': 'lt',\n 'lv': 'lv',\n 'nl': 'nl',\n# 'nb-NO': 'no', ???\n 'pl': 'pl',\n 'pt-BR': 'pt_BR',\n 'pt-PT': 'pt_PT',\n 'ro': 'ro',\n 'ru': 'ru',\n 'sk': 'sk',\n 'sl': 'sl',\n 'sr': 'sr',\n 'sv-SE': 'sv',\n 'th': 'th',\n 'tr': 'tr',\n 'uk': 'uk',\n 'vi': 'vi',\n 'zh-CN': 'zh_CN',\n 'zh-TW': 'zh_TW',\n }\n removed = [\n 'not_a_filter_list',\n 'not_found_on_server',\n 
'filter_list_desc',\n 'add_url_button',\n 'delete',\n 'add_a_filter_list',\n 'hovercraft',\n ]\n imported = [\n 'global.properties subscription_status_lastdownload_inprogress',\n 'global.properties subscription_invalid_location',\n 'global.properties synchronize_invalid_url',\n 'global.properties synchronize_connection_error',\n 'global.properties synchronize_invalid_data',\n 'global.properties synchronize_checksum_mismatch',\n 'global.properties remove_subscription_warning',\n 'settings.dtd enabled.column',\n 'settings.dtd remove.label',\n 'settings.dtd addsubscription.label',\n 'subscriptionSelection.dtd subscriptionSelector.label',\n 'subscriptionSelection.dtd addSubscription.label',\n 'subscriptionSelection.dtd other.label',\n 'subscriptionSelection.dtd title.label',\n 'subscriptionSelection.dtd location.label',\n ]\n updateLocales(sourceDir, targetDir, localeMap, removed, imported)\n","sub_path":"update_locales.py","file_name":"update_locales.py","file_ext":"py","file_size_in_byte":3626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"210667558","text":"############ GETTING THE TOTAL AREA OF EACH PIN OVER TIME #################\n\nimport muqfatc.imageanalysis as ia\nimport muqfatc.pingrowth as pinia\n# The myqfatc package requires cv2 (version 3.0.0) to be intalled \nimport sys, os\nimport numpy\n\n# Find all available time courses in the current working directory\nsyspath = os.path.dirname(sys.argv[0]) \nfullpath = os.path.abspath(syspath) \nallobj=os.listdir(fullpath)\n# Getting all the folder names\nmyfolders=[]\nfor f in allobj:\n if len(f)==6 and \".\" not in f and f[0]==\"R\" and f[3]==\"C\" and f!='R03C03':\n myfolders.append(f)\n\n# Getting time and area for pin growth estimates for 90 observations\narea,time=pinia.pintimecourse(myfolders,90,fullpath)\n\n# Write Output to File\nnumpy.savetxt(\"PopulationArea.txt\",area,delimiter=\"\\t\")\nnumpy.savetxt(\"PopulationTime.txt\",time,delimiter=\"\\t\")\ntext_file = open(\"PopulationFolders.txt\", \"w\")\nfor item in myfolders:\n text_file.write(\"%s\\n\" % item)\ntext_file.close()\n","sub_path":"Analyses/ImageAnalysis/PinGrowthEst.py","file_name":"PinGrowthEst.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"334166942","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 10 11:47:40 2019\n\nDistributed training based on Horovod\n\n@author: Ming Jin\n\"\"\"\n\nimport numpy as np\nimport pickle\nimport tensorflow as tf\nfrom tensorflow.keras.applications.vgg16 import VGG16\nfrom tensorflow.keras.applications.resnet50 import ResNet50\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nimport horovod.tensorflow.keras as hvd\nfrom tensorflow.keras import backend as K\n\n\ndef unpickle(file):\n with open(file, 'rb') as fo:\n dict = pickle.load(fo)\n return dict\n\ndef load_data(input_file):\n d = unpickle(input_file)\n x = d['data']\n y = d['labels']\n x = np.dstack((x[:, :4096], x[:, 4096:8192], x[:, 8192:]))\n x = x.reshape((x.shape[0], 64, 64, 3))\n# x = np.dstack((x[:, :1024], x[:, 1024:2048], x[:, 2048:]))\n# x = x.reshape((x.shape[0], 32, 32, 3))\n return x, y\n\ndef dense_to_one_hot(labels_dense, num_classes):\n num_labels = labels_dense.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return 
labels_one_hot\n\ndef split_dataset(features, labels, training = 0.8, validation = 0.3):\n rnd_indices = np.random.rand(len(labels)) < training\n train_x = features[rnd_indices]\n train_y = labels[rnd_indices]\n remain_x = features[~rnd_indices]\n remain_y = labels[~rnd_indices]\n \n rnd_indices2 = np.random.rand(len(remain_y)) < validation\n val_x = remain_x[rnd_indices2]\n val_y = remain_y[rnd_indices2]\n test_x = remain_x[~rnd_indices2]\n test_y = remain_y[~rnd_indices2]\n return train_x, train_y, val_x, val_y, test_x, test_y\n\n# define hyper parameters\n_LR = 0.01\n_EPOCH = 200\n_BATCH_SIZE = 128\n\n# Horovod: initialize Horovod.\nhvd.init()\n\n# Horovod: pin GPU to be used to process local rank (one GPU per process)\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nconfig.gpu_options.visible_device_list = str(hvd.local_rank())\nK.set_session(tf.Session(config = config))\n\n# loading data from binary files\nX = []\nY = []\ndirectory = '/home/tpc2/Downloads/64*64/training_data'\n\nfor i in range(1):\n i = i + 1 \n x, y = load_data(directory + '/train_data_batch_%d' % i)\n X.extend(x)\n Y.extend(y)\n if hvd.rank() == 0:\n print('%d out of 10 files' % i)\n \nX = np.array(X)\nY = np.array(Y)\nY = dense_to_one_hot(Y, 1000)\n\nif hvd.rank() == 0:\n print(X.shape)\n print(Y.shape)\n\ntrain_x, train_y, val_x, val_y, test_x, test_y = split_dataset(X, Y)\n\n# Determine how many batches are there in train and test sets\ntrain_batches = len(train_x) // _BATCH_SIZE\nval_batches = len(val_x) // _BATCH_SIZE\n\n# preparing data generator\n\ntrain_datagen = ImageDataGenerator(\n rotation_range=20,\n width_shift_range=0.2,\n height_shift_range=0.2,\n horizontal_flip=True,\n preprocessing_function=tf.keras.applications.resnet50.preprocess_input)\n\ntest_datagen = ImageDataGenerator(preprocessing_function=tf.keras.applications.resnet50.preprocess_input)\n\ntrain_generator = train_datagen.flow(train_x, train_y, batch_size=_BATCH_SIZE)\nvalidation_generator = test_datagen.flow(val_x, val_y, batch_size=_BATCH_SIZE)\ntest_generator = test_datagen.flow(test_x, test_y, batch_size=_BATCH_SIZE)\n\n#model = VGG16(include_top=True, weights=None, input_tensor=None, input_shape=(32,32,3), pooling=None, classes=1000)\nmodel = ResNet50(weights=None, input_shape=(64,64,3))\n\nopt = tf.keras.optimizers.SGD(lr = _LR * hvd.size(), momentum = 0.9)\nopt = hvd.DistributedOptimizer(opt, compression=hvd.Compression.fp16)\n \nmodel.compile(optimizer = opt, loss = 'categorical_crossentropy', metrics = ['accuracy'])\n\ncallbacks = [\n # Horovod: broadcast initial variable states from rank 0 to all other processes.\n hvd.callbacks.BroadcastGlobalVariablesCallback(0),\n \n # Horovod: average metrics among workers at the end of every epoch.\n #\n # Note: This callback must be in the list before the ReduceLROnPlateau,\n # TensorBoard, or other metrics-based callbacks.\n# hvd.callbacks.MetricAverageCallback(),\n \n # Horovod: set up warmup epochs before adjust the learning rate\n hvd.callbacks.LearningRateWarmupCallback(warmup_epochs=10, verbose=1),\n \n # Reduce the learning rate if training plateaues.\n tf.keras.callbacks.ReduceLROnPlateau(patience=10, verbose = 1)\n \n # Early stopping\n# tf.keras.callbacks.EarlyStopping(monitor = 'val_loss', min_delta = 0, patience = 10, verbose = 1, mode = 'auto', baseline = None),\n \n # Horovod: after the warmup reduce learning rate by 10 on the 30th, 60th and 80th epochs.\n# hvd.callbacks.LearningRateScheduleCallback(start_epoch=10, end_epoch=30, multiplier=1.),\n# 
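# The script above scales the learning rate by hvd.size(): with N workers
# each stepping on _BATCH_SIZE samples, the effective batch per update is
# N * _BATCH_SIZE, and linear LR scaling keeps the per-sample step size
# comparable to single-GPU training. A worked example of the arithmetic
# (illustrative helper, not part of the original script):
def scaled_lr(base_lr, batch_per_worker, workers):
    effective_batch = batch_per_worker * workers
    return base_lr * workers, effective_batch

# e.g. base LR 0.01 with batch 128 on 4 GPUs -> LR 0.04, effective batch 512.
assert scaled_lr(0.01, 128, 4) == (0.04, 512)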
hvd.callbacks.LearningRateScheduleCallback(start_epoch=30, end_epoch=60, multiplier=1e-1),\n# hvd.callbacks.LearningRateScheduleCallback(start_epoch=60, end_epoch=80, multiplier=1e-2),\n# hvd.callbacks.LearningRateScheduleCallback(start_epoch=80, multiplier=1e-3),\n ]\n\n# Horovod: save checkpoints only on the first worker to prevent other workers from corrupting them.\nif hvd.rank() == 0:\n callbacks.append(tf.keras.callbacks.TensorBoard(log_dir='./horovod_logs', histogram_freq=0, write_graph=True, write_grads=False, write_images=False))\n\nmodel.fit_generator(\n train_generator,\n steps_per_epoch = train_batches // hvd.size(),\n epochs =_EPOCH,\n callbacks = callbacks,\n workers = 8,\n validation_data = validation_generator,\n validation_steps = 3 * val_batches // hvd.size())\n\nprint('\\n')\nscore = hvd.allreduce(model.evaluate_generator(test_generator, workers = 8))\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])","sub_path":"horovod-based.py","file_name":"horovod-based.py","file_ext":"py","file_size_in_byte":5771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"227820998","text":" \nimport string\nimport yaml\nimport os\nimport sys\nfrom shutil import copyfile,copytree,rmtree\nfrom string import Template\nfrom pathlib import Path\nfrom ruamel.yaml import YAML\nimport re\nimport datetime\nfrom pprint import pp, pprint\nimport json\nimport jsonref\nimport copy\nfrom generatorLambdaUtils.nginxUtils import createNginxConfig\nfrom generatorLambdaUtils.cliUtils import getParser\n\ndef npmInstall(folder_path):\n os.system(f\"cd {folder_path} ; npm i \")\n os.system(\"cd ..\")\n os.system(\"cd ..\")\n\ndef slsDeploy(folder_path):\n os.system(f\"cd {folder_path} ; sls deploy \")\n os.system(\"cd ..\")\n os.system(\"cd ..\")\n\n\n\ndef prepareServelessYaml(file_path,folder_name,env): \n # date = datetime.datetime.now()\n # dateTime = date.strftime(\"%Y%m%d%M\")\n template_env_file_path = file_path\n with open(template_env_file_path) as f:\n data = yaml.load(f, Loader=yaml.FullLoader)\n data[\"provider\"][\"apiName\"] = folder_name + \" api gateway\" \n data[\"provider\"][\"deploymentBucket\"] = folder_name + \"-leumi\"\n data[\"provider\"][\"tags\"][\"env\"] = \"dev\"\n data[\"provider\"][\"tags\"][\"system\"] = \"Open Banking Aws\"\n data[\"provider\"][\"tags\"][\"applications\"] = \"Open Banking Aws\"\n # for key in data[\"functions\"] :\n # eventList = []\n # for event in data[\"functions\"][key][\"events\"]:\n # eventList.append(data[\"functions\"][key][\"events\"])\n # data[\"functions\"][key][\"events\"] = eventList\n data[\"service\"] = folder_name \n data[\"provider\"][\"profile\"] = \"ob-\" + env\n with open(file_path, 'w') as yaml_file:\n yaml.dump(data, yaml_file)\n yaml_file.close()\n\n\n\ndef changePathParameters(file_path):\n paths = []\n with open(file_path,'r') as f:\n data = yaml.load(f, Loader=yaml.FullLoader)\n for function in data['functions']:\n data['functions'][function][\"events\"][0][\"http\"][\"path\"]\n print(data['functions'][function][\"events\"][0][\"http\"][\"path\"])\n path = data['functions'][function][\"events\"][0][\"http\"][\"path\"] \n paths.append(path)\n # if \"-\" in path :\n # key_pathArry = path.split(\"-\")\n # for i in range(len(key_pathArry))[1::2] :\n # # print(key_pathArry[i])\n # key = key_pathArry[i]\n # key_pathArry[i]= key\n # newPath = \"\".join(key_pathArry)\n # paths.append(newPath)\n \n # data['functions'][function][\"events\"][0][\"http\"][\"path\"] = newPath\n # # 
print(file_path)\n # addAllPaths(paths,file_path,data)\n # with open(file_path,'w') as file : \n # yaml.dump(data,file)\n return addAllPaths(paths,file_path,data)\n\n\ndef addAllPaths(paths,serverless_path,serveless_data) :\n openApi_path = serverless_path.replace(\"serverless.yml\",\"openApi.yaml\")\n pprint(openApi_path)\n with open(openApi_path, 'r') as yaml_in :\n with open(serverless_path.replace(\"serverless.yml\",\"openApi.json\"), \"w\") as json_out:\n yamldata = yaml.load(yaml_in, Loader=yaml.FullLoader)\n json.dump(yamldata, json_out,indent=4)\n with open(serverless_path.replace(\"serverless.yml\",\"openApi.json\"),\"r\") as jsonf : \n data = jsonref.load(jsonf)\n with open(serverless_path.replace(\"serverless.yml\",\"fullOpenApi.json\"), \"w\") as json_out:\n json.dump(data,json_out,indent=4)\n # with open(serverless_path.replace(\"serverless.yml\",\"openApi.json\"),\"r\") as jsonf2 : \n # json.dump(serverless_path.replace(\"serverless.yml\",\"fullOpenApi.json\"),jsonref.load(data),indent=4)\n # jsonf.close()\n\n\n return defGetallPaths(paths,data,serverless_path)\n\ndef defGetallPaths(paths,data,serverless_path) : \n # print(paths)\n pathsobj = {}\n parser = getParser()\n args = parser.parse_args()\n folder_path = serverless_path.replace(\"/\" + \"serverless.yml\",\"\" )\n for key_path in data[\"paths\"] : \n \n if key_path in paths : \n newObj = {}\n print(key_path)\n path_data = data[\"paths\"][key_path]\n pathKeys = path_data.keys()\n for methodKey in pathKeys :\n newObj[methodKey] = {}\n newObj[methodKey][\"paths\"] = [key_path]\n for parametar in data[\"paths\"][key_path][methodKey][\"parameters\"] : \n if(parametar[\"in\"] == \"path\" and parametar[\"required\"] == True ) :\n schema = parametar[\"schema\"]\n if(\"enum\" in schema.keys()) :\n enums = schema[\"enum\"]\n newPaths = []\n for enum in enums : \n for newPath in newObj[methodKey][\"paths\"] : \n if len(args.rt) != 0 :\n print (args.rt)\n for rt in args.rt :\n if newPath.replace(\"{\"+ parametar[\"name\"] + \"}\", enum).find(\"/\" + rt + \"/\") != -1 :\n newPaths.append(newPath.replace(\"{\"+ parametar[\"name\"] + \"}\", enum))\n print(newPath.replace(\"{\"+ parametar[\"name\"] + \"}\", enum))\n break\n else : \n newPaths.append(newPath.replace(\"{\"+ parametar[\"name\"] + \"}\", enum))\n for p in newPaths :\n createNginxConfig(p,folder_path)\n newObj[methodKey][\"paths\"] = newPaths\n pathsobj[key_path] = newObj\n return pathsobj\n \ndef createFullServelrssYmal(filepath,paths,folder_path):\n \n with open(filepath) as f :\n data = yaml.load(f, Loader=yaml.FullLoader)\n newData = data\n pathsKeys= list(paths.keys())\n for function in data[\"functions\"] :\n # pprint(data[\"functions\"][function])\n newEvents = []\n for event in data[\"functions\"][function][\"events\"] : \n for httpEvent in event : \n if event[httpEvent][\"path\"] in list(paths.keys()) : \n newPathsData = paths[event[httpEvent][\"path\"]]\n try : \n for methodKey in list(newPathsData.keys()) :\n for newNewPath in paths[event[httpEvent][\"path\"]][methodKey][\"paths\"] :\n newHttpEvent = copy.deepcopy(event[httpEvent])\n newHttpEvent[\"method\"] = methodKey \n newHttpEvent[\"path\"] = newNewPath\n # pprint (newHttpEvent) \n temp = {}\n temp[\"http\"] = newHttpEvent \n data[\"functions\"][function][\"events\"].append(temp)\n print(temp)\n \n\n except : \n pass\n del data[\"functions\"][function][\"events\"][0]\n # newEvents.append(eventss)\n \n # pprint\n with open(filepath, 'w') as f :\n 
yaml.dump(newData,f)\n\n\n\n","sub_path":"front/generatorLambda/generatorLambdaUtils/deployUtils.py","file_name":"deployUtils.py","file_ext":"py","file_size_in_byte":7364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"225611425","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\n\ndf = pd.read_csv('../Data/E0.csv')\n\nhome_goals = df['FTHG']\naway_goals = df['FTAG']\n\nmaxgoals = np.amax([home_goals,away_goals])\nscores = np.zeros((maxgoals+1,maxgoals+1))\n\nfor i in range(0,len(df)):\n hg = df['FTHG'].iloc[i]\n ag = df['FTAG'].iloc[i]\n scores[hg][ag] = scores[hg][ag]+1 \n\n \n\nsns.heatmap(data=scores)\n\n\n\n","sub_path":"Football/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"55650187","text":"# Copyright 2019 D-Wave Systems Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ================================================================================================\nimport unittest\nimport itertools\n\nimport networkx as nx\nimport penaltymodel.core as pm\nimport dimod\n\nimport penaltymodel.maxgap as maxgap\n\nfrom penaltymodel.core import ImpossiblePenaltyModel\n\n\nclass TestInterface(unittest.TestCase):\n \"\"\"We assume that the generation code works correctly.\n Test that the interface gives a penalty model corresponding to the specification\"\"\"\n def test_typical(self):\n graph = nx.complete_graph(3)\n spec = pm.Specification(graph, [0, 1], {(-1, -1): 0, (+1, +1): 0}, dimod.SPIN)\n\n widget = maxgap.get_penalty_model(spec)\n\n # some quick test to see that the penalty model propogated in\n for v in graph:\n self.assertIn(v, widget.model.linear)\n for (u, v) in graph.edges:\n self.assertIn(u, widget.model.adj[v])\n\n def test_binary_specification(self):\n graph = nx.Graph()\n for i in range(4):\n for j in range(4, 8):\n graph.add_edge(i, j)\n\n decision_variables = (0, 1)\n feasible_configurations = ((0, 0), (1, 1)) # equality\n\n spec = pm.Specification(graph, decision_variables, feasible_configurations, vartype=dimod.BINARY)\n widget = maxgap.get_penalty_model(spec)\n\n self.assertIs(widget.model.vartype, dimod.BINARY)\n\n # test the correctness of the widget\n energies = {}\n for decision_config in itertools.product((0, 1), repeat=2):\n energies[decision_config] = float('inf')\n\n for aux_config in itertools.product((0, 1), repeat=6):\n sample = dict(enumerate(decision_config + aux_config))\n energy = widget.model.energy(sample)\n\n energies[decision_config] = min(energies[decision_config], energy)\n\n for decision_config, energy in energies.items():\n if decision_config in feasible_configurations:\n self.assertAlmostEqual(energy, widget.ground_energy)\n else:\n self.assertGreaterEqual(energy, widget.ground_energy + widget.classical_gap - 
10**-6)\n\n","sub_path":"penaltymodel_maxgap/tests/test_interface.py","file_name":"test_interface.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"208821283","text":"#coding:utf-8\n\nimport requests\nimport urlparse\nfrom bs4 import BeautifulSoup\nfrom regex import regex as rgx\nimport urllib\nclass lib(object):\n def __init__(self,indexurl='http://202.119.210.15/'):\n # self.user={'user':'',\n # 'pwd':''}\n self.indexurl=indexurl\n self.makeurls()\n def makeurls(self):\n index=self.indexurl\n # extract the hostname\n host= urlparse.urlparse(index).hostname\n if host==None:\n pass\n # reading room URL\n getroom = '/FunctionPages/SeatBespeak/BespeakSeat.aspx'\n self.getroom=urlparse.urljoin(index,getroom)\n seatinf='/FunctionPages/SeatBespeak/SeatLayoutHandle.ashx'\n self.seatinf=urlparse.urljoin(index,seatinf)\n setseat = '/FunctionPages/SeatBespeak/BespeakSubmitWindow.aspx'\n self.setseat=urlparse.urljoin(index,setseat)\n # initial request headers\n headers = {\n 'Host': host,\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:49.0) Gecko/20100101 Firefox/49.0',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',\n 'Accept-Encoding': 'gzip, deflate',\n 'X-Requested-With': 'XMLHttpRequest',\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'Connection': 'keep-alive'\n }\n self.headers=headers\n #session\n def login(self,user='',pwd=''):\n self.ss=requests.session()\n self.ss.headers=self.headers\n login1 = self.ss.get(self.indexurl)\n if login1.status_code==200:\n login1text = login1.text\n else:\n pass\n login1text=''\n sp = BeautifulSoup(login1text)\n # get viewstate and eventvalidation, since these two differ for every school\n viewstate = sp.find('input', attrs={'name': \"__VIEWSTATE\"}).attrs['value']\n eventvalidation = sp.find('input', attrs={'name': \"__EVENTVALIDATION\"}).attrs['value']\n # build postdata\n postdata={}\n postdata['__VIEWSTATE']=viewstate\n postdata['__EVENTVALIDATION']=eventvalidation\n postdata['txtUserName']=user\n postdata['txtPassword']=pwd\n postdata['cmdOK.x']='0'\n postdata['cmdOK.y'] = '0'\n login2 = self.ss.post(self.indexurl, data=postdata,allow_redirects=False)\n if login2.status_code==302:\n return True\n else:\n return False\n def getroomid(self):\n headers = {\n 'Referer': urlparse.urljoin(self.indexurl, '/Florms/FormSYS.aspx')\n }\n roomResponse = self.ss.get(self.getroom, headers=headers)\n roomText = roomResponse.text\n libs, rooms = rgx().BespeakSeat(roomText)\n return rooms\n def getseat(self,roomid,date):\n #date=2016/10/22\n datestr = date + \" 0:00:00\"\n postdata = {\n \"roomNum\": roomid,\n \"date\": datestr,\n \"divTransparentTop\": \"0\",\n \"divTransparentLeft\": \"0\"\n }\n seat=self.ss.post(self.seatinf, data=postdata)\n if seat.status_code==200:\n seatdata=rgx().SeatLayoutHandle(seat.text)\n elif seat.status_code==500:\n print('get seat Server Error %s_%s'%(roomid,date))\n seatdata= None\n else:\n print('Unknown getseat Server Error %s_%s' % (roomid, date))\n seatdata=None\n return seatdata\n def trysetseat(self,roomName,seatNum,seatOnclick,datestr):\n def getparam(setseaturl,seatonclick):\n headers = {\n 'Referer': 'http://202.119.210.15/FunctionPages/SeatBespeak/BespeakSeatLayout.aspx'\n }\n url = setseaturl + '?parameters=%s' % seatonclick\n html = self.ss.get(url, headers=headers).text\n dic = rgx().BespeakSubmitWindow(html)\n return url,dic\n\n import base64, re\n
seturl,param=getparam(self.setseat,seatOnclick)\n dic2 = {\n 'X_CHANGED': 'false',\n 'X_TARGET': 'ContentPanel1_btnBespeak',\n 'Form2_Collapsed': 'false',\n 'ContentPanel1_Collapsed': 'false',\n 'X_STATE': '',\n 'X_AJAX': 'true'\n }\n # unclear whether the server cares about dict key order, so just use the most naive approach\n strdic='''{\\\"Form2_ctl00_lblRoomName\\\":{\\\"Text\\\":\\\"%s\\\"},\\\"Form2_ctl01_lblSeatNo\\\":{\\\"Text\\\":\\\"%s\\\"},\\\"Form2_ctl02_lblbeginDate\\\":{\\\"Text\\\":\\\"%s\\\"},\\\"Form2_ctl03_lblEndDate\\\":{\\\"Text\\\":\\\"7:30至8:30\\\"}}'''% (roomName,seatNum,datestr)\n strdic= strdic.replace(\"'\", '\"')\n encodedstr = base64.b64encode(strdic)\n dic2['X_STATE'] = encodedstr\n postdata=dict(param,**dic2)\n postdata[\"__EVENTTARGET\"]='ContentPanel1$btnBespeak'\n header = {\n 'Referer': seturl\n }\n response = self.ss.post(seturl, data=postdata, headers=header)\n result = re.search('alert\\((.+?)\\)',response.text)\n if result:\n return result.group(1)\n else:\n return '服务器错误'\n","sub_path":"lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":5131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"301291424","text":"from typing import Any, Dict, List, Union\n\n\n_T = Any # should be 'JSONTypes' but mypy doesn't support recursive types yet\n\nJSONTypes = Union[Dict[str, _T], List[_T], str, float, bool, None]\n\nJSONObject = Dict[str, JSONTypes]\n\nJSONList = List[JSONTypes]\n\nResponseTypes = Union[bytes, JSONTypes]\n","sub_path":"popget/extratypes.py","file_name":"extratypes.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"39512451","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/1/17 14:21\n# @Author : qkwu\n# @File : Set.py\n\n# A set stores non-duplicate elements\n# Implemented much like dict; a set can be seen as a dict with only keys\n\ns = set()\ns1 = {1, 2, 3}\ns.add('nice')\ns.remove('nice')","sub_path":"Part1Basics/BasicsDataStructure/Set.py","file_name":"Set.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"98019182","text":"#coding=utf-8\nimport os\n# find which .py files under /home/python contain 'hello'\nlist_filepy=[]\ndef read_file(parent_dir,file_name):\n abs_file_dir=os.path.join(parent_dir,file_name)\n if os.path.isdir(abs_file_dir):\n for file in os.listdir(abs_file_dir):\n read_file(abs_file_dir,file)\n else:\n if abs_file_dir.endswith(\".py\"):\n if read_find_hello(abs_file_dir):\n list_filepy.append(abs_file_dir)\n\ndef read_find_hello(file_name):\n f=open(file_name,'r', encoding='UTF-8')\n flag=False\n while True:\n if \"hello\" in f.readline():\n flag=True\n break\n elif f.readline()==\"\":\n break\n f.close()\n return flag\n\n\n\nif __name__==\"__main__\":\n read_file(r\"C:\\Users\\Administrator\\PycharmProjects\",\"E6300\")\n print(list_filepy)","sub_path":"E6300/std_file.py","file_name":"std_file.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"222171593","text":"#!/usr/bin/env python\n# vim: set fileencoding=utf-8 :\n\n# Copyright (c) 2015 Florian Brucker (mail@florianbrucker.de).\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"\nTests for ``coba.storage``.\n\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom future.builtins import *\nfrom future.builtins.disabled import *\n\nimport io\nimport os\nimport os.path\nimport shutil\nimport tempfile\nimport time\n\nfrom nose.tools import eq_ as eq, ok_ as ok, raises\n\nfrom coba import Revision\nfrom coba.crypto import CryptoProvider, is_encrypted\nfrom coba.storage import *\nfrom coba.utils import sha1\n\nfrom .test_coba_crypto import GOT_GPGME, GPG_KEY_DIR, needs_gpgme\nfrom .utils import parameterized\n\n\ndef _fake_revision(store, path):\n return Revision(store, path, time.time(), 1, 2, 3, 4, 5, 6, 7, 8)\n\n\nrecipients = [(None,)]\nif GOT_GPGME:\n recipients.append(('test@coba',))\n\n\nclass TestRevisionStore(object):\n \"\"\"\n Tests for ``coba.storage.Store``.\n \"\"\"\n def setup(self):\n self.path = tempfile.mkdtemp()\n self.driver = local_storage_driver(self.path)\n\n def make_store(self, recipient=None):\n crypto_provider = CryptoProvider(recipient, GPG_KEY_DIR)\n return Store(self.driver, 'container', crypto_provider)\n\n def teardown(self):\n shutil.rmtree(self.path, ignore_errors=True)\n\n @parameterized(recipients)\n def test_set_get_append_revisions(self, recipient):\n \"\"\"\n Setting, getting and appending revisions.\n \"\"\"\n store = self.make_store(recipient)\n p = '/foo/bar'\n eq(store.get_revisions(p), [])\n rev1 = _fake_revision(store, p)\n rev2 = _fake_revision(store, p)\n store.set_revisions(p, [rev1, rev2])\n revs = store.get_revisions(p)\n eq(revs, [rev1, rev2])\n rev3 = store.append_revision(p, time.time(), 1, 2, 3, 4, 5, 6, 7, 8)\n revs = store.get_revisions(p)\n eq(revs, [rev1, rev2, rev3])\n\n @parameterized(recipients)\n def test_put_get_content(self, recipient):\n \"\"\"\n Storing and retrieving content.\n \"\"\"\n store = self.make_store(recipient)\n content = b'foobar'\n hash = store.put_content(io.BytesIO(content))\n eq(hash, sha1(content))\n eq(store.get_content(hash).read(), content)\n\n @parameterized(recipients)\n @raises(KeyError)\n def test_get_content_keyerror(self, recipient):\n \"\"\"\n Getting non-existing content raises ``KeyError``.\n \"\"\"\n self.make_store(recipient).get_content('does not exist')\n\n @parameterized(recipients)\n def test_paths_are_hashed(self, recipient):\n \"\"\"\n Paths are hashed.\n \"\"\"\n store = self.make_store(recipient)\n p = '/foo/bar'\n rev = store.append_revision(p, time.time(), 1, 2, 3, 4, 5, 6, 7, 8)\n for root, filenames, dirnames in os.walk(self.path):\n for name in filenames + dirnames:\n n = name.lower()\n ok('foo' not in n)\n ok('bar' not in n)\n\n @needs_gpgme\n def test_files_are_encrypted(self):\n \"\"\"\n Files in the store are encrypted.\n \"\"\"\n store = 
self.make_store('test@coba')\n p = '/foo/bar'\n rev = store.append_revision(p, time.time(), 1, 2, 3, 4, 5, 6, 7, 8)\n store.put_content(io.BytesIO(b'foobar'))\n for d in [Store._META_PREFIX, Store._BLOB_PREFIX, Store._SALT_PREFIX]:\n for root, filenames, _ in os.walk(os.path.join(self.path, d)):\n for filename in filenames:\n with open(os.path.join(root, filename), 'rb') as f:\n ok(is_encrypted(f))\n\n @raises(ValueError)\n def test_invalid_store(self):\n \"\"\"\n An invalid store raises ``ValueError``.\n \"\"\"\n self.driver.create_container('invalid')\n Store(self.driver, 'invalid', None)\n\n @needs_gpgme\n def test_mixing_encrypted_and_non_encrypted_content(self):\n \"\"\"\n Mixing encrypted and non-encrypted content.\n \"\"\"\n p = '/foo/bar'\n store = self.make_store()\n rev1 = store.append_revision(p, time.time(), 1, 2, 3, 4, 5, 6, 7, 8)\n store = self.make_store('test@coba')\n eq(store.get_revisions(p), [rev1])\n rev2 = store.append_revision(p, time.time(), 1, 2, 3, 4, 5, 6, 7, 8)\n store = self.make_store()\n eq(store.get_revisions(p), [rev1, rev2])\n rev3 = store.append_revision(p, time.time(), 1, 2, 3, 4, 5, 6, 7, 8)\n store = self.make_store('test@coba')\n eq(store.get_revisions(p), [rev1, rev2, rev3])\n\n @raises(ValueError)\n def test_unsupported_format_version(self):\n \"\"\"\n Loading a store with an unsupported format version.\n \"\"\"\n old_format_version = Store._FORMAT_VERSION\n Store._FORMAT_VERSION += 1\n try:\n self.make_store()\n finally:\n Store._FORMAT_VERSION = old_format_version\n self.make_store()\n\n def test_format_version(self):\n \"\"\"\n Format version property.\n \"\"\"\n eq(self.make_store().format_version, 1)\n\n","sub_path":"test/test_coba_storage.py","file_name":"test_coba_storage.py","file_ext":"py","file_size_in_byte":6186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"593161929","text":"\n\nfrom xai.brain.wordbase.nouns._mode import _MODE\n\n#calss header\nclass _MODES(_MODE, ):\n\tdef __init__(self,): \n\t\t_MODE.__init__(self)\n\t\tself.name = \"MODES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"mode\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_modes.py","file_name":"_modes.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"639762572","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\nfrom __future__ import print_function # use python 3 syntax but make it compatible with python 2\nfrom __future__ import division # ''\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation\nimport numpy as np\nimport time\nimport os\nimport math\n\nclass Map2D:\n def __init__(self, map_description_file):\n \"\"\"\n Load and initialize map from file. \\\n\n map_description_file: path to a text file containing map description in the standard format. \\\n Example for a 3x3 grid map, with (squared) cells of 400mm side length called mapa0. 
\\\n All free space, i.e., all connections between cells are open, except those on the limits of the map.\n For more details on the format, see class documentation.\n\n mapa0.txt content:\n 3 3 400\n 0 0 0 0 0 0 0\n 0 1 1 1 1 1 0\n 0 1 1 1 1 1 0\n 0 1 1 1 1 1 0\n 0 1 1 1 1 1 0\n 0 1 1 1 1 1 0\n 0 0 0 0 0 0 0\n\n \"\"\"\n # params to visualize\n self.mapLineStyle='r-'\n self.costValueStyle='g*'\n self.verbose = True\n # set to False to stop displaying plots interactively (and maybe just save the screenshots)\n # self.verbose = False\n self.current_ax = None\n\n # variables about map params\n self.sizeX=0\n self.sizeY=0\n self.sizeCell=0\n\n self.connectionMatrix = None\n self.costMatrix = None\n self.currentPath = None\n\n #self.endx\n #self.endy\n\n if self._loadMap(map_description_file):\n print(\"Map %s loaded ok\" % map_description_file)\n else:\n print(\"Map %s NOT loaded\" % map_description_file)\n\n\n # from python docs: https://docs.python.org/3/tutorial/classes.html#private-variables\n # “Private” instance variables that cannot be accessed except from inside an object don’t exist in Python.\n # However, there is a convention that is followed by most Python code: a name prefixed with an underscore \\\n # (e.g. _spam) should be treated as a non-public part of the API (whether it is a function, a method or a data member).\n\n # ############################################################\n # private methods\n # ############################################################\n def _initConnections(self, init_value=0):\n \"\"\"\n to initialize the matrix, we set all connections to be closed.\n When the file with the description is loaded, it will \"open\" (set to 1) the corresponding ones.\n \"\"\"\n self.connectionMatrix = np.ones( (2*self.sizeX+1, 2*self.sizeY+1) ) * init_value\n\n def _initCostMatrix(self, init_value=-2):\n \"\"\"\n to initialize the matrix, we set all connections to be closed.\n When the file with the description is loaded, it will \"open\" (set to 1) the corresponding ones.\n \"\"\"\n self.costMatrix = np.ones( (self.sizeX, self.sizeY) ) * init_value\n\n # Example costMatrix (filled manually!) for Map1\n # if we plan to go from 0,0 to 2,0\n # self.costMatrix[2,0] = 0\n # self.costMatrix[1,0] = 1\n # self.costMatrix[1,1] = 2\n # self.costMatrix[1,2] = 3\n # self.costMatrix[0,2] = 4\n # self.costMatrix[2,2] = 4\n # self.costMatrix[0,1] = 5\n # self.costMatrix[2,1] = 5\n # self.costMatrix[0,0] = 6\n\n\n\n def _loadMap(self, mapFileName):\n \"\"\"\n Load map from a txt file (mapFileName) to fill the map params and connectionMatrix. \\\n NOTES: \\\n \\t connectionMatrix is a numpy array \\\n \\t Function will return False if something went wrong loading the map file.\n \"\"\"\n try:\n # FILL GLOBAL VARIABLES dimX dimY cellSize\n loadingOk=False\n mapF = open(mapFileName, \"r\")\n\n # 1. special case for first line. initialize dimX dimY cellSize\n header = mapF.readline() #next()\n tmp = header.split() # any whitespace string is a separator and empty strings are removed from the result\n if self.verbose:\n print(\"Header line: %s \" % header)\n parsed_header = [int(c) for c in tmp]\n # expected to have three numbers: sizeX sizeY sizeCell_in_mm\n if len(parsed_header)==3:\n self.sizeX, self.sizeY, self.sizeCell = parsed_header\n else:\n print(\"Wrong header in map file: %s\" % header)\n return False\n\n # 2.init connectionMatrix and costMatrix\n self._initConnections()\n self._initCostMatrix()\n\n # 3. 
load rest of the map connection lines information\n for indx, line in enumerate(mapF):\n # we start loading from the file the \"top\" row of the map\n current_row = (self.connectionMatrix.shape[1]-1) - indx\n # Split numbers in the line. Any whitespace string is a separator and empty strings are removed from the result\n tmp = line.split()\n if self.verbose:\n print(\"Line for map row %d: %s \" % (current_row, line))\n parsed_line = [int(c) for c in tmp]\n\n if len(parsed_line) == self.connectionMatrix.shape[0] and indx < self.connectionMatrix.shape[1]:\n self.connectionMatrix[:, current_row] = parsed_line\n elif len(parsed_line): # don't give errors because of empty lines\n print(\"Wrong connectionMatrix (%s) row data: %s\" % (self.connectionMatrix.shape(), line) )\n return False\n mapF.close()\n loadingOk = True\n except Exception as e:\n print(\"ERROR:\", e.__doc__)\n print(e)\n #raise\n loadingOk = False\n\n return loadingOk\n\n def _cell2connCoord(self, cellX, cellY, numNeigh):\n \"\"\"\n Input:\n cellX, cellY: cell coordinates (cellX, cellY) in the map grid\n numNeigh: index of one of the cell 8-neighbours\n\n Output:\n (connX,connY): 2D coordinates (in the connectionMatrix!!) \\\n of the connection of the input cell to the input neighbour\n \"\"\"\n connX=2*cellX+1\n connY=2*cellY+1\n p = [connX, connY]\n\n result = {\n 0: lambda p: [ p[0], p[1]+1],\n 1: lambda p: [ p[0]+1, p[1]+1],\n 2: lambda p: [ p[0]+1, p[1]],\n 3: lambda p: [ p[0]+1, p[1]-1],\n 4: lambda p: [ p[0], p[1]-1],\n 5: lambda p: [ p[0]-1, p[1]-1],\n 6: lambda p: [ p[0]-1, p[1]],\n 7: lambda p: [ p[0]-1, p[1]+1],\n }\n\n return result[numNeigh](p)\n\n def _pos2cell(self, x_mm, y_mm):\n \"\"\" Convert from robot odometry coordinates (in mm) to cell coordinates \"\"\"\n # make sure we discretize the result to the closest lower integer value\n x_cell = int(np.floor(x_mm/self.sizeCell))\n y_cell = int(np.floor(y_mm/self.sizeCell))\n return [x_cell, y_cell]\n #\n # def _pos2cell_m(self, x_m, y_m):\n #\n\n\n # ############################################################\n # public methods\n # ############################################################\n def setConnection(self, cellX, cellY, numNeigh):\n \"\"\"\n open a connection, i.e., we can go straight from cellX,cellY to its neighbour number numNeigh\n \"\"\"\n # from coordinates in the grid of cells to coordinates in the connection matrix\n [connX, connY] = self._cell2connCoord(cellX, cellY, numNeigh)\n self.connectionMatrix[connX, connY]=1 # True\n\n def deleteConnection(self, cellX, cellY, numNeigh):\n \"\"\"\n close a connection, i.e., we can NOT go straight from cellX,cellY to its neighbour number numNeigh\n \"\"\"\n # from coordinates in the grid of cells to coordinates in the connection matrix\n [connX, connY] = self._cell2connCoord(cellX, cellY, numNeigh)\n self.connectionMatrix[connX, connY] = 0 # False\n\n def isConnectedNumber(self, cellX, cellY, numNeigh):\n \"\"\"\n returns True if the connnection from cell (x,y) to its neighbour number numNeigh is open.\n\n The neighbour indexing is considered as follows\n (8-neighbours from cell x,y numbered clock-wise):\n\n 7 0 1\n 6 (x,y) 2\n 5 4 3\n\n \"\"\"\n [connX, connY] = self._cell2connCoord(cellX, cellY, numNeigh)\n\n return self.connectionMatrix[connX, connY]\n\n def isConnected(self, cellX, cellY, numNeigh):\n \"\"\"\n returns True if the connnection from cell (x,y) to its neighbour number numNeigh is open.\n\n The neighbour indexing is considered as follows\n (8-neighbours from cell x,y numbered 
clock-wise):\n\n 7 0 1\n 6 (x,y) 2\n 5 4 3\n\n \"\"\"\n n = self.isConnectedNumber(cellX, cellY, numNeigh)\n return n>0.5\n\n # aux functions to display (or save image) with robot and map stuff\n def _drawGrid(self):\n \"\"\"\n aux function to create a grid with map lines\n \"\"\"\n if not self.current_ax:\n print(\"Error plotting: do not call this function directly, \\\n call drawMap first to create a plot where to draw\")\n return False\n\n plt.rc('grid', linestyle=\"--\", color='gray')\n plt.grid(True)\n plt.tight_layout()\n\n x_t = range(0, (self.sizeX+1)*400, 400)\n y_t = range(0, (self.sizeY+1)*400, 400)\n x_labels = [str(n) for n in x_t]\n y_labels = [str(n) for n in y_t]\n plt.xticks(x_t, x_labels)\n plt.yticks(y_t, y_labels)\n\n # Main rectangle\n X = np.array([0, self.sizeX, self.sizeX, 0, 0]) * self.sizeCell\n Y = np.array([0, 0, self.sizeY, self.sizeY, 0]) * self.sizeCell\n self.current_ax.plot(X, Y, self.mapLineStyle)\n\n # \"vertical\" walls\n for i in range(2, 2*self.sizeX, 2):\n for j in range(1, 2*self.sizeY, 2):\n if not self.connectionMatrix[i,j]:\n # paint \"right\" wall from cell (i-1)/2, (j-1)/2\n cx= np.floor((i-1)/2)\n cy= np.floor((j-1)/2)\n X = np.array([cx+1, cx+1]) * self.sizeCell\n Y = np.array([cy, cy+1]) * self.sizeCell\n self.current_ax.plot(X, Y, self.mapLineStyle)\n\n # \"horizontal\" walls\n for j in range(2, 2*self.sizeY, 2):\n for i in range(1, 2*self.sizeX, 2):\n if not self.connectionMatrix[i,j]:\n # paint \"top\" wall from cell (i-1)/2, (j-1)/2\n cx=np.floor((i-1)/2)\n cy=np.floor((j-1)/2)\n X = np.array([cx, cx+1]) * self.sizeCell\n Y = np.array([cy+1, cy+1]) * self.sizeCell\n self.current_ax.plot(X, Y, self.mapLineStyle)\n plt.axis('equal')\n\n return True\n\n\n # aux functions to display the current CostMatrix on the map\n def _drawCostMatrix(self):\n \"\"\"\n aux function to display the current costMatrix values on the map\n \"\"\"\n if not self.current_ax:\n print(\"Error plotting: do not call this function directly, \\\n call drawMap first to create a plot where to draw\")\n return False\n\n # \"center\" of each cell\n for i in range(0, self.sizeX):\n for j in range(0, self.sizeY):\n cx= i*self.sizeCell + self.sizeCell/2.\n cy= j*self.sizeCell + self.sizeCell/2.\n X = np.array([cx])\n Y = np.array([cy])\n cost = self.costMatrix[i,j]\n self.current_ax.text(X, Y, str(cost))\n\n\n plt.axis('equal')\n\n return True\n\n # Draws the robot at loc_x_y_th with color (c) and size (small/large)\n def _drawRobot(self, loc_x_y_th=[0,0,0], robotPlotStyle='b', small=False):\n \"\"\"\n UPDATES existing plot to include current robot position\n It expects an existing open figure (probably with the map already on it)\n\n loc_x_y_th is the position x,y and orientation in mm and radians of the main axis of the robot\n\n \"\"\"\n if not self.current_ax:\n print(\"Error plotting: do not call this function directly, \\\n call drawMap first to create a plot where to draw\")\n return False\n\n if small:\n largo, corto, descentre = [80, 50, 5]\n else:\n largo, corto, descentre = [160, 100, 10]\n\n trasera_dcha=np.array([-largo,-corto,1])\n trasera_izda=np.array([-largo,corto,1])\n delantera_dcha=np.array([largo,-corto,1])\n delantera_izda=np.array([largo,corto,1])\n frontal_robot=np.array([largo,0,1])\n\n tita=loc_x_y_th[2]\n Hwe=np.array([[np.cos(tita), -np.sin(tita), loc_x_y_th[0]],\n [np.sin(tita), np.cos(tita), loc_x_y_th[1]],\n [0, 0 , 1]])\n\n Hec=np.array([[1,0,descentre],\n [0,1,0],\n [0,0,1]])\n\n extremos=np.array([trasera_izda, delantera_izda, delantera_dcha, trasera_dcha, trasera_izda,
frontal_robot, trasera_dcha])\n robot=np.dot(Hwe, np.dot(Hec,np.transpose(extremos)))\n\n self.current_ax.plot(robot[0,:], robot[1,:], robotPlotStyle)\n\n return True\n\n def drawMapWithRobotLocations(self,\n robotPosVectors=[ [0,0,0], [600, 600, 3.14] ],\n saveSnapshot=True):\n \"\"\" Overloaded version of drawMap to include robot positions \"\"\"\n return self.drawMap(robotPosVectors=robotPosVectors, saveSnapshot=saveSnapshot)\n\n\n def drawMap(self, robotPosVectors = None, saveSnapshot=False):\n \"\"\"\n Generates a plot with currently loaded map status\n\n NOTE:\n if verbose, it displays the plot\n if saveSnapshot: saves a figure as mapstatus_currenttimestamp_FIGNUM.png\n \"\"\"\n self.verbose=True\n #self.verbose=False\n\n # create a new figure and set it as current axis\n current_fig = plt.figure()\n self.current_ax = current_fig.add_subplot(111)\n\n self._drawGrid()\n\n # if flag is true, draw also current CostMatrix\n if self.verbose:\n self._drawCostMatrix()\n\n if robotPosVectors:\n for loc in robotPosVectors:\n #print(\"Robot in pos: \", loc)\n self._drawRobot(loc_x_y_th=loc, robotPlotStyle='b--')\n # plot last robot position with solid green line\n self._drawRobot(loc_x_y_th=loc, robotPlotStyle='g-')\n\n if saveSnapshot:\n ts = str(time.time())\n snapshot_name = \"mapstatus_\"+ts+\"_F\"+str(current_fig.number)+\".png\"\n print(\"saving %s \" % snapshot_name)\n plt.savefig(snapshot_name)\n\n if self.verbose:\n current_fig.set_visible(True)\n current_fig.show()\n print(\"Press ENTER in the plot window to continue ... \")\n current_fig.waitforbuttonpress()\n else:\n current_fig.set_visible(False)\n\n return current_fig\n\n\n def findPath(self, point_ini, point_end):\n \"\"\" overloaded call to planPath (x_ini, y_ini, x_end, y_end) \"\"\"\n return self.planPath(point_ini[0], point_ini[1],\n point_end[0], point_end[1])\n\n # ############################################################\n # METHODS to IMPLEMENT in P4\n # ############################################################\n\n def neighbourCell(self, x, y, neighbour):\n if neighbour==0:\n return [x, y+1]\n elif neighbour==2:\n return [x+1, y]\n elif neighbour==4:\n return [x, y-1]\n elif neighbour==6:\n return [x-1, y]\n\n def printCostMatrix(self):\n \"\"\"\n Prints the cost matrix in the same order as the maps (X right, Y up)\n \"\"\"\n print(np.rot90(self.costMatrix))\n\n def hasValue(self, neighbour_cell, front):\n \"\"\"\n returns True if the cell already has a value or is in the frontier\n \"\"\"\n return neighbour_cell in front or self.costMatrix[neighbour_cell[0], neighbour_cell[1]] >= 0\n \n def cellExists(self, cell):\n x=cell[0]\n y=cell[1]\n return 0<=x= self.costMatrix.size:\n end=True\n frontier=newFront\n cost += 1\n self.printCostMatrix()\n return True\n\n\n\n def findPathFromPos(self, x_ini, y_ini, x_end, y_end):\n x_milli = max(x_ini*1000.0, 0)\n y_milli = max(y_ini*1000.0, 0)\n x,y = self._pos2cell(x_milli, y_milli)\n return self.findPath(x,y, x_end, y_end)\n\n def findPath(self, x_ini, y_ini, x_end, y_end):\n \"\"\"\n x_ini, y_ini, x_end, y_end: integer values that indicate \\\n the x and y coordinates of the starting (ini) and ending (end) cell\n Finds the path between ini and end\n \"\"\"\n self.goal = [x_end,y_end]\n if not self.fillCostMatrix():\n return False\n\n self.currentPath = []\n pathFound = False\n current_x=x_ini\n current_y=y_ini\n self.endx=x_end\n self.endy=y_end\n while not pathFound:\n x_min=0\n y_min=0\n min_cost=math.inf\n foundOne=False\n if(self.isConnected(current_x, current_y, 0) 
and self.costMatrix[current_x][current_y+1] cur_count:\r\n next_url = '{}{}?page={}'.format(host, request.path, page + 1)\r\n if cur_count > max_per_page:\r\n previous_url = '{}{}?page={}'.format(host, request.path, page - 1)\r\n return OrderedDict([\r\n ('count', total),\r\n ('next', next_url),\r\n ('previous', previous_url),\r\n ('results', data)\r\n ])\r\n\r\n return OrderedDict([\r\n ('count', max_per_page),\r\n ('page', page),\r\n ('total', total),\r\n ('next', next_url),\r\n ('previous', previous_url),\r\n ('results', data)\r\n ])","sub_path":"sanic_api/apps/utils/order_api.py","file_name":"order_api.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"239902154","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Dec 24 02:26:47 2018\r\n\r\n@author: MOBASSIR\r\n\"\"\"\r\n\r\n\r\n\"\"\"\r\n\r\ncode for this research work was taken from catboost's(A fast, scalable, high performance Gradient Boosting on Decision Trees library, used for ranking, classification, regression and other machine learning tasks for Python, R, Java, C++. Supports computation on CPU and GPU. https://catboost.ai)\r\ndoccumentation.\r\n\r\nref link : https://github.com/catboost/catboost\r\n\r\n\r\n\"\"\"\r\n#importing libraries\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom catboost import CatBoostClassifier,Pool,cv\r\nfrom sklearn.preprocessing import LabelEncoder\r\nimport matplotlib.pyplot as plt\r\n \r\n \r\n#Importing the dataset\r\ndataset = pd.read_csv('appendix_for_ml.csv')\r\nX = dataset.iloc[:, :-1].values\r\ny = dataset.iloc[:, 8].values\r\n\r\n\r\nlabelencoder_y = LabelEncoder()\r\ny = labelencoder_y.fit_transform(y)\r\n\r\n\r\n\r\nrnd_state = 63\r\n\r\n# Splitting the dataset into the Training set and Test set\r\nfrom sklearn.cross_validation import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.4, random_state = 0)\r\n\r\n#X_test[104][10] = \"yes\"\r\n\r\n\r\ncat_featuresind=[0,1,2,3,4,5,6,7]\r\n\r\nclf = CatBoostClassifier (iterations=10,random_seed=rnd_state, custom_metric='Accuracy')\r\n\r\nclf.fit(X_train, y_train, cat_features=cat_featuresind,plot = True)\r\n\r\n\r\nclf.score(X_test, y_test)\r\n\r\n\r\n\r\n\r\nfrom sklearn.metrics import confusion_matrix,accuracy_score\r\ny_pred = clf.predict(X_test)\r\n\r\n#print(clf.predict(X_test[104]))\r\ncm = confusion_matrix (y_test, y_pred)\r\n\r\n\r\nfrom sklearn.metrics import recall_score,precision_score\r\n\r\nprint(recall_score(y_test,y_pred,average='macro'))\r\n\r\nprint(precision_score(y_test, y_pred, average='micro'))\r\n\r\n\r\nprint(accuracy_score(y_test,y_pred))\r\n\r\n\r\n\r\n#cr0ss validati0n\r\n\r\ncv_params = clf.get_params()\r\ncv_params.update({\r\n 'loss_function': 'Logloss'\r\n})\r\ncv_data = cv(\r\n Pool(X, y, cat_features=cat_featuresind),\r\n cv_params,\r\n plot=True\r\n)\r\n\r\n\r\nprint('Best validation accuracy score: {:.2f}±{:.2f} on step {}'.format(\r\n np.max(cv_data['test-Accuracy-mean']),\r\n cv_data['test-Accuracy-std'][np.argmax(cv_data['test-Accuracy-mean'])],\r\n np.argmax(cv_data['test-Accuracy-mean'])\r\n))\r\n\r\n\r\nprint('Precise validation accuracy score: {}'.format(np.max(cv_data['test-Accuracy-mean'])))\r\n\r\n\r\n\r\n\r\nimportances = clf.feature_importances_\r\nprint(clf.feature_importances_)\r\nplt.title('Feature Importances ')\r\nplt.barh(range(len(cat_featuresind)), importances[cat_featuresind], color='b', align='center')\r\n#plt.yticks(dataset[i][0] 
for i in cat_featuresind)\r\nplt.xlabel('Relative Importance')\r\nplt.savefig('Save.JPEG')\r\nplt.savefig('destination_path.eps', format='eps', dpi=1000)\r\nplt.savefig('myimage.svg', format='svg', dpi=1200)\r\n\r\nplt.show()\r\n \r\n\r\n\r\n\r\n","sub_path":"featureiportance_catboost.py","file_name":"featureiportance_catboost.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"615897145","text":"import cv2\n\nvid = cv2.VideoCapture('./video/small.avi')\n\nwhile True:\n ret, frame = vid.read()\n if not ret:\n break\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n if ret:\n cv2.imshow('video',frame)\n if cv2.waitKey(10) > 0:\n break\n\nvid.release()\ncv2.destroyAllWindows()","sub_path":"python/opencv/src/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"125203857","text":"import pandas as pd\nimport numpy as np\nfrom sklearn import datasets\nfrom sklearn.cross_validation import StratifiedKFold\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA, KernelPCA\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, roc_auc_score, confusion_matrix\n\n\npca, lda, rbf = 0, 0, 1\n\niris = datasets.load_iris()\nX = iris.data\ny = iris.target\n\n# pandafy the data\nX = pd.DataFrame(data=X, columns=list(\"ABCD\"))\ny = pd.Series(y)\n\n# standardize the features\nKfold = StratifiedKFold(y = y,\n n_folds=10,\n random_state=1)\n\nsc = StandardScaler()\n\nX_train, X_test, y_train, y_test = \\\n train_test_split(X, y, test_size=0.3, random_state=0)\n\n# standardize\nX_train_std = sc.fit_transform(X_train)\nX_test_std = sc.transform(X_test)\n# pca\nif pca:\n pca = PCA(n_components=2)\n X_train_std_pca = pca.fit_transform(X_train_std)\n X_test_std_pca = pca.transform(X_test_std)\n\nif lda:\n lda = LinearDiscriminantAnalysis(solver=\"svd\",n_components=2)\n X_train_std_lda = lda.fit_transform(X_train_std, y_train)\n\nif rbf:\n rbf = KernelPCA(n_components=2, kernel='rbf', gamma=10)\n X_train_rbf = rbf.fit_transform(X_train)\n\npipe1 = Pipeline([('sc', StandardScaler()),\n ('pca', PCA(n_components=3)),\n ('lr', LogisticRegression(penalty='l1', C=10))])\n\npipe2 = Pipeline([('sc', StandardScaler()),\n ('lda', LinearDiscriminantAnalysis(n_components=2)),\n ('lr', LogisticRegression(penalty='l1', C=10))])\n\npipe3 = Pipeline([('rbf', KernelPCA(n_components=2, kernel='rbf', gamma=2)),\n ('lr', LogisticRegression(penalty='l1', C=10))])\n\npipe1 = pipe1.fit(X_train, y_train)\npipe2 = pipe2.fit(X_train, y_train)\npipe3 = pipe3.fit(X_train, y_train)\n\npipelines = ['Standardize + PCA + Logistic Reg',\n 'Standardize + LDA + Logistic Reg',\n 'Standardize + RBF + Logistic Reg']\n\nfor pipeline, pipe in zip(pipelines, [pipe1, pipe2, pipe3]):\n print(\"\\n\\nPipeline: \" + str(pipeline))\n print(\"Training metrics .....\")\n confmat = confusion_matrix(y_train, pipe.predict(X_train))\n print(\"Confusion matrix: \\n\",confmat)\n accscore = np.trace(confmat).astype('float') / np.sum(confmat).astype('float')\n print(\"Accuracy : %.2f\" % accscore)\n print(\"Builtin Accuracy : %2f\" % accuracy_score(y_train, pipe.predict(X_train)))\n 
print(\"Precision : %2f\" % precision_score(y_train, pipe.predict(X_train), average=\"macro\"))\n print(\"Recall : %2f\" % recall_score(y_train, pipe.predict(X_train), average=\"macro\"))\n\n print(\"Testing metrics .....\")\n confmat = confusion_matrix(y_test, pipe.predict(X_test))\n print(\"Confusion matrix: \\n\", confmat)\n accscore = np.trace(confmat).astype('float') / np.sum(confmat).astype('float')\n print(\"Accuracy : %.2f\" % accscore)\n print(\"Builtin Accuracy : %2f\" % accuracy_score(y_test, pipe.predict(X_test)))\n print(\"Precision : %2f\" % precision_score(y_test, pipe.predict(X_test), average=\"macro\"))\n print(\"Recall : %2f\" % recall_score(y_test, pipe.predict(X_test), average=\"macro\"))\n\n\n# Notes\n# LDA required fewer components than PCA to achieve the same performance\n# rbf performs much worse than linear kernel","sub_path":"scikit-learn/FeatureExtraction.py","file_name":"FeatureExtraction.py","file_ext":"py","file_size_in_byte":3489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"128598654","text":"import pyfirmata\r\nimport time\r\nimport requests\r\n\r\n\r\nboard = pyfirmata.Arduino('COM8')\r\n\r\n\r\nimport time\r\n\r\ndef executeSomething():\r\n #code here\r\n board.digital[8].write(1)\r\n currentHum = requests.get('https://pi2-ephec.herokuapp.com/data/last?potId=1')\r\n minHum = requests.get('https://pi2-ephec.herokuapp.com/data/humidityThreshold?potId=1')\r\n arrosageAutYesOrNo = requests.get('https://pi2-ephec.herokuapp.com/users/getLearningMode?id=32')\r\n # print(x.status_code)\r\n # print(x.json())\r\n oneOrZero = arrosageAutYesOrNo.json()\r\n currentHumNum = currentHum.json()\r\n minHumNum = minHum.json()\r\n\r\n print(oneOrZero[0][\"learningMode\"])\r\n print(minHumNum.get(\"humidity\"))\r\n print(currentHumNum[0][\"dataHumidity\"])\r\n learningMode = oneOrZero[0][\"learningMode\"]\r\n humidityMin = minHumNum.get(\"humidity\")\r\n humidtyFromPlant = currentHumNum[0][\"dataHumidity\"]\r\n if ((humidityMin > humidtyFromPlant) and (learningMode == 1)): # auto mode enabled: run the pump for a few seconds\r\n board.digital[8].write(0)\r\n print(\"ok\")\r\n time.sleep(5)\r\n board.digital[8].write(1)\r\n #time.sleep(3)\r\n if ((humidityMin < 80)):\r\n board.digital[8].write(0)\r\nwhile True:\r\n executeSomething()\r\n#while True:\r\n\r\n","sub_path":"électronique/waterPumpControl.py","file_name":"waterPumpControl.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"532130065","text":"# Question: removing element from a list.\n# Worst case this solution runs in O(n^2): the while loop traverses the list, and each list.remove() call is itself O(n).\n# The space complexity would be O(1) since no extra data structures were created.\n\ndef remove_element(lst1, target):\n # new_lst = []\n print(\"lst1 start:\", lst1)\n i = 0\n while i < len(lst1):\n if lst1[i] == target:\n lst1.remove(lst1[i])\n continue\n i += 1\n return lst1\n\nif __name__ == \"__main__\":\n lst1 = [1, 3, 5, 6, 3, 3, 4]\n target = 3\n print(remove_element(lst1, target))\n\n\n","sub_path":"Day_11_LeetCodeQuestions/remove_element.py","file_name":"remove_element.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"426872134","text":"# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: percent\n#
format_version: '1.3'\n# jupytext_version: 1.11.4\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %% [raw] raw_mimetype=\"text/restructuredtext\"\n# .. _ug_layout:\n#\n# The subplot\n# ===========\n#\n# This section documents a variety of features related to ProPlot subplots,\n# including automatic a-b-c subplot labels, axis sharing between subplots,\n# automatic spacing between subplots, and a unique feature where the figure\n# size is determined automatically from the subplot geometry.\n#\n# %% [raw] raw_mimetype=\"text/restructuredtext\"\n# .. _ug_abc:\n#\n# A-b-c labels\n# ------------\n#\n# ProPlot can quickly add \"a-b-c\" labels to subplots. This is possible because\n# ProPlot assigns a unique `~proplot.axes.Axes.number` to each subplot. The\n# subplot number can be manually controlled by passing a `number` keyword to\n# `~proplot.figure.Figure.add_subplot`. Otherwise, the subplot number is\n# incremented by ``1`` each time you call `~proplot.figure.Figure.add_subplot`.\n#\n# If you draw all of your subplots at once with `~proplot.figure.Figure.add_subplots`,\n# the subplot numbers depend on the input arguments. If you\n# :ref:`passed an array `, the subplot numbers correspond to the numbers\n# in the array. But if you used the `ncols` and `nrows` keyword arguments, the\n# number order is row-major by default and can be switched to column-major by\n# passing ``order='F'``. The number order also determines the subplot order in\n# the `~proplot.gridspec.SubplotGrid` returned by `~proplot.figure.Figure.add_subplots`.\n#\n# To turn on \"a-b-c\" labels, set :rcraw:`abc` to ``True`` or pass ``abc=True``\n# to `~proplot.axes.Axes.format` (see :ref:`the format command `\n# for details). To change the label style, set :rcraw:`abc` to e.g. ``'A.'`` or\n# pass e.g. ``abc='A.'`` to `~proplot.axes.Axes.format`. You can also modify\n# the \"a-b-c\" label location, weight, and size with the :rcraw:`abc.loc`,\n# :rcraw:`abc.weight`, and :rcraw:`abc.size` settings. Also note that if the\n# an \"a-b-c\" label and title are in the same position, they are automatically\n# offset away from each other.\n#\n# .. note::\n#\n# \"Inner\" a-b-c labels and titles are surrounded with a white border when\n# :rcraw:`abc.border` and :rcraw:`title.border` are ``True`` (the default).\n# White boxes can be used instead by setting :rcraw:`abc.bbox` and\n# :rcraw:`title.bbox` to ``True``. These options help labels stand out\n# against plotted content. Any text can be given \"borders\" or \"boxes\" by\n# passing ``border=True`` or ``bbox=True`` to `proplot.axes.Axes.text`.\n\n# %%\nimport proplot as pplt\nfig = pplt.figure(space=0, refwidth='10em')\naxs = fig.subplots(nrows=3, ncols=3)\naxs.format(\n abc='A.', abcloc='ul',\n xticks='null', yticks='null', facecolor='gray5',\n xlabel='x axis', ylabel='y axis',\n suptitle='A-b-c label offsetting, borders, and boxes',\n)\naxs[:3].format(abcloc='l', titleloc='l', title='Title')\naxs[-3:].format(abcbbox=True) # also disables abcborder\n# axs[:-3].format(abcborder=True) # this is already the default\n\n# %%\nimport proplot as pplt\nfig = pplt.figure(space=0, refwidth=0.7)\naxs = fig.subplots(nrows=8, ncols=8)\naxs.format(\n abc=True, abcloc='ur',\n xlabel='x axis', ylabel='y axis', xticks=[], yticks=[],\n suptitle='A-b-c label stress test'\n)\n\n\n# %% [raw] raw_mimetype=\"text/restructuredtext\"\n# .. 
_ug_autosize:\n#\n# Automatic sizes\n# ---------------\n#\n# By default, ProPlot determines the suitable figure size given the\n# geometry of the subplot grid and the size of a \"reference\" subplot.\n# This \"reference\" subplot is specified with the `~proplot.figure.Figure`\n# keyword `refnum` (default is ``1``, i.e. the first subplot added to the figure\n# or the subplot in the upper-left corner when generated with `~proplot.ui.subplots`).\n# ProPlot can also determine the suitable figure height given a fixed figure\n# width, and the suitable figure width given a fixed figure height.\n#\n# The figure size is ultimately controlled by the following\n# `~proplot.figure.Figure` keyword arguments:\n#\n# * `refwidth` and `refheight` set the physical dimensions of the reference subplot\n# (default is :rc:`subplots.refwidth`). If one is specified, the other is calculated\n# to satisfy the subplot aspect ratio `refaspect` (default is ``1``). If both are\n# specified, `refaspect` is ignored. When these keyword arguments are used, the\n# width and height of the figure are both determined automatically.\n# * `figwidth` and `figheight` set the physical dimensions of the figure.\n# If one is specified, the other is calculated to satisfy `refaspect`\n# and the subplot spacing. If both are specified, or if the `figsize` parameter\n# is specified, the figure size is fixed and `refaspect` is ignored.\n# * `journal` constrains the physical dimensions of the figure to meet requirements\n# for submission to an academic journal. For example, ``journal='nat1'``\n# results in a width suitable for single-column *Nature* figures. See\n# :ref:`this table ` for the list of available journal\n# specifications (feel free to add to this table by submitting a pull request).\n#\n# The below examples show how these keyword arguments affect the figure size.\n#\n# .. important::\n#\n# The automatic figure size algorithm has the following notable properties:\n#\n# * For very simple subplot grids (e.g., subplots created with the `ncols` and\n# `nrows` arguments), the arguments `refaspect`, `refwidth`, and `refheight`\n# effectively apply to every subplot in the figure -- not just the\n# reference subplot.\n# * When the reference subplot `aspect ratio\n# `__\n# has been fixed (e.g., using ``ax.set_aspect(1)``) or is set to ``'equal'`` (as\n# with :ref:`geographic projections ` and `~proplot.axes.PlotAxes.imshow`\n# images), the fixed aspect ratio is used and the `~proplot.ui.subplots`\n# `refaspect` parameter is ignored. 
This is critical for getting the figure\n# size right when working with grids of images and geographic projections.\n# * The physical widths of `~proplot.axes.Axes.colorbar`\\ s and\n# `~proplot.axes.Axes.panel`\\ s are always preserved during figure resizing.\n# ProPlot specifies their widths in physical units to help avoid colorbars\n# and panels that look \"too skinny\" or \"too fat\".\n\n# %%\nimport proplot as pplt\nimport numpy as np\n\n# Grid of images (note the square pixels)\nstate = np.random.RandomState(51423)\ncolors = np.tile(state.rand(8, 12, 1), (1, 1, 3))\nfig, axs = pplt.subplots(ncols=3, nrows=2, refwidth=1.7)\nfig.format(suptitle='Auto figure size for grid of images')\nfor ax in axs:\n ax.imshow(colors)\n\n# Grid of cartopy projections\nfig, axs = pplt.subplots(ncols=2, nrows=3, proj='robin')\naxs.format(land=True, landcolor='k')\nfig.format(suptitle='Auto figure size for grid of cartopy projections')\n\n\n# %%\nimport proplot as pplt\npplt.rc.update(grid=False, titleloc='uc', titleweight='bold', titlecolor='red9')\n\n# Change the reference subplot width\nsuptitle = 'Effect of subplot width on figure size'\nfor refwidth in ('3cm', '5cm'):\n fig, axs = pplt.subplots(ncols=2, refwidth=refwidth,)\n axs[0].format(title=f'refwidth = {refwidth}', suptitle=suptitle)\n\n# Change the reference subplot aspect ratio\nsuptitle = 'Effect of subplot aspect ratio on figure size'\nfor refaspect in (1, 2):\n fig, axs = pplt.subplots(ncols=2, refwidth=1.6, refaspect=refaspect)\n axs[0].format(title=f'refaspect = {refaspect}', suptitle=suptitle)\n\n# Change the reference subplot\nsuptitle = 'Effect of reference subplot on figure size'\nfor ref in (1, 2): # with different width ratios\n fig, axs = pplt.subplots(ncols=3, wratios=(3, 2, 2), ref=ref, refwidth=1.1)\n axs[ref - 1].format(title='reference', suptitle=suptitle)\nfor ref in (1, 2): # with complex subplot grid\n fig, axs = pplt.subplots([[1, 2], [1, 3]], refnum=ref, refwidth=1.8)\n axs[ref - 1].format(title='reference', suptitle=suptitle)\n\npplt.rc.reset()\n\n# %% [raw] raw_mimetype=\"text/restructuredtext\"\n# .. _ug_tight:\n#\n# Subplot spaces\n# --------------\n#\n# By default, ProPlot automatically determines the suitable space between\n# subplots using a tight layout algorithm. This algorithm automatically\n# expands or contracts the space between subplots to accommodate labels.\n# It can be disabled by passing ``tight=False`` to `~proplot.ui.subplots`\n# or setting :rcraw:`subplots.tight` to ``False``. In contrast to\n# `matplotlib's tight layout algorithm\n# `__,\n# ProPlot's algorithm may change the figure size to accommodate the correct\n# spacing and permits variable spacing between subsequent subplot rows and\n# columns (see `proplot.gridspec.GridSpec` for details).\n#\n# The tight layout algorithm can also be completely or partly overridden. When\n# you pass any of the spacing arguments `left`, `right`, `top`, `bottom`,\n# `wspace`, or `hspace` to `~proplot.ui.figure`, `~proplot.ui.subplots`, or\n# `~proplot.gridspec.GridSpec`, that value is always respected. 
For example:\n#\n# * ``left=2`` fixes the left margin at 2 em-widths, while the right,\n# bottom, and top margin widths are determined by the tight layout algorithm.\n# * ``wspace=1`` fixes the spaces between subplot columns at 1 em-width, while the\n# spaces between subplot rows are determined by the tight layout algorithm.\n# * ``wspace=(3, None)`` fixes the space between the first two columns of\n# a three-column plot at 3 em-widths, while the space between the second two\n# columns is determined by the tight layout algorithm.\n#\n# Alternatively, the padding used by the tight layout algorithm (rather than the\n# absolute spaces between subplot edges) can be changed by passing `outerpad`,\n# `innerpad`, or `panelpad` to `~proplot.ui.figure` or `~proplot.ui.subplots`.\n# This padding can be set locally by passing an array of values to `wpad`\n# and `hpad` (analogous to `wspace` and `hspace`), or by passing the `pad`\n# keyword when creating :ref:`panel axes ` or :ref:`outer\n# colorbars and legends ` (analogous to `space`). Finally,\n# to constrain the tight layout algorithm to produce equal spacing between\n# main subplot rows and columns, you can pass ``wequal=True``, ``hequal=True``\n# or ``equal=True`` to `~proplot.ui.figure` or `~proplot.ui.subplots` (note that\n# equal spacing is the default behavior when tight layout is disabled).\n#\n# All the spacing parameters described above can be specified with a\n# :ref:`unit string ` interpreted by `~proplot.utils.units`.\n# The default unit assumed for numeric arguments is an \"em-width\" (i.e., a\n# :rcraw:`font.size` width -- see the :ref:`units table ` for details).\n\n# %%\nimport proplot as pplt\n\n# Stress test of the tight layout algorithm\n# Add large labels along the edge of one subplot\nfor equal, descrip in enumerate(('variable', 'equal')):\n fig, axs = pplt.subplots(\n nrows=3, ncols=3, refwidth=1.1, share=False, equal=bool(equal)\n )\n axs[1].format(\n xlabel='xlabel\\nxlabel',\n ylabel='ylabel\\nylabel\\nylabel\\nylabel'\n )\n axs.format(\n grid=False,\n toplabels=('Column 1', 'Column 2', 'Column 3'),\n leftlabels=('Row 1', 'Row 2', 'Row 3'),\n suptitle=f'Tight layout with {descrip} row-column spacing',\n )\n\n# %%\nimport proplot as pplt\n\n# Stress test of the tight layout algorithm\n# This time override the algorithm between selected subplot rows/columns\nfig, axs = pplt.subplots(\n ncols=4, nrows=3, refwidth=1.1, span=False,\n bottom='5em', right='5em', # margin spacing overrides\n wspace=(0, 0, None), hspace=(0, None), # column and row spacing overrides\n)\naxs.format(\n grid=False,\n xlocator=1, ylocator=1, tickdir='inout',\n xlim=(-1.5, 1.5), ylim=(-1.5, 1.5),\n suptitle='Tight layout with user overrides',\n toplabels=('Column 1', 'Column 2', 'Column 3', 'Column 4'),\n leftlabels=('Row 1', 'Row 2', 'Row 3'),\n)\naxs[0, :].format(xtickloc='top')\naxs[2, :].format(xtickloc='both')\naxs[:, 1].format(ytickloc='neither')\naxs[:, 2].format(ytickloc='right')\naxs[:, 3].format(ytickloc='both')\naxs[-1, :].format(xlabel='xlabel', title='Title\\nTitle\\nTitle')\naxs[:, 0].format(ylabel='ylabel')\n\n\n# %% [raw] raw_mimetype=\"text/restructuredtext\" tags=[]\n# .. _ug_share:\n#\n# Axis sharing\n# ------------\n#\n# Figures with lots of subplots often have :ref:`redundant labels `.\n# To help address this, `matplotlib.pyplot.subplots` includes the `sharex` and\n# `sharey` keyword arguments that permit sharing axis limits, ticks, and tick labels\n# between like rows and columns of subplots. 
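#
# For reference, the stock matplotlib behavior can be reproduced with a
# short sketch (pure matplotlib rather than ProPlot):

# %%
import numpy as np
import matplotlib.pyplot as plt

# sharex/sharey keep the axis limits and ticks synchronized across
# subplots and hide redundant tick labels on interior subplots.
state = np.random.RandomState(51423)
fig, axs = plt.subplots(2, 2, sharex=True, sharey=True)
for ax in axs.flat:
    ax.plot(state.rand(10))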
ProPlot builds on this feature by...\n#\n# #. Automatically sharing axes between subplots and :ref:`panels `\n# occupying the same rows or columns of the `~proplot.gridspec.GridSpec`. This\n# works for :ref:`aribtrarily complex subplot grids `. It also works\n# if subplots were generated one-by-one with `~proplot.figure.Figure.add_subplot`\n# rather than `~proplot.figure.Figure.subplots`. It is controlled by the `sharex`\n# and `sharey` keywords (default is :rc:`subplots.share`). You can use the\n# `share` keyword as a shorthand to set both `sharex` and `sharey`.\n# #. Automatically sharing labels across subplots and :ref:`panels `\n# with edges against the same row or column of the `~proplot.gridspec.GridSpec`.\n# This also works for complex grids and subplots generated one-by-one. It is\n# controlled by the `spanx` and `spany` keywords (default is :rc:`subplots.span`).\n# Use the `span` keyword as a shorthand to set both `spanx` and `spany`.\n# #. Supporting five sharing \"levels\". These values can be passed to `sharex`,\n# `sharey`, or `share`, or assigned to :rcraw:`subplots.share`. The levels\n# are defined as follows:\n#\n# * ``False`` or ``0``: Axis sharing is disabled.\n# * ``'labels'``, ``'labs'``, or ``1``: Axis labels are shared, but\n# nothing else. Labels will appear on the leftmost and bottommost subplots.\n# * ``'limits'``, ``'lims'``, or ``2``: Same as ``1``, but axis limits, axis\n# scales, and major and minor tick locations and formatting are also shared.\n# * ``True`` or ``3``: Same as ``2``, but axis tick labels are also shared.\n# Tick labels will appear on the leftmost and bottommost subplots.\n# * ``'all'`` or ``4``: Same as ``3``, but axis limits, axis scales, and\n# axis ticks are shared even between subplots not in the same row or column.\n#\n# The below examples demonstrate the effect of various axis and label sharing\n# settings on the appearance of several subplot grids.\n\n# %%\nimport proplot as pplt\nimport numpy as np\nN = 50\nM = 40\nstate = np.random.RandomState(51423)\ncycle = pplt.Cycle('grays_r', M, left=0.1, right=0.8)\ndatas = []\nfor scale in (1, 3, 7, 0.2):\n data = scale * (state.rand(N, M) - 0.5).cumsum(axis=0)[N // 2:, :]\n datas.append(data)\n\n# Same plot with different sharing and spanning settings\nfor i, share in enumerate((False, 'labels', 'limits', True)):\n fig = pplt.figure(refaspect=1, refwidth=1.06, sharey=share, spanx=i // 2)\n axs = fig.subplots(ncols=4)\n for ax, data in zip(axs, datas):\n on = ('off', 'on')[i // 2]\n ax.plot(data, cycle=cycle)\n ax.format(\n suptitle=f'Sharing mode {share!r} (level {i}) with spanning labels {on}',\n grid=False, xlabel='spanning axis', ylabel='shared axis'\n )\n\n# %%\nimport proplot as pplt\nimport numpy as np\npplt.rc.reset()\npplt.rc.cycle = 'Set3'\nstate = np.random.RandomState(51423)\n\n# Same plot with and without default sharing settings\ntitles = ('With redundant labels', 'Without redundant labels')\nfor b in (False, True):\n fig = pplt.figure(refwidth=1, share=b, span=b)\n axs = fig.subplots(nrows=4, ncols=4)\n for ax in axs:\n ax.plot((state.rand(100, 20) - 0.4).cumsum(axis=0))\n axs.format(\n abc=True, abcloc='ul', suptitle=titles[b],\n xlabel='xlabel', ylabel='ylabel',\n grid=False, xticks=25, yticks=5\n )\n\n\n# %% [raw] raw_mimetype=\"text/restructuredtext\"\n# .. 
_ug_units:\n#\n# Physical units\n# --------------\n#\n# ProPlot supports arbitrary physical units for controlling the figure\n# `figwidth` and `figheight`, the reference subplot `refwidth` and `refheight`,\n# the gridspec spacing and tight layout padding values `left`, `right`, `bottom`,\n# `top`, `wspace`, `hspace`, `outerpad`, `innerpad`, `panelpad`, `wpad`, and `hpad`,\n# the `~proplot.axes.Axes.panel_axes` and `~proplot.axes.Axes.colorbar` widths,\n# and all applicable `~proplot.config.rc` settings (e.g., settings controlling\n# legend spacing, label padding, and font size). This feature is powered by the\n# `~proplot.utils.units` function.\n#\n# A table of acceptable physical units is found :ref:`here `.\n# They include centimeters, millimeters, pixels,\n# `em-heights `__,\n# `en-heights `__,\n# and `points `__.\n# The default physical unit (assumed when an argument is numeric) depends on the\n# context. For legend and gridspec spaces, it is em-widths. For subplot and\n# figure sizes, it is inches. For text padding and font sizes, it is points. See\n# the relevant documentation in the :ref:`API reference ` for details.\n\n# %%\nimport proplot as pplt\nimport numpy as np\nwith pplt.rc.context(fontsize='12px'):\n fig, axs = pplt.subplots(\n ncols=3, figwidth='15cm', figheight='3in',\n wspace=('10pt', '20pt'), right='10mm',\n )\n cmap = pplt.Colormap('Mono')\n cb = fig.colorbar(\n cmap, loc='b', extend='both', label='colorbar',\n width='2em', extendsize='3em', shrink=0.8,\n )\n pax = axs[2].panel_axes('r', width='5en')\naxs.format(\n suptitle='Arguments with arbitrary units',\n xlabel='x axis', ylabel='y axis',\n xlim=(0, 1), ylim=(0, 1),\n)\n","sub_path":"docs/layout.py","file_name":"layout.py","file_ext":"py","file_size_in_byte":18202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"381907472","text":"import random\nHANGMAN_PICS = ['''我的状态:很高兴''', '''我的状态:高兴''','''我的状态:低落''','''我的状态:恐慌''','''我的状态:死亡''']\n\nwords = \"today happy teacher wonderful school holiday homework coffee \".split()\n\ndef getRandomWord(wordList):\n wordIndex = random.randint(0, len(words) - 1)\n return wordList[wordIndex]\n\ndef displayBoard(missedLetters, correctLetters, secretWord):\n print(HANGMAN_PICS[len(missedLetters)])\n print()\n\n print(\"尝试过的字符:\", end= \"\")\n\n for letter in missedLetters:\n print(letter, end = \" \")\n print()\n\n blanks = \"0\" * len(secretWord)\n\n for i in range(len(secretWord)):\n if secretWord[i] in correctLetters:\n blanks = blanks[:i] + secretWord[i] + blanks[i+1:]\n\n for letter in blanks:\n print(letter, end=\" \")\n print()\n\ndef getGuess(alreadyGuessed):\n while True:\n print(\"猜一个字母吧。\")\n guess = input()\n guess = guess.lower()\n if len(guess) != 1:\n print(\"只输入一个字符。\")\n elif guess in alreadyGuessed:\n print(\"你已经猜过了这个字符。\")\n elif guess not in 'abcdefghijklmnopqrstuvwxyz':\n print(\"请只输入字母。\")\n else:\n return guess\n\ndef playAgain():\n print(\"再玩一次?(y/n)\")\n return input().lower().startswith('y')\n\n\nprint(\"恐怖字符游戏\")\nmissedLetters = \"\"\ncorrectLetters = \"\"\nsecretWord = getRandomWord(words)\ngameIsDone = False\n\nwhile True:\n displayBoard(missedLetters, correctLetters, secretWord)\n\n guess = getGuess(missedLetters + correctLetters)\n\n if guess in secretWord:\n correctLetters = correctLetters + guess\n\n foundAllLetters = True\n for i in range(len(secretWord)):\n if secretWord[i] not in correctLetters:\n foundAllLetters = False\n break\n if foundAllLetters:\n print(f\"恭喜你,你已经找到了 '{secretWord}' 
这个秘密了!\")\n gameIsDone = True\n else:\n missedLetters = missedLetters + guess\n\n if len(missedLetters) == len(HANGMAN_PICS) -1:\n displayBoard(missedLetters, correctLetters, secretWord)\n print(\"你的竞猜已经完毕!\\n经过 \" + str(len(missedLetters)) + ' 次竞猜有 ' + str(len(correctLetters)) + ' 次才对, 答案是 \"' + secretWord + '\"')\n gameIsDone = True\n\n if gameIsDone:\n if playAgain():\n missedLetters = \"\"\n correctLetters = \"\"\n gameIsDone = False\n secretWord = getRandomWord(words)\n else:\n break\n","sub_path":"new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"301638698","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('tasks', '0013_calendarsettings'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='calendarsettings',\n options={'verbose_name_plural': 'Calendar settings'},\n ),\n migrations.AlterModelOptions(\n name='recurringtasktemplate',\n options={'ordering': ['short_desc', '-sunday', '-monday', '-tuesday', '-wednesday', '-thursday', '-friday', '-saturday']},\n ),\n migrations.AddField(\n model_name='task',\n name='status',\n field=models.CharField(default='W', choices=[('W', 'Workable'), ('R', 'Reviewable'), ('D', 'Done'), ('C', 'Canceled')], help_text='The status of this task.', max_length=1),\n ),\n ]\n","sub_path":"tasks/migrations/0014_auto_20151002_1646.py","file_name":"0014_auto_20151002_1646.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"645970583","text":"from django.contrib import admin\nfrom .models import Question, Choice\n\nclass QuestionAdmin(admin.ModelAdmin):\n fieldsets = [\n (None, {'fields' : ['question_text']}),\n ('Date information', {'fields' : ['pud_date']}),\n ]\n # fields = ['pud_date','question_text']\n\n\nadmin.site.register(Question, QuestionAdmin)\nadmin.site.register(Choice)\n\n","sub_path":"polls/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"423073745","text":"import timeit\n\nfrom tensorflow.keras.datasets import cifar10\n\nimport autokeras as ak\n\n\ndef main():\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n clf = ak.ImageClassifier(max_trials=10,\n directory='tmp_dir',\n overwrite=True)\n\n start_time = timeit.default_timer()\n clf.fit(x_train, y_train)\n stop_time = timeit.default_timer()\n\n accuracy = clf.evaluate(x_test, y_test)[1]\n print('Accuracy: {accuracy}%'.format(accuracy=round(accuracy * 100, 2)))\n print('Total time: {time} seconds.'.format(time=round(stop_time - start_time, 2)))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"benchmark/cifar10.py","file_name":"cifar10.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"519406632","text":"from swyf.x import x\nfrom swyf.x.window_manager import WindowManager\nfrom swyf.core.dock import Dock\nfrom swyf.core import gtk\nfrom swyf.err import SwyfFatalError, SwyfStop\nfrom swyf.util.log import Logger\nimport os\nfrom time import sleep\n\nlog = Logger('main')\n\nlog.tmi('Environment:')\nfor i in os.environ.items():\n log.tmi('\\t%s : %s' % i)\nlog.inf('\\n\\n\\t*** s w y f ***\\n\\n')\n\nif __name__ == '__main__':\n\n wm = WindowManager()\n dock = Dock()\n\n try:\n x.begin()\n wm.begin()\n dock.begin()\n\n # Main loop\n log.inf('Entering main loop.')\n while True:\n wm.tick()\n gtk.tick()\n\n sleep(0.001)\n\n except SwyfStop:\n log.inf("Stopping...")\n\n except SwyfFatalError:\n log.err('Fatal error.')\n\n except KeyboardInterrupt:\n print()\n log.war('Keyboard Interrupt.')\n\n except Exception as ex:\n log.trace(ex)\n log.err('Crashed!')\n\n finally:\n dock.end()\n wm.end()\n x.end()\n log.inf('Bye!')\n","sub_path":"swyf/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"421321280","text":"skill = []\nskill1 = {\n 'Name': 'Tackle',\n 'Min level': 1,\n 'Damage': 5,\n 'Hit rate': 0.3\n}\n\nskill2 = {\n 'Name': 'Quick attack',\n 'Min level': 2,\n 'Damage': 3,\n 'Hit rate': 0.5\n}\n\nskill3 = {\n 'Name': 'Strong kick',\n 'Min level': 4,\n 'Damage': 7,\n 'Hit rate': 0.3\n}\n\nskill.append(skill1)\nskill.append(skill2)\nskill.append(skill3)\ni=1\nfor k in skill:\n print(\"Skill\", i)\n print(k['Name'])\n i+=1\n\ncharacter = {\n 'Name' : 'Tackle',\n 'Age': 17,\n 'Strength' : 8,\n 'Defense': 10,\n 'HP': 100,\n 'Backpack': [\"shield\", 'bread loaf'], \n 'Gold': 100,\n 'level': 2\n}\nloop = True\nwhile loop:\n try:\n n = int(input(\"Choose skill: \"))\n if n == 1:\n if skill1['Min level'] <= character['level']:\n print(\"Skill damage: \", skill1['Damage'])\n else:\n print(\"No permission\")\n if n == 2:\n if skill2['Min level'] <= character['level']:\n print(\"Skill damage: \", skill2['Damage'])\n else:\n print(\"No permission\")\n if n ==3:\n if skill3['Min level'] <= character['level']:\n print(\"Skill damage: \", skill3['Damage'])\n else:\n print(\"No permission\")\n if n >=4:\n print(\"Don't have that skill\")\n except ValueError:\n pass\n \n\n\n\n\n\n","sub_path":"mini_hack/ex27.py","file_name":"ex27.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"426764886","text":"import os, sys, time, getopt\nos.environ['CORAL_MSGFORMAT']='ATLAS'\nos.environ['CORAL_CONNECTIONPOOL_VERBOSE']='1'\n\nfrom PyCoralTest import validateBackends\n[urlRW,urlRO] = validateBackends( \"oracle:oracle\" )\n\ndef usage():\n print('Usage:', sys.argv[0], '[-c <cleanup (T/F)>] [-p <period (int >= 0)>] [-t <timeout (int >= 0)>]')\n sys.exit(1)\n\ntry:\n opts, args = getopt.getopt( sys.argv[1:], \"hc:p:t:\", [\"help\", \"cleanup=\", \"period=\",\"timeout=\"] )\nexcept getopt.GetoptError:\n usage()\n\nc=None\np=None\nt=None\nfor opt, arg in opts:\n if opt in (\"-h\", \"--help\"):\n usage()\n elif opt in (\"-c\", \"--cleanup\"):\n if arg == 'T' or arg == 'True': c=True \n elif arg == 'F' or arg == 'False': c=False\n else: usage()\n elif opt in (\"-p\", \"--period\"):\n try: p = int(arg) \n except: usage()\n if p<0: usage()\n elif opt in (\"-t\", \"--timeout\"):\n try: t = int(arg) \n except: usage()\n if t<0: usage()\n\nif p is not None:\n print('Set period to: ', p)\n os.environ['CORAL_CONNECTIONPOOL_CLEANUPPERIOD']=str(p)\nelse:\n print('Set period to: DEFAULT')\n # NB: This must be set _before_ retrieving ConnectionService.configuration()\n os.environ['CORAL_CONNECTIONPOOL_CLEANUPPERIOD']='10' # easier...\n\nimport coral\nsvc=coral.ConnectionService()\ncfg=svc.configuration()\n\nif c is not None:\n print('Set cleanup to:', c)\n if c: cfg.enablePoolAutomaticCleanUp()\n else: cfg.disablePoolAutomaticCleanUp()\nelse:\n print('Set 
cleanup to: DEFAULT')\n\nif t is not None:\n print('Set timeout to:', t)\n cfg.setConnectionTimeOut(t)\nelse:\n print('Set timeout to: DEFAULT')\n\nprint('Timeout:', cfg.connectionTimeOut())\nprint('Period: ', os.environ['CORAL_CONNECTIONPOOL_CLEANUPPERIOD'])\nprint('Cleanup:', cfg.isPoolAutomaticCleanUpEnabled())\n\n###sys.exit(0)\n\n# === WARNING #1: calling disablePoolAutomaticCleanUp() has no effect if the\n# cleanup thread has already started, i.e. after the first call to connect()\n\n# === WARNING #2: connection timeout and pool cleanup period are different;\n# the latter is set by CORAL_CONNECTIONPOOL_CLEANUPPERIOD (CORALCOOL-847)\n\nprint()\nprint('== Connect')\nses=svc.connect(urlRW)\nprint()\nprint('== Disconnect')\nses=0\nprint()\nfor i in range(0, 5):\n print('== Sleep 1 seconds (', i+1, 'of 5 )')\n time.sleep(1)\n\nif not cfg.isPoolAutomaticCleanUpEnabled():\n print()\n print('== Enable pool automatic cleanup')\n cfg.enablePoolAutomaticCleanUp()\nelse:\n print()\n print('== Disable pool automatic cleanup (WARNING: NO EFFECT!)')\n cfg.disablePoolAutomaticCleanUp()\n\nprint()\nprint('== Connect')\nses=svc.connect(urlRW)\nprint()\nprint('== Disconnect')\nses=0\nprint()\nfor i in range(0, 5):\n print('== Sleep 1 seconds (', i+1, 'of 5 )')\n time.sleep(1)\n\nprint()\nprint('== Exit')\n\n","sub_path":"PyCoral/tests/Python3/test_autoCleanup_coralcool948.py","file_name":"test_autoCleanup_coralcool948.py","file_ext":"py","file_size_in_byte":2842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"615981398","text":"import paramiko, os\nfrom os import path, access, R_OK\n\n\nclass sftpconn(object):\n\n\trsa_private_key = r'/path/to/your/rsa.key'\n\n\tdef __init__(self, logfile, username, password, host, port, ssh_key):\n\t\tparamiko.util.log_to_file(logfile)\n\t\t\n\t\tprint ('Establishing SSH connection to:', host, port, '...')\n\t\tself.transport = paramiko.Transport((host, int(port)))\n\n\t\tif ssh_key == True:\n\t\t\tsshkey = paramiko.RSAKey.from_private_key_file(self.rsa_private_key)\n\t\t\tself.transport.connect(username = username, pkey = sshkey)\n\n\t\telse:\t\n\t\t\tself.transport.connect(username = username, password = password)\n\n\t\tself.sftp = paramiko.SFTPClient.from_transport(self.transport)\n\t\t\t\n\t# check if the file exists\n\tdef check_file(self, PATH):\n\t\tif path.exists(PATH) and path.isfile(PATH) and access(PATH, R_OK):\n\t\t\treturn 0\n\t\telse:\n\t\t\treturn 1\n\n\t# this function will allow the use of wildcards in between underscores, eg: file_*_name.txt\n\tdef is_match(self, a, b):\n\t\taa = a.split('_')\n\t\tbb = b.split('_')\n\t\tif len(aa) != len(bb): return False\n\t\tfor x, y in zip(aa, bb):\n\t\t\tif not (x == y or x == '*' or y == '*'): return False\n\t\treturn True\n\n\t\n\tdef get(self, file_formats, local_dir, local_base_dir, remote_dir):\n\t\tfiles_copied = 0\n\t\terrors = 0\n\t\tsummary = ''\n\t\tactual_files = []\n\t\tremote_files = []\n\t\tprint ('local_dir:', local_dir)\n\t\tprint ('local_base_dir:', local_base_dir)\n\t\ttry:\n\t\t\tfor f in self.sftp.listdir(remote_dir):\n\t\t\t\tremote_files.append(f)\n\n\t\t\tdifference = list(set(remote_files).difference(file_formats))\n\t\t\t# print(difference)\n\t\t\t# for file_format in file_formats:\n\t\t\tfor f in difference:\n\t\t\t\tactual_files.append(remote_dir+f)\n\n\t\t\tfor actual_file in actual_files:\n\t\t\t\tprint ('actual_file:', actual_file)\n\t\t\t\tbase_file = actual_file.replace(remote_dir, '')\n\t\t\t\tself.sftp.get(actual_file, 
local_dir + base_file)\n\t\t\t\tself.sftp.get(actual_file, local_base_dir + base_file)\n\n\t\t\t\tfiles_copied += 1\n\t\t\t\tsummary += \"[Copied] \" + local_dir + base_file + '\\n'\n\n\t\t\tif errors > 0 or files_copied == 0:\n\t\t\t\tprint ('summary:', summary)\n\t\t\t\tif files_copied == 0:\n\t\t\t\t\treturn [summary, \"No files available for transfer.\", 'No Files']\n\t\t\t\telse:\n\t\t\t\t\treturn [summary, \"This transaction failed with \"+ str(errors) +\" error/s:\\n\\n\" + summary, 'Failed']\n\t\t\telse:\n\t\t\t\treturn [summary, \"Total file/s copied: %s. Summary: %s\" % (str(files_copied), summary), 'Success']\n\n\t\texcept Exception as e:\n\t\t\t\tprint ('exception:', e)\n\t\t\t\treturn [summary, \"Error while copying file : \" + str(e), 'Failed']\n\n\tdef chdir(self, dir):\n \t\tself.sftp.chdir(dir)\n\n\tdef ls(self, remote):\n\t\treturn self.sftp.listdir(remote)\n\n\tdef close(self):\n\t\tif self.transport.is_active():\n\t\t\tself.sftp.close()\n\t\t\tself.transport.close()\n\n\tdef __enter__(self):\n\t\treturn self\n\n\tdef __exit__(self, type, value, tb):\n\t\tself.close()\n\n\t# def mput(self, local, remote):\n\t# \tfiles_copied = 0\n\t# \tsummary = ''\n\t# \ttry:\n\t# \t\tfor root, dirs, files in os.walk(local):\n\t# \t\t\tprint files\n\t# \t\t\tfor name in sorted(files):\n\t# \t\t\t\tfilename = os.path.join(root, name)\n\t# \t\t\t\tself.sftp.put(filename, remote + name)\n\t# \t\t\t\tfiles_copied += 1\n\t# \t\t\t\tsummary = summary + \"Copied: \" + remote + name + \"\\n\"\n\t# \t\treturn [summary, \"Total file/s copied: \" + str(files_copied), 'Success']\n\t# \texcept Exception, e:\n\t# \t\treturn [summary, \"Error: \" + str(e), 'Failed']\n\n\tdef mget(self, lfile, local, remote):\n\t\tfiles_copied = 0\n\t\tsummary = ''\n\t\ttry:\n\n\t\t\tfor f in self.sftp.listdir(remote):\n\t\t\t\tprint (f)\n\t\t\t\tself.sftp.get(remote+f, local+f)\n\t\t\t\tfiles_copied += 1\n\t\t\t\tsummary = summary + \"Copied: \" + remote + f + \"\\n\"\n\t\t\tprint (summary)\n\t\t\treturn [summary, \"Total file/s copied: \" + str(files_copied), 'Success']\n\t\texcept Exception as e:\n\t\t\tprint (e)\n\t\t\treturn [summary, \"Error: \" + str(e), 'Failed']\n\n\n","sub_path":"sftpconn.py","file_name":"sftpconn.py","file_ext":"py","file_size_in_byte":3706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"189689821","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import fetch_olivetti_faces\n\nfaces = fetch_olivetti_faces()\nfig = plt.figure(figsize=(6,6)) # figure size in inches\nfig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)\n\n# plotting the faces\nfor i in range(64):\n ax = fig.add_subplot(8,8,i+1,xticks=[],yticks=[]) \n ax.imshow(faces.images[i], cmap=plt.cm.bone, interpolation='nearest')\nfig.show()","sub_path":"Part2: Representation of Data for Machine Learning/olivetti_faces.py","file_name":"olivetti_faces.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"480127307","text":"# Title: selection_sort.py\r\n# Author: J. 
Mayeux - pyRN\r\n# Date: 8-2-17\r\n\r\nimport time\r\n\r\n\r\ndef selection_sort(array, amount):\r\n print('*' * 50)\r\n print('Selection Sort'.center(50))\r\n print('*' * 50, '\\n')\r\n print('Unsorted', amount, 'item array--->', array, '\\n')\r\n start = time.time()\r\n\r\n for x in range(0, amount):\r\n low = x\r\n sort = x\r\n for y in range(sort, amount):\r\n if array[sort] <= array[y] <= array[low]:\r\n low = y\r\n sort = sort + 1\r\n if sort > amount:\r\n break\r\n array[x], array[low] = array[low], array[x]\r\n finish = time.time()\r\n print('Sorted', amount, 'item array--->', array, '\\n')\r\n print('Sorting took', finish - start, '\\n')\r\n","sub_path":"selection_sort.py","file_name":"selection_sort.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"265601284","text":"# !/usr/bin/python3\n# coding:utf-8 \n# Author : mahua\n# Email : lihh3721@gmail.com\n# Time : 2019/4/21 12:38 AM\n# FileName : test_addProject.py\n\nimport unittest\nfrom API_prac.common.http_request import HttpRequest_session\nfrom API_prac.common import rwExcel\nfrom API_prac.common import contants\nfrom ddt import ddt,data\nfrom API_prac.common.config import config\nfrom API_prac.common import context\n\n@ddt\nclass AddProjectTest(unittest.TestCase):\n excel = rwExcel.RWExcel(contants.case_file, 'add')\n cases = excel.readExcel()\n @classmethod\n def setUpClass(cls):#setUp:每个执行之前都要实例化一次,改成类方法setUpClass后就只需要在所有的执行之前实例化一次\n cls.http_request = HttpRequest_session()\n\n @data(*cases)\n def test_addProject(self,case):\n #在请求之前替换参数化的值\n case.data = context.replace(case.data)\n resp = self.http_request.http_request(case.method,case.url,case.data)\n try:\n self.assertEqual(case.expected,resp.text)\n self.excel.writeExcel(case.case_id+1,resp.text,'PASS')\n except AssertionError as e:\n self.excel.writeExcel(case.case_id+1,resp.text,'FAIL')\n raise e\n @classmethod\n def tearDownClass(cls):\n cls.http_request.close()","sub_path":"API_prac/testcases/test_addProject.py","file_name":"test_addProject.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"164105519","text":"# coding=utf-8\nimport sys\n# read .pyv file by console\ndirectory = './PYWs/'\ndestination_directory = './ATs/'\nfr = open(directory + sys.argv[1],'r')\nname_extension = sys.argv[1].split('.')\nfile_name = name_extension[0] + '.sent'\nfw = open(destination_directory + file_name, 'w')\n# load file\nparagraphList = list(fr)\n# deal with each line\nfor paragraph in paragraphList:\n words = paragraph.split()\n fw.write(' '.join(words))\n fw.write(' ')\nprint('---------------------------------------')\nprint('Executing...\\n')\nprint('Congratuation, new file ' + file_name + ' has been generated.')\nprint('---------------------------------------')\nfr.close()\nfw.close()\n","sub_path":"pinyinWord2Sentence.py","file_name":"pinyinWord2Sentence.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"352698978","text":"from flask import render_template, request, flash, redirect, url_for\nfrom . 
import admin\nfrom .forms import LoginForm, RegistrationForm, PostFactForm, ImportCSVFileForm\nfrom ..models import db, User, Post, AdditionalFact, TagButton\nfrom flask_login import current_user, login_user, login_required, logout_user\nfrom werkzeug.urls import url_parse\nimport csv\nimport io\nimport giphy_client\nfrom giphy_client.rest import ApiException\nimport requests\n\n\ndef find_additional_fact_index(original_fact=AdditionalFact, updated_facts=[AdditionalFact]):\n found_fact_index = -1\n for i, uf in enumerate(updated_facts):\n if uf[\"id\"] == original_fact.id:\n found_fact_index = i\n return found_fact_index\n\ndef find_tag_button_index(original_button=TagButton, updated_buttons=[TagButton]):\n found_button_index = -1\n for i, ub in enumerate(updated_buttons):\n if ub[\"id\"] == original_button.id:\n found_button_index = i\n return found_button_index\n\n\n@admin.route('/')\n@admin.route('/index')\n@login_required\ndef index():\n default_image_url = \"https://vignette.wikia.nocookie.net/justdance/images/8/8b/Alyssa_edwards_BYF_judging.gif\"\n image_url = default_image_url\n\n api_instance = giphy_client.DefaultApi()\n api_key = 'dc6zaTOxFJmzC'\n tag = 'lgbtq'\n\n try:\n api_response = api_instance.gifs_random_get(api_key, tag=tag)\n image_url = api_response.data.image_url\n except ApiException as e:\n print(\"Exception when calling DefaultApi->gifs_random_get: %s\\n\" % e)\n\n return render_template('index.html', image_url=image_url)\n\n\n@admin.route('/fact/')\n@login_required\ndef preview_fact(fact_id):\n fact = Post.query.filter_by(id=fact_id).first()\n if fact is None:\n flash('Fact not found')\n return render_template('fact.html', fact=fact)\n\n\n@admin.route('/fact/delete/')\n@login_required\ndef delete_fact(fact_id):\n fact = Post.query.filter_by(id=fact_id).first()\n if fact is None:\n flash('Fact not found')\n db.session.delete(fact)\n db.session.commit()\n return redirect(url_for('.facts'))\n\n\n@admin.route('/fact/edit/', methods=['GET', 'POST'])\n@login_required\ndef edit_fact(fact_id):\n fact = Post.query.filter_by(id=fact_id).first()\n if fact is None:\n flash('Fact not found')\n\n form = PostFactForm(obj=fact)\n form.submit.label.text = \"Save changes\"\n\n if form.validate_on_submit():\n fact.header = form.header.data\n fact.title = form.title.data\n fact.title_url = form.title_url.data\n fact.image_url = form.image_url.data\n fact.body = form.body.data\n\n original_additional_facts = AdditionalFact.query.filter_by(post_id=fact.id).all()\n updated_additional_facts = form.additional_facts.data\n\n # Remove/update old additional facts\n for of in original_additional_facts:\n db.session.delete(of)\n\n # Add new additional facts\n for af in updated_additional_facts:\n additionalFact = AdditionalFact(post_id=fact.id, title=af['title'], text=af['text'], is_long=af['is_long'])\n db.session.add(additionalFact)\n\n original_tag_buttons = TagButton.query.filter_by(post_id=fact.id).all()\n updated_tag_buttons = form.tag_buttons.data\n\n # Remove/update old tag buttons\n for ob in original_tag_buttons:\n db.session.delete(ob)\n\n # Add new tag buttons\n for tag in updated_tag_buttons:\n tagButton = TagButton(post_id=fact.id, title=tag['title'], url=tag['url'])\n db.session.add(tagButton)\n\n db.session.commit()\n return redirect(url_for('.preview_fact', fact_id=fact.id))\n\n else:\n flash(form.errors)\n\n return render_template('post_fact.html', title='Post an LGBTQ Fact', form=form)\n\n\n@admin.route('/fact/reset/')\n@login_required\ndef reset_fact(fact_id):\n fact = 
Post.query.filter_by(id=fact_id).first()\n if fact is None:\n flash('Fact not found')\n\n fact.shown = False\n db.session.commit()\n return redirect(url_for('.facts'))\n\n\n@admin.route('/facts')\n@login_required\ndef facts():\n facts = Post.query.all()\n return render_template('facts.html', title=\"🌈 aquabot | LGBTQ Pride Facts\", facts=facts)\n\n\n@admin.route('/login', methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('.index'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.username.data).first()\n if user is None or not user.check_password(form.password.data):\n flash('Invalid username or password')\n return redirect(url_for('.login'))\n login_user(user, remember=form.remember_me.data)\n next_page = request.args.get(\"next\")\n if not next_page or url_parse(next_page).netloc != '':\n next_page = url_for('.index')\n return redirect(next_page)\n return render_template('login.html', title='Sign In', form=form)\n\n\n@admin.route('/register', methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('.index'))\n form = RegistrationForm()\n if request.method == 'POST' and form.validate_on_submit():\n user = User(username=form.username.data, email=form.email.data)\n user.set_password(form.password.data)\n db.session.add(user)\n db.session.commit()\n flash('Congratulations, you are now a registered user!')\n return redirect(url_for('.login'))\n return render_template('register.html', title='Register', form=form)\n\n\n@admin.route('/post_fact', methods=['GET', 'POST'])\n@login_required\ndef post_fact():\n form = PostFactForm()\n\n if form.validate_on_submit():\n post = Post(user_id=current_user.id,\n header=form.header.data,\n title=form.title.data,\n title_url=form.title_url.data,\n image_url=form.image_url.data,\n body=form.body.data)\n db.session.add(post)\n db.session.flush()\n\n for fact in form.additional_facts.data:\n additionalFact = AdditionalFact(post_id=post.id,\n title=fact['title'],\n text=fact['text'],\n is_long=fact['is_long'])\n db.session.add(additionalFact)\n\n for tag in form.tag_buttons.data:\n tagButton = TagButton(post_id=post.id, title=tag['title'], url=tag['url'])\n db.session.add(tagButton)\n\n\n db.session.commit()\n return redirect(url_for('.facts'))\n\n else:\n flash(form.errors)\n\n return render_template('post_fact.html', title='Post an LGBTQ Fact', form=form)\n\n\n@admin.route('/import_csv', methods=['GET', 'POST'])\n@login_required\ndef import_csv():\n form = ImportCSVFileForm()\n\n if form.validate_on_submit():\n additional_fact_count = 3\n tag_button_count = 3\n\n file = request.files[form.csv_file.name]\n stream = io.StringIO(file.stream.read().decode(\"UTF8\"), newline=None)\n csv_input = csv.DictReader(stream)\n\n for row in csv_input:\n if row['completed'] == 'TRUE':\n post = Post(user_id=current_user.id,\n header=row['header'],\n title=row['title'],\n title_url=row['title_url'],\n image_url=row['image_url'],\n body=row['body'])\n db.session.add(post)\n db.session.flush()\n\n for index in range(1, additional_fact_count + 1):\n if row['fact_title_' + str(index)] != None:\n additionalFact = AdditionalFact(post_id=post.id,\n title=row['fact_title_' + str(index)],\n text=row['fact_text_' + str(index)])\n db.session.add(additionalFact)\n\n for index in range(1, tag_button_count + 1):\n if row['button_title_' + str(index)] != None:\n tagButton = TagButton(post_id=post.id,\n title=row['button_title_' + str(index)],\n 
url=row['button_url_' + str(index)])\n db.session.add(tagButton)\n\n db.session.commit()\n\n return redirect(url_for('.facts'))\n\n else:\n flash(form.errors)\n\n return render_template('import_csv.html', title='aquabot | CSV Import', form=form)\n\n@admin.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('.index'))\n","sub_path":"aquabot/admin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"262924429","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.main),\n url(r'^address/', views.address),\n url(r'^cart/', views.cart),\n url(r'^catalog/', views.catalog),\n url(r'^contacts/', views.contacts),\n url(r'^payments/', views.payments),\n url(r'^product/', views.product),\n url(r'^search/', views.catalog),\n url(r'^srresnot', views.srresnot),\n url(r'^tmi/', views.tmi),\n url(r'^tml/', views.tml),\n url(r'^profile/', views.profile),\n url(r'^changeprofile/', views.changeprofile),\n url(r'^addtocart/', views.addtocart),\n url(r'^islogin/', views.islogin),\n url(r'^splitsearch/', views.splitsearch),\n]\n","sub_path":"Yabloko_shop/main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"135243665","text":"from HTMLParser import HTMLParser, HTMLParseError\n\nclass ScoreParser(HTMLParser):\n def __init__(self, file):\n HTMLParser.__init__(self);\n self.inGame = False;\n self.inTeam1 = False;\n self.inTeam2 = False;\n self.inScoreTeam1 = False;\n self.inScoreTeam2 = False;\n self.inScore = False;\n self.inLiveGame = False;\n self.inPreGame = False;\n self.f = file;\n self.inEm = False;\n\n def handle_starttag(self, tag, attrs):\n if (tag == \"tr\"):\n for attr in attrs:\n if (attr[0] == \"class\" and len(attr) > 1):\n if (attr[1].find(\"game link\") != -1):\n self.inGame = True;\n if (attr[1].find(\"game live link\") != -1):\n self.inLiveGame = True;\n if (attr[1].find(\"game pre link\") != -1):\n self.inPreGame = True;\n\n if (tag == \"td\" and (self.inGame or self.inLiveGame or self.inPreGame)):\n for attr in attrs:\n if (attr[0] == \"class\" and len(attr) > 1):\n if (attr[1].find(\"away\") != -1):\n self.inTeam1 = True;\n if (attr[1].find(\"home\") != -1):\n self.inTeam2 = True;\n if (attr[1].find(\"score\") != -1):\n self.inScore = True;\n\n if (tag == \"span\" and (self.inGame or self.inLiveGame or self.inPreGame) and self.inScore):\n for attr in attrs:\n if (attr[0] == \"class\" and len(attr) > 1):\n if (attr[1].find(\"away\") != -1):\n self.inScoreTeam1 = True;\n if (attr[1].find(\"home\") != -1):\n self.inScoreTeam2 = True;\n\n if (tag == \"em\" and (self.inTeam1 or self.inTeam2)):\n self.inEm = True;\n\n def handle_endtag(self, tag):\n if (tag == \"tr\"):\n self.inGame = False;\n self.inLiveGame = False;\n self.inPreGame = False;\n if (tag == \"td\"):\n self.inTeam1 = False;\n self.inTeam2 = False;\n self.inScore = False;\n if (tag == \"em\"):\n self.inEm = False;\n if (tag == \"span\"):\n self.inScoreTeam1 = False;\n self.inScoreTeam2 = False;\n\n def handle_data(self, data):\n data = data.strip();\n if (self.inGame or self.inLiveGame or self.inPreGame):\n if self.inTeam1 and self.inEm:\n self.f.write(data + \";\");\n if self.inTeam2 and self.inEm:\n self.f.write(data + \";\");\n if (self.inLiveGame):\n self.f.write(\"live//\");\n elif (self.inGame):\n 
self.f.write(\"completed//\");\n elif (self.inPreGame):\n self.f.write(\"pregame//\");\n if self.inScoreTeam1:\n self.f.write(data + \";\");\n if self.inScoreTeam2:\n self.f.write(data + \";\");\n","sub_path":"htmlParser.py","file_name":"htmlParser.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"365197406","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jul 25 10:53:42 2018\r\n\r\n@author: CAZ2BJ\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport sys\r\nsys.path.append('U:/!Python')\r\nimport os\r\nimport pandas as pd\r\nimport functions_io as fio\r\nimport functions_csv as fcsv\r\nimport functions_plot as fplot\r\nimport functions_excel as fexcel\r\nimport functions_data_processing as fdp\r\n\r\ncwd = fio.get_script_dir(__file__) \r\nhydra_name = \"hydra_after_test\"\r\nresults_dir = 'results'\r\nincluded_dirs_keywords = ['samples']\r\nexcluded_dirs_keywords = ['~', 'backup']\r\n\r\n\r\n\r\n# creating dir structure for results\r\nos.makedirs('{}/{}'.format(cwd, results_dir), exist_ok=True)\r\n\r\n\r\nabs_paths = fio.get_files(cwd, extension = ['xlsm'], contains = included_dirs_keywords, not_contains = excluded_dirs_keywords, print_path=False)\r\nroot_dirs = fio.get_parts_of_paths_list(abs_paths, -3)\r\nsample_dirs = fio.get_parts_of_paths_list(abs_paths, -2)\r\nfilenames = fio.get_parts_of_paths_list(abs_paths, -1)\r\n# hydra file\r\ntry:\r\n hydra_abs_path = fio.get_files(cwd, extension = ['xlsx'], contains = [hydra_name], not_contains = ['~'], print_path=False)\r\n hydra_frame = pd.read_excel(hydra_abs_path[0]) \r\nexcept Exception: \r\n print('{}{}{}'.format('file: ', hydra_name, ' not found'))\r\n input(\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\")\r\n raise\r\ntry: \r\n hydra_frame = hydra_frame[['Ident No.', '066_Diff_BMP_VIS_P_Mess_6_0bar_MP4_Sp1', '006_BerechnetesHubvolumen_6_0bar_3_Sp1']] \r\nexcept Exception: \r\n print('{}{}{}'.format('required column in file :', hydra_name, ' missing'))\r\n input(\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\")\r\n raise\r\n \r\nhydra_samples_sorted = sorted(list(hydra_frame['Ident No.'])) \r\nsamples_sorted = sorted(sample_dirs)\r\n\r\n# controll for equality between samples and hydra samples \r\n \r\nif len(samples_sorted) != len(hydra_samples_sorted): \r\n print('{}'.format('not the same number of samples in data and hydra'))\r\n input(\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\") \r\n raise \r\nfor sam, ple in zip(samples_sorted, hydra_samples_sorted):\r\n if sam == ple:\r\n pass\r\n else:\r\n print('{}'.format('files not equal'))\r\n input(\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\") \r\n raise\r\nprint('All data files found and valid') \r\ninput(\"Press ENTER to continue..\") \r\n\r\n\r\n\r\nc_map = fplot.C_map()\r\nc_map\r\n\r\n\r\nvolumetric_constant = 31.75\r\n\r\n\r\nsamples = sorted([ a for a in hydra_frame['Ident No.'].unique()])\r\n\r\nprint('sample', ' ' , 'volumetric', ' ' , 'p_diff')\r\nfor sample in samples:\r\n p_diff = hydra_frame.loc[(hydra_frame['Ident No.'] == sample),['066_Diff_BMP_VIS_P_Mess_6_0bar_MP4_Sp1']].values[0]\r\n volumetric = hydra_frame.loc[(hydra_frame['Ident No.'] == sample),['006_BerechnetesHubvolumen_6_0bar_3_Sp1']].values[0] \r\n plt.plot( volumetric/volumetric_constant -1 , p_diff , color = c_map.get_color(False), marker = 'o', markeredgecolor = 'k')\r\n fplot.add_label(sample, c_map.get_color(True),0, '-', 'o' )\r\n \r\n 
print(sample, ' ' , volumetric/volumetric_constant -1, ' ' , p_diff)\r\n\r\n\r\nx = np.linspace(-0.08,0.08,20000)\r\n\r\nover_200_ppm = 6372549.02 * x**6\t-800150.8296 * x**5 + 19541.8552 * x**4 + 562.4057315 * x**3 -18.20980735 * x**2 -9.135497395 * x**1 + 1.300099753 * x**0\r\nover_10_ppm = 4017242.862 * x**6 -438603.9335 * x**5\t-8783.299926 * x**4 +\t1736.291492 * x**3 + 8.192154187 * x**2\t-10.47552085 * x**1 + 0.990573908 * x**0\r\n\r\nunder_200_ppm = -59264.74327 * x**6\t-4554.65587\t* x**5 + 335.2822676 * x**4 + 57.22096531 * x**3 + 2.013566176 * x**2 -8.579590974 * x**1 -0.383176295 * x**0\r\n\r\nunder_10_ppm = -169755.1637 * x**6 -13455.58466* x**5 + 3216.230457 * x**4 + 89.40620783 * x**3 -19.32913576 * x**2 -8.298948559 * x**1 -0.070463402 * x**0\r\n\r\naa = np.where(under_200_ppm < -0.5)[0][0]\r\n\r\nstart = np.where( (x >= -0.04) )[0][0]\r\nunder_200_ppm[start:aa] = -0.5\r\n\r\n\r\n#under_10_ppm[np.where(x > -0.04 and x < 0.04)] = -0.5\r\n\r\nfplot.modify_ticks(['-8%', '-4%', '-0%', '4%', '8%'], [-0.08,-0.04,0,0.04,0.08])\r\nplt.xlim(-0.08,0.08)\r\nplt.ylim(-1,1.5)\r\nplt.fill_between(x,-10, under_200_ppm, facecolor='red', alpha = 0.5)\r\nplt.fill_between(x,under_200_ppm, under_10_ppm, facecolor='g', alpha = 0.5)\r\nplt.fill_between(x,under_10_ppm, over_10_ppm, facecolor='green', alpha = 0.5)\r\nplt.fill_between(x,over_10_ppm, over_200_ppm, facecolor='g', alpha = 0.5)\r\nplt.fill_between(x,over_200_ppm, 10, facecolor='red', alpha = 0.5)\r\nplt.grid(color = 'k')\r\n\r\nplt.xlabel('Volumetric (compared to nominal)')\r\nplt.ylabel('p_diff [bar]')\r\n\r\nplt.savefig('{}/{}/{}'.format(cwd, results_dir, 'error_risk.png' ))\r\n\r\nprint()\r\ninput(\"press Enter to exit ;)\") \r\n\r\n\r\n \r\n\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n ","sub_path":"calc_error_risk_map.py","file_name":"calc_error_risk_map.py","file_ext":"py","file_size_in_byte":4983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"215482936","text":"from keras.models import Sequential\nfrom keras.layers import Activation, Dense\nfrom keras.optimizers import Adam\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.applications.xception import Xception, preprocess_input\nfrom keras.models import load_model\nfrom keras.backend.tensorflow_backend import set_session\nimport keras.callbacks as kcall\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport cv2\n\nimport os\nprint(os.listdir(\"./chest_xray\"))\n\nconfig = tf.ConfigProto() #device_count = {'GPU': 0}\nconfig.gpu_options.allow_growth = True\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.7\nset_session(tf.Session(config=config))\n\noutput_classes = 2\nlearning_rate = 0.0001\nimg_width, img_height,channel = 299, 299, 1\ntraining_examples = 1216\nbatch_size = 1\nepochs = 5\nresume_model = False\ntraining_data_dir = './chest_xray/train'\ntest_data_dir = './chest_xray/test'\nval_data_dir = './chest_xray/validation'\ntrained_model_dir = './chest_xray/pretreined_models/xception_weights_tf_dim_ordering_tf_kernels_notop.h5'\n\nif resume_model == False:\n ## Model Defination\n model = Sequential()\n model.add(Xception(weights=trained_model_dir , include_top=False,pooling = 'avg'))\n #model.add(Dense(units = 100 , activation = 'relu'))\n model.add(Dense(units=output_classes, activation='softmax'))\n\n model.layers[0].trainable = True\n\n 
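    ## The base network is left trainable above, so training fine-tunes all
    ## of Xception's pretrained weights. A common alternative (a sketch, not
    ## what this script does) is to freeze the base and train only the newly
    ## added classifier head:
    # model.layers[0].trainable = False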
model.compile(loss='categorical_crossentropy',\n optimizer=Adam(lr=learning_rate),\n metrics=['accuracy'])\n\n\n ## Image generator function for training and validation\n\n def preprocess_input(img):\n #img = cv2.cvtColor(img, cv2.COLOR_BGRA2GRAY)\n img = cv2.resize(img, (299,299))\n return img\n\n img_generator = ImageDataGenerator(preprocessing_function=preprocess_input)\n\n\n\n train_img_generator = img_generator.flow_from_directory(\n training_data_dir,\n target_size = (img_width,img_height),\n batch_size = batch_size,\n class_mode = 'categorical')\n\n val_img_generator = img_generator.flow_from_directory(\n val_data_dir,\n target_size = (img_width,img_height),\n class_mode = 'categorical')\n\n for i, layer in enumerate(model.layers):\n print('Layer: ',i+1,' Name: ', layer.name)\n\n## Callbacks for model training\nearly_stop = kcall.EarlyStopping(monitor='acc', min_delta=0.0001)\ntensorboard = kcall.TensorBoard(log_dir='./tensorboard-logs', write_grads=1, batch_size=batch_size)\n\n\nclass LossHistory(kcall.Callback):\n def on_train_begin(self, logs={}):\n self.losses = []\n self.acc = []\n\n def on_batch_end(self, batch, logs={}):\n self.losses.append(logs.get('loss'))\n self.acc.append(logs.get('acc'))\n\n\nhistory = LossHistory()\n\n## Training only the newly added layer\nif resume_model:\n model = load_model('chest_xray.h5')\nelse:\n model.fit_generator(train_img_generator,\n steps_per_epoch = training_examples // batch_size,\n epochs = epochs,\n validation_data = val_img_generator,\n\t\tvalidation_steps = 1,\n\t\tcallbacks=[early_stop,history])\n\ntest_img_generator = img_generator.flow_from_directory(\n test_data_dir,\n target_size = (img_width,img_height),\n class_mode = 'categorical',\n batch_size= batch_size,\n\t\t\t shuffle = False)\n\ntest_accu = model.evaluate_generator(test_img_generator,steps=624 // batch_size)\nprint('Accuracy on test data is:', test_accu[1])\nprint('Loss on test data is:', test_accu[0])\n\nplt.plot(history.losses,'b--',label='Training')\nplt.plot(len(history.losses)-1,test_accu[0],'go',label = 'Test')\n\nplt.xlabel('# of batches trained')\nplt.ylabel('Training loss')\n\nplt.title('Training loss vs batches trained')\n\nplt.legend()\n\nplt.ylim(0,1.2)\nplt.show()\n\nplt.plot(history.acc,'--',label= 'Training')\nplt.plot(len(history.acc)-1,test_accu[1],'go',label='Test')\n\nplt.xlabel('# of batches trained')\nplt.ylabel('Training accuracy')\n\nplt.title('Training accuracy vs batches trained')\n\nplt.legend(loc=4)\nplt.ylim(0,1.1)\nplt.show()","sub_path":"kernel1.py","file_name":"kernel1.py","file_ext":"py","file_size_in_byte":4101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"373150222","text":"#The saddle back search \n#EWD-934 http://www.cs.utexas.edu/users/EWD/ewd09xx/EWD934.PDF\n\"\"\"\n- If f(p, q) < z, since f is strictly increasing, for all 0 ≤ y < q, we have f(p, y) < z. We can drop all points in the vertical line section (in red color);\n- If f(p, q) > z, then f(x, q) > z for all x > p. 
We can drop all points in the horizontal line section (in blue color);\n- Otherwise if f(p, q) = z, we mark (p, q) as one solution, then both line sections can be dropped.\n\"\"\"\nclass Solution(object):\n def searchMatrix(self, matrix, target):\n \"\"\"\n :type matrix: List[List[int]]\n :type target: int\n :rtype: bool\n \"\"\"\n if not matrix or not matrix[0]:\n return False\n x, y = 0, len(matrix[0]) - 1\n while y >= 0 and x <= len(matrix)-1:\n v = matrix[x][y]\n if v < target:\n x += 1\n elif v > target:\n y -= 1\n else:\n return True\n return False\n\nmatrix = [\n [1, 3, 5, 7],\n [10, 11, 16, 20],\n [23, 30, 34, 50]\n]\ntarget = 11\nr = Solution().searchMatrix(matrix, target)\nprint(r)\n","sub_path":"search-a-2d-matrix.py","file_name":"search-a-2d-matrix.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"360127490","text":"import setuptools\n\nwith open('README.md') as f:\n long_description = f.read()\n\nsetuptools.setup(name='subarray',\n version='0.2',\n description='get 2D sub array slices from a large 2D array',\n author='Tasin Nawaz',\n author_email='tasin.buet@gmail.com',\n license='TN',\n url='https://github.com/tasin-megamind/subarray',\n long_description=long_description,\n long_description_content_type='text/markdown',\n packages=setuptools.find_packages(),\n include_package_data=True,\n install_requires=[\n ],\n zip_safe=False)\n\n","sub_path":"pypi_install_script/subarray-0.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"228407214","text":"def carga():\r\n lista = []\r\n for x in range(5):\r\n lista.append((input(\"Ingrese el nombre del pais: \"),int(input(\"Ingrese la cantidad de habitantes que tiene: \"))))\r\n return lista\r\n\r\ndef imprimir(lista: list):\r\n print(\"Los paises ingresados son:\")\r\n for x in lista:\r\n print(f\"Nombre: {x[0]} - Habitantes: {x[1]}\")\r\n\r\ndef mayor(lista: list):\r\n mayor = (0,0)\r\n for x in lista:\r\n if (x[1] > mayor[1]):\r\n mayor = x\r\n print(f\"El pais con mayor cantidad de habitantes es {mayor[0]} con {mayor[1]} habitantes\")\r\n\r\nlista = carga()\r\nimprimir(lista)\r\nmayor(lista)","sub_path":"guia_2/eje2_1.py","file_name":"eje2_1.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"90247033","text":"from Constants import *\nfrom math import *\nimport numpy as np\n#import matplotlib.pyplt as plt\n\n#Calculations of the Freuencies of the Qbits within a specific range\n\nomega12 = 0\nphi_bar12 = 0\nomega3 = 0\nphi_bar3 = 0\nfile12 = open(\"Lookup_tbl_12.dat\",\"w\")\nfile3 = open(\"Lookup_tbl_3.dat\",\"w\")\n\nfor i in range(40,1000):\n\tfor j in range(300,1000):\n\t\ti1 = i * 0.1\n\t\tj1 = j * 0.1\n\t\tomega12 = (1 / (hbar)) * sqrt(8 * E_CQ(j1) * (E_J(i1) + (4 * E_L(100e-9)))) * (1e-9) # frequencies in natural frequency units (Hertz)\n\t\tphi_bar12 = ((2*E_CQ(j1))/(E_J(i1) + (4 * E_L(100e-9))))**0.25 # strength No units\n\n\t\tomega3 = (1 / (hbar)) * sqrt(8 * E_CQ(j1) * (E_J(i1) + (8 * E_L(100e-9)))) * (1e-9)\n\t\tphi_bar3 = ((2*E_CQ(j1))/(E_J(i1) + (8 * E_L(100e-9))))**0.25\n\t\t\n\t\tfile12.write(str(E_J(i1)) + \" \" + str(E_CQ(j1)) + \" \" + str(omega12) + \" \" + str(phi_bar12) + \"\\n\")\n\t\tfile3.write(str(E_J(i1)) + \" \" + str(E_CQ(j1)) + \" \" + str(omega3) + \" \" + str(phi_bar3) + 
\"\\n\")\n\nfile12.close()\nfile3.close()","sub_path":"Functional code/LookUp Table/LookupTable.py","file_name":"LookupTable.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"316485349","text":"import frappe\nfrom erpnext.controllers.item_variant import make_variant_item_code\nfrom frappe.utils import strip\n\n\ndef autoname(doc, method):\n if doc.pni_material_type == \"Machinery\":\n pass\n elif doc.pni_material_type == \"Machine Design Item Code\":\n doc.item_code = \"-\".join((doc.main_category_code, doc.sub_category_code,\n doc.level_3_category_code, doc.item_manual_code))\n doc.name = doc.item_code\n else:\n if not doc.pni_item_code and not doc.variant_of:\n frappe.throw(\"PNI Item Code Mandatory \"+doc.item_code)\n if doc.pni_item_code:\n doc.item_code = doc.pni_item_code\n\n doc.item_code = strip(doc.item_code)\n doc.name = doc.item_code\n\n\ndef item_validate(doc, method):\n if doc.old_item_code == 'NA':\n return None\n else:\n get_item = frappe.get_all(\n 'Item', filters={'old_item_code': doc.old_item_code, 'name': [\"!=\", doc.name]}, fields=['name'])\n if get_item:\n frappe.throw(\"Old Item {0} already in Item {1}\".format(\n doc.old_item_code, get_item[0].name))\n doc.old_item_code = \"\"\n if doc.old_item_code == doc.name:\n frappe.throw(\"Same Item cannot be in Old Item\")\n doc.old_item_code = \"\"\n if not doc.is_stock_item:\n for raw in doc.item_defaults:\n if not raw.expense_account:\n frappe.throw(\n \"Expense Account is Mandatory in Item Default Table\")\n if doc.main_category and doc.sub_category and doc.level_3_category:\n item_group = frappe.get_value(\"Item Group\", {\n \"main_category\": doc.main_category, \"sub_category\": doc.sub_category, \"level_3_category\": doc.level_3_category})\n if item_group:\n doc.item_group = item_group\n else:\n doc.item_group = create_item_group(doc.main_category,\n doc.sub_category, doc.level_3_category)\n\n\ndef create_item_group(main_category, sub_category, level_3_category):\n doc = frappe.get_doc({\n \"doctype\": \"Item Group\",\n \"item_group_name\": main_category + \" \" + sub_category + \" \"+level_3_category,\n \"main_category\": main_category,\n \"sub_category\": sub_category,\n \"level_3_category\": level_3_category,\n \"parent_item_group\": \"Auto Group\"\n })\n doc.insert(ignore_permissions=True)\n return doc.name\n\n\n@frappe.whitelist()\ndef get_job_card(item, job_card_status):\n # Loss Time [ { (Setup Time) + (Total Completed Qty * Cycle Time) + (Inspection Time) } - {Total Time in Mins} ]\n if job_card_status == \"All\":\n job_card_status = \"\"\n else:\n job_card_status = \" and jc.status = '{status}' \".format(\n status=job_card_status)\n return frappe.db.sql(\"\"\"\n\t\tselect\n jc.docstatus,\n workstation.department,\n\t\t\tjc.workstation,\n jc.status,\n sum(jc.setup_time) as setup_time,\n sum(jc.pni_programme_cycle_time) as pni_programme_cycle_time,\n (sum(jc.setup_time) + sum(jc.total_completed_qty * jc.pni_programme_cycle_time ) + sum(jc.inspection_time) - sum(jc.total_time_in_mins) ) as loss_time,\n sum(jc.rework_time) as rework_time,\n sum(jc.inspection_time) as inspection_time,\n sum(jc.pni_rejected_qty) as pni_rejected_qty,\n sum(jc.pni_rework_qty) as pni_rework_qty,\n sum(jc.pni_setup_rejection_qty) as pni_setup_rejection_qty,\n\t\t\tsum(jc.for_quantity) as for_quantity,\n\t\t\tsum(jc.total_completed_qty) as total_completed_qty,\n (sum(jc.for_quantity) - sum(jc.total_completed_qty)) as ramaining_qty\n\t\tfrom 
\n `tabJob Card` as jc, `tabWorkstation` as workstation\n\t\twhere\n jc.workstation = workstation.name and\n\t\t\tjc.production_item = '{production_item}' and\n jc.docstatus <> 2\n {status}\n group by\n jc.workstation,jc.status,jc.docstatus\n\t\"\"\".format(production_item=item, status=job_card_status), as_dict=1)\n # return frappe.get_all(\"Job Card\", {\"production_item\": item, \"status\": job_card_status}, [\"workstation\", \"for_quantity\", \"total_completed_qty\", \" (for_quantity - total_completed_qty) as ramaining_qty\"])\n","sub_path":"pni_customization/utility/item_utility.py","file_name":"item_utility.py","file_ext":"py","file_size_in_byte":4297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"300040152","text":"import xadmin\n\nfrom .models import CollegeOrg,Teacher\n\n\nclass CollegeOrgAdmin(object):\n\n list_display = ['name', 'desc', 'click_nums', 'fav_nums', 'image', 'address', 'add_time']\n search_fields = ['name', 'desc', 'click_nums', 'fav_nums', 'image', 'address']\n list_filter = ['name', 'desc', 'click_nums', 'fav_nums', 'image', 'address', 'add_time']\n\n\nclass TeacherAdmin(object):\n\n list_display = ['college_org', 'name', 'work_years', 'address', 'points', 'click_nums', 'image', 'fav_nums', 'add_time']\n search_fields = ['college_org', 'name', 'work_years', 'address', 'points', 'click_nums', 'image', 'fav_nums']\n list_filter = ['college_org', 'name', 'work_years', 'address', 'points', 'click_nums', 'image', 'fav_nums', 'add_time']\n\n\nxadmin.site.register(CollegeOrg, CollegeOrgAdmin)\nxadmin.site.register(Teacher, TeacherAdmin)","sub_path":"apps/colleges/adminx.py","file_name":"adminx.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"651321334","text":"import sys\nsys.path.append('../')\nfrom config import config\nfrom flask_socketio import SocketIO, emit\nfrom engineio.payload import Payload\nfrom werkzeug.exceptions import HTTPException\nfrom flask import Flask, render_template,make_response,send_from_directory, request\nimport os\nimport logging\nimport json\nimport stateMachine\nfrom state import Events\nimport subprocess\nimport my_states\nimport os\nfrom urllib.parse import quote, unquote\nimport posix_ipc\nfrom threading import Thread\n\n__author__ = 'Vincent LAMBERT'\n\napp = Flask(__name__)\napp.config['DEBUG'] = False\napp.logger.disabled = True\nlog = logging.getLogger('werkzeug')\nlog.disabled = True\nPayload.max_decode_packets = 500\nsocketio = SocketIO(app, async_mode=None, logger=False, engineio_logger=False)\n\ndef init():\n global ui_languages\n global stateMachine\n with open(\"ui_language.json\", \"r\", encoding='utf-8') as read_file:\n ui_languages = json.load(read_file) \n thread_notification = Thread(target=catch_send_notification, args=(socketio,))\n thread_notification.setDaemon(True)\n thread_notification.start()\n stateMachine = stateMachine.StateMachine(socketio)\n\ndef load_coordinates(file_path):\n positions_list = []\n try:\n with open(file_path) as file:\n for line in file:\n if line != \"\":\n coords = list(map(float, line.split(\" \")))\n positions_list.append([coords[0],coords[1]])\n except OSError as e:\n return None\n return positions_list\n\ndef load_ai_list(dir_path):\n ia_list = []\n for file in os.listdir(dir_path):\n if file.endswith(\".conf\"):\n ia_list.append(file.split(\".conf\")[0])\n return ia_list\n\ndef load_field_list(dir_path):\n field_list = []\n for file in 
os.listdir(dir_path):\n if file.endswith(\".txt\"):\n field_list.append(unquote(file.split(\".txt\")[0]))\n return field_list\n\ndef get_other_field():\n current_field = subprocess.run([\"readlink\",\"../field.txt\"], stdout=subprocess.PIPE).stdout.decode('utf-8').replace(\"fields/\", \"\")[:-5]\n field_list = load_field_list(\"../fields\")\n if len(field_list)>=2:\n coords_other = []\n for field_name in field_list:\n if field_name != unquote(current_field):\n with open(\"../fields/\"+quote(field_name,safe=\"\")+\".txt\") as file:\n points = file.readlines()\n \n coords = list()\n for coord in points:\n coord = coord.replace(\"\\n\",\"\").split(\" \")\n coords.append([float(coord[1]),float(coord[0])])\n coords.append(coords[0])\n coords_other.append(coords)\n return coords_other\n return list()\n\ndef formattingFieldPointsForSend(corners):\n coords = list()\n\n for coord in corners:\n coords.append([coord[1],coord[0]])\n\n coords.append(coords[0])\n\n return coords\n\ndef catch_send_notification(socketio: SocketIO):\n try:\n posix_ipc.unlink_message_queue(config.QUEUE_NAME_UI_NOTIFICATION)\n except:\n pass\n\n notificationQueue = posix_ipc.MessageQueue(config.QUEUE_NAME_UI_NOTIFICATION, posix_ipc.O_CREX)\n \n ui_language = config.UI_LANGUAGE\n\n while True:\n try:\n notification = notificationQueue.receive(timeout=1)\n \n message_name = json.loads(notification[0])[\"message_name\"]\n message = ui_languages[message_name][ui_language]\n \n socketio.emit('notification', {\"message_name\":message_name,\"message\":message} , namespace='/broadcast', broadcast=True)\n except:\n continue\n\n@socketio.on('data', namespace='/server')\ndef on_socket_data(data):\n if \"type\" in data: \n if data[\"type\"] == \"joystick\" and str(stateMachine.currentState) in [\"WaitWorkingState\",\"CreateFieldState\"]:\n stateMachine.on_socket_data(data)\n elif data[\"type\"] == \"field\":\n stateMachine.on_event(Events.CREATE_FIELD)\n stateMachine.on_socket_data(data)\n elif data[\"type\"] == \"field_name\":\n stateMachine.on_socket_data(data)\n stateMachine.on_event(Events.VALIDATE_FIELD_NAME)\n elif data[\"type\"] == \"validerZone\":\n data[\"client_id\"] = request.sid\n stateMachine.on_socket_data(data)\n stateMachine.on_event(Events.VALIDATE_FIELD)\n elif data[\"type\"] == \"start\":\n if data[\"audit\"]:\n stateMachine.on_event(Events.START_AUDIT)\n else:\n stateMachine.on_event(Events.START_MAIN)\n elif data[\"type\"] == \"continue\":\n if data[\"audit\"]:\n stateMachine.on_event(Events.CONTINUE_AUDIT)\n else:\n stateMachine.on_event(Events.CONTINUE_MAIN)\n elif data[\"type\"] == \"stop\":\n stateMachine.on_event(Events.STOP)\n elif data[\"type\"] == \"allChecked\":\n stateMachine.on_socket_data(data)\n stateMachine.on_event(Events.LIST_VALIDATION)\n elif data[\"type\"] == \"wheel\":\n stateMachine.on_event(Events.WHEEL)\n elif data[\"type\"] == \"modifyZone\":\n stateMachine.on_socket_data(data)\n elif data[\"type\"] == \"getField\":\n stateMachine.on_socket_data(data)\n elif data[\"type\"] == \"getStats\":\n stateMachine.on_socket_data(data)\n elif data[\"type\"] == \"removeField\":\n if isinstance(stateMachine.currentState,my_states.WaitWorkingState):\n stateMachine.on_socket_data(data)\n\n\n@socketio.on('data', namespace='/broadcast')\ndef on_socket_broadcast(data):\n if data[\"type\"] == \"audit\":\n if data[\"audit\"]:\n stateMachine.on_event(Events.AUDIT_ENABLE)\n else:\n stateMachine.on_event(Events.AUDIT_DISABLE)\n emit(data[\"type\"], data, broadcast=True)\n\n@socketio.on('disconnect')\ndef 
on_disconnect():\n if str(stateMachine.currentState) in [\"WaitWorkingState\",\"CreateFieldState\"]:\n stateMachine.on_socket_data({\"type\": \"joystick\", \"x\" : 0 , \"y\" : 0})\n\n@app.route('/')\ndef index():\n #ui_language = \"fr\"\n ui_language = config.UI_LANGUAGE\n if ui_language not in ui_languages[\"Supported Language\"]:\n ui_language = \"en\"\n sn = config.ROBOT_SN\n #sn = \"SNXXX\"\n statusOfUIObject = stateMachine.getStatusOfControls()\n\n IA_list = load_ai_list(\"../yolo\")\n Field_list = load_field_list(\"../fields\")\n\n if not Field_list:\n Field_list = None\n current_field = None\n else:\n Field_list.sort(key=str.casefold)\n current_field = subprocess.run([\"readlink\",\"../field.txt\"], stdout=subprocess.PIPE).stdout.decode('utf-8').replace(\"fields/\", \"\")[:-5]\n current_field = unquote(current_field)\n\n if str(stateMachine.currentState) == \"ErrorState\":\n return render_template(\"Error.html\",sn=sn, error_message=ui_languages[\"Error_500\"][ui_language]), 500\n\n return render_template('UIRobot.html',sn=sn, statusOfUIObject=statusOfUIObject, ui_languages=ui_languages, ui_language=ui_language, Field_list=Field_list, current_field=current_field, IA_list=IA_list) \n\n@app.route('/map')\ndef maps():\n myCoords=[0,0]\n field = stateMachine.getField()\n if field is None:\n field = load_coordinates(\"../field.txt\")\n if field is None:\n return render_template('map.html', myCoords=myCoords)\n else:\n coords_other = get_other_field()\n coords_field = formattingFieldPointsForSend(field)\n if coords_other:\n return render_template('map.html', coords_field=coords_field, myCoords=myCoords, coords_other=coords_other)\n else:\n return render_template('map.html', coords_field=coords_field, myCoords=myCoords)\n\n@app.route('/offline.html')\ndef offline():\n sn = config.ROBOT_SN\n ui_language = config.UI_LANGUAGE\n if ui_language not in ui_languages[\"Supported Language\"]:\n ui_language = \"en\"\n return render_template('offline.html',sn=sn, ui_languages=ui_languages, ui_language=ui_language)\n\n@app.route('/styles.css')\ndef style():\n response=make_response(send_from_directory('static',filename='css/style.css'))\n response.headers['Content-Type'] = 'text/css'\n return response\n\n@app.errorhandler(Exception)\ndef handle_exception(e):\n # pass through HTTP errors\n if isinstance(e, HTTPException):\n return e\n\n # now you're handling non-HTTP exceptions only\n print(e)\n stateMachine.on_event(Events.ERROR)\n sn = config.ROBOT_SN\n ui_language = config.UI_LANGUAGE\n if ui_language not in ui_languages[\"Supported Language\"]:\n ui_language = \"en\"\n return render_template(\"Error.html\",sn=sn, error_message=ui_languages[\"Error_500\"][ui_language]), 500\n\n@app.route('/sw.js')\ndef worker():\n response=make_response(send_from_directory('static',filename='js/offline_worker.js'))\n response.headers['Content-Type'] = 'application/javascript'\n return response\n\n@app.route('/js/socket.io.min.js')\ndef socket_io_min():\n response=make_response(send_from_directory('static',filename='js/socket.io.min.js'))\n response.headers['Content-Type'] = 'application/javascript'\n return response\n\nif __name__ == \"__main__\":\n init()\n app.run(host=\"0.0.0.0\",port=\"80\",debug=True, use_reloader=False)","sub_path":"uiWebRobot/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":9262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"161147259","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\ngaetk2.forms.widgets - wtforms extension to render Bootstrap/HTML5 fields.\n\nbased on https://github.com/nickw444/wtforms-webwidgets\n\nCreated by Maximillian Dornseif on 2017-02-28.\nCoded (c) 2017, 2018. No rights reserved.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom abc import ABCMeta\nfrom functools import wraps\n\nimport wtforms.widgets.core as wt_core\nimport wtforms.widgets.html5 as wt_html5\n\nfrom wtforms.widgets.core import HTMLString\n\n\nclass CustomWidgetMixin(object):\n \"\"\"A mixin to apply to a widget to identify it as a non-wtforms builtin.\"\"\"\n\n __metaclass__ = ABCMeta\n __webwidget__ = True\n\n\ndef custom_widget_wrapper(cls):\n \"\"\"A decorator to wrap a widget to identify it as non-wtforms builtin.\"\"\"\n cls.__webwidget__ = True\n return cls\n\n\ndef render_field_errors(field):\n \"\"\"Render field errors as html.\"\"\"\n # https://getbootstrap.com/docs/4.0/components/forms/#server-side\n if field.errors:\n html = '<div class=\"invalid-feedback\">{errors}</div>'.format(\n errors='. '.join(field.errors)\n )\n return HTMLString(html)\n\n return None\n\n\ndef render_field_description(field):\n \"\"\"Render a field description as HTML.\"\"\"\n # https://getbootstrap.com/docs/4.0/components/forms/#help-text\n if hasattr(field, 'description') and field.description != '':\n html = '<small class=\"form-text text-muted\">{field.description}</small>'\n html = html.format(\n field=field\n )\n return HTMLString(html)\n\n return ''\n\n\ndef form_group_wrapped(f):\n \"\"\"Wrap a field within a bootstrap form-group.\n\n This decorator additionally sets has-error if the field has any errors.\n \"\"\"\n @wraps(f)\n def wrapped(self, field, *args, **kwargs):\n \"\"\"Closure that builds Bootstrap-style HTML around a form group.\"\"\"\n classes = ['form-group']\n if field.errors:\n classes.append('is-invalid')\n\n html = \"\"\"<div class=\"{classes}\">\n    {rendered_field}\n    </div>\"\"\".format(\n classes=' '.join(classes),\n rendered_field=f(self, field, *args, **kwargs)\n )\n return HTMLString(html)\n\n return wrapped\n\n\ndef meta_wrapped(f):\n \"\"\"Add a field label, errors, and a description (if it exists) to a field.\"\"\"\n @wraps(f)\n def wrapped(self, field, *args, **kwargs):\n \"\"\"Closure that builds Bootstrap-style HTML around a field.\"\"\"\n html = '{label}{errors}{original}{description}'.format(\n label=field.label(),\n errors=render_field_errors(field) or '',\n original=f(self, field, *args, **kwargs),\n description=render_field_description(field)\n )\n return HTMLString(html)\n return wrapped\n\n\ndef bootstrap_styled(cls=None, add_meta=True, form_group=True, input_class='form-control'):\n \"\"\"\n Wrap a widget to conform with Bootstrap's html control design.\n\n Args:\n input_class: Class to give to the rendered control.\n add_meta: Whether to add label, errors and description around the control.\n \"\"\"\n def real_decorator(cls):\n \"\"\"Function (closure) that is built and returned on demand.\"\"\"\n class NewClass(cls):\n \"\"\"Class (closure) that is built and returned on demand.\"\"\"\n\n pass\n\n NewClass.__name__ = cls.__name__\n newclass = custom_widget_wrapper(NewClass)\n\n _call = newclass.__call__\n\n def call(*args, **kwargs):\n \"\"\"Handler for `NewClass.__call__`.\"\"\"\n if input_class:\n kwargs.setdefault('class', input_class)\n\n return _call(*args, **kwargs)\n\n if add_meta:\n call = meta_wrapped(call)\n if form_group:\n call = form_group_wrapped(call)\n\n newclass.__call__ = call\n return newclass\n\n if cls:\n # Allow calling decorator(cls) instead of decorator()(cls)\n rv = real_decorator(cls)\n return rv\n\n return real_decorator\n\n\nclass BootstrapPlainCheckboxRadio(wt_core.CheckboxInput, CustomWidgetMixin):\n \"\"\"Abstract widget for a Bootstrap Checkbox or Radio implementation.\"\"\"\n\n __metaclass__ = ABCMeta\n\n def __call__(self, field, **kwargs):\n \"\"\"Render call.\"\"\"\n label = getattr(field, 'label', None)\n if 'label' in kwargs:\n label = kwargs.pop('label').strip()\n\n html = '<div class=\"{input_type}\"><label>{rendered_field} {label}</label></div>'.format(\n label=label,\n input_type=self.input_type,\n rendered_field=super(BootstrapPlainCheckboxRadio, self).__call__(field, **kwargs)\n )\n return HTMLString(html)\n\n\nclass PlainCheckbox(BootstrapPlainCheckboxRadio):\n \"\"\"Render a checkbox without any bootstrap container classes.\"\"\"\n\n def __init__(self):\n \"\"\"Set the correct input_type.\"\"\"\n super(PlainCheckbox, self).__init__()\n self.input_type = 'checkbox'\n\n\nclass PlainRadio(BootstrapPlainCheckboxRadio):\n \"\"\"Render a radio without any bootstrap container classes.\"\"\"\n\n def __init__(self):\n \"\"\"Set the correct input_type.\"\"\"\n super(PlainRadio, self).__init__()\n self.input_type = 'radio'\n\n\nCheckboxInput = PlainCheckbox\nRadioInput = PlainRadio\nInput = bootstrap_styled(wt_core.Input)\nTextInput = bootstrap_styled(wt_core.TextInput)\nPasswordInput = bootstrap_styled(wt_core.PasswordInput)\nHiddenInput = wt_core.HiddenInput # We don't need to style this.\nTextArea = bootstrap_styled(wt_core.TextArea)\nSelect = bootstrap_styled(wt_core.Select)\n\nColorInput = bootstrap_styled(wt_html5.ColorInput)\nDateInput = bootstrap_styled(wt_html5.DateInput)\nDateTimeInput = bootstrap_styled(wt_html5.DateTimeInput)\nDateTimeLocalInput = bootstrap_styled(wt_html5.DateTimeLocalInput)\nEmailInput = bootstrap_styled(wt_html5.EmailInput)\nMonthInput = bootstrap_styled(wt_html5.MonthInput)\nNumberInput = bootstrap_styled(wt_html5.NumberInput)\nRangeInput = bootstrap_styled(wt_html5.RangeInput)\nSearchInput = bootstrap_styled(wt_html5.SearchInput)\nTelInput = bootstrap_styled(wt_html5.TelInput)\nTimeInput = bootstrap_styled(wt_html5.TimeInput)\nURLInput = bootstrap_styled(wt_html5.URLInput)\nWeekInput = bootstrap_styled(wt_html5.WeekInput)\n\ndefault_widgets = {\n # Multi Types\n 'SelectMultipleField': Select(multiple=True),\n 'SelectField': Select(),\n 'QuerySelectMultipleField': Select(multiple=True),\n 'QuerySelectField': Select(),\n # 'RadioField': RadioGroup(),\n\n # Input Types\n 'DateField': DateInput(),\n # 'TextField': TextInput(),\n 'StringField': TextInput(),\n 'PasswordField': PasswordInput(),\n\n 'BooleanField': CheckboxInput(),\n # 'FileField': FileInput(),\n # 'SubmitField': SubmitInput(),\n}\n","sub_path":"gaetk2/forms/widgets4.py","file_name":"widgets4.py","file_ext":"py","file_size_in_byte":6721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"280172","text":"import os\r\nimport pygame\r\nfrom constants import *\r\nfrom find_data import find_data_file\r\n\r\nclass Template(pygame.sprite.Sprite):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n\r\n self.rect = None\r\n\r\n self.obtained_signal = False\r\n self.in_inventory = False\r\n self.dragged = False\r\n self.slot = None\r\n\r\n self.mouse_button_hold = False\r\n\r\n def update(self, inventory):\r\n mouse_pos = pygame.mouse.get_pos()\r\n\r\n # Putting something obtained in inventory\r\n if self.obtained_signal:\r\n self.rect = self.image.get_rect()\r\n for slot in inventory.slots:\r\n if slot.available:\r\n self.rect.center = slot.rect.center\r\n slot.available = False\r\n self.slot = slot\r\n break\r\n self.obtained_signal = False\r\n self.in_inventory = True\r\n\r\n def is_dragging(self, inventory, mouse_button_hold):\r\n if self.in_inventory:\r\n mouse_pos = pygame.mouse.get_pos()\r\n self.mouse_button_hold = mouse_button_hold\r\n\r\n if self.rect.collidepoint(mouse_pos) and self.mouse_button_hold:\r\n self.dragged = True\r\n for obj in inventory.objects:\r\n if not
obj.dragged:\r\n obj.mouse_button_hold = False\r\n\r\n def drag_and_release(self, inventory, mouse_pos):\r\n if self.dragged and self.mouse_button_hold:\r\n self.rect.center = mouse_pos\r\n elif self.dragged and not self.mouse_button_hold:\r\n for slot in inventory.slots:\r\n if slot.rect.collidepoint(mouse_pos):\r\n if slot.available:\r\n self.slot.available = True\r\n slot.available = False\r\n self.slot = slot\r\n break\r\n\r\n self.dragged = False\r\n self.rect.center = self.slot.rect.center\r\n\r\n def draw(self, screen):\r\n if self.rect != None:\r\n screen.blit(self.image, self.rect)\r\n\r\nclass Flashlight(Template):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n\r\n state1 = pygame.image.load(find_data_file(os.path.join('data', 'images', 'inventory', 'objects', 'flashlight_1.png'))).convert_alpha()\r\n state2 = pygame.image.load(find_data_file(os.path.join('data', 'images', 'inventory', 'objects', 'flashlight_2.png'))).convert_alpha()\r\n state3 = pygame.image.load(find_data_file(os.path.join('data', 'images', 'inventory', 'objects', 'flashlight_3.png'))).convert_alpha()\r\n\r\n self.state_index = 0\r\n self.states_list = [state1, state2, state3]\r\n self.current_state = self.states_list[self.state_index]\r\n\r\n self.image = self.current_state\r\n\r\n self.on = False\r\n\r\n def update(self, inventory, scenario, light_ray):\r\n super().update(inventory)\r\n\r\n mouse_pos = pygame.mouse.get_pos()\r\n\r\n if self.in_inventory:\r\n # Interaction with an inventory object\r\n if self.dragged and not self.mouse_button_hold:\r\n for obj in inventory.objects:\r\n if obj.rect != None:\r\n if obj.rect.collidepoint(mouse_pos) and isinstance(obj, Batteries):\r\n self.slot.available = True\r\n self.slot = obj.slot\r\n obj.kill() # Should I set in_inventory = False here?\r\n\r\n if self.state_index < len(self.states_list) - 1:\r\n self.state_index += 1\r\n\r\n self.current_state = self.states_list[self.state_index]\r\n self.image = self.current_state\r\n inventory.flashlight_working = True\r\n break\r\n\r\n # Drag & Release\r\n self.drag_and_release(inventory, mouse_pos)\r\n\r\n # Turn flashlight on or off\r\n self.turn_on_off(scenario, light_ray, mouse_pos)\r\n\r\n def turn_on_off(self, scenario, light_ray, mouse_pos):\r\n if scenario.black_screen != None and self.state_index == 1:\r\n self.state_index += 1\r\n self.current_state = self.states_list[self.state_index]\r\n self.image = self.current_state\r\n self.on = True\r\n elif scenario.black_screen == None and self.state_index == 2:\r\n self.state_index -= 1\r\n self.current_state = self.states_list[self.state_index]\r\n self.image = self.current_state\r\n self.on = False\r\n\r\n if self.on:\r\n light_ray.update(scenario, mouse_pos)\r\n\r\nclass Batteries(Template):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n\r\n self.image = pygame.image.load(find_data_file(os.path.join('data', 'images', 'inventory', 'objects', 'batteries_i.png'))).convert_alpha()\r\n\r\n def update(self, inventory, scenario, light_ray):\r\n super().update(inventory)\r\n\r\n mouse_pos = pygame.mouse.get_pos()\r\n\r\n if self.in_inventory:\r\n # Interaction with an inventory object\r\n if self.dragged and not self.mouse_button_hold:\r\n for obj in inventory.objects:\r\n if obj.rect != None:\r\n if obj.rect.collidepoint(mouse_pos) and isinstance(obj, Flashlight):\r\n self.slot.available = True\r\n self.kill() # Should I set in_inventory = False here?\r\n\r\n if obj.state_index < len(obj.states_list) - 1:\r\n obj.state_index += 1\r\n\r\n obj.current_state = obj.states_list[obj.state_index]\r\n obj.image = obj.current_state\r\n inventory.flashlight_working = True\r\n break\r\n\r\n # Drag & Release\r\n self.drag_and_release(inventory, mouse_pos)\r\n\r\nclass WarehouseKey(Template):\r\n\r\n def __init__(self):\r\n super().__init__()\r\n\r\n self.image = pygame.image.load(find_data_file(os.path.join('data', 'images', 'inventory', 'objects', 'warehouse_key_i.png'))).convert_alpha()\r\n\r\n def update(self, inventory, scenario, light_ray):\r\n super().update(inventory)\r\n\r\n mouse_pos = pygame.mouse.get_pos()\r\n\r\n if self.in_inventory:\r\n self.drag_and_release(inventory, mouse_pos)\r\n","sub_path":"development/inventory_objects.py","file_name":"inventory_objects.py","file_ext":"py","file_size_in_byte":6467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"326018459","text":"import requests\nfrom decouple import config\n\nSECRET_KEY = config('SECRET_KEY')\n\nheaders = {\n 'Host': 'kakaoi-newtone-openapi.kakao.com',\n 'Content-Type': 'application/octet-stream',\n 'X-DSS-Service': 'DICTATION',\n 'Authorization': f'KakaoAK {SECRET_KEY}',\n}\n\n# Transfer-Encoding: chunked # Use this when you do not know how much data will be sent.\n\ndata = open(\"pansori2.wav\", \"rb\").read()\n# print(data)\nresponse = requests.post('https://kakaoi-newtone-openapi.kakao.com/v1/recognize', headers=headers, data=data)\n# print(response)\nprint(response.text)","sub_path":"ferpredict3/restapi.py","file_name":"restapi.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"397587061","text":"\nfrom importlib import import_module\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch.optim import Adam, SGD\nfrom madgrad import MADGRAD\nfrom adamp import AdamP, SGDP\nfrom transformers import AdamW\n\nfrom transformers import ElectraConfig, ElectraForSequenceClassification\nfrom transformers import BertConfig, BertForSequenceClassification\n\n\ndef create_model(model_name):\n if \"electra\" in model_name.lower():\n electra_config = ElectraConfig.from_pretrained(model_name)\n electra_config.num_labels = 42\n model_ft = ElectraForSequenceClassification(electra_config)\n\n return model_ft\n\n elif \"bert\" in model_name.lower():\n bert_config = BertConfig.from_pretrained(model_name)\n bert_config.num_labels = 42\n model_ft = BertForSequenceClassification(bert_config)\n\n return model_ft\n\n\ndef create_criterion(criterion_name, *args, **kwargs):\n if criterion_name == \"CrossEntropyError\":\n criterion = nn.CrossEntropyLoss(*args, **kwargs)\n elif criterion_name == \"MSE\":\n criterion = nn.MSELoss(*args, **kwargs)\n elif criterion_name == \"FocalLoss\":\n criterion = FocalLoss(*args, **kwargs)\n elif criterion_name == \"KLDiv\":\n criterion = nn.KLDivLoss(*args, **kwargs)\n elif criterion_name == \"LabelSmoothingLoss\":\n criterion = LabelSmoothingLoss(*args, **kwargs)\n else:\n raise Exception(f\"{criterion_name} does not exist in criterion_list.\")\n\n return criterion\n\n\ndef create_optimizer(optimizer_name, **kwargs):\n if optimizer_name == \"Adam\":\n optimizer = Adam(**kwargs)\n elif optimizer_name == \"SGD\":\n optimizer = SGD(**kwargs)\n elif optimizer_name == \"MADGRAD\":\n optimizer = MADGRAD(**kwargs)\n elif optimizer_name == \"AdamP\":\n optimizer = AdamP(**kwargs)\n elif optimizer_name == \"SGDP\":\n optimizer = SGDP(**kwargs)\n elif optimizer_name == \"AdamW\":\n optimizer = AdamW(**kwargs)\n else:\n raise
Exception(f\"{optimizer_name} does not exist in optimizer_list.\")\n\n return optimizer\n\n\nclass FocalLoss(nn.Module):\n def __init__(self, weight=None, gamma=2.0, reduction=\"mean\"):\n nn.Module.__init__(self)\n self.weight = weight\n self.gamma = gamma\n self.reduction = reduction\n\n def forward(self, input_tensor, target_tensor):\n log_prob = F.log_softmax(input_tensor, dim=-1)\n prob = torch.exp(log_prob)\n return F.nll_loss(((1 - prob) ** self.gamma) * log_prob, target_tensor, weight=self.weight, reduction=self.reduction)\n\n\nclass LabelSmoothingLoss(nn.Module):\n def __init__(self, classes=42, smoothing=0.0, dim=-1):\n super(LabelSmoothingLoss, self).__init__()\n self.confidence = 1.0 - smoothing\n self.smoothing = smoothing\n self.cls = classes\n self.dim = dim\n\n def forward(self, pred, target):\n pred = pred.log_softmax(dim=self.dim)\n with torch.no_grad():\n true_dist = torch.zeros_like(pred)\n true_dist.fill_(self.smoothing / (self.cls - 1))\n true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)\n\n return torch.mean(torch.sum(-true_dist * pred, dim=self.dim))\n\n\nclass ClassifierModel(nn.Module):\n def __init__(self, model_type, model_name, class_num=42, fc_size=256, dropout_rate=None, embedding_size=None):\n super(ClassifierModel, self).__init__()\n\n model_config = getattr(import_module(\"transformers\"), model_type + \"Config\").from_pretrained(model_name)\n self.model_type = model_type\n # backbone.\n self.backbone = getattr(import_module(\"transformers\"), model_type + \"Model\").from_pretrained(model_name)\n if embedding_size is not None:\n self.backbone.resize_token_embeddings(embedding_size)\n # flatten\n self.flatten = nn.Flatten(0, -1)\n # connector\n self.connector = nn.Linear(model_config.hidden_size, fc_size)\n # classifier.\n self.classifier = nn.Linear(fc_size * 3, class_num)\n # dropout\n self.dropout = nn.Dropout(p=dropout_rate) if dropout_rate else None\n # activation\n self.tanh = nn.Tanh()\n\n def forward(self, e_token_mask, **kwargs):\n # Reference : https://github.com/monologg/R-BERT\n e1_token_mask = e_token_mask[\"e1_token_mask\"]\n e2_token_mask = e_token_mask[\"e2_token_mask\"]\n\n if e1_token_mask is None or e2_token_mask is None:\n raise Exception(\"ERROR! 
Model must be feed e1_token_ids, e2_token_ids\")\n\n outputs = self.backbone(**kwargs).last_hidden_state # (batch_size, max_len, hidden_size)\n\n cls_output = outputs[:, 0, :]\n e1_output = torch.sum(outputs * e1_token_mask.unsqueeze(-1), dim=1) / torch.sum(e1_token_mask, dim=1, keepdim=True)\n e2_output = torch.sum(outputs * e2_token_mask.unsqueeze(-1), dim=1) / torch.sum(e2_token_mask, dim=1, keepdim=True)\n if self.dropout:\n cls_output = self.dropout(cls_output)\n e1_output = self.dropout(e1_output)\n e2_output = self.dropout(e2_output)\n cls_output = self.connector(self.tanh(cls_output))\n e1_output = self.connector(self.tanh(e1_output))\n e2_output = self.connector(self.tanh(e2_output))\n\n combine_output = torch.cat([cls_output, e1_output, e2_output], dim=-1)\n if self.dropout:\n combine_output = self.dropout(combine_output)\n\n out = self.classifier(combine_output)\n\n return out\n","sub_path":"code/creators.py","file_name":"creators.py","file_ext":"py","file_size_in_byte":5499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"170175051","text":"import requests\nimport random\nimport string\nfrom hashlib import sha256\nimport hmac\nimport time\nfrom config import SAP_API_URL, SAP_SECRET_KEY, SAP_ACCESSKEYID\nimport json\nfrom app.models import SAPCompanies, SAPCostCenters, SAPGLAccounts, SAPProductGroups, SAPProducts, SAPProfitCenters\nfrom app.models import SAPRevisions, SAPWBSElements\nfrom app import db\nfrom treelib import Tree\n\n\ndef get_sap_api(path):\n # Generate nonce in secure way\n nonce = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(15))\n\n # Get milliseconds from EPOCH as timestamp\n timestamp = str((int(time.time()) * 1000))\n\n # Convert message for HMAC. 
It should be in bytes format\n msg = str.encode(SAP_ACCESSKEYID + timestamp + nonce)\n\n # Generate HMAC SHA256 signature in HEX format\n signature = hmac.new(key=SAP_SECRET_KEY, msg=msg, digestmod=sha256).hexdigest()\n\n # Prepare params for request\n params = {'AccessKeyId': SAP_ACCESSKEYID, 'Signature': signature, 'Nonce': nonce, 'Timestamp': timestamp}\n\n # Start session\n s = requests.Session()\n req = requests.Request('GET', SAP_API_URL + path, params=params)\n prep = req.prepare()\n resp = s.send(prep)\n resp_dict = {'answer': {}, 'status_code': resp.status_code}\n if resp.status_code == 404:\n return resp_dict\n else:\n resp_dict['answer'] = json.loads(resp.text)\n return resp_dict\n\n\ndef sap_latest_revision():\n return db.session.query(db.func.max(SAPRevisions.id)).scalar()\n\n\ndef sap_new_revision():\n new_revision = SAPRevisions.status = 'ok'\n db.session.add(new_revision)\n db.session.commit()\n\n\ndef sap_import_companies():\n new_revision = SAPRevisions(status='in progress')\n db.session.add(new_revision)\n db.session.commit()\n companies = get_sap_api('/Companies')\n company_list = []\n if companies['status_code'] == 404:\n new_revision.status = 'SAP API return 404 error'\n else:\n for company in companies['answer']['Results']:\n new_company = SAPCompanies(code=company['Code'], description=company['Description'], revision=new_revision.id)\n db.session.add(new_company)\n company_list.append(company['Code'])\n # TODO: Check if import was successful\n if 1 == 1:\n new_revision.status = 'done'\n db.session.commit()\n result = {'revision': new_revision.id, 'companies': company_list}\n return result\n\n\ndef sap_import_cc(company, revision):\n revision = SAPRevisions.query.filter_by(id=revision).first()\n all_cc = get_sap_api('/{}/CostCenters'.format(company))\n if all_cc['status_code'] == 404:\n revision.status = 'SAP API return 404 error'\n else:\n for cc in all_cc['answer']['Results']:\n new_cc = SAPCostCenters(code=cc['Code'],\n company_code=company,\n name=cc['Name'],\n profit_center=cc['ProfitCenter'],\n person_responsible=cc['PersonResponsible'],\n parent=cc['Parent'],\n revision=revision.id)\n db.session.add(new_cc)\n # TODO: Check if import was successful\n if 1 == 1:\n revision.status = 'CC import - success'\n db.session.commit()\n\n\ndef sap_import_wbs(revision):\n revision = SAPRevisions.query.filter_by(id=revision).first()\n all_wbs = get_sap_api('/WbsElements')\n if all_wbs['status_code'] == 404:\n revision.status = 'SAP API return 404 error'\n else:\n for wbs in all_wbs['answer']['Results']:\n new_wbs = SAPWBSElements(code=wbs['Code'], description=wbs['Description'], revision=revision.id)\n db.session.add(new_wbs)\n # TODO: Check if import was successful\n if 1 == 1:\n revision.status = 'WBS import - success'\n db.session.commit()\n\n\ndef sap_import_products(revision):\n revision = SAPRevisions.query.filter_by(id=revision).first()\n products = get_sap_api('/Products')\n if products['status_code'] == 404:\n revision.status = 'SAP API return 404 error'\n else:\n for product in products['answer']['Results']:\n # It could be blank row\n # TODO: report this to Andreas\n if product['Code']:\n new_product = SAPProducts(code=product['Code'],\n name=product['Name'],\n group=product['Group'],\n blocked=product['Blocked'],\n revision=revision.id)\n\n # We need to check this, because sometimes it causes error\n if product['GroupCode']:\n new_product.group_code = product['GroupCode']\n if product['Type']:\n new_product.type = product['Type']\n db.session.add(new_product)\n 
# TODO: Check if import was successful\n if 1 == 1:\n revision.status = 'Products import - success'\n db.session.commit()\n\n\ndef sap_import_accounts(company, revision):\n revision = SAPRevisions.query.filter_by(id=revision).first()\n accounts = get_sap_api('/{}/GLAccounts'.format(company))\n if accounts['status_code'] == 404:\n revision.status = 'SAP API return 404 error'\n else:\n for account in accounts['answer']['Results']:\n new_account = SAPGLAccounts(account_number=account['AccountNumber'],\n description=account['Description'],\n revision=revision.id)\n db.session.add(new_account)\n # TODO: Check if import was successful\n if 1 == 1:\n revision.status = 'Accounts import - success'\n db.session.commit()\n\n\ndef sap_import_all():\n companies = sap_import_companies()\n for company in companies['companies']:\n sap_import_cc(company, companies['revision'])\n sap_import_accounts(company, companies['revision'])\n\n # Import WBS Elements\n sap_import_wbs(companies['revision'])\n\n # Import Companies\n sap_import_products(companies['revision'])\n\n\nif __name__ == \"__main__\":\n sap_import_all()\n\n","sub_path":"app/sap_api.py","file_name":"sap_api.py","file_ext":"py","file_size_in_byte":6280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"14778553","text":"import string\n\n\nclass Node:\n def __init__(self, val, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\ndef serialize(root):\n global s\n if(root != None):\n s.append(root.val)\n serialize(root.left)\n serialize(root.right)\n else:\n s.append('None')\n return str(s).strip('[]')\n\n\n# def deserialize(s):\n# l = list(s.split(', ')\n\n\ns = []\nnode = Node('root', Node('left', Node('left.left')), Node('right'))\nprint(serialize(node))\n\n\n# ultimate test\n# node = Node('root', Node('left', Node('left.left')), Node('right'))\n# assert deserialize(serialize(node)).left.left.val == 'left.left'\n","sub_path":"py/google_binTree_serialize_deserialize/binary_tree.py","file_name":"binary_tree.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"597178834","text":"from celery import Celery\nfrom werkzeug.utils import secure_filename\nimport sys\nimport os\nimport subprocess\nfrom datetime import datetime\nsys.path.append(os.path.join(os.getcwd(), '..', 'build_report', 'scripts'))\nsys.path.append(os.path.join(os.getcwd(), 'build_report', 'scripts'))\nfrom genomics import get_config\n\ndef make_celery(app):\n celery = Celery(\n app.import_name,\n backend=app.config['CELERY_RESULT_BACKEND'],\n broker=app.config['CELERY_BROKER_URL']\n )\n celery.conf.update(app.config)\n\n class ContextTask(celery.Task):\n def __call__(self, *args, **kwargs):\n with app.app_context():\n return self.run(*args, **kwargs)\n\n celery.Task = ContextTask\n return celery\n\n@celery.task(bind=True)\ndef long_pdf_task(self):\n # Initialize state\n self.update_state(state='IN PROGRESS')\n\n # Background task that runs a long function\n path, filename = '', ''\n if request.method == 'POST':\n for f in [request.files['file'], request.files['target_file']]:\n filename = secure_filename(f.filename)\n path = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n f.save(path)\n\n command = ['bash', get_config.main(\"flaskAPI\", \"sub_command\")]\n os.chdir(os.path.dirname(get_config.main(\"flaskAPI\", \"sub_command\")))\n\n with open(os.path.join(get_config.main(\"flaskAPI\", \"log_dir\"), 
str(datetime.now())), 'w') as f:\n process = subprocess.run(command, stdout=f)\n print(process)\n\n\n\n sample_id, ext = os.path.splitext(filename)\n output_filename = '%s.pdf' % sample_id\n #output_filename = '%s.pdf' % 'sample_variants'\n output_path = os.path.join(app.config[\"CLIENT_PDF\"], output_filename)\n\n try:\n\n self.update_state(state='Complete')\n return send_file(\n output_path,\n as_attachment=True,\n attachment_filename=output_filename)\n #attachment_filename=filename)\n\n except FileNotFoundError:\n abort(404)","sub_path":"api/celery_worker.py","file_name":"celery_worker.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"129005903","text":"from __future__ import unicode_literals\n\nimport collections\nfrom mock import Mock\n\nfrom django.contrib.auth import get_user_model\nfrom django.utils import unittest\nfrom rest_framework import status\nfrom rest_framework import test\nfrom rest_framework.reverse import reverse\n\nfrom nodeconductor.structure.models import CustomerRole, ProjectRole\nfrom nodeconductor.structure.views import CustomerPermissionViewSet\nfrom nodeconductor.structure.tests import factories\n\nUser = get_user_model()\n\nTestRole = collections.namedtuple('TestRole', ['user', 'customer', 'role'])\n\n\nclass CustomerPermissionViewSetTest(unittest.TestCase):\n def setUp(self):\n self.view_set = CustomerPermissionViewSet()\n self.request = Mock()\n self.user_group = Mock()\n\n def test_create_adds_user_role_to_customer(self):\n customer = self.user_group.group.customerrole.customer\n customer.add_user.return_value = self.user_group, True\n\n serializer = Mock()\n serializer.is_valid.return_value = True\n serializer.object = self.user_group\n\n self.view_set.request = self.request\n self.view_set.can_save = Mock(return_value=True)\n self.view_set.get_serializer = Mock(return_value=serializer)\n self.view_set.create(self.request)\n\n customer.add_user.assert_called_once_with(\n self.user_group.user,\n self.user_group.group.customerrole.role_type,\n )\n\n def test_destroy_removes_user_role_from_customer(self):\n customer = self.user_group.group.customerrole.customer\n\n self.view_set.get_object = Mock(return_value=self.user_group)\n\n self.view_set.destroy(self.request)\n\n customer.remove_user.assert_called_once_with(\n self.user_group.user,\n self.user_group.group.customerrole.role_type,\n )\n\n\nclass CustomerPermissionApiPermissionTest(test.APITransactionTestCase):\n all_roles = (\n # user customer role\n TestRole('first', 'first', 'owner'),\n\n TestRole('both', 'first', 'owner'),\n TestRole('both', 'second', 'owner'),\n )\n\n role_map = {\n 'owner': CustomerRole.OWNER,\n }\n\n def setUp(self):\n self.users = {\n # 'staff': factories.UserFactory(is_staff=True),\n 'first': factories.UserFactory(),\n 'both': factories.UserFactory(),\n 'no_role': factories.UserFactory(),\n }\n\n self.customers = {\n 'first': factories.CustomerFactory(),\n 'second': factories.CustomerFactory(),\n }\n\n for user, customer, role in self.all_roles:\n self.customers[customer].add_user(self.users[user], self.role_map[role])\n\n # No role tests\n def test_user_cannot_list_roles_within_customers_he_has_no_role_in(self):\n for login_user in self.users:\n self.client.force_authenticate(user=self.users[login_user])\n\n response = self.client.get(reverse('customer_permission-list'))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n users_customers = set(r.customer for r in 
self.all_roles if r.user == login_user)\n unseen_roles = (r for r in self.all_roles if r.customer not in users_customers)\n\n for role in unseen_roles:\n role_url = self._get_permission_url(*role)\n\n urls = set([role['url'] for role in response.data])\n\n self.assertNotIn(\n role_url, urls,\n '{0} user sees privilege he is not supposed to see: {1}'.format(login_user, role),\n )\n\n # Customer owner tests\n def test_user_can_list_roles_within_customers_he_owns(self):\n for login_user in self.users:\n self.client.force_authenticate(user=self.users[login_user])\n\n response = self.client.get(reverse('customer_permission-list'))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n users_customers = set(r.customer for r in self.all_roles if r.user == login_user)\n seen_roles = (r for r in self.all_roles if r.customer in users_customers)\n\n for role in seen_roles:\n role_url = self._get_permission_url(*role)\n\n urls = set([role['url'] for role in response.data])\n\n self.assertIn(\n role_url, urls,\n '{0} user does not see privilege he is supposed to see: {1}'.format(login_user, role),\n )\n\n def test_user_can_assign_roles_within_customers_he_owns(self):\n self.client.force_authenticate(user=self.users['first'])\n\n data = {\n 'customer': factories.CustomerFactory.get_url(self.customers['first']),\n 'user': factories.UserFactory.get_url(self.users['no_role']),\n 'role': 'owner',\n }\n\n response = self.client.post(reverse('customer_permission-list'), data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n response = self.client.post(reverse('customer_permission-list'), data)\n self.assertEqual(response.status_code, status.HTTP_304_NOT_MODIFIED)\n\n def test_user_cannot_assign_roles_within_customers_he_doesnt_owns(self):\n self.client.force_authenticate(user=self.users['no_role'])\n\n data = {\n 'customer': factories.CustomerFactory.get_url(self.customers['first']),\n 'user': factories.UserFactory.get_url(self.users['no_role']),\n 'role': 'owner'\n }\n\n response = self.client.post(reverse('customer_permission-list'), data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_user_with_customer_owner_role_cannot_assign_roles_within_customers_he_doesnt_own(self):\n self.client.force_authenticate(user=self.users['no_role'])\n\n data = {\n 'customer': factories.CustomerFactory.get_url(self.customers['first']),\n 'user': factories.UserFactory.get_url(self.users['no_role']),\n 'role': 'owner'\n }\n\n response = self.client.post(reverse('customer_permission-list'), data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_user_cannot_assign_roles_within_customers_he_doesnt_own_but_has_project_admin_role(self):\n admin_user = factories.UserFactory()\n project = factories.ProjectFactory(customer=self.customers['first'])\n project.add_user(admin_user, ProjectRole.ADMINISTRATOR)\n\n self.client.force_authenticate(user=admin_user)\n\n data = {\n 'customer': factories.CustomerFactory.get_url(self.customers['first']),\n 'user': factories.UserFactory.get_url(self.users['no_role']),\n 'role': 'owner'\n }\n\n response = self.client.post(reverse('customer_permission-list'), data)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n def test_user_can_list_roles_within_customer_if_he_has_admin_role_in_a_project_owned_by_that_customer(self):\n admin_user = factories.UserFactory()\n project = factories.ProjectFactory(customer=self.customers['first'])\n project.add_user(admin_user, 
ProjectRole.ADMINISTRATOR)\n\n self.client.force_authenticate(user=admin_user)\n\n response = self.client.get(reverse('customer_permission-list'))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n role_url = self._get_permission_url('first', 'first', 'owner')\n\n urls = set([role['url'] for role in response.data])\n\n self.assertIn(\n role_url, urls,\n '{0} user does not see privilege he is supposed to see: {1}'.format(admin_user, role_url),\n )\n\n # Helper methods\n def _get_permission_url(self, user, customer, role):\n permission = User.groups.through.objects.get(\n user=self.users[user],\n group__customerrole__role_type=self.role_map[role],\n group__customerrole__customer=self.customers[customer],\n )\n return 'http://testserver' + reverse('customer_permission-detail', kwargs={'pk': permission.pk})\n\n\nclass CustomerPermissionApiFiltrationTest(test.APISimpleTestCase):\n def setUp(self):\n staff_user = factories.UserFactory(is_staff=True)\n self.client.force_authenticate(user=staff_user)\n\n self.users = {\n 'first': factories.UserFactory(),\n 'second': factories.UserFactory(),\n }\n\n self.customers = {\n 'first': factories.CustomerFactory(),\n 'second': factories.CustomerFactory(),\n }\n\n for customer in self.customers:\n self.customers[customer].add_user(self.users['first'], CustomerRole.OWNER)\n self.customers[customer].add_user(self.users['second'], CustomerRole.OWNER)\n\n def test_staff_user_can_filter_roles_within_customer_by_customer_uuid(self):\n response = self.client.get(reverse('customer_permission-list'))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n for customer in self.customers:\n response = self.client.get(reverse('customer_permission-list'),\n data={'customer': self.customers[customer].uuid})\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n customer_url = self._get_customer_url(self.customers[customer])\n\n for permission in response.data:\n self.assertEqual(customer_url, permission['customer'])\n\n def test_staff_user_can_filter_roles_within_customer_by_username(self):\n response = self.client.get(reverse('customer_permission-list'))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n for user in self.users:\n self._ensure_matching_entries_in('username', self.users[user].username)\n self._ensure_non_matching_entries_not_in('username', self.users[user].username)\n\n def test_staff_user_can_filter_roles_within_customer_by_native_name(self):\n response = self.client.get(reverse('customer_permission-list'))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n for user in self.users:\n self._ensure_matching_entries_in('native_name', self.users[user].native_name)\n self._ensure_non_matching_entries_not_in('native_name', self.users[user].native_name)\n\n def test_staff_user_can_filter_roles_within_customer_by_full_name(self):\n response = self.client.get(reverse('customer_permission-list'))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n for user in self.users:\n self._ensure_matching_entries_in('full_name', self.users[user].full_name)\n self._ensure_non_matching_entries_not_in('full_name', self.users[user].full_name)\n\n def test_staff_user_can_filter_roles_within_customer_by_role_type_name(self):\n response = self.client.get(reverse('customer_permission-list'))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n response = self.client.get(reverse('customer_permission-list'),\n data={'role': 'owner'})\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n for 
permission in response.data:\n self.assertEqual('owner', permission['role'])\n\n def test_staff_user_cannot_filter_roles_within_customer_by_role_type_pk(self):\n response = self.client.get(reverse('customer_permission-list'))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n response = self.client.get(reverse('customer_permission-list'),\n data={'role': '1'})\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data, [])\n\n def test_staff_user_can_see_required_fields_in_filtration_response(self):\n response = self.client.get(reverse('customer_permission-list'))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n for customer in self.customers:\n response = self.client.get(reverse('customer_permission-list'),\n data={'customer': self.customers[customer].uuid})\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n required_fields = ('url', 'user_native_name', 'user_full_name', 'user_username')\n\n for permission in response.data:\n for field in required_fields:\n self.assertIn(field, permission)\n\n # Helper methods\n def _ensure_matching_entries_in(self, field, value):\n response = self.client.get(reverse('customer_permission-list'),\n data={field: value})\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n for permission in response.data:\n self.assertEqual(value, permission['user_' + field])\n\n def _ensure_non_matching_entries_not_in(self, field, value):\n user = factories.UserFactory()\n\n customer = factories.CustomerFactory()\n customer.add_user(user, CustomerRole.OWNER)\n\n response = self.client.get(reverse('customer_permission-list'),\n data={field: getattr(user, field)})\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n for permission in response.data:\n self.assertNotEqual(value, permission['user_' + field])\n\n def _get_customer_url(self, customer):\n return 'http://testserver' + reverse('customer-detail', kwargs={'uuid': customer.uuid})\n","sub_path":"nodeconductor/structure/tests/test_customer_permissions.py","file_name":"test_customer_permissions.py","file_ext":"py","file_size_in_byte":13555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"331879999","text":"from nltk.tokenize import sent_tokenize\n\n\ndef lines(a, b):\n \"\"\"Return lines in both a and b\"\"\"\n\n # delclear variables\n li_len = 0\n a_list = a.split(\"\\n\")\n b_list = b.split(\"\\n\")\n sim_list = []\n\n # judge comparing line's length\n if len(a_list) < len(b_list):\n li_len = len(a_list)\n else:\n li_len = len(b_list)\n\n # append lines corresponding between a and b to sim_list\n for a_item in a_list:\n for b_item in b_list:\n if a_item == b_item:\n sim_list.append(a_item)\n break\n\n # print(a_list)\n # print(b_list)\n # print(sim_list)\n\n return sim_list\n\n\ndef sentences(a, b):\n \"\"\"Return sentences in both a and b\"\"\"\n\n # split sentences by sentence\n a_list = sent_tokenize(a, language='english') # split by '.'\n a_list = list(dict.fromkeys(a_list))\n b_list = sent_tokenize(b, language='english')\n b_list = list(dict.fromkeys(b_list))\n\n # judge comparing line's length\n li_len = 0\n if len(a_list) < len(b_list):\n li_len = len(a_list)\n else:\n li_len = len(b_list)\n\n # append sentences corresponding between a and b to sim_list\n sim_list = []\n for a_item in a_list:\n for b_item in b_list:\n if a_item == b_item:\n sim_list.append(a_item)\n break\n\n # print(a_list)\n # print(b_list)\n # print(sim_list)\n return sim_list\n\n\ndef 
substrings(a, b, n):\n \"\"\"Return substrings of length n in both a and b\"\"\"\n\n # store each strings(a and b) as substring\n a_list = []\n b_list = []\n a_substr_num = len(a) - n + 1\n b_substr_num = len(b) - n + 1\n substr = \"\"\n\n # create substrings list by each strings(a and b)\n for i in range(a_substr_num):\n substr = \"\"\n for j in range(i, i+n, 1):\n substr = substr + a[j]\n a_list.append(substr)\n a_list = list(dict.fromkeys(a_list))\n\n for i in range(b_substr_num):\n substr = \"\"\n for j in range(i, i+n, 1):\n substr = substr + b[j]\n b_list.append(substr)\n b_list = list(dict.fromkeys(b_list))\n\n # append substrings corresponding between a and b to sim_list\n sim_list = []\n for a_item in a_list:\n for b_item in b_list:\n if a_item == b_item:\n sim_list.append(a_item)\n break\n\n # print(a_list)\n # print(b_list)\n # print(sim_list)\n\n return sim_list\n\n","sub_path":"week7_problem1_similarities/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"4476851","text":"#######################################################################\n# Copyright (C) 2017 Shangtong Zhang(zhangshangtong.cpp@gmail.com) #\n# Permission given to modify the code as long as you keep this #\n# declaration at the top #\n#######################################################################\n\nfrom network import *\nfrom component import *\nfrom utils import *\nimport numpy as np\nimport time\nimport os\nimport pickle\nimport torch\n\nclass A2CAgent:\n def __init__(self, config):\n self.config = config\n self.learning_network = config.network_fn()\n self.optimizer = config.optimizer_fn(self.learning_network.parameters())\n self.task = config.task_fn()\n self.replay = config.replay_fn()\n self.policy = config.policy_fn()\n self.total_steps = 0\n\n def episode(self, deterministic=False):\n state = self.task.reset()\n total_reward = 0.0\n steps = 0\n while True:\n prob = self.learning_network.predict(np.stack([state]), True)\n action = self.policy.sample(prob, deterministic=deterministic)\n next_state, reward, done, info = self.task.step(action)\n done = (done or (self.config.max_episode_length and steps > self.config.max_episode_length))\n if not deterministic:\n self.replay.feed([state, action, reward, next_state, int(done)])\n self.total_steps += 1\n total_reward += np.sum(reward * self.config.reward_weight)\n steps += 1\n state = next_state\n if done:\n break\n if not deterministic and self.total_steps > self.config.min_memory_size:\n experiences = self.replay.sample()\n states, actions, rewards, next_states, terminals = experiences\n prob, log_prob, value = self.learning_network.predict(states, False)\n _, _, v_next = self.learning_network.predict(next_states, False)\n terminals = self.learning_network.to_torch_variable(terminals).unsqueeze(1)\n rewards = self.learning_network.to_torch_variable(rewards).unsqueeze(1)\n actions = self.learning_network.to_torch_variable(actions, 'int64').unsqueeze(1)\n target = rewards + self.config.discount * v_next * (1 - terminals)\n target = target.detach()\n advantage = target - value\n value_loss = 0.5 * advantage.pow(2).mean()\n policy_loss = -(log_prob.gather(1, actions) * Variable(advantage.data)).mean()\n kl_loss = (prob * log_prob).sum(1).mean()\n\n self.optimizer.zero_grad()\n (value_loss + policy_loss + self.config.entropy_weight * kl_loss).backward()\n torch.nn.utils.clip_grad_norm(self.learning_network.parameters(), 
self.config.gradient_clip)\n self.optimizer.step()\n\n return total_reward, steps\n\n def run(self):\n window_size = 100\n ep = 0\n rewards = []\n steps = []\n avg_test_rewards = []\n while True:\n ep += 1\n reward, step = self.episode()\n rewards.append(reward)\n steps.append(step)\n avg_reward = np.mean(rewards[-window_size:])\n self.config.logger.info('episode %d, reward %f, avg reward %f, total steps %d, episode step %d' % (\n ep, reward, avg_reward, self.total_steps, step))\n\n if self.config.episode_limit and ep > self.config.episode_limit:\n return rewards, steps, avg_test_rewards\n\n if self.config.test_interval and ep % self.config.test_interval == 0:\n self.config.logger.info('Testing...')\n with open('data/%s-dqn-model-%s.bin' % (self.config.tag, self.task.name), 'wb') as f:\n pickle.dump(self.learning_network.state_dict(), f)\n test_rewards = []\n for _ in range(self.config.test_repetitions):\n reward, step = self.episode(True)\n test_rewards.append(reward)\n avg_reward = np.mean(test_rewards)\n avg_test_rewards.append(avg_reward)\n self.config.logger.info('Avg reward %f(%f)' % (\n avg_reward, np.std(test_rewards) / np.sqrt(self.config.test_repetitions)))\n with open('data/%sdqn-statistics-%s.bin' % (self.config.tag, self.task.name), 'wb') as f:\n pickle.dump({'rewards': rewards,\n 'test_rewards': avg_test_rewards}, f)\n if avg_reward > self.task.success_threshold:\n break\n","sub_path":"agent/A2C_agent.py","file_name":"A2C_agent.py","file_ext":"py","file_size_in_byte":4732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"337096612","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis module defines the API to the test case used by the REST requests to \nperform functions such as advancing the simulation, retreiving test case \ninformation, and calculating and reporting results.\n\n\"\"\"\n\nfrom pyfmi import load_fmu\nimport numpy as np\nimport copy\nimport config\nimport json\nfrom scipy.integrate import trapz\n\nclass TestCase(object):\n '''Class that implements the test case.\n \n '''\n \n def __init__(self):\n '''Constructor.\n \n '''\n \n # Get configuration information\n con = config.get_config()\n # Define simulation model\n self.fmupath = con['fmupath']\n # Load fmu\n self.fmu = load_fmu(self.fmupath)\n # Get version\n self.fmu_version = self.fmu.get_version()\n # Get available control inputs and outputs\n if self.fmu_version == '2.0':\n input_names = self.fmu.get_model_variables(causality = 2).keys()\n output_names = self.fmu.get_model_variables(causality = 3).keys()\n else:\n raise ValueError('FMU must be version 2.0.')\n # Define KPIs\n self.kpipath = con['kpipath']\n # Load kpi json\n with open(self.kpipath, 'r') as f:\n json_str = f.read()\n self.kpi_json = json.loads(json_str)\n # Define measurements\n self.y = {'time':[]}\n for key in output_names:\n self.y[key] = []\n self.y_store = copy.deepcopy(self.y)\n # Define inputs\n self.u = {'time':[]}\n for key in input_names:\n self.u[key] = []\n self.u_store = copy.deepcopy(self.u)\n # Set default options\n self.options = self.fmu.simulate_options()\n self.options['CVode_options']['rtol'] = 1e-6 \n # Set default communication step\n self.set_step(con['step'])\n # Set initial simulation start\n self.start_time = 0\n self.initialize = True\n self.options['initialize'] = self.initialize\n \n def advance(self,u):\n '''Advances the test case model simulation forward one step.\n \n Parameters\n ----------\n u : dict\n Defines the control input data to be used for the 
step.\n {<input_name> : <value>}\n \n Returns\n -------\n y : dict\n Contains the measurement data at the end of the step.\n {<measurement_name> : <value>}\n \n '''\n \n # Set final time\n self.final_time = self.start_time + self.step\n # Set control inputs if they exist\n if u.keys():\n u_list = []\n u_trajectory = self.start_time\n for key in u.keys():\n if key != 'time':\n value = float(u[key])\n u_list.append(key)\n u_trajectory = np.vstack((u_trajectory, value))\n input_object = (u_list, np.transpose(u_trajectory))\n else:\n input_object = None\n # Simulate\n self.options['initialize'] = self.initialize\n res = self.fmu.simulate(start_time=self.start_time, \n final_time=self.final_time, \n options=self.options, \n input=input_object)\n # Get result and store measurement\n for key in self.y.keys():\n self.y[key] = res[key][-1]\n self.y_store[key] = self.y_store[key] + res[key].tolist()[1:]\n # Store control inputs\n for key in self.u.keys():\n self.u_store[key] = self.u_store[key] + res[key].tolist()[1:] \n # Advance start time\n self.start_time = self.final_time\n # Prevent re-initialization on subsequent steps\n self.initialize = False\n \n return self.y\n\n def reset(self):\n '''Reset the test.\n \n '''\n \n self.__init__()\n\n def get_step(self):\n '''Returns the current simulation step in seconds.'''\n\n return self.step\n\n def set_step(self,step):\n '''Sets the simulation step in seconds.\n \n Parameters\n ----------\n step : int\n Simulation step in seconds.\n \n Returns\n -------\n None\n \n '''\n \n self.step = float(step)\n \n return None\n \n def get_inputs(self):\n '''Returns a list of control input names.\n \n Parameters\n ----------\n None\n \n Returns\n -------\n inputs : list\n List of control input names.\n \n '''\n\n inputs = self.u.keys()\n \n return inputs\n \n def get_measurements(self):\n '''Returns a list of measurement names.\n \n Parameters\n ----------\n None\n \n Returns\n -------\n measurements : list\n List of measurement names.\n \n '''\n\n measurements = self.y.keys()\n \n return measurements\n \n def get_results(self):\n '''Returns measurement and control input trajectories.\n \n Parameters\n ----------\n None\n \n Returns\n -------\n Y : dict\n Dictionary of measurement and control input names and their \n trajectories as lists.\n {'y':{<measurement_name> : <trajectory>},\n 'u':{<input_name> : <trajectory>}\n }\n \n '''\n \n Y = {'y':self.y_store, 'u':self.u_store}\n \n return Y\n \n def get_kpis(self):\n '''Returns KPI data.\n \n Requires standard sensor signals.\n \n Parameters\n ----------\n None\n \n Returns\n -------\n kpis : dict\n Dictionary containing KPI names and values.\n {<kpi_name> : <value>}\n \n '''\n \n kpis = dict()\n # Calculate each KPI using json for signals and save in dictionary\n for kpi in self.kpi_json.keys():\n print(kpi, type(kpi))\n if kpi == 'energy':\n # Calculate total energy [kWh - assumes measured in J]\n E = 0\n for signal in self.kpi_json[kpi]:\n E = E + self.y_store[signal][-1]\n # Store result in dictionary\n kpis[kpi] = E*2.77778e-7 # Convert to kWh\n elif kpi == 'comfort':\n # Calculate total discomfort [K-h - assumes measured in K]\n tot_dis = 0\n heat_setpoint = 273.15+20\n for signal in self.kpi_json[kpi]:\n data = np.array(self.y_store[signal])\n dT_heating = heat_setpoint - data\n dT_heating[dT_heating<0]=0\n tot_dis = tot_dis + trapz(dT_heating,self.y_store['time'])/3600\n # Store result in dictionary\n kpis[kpi] = tot_dis\n else:\n print('No calculation for KPI named \"{0}\".'.format(kpi))\n\n return kpis\n \n def get_name(self):\n '''Returns the name of the test case fmu.\n \n Parameters\n ----------\n None\n \n Returns\n -------\n name : str\n Name of test case
\n def get_name(self):\n '''Returns the name of the test case fmu.\n \n Parameters\n ----------\n None\n \n Returns\n -------\n name : str\n Name of test case fmu.\n \n '''\n \n name = self.fmupath[7:-4]\n \n return name","sub_path":"testcase.py","file_name":"testcase.py","file_ext":"py","file_size_in_byte":7475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"296985497","text":"# -*- coding: utf-8 -*-\n# from django.conf.urls import include, url\nfrom django.urls import include, path\nfrom demo_app.app import rest\n# Uncomment the next two lines to enable the admin:\nimport xadmin\n\nxadmin.autodiscover()\n\n# The xversion module auto-registers the Models that need version control\nfrom xadmin.plugins import xversion\n\nxversion.register_models()\n\nfrom django.contrib import admin\n\nurlpatterns = [\n path('xadmin/', xadmin.site.urls),\n path('login/', rest.Login, name='login'),\n path('register/', rest.Register, name='register'),\n path('checkImage/', rest.CheckImage, name='checkImage'),\n path('checkVideo/', rest.CheckVideo, name='checkVideo'),\n path('collect/', rest.CollectData, name='collect'),\n path('getMyCollect/', rest.GetMyCollect, name='getMyCollect'),\n path('getMyHistory/', rest.GetMyHistory, name='getMyHistory'),\n path('getVerifyCode/', rest.GetVerifyCode, name='getVerifyCode'),\n path('changePwd/', rest.ChangePwd, name='changePwd'),\n path('deleteCollection/', rest.deleteCollection, name='deleteCollection'),\n path('deleteHistory/', rest.deleteHistory, name='deleteHistory'),\n path('deleteAllCollection/', rest.deleteAllCollection, name='deleteAllCollection'),\n path('deleteAllHistory/', rest.deleteAllHistory, name='deleteAllHistory'),\n]\n","sub_path":"xadmin/demo_app/app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"562043737","text":"class Solution(object):\n def summaryRanges(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[str]\n \"\"\"\n rt = []\n if not nums:\n return rt\n i = 0\n cur = nums[i]\n while i < len(nums):\n cur = nums[i]\n lower = cur\n rng = str(cur)\n i += 1\n cur += 1\n while i < len(nums) and cur == nums[i]:\n i += 1\n cur += 1\n if lower != cur - 1:\n rng += \"->\" + str(cur - 1)\n rt.append(rng)\n return rt\n\ns = Solution()\nprint(s.summaryRanges([0,1,2,4,5,7]))\nprint(s.summaryRanges([0,2]))\n","sub_path":"228. 
Summary Ranges/228.py","file_name":"228.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"633828640","text":"import numpy as np\nimport random\nimport cv2 as cv\nimport copy\nimport chainer\n\nfrom xdog import xdog_process\nfrom chainer import cuda\n\nxp = cuda.cupy\ncuda.get_device(0).use()\n\n\nclass DataLoader:\n def __init__(self,\n path,\n extension='.jpg',\n img_size=224,\n latent_dim=256):\n\n self.path = path\n self.pathlist = list(self.path.glob(f\"**/*{extension}\"))\n self.train, self.valid = self._split(self.pathlist)\n self.train_len = len(self.train)\n self.valid_len = len(self.valid)\n\n self.size = img_size\n self.latent_dim = latent_dim\n\n self.interpolations = (\n cv.INTER_LINEAR,\n cv.INTER_AREA,\n cv.INTER_NEAREST,\n cv.INTER_CUBIC,\n cv.INTER_LANCZOS4\n )\n\n def __str__(self):\n return f\"dataset path: {self.path} train data: {self.train_len}\"\n\n def _split(self, pathlist: list):\n split_point = int(len(self.pathlist) * 0.95)\n x_train = self.pathlist[:split_point]\n x_test = self.pathlist[split_point:]\n\n return x_train, x_test\n\n @staticmethod\n def _random_crop(line, color, size):\n height, width = line.shape[0], line.shape[1]\n rnd0 = np.random.randint(height - size - 1)\n rnd1 = np.random.randint(width - size - 1)\n\n line = line[rnd0: rnd0 + size, rnd1: rnd1 + size]\n color = color[rnd0: rnd0 + size, rnd1: rnd1 + size]\n\n return line, color\n\n @staticmethod\n def _coordinate(image):\n image = image[:, :, ::-1]\n image = image.transpose(2, 0, 1)\n image = (image - 127.5) / 127.5\n\n return image\n\n @staticmethod\n def _variable(image_list):\n return chainer.as_variable(xp.array(image_list).astype(xp.float32))\n\n def noise_generator(self, batchsize):\n noise = xp.random.normal(size=(batchsize, self.latent_dim)).astype(xp.float32)\n\n return chainer.as_variable(noise)\n\n def _prepare_pair(self, image_path, size, repeat=16):\n interpolation = random.choice(self.interpolations)\n\n color = cv.imread(str(image_path))\n line = xdog_process(str(image_path))\n\n line, color = self._random_crop(line, color, size=size)\n\n color = self._coordinate(color)\n line = self._coordinate(line)\n\n return (color, line)\n\n def __call__(self, batchsize, mode='train'):\n color_box = []\n line_box = []\n\n for _ in range(batchsize):\n if mode == 'train':\n rnd = np.random.randint(self.train_len)\n image_path = self.train[rnd]\n elif mode == 'valid':\n rnd = np.random.randint(self.valid_len)\n image_path = self.valid[rnd]\n else:\n raise AttributeError\n\n color, line = self._prepare_pair(image_path, size=self.size)\n\n color_box.append(color)\n line_box.append(line)\n\n color = self._variable(color_box)\n line = self._variable(line_box)\n\n return (color, line)\n","sub_path":"atari_gaugan/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":3078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"350219239","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport contextlib\nimport functools\nimport numpy as np\nimport six\nimport warnings\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.python.distribute import central_storage_strategy\nfrom tensorflow.python.distribute import distribution_strategy_context as distribute_ctx\nfrom tensorflow.python.distribute import parameter_server_strategy\nfrom 
tensorflow.python.distribute import parameter_server_strategy_v2\nfrom tensorflow.python.distribute import values as ds_values\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import monitoring\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import clip_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gen_resource_variable_ops\nfrom tensorflow.python.ops import gradients\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variables as tf_variables\nfrom tensorflow.python.saved_model import revived_types\nfrom tensorflow.python.training import training_ops\nfrom tensorflow.python.training.tracking import base as trackable\nfrom tensorflow.python.training.tracking import tracking\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import tf_inspect\nfrom tensorflow.python.util.tf_export import keras_export\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.keras import backend_config\nfrom tensorflow.python.keras import backend\nfrom tensorflow.python.keras import initializers\nfrom tensorflow.python.keras.engine import base_layer_utils\nfrom tensorflow.python.keras.optimizer_v2 import learning_rate_schedule\nfrom tensorflow.python.keras.optimizer_v2 import optimizer_v2\nfrom tensorflow.python.keras.optimizer_v2 import utils as optimizer_utils\nfrom tensorflow.python.keras.utils import generic_utils\nfrom tensorflow.python.keras.utils import layer_utils\nfrom tensorflow.python.keras.utils import tf_inspect\nfrom tensorflow.python.keras.utils import tf_utils\n\ndef name_scope_only_in_function_or_graph(name):\n if not context.executing_eagerly():\n return ops.name_scope_v1(name)\n else:\n return NullContextmanager()\n\nclass NullContextmanager(object):\n def __init__(self, *args, **kwargs):\n pass\n \n def __enter__(self):\n pass\n \n def __exit__(self, type_arg, value_arg, traceback_arg):\n return False\n\nclass MLQN(keras.optimizers.Optimizer):\n \"\"\" Optimizer that implements the MLQN algorithm\n \n The Memory-Less Quasi-Newton (MLQN) method addresses the problem of the quasi-Newton (QN) method\n that it requires an approximate Hessian matrix, which makes it unsuitable for large-scale\n training, by learning without storing that matrix.\n This optimizer was implemented for comparing MLMoQ on TensorFlow and published on 17 October 2021.\n \n \"\"\"\n def __init__(self, lr = 1.0, globalconve_term = True, apply_theta = False, name = \"MLQN\", **kwargs):\n \"\"\"\n Args:\n lr : learning rate. Defaults to 1.0.\n globalconve_term : flag for the global convergence term of Y. If globalconve_term equals True, the global convergence term of Y is applied. Defaults to True\n apply_theta : flag that limits the range of theta. If apply_theta equals True, the range of theta will be limited. 
Defaults to False\n \n \"\"\"\n super().__init__(name, **kwargs)\n # Parameters related main work of MLQN\n self._set_hyper(\"lr\", lr)\n self._set_hyper(\"theta\", 0.0)\n self._set_hyper(\"sg\", 0.0)\n self._set_hyper(\"yg\", 0.0)\n self._set_hyper(\"sy\", 0.0)\n self._set_hyper(\"yy\", 0.0)\n \n # flags\n self.globalconve_term = globalconve_term\n self.apply_theta = apply_theta\n\n def _create_slots(self, var_list):\n for var in var_list:\n self.add_slot(var, 'one_past_var')\n self.add_slot(var, 'g')\n self.add_slot(var, 's')\n self.add_slot(var, 'y')\n self.add_slot(var, 'z')\n self.add_slot(var, \"one_past_grad\")\n \n def minimize(self, loss, var_list, grad_loss=None, name=None, tape=None):\n grads_and_vars = self._compute_gradients(loss, var_list=var_list, grad_loss=grad_loss, tape=tape)\n return self.apply_gradients(grads_and_vars)\n\n \"\"\" ------------------------------------------------------------------------\"\"\"\n # MLQN\n def apply_gradients(self, grads_and_vars, name=None, experimental_aggregate_gradients=True):\n grads_and_vars = optimizer_utils.filter_empty_gradients(grads_and_vars)\n var_list = [v for (_, v) in grads_and_vars]\n\n with ops.name_scope_v2(self._name):\n with ops.init_scope():\n self._create_all_weights(var_list)\n \n if not grads_and_vars:\n return control_flow_ops.no_op()\n\n if distribute_ctx.in_cross_replica_context():\n raise RuntimeError(\"`apply_gradients() cannot be called in cross-replica context. \"\"Use `tf.distribute.Strategy.run` to enter replica \"\"context.\")\n \n strategy = distribute_ctx.get_strategy()\n if (not experimental_aggregate_gradients and strategy and\n isinstance(strategy,\n (parameter_server_strategy.ParameterServerStrategyV1,\n parameter_server_strategy_v2.ParameterServerStrategyV2,\n central_storage_strategy.CentralStorageStrategy,\n central_storage_strategy.CentralStorageStrategyV1))):\n raise NotImplementedError(\"`experimental_aggregate_gradients=False is not supported for \"\"ParameterServerStrategy and CentralStorageStrategy\")\n \n apply_state = self._prepare(var_list)\n if experimental_aggregate_gradients:\n grads_and_vars = self._transform_unaggregated_gradients(grads_and_vars)\n grads_and_vars = self._aggregate_gradients(grads_and_vars)\n grads_and_vars = self._transform_gradients(grads_and_vars)\n\n if optimizer_utils.strategy_supports_no_merge_call():\n return self._distributed_apply(strategy, grads_and_vars, name, apply_state)\n \n else:\n return distribute_ctx.get_replica_context().merge_call(\n functools.partial(self._distributed_apply, apply_state=apply_state),\n args=(grads_and_vars,),\n kwargs={\n \"name\": name,\n })\n \n def _distributed_apply(self, distribution, grads_and_vars, name, apply_state):\n def apply_grad_to_update_var(var, grad):\n if isinstance(var, ops.Tensor):\n raise NotImplementedError(\"Trying to update a Tensor \", var)\n \n apply_kwargs = {}\n if isinstance(grad, ops.IndexedSlices):\n if var.constraint is not None:\n raise RuntimeError(\"Cannot use a constraint function on a sparse variable.\")\n if \"apply_state\" in self._sparse_apply_args:\n apply_kwargs[\"apply_state\"] = apply_state\n return self._resource_apply_sparse_duplicate_indices(grad.values, var, grad.indices, **apply_kwargs)\n \n if \"apply_state\" in self._dense_apply_args:\n apply_kwargs[\"apply_state\"] = apply_state\n update_op = self._resource_apply_dense(grad, var, **apply_kwargs)\n if var.constraint is not None:\n with ops.control_dependencies([update_op]):\n return var.assign(var.constraint(var))\n else:\n return 
update_op\n\n # calculate the necessary parameters such as inner product\n self.prepare_apply(grads_and_vars)\n\n eagerly_outside_functions = ops.executing_eagerly_outside_functions()\n update_ops = []\n with name_scope_only_in_function_or_graph(name or self._name):\n for grad, var in grads_and_vars:\n with distribution.extended.colocate_vars_with(var):\n with name_scope_only_in_function_or_graph(\"update\" if eagerly_outside_functions else \"update_\" + var.op.name):\n update_op = distribution.extended.update(var, apply_grad_to_update_var, args=(grad,), group=False)\n if distribute_ctx.in_cross_replica_context():\n update_ops.extend(update_op)\n else:\n update_ops.append(update_op)\n \n any_symbolic = any(isinstance(i, ops.Operation) or tf_utils.is_symbolic_tensor(i) for i in update_ops)\n if not context.executing_eagerly() or any_symbolic:\n with backend._current_graph(update_ops).as_default(): # pylint: disable=protected-access\n with ops.control_dependencies([control_flow_ops.group(update_ops)]):\n return self._iterations.assign_add(1, read_value=False)\n \n return self._iterations.assign_add(1)\n \n # calculate the necessary parameters such as inner product\n @tf.function\n def prepare_apply(self, grads_and_vars):\n tmp_ZS = 0.0\n tmp_SS = 0.0\n norm_g = 0.0\n tmp_SG = 0.0\n tmp_YG = 0.0\n tmp_SY = 0.0\n tmp_YY = 0.0\n \n if self.globalconve_term:\n for grad, var in grads_and_vars:\n z = self.get_slot(var, \"z\")\n z_t = z.assign( grad - self.get_slot(var, \"one_past_grad\") )\n \n tmp_ZS += tf.reduce_sum( self.get_slot(var, \"s\") * z_t )\n tmp_SS += tf.reduce_sum( self.get_slot(var, \"s\") * self.get_slot(var, \"s\") )\n norm_g += tf.reduce_sum( grad * grad )\n \n w = 2.0 if norm_g > 1e-2 else 100.0\n delta = tf.maximum(tmp_ZS / tmp_SS, 0)\n xi = w * tf.math.sqrt(norm_g) + delta\n \n for grad, var in grads_and_vars:\n y = self.get_slot(var, \"y\")\n y_t = y.assign( self.get_slot(var, \"z\") + xi * self.get_slot(var, \"s\") )\n \n tmp_SG += tf.reduce_sum( self.get_slot(var, \"s\") * grad )\n tmp_YG += tf.reduce_sum( y_t * grad )\n tmp_SY += tf.reduce_sum( self.get_slot(var, \"s\") * y_t )\n tmp_YY += tf.reduce_sum( y_t * y_t )\n \n else:\n for grad, var in grads_and_vars:\n y = self.get_slot(var, \"y\")\n y_t = y.assign( grad - self.get_slot(var, \"one_past_grad\") )\n \n tmp_SG += tf.reduce_sum( self.get_slot(var, \"s\") * grad )\n tmp_YG += tf.reduce_sum( y_t * grad )\n tmp_SY += tf.reduce_sum( self.get_slot(var, \"s\") * y_t )\n tmp_YY += tf.reduce_sum( y_t * y_t )\n \n sg = self._get_hyper(\"sg\")\n sg.assign( tmp_SG )\n yg = self._get_hyper(\"yg\")\n yg.assign( tmp_YG )\n sy = self._get_hyper(\"sy\")\n sy.assign( tmp_SY )\n yy = self._get_hyper(\"yy\")\n yy.assign( tmp_YY )\n theta = self._get_hyper(\"theta\")\n theta.assign( tmp_SY / tmp_YY )\n \n @tf.function\n def _resource_apply_dense(self, grad, var):\n lr = self._get_hyper(\"lr\")\n\n theta = self._get_hyper(\"theta\")\n \n if self.apply_theta:\n if theta < 0: theta = lr\n elif theta > 1: theta = 1.0 # clamp to 1 (the original bare '1' here was a no-op)\n\n sg = self._get_hyper(\"sg\")\n yg = self._get_hyper(\"yg\")\n sy = self._get_hyper(\"sy\")\n yy = self._get_hyper(\"yy\")\n\n s = self.get_slot(var, \"s\")\n y = self.get_slot(var, \"y\")\n\n one_past_grad = self.get_slot(var, \"one_past_grad\")\n one_past_var = self.get_slot(var, \"one_past_var\")\n\n if self.iterations == 0:\n direction = -1.0 * grad\n\n one_past_var_t = one_past_var.assign( var )\n one_past_grad.assign( grad )\n var_t = var.assign( var + lr * direction )\n \n else:\n direction = -1.0 * ( theta * grad - (theta * y 
* (sg / sy) + theta * s * (yg / sy)) \n + (1 + (theta * yy / sy)) * s * (sg / sy) )\n\n one_past_var_t = one_past_var.assign( var )\n one_past_grad.assign( grad )\n var_t = var.assign( var + lr * direction )\n\n s.assign( var_t - one_past_var_t )\n","sub_path":"MLQN.py","file_name":"MLQN.py","file_ext":"py","file_size_in_byte":12883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"529886504","text":"import json\n\nfrom flask_restplus import Namespace\n\n\nclass NameSpace(Namespace):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def response(self, code, description, model=None, **kwargs):\n\n to_json = kwargs.get('to_json', True)\n body = kwargs.get('body', True)\n\n if body:\n response = {'message': description}\n message = response if not to_json else json.dumps(response)\n else:\n message = description\n return self.doc(responses={code: (message, model, kwargs)})\n\n def response_error(self, exception, model=None, **kwargs):\n '''A decorator to specify one of the expected error responses\n\n :param ApiError exception: An exception instance of errors.ApiError\n :param ModelBase model: an optional response model\n '''\n\n to_json = kwargs.get('to_json', True)\n message = exception.as_dict() if not to_json else json.dumps(exception.as_dict())\n return self.doc(responses={exception.code: (message, model, kwargs)})\n","sub_path":"src/apis/namespace.py","file_name":"namespace.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"110780279","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 6 10:30:06 2019\n\n@author: aneekbasu\n\"\"\"\nimport loadData\n#import wordVeactors\nimport vocabulary\nimport dataset\nimport numpy as np\nfrom gensim.models import KeyedVectors\nimport training\n\nif __name__ == \"__main__\":\n reviews = loadData.parseReviews(\"data/train\", False)\n reviews_test = loadData.parseReviews(\"data/test\", False)\n print(len(reviews))\n #print(reviews[10][3])\n review_text = [reviews[index][3] for index in range(len(reviews))]\n sentiment_value = [reviews[index][1] for index in range(len(reviews))]\n review_text_test = [reviews_test[index][3] for index in range(len(reviews_test))]\n sentiment_value_test = [reviews_test[index][1] for index in range(len(reviews_test))]\n #print(review_text[:10])\n #print(sentiment_value[:10])\n #word_vectors = KeyedVectors.load_word2vec_format('wiki-news-300d-1M.vec', binary=False)\n mean_len = np.array([len(title) for title in review_text]).mean()\n big_len = max([(len(title)) for title in review_text])\n max_len = int((mean_len+big_len)/2)\n print('Average length of a review is {}',mean_len)\n print('Maximum length of a review is {}',big_len)\n #voc = vocabulary.Vocabulary(['',''])\n #for token in review_text:\n # voc.add_tokens(token)\n #print(len(voc))\n #print(voc[0])\n word_vectors = KeyedVectors.load_word2vec_format('wiki-news-300d-1M.vec', binary=False)\n #word_vectors = FastText.load_fasttext_format('wiki.simple')\n dataset_raw_train = dataset.SentimentDataset(review_text[:1000],sentiment_value[:1000],word_vectors,max_len=20)\n dataset_raw_test = dataset.SentimentDataset(review_text_test[:1000],sentiment_value_test[:1000],word_vectors,max_len=20)\n print(len(dataset_raw_train))\n print(len(dataset_raw_train[90]))\n print(len(dataset_raw_test))\n print(len(dataset_raw_test[90]))\n #input_list = []\n #output_list = []\n 
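# Editorial aside, not from the original script: SentimentDataset presumably\n # pads or truncates each token list to max_len before batching. A minimal\n # sketch of that idea, with a hypothetical pad token '':\n def pad_or_truncate(tokens, max_len, pad=''):\n  return tokens[:max_len] + [pad] * max(0, max_len - len(tokens))\n # e.g. pad_or_truncate(['good', 'movie'], 4) -> ['good', 'movie', '', '']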
#print(len(outputs))\n #print(dataset_raw[10][1])\n #for i in range(len(dataset_raw)):\n # input_list.append(dataset_raw[i][0].numpy())\n # output_list.append(dataset_raw[i][1].numpy())\n #print(input_list[10])\n #print(output_list[10])\n #print(len(input_list),len(output_list))\n training.train(dataset_raw_train,dataset_raw_test,sentiment_value_test, word_vectors)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"564616953","text":"from django.test import TestCase\n\n# Create your tests here.\nfrom rango.models import Category\nfrom time import sleep\n\nclass CategoryMethodTests(TestCase):\n\tdef test_ensure_views_are_positive(self):\n\t\t'''\n\t\t\t ensure_views_are_positive should result in True for categories where views are zero or positive\n\t\t'''\n\t\tcat = Category(name='test', views=-1, likes=0)\n\t\tcat.save()\n\t\tself.assertEqual((cat.views >= 0), True)\n\n\tdef test_slug_line_creation(self):\n\n\t\tcat = Category(name=\"Rango Category String\")\n\t\tcat.save()\n\t\tself.assertEqual(cat.slug, 'rango-category-string')\n\n\nfrom django.core.urlresolvers import reverse\n\n\nclass IndexViewTests(TestCase):\n\n def test_index_view_with_no_categories(self):\n \"\"\"\n If no questions exist, an appropriate message should be displayed.\n \"\"\"\n Category.objects.get_or_create(name='test', views=1, likes=1)\n Category.objects.get_or_create(name='temp', views=1, likes=1)\n Category.objects.get_or_create(name='tmp', views=1, likes=1)\n Category.objects.get_or_create(name='tmp test temp', views=1, likes=1)\n\n response = self.client.get(reverse('index'))\n \n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"tmp test temp\")\n \n num_cat = len(response.context['categories'])\n self.assertEqual(num_cat, 4)","sub_path":"rango/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"632227662","text":"import tensorflow as tf\nimport os\nimport subprocess\ncustom_ops_dir = os.sep.join([os.getenv(\"HOME\"), \".tf_custom_ops\"])\nlibrary_name = 'high_dim_filter.so'\nlib = os.sep.join([custom_ops_dir, library_name])\nif not os.path.exists(lib):\n cpp_path = os.sep.join([os.path.dirname(os.path.realpath(__file__)),\"..\",\"cpp\"])\n env = {\n \"TF_INC\":tf.sysconfig.get_include()\n }\n print(cpp_path)\n proc = subprocess.Popen(['bash','compile.sh'], shell=False, cwd=cpp_path, env=env)\n proc.communicate()\ncustom_module = tf.load_op_library(lib)\nimport crfrnn.high_dim_filter_grad # Register gradients for the custom op","sub_path":"crfrnn/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"590320706","text":"#!/usr/bin/env\n\nimport csv\nimport pandas\nimport numpy\nfrom sklearn.naive_bayes import GaussianNB\n\ndef prepare_data_sets():\n train_in_set = pandas.read_csv('train/train_in.tsv', header=None, sep='\\t')\n train_out_set = pandas.read_csv('train/train_out.tsv', header=None, sep='\\t')\n test_set = pandas.read_csv('test-A/in.tsv', header=None, sep='\\t')\n dev_set = pandas.read_csv('dev-0/in.tsv', header=None, sep='\\t')\n\n for col in train_in_set:\n train_in_set[col] = train_in_set[col].map(ord)\n\n for col in train_out_set:\n train_out_set[col] = train_out_set[col].map(ord)\n
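\n # Editorial note, not in the original: df[col].map(ord) assumes every cell\n # holds exactly one character (as in this letter-coded mushroom data);\n # ord() raises a TypeError on longer strings.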
\n for col in test_set:\n test_set[col] = test_set[col].map(ord)\n\n for col in dev_set:\n dev_set[col] = dev_set[col].map(ord)\n\n train_in_set.to_csv('train/train_in_06.tsv', header=None, sep='\\t', index=False)\n train_out_set.to_csv('train/train_out_06.tsv', header=None, sep='\\t', index=False)\n test_set.to_csv('test-A/in_06.tsv', header=None, sep='\\t', index=False)\n dev_set.to_csv('dev-0/in_06.tsv', header=None, sep='\\t', index=False)\n\ndef write_out_file(predictions, out_file):\n with open(out_file, 'w') as f:\n for prediction in predictions:\n f.write(str(chr(int(prediction))) + '\\n')\n\ndef main():\n # prepare data sets\n prepare_data_sets()\n\n # load data\n X_train = numpy.loadtxt('./train/train_in_06.tsv', delimiter='\\t')\n y_train = numpy.loadtxt('./train/train_out_06.tsv', delimiter='\\t')\n X_test = numpy.loadtxt('./test-A/in_06.tsv', delimiter='\\t')\n X_dev = numpy.loadtxt('./dev-0/in_06.tsv', delimiter='\\t')\n\n # print data\n print('\\nX_train:')\n print(X_train)\n print('\\ny_train:')\n print(y_train)\n print('\\nX_test:')\n print(X_test)\n print('\\nX_dev:')\n print(X_dev)\n\n # create classifier\n gnb = GaussianNB()\n\n # train\n gnb.fit(X_train, y_train)\n\n # predict\n predictions = gnb.predict(X_test)\n predictions_dev = gnb.predict(X_dev)\n\n # print predictions\n print('\\npredictions:')\n print(predictions)\n print('\\npredictions_dev:')\n print(predictions_dev)\n\n # write outfiles\n write_out_file(predictions, 'test-A/out.tsv')\n write_out_file(predictions_dev, 'dev-0/out.tsv')\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"ml/weraMushrooms/bayes.py","file_name":"bayes.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"562889145","text":"#!/usr/bin/env python3\n\n\"\"\"Progression game commands.\"\"\"\n\nimport random\nimport re\nimport sys\n\nimport prompt\n\ncrypto = random.SystemRandom()\n\n\ndef welcome_user():\n \"\"\"Greeting.\n\n Returns:\n Returns name of the player\n \"\"\"\n name = prompt.string('May I have your name? ')\n sys.stdout.write('Hello, {0}!\\n'.format(name))\n return name\n\n\ndef define_rules():\n \"\"\"Rules of the game.\"\"\"\n sys.stdout.write('What number is missing in the progression?\\n')\n\n\ndef create_progression():\n \"\"\"Create progression of numbers.\n\n Returns:\n Returns progression as a list.\n \"\"\"\n progression = []\n begin = crypto.randrange(1, 100)\n step = crypto.randrange(1, 10)\n stop = step * 10 + begin\n for num in range(begin, stop, step):\n progression.append(str(num))\n return progression\n
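\n\n# Editorial aside, not part of the original module: create_progression always\n# returns exactly ten terms, e.g. begin=5, step=3 gives stop = 3 * 10 + 5 = 35\n# and range(5, 35, 3) yields '5', '8', '11', ..., '32'.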
\n\ndef create_task():\n \"\"\"Create a progression and hide one of its elements.\n\n Returns:\n Returns progression of numbers and missing number.\n \"\"\"\n progression = create_progression()\n index_of_missing = crypto.randrange(0, 10)\n missing = progression.pop(index_of_missing)\n progression.insert(index_of_missing, '..')\n return (progression, missing)\n\n\ndef question(*args):\n r\"\"\"Ask the question to the player.\n\n Args:\n args: the progression list with the missing element masked.\n\n Returns:\n Returns answer of the player.\n \"\"\"\n message_with_symb = 'Question: {0}\\n'.format(*args)\n message = re.sub(r\"[\\[\\,\\'\\]]\", '', message_with_symb)\n sys.stdout.write(message)\n return prompt.string('Your answer: ')\n\n\ndef game(name, amount_of_rounds=3):\n \"\"\"One round of a game.\n\n Args:\n name: name of player.\n amount_of_rounds: how many rounds the game will continue.\n\n Returns:\n Returns nothing or recursively itself.\n \"\"\"\n if amount_of_rounds <= 0:\n return sys.stdout.write('Congratulations, {0}!\\n'.format(name))\n (progression, missing_element) = create_task()\n answer = question(progression)\n if int(answer) == int(missing_element):\n sys.stdout.write('Correct!\\n')\n return game(name, amount_of_rounds - 1)\n else:\n message = \"'{0}' is wrong answer ;(. 
Correct answer was '{1}'\\n\"\n sys.stdout.write(message.format(answer, missing_element))\n sys.stdout.write(\"Let's try again, {0}!\\n\".format(name))\n","sub_path":"brain_games/progression.py","file_name":"progression.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"297835138","text":"'''\nInput: a List of integers\nReturns: a List of integers\n'''\ndef foo():\n pass\n\ndef product_of_all_other_numbers(arr):\n #* Special Case: just one element in arr\n if len(arr) == 0 or len(arr) == 1:\n return [1]\n\n #* Special Case: just two elements in arr\n if len(arr) == 2:\n return [arr[1], arr[0]]\n\n #* General Case: three or more elements in arr\n # working dict/map multiplying consecutive array elements - going forward in the array\n fwd_dict = {}\n # working dict/map multiplying consecutive array elements - going backward in the array\n bwd_dict = {}\n # Declare the return array object\n ret_arr = []\n\n # Iterate through the array \n for idx in range(len(arr)):\n # Treat the first element as special case\n if idx == 0:\n fwd_dict[0] = arr[0]\n bwd_dict[len(arr)-1] = arr[len(arr)-1]\n continue\n\n # Treat subsequent elements\n fwd_dict[idx] = arr[idx]*fwd_dict[idx-1]\n bwd_dict[len(arr)-1-idx] = arr[len(arr)-1-idx]*bwd_dict[len(arr)-idx]\n\n # Handle all indices EXCEPT the first array index and the last\n for idx in range(1, len(arr)-1):\n # For each indexed position in the array...\n # calculate the product of all of the values BEFORE the array position\n # times all of the values AFTER the array position \n ret_arr.append(fwd_dict[idx-1]*bwd_dict[idx+1])\n \n # Add the first array index and the last array index values\n ret_arr.insert(0, bwd_dict[1])\n ret_arr.append(fwd_dict[len(arr)-2])\n\n # Return the array\n return ret_arr\n\n#if __name__ == '__main__':\n# # Use the main function to test your implementation\n# # arr = [1, 2, 3, 4, 5]\n# arr = [2, 6, 9, 8, 2, 2, 9, 10, 7, 4, 7, 1, 9, 5, 9, 1, 8, 1, 8, 6, 2, 6, 4, 8, 9, 5, 4, 9, 10, 3, 9, 1, 9, 2, 6, 8, 5, 5, 4, 7, 7, 5, 8, 1, 6, 5, 1, 7, 7, 8]\n#\n# print(f\"Output of product_of_all_other_numbers: {product_of_all_other_numbers(arr)}\")\n","sub_path":"product_of_all_other_numbers/product_of_all_other_numbers.py","file_name":"product_of_all_other_numbers.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"125664385","text":"from random import randint\n\nplayer = input('Choose either rock(r) or paper(p) or scissors(s):')\n\nprint(player, 'vs',end=' ') # end with a space instead of a new line\nchosen = randint(1,3)\nif chosen == 1:\n\tcomputer = 'r'\nelif chosen == 2:\n\tcomputer = 'p'\nelif chosen == 3:\n\tcomputer = 's'\n\nprint(computer)\n\nif player == computer :\n\tprint('Draw')\nelif player == 'r' and computer == 's':\n\tprint('player wins!')\nelif player == 'r' and computer == 'p':\n\tprint('Computer wins!')\nelif player == 'p' and computer == 's':\n\tprint('Computer wins!')\nelif player == 'p' and computer == 'r':\n\tprint('player wins!')\nelif player == 's' and computer == 'r':\n\tprint('Computer wins!')\nelif player == 's' and computer == 'p':\n\tprint('player wins!')","sub_path":"rock_paper_scissors.py","file_name":"rock_paper_scissors.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"258074725","text":"from urlparse import urlparse, 
urlunparse\nimport re\n\nfrom bs4 import BeautifulSoup\nimport requests\n\nfrom .base import BaseCrawler\nfrom ...models import Entity, Author, AuthorType\n\nclass CitizenCrawler(BaseCrawler):\n TL_RE = re.compile('(www\\.)?citizen.co.za')\n\n def offer(self, url):\n \"\"\" Can this crawler process this URL? \"\"\"\n parts = urlparse(url)\n return bool(self.TL_RE.match(parts.netloc))\n\n def canonicalise_url(self, url):\n \"\"\" Strip anchors, etc. \"\"\"\n parts = urlparse(url)\n\n # force http, strip www, enforce trailing slash\n path = parts.path\n if not path.endswith('/'):\n path = path + '/'\n\n return urlunparse(['http', 'citizen.co.za', path, parts.params, parts.query, None])\n\n def extract(self, doc, raw_html):\n \"\"\" Extract text and other things from the raw_html for this document. \"\"\"\n super(CitizenCrawler, self).extract(doc, raw_html)\n\n soup = BeautifulSoup(raw_html)\n\n doc.title = self.extract_plaintext(soup.select(\"h1.article-headline\"))\n doc.summary = self.extract_plaintext(soup.select(\".article-excerpt\"))\n doc.text = \"\\n\\n\".join(p.text for p in soup.select(\".article-content > p\"))\n doc.published_at = self.parse_timestamp(self.extract_plaintext(soup.select(\".page-lead-datetime\")))\n\n author = self.extract_plaintext(soup.select(\".article-byline\"))\n\n if author:\n doc.author = Author.get_or_create(author, AuthorType.journalist())\n else:\n doc.author = Author.unknown()\n","sub_path":"dexter/processing/crawlers/citizen.py","file_name":"citizen.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"519896057","text":"import os\n\ndict_num = {\n 'One': 'Один',\n 'Two': 'Два',\n 'Three': 'Три',\n 'Four': 'Четыре'\n}\n\nwith open('txt files/my_hw5_4.txt', 'r', encoding='UTF-8') as file:\n nums = file.read().splitlines()\n i = 0\n list_num_rus = []\n for el in nums:\n el = el.split(' — ')\n for key, value in dict_num.items():\n if el[0] == key:\n el[0] = value\n list_num_rus.append(el[0] + ' — ' + el[1])\n\n\nwith open('txt files/my_hw5_4_1.txt', 'w', encoding='utf-8') as f:\n for el in list_num_rus:\n f.write(el + '\\n')\n","sub_path":"hw5/hw5_4.py","file_name":"hw5_4.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"213309607","text":"import numpy as np\nimport cv2\n\nlow=(50,20,5)\nupp=(150,150,150)\n\ndef intersection(line1, line2):\n \"\"\"See https://stackoverflow.com/a/383527/5087436\"\"\"\n\n rho1, theta1 = line1[0]\n rho2, theta2 = line2[0]\n A = np.array([\n [np.cos(theta1), np.sin(theta1)],\n [np.cos(theta2), np.sin(theta2)]\n ])\n b = np.array([[rho1], [rho2]])\n x0, y0 = np.linalg.solve(A, b)\n x0, y0 = int(np.round(x0)), int(np.round(y0))\n return [[x0, y0]]\n\n\n# capture = cv2.VideoCapture(1)\n\n# while cv2.waitKey(1) & 0xff != ord('q'):\n# ret, image = capture.read()\n\ndef detect_cross(image):\n # image = cv2.imread('./cross.jpg')\n blur=cv2.GaussianBlur(image,(11,11),0)# blur the image to suppress noise\n hsv=cv2.cvtColor(blur,cv2.COLOR_BGR2HSV)# convert the frame to HSV colour space\n mask=cv2.inRange(hsv,low,upp)\n\n #ret,th1 = cv2.threshold(mask,100,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)#using threshold remove noise\n #ret1,th2 = cv2.threshold(th1,100,255,cv2.THRESH_BINARY_INV)# invert the pixels of the image frame\n\n thresh = cv2.erode(mask, None, iterations=2) \n thresh = cv2.dilate(thresh, None, iterations=2)\n\n# gray = cv2.cvtColor(image, 
cv2.COLOR_BGR2GRAY)\n\n edges = cv2.Canny(thresh, 50, 150, apertureSize=3)\n lines = cv2.HoughLines(edges, 1, np.pi/180, 50)\n\n # vertical_line = []\n # vertical_num = 0\n\n # horizontal_line = []\n # horizontal_num = 0\n\n # for line in lines:\n # rho, theta = line[0]\n\n # if (theta>175*np.pi/180.0 and theta<180*np.pi/180.0) or (theta<5*np.pi/180.0 and theta>0):\n # horizontal_num+=1\n # horizontal_line.append(line)\n # elif theta>85*np.pi/180.0 and theta<95*np.pi/180.0:\n # vertical_num += 1\n # vertical_line.append(line)\n\n # minhoriz = 10000000\n # for line in horizontal_line:\n # rho, theta = line[0]\n\n # rho = abs(rho)\n # if rho < minhoriz:\n # minhoriz = rho\n # minline = line\n \n # rho, theta = minline[0]\n # rho = abs(rho)\n # a = np.cos(theta)\n # b = np.sin(theta)\n # x0 = a*rho\n # y0 = b*rho\n # x1 = int(x0+1000*(-b))\n # y1 = int(y0+1000*a)\n # x2 = int(x0-1000*(-b))\n # y2 = int(y0-1000*a)\n # cv2.line(image, (x1,y1), (x2,y2),(0,0,255),2)\n\n slopearr=[]\n flag = 0\n if lines is None:\n return 0\n else:\n for line in lines:\n rho, theta = line[0]\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a*rho\n y0 = b*rho\n x1 = int(x0+1000*(-b))\n y1 = int(y0+1000*a)\n x2 = int(x0-1000*(-b))\n y2 = int(y0-1000*a)\n\n\n if flag == 1:\n for angle in slopearr:\n if(abs(theta-angle) > 85*np.pi/180.0 and abs(theta-angle)<95*np.pi/180.0):\n line2 = line\n cv2.line(image, (x1,y1), (x2,y2),(0,0,255),2)\n flag = 2\n break\n elif flag == 0:\n flag = 1\n line1 = line\n cv2.line(image, (x1,y1), (x2,y2),(0,0,255),2)\n slopearr.append(theta)\n elif flag == 2:\n break\n if flag == 2:\n [[x, y]] = intersection(line1, line2)\n cv2.circle(image, (x, y), 3, 255, -1)\n return 1\n elif flag == 1:\n return 0\n\n cv2.imshow('img', image)","sub_path":"catkin_ws/src/process_image/src/detect_cross_simple.py","file_name":"detect_cross_simple.py","file_ext":"py","file_size_in_byte":3433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"601684160","text":"\"\"\"\nRead file into texts and calls.\nIt's ok if you don't understand how to read files.\n\"\"\"\nimport csv\n\nwith open('texts.csv', 'r') as f:\n reader = csv.reader(f)\n texts = list(reader)\n\nwith open('calls.csv', 'r') as f:\n reader = csv.reader(f)\n calls = list(reader)\n\n\"\"\"\nTASK 4:\nThe telephone company want to identify numbers that might be doing\ntelephone marketing. 
Create a set of possible telemarketers:\nthese are numbers that make outgoing calls but never send texts,\nreceive texts or receive incoming calls.\n\nPrint a message:\n\"These numbers could be telemarketers: \"\n\nThe list of numbers should be printed out one per line in lexicographic order with no duplicates.\n\"\"\"\n\ndef possible_telemarketers(call_records, text_records):\n unique_callers = set()\n\n for call in call_records:\n caller = call[0]\n unique_callers.add(caller)\n\n for call in call_records:\n receiver = call[1]\n\n if receiver in unique_callers:\n unique_callers.remove(receiver)\n\n for text in text_records:\n sender = text[0]\n receiver = text[1]\n\n if sender in unique_callers:\n unique_callers.remove(sender)\n\n if receiver in unique_callers:\n unique_callers.remove(receiver)\n\n telemarketers = sorted(caller for caller in unique_callers)\n\n print(\"These numbers could be telemarketers: \")\n for telemarketer in telemarketers:\n print(\"{0}\".format(telemarketer))\n\n\npossible_telemarketers(calls, texts)\n","sub_path":"Scramble/Task4.py","file_name":"Task4.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"510464754","text":"##\nimport os, sys, string\nimport SConsAddons.Util as sca_util\npj = os.path.join\n\nImport( 'RootDir ves_pkg' )\n\nfileBundle = ves_pkg.createFileBundle( pj( 'share', 'vesuite', 'examples', 'Fermentor' ) )\nfermentorDemoDir = pj( RootDir, 'share', 'examples', 'Fermentor' )\niveFile = sca_util.getFilesRecursiveByExt( fermentorDemoDir, [ '.ive' ] )\n\nfileBundle.addFiles( iveFile )\nfileBundle.addFiles( [ 'Icons/fermentor.jpg' ] )\nfileBundle.addFiles( [ 'fermentor.ves' ] )\nfermentorSubdirs = Split(\"\"\"\n FermentorGP\n FermentorUI\n\"\"\")\n\n##Run SConscript files in all of these folders.\nfor d in fermentorSubdirs:\n SConscript( dirs = d )\n","sub_path":"share/examples/Fermentor/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"440638807","text":"# 17CH10013\n# Chinmay Singh\n# Assignment Number 1\n# python3 \n\nimport pandas as pd\nimport numpy as np\nimport math\n\n# structure to save Decision Tree\nclass Node :\n def __init__(self ,key): \n self.key = key \n self.child = []\n\n# utility function to calculate the entropy of a dataframe\n# to calculate the entropy, the standard formula is used, taking the probability of occurrence of yes and the probability of occurrence of no\n# only required attribute is the last column of the dataframe\ndef entropy(df):\n yes_count = 0\n no_count = 0\n\n for i in range(len(df.iloc[:, -1])):\n if df.iloc[:, -1][i] == \"yes\":\n yes_count += 1\n \n if df.iloc[:, -1][i] == \"no\":\n no_count += 1\n continue\n\n if yes_count == 0 or no_count == 0:\n return 0\n \n entropy = -1 * (((yes_count/(yes_count + no_count))*math.log((yes_count/(yes_count + no_count)), 2)) + ((no_count/(yes_count + no_count))*math.log((no_count/(yes_count + no_count)), 2)))\n\n return entropy\n\n# utility function to calculate the information gain from DataFrameDict and parent information\n# information gain is the difference of parent_entropy and weighted sum of children entropy\n# required attributes are the dictionary of split dataframes and the parent entropy\ndef information_gain(DataFrameDict, parent_entropy, l):\n child_entropy = 0\n\n for key in DataFrameDict.keys():\n child_entropy += (len(DataFrameDict[key])/l)* entropy(DataFrameDict[key].reset_index(drop = True))\n\n return parent_entropy - child_entropy\n
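\n# Worked example (editorial, not in the original): for a parent with 9 'yes'\n# and 5 'no' rows, entropy = -(9/14)*log2(9/14) - (5/14)*log2(5/14) ~ 0.940;\n# splitting into children of 5, 4 and 5 rows with entropies 0.971, 0.0 and\n# 0.971 gives gain = 0.940 - (5/14)*0.971 - (4/14)*0.0 - (5/14)*0.971 ~ 0.247.\n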
\n# utility function to split the dataframe on the basis of attribute\ndef split(df, attribute):\n temp = list(set(df[attribute]))\n\n DataFrameDict = {elem : pd.DataFrame for elem in temp}\n\n for key in DataFrameDict.keys():\n DataFrameDict[key] = df[:][df[attribute] == key]\n\n return DataFrameDict\n\n# utility function to traverse the tree and print the tree hierarchically\ndef traverse_tree(root, indent): \n \n # Stack to store the nodes \n nodes=[] \n \n # push the current node onto the stack \n nodes.append(root) \n \n # loop while the stack is not empty \n while (len(nodes)): \n \n # store the current node and pop it from the stack \n curr = nodes[0] \n nodes.pop(0)\n # current node has been traversed\n\n for i in range(len(indent)):\n if curr.key in indent[i]:\n print((i + 1)*\"\\t\", curr.key)\n # print(curr.key)\n \n # store all the children of current node from \n # right to left. \n for it in range(len(curr.child)-1,-1,-1): \n nodes.insert(0,curr.child[it])\n\n# utility function to create a decision tree\n# order stores the order in which the tree is being split in the recursion\n# attributes has all the attributes in the total dataset\n# current dataset is the parent node for the recursion\ndef tree(orignal_data, current_data, attributes, root, order):\n \n if len(set(current_data.iloc[:, -1])) == 1:\n \n root.child.append(Node(str(list(set(current_data.iloc[:, -1]))[0])))\n return root\n \n if len(attributes) == 0:\n \n temp = np.unique(current_data.iloc[:, -1], return_counts=True)\n index = np.argmax(temp[1])\n root.child.append(Node(temp[0][index]))\n return root\n \n parent_entropy = entropy(current_data.reset_index())\n \n max_gain = -10000\n for i in attributes: \n DataFrameDict = split(current_data, i)\n\n gain = information_gain(DataFrameDict, parent_entropy, len(current_data))\n\n if gain >= max_gain:\n max_gain = gain\n element = i\n \n order.append(element)\n attributes.remove(element)\n \n DataFrameDict = split(current_data, element)\n\n for key in DataFrameDict.keys():\n root.child.append(Node(str(key)))\n\n for i in range(len(root.child)):\n root.child[i] = tree(orignal_data, DataFrameDict[root.child[i].key], attributes[:], root.child[i], order)\n \n return root\n\n# main function to read the data and call other utility functions\ndef main():\n data = pd.read_csv(\"data1_19.csv\")\n\n root = Node(\"data\")\n\n attributes = list(data.columns.values)\n target = attributes[-1]\n attributes = attributes[:-1]\n\n order = []\n root = tree(data, data, attributes, root, order)\n order = order[:3]\n indent = []\n for i in order:\n indent.append(list(set(data[i])))\n indent.append(list(set(data[target])))\n traverse_tree(root, indent)\n\nif __name__==\"__main__\":\n main()","sub_path":"DecisionTree.py","file_name":"DecisionTree.py","file_ext":"py","file_size_in_byte":4616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"322872380","text":"import sys\nimport json\n\nfrom typing import List, Dict, Any\n\n\ndef mutant_to_dict(mutant: List[str], mutant_types: Dict[str, bool]) -> Dict[str, Any]:\n return {\n 'systematic_name': mutant[0].strip(),\n 'strain_descriptor': mutant[1].strip(),\n 'associated_genes': mutant[2].split(' | '),\n 'phenotypes': mutant[3].split(' | '),\n 'mutant_types': mutant_types,\n }\n\n\ndef phenotypes_for_dictyostelium_mutants(\n all_mutants: str,\n null_mutants: str,\n 
overexpression_mutants: str,\n multiple_mutants: str,\n developmental_mutants: str,\n other_mutants: str,\n):\n\n file_name = 'mutant_phenotypes.json'\n\n with open(null_mutants, 'r') as fp:\n null_mutants = set([line.strip().split('\\t')[0] for line in fp])\n\n with open(overexpression_mutants, 'r') as fp:\n overexpression_mutants = set([line.strip().split('\\t')[0] for line in fp])\n\n with open(multiple_mutants, 'r') as fp:\n multiple_mutants = set([line.strip().split('\\t')[0] for line in fp])\n\n with open(developmental_mutants, 'r') as fp:\n developmental_mutants = set([line.strip().split('\\t')[0] for line in fp])\n\n with open(other_mutants, 'r') as fp:\n other_mutants = set([line.strip().split('\\t')[0] for line in fp])\n\n with open(all_mutants, 'r') as fp:\n # skip header\n fp.readline()\n\n all_mutants = []\n for line in fp.readlines():\n mutant = line.strip().split('\\t')\n\n if len(mutant) == 4:\n mutant_id = mutant[0]\n\n types = {\n 'null': mutant_id in null_mutants,\n 'overexpression': mutant_id in overexpression_mutants,\n 'multiple ': mutant_id in multiple_mutants,\n 'other': mutant_id in other_mutants,\n 'developmental': mutant_id in developmental_mutants,\n }\n\n all_mutants.append(mutant_to_dict(mutant, types))\n\n with open(f'data/dictybase/{file_name}', 'w', encoding='utf-8') as fp:\n json.dump(all_mutants, fp, ensure_ascii=False, indent=4)\n\n\nif __name__ == \"__main__\":\n phenotypes_for_dictyostelium_mutants(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6])\n","sub_path":"update_scripts/dictybase.py","file_name":"dictybase.py","file_ext":"py","file_size_in_byte":2255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"246181369","text":"import os\nimport uuid\nfrom setuptools import setup, find_packages\nfrom pip.req import parse_requirements\n\n# parse_requirements() returns generator of pip.req.InstallRequirement objects\nif os.path.exists(\"requirements.txt\"):\n install_reqs = parse_requirements(\"requirements.txt\", session=uuid.uuid1())\nelse:\n install_reqs = parse_requirements(\n \"Flask_Captcha.egg-info/requires.txt\", session=uuid.uuid1())\n\n# reqs is a list of requirement\n# e.g. 
['django==1.5.1', 'mezzanine==1.4.6']\nreqs = [str(ir.req) for ir in install_reqs]\n\nsetup(\n name='Flask-Captcha',\n version=\"0.1.8\",\n description='A very simple, yet powerful, Flask captcha extension',\n author='Eduardo Robles Elvira',\n author_email='edulix@wadobo.com',\n url='https://github.com/agoraciudadana/flask-captcha',\n license='MIT',\n packages=find_packages(),\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Topic :: Security',\n ],\n include_package_data=True,\n zip_safe=False,\n install_requires=reqs\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"282987432","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 10 17:22:43 2017\n\n@author: kamran\n\"\"\"\n\nx = 25\nepsilon = 0.01\nstep = epsilon*2 #?\nprint('Step is:', step)\nnumGuesses = 0\nans = 0.0\nwhile (abs(ans**2 - x) >= epsilon and ans <= x):\n ans += step\n numGuesses += 1\nif abs(ans**2 - x) >= epsilon:\n print('Failed on square root of', x)\nelse:\n print(ans, 'is close to square root of', x)\n \nprint(numGuesses)","sub_path":"mit-python/week2/square_root_approximation.py","file_name":"square_root_approximation.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"422087886","text":"from rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom externalMethodes import loginIDGenerator\nfrom login_app.models import Login\nfrom login_app.serializers import LoginSerializer,SignUpSerializer,acDeatailsSerializer,walletDeatailsSerializer\n\n@api_view(['GET'])\ndef login_list(request,user_Name,password):\n \"\"\"\n To check whether the input username and password are correct\n \"\"\"\n newstr = user_Name.replace(\"/\", \"\")\n if request.method == 'GET':\n if Login.objects.filter(user_Name=newstr,password=password).exists():\n\n try:\n login = Login.objects.filter(user_Name=newstr,password=password)\n #get(user_Name=newstr)\n\n serializer = LoginSerializer(login,many=True)\n return Response(serializer.data)\n\n #Task.objects.get(title=title)\n except Login.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n else:\n return Response(data={\"Invalid User\"})\n\n else:\n return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n@api_view(['POST'])\ndef checkValidation(request):\n if request.method=='POST':\n usr=request.data.get('user_Name')\n mob=request.data.get('mob_num')\n if Login.objects.filter(user_Name=usr).exists():\n return Response(data={\"details\":\"Already Existing UserName\"},status=status.HTTP_306_RESERVED)\n elif Login.objects.filter(mob_num=mob).exists():\n return Response(data={\"details\":\"Already Existing Mobile Number\"},status=status.HTTP_306_RESERVED)\n else:\n return Response(data={\"details\":\"Your Account Is Available\"})\n else:\n return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n
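\n# Editorial note, not in the original: the two existence checks above could be\n# collapsed into a single query with Django Q objects, e.g.\n# Login.objects.filter(Q(user_Name=usr) | Q(mob_num=mob)).exists(),\n# at the cost of no longer knowing which field clashed.\n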
\n\n@api_view(['POST','GET','PUT'])\ndef createAccount(request):\n \"\"\"\n To create the Account Details of Qpay\n \"\"\"\n if request.method == 'POST':\n '''\n usr=request.data.get('user_Name')\n mob=request.data.get('mob_num')\n if Login.objects.filter(user_Name=usr).exists():\n return Response(data={\"deatailS\":\"Already Existing UserName\"},status=status.HTTP_306_RESERVED)\n elif Login.objects.filter(mob_num=mob).exists():\n return Response(data={\"deatails\":\"Already Existing Mobile Number\"},status=status.HTTP_306_RESERVED)\n '''\n serializer = SignUpSerializer(data=request.data)\n if serializer.is_valid():\n newLoginId=loginIDGenerator(serializer)\n serializer.save(login_id=newLoginId)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n elif request.method == 'PUT':\n usr=request.data.get('user_Name')\n mob=request.data.get('mob_num')\n if Login.objects.filter(user_Name=usr).exists():\n return Response(data={\"details\":\"Already Existing UserName\"},status=status.HTTP_306_RESERVED)\n elif Login.objects.filter(mob_num=mob).exists():\n return Response(data={\"details\":\"Already Existing Mobile Number\"},status=status.HTTP_306_RESERVED)\n else:\n return Response(data={\"details\":\"Your Account Is Available\"})\n\n\n elif request.method=='GET':\n acDeatails=Login.objects.all()\n serializer = acDeatailsSerializer(acDeatails,many=True)\n return Response(serializer.data)\n else:\n return Response(data={\"Not Allowed\"})\n\n@api_view(['GET'])\ndef accountInfo(request,pk):\n if request.method=='GET':\n acInfo=Login.objects.get(login_id=pk)\n serializer = acDeatailsSerializer(acInfo)\n return Response(serializer.data)\n else:\n return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n@api_view(['GET'])\ndef walletInfo(request,pk):\n if request.method=='GET':\n wallInfo=Login.objects.get(login_id=pk)\n serializer = walletDeatailsSerializer(wallInfo)\n return Response(serializer.data)\n else:\n return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n@api_view(['POST'])\ndef changePWD(request,pk):\n if request.method=='POST':\n change_PWD=Login.objects.get(login_id=pk)\n change_PWD.password=request.data.get('newPWD')\n change_PWD.save()\n return Response(data={\"details\":\"Success !\"},status=status.HTTP_200_OK)\n else:\n return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n\n\n","sub_path":"wsgi/myproject/login_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"638582693","text":"import sys\nfrom distutils.core import setup\npython_version = sys.version_info[:2]\n\nif python_version < (2, 7):\n raise Exception(\"This version of amiconn requires Python 2.7 or above. 
\")\n\n\nsetup(name='amiconn',\n author='John Machin',\n version='0.50',\n py_modules=['amiconn'],\n )\n","sub_path":"Amiconn/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"646469773","text":"import os\nimport questionary\nimport logging\n\nimport nf_core.utils\n\nfrom .modules_command import ModuleCommand\nfrom .module_utils import get_installed_modules, get_module_git_log, module_exist_in_repo\nfrom .modules_repo import ModulesRepo\n\nlog = logging.getLogger(__name__)\n\n\nclass ModuleInstall(ModuleCommand):\n def __init__(self, pipeline_dir, force=False, latest=False, sha=None, update_all=False):\n super().__init__(pipeline_dir)\n self.force = force\n self.latest = latest\n self.sha = sha\n self.update_all = update_all\n\n def install(self, module):\n if self.repo_type == \"modules\":\n log.error(\"You cannot install a module in a clone of nf-core/modules\")\n return False\n # Check whether pipelines is valid\n self.has_valid_directory()\n if not self.update_all:\n # Get the available modules\n try:\n self.modules_repo.get_modules_file_tree()\n except LookupError as e:\n log.error(e)\n return False\n\n if self.latest and self.sha is not None:\n log.error(\"Cannot use '--sha' and '--latest' at the same time!\")\n return False\n\n if module is None:\n module = questionary.autocomplete(\n \"Tool name:\",\n choices=self.modules_repo.modules_avail_module_names,\n style=nf_core.utils.nfcore_question_style,\n ).unsafe_ask()\n\n # Check that the supplied name is an available module\n if module and module not in self.modules_repo.modules_avail_module_names:\n log.error(\"Module '{}' not found in list of available modules.\".format(module))\n log.info(\"Use the command 'nf-core modules list' to view available software\")\n return False\n repos_and_modules = [(self.modules_repo, module)]\n else:\n if module:\n raise UserWarning(\"You cannot specify a module and use the '--all' flag at the same time\")\n self.force = True\n\n self.get_pipeline_modules()\n repos_and_modules = [\n (ModulesRepo(repo=repo_name), modules) for repo_name, modules in self.module_names.items()\n ]\n # Load the modules file trees\n for repo, _ in repos_and_modules:\n repo.get_modules_file_tree()\n repos_and_modules = [(repo, module) for repo, modules in repos_and_modules for module in modules]\n\n # Load 'modules.json'\n modules_json = self.load_modules_json()\n if not modules_json:\n return False\n\n exit_value = True\n for modules_repo, module in repos_and_modules:\n if not module_exist_in_repo(module, modules_repo):\n warn_msg = f\"Module '{module}' not found in remote '{modules_repo.name}' ({modules_repo.branch})\"\n if self.update_all:\n warn_msg += \". 
Skipping...\"\n log.warning(warn_msg)\n exit_value = False\n continue\n\n if modules_repo.name in modules_json[\"repos\"]:\n current_entry = modules_json[\"repos\"][modules_repo.name].get(module)\n else:\n current_entry = None\n\n # Set the install folder based on the repository name\n install_folder = [modules_repo.owner, modules_repo.repo]\n\n # Compute the module directory\n module_dir = os.path.join(self.dir, \"modules\", *install_folder, module)\n\n if current_entry is not None and self.sha is None:\n # Fetch the latest commit for the module\n current_version = current_entry[\"git_sha\"]\n try:\n git_log = get_module_git_log(module, modules_repo=modules_repo, per_page=1, page_nbr=1)\n except LookupError as e:\n log.error(e)\n exit_value = False\n continue\n except UserWarning:\n log.error(f\"Was unable to fetch version of '{modules_repo.name}/{module}'\")\n exit_value = False\n continue\n latest_version = git_log[0][\"git_sha\"]\n if current_version == latest_version and (not self.force or self.latest or self.update_all):\n log.info(f\"'{modules_repo.name}/{module}' is already up to date\")\n continue\n elif not self.force:\n log.error(\"Found newer version of module.\")\n self.latest = self.force = questionary.confirm(\n \"Do you want to install it? (--force --latest)\", default=False\n ).unsafe_ask()\n if not self.latest:\n exit_value = False\n continue\n else:\n latest_version = None\n\n # Check that we don't already have a folder for this module\n if not self.check_module_files_installed(module, module_dir):\n exit_value = False\n continue\n\n if self.sha:\n if current_entry is not None:\n if self.force:\n if current_entry[\"git_sha\"] == self.sha:\n log.info(f\"Module {modules_repo.name}/{module} already installed at {self.sha}\")\n continue\n else:\n exit_value = False\n continue\n\n if self.force:\n log.info(f\"Removing old version of module '{module}'\")\n self.clear_module_dir(module, module_dir)\n\n if self.download_module_file(module, self.sha, modules_repo, install_folder, module_dir):\n self.update_modules_json(modules_json, modules_repo.name, module, self.sha)\n else:\n exit_value = False\n continue\n else:\n if self.latest or self.update_all:\n # Fetch the latest commit for the module\n if latest_version is None:\n try:\n git_log = get_module_git_log(module, modules_repo=modules_repo, per_page=1, page_nbr=1)\n except UserWarning:\n log.error(f\"Was unable to fetch version of module '{module}'\")\n exit_value = False\n continue\n latest_version = git_log[0][\"git_sha\"]\n version = latest_version\n else:\n try:\n version = self.prompt_module_version_sha(\n module,\n installed_sha=current_entry[\"git_sha\"] if not current_entry is None else None,\n modules_repo=modules_repo,\n )\n except SystemError as e:\n log.error(e)\n exit_value = False\n continue\n log.info(f\"Installing {module}\")\n log.debug(\n f\"Installing module '{module}' at modules hash {modules_repo.modules_current_hash} from {self.modules_repo.name}\"\n )\n\n if self.force:\n log.info(f\"Removing old version of module '{module}'\")\n self.clear_module_dir(module, module_dir)\n\n # Download module files\n if not self.download_module_file(module, version, modules_repo, install_folder, module_dir):\n exit_value = False\n continue\n\n # Update module.json with newly installed module\n self.update_modules_json(modules_json, modules_repo.name, module, version)\n return exit_value\n\n def check_module_files_installed(self, module_name, module_dir):\n \"\"\"Checks if a module is already installed\"\"\"\n if 
os.path.exists(module_dir):\n            if not self.force:\n                log.error(f\"Module directory '{module_dir}' already exists.\")\n                self.force = questionary.confirm(\n                    \"Do you want to overwrite local files? (--force)\", default=False\n                ).unsafe_ask()\n            return self.force\n        else:\n            return True\n\n    def prompt_module_version_sha(self, module, installed_sha=None, modules_repo=None):\n        if modules_repo is None:\n            modules_repo = self.modules_repo\n        older_commits_choice = questionary.Choice(\n            title=[(\"fg:ansiyellow\", \"older commits\"), (\"class:choice-default\", \"\")], value=\"\"\n        )\n        git_sha = \"\"\n        page_nbr = 1\n        try:\n            next_page_commits = get_module_git_log(module, modules_repo=modules_repo, per_page=10, page_nbr=page_nbr)\n        except UserWarning:\n            next_page_commits = None\n        except LookupError as e:\n            log.warning(e)\n            next_page_commits = None\n\n        while git_sha == \"\":\n            commits = next_page_commits\n            try:\n                next_page_commits = get_module_git_log(\n                    module, modules_repo=modules_repo, per_page=10, page_nbr=page_nbr + 1\n                )\n            except UserWarning:\n                next_page_commits = None\n            except LookupError as e:\n                log.warning(e)\n                next_page_commits = None\n\n            choices = []\n            for title, sha in map(lambda commit: (commit[\"trunc_message\"], commit[\"git_sha\"]), commits):\n\n                display_color = \"fg:ansiblue\" if sha != installed_sha else \"fg:ansired\"\n                message = f\"{title} {sha}\"\n                if installed_sha == sha:\n                    message += \" (installed version)\"\n                commit_display = [(display_color, message), (\"class:choice-default\", \"\")]\n                choices.append(questionary.Choice(title=commit_display, value=sha))\n            if next_page_commits is not None:\n                choices += [older_commits_choice]\n            git_sha = questionary.select(\n                f\"Select '{module}' version:\", choices=choices, style=nf_core.utils.nfcore_question_style\n            ).unsafe_ask()\n            page_nbr += 1\n        return git_sha\n","sub_path":"nf_core/modules/install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":10410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"649586199","text":"# Copyright (c) 2014 Mirantis Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the License);\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an AS IS BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and#\n# limitations under the License.\nfrom SnapshotState import SnapshotState\nfrom Snapshot import Snapshot\n__author__ = 'mirrorcoder'\n\n\nclass SnapshotInstances(SnapshotState):\n    def create_snapshot(self):\n        snapshot = Snapshot()\n        [snapshot.addInstance(id=instance.id,\n                              status=instance.status,\n                              name=instance.name)\n         for instance in self.nova_client.servers.list()]\n        return snapshot\n","sub_path":"migrationlib/os/utils/snapshot/SnapshotInstances.py","file_name":"SnapshotInstances.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"586378094","text":"from time import sleep\n\nfrom tests.virusmusic.player.default import Test\nfrom pages.virusmusic.main_page import MainPage\n\n\nclass TestWrapPlayer(Test):\n    def test(self):\n        page = MainPage(self.driver)\n        page.open()\n        track_id = page.get_first_track_id()\n        
page.play(track_id)\n # time for player to move\n sleep(1)\n player_pos_before = page.get_player_pos()\n page.wrap_player()\n player_pos_after = page.get_player_pos()\n\n self.assertNotEqual(player_pos_after, player_pos_before)\n","sub_path":"tests/virusmusic/player/player_wrap_test.py","file_name":"player_wrap_test.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"207017520","text":"from flask import Flask\r\nfrom flask import render_template, request\r\n# import the function\r\nfrom dict import translate2\r\n# import the dictionary\r\nfrom dict import hira\r\napp = Flask(__name__)\r\nnotes = []\r\n\r\n@app.route('/')\r\ndef index():\r\n return render_template(\"index.html\")\r\n\r\n@app.route('/table')\r\ndef table():\r\n return render_template(\"table.html\")\r\n\r\n@app.route('/lookup', methods=[\"POST\", \"GET\"])\r\ndef lookup():\r\n input = request.form.get('input')\r\n try:\r\n output = translate2(input, hira)\r\n notes.append(output)\r\n except:\r\n output = 'please enter'\r\n return render_template(\"look_up.html\", output=output, notes=notes)\r\n\r\n@app.route('/signup', methods=[\"GET\",'POST'])\r\ndef signup():\r\n new_id = request.form.get('new_id')\r\n new_pw = request.form.get('new_pw')\r\n confirm_pw = request.form.get('confirm_pw')\r\n\r\n message = ''\r\n\r\n accounts = open('accounts.txt', 'r')\r\n\r\n\r\n info_submitted = False\r\n if new_id and new_pw and confirm_pw:\r\n info_submitted = True\r\n\r\n acc_created = False\r\n\r\n acc = {}\r\n if info_submitted:\r\n if confirm_pw == new_pw:\r\n acc.update({str(new_id) : str(new_pw)})\r\n message = 'account created!'\r\n accounts = open('accounts.txt', 'w')\r\n accounts.write(str(acc))\r\n accounts.close()\r\n elif confirm_pw != new_pw:\r\n message = 'passwords do not match, please try again.'\r\n return render_template('signup.html', info_submitted=info_submitted, message=message)\r\n\r\n","sub_path":"Japanese learning tool/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"144998497","text":"from os import listdir\nfrom os.path import isfile, join\n\nfolder = \"./mflops_msize_danny\"\nout_folder = \"./parallel/size/\"\nout_map = [100, 500, 1000, 15000, 20000]\nfor f in listdir(folder):\n if isfile(join(folder, f)):\n thread_count = f[:1]\n alg = f.split(\".\")[0][-1:]\n if alg != \"6\":\n continue\n with open(join(folder, f)) as file:\n lines = file.readlines()\n for index, line in enumerate(lines):\n time = line.rstrip(\"\\n\")\n with open(join(out_folder, f\"{thread_count}.txt\"), \"a+\") as write_file:\n write_file.write(f\"{time} {out_map[index]}\\n\")\n","sub_path":"w2/assignment_final/visualization/vis_prep.py","file_name":"vis_prep.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"388292157","text":"\nfrom re import M\nfrom tensorflow.keras.models import Sequential, Model\nfrom tensorflow.keras.layers import Dense, SimpleRNN, Dropout, LSTM, GRU, Input\nimport numpy as np \n\nfrom sklearn.datasets import load_boston\ndatasets = load_boston()\n\n#1.data\nx = datasets.data\ny = datasets.target\n'''\n\n'''\nprint(np.max(x))\n\nfrom sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(x,y,\ntrain_size = 0.95, random_state=66)\n\nx_train = 
x_train.reshape(480,13,1) #\nx_test = x_test.reshape(26, 13, 1)\n\n'''\n\n'''\n\n\n\n\nmodel = Sequential()\nmodel.add(LSTM(16,activation = 'relu',input_shape=(13,1)))\nmodel.add(Dense(32,activation='relu'))\nmodel.add(Dense(16,activation='relu'))\nmodel.add(Dense(8,activation='relu'))\nmodel.add(Dense(4,activation='relu'))\nmodel.add(Dense(2,activation='relu'))\nmodel.add(Dense(1 ,activation='relu'))\n\nfrom tensorflow.keras.callbacks import EarlyStopping\nes = EarlyStopping(monitor='val_loss', patience=30, mode='min', verbose=3)\nimport time\nstarttime = time.time()\nmodel.compile(loss = 'mse', optimizer = 'adam')\nhist = model.fit(x_train, y_train, epochs=1000, batch_size=64, validation_split=0.003, verbose=2,callbacks=[es]) \nloss = model.evaluate(x_test, y_test,batch_size=64) \nend = time.time()- starttime\n\nprint(\"걸린시간\", end)\nprint('loss : ', loss)\ny_pred = model.predict(x_test) \n\n# y_pred = scaler.transform(y_pred)\nfrom sklearn.metrics import r2_score\nr2 = r2_score(y_test, y_pred)\nprint(\"r2score \", r2)\n\n'''\n걸린시간 21.428115844726562\nloss : 20.355998992919922\nr2score 0.8421284246602054\n'''","sub_path":"keras/keras42_boston_lstm.py","file_name":"keras42_boston_lstm.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"225184664","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport web, json\nfrom StringIO import StringIO\nfrom youtupi.modules.local import module_local\nfrom youtupi.modules.youtube import module_youtube\nfrom youtupi.playlist import prepareVideo, findVideoInPlaylist, removeVideo, playNextVideo, playVideo, addVideo, controlPlayer, playList\n\nclass redirect:\n\tdef GET(self, path):\n\t\tweb.seeother('/' + path)\n\nclass index:\n\tdef GET(self):\n\t\tweb.seeother('/static/index.html')\n\nclass playlist:\n\tdef GET(self):\n\t\tplaylistVideos = list()\n\t\tfor video in playList():\n\t\t\tplaylistVideos.append(video.data)\n\t\treturn json.dumps(playlistVideos, indent=4)\n\t\n\tdef POST(self):\n\t\tdata = json.load(StringIO(web.data()))\n\t\taddVideo(data)\n\t\tweb.seeother('/playlist')\n\t\t\n\tdef DELETE(self):\n\t\tdata = json.load(StringIO(web.data()))\n\t\tremoveVideo(data['id'])\n\t\tweb.seeother('/playlist')\n\nclass control:\n\t\n\tdef GET(self, action):\n\t\tif action == \"play\":\n\t\t\tplayNextVideo()\n\t\telse:\t\t\t\n\t\t\tcontrolPlayer(action)\n\t\tweb.seeother('/playlist')\n\t\t\n\tdef POST(self, action):\n\t\tif action == \"play\":\n\t\t\tdata = json.load(StringIO(web.data()))\n\t\t\tvideo = findVideoInPlaylist(data['id'])\n\t\t\tif video:\n\t\t\t\tprepareVideo(video)\n\t\t\t\tplayVideo(data['id'])\n\t\tweb.seeother('/playlist')\n\nif __name__ == \"__main__\":\n\turls = (\n\t\t'/(.*)/', 'redirect',\n\t\t'/playlist', 'playlist',\n\t\t'/video/(.*)', 'video',\n\t\t'/control/(.*)', 'control',\n\t\t'/local', module_local,\n\t\t'/youtube', module_youtube,\n\t\t'/', 'index'\n\t)\n\tapp = web.application(urls, globals())\n\tapp.run()\n","sub_path":"youtupi.py","file_name":"youtupi.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"109044156","text":"# MSBoard class\n# Luis Henriquez and Iavor Dekov\n#\n\nfrom Tile import Tile\nfrom random import sample\nimport pygame, sys\nfrom pygame.locals import *\n\n\n\"\"\"\nRepresents the board that contains all the \ntiles in the Minesweeper game\n\"\"\"\n\nclass MSBoard:\n \"\"\"Represents the board that holds 
all the tile in Minesweeper.\n Has a location, columns and rows, and the the tilewidth.\"\"\"\n def __init__(self, leftTop, dimensions, tilewidth, numBombs):\n self.leftTop = leftTop\n self.twidth = tilewidth\n self.width = dimensions[0]\n self.height = dimensions[1]\n self.numbombs = numBombs\n self.board = self.createBoard()\n self.setBombs()\n\n\n def getTilesAround(self, atile):\n \"\"\"Returns a list of the tiles around a certain tile.\"\"\"\n return [tile for tile in self.around(atile)]\n\n def getTileAtIndex(self, row, col):\n \"\"\"\n Accepts the row and column of the board and returns the tile there.\n Returns an None if there is an IndexError.\n :param row:\n :param col:\n :return:\n \"\"\"\n try:\n if (row > -1) and (col > -1):\n return self.board[row][col]\n except IndexError:\n return None\n\n def around(self, tile):\n \"\"\"\n Yields a stream of tiles around a specific tile.\n :param tile:\n :return:\n \"\"\"\n left, top = tile.getLeftTop()\n\n for x in [1, 0, -1]:\n for y in [1, 0, -1]:\n atile = self.getTileAtIndex(top + y, left + x)\n if atile and atile is not tile:\n yield atile\n\n\n def tiles(self):\n \"\"\"Stream of all the tiles of the board.\"\"\"\n for lst in self.board:\n for tile in lst:\n yield tile\n\n def createBoard(self):\n \"\"\"\n Returns 2D list of the board.\n :return:\n \"\"\"\n startrow, startcol = self.leftTop\n twidth = self.twidth\n return [[Tile((startcol + col * twidth, startrow + row * twidth), twidth) for col in range(self.width)]\n for row in range(self.height)]\n\n def setBombs(self):\n for tile in sample(list(self.tiles()), self.numbombs):\n tile.setBomb()\n\n def highlight(self, tile, surface):\n tile.highlight(surface)\n\n\n def revealAround(self, tile):\n \"\"\"\n Reveals the tiles around the tile.\n :param tile:\n :return:\n \"\"\"\n if not tile.hasBomb() and not tile.hasNum():\n blanktiles = []\n for tile in self.getTilesAround(tile):\n if not tile.hasBomb():\n tile.reveal()\n if not tile.hasNum():\n blanktiles.append(tile)\n for tile in blanktiles:\n self.revealAround(tile)\n\n def revealAnimation(self, tile):\n \"\"\"\n Reveals a tile and the tiles around that tile.\n :param tile:\n :return:\n \"\"\"\n tile.reveal()\n self.revealAround(tile)\n\n def getTileAtPixel(self, pixelpoint):\n \"\"\"\n Returns the tile at the specific pixel location.\n :param pixelpoint:\n :return:\n \"\"\"\n if pixelpoint != (None, None):\n for tile in self.tiles():\n if tile.contains(pixelpoint):\n return tile\n return None\n\n def allRevealed(self):\n \"\"\"\n Returns whether all the tiles of the board are revealed or not.\n :return:\n \"\"\"\n for tile in self.tiles():\n if not tile.isRevealed():\n return False\n return True\n\n def draw(self, surface):\n \"\"\"\n Draws the board.\n :param surface:\n :return:\n \"\"\"\n for tile in self.tiles():\n tile.draw(surface)\n\n# main method for unit test\ndef main():\n DISPLAYSURF = pygame.display.set_mode((400, 300))\n DISPLAYSURF.fill((255, 255, 255))\n m = MSBoard((10, 10), (4, 6), 20, 4)\n # print(m.tiles())\n # print(m.board)\n # print(m.getTileAtPixel((22, 22)).getLeftTop())\n # print(m.allRevealed())\n # print(m.getTileAtPixel((0, 0)))\n # for tile in m.getTilesAround(m.getTileAtPixel((0, 0))):\n # print(tile.getLeftTop())\n # for tile in m.tiles():\n # print(tile.hasBomb())\n\n m.draw(DISPLAYSURF)\n\n while True:\n\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n pygame.display.update()\n\nif __name__ == '__main__':\n 
main()\n\n\n","sub_path":"MSBoard.py","file_name":"MSBoard.py","file_ext":"py","file_size_in_byte":4567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"289477021","text":"# Python_project\nimport logging\nimport datetime\nfrom post import Post\nimport hashlib\nimport os\nfrom hash1 import HASH\n\n\n# her we define class User to do all the things asked in project\nclass User():\n # to track the number of users\n track = 0\n\n def __init__(self):\n User.track += 1\n logging.basicConfig(filename='app.log', filemode='w', format='%(levelname)s - %(asctime)s - %(message)s',\n level=logging.INFO)\n logging.info('creating new user!')\n self.user_name = ''\n self.pass_word = ''\n self.email = ''\n self.phone = ''\n self.bio = ''\n self.login_status = False\n self.acceptance = False\n self.following_list = []\n self.followers_list = []\n self.post_list = []\n\n def get_Info(self):\n logging.basicConfig(filename='app.log', filemode='w', format='%(levelname)s - %(asctime)s - %(message)s',\n level=logging.INFO)\n logging.info(\"getting information from user!\")\n self.user_name = input(\"Enter your desired username:\\n \")\n with open(\"username.txt\") as file_object1:\n for i, line in enumerate(file_object1):\n while str(line) == self.user_name + \"\\n\":\n self.user_name = input(\"please Enter another valid username,that was repeated:\\n \")\n while len(self.pass_word) < 8:\n self.pass_word = input(\"Enter your desired password(It should be at least 8 letters):\\n\")\n self.bio = input(\"You can add bio to your profile: \\n\")\n self.phone = input(\"You can add phone number to your profile: \\n\")\n while len(self.phone) < 7:\n self.phone = input(\"please enter the correct phone number: \\n\")\n self.email = input(\"You can add email to your profile: \\n\")\n # here we save personal detail in txt files\n while '@' not in self.email:\n self.email = input(\"please add valid email to your profile:\\n \")\n with open(\"username.txt\", \"a+\") as file_object1:\n # Move read cursor to the start of file.\n file_object1.seek(0)\n # If file is not empty then append '\\n'\n data = file_object1.read(100)\n if len(data) > 0:\n file_object1.write(\"\\n\")\n # Append text at the end of file\n file_object1.write(self.user_name)\n file_object1.write(\"\\n\")\n file_object1.write(self.bio)\n file_object1.write(\"\\n\")\n file_object1.write(self.phone)\n file_object1.write(\"\\n\")\n file_object1.write(self.email)\n with open(\"password.txt\", \"a+\") as file_object:\n file_object.seek(0)\n data = file_object.read(100)\n if len(data) > 0:\n file_object.write(\"\\n\")\n password = self.pass_word\n file_object.write(HASH(password))\n\n # here we want to check whether user name pass word are correct or not\n def login(self, username, password):\n logging.basicConfig(filename='app.log', filemode='w', format='%(levelname)s - %(asctime)s - %(message)s',\n level=logging.INFO)\n logging.info(\"user is trying to login!\")\n with open(\"username.txt\") as file_object:\n for i, line1 in enumerate(file_object):\n if str(line1) == username + \"\\n\":\n with open(\"password.txt\") as file_object2:\n for j, line2 in enumerate(file_object2):\n if j == i / 4:\n if str(line2) == HASH(password) + \"\\n\":\n self.login_status = True\n if self.login_status == True:\n print(\"Here is your following request:\")\n with open(\"follow_request_{}.txt\".format(username)) as file_object:\n for i, line1 in enumerate(file_object):\n print(line1)\n self.user_name = username\n self.pass_word = 
password\n\n\n # here we want to see others profile\n def watch_others_profile(self):\n logging.basicConfig(filename='app.log', filemode='w', format='%(levelname)s - %(asctime)s - %(message)s',\n level=logging.INFO)\n logging.info(\"user wants to see others profile!\")\n with open(\"username.txt\") as file_object:\n needed = file_object.readlines()\n # to track the row of data so we can show the appropriate data\n print(\"Here is the list of others: \")\n with open(\"username.txt\") as file_object:\n\n for i, line1 in enumerate(file_object):\n if i % 4 == 0 and i != len(needed):\n if str(line1) != self.user_name + \"\\n\":\n print(10 * \"-\")\n print(\"username: {}\".format(line1))\n print(\"bio: {}\".format(needed[i + 1]))\n print(\"phone: {}\".format(needed[i + 2]))\n print(\"email: {}\".format(needed[i + 3]))\n print(10 * '-')\n\n # here is a function to follow a user name\n def follow(self, username):\n logging.basicConfig(filename='app.log', filemode='w', format='%(levelname)s - %(asctime)s - %(message)s',\n level=logging.INFO)\n logging.info(\"user wants to follow some one!\")\n with open(\"follow_request_{}.txt\".format(username), \"a+\") as file_object:\n file_object.seek(0)\n data = file_object.read(100)\n if len(data) > 0:\n file_object.write(\"\\n\")\n file_object.write(\"{} wants to follow you!\".format(self.user_name))\n return username\n\n # here is a function to accept the request\n def accept_or_not(self, username_want):\n n = int(input(\"Enter 1 for yes(accept),2 for no(reject)!: \"))\n if n == 1:\n logging.basicConfig(filename='app.log', filemode='w', format='%(levelname)s - %(asctime)s - %(message)s',\n level=logging.INFO)\n logging.info(\"user accepted the follow request!\")\n print(\"Here is the list of your followers:\")\n for i in range(len(self.followers_list)):\n print(self.followers_list[i])\n self.acceptance = True\n with open(\"acceptance_response_for_{}.txt\".format(username_want), \"a+\") as file_object:\n file_object.seek(0)\n data = file_object.read(100)\n if len(data) > 0:\n file_object.write(\"\\n\")\n file_object.write(\"{}-accepted your follow request!\".format(self.user_name))\n return self.post_list\n if n == 2:\n self.acceptance = False\n with open(\"acceptance_response_for_{}.txt\".format(username_want), \"a+\") as file_object:\n file_object.seek(0)\n data = file_object.read(100)\n if len(data) > 0:\n file_object.write(\"\\n\")\n file_object.write(\"{} rejected your follow request!\".format(self.user_name))\n\n\n# Run part\nd1 = User()\nd2 = User()\nd1.get_Info()\nd2.get_Info()\nprint(15 * \"-\")\n\n\"---------------------------------------------\"\n\n# try to watch others profile\nd1.watch_others_profile()\nd2.watch_others_profile()\n\nprint(15 * \"-\")\nname_to_follow = d1.follow(\"fatemeh\")\n\n# try to add post to profile and change it if needed by user\nlogging.basicConfig(filename='app.log', filemode='w', format='%(levelname)s - %(asctime)s - %(message)s',\n level=logging.INFO)\nlogging.info(\"user wants to do some changes in list of posts!\")\na = Post(\"Today is a very good day!\")\na.comment(\"your post 1!:)\")\na.comment(\"your post 2!:)\")\na.comment(\"your post 3!:)\")\na.comment(\"your post 4!:)\")\nb = Post(\"Today is a very good day22!\")\nb.comment(\"your post 1!:)\")\nb.comment(\"your post 2!:)\")\nb.comment(\"your post 3!:)\")\nb.comment(\"your post 4!:)\")\nc = Post(\"How you doi'n?!\")\nc.comment(\"not good 1!:)\")\nc.comment(\"not good 2!:)\")\nc.comment(\"not good 3!:)\")\nc.comment(\"not good 
4!:)\")\n\nd1.post_list.append(a)\nd1.post_list.append(b)\nd1.post_list.append(c)\n\n\"---------------------------------------------\"\nprint(15 * \"-\")\nprint(\"Here is to show you can update your profile.\")\nprint(15 * \"-\")\nfor i in range(len(d1.post_list)):\n d1.post_list[i].edit(\"Hi folks!\")\n d1.post_list[i].delete()\n d1.post_list[i].comment(\"It's a new comment {}\".format(i + 1))\n print(d1.post_list[i])\n# here for liking a post\nd1.post_list[2].like_pst()\n\"---------------------------------------------\"\n# try to login in this part\n# try to check the acceptance result\nprint(15 * \"-\")\nd4 = User()\na2 = Post(\"Today is a very good day!\")\na2.comment(\"your post 1!:)\")\na2.comment(\"your post 2!:)\")\na2.comment(\"your post 3!:)\")\na2.comment(\"your post 4!:)\")\nb2 = Post(\"Today is a very good day22!\")\nb2.comment(\"your post 1!:)\")\nb2.comment(\"your post 2!:)\")\nb2.comment(\"your post 3!:)\")\nb2.comment(\"your post 4!:)\")\nc2 = Post(\"How you doi'n?!\")\nc2.comment(\"not good 1!:)\")\nc2.comment(\"not good 2!:)\")\nc2.comment(\"not good 3!:)\")\nc2.comment(\"not good 4!:)\")\nd4.post_list.append(a2)\nd4.post_list.append(b2)\nd4.post_list.append(c2)\n# example for a successful login\nd4.login(\"fatemeh\", \"2222222222\")\nif d4.login_status == True:\n print(\"you are logged in!\")\nelse:\n print(\"sorry not correct information!\")\n\n# example of an unsuccessful logging\nprint(15 * \"-\")\nd3 = User()\nd3.post_list.append(a)\nd3.post_list.append(b)\nd3.post_list.append(c)\nd3.login(\"fatemeh\", '755555555555777')\nif d3.login_status == True:\n print(\"you are logged in!\")\nelse:\n print(\"sorry not correct information!\")\n\ntry:\n result = d4.accept_or_not('jack')\nexcept ValueError:\n logging.basicConfig(filename='app.log', filemode='w', format='%(levelname)s - %(asctime)s - %(message)s',\n level=logging.WARNING)\n logging.warning(\"user should enter an integer!\")\n print(\"Please Enter a digit!\")\n# try to check the list of posts of a user\n# we should get the response first\nprint(15 * \"-\")\nif result == None:\n exit()\nif result != None:\n d4.followers_list.append(name_to_follow)\nfor i in range(len(result)):\n print(result[i])\n print(\"you can add your comment to this post: \")\n comm = input(\"Enter your comment if you wish!: \")\n if comm != \"\":\n result[i].comment(comm)\n print(result[i])\n\"---------------------------------------------\"\n","sub_path":"latest_version_final/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":10445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"119058688","text":"x = range(2, 1000)\nresults = []\n\nfor n in x:\n b = len(str(n))\n a = 0\n for m in range(b):\n n = str(n)\n b = n[m]\n b = int(b)\n a += b**5\n if a == int(n):\n results.append(int(n))\n\nprint(sum(results))","sub_path":"Python/Exercicios/powernum.py","file_name":"powernum.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"248797574","text":"\n\n#calss header\nclass _SINGLE():\n\tdef __init__(self,): \n\t\tself.name = \"SINGLE\"\n\t\tself.definitions = [u'A baseball player singles by hitting a ball that allows him to reach first base.']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn 
self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_single.py","file_name":"_single.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"307216880","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('projects', '0007_auto_20150617_0236'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='task',\n name='external_id',\n field=models.IntegerField(help_text=' \"ID\" for an external issue tracker system', null=True, verbose_name='External ID', blank=True),\n ),\n ]\n","sub_path":"django_erp/projects/migrations/0008_task_external_id.py","file_name":"0008_task_external_id.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"578285112","text":"from PyQt5.QtWidgets import QMainWindow, QMessageBox\nfrom controller.SiFuCanShu import SiFuCanShu\nfrom controller.YaLiCanShu import YaLiCanShu\nfrom controller.TiaoShiCanShu import TiaoShiCanShu\nfrom controller.GongJianCanShu import GongJianCanShu\nfrom controller.CameraPosition import CameraPosition\nfrom controller.ZiDongTiaoJiao import ZiDongTiaoJiao\nfrom controller.SiKongShiBie import SiKongShiBie\nfrom controller.DianYaTiaoShi import DianYaTiaoShi\nfrom controller.AdDuiBi import AdDuiBi\nfrom controller.Listener import Listener\nfrom util.communication_util import serial_init, get_data_from_serial_port, write_data_to_port\nimport time\nfrom PyQt5.QtCore import QTimer\nfrom PyQt5.QtGui import QPixmap\nimport cv2\nimport math\nfrom util.xls_util import readDataForNineStep, writeDataForSixStep, writeDataForEightStep, writeDataForFourteenStep, readData\nimport numpy as np\nimport os\nimport threading\nfrom PyQt5.QtCore import Qt\nfrom Demo_opencv_byGetFrame import getPicture, getCameraInstance, close\nclass MyMainWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n self.port = serial_init()\n self.sifucanshu_ui = SiFuCanShu()\n self.yalicanshu_ui = YaLiCanShu()\n self.tiaoshicanshu_ui = TiaoShiCanShu()\n self.gongjiancanshu_ui = GongJianCanShu()\n self.cameraPosition_ui = CameraPosition()\n self.ziDongTiaoJiao_ui = ZiDongTiaoJiao()\n self.sikongshibie_ui = SiKongShiBie()\n self.dianyatiaoshi_ui = DianYaTiaoShi()\n self.adDuiBi_ui = AdDuiBi()\n self.timer = QTimer()\n #self.openCamera()\n self.camera = getCameraInstance()\n self.ad1_count = 0\n self.ad1_value_list = []\n self.ad2_count = 0\n self.ad2_value_list = []\n self.loop_count = 0\n self.t = threading.Thread(target=self.update_img)\n self.t.start()\n # self.cam_timer = QTimer()\n # self.cam_timer.setInterval(1000)\n # self.cam_timer.start()\n # self.cam_timer.setTimerType(Qt.VeryCoarseTimer)\n # self.cam_timer.timeout.connect(self.update_img)\n #self.update_img()\n\n\n if self.port == -1:\n QMessageBox.information(self, '串口连接状态', '连接失败', QMessageBox.Yes)\n else:\n self.timer.setInterval(13)\n self.timer.start()\n self.timer.timeout.connect(self.listening)\n #self.update_img()\n\n\n\n def listening(self):\n data = get_data_from_serial_port(self.port)\n if not data == bytes('5501000000000000'.encode('ascii')):\n self.parse(data)\n\n\n def update_img(self):\n while True:\n if self.camera:\n self.loop_count = self.loop_count +1\n print(self.loop_count)\n # self.camera_flag, self.cap_image = self.cam_capture.read()\n #self.cap_image = 
getPicture(self.camera)\n                self.cap_image = getPicture(self.camera)\n                #self.cap_image = cv2.cvtColor(self.cap_image, cv2.COLOR_BGR2GRAY)\n                #self.cap_image = cv2.imread(r'C:\\Users\\Administrator\\Desktop\\0803\\Pic_2019_08_02_151547_blockId#2192.bmp', 0)\n                self.cap_image = cv2.resize(self.cap_image, (400, 300))\n                self.ziDongTiaoJiao_ui.caped_image = self.cap_image\n                #self.ziDongTiaoJiao_ui.showImage()\n                self.cameraPosition_ui.caped_image = self.cap_image\n                #self.cameraPosition_ui.showImage()\n                self.sikongshibie_ui.caped_image = self.cap_image\n                time.sleep(1)\n                #self.sikongshibie_ui.showImage()\n\n    def openCamera(self):\n        self.cam_num = 0\n        self.cam_capture = cv2.VideoCapture()\n        if not self.cam_capture.isOpened():\n            self.cam_capture.open(self.cam_num)\n\n\n    def btnClicked(self):\n        sender = self.sender()\n        print(sender.text())\n        if sender.text() == r'伺服参数设置':\n            self.sifucanshu_ui.port = self.port\n            self.sifucanshu_ui.activateWindow()\n            self.sifucanshu_ui.show()\n\n        if sender.text() == r'压力参数设置':\n            self.yalicanshu_ui.port = self.port\n            self.yalicanshu_ui.askTimer.start()\n            self.yalicanshu_ui.activateWindow()\n            self.yalicanshu_ui.show()\n\n        if sender.text() == r'调试参数设置':\n            self.tiaoshicanshu_ui.activateWindow()\n            self.tiaoshicanshu_ui.show()\n\n        if sender.text() == r'工件参数设置':\n            self.gongjiancanshu_ui.activateWindow()\n            self.gongjiancanshu_ui.show()\n\n        if sender.text() == r'相机位置计算':\n            self.cameraPosition_ui.port = self.port\n            self.cameraPosition_ui.activateWindow()\n            #self.cameraPosition_ui.openCamera()\n            self.cameraPosition_ui.show()\n\n        if sender.text() == r'相机四孔识别':\n            self.sikongshibie_ui.port = self.port\n            self.sikongshibie_ui.activateWindow()\n            #self.sikongshibie_ui.openCamera()\n            self.sikongshibie_ui.show()\n\n        if sender.text() == r'自动调校':\n            self.ziDongTiaoJiao_ui.port = self.port\n            self.ziDongTiaoJiao_ui.activateWindow()\n            #todo\n            if self.port:\n                write_data_to_port(self.port, r'0001000000000005')\n            #self.ziDongTiaoJiao_ui.openCamera()\n            self.ziDongTiaoJiao_ui.show()\n        if sender.text() == r'电压调试':\n            self.dianyatiaoshi_ui.port = self.port\n            self.dianyatiaoshi_ui.activateWindow()\n            self.dianyatiaoshi_ui.show()\n        if sender.text() == r'零点标定':\n            if self.port:\n                write_data_to_port(self.port, r'0001000000000001')\n        if sender.text() == r'电压对比':\n            self.adDuiBi_ui.activateWindow()\n            self.adDuiBi_ui.show()\n            if self.port:\n                write_data_to_port(self.port, r'0001000000000002')\n        if sender.text() == r'关机归位':\n            if self.port:\n                write_data_to_port(self.port, r'0001000000000003')\n\n\n    def parse(self,data):\n        data_splited = []\n        for i in range(0, 15, 2):\n            data_splited.append(data[i:i+2])\n        # Get the AD values and relay status\n        if data_splited[0] == b'55' and data_splited[1] == b'02':\n            high =(data_splited[3][0] - 48) if data_splited[3][0] < 58 else data_splited[3][0] - 87\n            low = (data_splited[3][1] - 48) if data_splited[3][1] < 58 else data_splited[3][1] - 87\n            high_bin = bin(high)[-4:]\n            low_bin = bin(low)[-4:]\n            if len(high_bin) < 4:\n                high_bin = '0'+high_bin[-1]\n            if len(low_bin) == 3:\n                low_bin = '000' + low_bin[-1]\n            if len(low_bin) == 4:\n                if low_bin.startswith('b'):\n                    low_bin = '0' + low_bin[-3:]\n                if low_bin.startswith('0b'):\n                    low_bin = '00' + low_bin[-2:]\n            # if len(low_bin) == 5:\n            #     low_bin = '0' + low_bin[-3:]\n            # if len(low_bin) == 6:\n            #     low_bin = low_bin[-4:]\n            status = (high_bin + low_bin)[-6:]\n            self.update_relay_status(status)\n            ad1_high = data_splited[4]\n            ad1_low = data_splited[5]\n            ad2_high = data_splited[6]\n            ad2_low = data_splited[7]\n            ad1_value = byte_to_oct(ad1_high, ad1_low)\n            ad2_value = 
byte_to_oct(ad2_high, ad2_low)\n temp1 = float(self.yalicanshu_ui.ad1_value_2.text()) - float(self.yalicanshu_ui.ad1_value_1.text())\n temp2 = float(self.yalicanshu_ui.ad1_pressure_2.text()) - float(self.yalicanshu_ui.ad1_pressure_1.text())\n if not temp2 == 0:\n vp = temp1 / temp2\n else:\n vp = 999\n # AD1 平均滤波\n self.ad1_value_list.append(ad1_value)\n self.ad1_count = self.ad1_count + 1\n if self.ad1_count >= 4:\n ad1_value = np.mean(self.ad1_value_list)\n self.ad1_value_list = []\n self.ad1_count = 0\n self.yalicanshu_ui.ad1_display.setText(str(ad1_value))\n # AD2 平均滤波\n self.ad2_value_list.append(ad2_value)\n self.ad2_count = self.ad2_count + 1\n if self.ad2_count >=4:\n ad2_value = np.mean(self.ad2_value_list)\n self.ad2_value_list = []\n self.ad2_count = 0\n self.yalicanshu_ui.ad2_display.setText(str(ad2_value))\n\n #todo 测试AD2 AD1 压力及电压更新\n self.yalicanshu_ui.updateUI()\n\n # 向下位机发送相机XYZ坐标\n if data_splited[0] == b'05' and data_splited[1] == b'01':\n gongjian = self.ziDongTiaoJiao_ui.gongjian\n camera_x = gongjian.c_x\n camera_y = gongjian.c_y\n camera_z = gongjian.c_z\n oct_c_x_high = math.floor(camera_x / 256)\n oct_c_x_low = math.floor(camera_x % 256)\n\n oct_c_y_high = math.floor(camera_y / 256)\n oct_c_y_low = math.floor(camera_y % 256)\n\n oct_c_z_high = math.floor(camera_z / 256)\n oct_c_z_low = math.floor(camera_z % 256)\n\n cmd = r'5001'+oct_to_bin(oct_c_x_high) + \\\n oct_to_bin(oct_c_x_low) + \\\n oct_to_bin(oct_c_y_high) + \\\n oct_to_bin(oct_c_y_low) + \\\n oct_to_bin(oct_c_z_high) + \\\n oct_to_bin(oct_c_z_low)\n if self.port:\n write_data_to_port(self.port, cmd)\n # 向下位机发送AD2压力和限压\n if data_splited[0] == b'05' and data_splited[1] == b'02':\n gongjian = self.ziDongTiaoJiao_ui.gongjian\n pressure = gongjian.pressure\n\n ad2 = self.yalicanshu_ui.sensor\n xianya = ad2.ad2_max\n pressure_high = math.floor(pressure / 256)\n pressure_low = math.floor(pressure % 256)\n xianya_high = math.floor(xianya / 256)\n xianya_low = math.floor(xianya % 256)\n cmd = r'05020000' + oct_to_bin(pressure_high) + \\\n oct_to_bin(pressure_low) + \\\n oct_to_bin(xianya_high) + \\\n oct_to_bin(xianya_low)\n if self.port:\n write_data_to_port(self.port, cmd)\n\n #接收下位机完成步数\n if data_splited[0] == b'05' and data_splited[1] == b'04':\n # 接收下位机的传感器判断结果,并显示\n if data_splited[-1] == b'04':\n ad2_status = data_splited[-2]\n if ad2_status == b'01':\n self.yalicanshu_ui.ad2_pressure.setText(r'传感器错误')\n #向下位机发送第四步完成的信号\n write_data_to_port(self.port, r'0006000000000004')\n\n # 第五步完成接第六步\n if data_splited[-1] == b'05':\n cx1 = 0\n cy1 = 0\n cx2 = 0\n cy2 = 0\n cx3 = 0\n cy3 = 0\n try:\n srcImage = self.cap_image\n self.circle_detector.origin_Image = srcImage\n #todo gengxin\n recognizedImage, x1, y1, x2, y2, x3, y3 = self.circle_detector.match()\n\n if not x1 == -1 and not y1 == -1:\n cx1 = x1\n cy1 = y1\n if not x2 == -1 and not y2 == -1:\n cx2 = x2\n cy2 = y2\n if not x3 == -2 and not y3 == -1:\n cx3 = x3\n cy3 = y3\n except Exception as e:\n print(e)\n QMessageBox.information(self, '识别结果', '识别失败', QMessageBox.Yes)\n filePath = os.path.join(r'C:\\Users\\zyp\\PycharmProjects\\cv\\data','test.xlsx')\n data_dict = {}\n data_dict['x1'] = cx1\n data_dict['y1'] = cy1\n data_dict['x2'] = cx2\n data_dict['y2'] = cy2\n data_dict['x3'] = cx3\n data_dict['y3'] = cy3\n data_dict['c_x'] = self.ziDongTiaoJiao_ui.gongjian.c_x\n data_dict['c_y'] = self.ziDongTiaoJiao_ui.gongjian.c_y\n data_dict['ad2_pressure'] = float(self.ziDongTiaoJiao_ui.ad2_pressure.text())\n data_dict['angle'] = 
self.ziDongTiaoJiao_ui.gongjian.angle\n data_dict['vision'] = self.ziDongTiaoJiao_ui.gongjian.vision\n data_dict['pressure'] = self.ziDongTiaoJiao_ui.gongjian.pressure\n writeDataForSixStep(filePath, data_dict)\n print(\"第6步 此处进行识别并写excel文件\")\n write_data_to_port(self.port, r'0006000000000006')\n\n # 下位机第7步完成\n if data_splited[-1] == b'07':\n cx1 = 0\n cy1 = 0\n cx2 = 0\n cy2 = 0\n cx3 = 0\n cy3 = 0\n try:\n srcImage = self.cap_image\n self.circle_detector.origin_Image = srcImage\n recognizedImage, x1, y1, x2, y2, x3, y3 = self.circle_detector.match()\n\n if not x1 == -1 and not y1 == -1:\n cx1 = x1\n cy1 = y1\n if not x2 == -1 and not y2 == -1:\n cx2 = x2\n cy2 = y2\n if not x3 == -2 and not y3 == -1:\n cx3 = x3\n cy3 = y3\n except Exception as e:\n print(e)\n QMessageBox.information(self, '识别结果', '识别失败', QMessageBox.Yes)\n filePath = os.path.join(r'C:\\Users\\zyp\\PycharmProjects\\cv\\data','test.xlsx')\n data_dict = {}\n data_dict['x1'] = cx1\n data_dict['y1'] = cy1\n data_dict['x2'] = cx2\n data_dict['y2'] = cy2\n data_dict['x3'] = cx3\n data_dict['y3'] = cy3\n data_dict['ad2_pressure'] = float(self.ziDongTiaoJiao_ui.ad2_pressure.text())\n writeDataForEightStep(filePath, data_dict)\n print(\"第8步 对工件进行识别 写excel文件\")\n time.sleep(2)\n write_data_to_port(self.port, r'0006000000000008')\n\n #下位机第九步完成\n if data_splited[-1] == b'09':\n write_data_to_port(self.port, r'5004000000000000')\n print(r\"下位机第 9 步完成信号收到\")\n\n #下位机第十步完成\n if data_splited[-1] == b'0a':\n write_data_to_port(self.port, r'5004000000000000')\n print(r\"下位机第 10 步完成信号收到\")\n\n #下位机第 11 步完成\n if data_splited[-1] == b'0b':\n write_data_to_port(self.port, r'5004000000000000')\n print(r\"下位机第 11 步完成信号收到\")\n\n #下位机第 12 步完成\n if data_splited[-1] == b'0c':\n write_data_to_port(self.port, r'5004000000000000')\n print(r\"下位机第 12 步完成信号收到\")\n\n #下位机第 13 步完成\n if data_splited[-1] == b'0d':\n time.sleep(2)\n print(r'识别工件 并写入excel')\n cx1 = 0\n cy1 = 0\n cx2 = 0\n cy2 = 0\n cx3 = 0\n cy3 = 0\n try:\n srcImage = self.cap_image\n self.circle_detector.origin_Image = srcImage\n recognizedImage, x1, y1, x2, y2, x3, y3 = self.circle_detector.match()\n\n if not x1 == -1 and not y1 == -1:\n cx1 = x1\n cy1 = y1\n if not x2 == -1 and not y2 == -1:\n cx2 = x2\n cy2 = y2\n if not x3 == -2 and not y3 == -1:\n cx3 = x3\n cy3 = y3\n except Exception as e:\n print(e)\n QMessageBox.information(self, '识别结果', '识别失败', QMessageBox.Yes)\n filePath = os.path.join(r'C:\\Users\\zyp\\PycharmProjects\\cv\\data','test.xlsx')\n data_dict = {}\n data_dict['x1'] = cx1\n data_dict['y1'] = cy1\n data_dict['x2'] = cx2\n data_dict['y2'] = cy2\n data_dict['x3'] = cx3\n data_dict['y3'] = cy3\n writeDataForFourteenStep(filePath, data_dict)\n\n print(r\"上位机 14 步识别工件 写入excel\")\n #通知下位机 上位机的第14步已经完成\n write_data_to_port(self.port, r'000600000000000e')\n print(r\"上位机第14步 读取D26 判断是否正确\")\n\n flag = int(readData(filePath, (25, 3)))\n unqualified_num = int(self.ziDongTiaoJiao_ui.unqualified.text())\n qualified_num = int(self.ziDongTiaoJiao_ui.qualified.text())\n if flag == 0:\n self.ziDongTiaoJiao_ui.img_box.setText(\"打孔位置正确\")\n self.ziDongTiaoJiao_ui.unqualified.setText(str(qualified_num + 1))\n print(r\"打孔合格,读取D27 D28\")\n ##通知下位机第十七步完成\n write_data_to_port(self.port, r'0006000000000011')\n else:\n self.ziDongTiaoJiao_ui.img_box.setText(\"打孔位置错误\")\n self.ziDongTiaoJiao_ui.unqualified.setText(str(unqualified_num + 1))\n #通知下位机第十五步完成\n write_data_to_port(self.port, r'000600000000000f')\n\n #下位机获取X轴和Y轴的坐标\n if data_splited[0] == b'05' and data_splited[1] == b'05':\n filePath = 
os.path.join(r'C:\\Users\\zyp\\PycharmProjects\\cv\\data','test.xlsx')\n x, y = readDataForNineStep(filePath)\n print(\"第九步从excel文件中读取数据\")\n x_high = math.floor(x / 256)\n x_low = math.floor(x % 256)\n y_high = math.floor(y / 256)\n y_low = math.floor(y % 256)\n\n cmd = r'5005'+oct_to_bin(x_high) + \\\n oct_to_bin(x_low) + \\\n oct_to_bin(y_high) + \\\n oct_to_bin(y_low) + \\\n '0009'\n\n write_data_to_port(self.port, cmd)\n\n #获取转速 和起钻、止钻的位置\n if data_splited[0] == b'05' and data_splited[1] == b'03':\n gongjian = self.ziDongTiaoJiao_ui.gongjian\n qizuan = gongjian.z_start\n zhizuan = gongjian.z_stop\n zhuansu = gongjian.zhuansu\n qizuan_high = math.floor(qizuan / 256)\n qizuan_low = math.floor(qizuan % 256)\n zhuansu_high = math.floor(zhuansu / 256)\n zhuansu_low = math.floor(zhuansu % 256)\n\n zhizuan_high = math.floor(zhizuan / 255)\n zhizuan_low = math.floor(zhizuan % 255)\n cmd = r'5003'+oct_to_bin(qizuan_high) + \\\n oct_to_bin(qizuan_low) + \\\n oct_to_bin(zhizuan_high) + \\\n oct_to_bin(zhizuan_low) + \\\n oct_to_bin(zhuansu_high) + \\\n oct_to_bin(zhuansu_low)\n if self.port:\n write_data_to_port(self.port, cmd)\n\n #获取游丝角度\n if data_splited[0] == b'05' and data_splited[1] == b'07':\n gongjian = self.ziDongTiaoJiao_ui.gongjian\n yousi = gongjian.yousi_ang\n yousi_high = math.floor(yousi / 256)\n yousi_low = math.floor(yousi % 256)\n cmd = r'5007'+ oct_to_bin(yousi_high) + \\\n oct_to_bin(yousi_low)+'00000000'\n write_data_to_port(self.port, cmd)\n\n #获取钻速\n if data_splited[0] == b'05' and data_splited[1] == b'02':\n gongjian = self.ziDongTiaoJiao_ui.gongjian\n zuansu = gongjian.zuansu\n zuansu_high = math.floor(zuansu / 256)\n zuansu_low = math.floor(zuansu % 256)\n\n ad2 = self.yalicanshu_ui.sensor\n xianya = ad2.ad2_max\n pressure_high = math.floor(pressure / 256)\n pressure_low = math.floor(pressure % 256)\n xianya_high = math.floor(xianya / 256)\n xianya_low = math.floor(xianya % 256)\n cmd = r'5002' + oct_to_bin(zuansu_high) + \\\n oct_to_bin(zuansu_low) + \\\n oct_to_bin(xianya_high) + \\\n oct_to_bin(xianya_low) + \\\n oct_to_bin(pressure_high) + \\\n oct_to_bin(pressure_low)\n write_data_to_port(self.port, cmd)\n\n #相机位置计算更新坐标\n #TODO 待更新计算公式\n if data_splited[0] == b'55' and data_splited[1] == b'04':\n x_high = data_splited[2]\n x_mid = data_splited[3]\n x_low = data_splited[4]\n y_high = data_splited[5]\n y_mid =data_splited[6]\n y_low = data_splited[7]\n\n # x = byte_to_oct(x_high,x_mid, x_low)\n high_b1 = x_high[0]\n high_b2 = x_high[1]\n high_b1 = high_b1 - 48 if high_b1 < 58 else high_b1 - 87\n high_b2 = high_b2 - 48 if high_b2 < 58 else high_b2 - 87\n\n mid_b1 = x_mid[0]\n mid_b2 = x_mid[1]\n mid_b1 = mid_b1 - 48 if mid_b1 < 58 else mid_b1 - 87\n mid_b2 = mid_b2 - 48 if mid_b2 < 58 else mid_b2 - 87\n\n low_b1 = x_low[0]\n low_b2 = x_low[1]\n low_b1 = low_b1 - 48 if low_b1 < 58 else low_b1 - 87\n low_b2 = low_b2 - 48 if low_b2 < 58 else low_b2 - 87\n\n x = (high_b1 * 16 + high_b2) * 65536 +(mid_b1 * 16+mid_b2) * 256+ (low_b1 * 16 + low_b2)\n\n #y = byte_to_oct(y_high, y_low)\n high_b1 = y_high[0]\n high_b2 = y_high[1]\n high_b1 = high_b1 - 48 if high_b1 < 58 else high_b1 - 87\n high_b2 = high_b2 - 48 if high_b2 < 58 else high_b2 - 87\n\n mid_b1 = y_mid[0]\n mid_b2 = y_mid[1]\n mid_b1 = mid_b1 - 48 if mid_b1 < 58 else mid_b1 - 87\n mid_b2 = mid_b2 - 48 if mid_b2 < 58 else mid_b2 - 87\n\n low_b1 = y_low[0]\n low_b2 = y_low[1]\n low_b1 = low_b1 - 48 if low_b1 < 58 else low_b1 - 87\n low_b2 = low_b2 - 48 if low_b2 < 58 else low_b2 - 87\n\n y = (high_b1 * 16 + high_b2) * 
65536 + (mid_b1 * 16 + mid_b2) * 256 + (low_b1 * 16 + low_b2)\n\n self.cameraPosition_ui.current_x.setText(str(round(x/10000,3)))\n self.cameraPosition_ui.current_y.setText(str(round(y/10000,3)))\n\n # def getAdValue(self):\n # write_data_to_port(self.port, r'0002000000000000')\n\n def update_relay_status(self, status):\n status_l = [int(x) for x in status[-6:]]\n print(status_l)\n if status_l[0] == 1:\n self.yalicanshu_ui.status_JF.setStyleSheet(\"*{background-color:red}\")\n print(\"JF\")\n else:\n self.yalicanshu_ui.status_JF.setStyleSheet(\"*{background-color:#e1e1e1}\")\n\n if status_l[1] == 1:\n self.yalicanshu_ui.status_JE.setStyleSheet(\"*{background-color:red}\")\n print(\"JE\")\n else:\n self.yalicanshu_ui.status_JE.setStyleSheet(\"*{background-color:#e1e1e1}\")\n\n if status_l[2] == 1:\n self.yalicanshu_ui.status_JD.setStyleSheet(\"*{background-color:red}\")\n print(\"JD\")\n else:\n self.yalicanshu_ui.status_JD.setStyleSheet(\"*{background-color:#e1e1e1}\")\n\n if status_l[3] == 1:\n self.yalicanshu_ui.status_JC.setStyleSheet(\"*{background-color:red}\")\n print(\"JC\")\n else:\n self.yalicanshu_ui.status_JC.setStyleSheet(\"*{background-color:#e1e1e1}\")\n\n if status_l[4] == 1:\n self.yalicanshu_ui.status_JB.setStyleSheet(\"*{background-color:red}\")\n print(\"JB\")\n else:\n self.yalicanshu_ui.status_JB.setStyleSheet(\"*{background-color:#e1e1e1}\")\n\n if status_l[5] == 1:\n self.yalicanshu_ui.status_JA.setStyleSheet(\"*{background-color:red}\")\n print(\"JA\")\n else:\n self.yalicanshu_ui.status_JA.setStyleSheet(\"*{background-color:#e1e1e1}\")\n\n def closeEvent(self, *args, **kwargs):\n if self.camera:\n close(self.camera)\n os._exit(0)\n\n\n\ndef byte_to_oct(high_b, low_b):\n high_b1 = high_b[0]\n high_b2 = high_b[1]\n high_b1 = high_b1 - 48 if high_b1 < 58 else high_b1 - 87\n high_b2 = high_b2 - 48 if high_b2 < 58 else high_b2 - 87\n\n low_b1 = low_b[0]\n low_b2 = low_b[1]\n low_b1 = low_b1 - 48 if low_b1 < 58 else low_b1 - 87\n low_b2 = low_b2 - 48 if low_b2 < 58 else low_b2 - 87\n return (high_b1 * 16 + high_b2) * 256 + (low_b1 * 16 + low_b2)\n\n\n\n\ndef oct_to_bin(value):\n temp = hex(value)\n if value < 16:\n return str('0'+temp[-1])\n else:\n return str(temp[-2:])\n\n","sub_path":"controller/MyMainWindow.py","file_name":"MyMainWindow.py","file_ext":"py","file_size_in_byte":25634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"525484442","text":"import turtle\nt=turtle.Turtle()\nt.color(\"cyan\")\nt.speed(0)\n\ncolors=['green','blue','yellow','red','orange','purple','cyan','green','blue','yellow','red','orange','purple','cyan']\n\ndef square(color):\n for side in range(4):\n t.forward(100)\n t.right(90)\n for side in range(4):\n t.forward(50)\n t.right(90)\nt.penup()\nt.back(40)\nt.pendown()\n\nfor color in colors:\n t.color(color)\n square(colors)\n t.forward(50)\n t.left(45)\nt.hideturtle()","sub_path":"New folder/turtle/box circle.py","file_name":"box circle.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"386058106","text":"from datetime import datetime\nfrom pathlib import Path\nfrom homebot import bot_path\nfrom homebot.core.config import get_config\nfrom homebot.core.error_handler import format_exception\nfrom homebot.core.logging import LOGE\nfrom homebot.lib.libupload import Uploader\nfrom homebot.modules.ci.artifacts import STATUS_ERROR, STATUS_SUCCESS, STATUS_UPLOADING, Artifacts\nfrom 
homebot.modules.ci.parser import CIParser\nfrom homebot.modules.ci.projects.aosp.post import PostManager, chat_id\nfrom homebot.modules.ci.projects.aosp.returncode import ERROR_CODES, NEEDS_LOGS_UPLOAD, SUCCESS\nimport re\nimport subprocess\nfrom telegram.ext import CallbackContext\nfrom telegram.update import Update\n\nADDITIONAL_ARTIFACTS = [\n\t\"boot.img\",\n\t\"vendor_boot.img\",\n\t\"dtbo.img\",\n\t\"recovery.img\",\n]\n\nclass AOSPProject:\n\t\"\"\"\n\tThis class represent an AOSP project.\n\t\"\"\"\n\t# This value will also be used for folder name\n\tname: str\n\t# Version of the project\n\tversion: str\n\t# Android version to display on Telegram post\n\tandroid_version: str\n\t# Name of the parent folder used when uploading\n\tcategory: str\n\t# These next 2 values are needed for lunch (e.g. \"lineage\"_whyred-\"userdebug\")\n\tlunch_prefix: str\n\tlunch_suffix: str\n\t# Target to build (e.g. to build a ROM's OTA package, use \"bacon\" or \"otapackage\", for a recovery project, use \"recoveryimage\")\n\tbuild_target: str\n\t# Filename of the zip. You can also use wildcards if the name isn't fixed\n\tzip_name: str\n\n\tdef __init__(self, update: Update, context: CallbackContext, args: list[str]):\n\t\t\"\"\"Initialize AOSP project class.\"\"\"\n\t\tself.update = update\n\t\tself.context = context\n\t\tself.args = args\n\t\tparser = CIParser(prog=\"/ci\")\n\t\tparser.set_output(self.update.message.reply_text)\n\t\tparser.add_argument('device', help='device codename')\n\t\tparser.add_argument('-ic', '--installclean', help='make installclean before building', action='store_true')\n\t\tparser.add_argument('-c', '--clean', help='make clean before building', action='store_true')\n\t\tparser.add_argument('--release', help='upload build to release profile', action='store_true')\n\t\tparser.set_defaults(clean=False, installclean=False, release=False)\n\t\tself.parsed_args = parser.parse_args(args)\n\n\tdef build(self):\n\t\tproject_dir = Path(f\"{get_config('ci.main_dir', '')}/{self.name}-{self.version}\")\n\t\tdevice_out_dir: Path = project_dir / \"out\" / \"target\" / \"product\" / self.parsed_args.device\n\n\t\tartifacts = Artifacts(device_out_dir, [self.zip_name] + ADDITIONAL_ARTIFACTS)\n\t\tpost_manager = PostManager(self, self.parsed_args.device, artifacts)\n\n\t\tif self.parsed_args.clean is True:\n\t\t\tclean_type = \"clean\"\n\t\telif self.parsed_args.installclean is True:\n\t\t\tclean_type = \"installclean\"\n\t\telse:\n\t\t\tclean_type = \"none\"\n\n\t\tpost_manager.update(\"Building\")\n\n\t\tcommand = [bot_path / \"modules\" / \"ci\" / \"projects\" / \"aosp\" / \"tools\" / \"building.sh\",\n\t\t \"--sources\", project_dir,\n\t\t \"--lunch_prefix\", self.lunch_prefix,\n\t\t \"--lunch_suffix\", self.lunch_suffix,\n\t\t \"--build_target\", self.build_target,\n\t\t \"--clean\", clean_type,\n\t\t \"--device\", self.parsed_args.device]\n\n\t\tlast_edit = datetime.now()\n\t\tprocess = subprocess.Popen(command, encoding=\"UTF-8\",\n\t\t stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n\t\twhile True:\n\t\t\toutput = process.stdout.readline()\n\t\t\tif output == '' and process.poll() is not None:\n\t\t\t\tbreak\n\t\t\tif not output:\n\t\t\t\tcontinue\n\n\t\t\tnow = datetime.now()\n\t\t\tif (now - last_edit).seconds < 150:\n\t\t\t\tcontinue\n\n\t\t\tresult = re.search(r\"\\[ +([0-9]+% [0-9]+/[0-9]+)\\]\", output.strip())\n\t\t\tif result is None:\n\t\t\t\tcontinue\n\t\t\tresult_split = str(result.group(1)).split()\n\t\t\tif len(result_split) != 2:\n\t\t\t\tcontinue\n\n\t\t\tpercentage, 
targets = re.split(\" +\", result.group(1))\n\t\t\tpost_manager.update(f\"Building: {percentage} ({targets})\")\n\n\t\t\tlast_edit = now\n\n\t\treturncode = process.poll()\n\n\t\t# Process return code\n\t\tbuild_result = ERROR_CODES.get(returncode, \"Build failed: Unknown error\")\n\n\t\tpost_manager.update(build_result)\n\n\t\tneeds_logs_upload = NEEDS_LOGS_UPLOAD.get(returncode, False)\n\t\tif needs_logs_upload != False:\n\t\t\tlog_file = open(project_dir / needs_logs_upload, \"rb\")\n\t\t\tself.context.bot.send_document(chat_id, log_file)\n\t\t\tlog_file.close()\n\n\t\tif returncode != SUCCESS or get_config(\"ci.upload_artifacts\", False) is not True:\n\t\t\treturn\n\n\t\t# Upload artifacts\n\t\tif self.parsed_args.release:\n\t\t\tuploader_profile = \"release\"\n\t\telse:\n\t\t\tuploader_profile = \"ci\"\n\n\t\ttry:\n\t\t\tuploader = Uploader(uploader_profile)\n\t\texcept Exception as e:\n\t\t\tpost_manager.update(f\"{build_result}\\n\"\n\t\t\t f\"Upload failed: {type(e)}: {e}\")\n\t\t\treturn\n\n\t\tartifacts.update()\n\n\t\tzip_filename = list(device_out_dir.glob(self.zip_name))\n\t\tif not zip_filename:\n\t\t\treturn\n\n\t\tzip_filename = zip_filename[0].name\n\n\t\tpost_manager.update()\n\t\tupload_path = Path() / self.parsed_args.device / zip_filename.removesuffix(\".zip\")\n\t\tfor artifact in artifacts.keys():\n\t\t\tartifacts[artifact] = STATUS_UPLOADING\n\t\t\tpost_manager.update()\n\n\t\t\ttry:\n\t\t\t\tuploader.upload(artifact, upload_path)\n\t\t\texcept Exception as e:\n\t\t\t\tartifacts[artifact] = STATUS_ERROR\n\t\t\t\tLOGE(f\"Error while uploading artifact {artifact.name}:\\n\"\n\t\t\t f\"{format_exception(e)}\")\n\t\t\telse:\n\t\t\t\tartifacts[artifact] = STATUS_SUCCESS\n\n\t\t\tpost_manager.update()\n","sub_path":"homebot/modules/ci/projects/aosp/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":5271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"557153377","text":"import os\nimport sys\nimport logging\n\nimport numpy as np\nimport matplotlib.pyplot as plt \n\nimport ase.io\nfrom ase import Atoms, Atom, units\nfrom ase.calculators.vasp import Vasp\nfrom ase.calculators.emt import EMT\nfrom ase.build import fcc110\nfrom ase.build import fcc111\nfrom ase.build import fcc100\nfrom ase.build import surface\nfrom ase.md.velocitydistribution import MaxwellBoltzmannDistribution\nfrom ase.md import VelocityVerlet\nfrom ase.constraints import FixAtoms\nfrom ase.calculators.vasp.vasp import VaspChargeDensity\n\nimport amp\nfrom amp import Amp\nfrom amp import utilities\nfrom amp.model import LossFunction\nfrom amp.analysis import read_trainlog\nfrom amp.analysis import plot_convergence\nfrom amp.analysis import plot_sensitivity\nfrom amp.descriptor.gaussian import Gaussian\nfrom amp.model.neuralnetwork import NeuralNetwork\n\n\n# ---------------------------------------------------------\n# GLOBAL SETTINGS AND VARIABLES\n# ---------------------------------------------------------\n\nFILENAME = 'Al10.traj' #'al_large_longMD.traj'\n\nlogging.basicConfig(level=logging.INFO)\n\nPATH_PLOT = \"/home/nuss/01/bt702501/Dokumente/PlotsII_reloaded/\"\n\nT_GLOBAL = 300\nDT_STEP_GLOBAL = 1\n\n# ---------------------------------------------------------\n# FUNCTIONS\n# ---------------------------------------------------------\n\ndef generate_data(count, filename='data.traj'):\n \"\"\"Generates test or training data with a simple MD simulation.\"\"\"\n if not os.path.exists(filename):\n traj = 
ase.io.Trajectory(filename, 'w')\n #atoms = fcc110('Pt', (2, 2, 2), vacuum=7.)\n #atoms.extend(Atoms([Atom('Cu', atoms[7].position + (0., 0., 2.5)),\n # Atom('Cu', atoms[7].position + (0., 0., 5.))]))\n #atoms.set_constraint(FixAtoms(indices=[0, 2]))\n\n T = T_GLOBAL\n dt_step = DT_STEP_GLOBAL\n \n if filename == 'gold.traj' or filename == 'gold2.traj':\n atoms = fcc111('Au', (2,2,2), vacuum=7.0)\n atoms.set_chemical_symbols(['Au', 'Au', 'Au', 'Au', 'Pd', 'Pd', 'Pd', 'Pd'])\n T = 6000\n dt_step = 5\n print(atoms)\n elif filename == 'al.traj' or filename == 'al2.traj':\n atoms = fcc100('Al', (3,3,2), vacuum=10.0)\n T = 300\n dt_step = 2\n elif filename == 'al_large.traj' or filename == 'al_large_longMD.traj' or filename == 'Al6.traj':\n atoms = fcc100('Al', (4,6,6), vacuum=20.)\n T = 300\n dt_step = 2\n elif filename == 'Al6.traj':\n atoms = fcc100('Al', (4,6,6), vacuum=20.)\n T = 300\n dt_step = 2\n elif filename == 'Al8.traj':\n atoms = fcc100('Al', (4,6,8), vacuum=20.)\n T = 300\n dt_step = 2\n elif filename == 'al_large_12layers.traj':\n atoms = fcc100('Al', (4,6,12), vacuum=20.)\n T = 300\n dt_step = 2\n elif filename == 'MA.traj':\n with open(\"POSCAR_MA\") as f:\n all_lines = f.readlines()\n all_lines = [x[:-1] for x in all_lines]\n all_atoms = all_lines[5].split()\n n_atoms = all_lines[6].split()\n atoms = []\n for atom, n in zip(all_atoms, n_atoms):\n atoms.append((atom, n))\n atom_types = ''\n for atom_type, n in atoms:\n atom_types += str(atom_type)\n if n != '1':\n atom_types += str(n)\n lattice_vectors = np.array([np.float64(line.split()) for line in all_lines[2:5]])\n all_coordinates = np.array([np.float64(line.split()) for line in all_lines[8:]])\n\n atoms = Atoms(atom_types, pbc=True, positions=all_coordinates, cell=lattice_vectors)\n T = 300\n dt_step = 2 \n \n print(atoms)\n print(atoms.get_positions())\n print(atoms.get_chemical_symbols())\n\n atoms.set_calculator(Vasp(setups='recommended', npar=16, nsim=4))\n MaxwellBoltzmannDistribution(atoms, T * units.kB)\n dyn = VelocityVerlet(atoms, dt=dt_step * units.fs)\n dyn.run(50)\n\n energy = atoms.get_potential_energy()\n traj.write(atoms)\n densities = []\n energies = []\n energies.append(energy)\n # IMPORTANT: Density is already divided by volume!!!\n C = VaspChargeDensity(filename='CHGCAR')\n densities.append(C.chg)\n\n for step in range(count-1):\n logging.info(\" Calculating step {}\".format(step+1))\n dyn.run(5)\n traj.write(atoms)\n C = VaspChargeDensity(filename='CHGCAR')\n densities.append(C.chg)\n energies.append(atoms.get_potential_energy())\n print(energies[-1])\n traj.close()\n densities = np.array(densities)\n \n #densities_numpy = 'densities_{}.npy'.format(filename[:-5])\n #if not os.path.exists(densities_numpy):\n # np.save(densities_numpy, densities, allow_pickle=True)\n #else:\n # densities = np.load(densities_numpy, allow_pickle=True)\n energies_numpy = '../Al6_reloaded/energies_Al6.npy'.format(filename[:-5])\n if not os.path.exists(energies_numpy):\n np.save(energies_numpy, energies, allow_pickle=True)\n else:\n energies = np.load(energies_numpy, allow_pickle=True)\n\n # Train-test-split\n train_images, test_images, train_energies, test_energies = train_test_split(\n filename, energies, fraction=0.8) \n\n #densities_train_numpy = 'densities_train_{}.npy'.format(filename[:-5])\n #densities_test_numpy = 'densities_test_{}.npy'.format(filename[:-5])\n #if not os.path.exists(densities_train_numpy) or os.path.exists(densities_test_numpy):\n # np.save(densities_train_numpy, train_densities)\n # 
np.save(densities_test_numpy, test_densities)\n\n return (train_images, test_images, train_energies, test_energies)\n\n\ndef predict(train_images, test_images, calc):\n fig, ax = plt.subplots()\n\n actual_energies = []\n actual_densities = []\n predicted_energies = [] \n predicted_densities = []\n\n # TODO: Rework for loop --> Calculator might be reset at wrong times\n # Predicting on training data\n for i_dataset, atoms in enumerate(train_images):\n # Get actual energy\n actual_energy = atoms.get_potential_energy()\n actual_energies.append(actual_energy)\n # Get predicted energy\n atoms.set_calculator(calc)\n predicted_energy = atoms.get_potential_energy()\n predicted_energies.append(predicted_energy)\n \n ax.plot(actual_energy, predicted_energy, 'b.')\n \n # Predicting on test data\n for i_dataset, atoms in enumerate(test_images):\n # Get actual energy\n actual_energy = atoms.get_potential_energy()\n actual_energies.append(actual_energy)\n # Get predicted energy\n atoms.set_calculator(calc)\n predicted_energy = atoms.get_potential_energy()\n predicted_energies.append(predicted_energy)\n\n ax.plot(actual_energy, predicted_energy, 'r.')\n \n ax.set_xlabel('Actual energy / eV')\n ax.set_ylabel('Predicted energy / eV')\n fig.savefig(os.path.join(PATH_PLOT, 'parity000.png'))\n\n return (actual_energies, predicted_energies)\n \n \n\ndef train_test_split(images, energies, fraction=0.8):\n \"\"\"Randomly assigns 'fraction' of the images to a training set and\n (1-'fraction') to a test set. Returns two lists of ASE images\n and two lists of the respective densities\n \n Parameters\n ----------\n images: str\n Path to ASE trajectory (.traj)\n densities: numpy array\n Numpy array containing all densities\n fraction: float\n Portion of train_images to all images\n\n Returns\n -------\n train_images, test_images: list\n List of train and test images\n train_densities, test_densities: list\n Numpy array of train and test densities\n \"\"\"\n images = ase.io.Trajectory(images, 'r')\n \n trainingsize = int(fraction * len(images))\n testsize = len(images) - trainingsize\n testindices = []\n while len(testindices) < testsize:\n next = np.random.randint(len(images))\n if next not in testindices:\n testindices.append(next)\n testindices.sort()\n trainindices = [index for index in range(len(images)) if index\n not in testindices]\n train_images = [images[index] for index in trainindices]\n test_images = [images[index] for index in testindices]\n \n print(\"Train indices:\", trainindices)\n print(\"Test indices:\", testindices)\n\n train_energies = np.array([energies[index] for index in trainindices])\n test_energies = np.array([energies[index] for index in testindices]) \n\n images.close()\n \n return train_images, test_images, train_energies, test_energies\n\n\n\ndef observer(model, vector, loss):\n \"\"\"Function used for verbosity during training\n ERROR in amp --> Function not correctly implemented in current version\"\"\"\n print(vector[0])\n\n# ---------------------------------------------------------\n# MAIN PROGRAM\n# ---------------------------------------------------------\n\nif __name__ == '__main__':\n from amp.analysis import plot_parity_and_error\n\n # Generate training and test data\n logging.info(\"Generating training and test data\")\n train_images, test_images, \\\n train_energies, test_energies\\\n = generate_data(50, FILENAME)\n logging.info(\"Generation of training and test data finished!\")\n #print(\"Train densities:\", train_densities.shape)\n #print(\"Test densities:\", 
test_densities.shape)\n print(\"Train energies:\", train_energies.shape)\n print(\"Test energies:\", test_energies.shape)\n \n \n #sys.exit(0) \n # Training model\n logging.info(\"Starting Training\")\n \n cores = {} \n with open('mpd.hosts') as f:\n all_lines = f.readlines()\n for core in all_lines:\n core = core[:-1]\n if core not in cores.keys():\n cores[core] = 1\n elif core in cores.keys():\n cores[core] += 1\n print(cores) \n\n #calc = Amp(descriptor=Gaussian(),\n # model=NeuralNetwork(hiddenlayers=(10, 10, 10)),\n # cores=32,\n # #envcommand='export PYTHONPATH=/tp_leppert/amp_package/amp',\n # label='calc_{}'.format(FILENAME[:-5]))\n #convergence = {'energy_rmse': 0.0009,\n # 'energy_maxresid': 0.0018,\n # 'force_rmse': 0.1,\n # 'force_maxresid': 0.7}\n #calc.model.lossfunction = LossFunction(convergence=convergence, force_coefficient=0.3)\n #calc.train(images=train_images)\n calc = Amp.load('../Al6_reloaded/calc_Al6.amp')\n logging.info(\"Training finished!\")\n\n #print(calc.descriptor.fingerprints)\n calc_vasp = Vasp(setups='recommended', npar=16, nsim=4)\n\n energies = []\n forces = [] \n for image in test_images:\n image.set_calculator(calc_vasp)\n energy = image.get_potential_energy()\n force = image.get_forces()\n energies.append(energy)\n forces.append(force)\n energies = np.array(energies)\n forces = np.array(forces)\n print(energies)\n np.save('energies_vasp_{}'.format(FILENAME[:-5]), energies)\n np.save('forces_vasp_{}'.format(FILENAME[:-5]), forces)\n\n\n energies = []\n forces = []\n for image in test_images:\n image.set_calculator(calc)\n energy = image.get_potential_energy()\n force = image.get_forces()\n energies.append(energy)\n forces.append(force)\n energies = np.array(energies)\n forces = np.array(forces)\n print(energies)\n np.save('energies_amp_{}'.format(FILENAME[:-5]), energies)\n np.save('forces_amp_{}'.format(FILENAME[:-5]), forces)\n\n\n # Testing model\n #plot_parity_and_error(calc=calc, \n # images=test_images,\n # plotfile_parity=os.path.join(PATH_PLOT, 'parity_{}.pdf'.format(FILENAME[:-5])),\n # plotfile_error=os.path.join(PATH_PLOT, 'error_{}.pdf'.format(FILENAME[:-5])),\n # overwrite=True)\n #actual_energies, predicted_energies = predict(train_images, test_images, calc)\n #logdata = read_trainlog('calc_{}-log.txt'.format(FILENAME[:-5]))\n #plot_convergence(data=logdata, plotfile=os.path.join(PATH_PLOT, 'convergence_{}.pdf'.format(FILENAME[:-5])))\n #plot_sensitivity(calc=calc, images=FILENAME, plotfile=os.path.join(PATH_PLOT, 'sensitivity_{}.pdf'.format(FILENAME[:-5])))\n \n","sub_path":"Al_slabs/Al6_for_Al10_reloaded/model_AMP.py","file_name":"model_AMP.py","file_ext":"py","file_size_in_byte":12566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"132670725","text":"from PyQt5 import QtCore, QtWidgets\n\nfrom configurationdialog import ConfigurationDialog\nfrom persistancefacility import MongoDataPersistenceFacility\nfrom managementtools import OrdersManagementTool, ProductsManagementTool, CustomerManagementTool\nfrom sharedcomponets import IconProvider\n\nclass UserInteractionMainWindow(object):\n\n def setupUi(self, MainWindow):\n\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(595, 458)\n MainWindow.setWindowTitle(\"Gestión Arroces Llopis \")\n\n\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n\n # Add layout de to the main window\n self.horizontalLayout = QtWidgets.QVBoxLayout(self.centralwidget)\n 
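# NOTE: despite its name, self.horizontalLayout above holds a QVBoxLayout
# (children are stacked vertically), and its object name is set to
# "verticalLayout" just below; renaming the attribute would make setupUi
# easier to follow. The "tabebd" in the next comment should read "tabbed".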
self.horizontalLayout.setObjectName(\"verticalLayout\")\n\n # Add tabebd tools widget to the main window\n self.tabbedToolsWidget = ToolsWidget(self.centralwidget)\n self.horizontalLayout.addWidget(self.tabbedToolsWidget)\n\n # Add toolbar to the main window\n self.toolBar = ArrocesLlopisToolBar(self.tabbedToolsWidget, MainWindow)\n MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)\n\n # Add status bar to the main window\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n # Add central Widget to the main window\n MainWindow.setCentralWidget(self.centralwidget)\n\nclass ToolsWidget(QtWidgets.QTabWidget):\n \"\"\"\"\"\"\n def __init__(self, parent=None):\n \"\"\"Constructor for ToolsWidget\"\"\"\n super(QtWidgets.QTabWidget, self).__init__(parent)\n self.setTabsClosable(True)\n self.setMovable(True)\n self.setObjectName(\"toolsTabWidget\")\n\n self.ordersManagementTool = None\n self.customerManagementTool = None\n self.productsManagementTool = None\n\n self.tabCloseRequested.connect(self.removeTabHandler)\n\n self.setStyleSheet(\"QTabBar::close - button { image: url(close.png) subcontrol - position: left; }\")\n self.setStyleSheet(\"QTabBar::tab { height: 30px; width: 150px;}\")\n\n def removeTabHandler(self, index):\n\n supressedTab = self.widget(index)\n if type(supressedTab) is OrdersManagementTool:\n self.ordersManagementTool = None\n elif type(supressedTab) is CustomerManagementTool:\n self.customerManagementTool = None\n elif type(supressedTab) is ProductsManagementTool:\n self.productsManagementTool = None\n\n self.removeTab(index)\n\n def addCustomersTool(self):\n if self.customerManagementTool is None:\n\n hedlabels = ('Nombre', 'Apellidos', 'Télefono', 'Email', 'Dirección','Fecha')\n hedprops = (100, 100, 100, 100, 100, 100)\n self.customerManagementTool = CustomerManagementTool(hedlabels, hedprops)\n self.addTab(self.customerManagementTool, IconProvider.getIconByName(\"customers\"), \"Clientes\")\n\n def addOrdersTool(self):\n if self.ordersManagementTool is None:\n hedlabels = ('Pedido ID','Nombre Cliente', 'Teléfono','Fecha','Hora Entrega', 'Estado', 'Precio')\n hedprops = (100, 100, 100, 100, 100, 100,100)\n self.ordersManagementTool = OrdersManagementTool(hedlabels, hedprops)\n self.addTab(self.ordersManagementTool, IconProvider.getIconByName(\"orders\"), \"Pedidos\")\n\n def addProductsTool(self):\n if self.productsManagementTool is None:\n hedlabels = ('Nombre', 'Tipo', 'Precio', 'Estatus', 'Versión', 'Product ID')\n hedprops = (200, 100, 100, 100, 100, 100)\n self.productsManagementTool = ProductsManagementTool(hedlabels, hedprops)\n self.addTab(self.productsManagementTool, IconProvider.getIconByName(\"products\"), \"Productos\")\n\nclass ArrocesLlopisToolBar(QtWidgets.QToolBar):\n \"\"\" \"\"\"\n def __init__(self, workAreaTabWidget, parent=None):\n super(QtWidgets.QToolBar, self).__init__(parent)\n # creamos todos los iconos necesarios para la barra de herramientas\n self.workAreaTabWidget = workAreaTabWidget\n\n self.customersAction = QtWidgets.QAction(mainWindow)\n self.customersAction.setIcon(IconProvider.getIconByName(\"customers\"))\n self.customersAction.setObjectName(\"customersAction\")\n self.customersAction.triggered.connect(workAreaTabWidget.addCustomersTool)\n\n self.ordersAction = QtWidgets.QAction(mainWindow)\n self.ordersAction.setIcon(IconProvider.getIconByName(\"orders\"))\n self.ordersAction.setObjectName(\"ordersAction\")\n 
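# NOTE: in ToolsWidget.__init__ above, the second setStyleSheet() call
# replaces the first one (a widget keeps a single stylesheet string), and the
# first rule's selector contains stray spaces ("close - button"). To apply
# both rules, join the corrected selectors in one call. Sketch:
#
#     self.setStyleSheet(
#         "QTabBar::close-button { image: url(close.png); subcontrol-position: left; }"
#         "QTabBar::tab { height: 30px; width: 150px; }"
#     )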
self.ordersAction.triggered.connect(workAreaTabWidget.addOrdersTool)\n\n self.productsAction = QtWidgets.QAction(mainWindow)\n self.productsAction.setIcon(IconProvider.getIconByName(\"products\"))\n self.productsAction.setObjectName(\"productsAction\")\n self.productsAction.triggered.connect(workAreaTabWidget.addProductsTool)\n\n self.connectToolButton = QtWidgets.QToolButton(mainWindow)\n self.connectToolButton.setCheckable(True)\n self.connectToolButton.setIcon(IconProvider.getIconByName(\"disconnect\"))\n self.connectToolButton.setObjectName(\"connectToolButton\")\n self.connectToolButton.toggled.connect(self.connectAction)\n\n self.configureAction = QtWidgets.QAction(mainWindow)\n self.configureAction.setIcon(IconProvider.getIconByName(\"configure\"))\n self.configureAction.setObjectName(\"configureAction\")\n self.configureAction.triggered.connect(self.configuretAction)\n\n self.addAction(self.customersAction)\n self.addAction(self.ordersAction)\n self.addAction(self.productsAction)\n self.addSeparator()\n self.addWidget(self.connectToolButton)\n self.addSeparator()\n self.addAction(self.configureAction)\n\n def connectAction(self):\n if self.connectToolButton.isChecked():\n MongoDataPersistenceFacility.getInstance().connect()\n if not MongoDataPersistenceFacility.getInstance().isConnected():\n self.connectToolButton.setChecked(False)\n QtWidgets.QMessageBox.critical(self, \"Conexion a Base de datos\", \"Fallo conectando a la base de datos\",\n QtWidgets.QMessageBox.Ok)\n else:\n self.connectToolButton.setIcon(IconProvider.getIconByName(\"connect\"))\n else:\n self.connectToolButton.setIcon(IconProvider.getIconByName(\"disconnect\"))\n MongoDataPersistenceFacility.getInstance().disconnect()\n\n def configuretAction(self):\n configuration = MongoDataPersistenceFacility.getInstance().parseConfigurationFile()\n configurationDialog = ConfigurationDialog(configuration)\n configurationDialog.setWindowModality(QtCore.Qt.ApplicationModal)\n if configurationDialog.exec_():\n modifiedConfiguration = configurationDialog.getConfiguration()\n MongoDataPersistenceFacility.getInstance().saveConfigurationToFile(modifiedConfiguration)\n\nimport resources_rc\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n mainWindow = QtWidgets.QMainWindow()\n userInteractionMainWindow = UserInteractionMainWindow()\n userInteractionMainWindow.setupUi(mainWindow)\n mainWindow.show()\n sys.exit(app.exec_())\n","sub_path":"arrocesllopisapp.py","file_name":"arrocesllopisapp.py","file_ext":"py","file_size_in_byte":7396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"58068697","text":"import base64\nimport hashlib\nimport hmac\nimport json\nimport subprocess\nfrom server import change_documents_jurisdiction_settings\nfrom random import randint\nfrom os.path import exists\n\n\ndef makedir(path):\n \"\"\"\n 修改指定文件夹的权限\n :param path: 文件夹的绝对路径\n :return:无\n \"\"\"\n p = subprocess.Popen(\n \"mkdir \" + path,\n universal_newlines=True,\n shell=True,\n close_fds=True\n )\n p.communicate()\n\n\ndef change_jur(path, password, jur=\"777\"):\n \"\"\"\n 修改指定文件夹的指定权限\n :param path: 指定文件夹的绝对路径\n :param password:当前登录的sudo组用户密码\n :param jur: 修改为的权限组码\n :return: 无\n \"\"\"\n p = subprocess.Popen(\n \"sudo -S chmod \" + jur + \" \" + path,\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n universal_newlines=True,\n shell=True,\n close_fds=True\n )\n p.stdin.write(password + \"\\n\")\n p.communicate()\n 
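# NOTE: change_jur() above writes the sudo password to stdin and then calls
# communicate(); communicate() can deliver the input itself, which also
# closes stdin and drops the extra write/close pair. Sketch, assuming the
# same `-S` convention (sudo reads the password from stdin):
#
#     import subprocess
#     p = subprocess.run(["sudo", "-S", "chmod", jur, path],
#                        input=password + "\n",
#                        capture_output=True, text=True)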
p.stdin.close()\n\n\ndef get_user_group_list():\n \"\"\"\n 获取当前系统中所有的有效用户及其所属组组成的字典\n :return:字典,{groupname&groupid:[username&userid] ... }\n \"\"\"\n\n p = subprocess.Popen(\n \"cat /etc/passwd\",\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n universal_newlines=True,\n shell=True,\n close_fds=True\n )\n users_list = p.stdout.readlines()\n p.communicate()\n p.stdin.close()\n\n real_users_list = []\n for user_info in users_list:\n user_info = user_info[:-1]\n if user_info.endswith(\"sh\"):\n real_users_list.append(user_info)\n\n users_list = []\n gid_list = []\n for user_info in real_users_list:\n username = user_info.split(\":\")[0]\n uid = user_info.split(\":\")[2]\n gid = user_info.split(\":\")[3]\n users_list.append(\"%s&%s&%s\" % (username, uid, gid))\n gid_list.append(gid)\n\n gid_list = set(gid_list)\n\n p = subprocess.Popen(\n \"cat /etc/group\",\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n universal_newlines=True,\n shell=True,\n close_fds=True\n )\n group_list = p.stdout.readlines()\n p.communicate()\n p.stdin.close()\n user_group_list = []\n for group in group_list:\n if group.split(\":\")[2] in gid_list:\n for user in users_list:\n if group.split(\":\")[2] == user.split(\"&\")[2]:\n user_group_list.append(\"%s&%s\" % (user, group.split(\":\")[0]))\n\n user_dict = {}\n for i in user_group_list:\n if i.split(\"&\")[3] in user_dict:\n user_dict[i.split(\"&\")[3]].append(i.split(\"&\")[0])\n else:\n user_dict[i.split(\"&\")[3]] = [i.split(\"&\")[0]]\n\n return user_dict\n\n\ndef get_all_folder(customer_number=\"\"):\n \"\"\"\n 获取一个已存在客户号文件夹中所的的项目名列表\n :param customer_number: 客户号\n :return: 该客户号文件下所有的项目名列表,[项目1-平头车刀, 项目2, ...]\n \"\"\"\n\n p = subprocess.Popen(\n \"ls \" + change_documents_jurisdiction_settings.FILEPATH + customer_number,\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n universal_newlines=True,\n shell=True,\n close_fds=True\n )\n result_list = p.stdout.readlines()\n p.communicate()\n p.stdin.close()\n customer_folder_list = []\n for i in result_list:\n customer_folder_list.append(i[:-1])\n\n return customer_folder_list\n\n\ndef get_filename(customer_number):\n \"\"\"\n 获取指定目录下的推荐新建目录名\n :param customer_number: 被指定的目录\n :return: 推荐目录名\n \"\"\"\n customer_number_all_folder = get_all_folder(customer_number)\n\n if not customer_number_all_folder:\n return change_documents_jurisdiction_settings.FILENAME_PREFIX + \"000001\"\n\n max_folder = customer_number_all_folder[-1]\n\n max_folder_num = \"\"\n for i in max_folder:\n if i.isdigit():\n max_folder_num += i\n else:\n if max_folder_num:\n break\n\n prefix = max_folder[:max_folder.find(max_folder_num)]\n\n return prefix + f\"%0{change_documents_jurisdiction_settings.FILENAME_NUMBER_LENGTH}d\" % (int(max_folder_num) + 1)\n\n\ndef create_secret():\n \"\"\"\n 生成一个32位的密钥\n :return: 字符串\n \"\"\"\n str_list = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\", \"u\",\n \"v\", \"w\", \"x\", \"y\", \"z\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\", \"N\", \"O\", \"P\",\n \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\", \"!\", \"@\", \"#\", \"$\", \"%\", \"^\", \"&\", \"*\", \"?\"]\n\n secret = \"\"\n for i in range(32):\n secret += str_list[randint(0, len(str_list) - 1)]\n\n return secret\n\n\nclass MakedirWithDifferentJurisdiction:\n \"\"\"\n 根据客户号创建并修改二级目录的权限\n \"\"\"\n password 
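# NOTE: create_secret() above draws key characters with random.randint,
# which is not a cryptographically secure source; the stdlib secrets module
# is the usual choice for generating a 32-character key. Sketch:
#
#     import secrets
#     import string
#     alphabet = string.ascii_letters + "!@#$%^&*?"
#     secret = "".join(secrets.choice(alphabet) for _ in range(32))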
= change_documents_jurisdiction_settings.USER_PASSWORD\n file_list = [change_documents_jurisdiction_settings.CHILDFILE1,\n change_documents_jurisdiction_settings.CHILDFILE2,\n change_documents_jurisdiction_settings.CHILDFILE3,\n change_documents_jurisdiction_settings.CHILDFILE4,\n change_documents_jurisdiction_settings.CHILDFILE5,\n change_documents_jurisdiction_settings.CHILDFILE6,\n change_documents_jurisdiction_settings.CHILDFILE7,\n change_documents_jurisdiction_settings.CHILDFILE8,\n ]\n\n def __init__(self, customer_num):\n self.customer_num = customer_num\n self.path = change_documents_jurisdiction_settings.FILEPATH + customer_num\n\n def create_file(self, filename, child_filename=None):\n if not child_filename:\n child_filename = \"\"\n\n file_path = self.path + \"/\" + filename + \"/\" + child_filename\n makedir(path=file_path)\n change_jur(path=file_path, password=self.password, jur=\"700\")\n\n def change_jurisdiction(self, filename, child_filename, jurisdiction, user_list):\n\n if jurisdiction == \"r\":\n jurisdiction = \"r-\"\n\n user_str = ''\n for user in user_list:\n user_str += f\"u:{user}:{jurisdiction}x,\"\n user_str = user_str[:-1]\n\n p = subprocess.Popen(\n \"sudo -S setfacl -m \" + user_str + \" \" + self.path + \"/\" + filename + \"/\" + child_filename,\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n universal_newlines=True,\n shell=True,\n close_fds=True\n )\n p.stdin.write(self.password + \"\\n\")\n p.communicate()\n p.stdin.close()\n\n\n# def create_doc_with_jur(data):\n# \"\"\"\n# 根据data中的客户号和文件权限表创建项目文件夹\n# :param data: 特定格式的数据\n# :return: 创建项目文件夹的绝对路径\n# \"\"\"\n# customer_number = data.get(\"customer_number\")\n# result = get_all_folder(customer_number)\n# if result:\n# filename = get_filename(customer_number)\n#\n# else:\n# # 创建客户号目录并修改权限\n# path = change_documents_jurisdiction_settings.FILEPATH + customer_number\n# makedir(path=path)\n# change_jur(path=path, password=change_documents_jurisdiction_settings.USER_PASSWORD)\n#\n# filename = get_filename(customer_number)\n#\n# # 创建项目目录并修改权限\n# path = change_documents_jurisdiction_settings.FILEPATH + customer_number + \"/\" + filename\n# makedir(path=path)\n# change_jur(path=path, password=change_documents_jurisdiction_settings.USER_PASSWORD)\n#\n# # 创建二级目录并修改权限\n# for index in range(1, 9):\n# user_index = data.get(str(index))\n# if not user_index:\n# continue\n# r_user_list = user_index.get(\"r\")\n# rw_user_list = user_index.get(\"rw\")\n# # 创建对象\n# mdj = MakedirWithDifferentJurisdiction(customer_num=customer_number)\n# # 二级目录\n# child_filename = mdj.file_list[index - 1]\n# mdj.create_file(filename=filename, child_filename=child_filename)\n# if r_user_list:\n# mdj.change_jurisdiction(filename=filename, child_filename=child_filename, jurisdiction=\"r\",\n# user_list=r_user_list)\n# if rw_user_list:\n# mdj.change_jurisdiction(filename=filename, child_filename=child_filename, jurisdiction=\"rw\",\n# user_list=rw_user_list)\n#\n# return change_documents_jurisdiction_settings.FILEPATH + customer_number + \"/\" + filename\n\n\ndef encode_data(data):\n \"\"\"\n 将一条数据编码成bytes类型字符串\n :param data: 数据\n :return: 字符串\n \"\"\"\n return base64.b64encode(json.dumps({\"data\": data}).encode())\n\n\ndef decode_data(data):\n \"\"\"\n 将一条bytes类型数据解码为其原始数据\n :param data:\n :return:\n \"\"\"\n return json.loads(base64.b64decode(data)).get(\"data\")\n\n\nclass PersonalEncrypt:\n\n def __init__(self, key):\n self.key = key\n self.prefix = \"eyJkYXRhIjogI\"\n self.suffix = \"In0=\"\n\n @staticmethod\n 
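# NOTE: encode_data()/decode_data() above are inverses (wrap in JSON, then
# base64-encode). Quick round trip with an arbitrary JSON-serialisable
# payload:
#
#     token = encode_data({"order": "create"})        # -> bytes
#     assert decode_data(token) == {"order": "create"}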
def create_char():\n str_list = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\",\n \"u\",\n \"v\", \"w\", \"x\", \"y\", \"z\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\", \"N\", \"O\",\n \"P\",\n \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\", \"!\", \"@\", \"#\", \"$\", \"%\", \"^\", \"&\", \"*\", \"?\"]\n\n return str_list[randint(0, 31)].encode()\n\n def encrypt_data(self, data):\n b_str = base64.b64encode(json.dumps({\"data\": data}).encode())[13:56]\n b_str = b_str[:13] + self.create_char() + b_str[13:]\n b_str = b_str[:15] + self.create_char() + b_str[15:] # 长度:45\n result = b_str + hmac.new(self.key.encode(), b_str, digestmod=hashlib.sha256).hexdigest().encode()\n\n return result # 长度:109\n\n def decrypt_data(self, data):\n b1 = data[:45]\n b2 = data[45:].decode()\n if hmac.new(self.key.encode(), b1, digestmod=hashlib.sha256).hexdigest() != b2:\n return None\n b1 = b1[:13] + b1[14:]\n b1 = b1[:14] + b1[15:]\n b1 = self.prefix + b1.decode() + self.suffix\n b1 = json.loads(base64.b64decode(b1)).get(\"data\")\n return b1\n\n\ndef make_logfile():\n if not exists(\"/var/log/makedir_with_set_jurisdiction.log\"):\n p = subprocess.Popen(\n \"sudo -S touch /var/log/makedir_with_set_jurisdiction.log\",\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n universal_newlines=True,\n shell=True,\n close_fds=True\n )\n p.stdin.write(change_documents_jurisdiction_settings.USER_PASSWORD + \"\\n\")\n p.communicate()\n p.stdin.close()\n\n p = subprocess.Popen(\n \"sudo -S chmod 666 /var/log/makedir_with_set_jurisdiction.log\",\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n universal_newlines=True,\n shell=True,\n close_fds=True\n )\n p.stdin.write(change_documents_jurisdiction_settings.USER_PASSWORD + \"\\n\")\n p.communicate()\n p.stdin.close()\n\n\ndef create_doc_with_jur(data):\n \"\"\"\n 根据data中的客户号和文件权限表创建项目文件夹\n :param data: 特定格式的数据\n :return: 创建项目文件夹的绝对路径\n \"\"\"\n customer_number = data.get(\"customer_number\")\n result = get_all_folder(customer_number)\n if result:\n filename = get_filename(customer_number)\n\n else:\n # 创建客户号目录并修改权限\n path = change_documents_jurisdiction_settings.FILEPATH + customer_number\n makedir(path=path)\n change_jur(path=path, password=change_documents_jurisdiction_settings.USER_PASSWORD)\n\n filename = get_filename(customer_number)\n\n # 创建项目目录并修改权限\n path = change_documents_jurisdiction_settings.FILEPATH + customer_number + \"/\" + filename\n makedir(path=path)\n change_jur(path=path, password=change_documents_jurisdiction_settings.USER_PASSWORD)\n\n # for index in range(1, 9):\n # user_index = data.get(str(index))\n # if not user_index:\n # continue\n # r_user_list = user_index.get(\"r\")\n # rw_user_list = user_index.get(\"rw\")\n # # 创建对象\n # mdj = MakedirWithDifferentJurisdiction(customer_num=customer_number)\n # # 二级目录\n # child_filename = mdj.file_list[index - 1]\n # mdj.create_file(filename=filename, child_filename=child_filename)\n # if r_user_list:\n # mdj.change_jurisdiction(filename=filename, child_filename=child_filename, jurisdiction=\"r\",\n # user_list=r_user_list)\n # if rw_user_list:\n # mdj.change_jurisdiction(filename=filename, child_filename=child_filename, jurisdiction=\"rw\",\n # user_list=rw_user_list)\n #\n # 创建二级目录并修改权限\n for index in range(8):\n mdj = MakedirWithDifferentJurisdiction(customer_num=customer_number)\n 
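# NOTE: PersonalEncrypt.decrypt_data() above compares the received MAC with
# an ordinary `!=`; hmac.compare_digest() performs the same check in
# constant time and is the standard way to compare MACs. Sketch:
#
#     expected = hmac.new(self.key.encode(), b1,
#                         digestmod=hashlib.sha256).hexdigest()
#     if not hmac.compare_digest(expected, b2):
#         return None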
child_filename = mdj.file_list[index]\n mdj.create_file(filename=filename, child_filename=child_filename)\n change_jur(jur=\"700\", path=path + f\"/{child_filename}\",\n password=change_documents_jurisdiction_settings.USER_PASSWORD)\n # data = {\n # \"order\": \"create\",\n # \"customer_number\": \"000001\",\n # \"token\": \"xxxxx\",\n # s '1': {'group_name': '市场部', 'group_id': '1001', 'user_list': ['张一', '张二']},\n # a '2': {'group_name': '应用部', 'group_id': '1002', 'user_list': ['王一', '王二']},\n # t '3': {'group_name': '研发部', 'group_id': '1003', 'user_list': ['李一']},\n # m '4': {'group_name': '营销部', 'group_id': '1004', 'user_list': ['赵一']},\n # b '5': {'group_name': '采购部', 'group_id': '1005', 'user_list': ['孙二/采购部部长']}\n # }\n # s-市场部 a-应用部 t-研发部 m-营销部 b-采购部\n if index == 0:\n r_user_list = data[\"1\"][\"user_list\"] + data[\"2\"][\"user_list\"] + data[\"3\"][\"user_list\"]\n rw_user_list = data[\"2\"][\"user_list\"]\n elif index == 1:\n r_user_list = data[\"1\"][\"user_list\"] + data[\"2\"][\"user_list\"]\n rw_user_list = data[\"2\"][\"user_list\"]\n elif index == 2:\n r_user_list = data[\"1\"][\"user_list\"] + data[\"2\"][\"user_list\"] + data[\"3\"][\"user_list\"]\n rw_user_list = data[\"1\"][\"user_list\"] + data[\"2\"][\"user_list\"]\n elif index == 3:\n r_user_list = data[\"1\"][\"user_list\"] + data[\"2\"][\"user_list\"] + data[\"3\"][\"user_list\"]\n rw_user_list = data[\"1\"][\"user_list\"] + data[\"2\"][\"user_list\"]\n elif index == 4:\n r_user_list = data[\"1\"][\"user_list\"] + data[\"2\"][\"user_list\"] + data[\"3\"][\"user_list\"]\n rw_user_list = data[\"1\"][\"user_list\"]\n elif index == 5:\n r_user_list = data[\"1\"][\"user_list\"] + data[\"2\"][\"user_list\"] + data[\"3\"][\"user_list\"] + data[\"4\"][\n \"user_list\"]\n rw_user_list = data[\"3\"][\"user_list\"]\n elif index == 6:\n r_user_list = data[\"1\"][\"user_list\"] + data[\"5\"][\"user_list\"] + data[\"3\"][\"user_list\"]\n rw_user_list = data[\"3\"][\"user_list\"]\n elif index == 7:\n r_user_list = data[\"1\"][\"user_list\"] + data[\"2\"][\"user_list\"]\n rw_user_list = data[\"1\"][\"user_list\"] + data[\"2\"][\"user_list\"]\n if r_user_list:\n mdj.change_jurisdiction(filename=filename, child_filename=child_filename, jurisdiction=\"r\",\n user_list=r_user_list)\n if rw_user_list:\n mdj.change_jurisdiction(filename=filename, child_filename=child_filename, jurisdiction=\"rw\",\n user_list=rw_user_list)\n\n return change_documents_jurisdiction_settings.FILEPATH + customer_number + \"/\" + filename\n\n\n\ndata = {\n \"order\": \"create\",\n \"customer_number\": \"000001\",\n \"token\": \"xxxxx\",\n '1': {'group_name': '市场部', 'group_id': '1001', 'user_list': ['张一', '张二']},\n '2': {'group_name': '应用部', 'group_id': '1002', 'user_list': ['王一', '王二']},\n '3': {'group_name': '研发部', 'group_id': '1003', 'user_list': ['李一']},\n '4': {'group_name': '营销部', 'group_id': '1004', 'user_list': ['赵一']},\n '5': {'group_name': '采购部', 'group_id': '1005', 'user_list': ['孙二/采购部部长']}\n}\n\nif __name__ == '__main__':\n res = create_doc_with_jur(data)\n print(res)\n","sub_path":"server/tools_1.py","file_name":"tools_1.py","file_ext":"py","file_size_in_byte":17156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"86963333","text":"from django.shortcuts import render, redirect, render_to_response\nfrom django.template import Context, RequestContext\nfrom django.template.loader import get_template\nfrom django.http import HttpResponse, Http404, HttpResponseRedirect\nfrom django.contrib.auth 
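# NOTE: the elif chain above encodes, for each sub-folder index, which
# groups receive read and read/write access; the same mapping can live in a
# data table. Sketch, using the "1".."5" group keys from the payload shown
# in the comments (only three rows spelled out here):
#
#     ACL = {0: (("1", "2", "3"), ("2",)),
#            1: (("1", "2"), ("2",)),
#            7: (("1", "2"), ("1", "2"))}   # ...one entry per index
#     r_groups, rw_groups = ACL[index]
#     r_user_list = [u for g in r_groups for u in data[g]["user_list"]]
#     rw_user_list = [u for g in rw_groups for u in data[g]["user_list"]]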
import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.urlresolvers import reverse\nfrom django.db.models import Q\nfrom .models import Game, Coin\n\ndef user_login(request):\n context = RequestContext(request)\n if request.method == 'POST':\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(username=username, password=password)\n if user:\n if user.is_active:\n login(request, user)\n return HttpResponseRedirect(\"/connect4/games/\")\n else:\n return HttpResponseRedirect('accounts/login/account-disabled')\n # NOT WORKING Return a 'disabled account' error message\n else:\n return render_to_response('accounts/login.html' ,{'invalid': True}, context)\n # Return an 'invalid login' error message.\n else:\n return render_to_response(\"accounts/login.html\", {}, context)\n\n@login_required\ndef user_logout(request):\n logout(request)\n return HttpResponseRedirect(reverse('login'))\n\ndef signup(request):\n pass\n\n@login_required\ndef games(request):\n my_games = Game.objects.filter( Q(player1 = request.user) |\n Q(player2 = request.user))\n joinable_games = Game.objects.filter(player2 = None).exclude(player1 = request.user)\n my_active_games = my_games.filter(status = \"a\").exclude(player2 = None)\n my_completed_games = my_games.filter(status = \"c\")\n\n # Display in most recent order.\n context = {\"my_active_games\": my_active_games.order_by('-created_date'),\n \"my_completed_games\": my_completed_games.order_by('-created_date'),\n \"joinable_games\": joinable_games.order_by('-created_date')}\n\n rcontext = RequestContext(request, context)\n\n return render_to_response(\"games.html\", rcontext)\n\n@login_required\ndef new_game(request):\n game = Game.objects.create(player1=request.user, player2=None, status=\"a\")\n return redirect(\"play\", game.id)\n\n# no login required, allows users to observe other games\ndef play(request, game_id):\n\n # Get the current game from the database\n game = Game.objects.filter(id=game_id)[0]\n\n # If the player who requested the page is not the player who created\n # the game then join them up.\n if request.user != game.player1:\n game.join_up(request.user)\n\n coins = game.coin_set\n playernum = 1 if game.player1.id == request.user.id else 2\n\n # player1 is always red, and player2 always yellow\n colour = \"red\" if playernum == 1 else \"yellow\"\n\n context = {\"game\" : game,\n \"coins\": coins.all(),\n \"turn\": (game.turn == request.user) and game.status == \"a\",\n \"colour\": colour}\n\n # Check for victory if the game is still active, so we only display\n # the victory screen once\n if game.status == \"a\":\n if check_victory(game.last_move):\n game.status = \"c\"\n game.winner = game.last_move.player\n game.save()\n context['celebration'] = True;\n context['turn'] = False\n\n rcontext = RequestContext(request, context)\n\n return render_to_response(\"play.html\", rcontext)\n\ndef move(request, game_id, column):\n\n game = Game.objects.filter(id=game_id)[0]\n\n # Calculate which row the coin will fall to\n row = 5 - game.coin_set.filter(column = column).count()\n\n\n # Check if their move is valid\n if (game.status == \"c\" or # game is finished\n row < 0 or # coins overflowing\n game.turn != request.user): # isn't their turn!\n return redirect(\"play\", game_id)\n\n game.make_move(request.user, row, column)\n\n # Take them back to the play view\n return redirect(\"play\", game_id)\n\n\n# To check for 4 in a row, we check forwards and backwards in 
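# NOTE: play() and move() above fetch the game with
# Game.objects.filter(id=game_id)[0], which raises IndexError for an unknown
# id; Django's shortcut returns an HTTP 404 instead. Sketch:
#
#     from django.shortcuts import get_object_or_404
#     game = get_object_or_404(Game, id=game_id)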
the 8 possible\n# directions a victory could've occured in\n\n# It should actually only check 7 direction since there can't be a line that\n# goes directly up from the just dropped coin, but this was easier to\n# implement without too much of a performance hit\ndef check_victory(coin):\n if coin is None: return None\n return (check_line(coin, 1,0) or\n check_line(coin, 1,1) or\n check_line(coin, 0,1) or\n check_line(coin, -1,1))\n\n\n# Check the number of consecutive coins of the same colour going forwards\n# and then backwards along the same direction and add the two numbers together\ndef check_line(coin, h, v):\n\n def check_direction(coin,h,v):\n neighbour = coin.neighbour(h,v)\n if neighbour: # only proceed if a neighbour exists\n neighbour = neighbour[0]\n if neighbour.colour == coin.colour:\n return 1 + check_direction(neighbour,h,v)\n return 0\n\n return (check_direction(coin,h,v) + check_direction(coin,-h,-v) + 1) >= 4\n\n","sub_path":"connect4/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"64915229","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 17 00:03:09 2018\n@author: Jesus Omar Cuenca Espino\n\"\"\"\n#receives the ip in form of a string so it can be divided and proccessed accordingly\ndef divide(ip):\n ipf=[0,0,0,0]\n s=\"\"\n i=0\n for x in ip:\n if(x=='.'):\n if(int(s)<256):\n num=bin(int(s))[2:]\n while(len(num)<8):\n num='0'+num\n ipf[i]=num\n s=\"\"\n i+=1\n else:\n return \"La ip ingresada esta mal escrita\"\n else:\n s+=x\n if(int(s)>=256):\n return \"La ip ingresada esta mal escrita\"\n num=bin(int(s))[2:]\n while(len(num)<8):\n num='0'+num\n ipf[3]=num\n return ipf\n\n#receives the ip generated from divide and then clasifies so it can find the mask by default\ndef clase(ip):\n tipo=int(ip[0],2)\n if(tipo<0):\n return -1\n elif(tipo<128):\n return 1\n elif(tipo<192):\n return 2\n elif(tipo<224):\n return 3\n else:\n return -1\n\n#checks the subnet mask so there are no contradictions\ndef comp(cl,smask):\n for x in range(cl):\n if(smask[x]!=255):\n return True\n return False\n\n#function to calculate the subnet mask\ndef final_mask(cl,use):\n if(use>30 or use<9):\n return \"error\"\n msk=[0,0,0,0]\n pos=0\n while(use>8):\n msk[pos]=255\n pos+=1\n use-=8\n if(comp(cl,msk)):\n return \"error\"\n count=7\n final=0\n while(use>0):\n final+=2**count\n count-=1\n use-=1\n msk[pos]=final\n return msk\n\n#Prints the mask\ndef pmask(mask):\n st=\"\"\n for x in range(4):\n st+=str(mask[x])\n if(x<3):\n st+='.'\n return st\n\n#merges the functions above in a single process meant to only be used once\ndef init(ipi,m):\n ip=divide(ipi)\n claseip=clase(ip)\n usebits=m-claseip*8\n if(usebits<1):\n return ipi,claseip,m,\"error\"\n else:\n mask=final_mask(claseip,m)\n return ip,claseip,mask,usebits\n\n#Makes easier the proccess of conversion into the ipv4 address\ndef transform_bits(string):\n if(len(string)<32):\n return \"error\"\n else:\n cont=0\n res=\"\"\n while(cont<32):\n if(cont in [8,16,24]):\n res+='.'\n res+=string[cont]\n cont+=1\n return res\n\n#Converts a large string into an ordered string in the form of an ipv4 address\ndef transform_bits2(string):\n close=transform_bits(string)\n st=\"\"\n res=\"\"\n for x in close:\n if(x=='.'):\n st=int(st,2)\n res+=str(st)+'.'\n st=\"\"\n else:\n st+=x\n res+=str(int(st,2))\n return res\n\n#checks if the input string is a candidate to be a broadcast address\ndef broadcast(string):\n for x 
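# NOTE: divide()/clase()/final_mask() above hand-roll dotted-quad parsing
# and subnet-mask construction; the stdlib ipaddress module covers both.
# Sketch:
#
#     import ipaddress
#     net = ipaddress.ip_network("0.0.0.0/26")
#     str(net.netmask)          # '255.255.255.192'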
in string:\n if(x=='0'):\n return True\n return False\n\n#Makes possible the iteraton over the subnets\ndef binarySum(subnet,quantity):\n long=len(subnet)\n num=int(subnet,2)\n num+=quantity\n res=bin(int(num))[2:]\n while(len(res)0 and ans= treshold:\n first.append(2 ** i)\n return first, data\n\n\ndef check(x):\n global data, trashold\n count = 0\n min = len(data) * trashold\n for n in data:\n if n & x == x:\n count += 1\n if count >= min:\n return True\n return count >= min\n\ndef check_zor(k, x):\n #print(bin(x).count(\"1\"))\n return bin(x).count(\"1\") <= k\n i = 0\n #print(x)\n while x > 0:\n if x % 2 == 1:\n i += 1\n if i > k:\n return False\n #print(x)\n x = int(x / 2)\n return True\n\ndef calc(new, k, i=0, j=0, z_or=0, z_add=0): # i: anzahl schon dazuaddierte zahl\n\n if i != 0 and not check_zor(k, z_or):\n return []\n if i == k: # Wenn man k zahlen zusammenaddiert/geort hat\n if z_or * (k - 1) == z_add and check(z_or):\n return [z_or]\n else:\n return []\n else:\n result = []\n for l in range(j, len(new)-(k-i)+1):\n if j == 0 and False:\n print(k)\n if k > len(new) - j + i:\n break\n result += calc(new, k, i + 1, l + 1, z_or | new[-l - 1], z_add + new[-l - 1])\n return result\n\n\ntie = 0\n\ndef myprog():\n global data, trashold\n trashold = 0.6\n first, data = read2(\"dm4.csv\", trashold)\n #print(\"First\", first)\n\n result = [first]\n\n for i in range(2, len(first)):\n t1 = time.clock()\n print(\"i\", i-1, \"länge\", len(result[i - 2]))\n tmp = calc(result[i - 2], i)\n print(\"Time:\", time.clock()-t1)\n if tmp == []:\n break\n\n result.append(tmp)\n # print(result[i - 2])\n\n result1 = []\n #print(\"result\", result)\n for x in result:\n for i in range(len(x)):\n number = set()\n a = x[i]\n j = 1\n # print(\"AAAAA\", a%2)\n while a != 0:\n # print(a)\n # print(\"AAAAA\", a % 2, j)\n if a % 2 == 1:\n number |= {j}\n j += 1\n a = int(a / 2)\n sorted(number)\n result1.append(number)\n\n print(result1)\n\n\n\nt = time.clock()\nmyprog()\nprint(time.clock()-t)\n\nprint(\"Vergleichzeit\", tie)","sub_path":"itemset_mining/apiori/apiori.py","file_name":"apiori.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"30433937","text":"#!/usr/local/bin/python3\n\n#required libraries\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom mpl_toolkits.basemap import Basemap\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\nimport gdal, ogr, osr\nimport numpy as np\nimport argparse\nimport pandas as pd\nfrom glob import glob\nimport os\nimport subprocess\nimport sys\nimport shutil\nimport pfio\n\n#Parsing arguments\n\nparser = argparse.ArgumentParser(description='Create subset input files for ParFlow simulation')\nsubparsers = parser.add_subparsers(dest='type',help='subset using three options:')\n\n#group 1: using shapefile\nparser_a = subparsers.add_parser('shapefile', help='subset using shapefile and the selected id of watershed')\nparser_a.add_argument('-shp_file',type=str, help = 'input shapefile')\nparser_a.add_argument('-id',type=int, help = 'id of the selected watershed')\nparser_a.add_argument('-out_name',type=str, help = 'name of output solidfile (required)')\nparser_a.add_argument('-dx',type=int, help = 'spatial resolution of solidfile (optional). Default is 1000')\nparser_a.add_argument('-dz',type=int, help = 'lateral resolution of solidfile (optional). Default is 1000')\nparser_a.add_argument('-printmask',type=int, help = 'print mask (optional). 
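# NOTE: the three subparser groups in this script repeat the same
# -out_name/-dx/-dz/-printmask options; argparse can share them through a
# parent parser. Sketch:
#
#     common = argparse.ArgumentParser(add_help=False)
#     common.add_argument('-out_name', type=str,
#                         help='name of output solidfile (required)')
#     common.add_argument('-dx', type=int, default=1000)
#     common.add_argument('-dz', type=int, default=1000)
#     common.add_argument('-printmask', type=int, default=0)
#     parser_a = subparsers.add_parser('shapefile', parents=[common])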
Default is 0')\n#parser_a.add_argument('-z_bottom',type=int, help = 'bottom of domain (optional). Default is 0')\n#parser_a.add_argument('-z_top',type=int, help = 'top of domain (optional). Default is 1000')\n\n#group 2: using mask file\nparser_b = subparsers.add_parser('mask', help='subset using a mask file')\nparser_b.add_argument('-mask_file',type=str, help = 'input mask file')\nparser_b.add_argument('-out_name',type=str, help = 'name of output solidfile (required)')\nparser_b.add_argument('-dx',type=int, help = 'spatial resolution of solidfile (optional). Default is 1000')\nparser_b.add_argument('-dz',type=int, help = 'lateral resolution of solidfile (optional). Default is 1000')\nparser_b.add_argument('-printmask',type=int, help = 'print mask (optional). Default is 0')\n#parser_b.add_argument('-z_bottom',type=int, help = 'bottom of domain (optional). Default is 0')\n#parser_b.add_argument('-z_top',type=int, help = 'top of domain (optional). Default is 1000')\n\n#group 3: using custom watershed\nparser_c = subparsers.add_parser('define_watershed', help='subset using a newly created watershed')\nparser_c.add_argument('-dir_file',type=str, help = 'input direction file',)\nparser_c.add_argument('-outlet_file',type=str, help = 'file contains coordinates of outlet points')\nparser_c.add_argument('-out_name',type=str, help = 'name of output solidfile (required)')\nparser_c.add_argument('-dx',type=int, help = 'spatial resolution of solidfile (optional). Default is 1000')\nparser_c.add_argument('-dz',type=int, help = 'lateral resolution of solidfile (optional). Default is 1000')\nparser_c.add_argument('-printmask',type=int, help = 'print mask (optional). Default is 0')\n#parser_c.add_argument('-z_bottom',type=int, help = 'bottom of domain (optional). Default is 0')\n#parser_c.add_argument('-z_top',type=int, help = 'top of domain (optional). Default is 1000')\n\n###required raster files\n\nif not os.path.isdir('CONUS1_inputs/'):\n\tos.mkdir('CONUS1_inputs/')\n\nconus_pf_1k_mask = 'CONUS1_inputs/conus_1km_PFmask2.tif'\nconus_pf_1k_sinks = 'CONUS1_inputs/conus_1km_PFmask_manualsinks.tif' #1 for cells inside domain, 0 for cells outside domain, 2 for sinks\nconus_pf_1k_lakes = 'CONUS1_inputs/conus_1km_PFmask_selectLakesmask.tif' #1 for lakes, 0 for everything else\nconus_pf_1k_lakes_border = 'CONUS1_inputs/conus_1km_PFmask_selectLakesborder.tif'\nconus_pf_1k_border_type = 'CONUS1_inputs/1km_PF_BorderCellType.tif' # A mask marking with 1 for for cells with an ocean border and 2 for cells with a land border\n\nconus_pf_1k_tifs = [conus_pf_1k_mask,conus_pf_1k_sinks,conus_pf_1k_lakes,\n\t\t\t\t\tconus_pf_1k_lakes_border,conus_pf_1k_border_type]\navra_path_tif = '/iplant/home/shared/avra/CONUS2.0/Inputs/domain/'\n\n###check if file exits, if not we need to login to avra and download. 
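# NOTE: the download blocks below shell out via os.system('iget ...'),
# whose failures are easy to miss; subprocess.run with check=True raises on
# a non-zero exit status. Sketch, assuming the iRODS icommands are on PATH:
#
#     import subprocess
#     subprocess.run(['iget', '-K',
#                     avra_path_tif + os.path.basename(tif_file),
#                     'CONUS1_inputs/'], check=True)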
This part requires icommand authorization\nif any([not os.path.isfile(x) for x in conus_pf_1k_tifs]):\t\n\tprint(conus_pf_1k_mask+' does not exits...downloading from avra')\n\tauth = os.system('iinit')\n\tif auth != 0:\n\t\tprint('Authentication failed...exit')\n\t\tsys.exit()\n\t\n\tfor tif_file in conus_pf_1k_tifs:\n\t\tos.system('iget -K '+avra_path_tif+os.path.basename(tif_file)+' CONUS1_inputs/')\n\n###required slope files\nslopex_tif = 'CONUS1_inputs/Str3Ep0_smth.rvth_1500.mx0.5.mn5.sec0.up_slopex.tif'\navra_path_slope = '/iplant/home/shared/avra/CONUS2.0/Inputs/Topography/Str5Ep0/'\nif not os.path.isfile(slopex_tif):\n\tos.system('iget -K '+avra_path_slope+os.path.basename(slopex_tif)+' CONUS1_inputs/')\n\nslopey_tif = 'CONUS1_inputs/Str3Ep0_smth.rvth_1500.mx0.5.mn5.sec0.up_slopey.tif'\nif not os.path.isfile(slopey_tif):\n\tos.system('iget -K '+avra_path_slope+os.path.basename(slopey_tif)+' CONUS1_inputs/')\n\n###required subsurface file\nsubsurface_tif = 'CONUS1_inputs/3d-grid.v3.tif'\n\n#check if subsurface_tif is exists\nif not os.path.isfile(subsurface_tif):\n\tprint(subsurface_tif+' does not exits...download and process from avra')\n\tgrid_3d_file = '3d-grid.v3.txt'\n\tavra_path_subsurface = '/iplant/home/shared/avra/CONUS_1.0/SteadyState_Final/Input_Development/Subsurface/'+\\\n\t\tgrid_3d_file\n\tos.system('iget -K '+avra_path_subsurface+' CONUS1_inputs/')\n\tos.chdir('utils')\n\tos.system('python3 map_conus_1_to_2.py ../CONUS1_inputs/'+grid_3d_file)\n\tos.chdir('..')\n\n###required PME file\npme_tif = 'CONUS1_inputs/PME.tif'\n\nif not os.path.isfile(pme_tif):\n\tprint(pme_tif+' does not exits...download and process from avra')\n\tpme_file = 'PME.txt'\n\tavra_path_pme = '/iplant/home/shared/avra/CONUS_1.0/SteadyState_Final/Input_Development/PME/'+\\\n\t\tpme_file\n\tos.system('iget -K '+avra_path_pme+' CONUS1_inputs/')\n\tos.chdir('utils')\n\tos.system('python3 map_conus_1_to_2.py ../CONUS1_inputs/'+pme_file)\n\tos.chdir('..')\n\n#parsing arguments\nargs = parser.parse_args()\n\n#deal with optional arguments\nif not args.dx:\n\tdx = 1000\nelse:\n\tdx = args.dx\n\nif not args.dz:\n\tdz = 1000\nelse:\n\tdz = args.dz\n\nif not args.printmask:\n\tprintmask = 0\nelse:\n\tprintmask = 1\n\nif not args.out_name:\n\tprint ('need to specified out_name')\n\tsys.exit()\nelse:\n\tout_name = args.out_name\n\nlist_conus_inputs = [subsurface_tif,pme_tif,slopex_tif,slopey_tif]\n\nif args.type == 'shapefile':\n\tbasin_id = args.id\n\tregion_shp = args.shp_file\n\t#create domain\n\tos.chdir('Create_Subdomain')\n\tcreate_sub = subprocess.run(['python3', 'subset_domain.py',\n\t\t\t\t\t\t\t'shapefile','-shp_file',region_shp,\n\t\t\t\t\t\t\t'-id',str(basin_id),\n\t\t\t\t\t\t\t'-out_name',out_name,\n\t\t\t\t\t\t\t'-printmask',str(printmask)], stdout=subprocess.PIPE)\n\ttemp_list = create_sub.stdout.decode('utf-8').split('\\n')\n\tbatches = ''\n\tfor line in temp_list:\n\t\tif 'Number of triangles in patch' in line:\n\t\t\tline = line.strip()\n\t\t\tbatches += line.split()[-3]+' '\n\t#os.system('python3 subset_domain.py shapefile -shp_file '+region_shp+\\\n\t#\t\t\t\t' -id '+str(basin_id)+' -out_name '+out_name+' -printmask '+str(printmask))\n\tos.chdir('..')\n\t#subset input\n\tos.chdir('Clip_Inputs')\n\tfor input in list_conus_inputs:\n\t\tos.system('python3 clip_inputs.py -i ../'+\\\n\t\t\t\t\tinput+' shapefile -shp_file '+region_shp+\\\n\t\t\t\t\t' -id '+str(basin_id)+' -out_name '+out_name+'_'+\\\n\t\t\t\t\tos.path.basename(input)+' -printmask '+str(printmask))\n\tos.chdir('..')\n\nelif 
args.type == 'mask':\n\tmask_file = args.mask_file\n\tif not os.path.isfile(mask_file):\n\t\tprint (mask_file+' does not exits...please create one')\n\t\tsys.exit()\n\t#create domain\n\tos.chdir('Create_Subdomain')\n\tcreate_sub = subprocess.run(['python3', 'subset_domain.py',\n\t\t\t\t\t\t\t'mask','-mask_file',mask_file,\n\t\t\t\t\t\t\t'-out_name',out_name,\n\t\t\t\t\t\t\t'-printmask',str(printmask)], stdout=subprocess.PIPE)\n\ttemp_list = create_sub.stdout.decode('utf-8').split('\\n')\n\tbatches = ''\n\tfor line in temp_list:\n\t\tif 'Number of triangles in patch' in line:\n\t\t\tline = line.strip()\n\t\t\tbatches += line.split()[-3]+' '\n\t#os.system('python3 subset_domain.py mask -mask_file '+mask_file+\\\n\t#\t\t\t\t' -out_name '+out_name+' -printmask '+str(printmask))\n\tos.chdir('..')\n\t#subset input\n\tos.chdir('Clip_Inputs')\n\tfor input in list_conus_inputs:\n\t\tos.system('python3 clip_inputs.py -i ../'+\\\n\t\t\t\t\tinput+' mask -mask_file '+mask_file+\\\n\t\t\t\t\t' -out_name '+out_name+'_'+\\\n\t\t\t\t\tos.path.basename(input)+' -printmask '+str(printmask))\n\tos.chdir('..')\n\nelif args.type == 'define_watershed':\n\tdir_file = args.dir_file\n\toutlet_file = args.outlet_file\n\tif not os.path.isfile(outlet_file):\n\t\tprint (outlet_file+' does not exits...please create one')\n\t\tsys.exit()\n\t\n\t#create domain\n\tos.chdir('Create_Subdomain')\n\tcreate_sub = subprocess.run(['python3', 'subset_domain.py',\n\t\t\t\t\t\t\t'define_watershed','-dir_file',dir_file,\n\t\t\t\t\t\t\t'-outlet_file',outlet_file,\n\t\t\t\t\t\t\t'-out_name',out_name,\n\t\t\t\t\t\t\t'-printmask',str(printmask)], stdout=subprocess.PIPE)\n\ttemp_list = create_sub.stdout.decode('utf-8').split('\\n')\n\tbatches = ''\n\tfor line in temp_list:\n\t\tif 'Number of triangles in patch' in line:\n\t\t\tline = line.strip()\n\t\t\tbatches += line.split()[-3]+' '\n\t#os.system('python3 subset_domain.py define_watershed -dir_file '+dir_file+\\\n\t#\t\t\t\t' -outlet_file '+outlet_file+\\\n\t#\t\t\t\t' -out_name '+out_name+' -printmask '+str(printmask))\n\tos.chdir('..')\n\t#subset input\n\tos.chdir('Clip_Inputs')\n\tfor input in list_conus_inputs:\n\t\tos.system('python3 clip_inputs.py -i ../'+\\\n\t\t\t\t\tinput+' define_watershed -dir_file '+dir_file+\\\n\t\t\t\t\t' -outlet_file '+outlet_file+\\\n\t\t\t\t\t' -out_name '+out_name+'_'+\\\n\t\t\t\t\tos.path.basename(input)+' -printmask '+str(printmask))\n\tos.chdir('..')\n#move newly created files to input_files folder\nif os.path.isdir('input_files/'):\n\tshutil.rmtree('input_files/')\n\nos.mkdir('input_files/')\nos.system('cp Create_Subdomain/'+out_name+'.pfsol input_files/')\nos.system('cp Clip_Inputs/'+out_name+'_*.pfb input_files/')\n\n#generate tcl script and run\ninput_files = sorted(glob('input_files/*'))\nos.chdir('Make_Tcl')\n\nos.system('python3 generate_tcl.py -o '+out_name+'.tcl '+\\\n\t\t\t'-i parking_lot_template.tcl --runname '+out_name+\\\n\t\t\t' -sl ../'+input_files[-1]+\\\n\t\t\t' -so ../'+input_files[0]+' -evap 1 '+\n\t\t\t'--evap_file ../'+input_files[2]+' -e 10 --batches '+batches)\n\nos.chdir('..')\n\nif os.path.isdir('run_output/'):\n\tshutil.rmtree('run_output/')\n\nos.mkdir('run_output')\nos.system('cp Make_Tcl/'+out_name+'.tcl run_output/')\nos.chdir('run_output')\nos.system('tclsh '+out_name+'.tcl')\n\n\n","sub_path":"general_subset.py","file_name":"general_subset.py","file_ext":"py","file_size_in_byte":10370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"306811846","text":"import 
cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nclass Point(object):\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def get_x(self):\n return self.x\n\n def get_y(self):\n return self.y\n\n\nclass RegionGrow(object):\n def __init__(self, img, seeds, thresh=5, points_num=8):\n self.img = img\n self.seeds = seeds\n self.thresh = thresh\n self.points_num = points_num\n\n def gray_diff(self, cur_point, tmp_point):\n return abs(int(self.img[cur_point.x, cur_point.y]) - int(self.img[tmp_point.x, tmp_point.y]))\n\n def select_connects(self):\n connects = []\n if self.points_num == 8:\n connects = [Point(-1, -1), Point(0, -1), Point(1, -1), Point(1, 0), Point(1, 1),\n Point(0, 1), Point(-1, 1), Point(-1, 0)]\n elif self.points_num == 4:\n connects = [Point(0, -1), Point(1, 0), Point(0, 1), Point(-1, 0)]\n return connects\n\n def region_grow(self):\n img = self.img\n height, weight = img.shape[:2]\n seed_mark = np.zeros(img.shape)\n seed_list = self.seeds\n\n label = 1\n connects = self.select_connects()\n while len(seed_list) > 0:\n cur_point = seed_list.pop(0)\n seed_mark[cur_point.x, cur_point.y] = label\n for i in range(self.points_num):\n tmp_x = cur_point.x + connects[i].x\n tmp_y = cur_point.y + connects[i].y\n if tmp_x < 0 or tmp_y < 0 or tmp_x >= height or tmp_y >= weight:\n continue\n diff = self.gray_diff(cur_point, Point(tmp_x, tmp_y))\n if diff < self.thresh and seed_mark[tmp_x, tmp_y] == 0:\n seed_mark[tmp_x, tmp_y] = label\n seed_list.append(Point(tmp_x, tmp_y))\n return seed_mark\n\n\nif __name__ == '__main__':\n\n fig, ax = plt.subplots(1, figsize=(12, 12))\n im = plt.imread('./output/overlap.jpg')\n\n plt.imshow(im)\n pos = plt.ginput(-1)\n plt.show()\n\n seed_point = []\n seed_point_copy = []\n for seed in pos:\n seed_point.append(Point(int(seed[1]), int(seed[0])))\n seed_point_copy.append(Point(int(seed[1]), int(seed[0])))\n\n image1 = cv2.imread('./input/sample1.tif')\n image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2LAB)\n L1, A1, B1 = cv2.split(image1)\n src1 = RegionGrow(L1, seed_point, thresh=13, points_num=8)\n result1 = src1.region_grow()\n\n image2 = cv2.imread('./input/sample2.tif')\n image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2LAB)\n L2, A2, B2 = cv2.split(image2)\n src2 = RegionGrow(L2, seed_point_copy, thresh=5, points_num=8)\n result2 = src2.region_grow()\n\n cv2.imwrite('./output/buildings1.jpg', result1 * 255)\n cv2.imwrite('./output/buildings2.jpg', result2 * 255)\n","sub_path":"overlap/overlap.py","file_name":"overlap.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"488824907","text":"import os \r\nimport sys\r\nimport time\r\n\r\ninput_list = open(sys.argv[1], 'r')\r\nprey_input = open(sys.argv[2], 'r')\r\nstamped_app = r\"shiny_bubble\" + str(time.strftime('_%d_%m_%Y_%H_%M')) \r\ncmd = r\"cp -r /srv/shiny-server/shiny_bubble /srv/shiny-server/\" + str(stamped_app) \r\nos.system(cmd)\r\n\r\nif sys.argv[3] != 'None':\r\n\tcrapome = open(sys.argv[3], 'r')\r\n\tcrap_file = open('/srv/shiny-server/'+ str(stamped_app) + '/craptest.txt', 'w')\r\n\tglob_manip = open('/srv/shiny-server/shiny_bubble/global.R', 'r')\r\n\tglob_write = open('/srv/shiny-server/'+ str(stamped_app) + '/global.R', 'w')\r\n\tfor code_line in glob_manip:\r\n\t\tif r\"main.data <- as.data.frame\\(merge_files\" in code_line:\r\n\t\t\tglob_write.write(r\"main.data <- as.data.frame(merge_files(\\\"test_list.txt\\\", \\\"preytest.txt\\\", 
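# NOTE: the substring test above, r"main.data <- as.data.frame\(merge_files",
# keeps the backslash literally (this is a plain `in` check, not a regex),
# so it can never match an R source line that reads
# `main.data <- as.data.frame(merge_files(`. Dropping the backslash gives a
# working substring match:
#
#     if "main.data <- as.data.frame(merge_files" in code_line: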
\\\"craptest.txt\\\"))\")\r\n\t\telse:\r\n\t\t\tglob_write.write(code_line)\r\n\tfor line in crapome:\r\n\t\tcrap_file.write(line)\r\n\r\ninput_file = open('/srv/shiny-server/'+ str(stamped_app) + '/test_list.txt', 'w')\r\nfor line in input_list:\r\n\tinput_file.write(line)\r\nprey_file = open('/srv/shiny-server/'+ str(stamped_app) + '/preytest.txt', 'w')\r\nfor line in prey_input:\r\n\tprey_file.write(line)\r\n\r\n\r\n\r\n\r\n#cmd1 = r\"touch '/srv/shiny-server/\" + str(stamped_app) + r\"/restart.txt\"\r\n#os.system(cmd1)\r\n\r\nwith open(\"shiny.txt\", \"wt\") as x:\r\n\tx.write(\" open
    Shiny Bubblebeam in your browser to view shiny app. If there are issues with the sizing within galaxy you can right click and open in a new tab or window.\")\r\n\r\nos.rename('shiny.txt', str(sys.argv[4]))\r\n","sub_path":"tools/Moffitt_Tools/shiny_wrapper.py","file_name":"shiny_wrapper.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"104336785","text":"\"\"\"\nRevisiting Deep Learning Models for Tabular Data\nhttps://arxiv.org/abs/2106.11959\n\"\"\"\n\nimport logging\nfrom typing import Optional\n\nimport torch.nn as nn\nfrom torch import Tensor\n\nfrom deep_table.nn.encoders.backbone.base import BaseBackbone\nfrom deep_table.nn.layers.transformer import TransformerEncoderLayer\n\nlogger = logging.getLogger(__name__)\n\n\nclass FTTransformerBackbone(BaseBackbone):\n def __init__(\n self,\n num_features: int,\n dim_embed: int,\n use_cls: bool = True,\n n_blocks: int = 3,\n n_heads: int = 4,\n dim_head: Optional[int] = None,\n dim_feedforward: int = 256,\n dropout: float = 0.1,\n activation: str = \"relu\",\n ) -> None:\n \"\"\"\n Args:\n num_features (int)\n dim_embed (int)\n use_cls (bool): Defaults to True.\n n_blocks (int): Defaults to 3.\n n_heads (int): Defaults to 4.\n dim_head (int, optional)\n dim_feedforward (int): Defaults to 256.\n dropout (float): Defaults to 0.1.\n activation (str): {\"relu\", \"gelu\"}. Defaults to \"relu\".\n \"\"\"\n super().__init__()\n self.dim_embed = dim_embed\n self.num_features = num_features\n self.use_cls = use_cls\n self.transformer = nn.ModuleList(\n [\n TransformerEncoderLayer(\n d_model=dim_embed,\n n_heads=n_heads,\n dim_head=dim_head,\n dim_feedforward=dim_feedforward,\n dropout=dropout,\n activation=activation,\n )\n for _ in range(n_blocks)\n ]\n )\n\n def dim_out(self, is_pretrain: bool = False) -> int:\n if not is_pretrain and self.use_cls:\n return self.dim_embed\n else:\n return self.num_features * self.dim_embed\n\n def forward(self, x: Tensor) -> Tensor:\n for transformer in self.transformer:\n x = transformer(x)\n return x\n","sub_path":"deep_table/nn/encoders/backbone/ft_transformer.py","file_name":"ft_transformer.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"576720758","text":"default_char = '@'\n\n# ====== IMPORTS ======\n# For copying the cached variable\nimport copy\n\n# For printing undefined chars.\nfrom util.output import try_print\n\n# Reddit API\nimport praw\n\nclass RedditInstance:\n # ====== STORAGE FOR POSTS ======\n # This will contain a dictionary, one entry per subreddit.\n # Each entry will be a list of posts. 
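# NOTE: in FTTransformerBackbone above, dim_out() returns dim_embed when a
# [CLS] token is used (only that token's embedding feeds the head) and
# num_features * dim_embed otherwise (all token embeddings are flattened).
# Shape check with hypothetical sizes, assuming the project's custom
# TransformerEncoderLayer is shape-preserving like standard encoder layers:
#
#     import torch
#     net = FTTransformerBackbone(num_features=10, dim_embed=32)
#     x = torch.randn(4, 10, 32)        # (batch, tokens, dim_embed)
#     assert net(x).shape == (4, 10, 32)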
Posts are dictionaries.\n # post = {'title':'test', 'score':'41232', 'link':'www.example.com'}\n # posts = [post1, post2, post3, post4]\n # formatted_complete = {'subreddit_name': posts}\n _cached_posts_dict = {}\n\n # Constructor\n def __init__(self, id, secret, user_agent):\n self._reddit_instance = praw.Reddit(client_id=id,\n client_secret=secret,\n user_agent=user_agent)\n\n # Default search variable values.\n # This dictuionary contains data in the form 'subreddit : number of posts to print'\n self._subreddit_dict = {'all': 5, 'news': 5, 'worldnews': 10}\n self._timeframe = 'day'\n\n self.update()\n\n # Mutators\n def add_subreddit(self, subreddit, num_posts):\n try:\n self._subreddit_dict[str(subreddit)] = int(num_posts)\n except Exception as e:\n print(\"Invalid input to add_subreddit.\\n\" + str(e))\n\n def remove_subreddit(self, subreddit):\n try:\n return self._subreddit_dict.pop(subreddit)\n except Exception:\n return None\n\n def update(self):\n unformatted = self._get_all_posts()\n\n # Create the empty dictionary\n formatted_complete = {}\n\n # For each subreddit\n for subreddit in unformatted.keys():\n\n # Create a new empty list of posts.\n formatted_post_list = []\n\n for post in unformatted[subreddit]:\n # Create a new empty dictionary for each post.\n formatted_post = {'title': post.title, 'score': post.score, 'link': post.url}\n\n formatted_post_list.append(formatted_post.copy())\n\n formatted_complete[subreddit] = formatted_post_list.copy()\n\n self._cached_posts_dict = formatted_complete\n\n return copy.deepcopy(self._cached_posts_dict)\n\n # Accessors\n def get_cached_posts_dict(self):\n return copy.deepcopy(self._cached_posts_dict)\n\n def print(self):\n print(\"\\n============ Top posts of Leddit ============\")\n\n for subreddit in self._cached_posts_dict.keys():\n print(\"Subreddit: \" + subreddit)\n\n # Sort the posts by score for printing.\n sorted_list = self._cached_posts_dict[subreddit].copy()\n sorted_list.sort(key=lambda post: post['score'], reverse=True)\n\n # Index is the printing index that shows on screen.\n index = 1\n for post in sorted_list:\n # @Cleanup: make the spacing for the printouts nicer\n print(\"\\t\" + str(index), end=') ')\n print(\"[\" + str(post['score']) + \"]\", end='')\n print(\"[\" + \"LINK\" + \"] \", end='')\n\n try_print(post['title'], '')\n print()\n index += 1\n\n # Private internal functions\n def _get_all_posts(self):\n \"\"\"\n :return: Returns a dict mapping Subreddits to lists of Submissions. 
Still needs to be decoded.\n \"\"\"\n\n complete_subreddit_dict = {}\n master_titles_list = []\n\n for subreddit in self._subreddit_dict.keys():\n posts_for_current_subreddit = []\n for post in self._get_posts_for_subreddit(subreddit):\n post_title = post.title\n if post_title not in master_titles_list:\n posts_for_current_subreddit.append(post)\n\n # Sort the posts for the current subreddit by score.\n posts_for_current_subreddit.sort(key=lambda post: post.score, reverse=True)\n\n # Append the list of posts for this subreddit to the master dict.\n complete_subreddit_dict[subreddit] = posts_for_current_subreddit.copy()\n\n return complete_subreddit_dict\n\n def _get_posts_for_subreddit(self, sub):\n return self._reddit_instance.subreddit(sub).top(time_filter=self._timeframe,\n limit=int(self._subreddit_dict[sub]))\n","sub_path":"src/reddit_api/reddit.py","file_name":"reddit.py","file_ext":"py","file_size_in_byte":4368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"334124966","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\nFile: bot.py\nAuthor: Julius Tens\nE-Mail: mail@julius-tens.de\nWeb: https://github.com/juliuste\nDate: 10.05.2016\n\nWortwürfelBot, requests the (german) Wiktionary API for a random entry. \n\"\"\"\n\nimport telebot, request\n\nbotkey = 'INSERT YOUR BOT KEY HERE'\n\nbot = telebot.TeleBot(botkey)\n\nmarkup = telebot.types.ReplyKeyboardMarkup()\nbutton1 = telebot.types.KeyboardButton('RAUSHAUEN')\nbutton2 = telebot.types.KeyboardButton('langes Wort')\nmarkup.add(button1, button2)\n\n@bot.message_handler(func=lambda message: message.text == 'langes Wort')\ndef send_longWord(message):\n\tbot.send_message(message.chat.id, request.randomLongWord(), reply_markup=markup)\n\n@bot.message_handler(func=lambda message: message.text != 'langes Wort')\ndef send_longWord(message):\n\tbot.send_message(message.chat.id, request.randomWord(), reply_markup=markup)\n\nprint('WortwürfelBot is running...')\n\nbot.polling()","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"115652124","text":"\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom convex_adversarial import robust_loss, robust_loss_parallel\nimport torch.optim as optim\n\nimport numpy as np\nimport time\nimport gc\n\nfrom trainer import *\nimport cv2 as cv\nimport random\n\ndef train_ibp(loader, model, opt, epsilon, epoch, log, verbose):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n errors = AverageMeter()\n\n model.train()\n\n end = time.time()\n for i, (X,y) in enumerate(loader):\n X,y = X.cuda(), y.cuda()\n data_time.update(time.time() - end)\n\n\n alpha = 0.5\n out = model(Variable(X))\n out_l, out_h = model.forward2(Variable(X - epsilon), Variable(X + epsilon))\n out_hat = out_h\n for i in range(out_l.shape[0]):\n out_hat[i][y[i]] = out_l[i][y[i]]\n ce = alpha * nn.CrossEntropyLoss()(out, Variable(y)) + (1.0-alpha) * nn.CrossEntropyLoss()(out_hat, Variable(y))\n err = (out.data.max(1)[1] != y).float().sum() / X.size(0)\n\n opt.zero_grad()\n ce.backward()\n opt.step()\n\n batch_time.update(time.time()-end)\n end = time.time()\n losses.update(ce.data.item(), X.size(0))\n errors.update(err, X.size(0))\n\n print(epoch, i, ce.data.item(), err, file=log)\n if verbose and i % verbose == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} 
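# NOTE: train_ibp() above mixes clean and worst-case cross-entropy:
# model.forward2 is assumed to return elementwise lower/upper logit bounds
# for the input box [X - epsilon, X + epsilon], and the worst-case logits
# take the lower bound on the true class and the upper bound elsewhere,
# weighted by alpha against the clean loss. Also note the inner
# `for i in range(out_l.shape[0])` reuses the enumerate variable `i`, so the
# batch index printed in this progress line is wrong.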
({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Error {errors.val:.3f} ({errors.avg:.3f})'.format(\n epoch, i, len(loader), batch_time=batch_time,\n data_time=data_time, loss=losses, errors=errors))\n log.flush()\n\n\ndef evaluate_rotations(loader, model, epsilon, epoch, log, verbose):\n batch_time = AverageMeter()\n losses = AverageMeter()\n errors = AverageMeter()\n\n model.eval()\n\n end = time.time()\n for i, (X,y) in enumerate(loader):\n # print(\"Got value of X\")\n # print(X.numpy()[0][0])\n\n npX = np.array(X)\n cols = 28\n rows = 28\n for i in range(npX.shape[0]):\n rotation_degree = random.randint(-15, 15)\n rotation_degree = rotation_degree + 15 * np.sign(rotation_degree)\n # print(\"Random\", rotation_degree)\n M = cv.getRotationMatrix2D((cols / 2, rows / 2), rotation_degree, 1)\n npX[i][0] = cv.warpAffine(npX[i][0], M, (cols, rows))\n # cv.namedWindow('image', cv.WINDOW_NORMAL)\n # cv.imshow('image', npX[i][0])\n # cv.resizeWindow('image', 600, 600)\n # cv.waitKey(0)\n # cv.destroyAllWindows()\n X = torch.from_numpy(npX)\n X,y = X.cuda(), y.cuda()\n out = model(Variable(X))\n ce = nn.CrossEntropyLoss()(out, Variable(y))\n err = (out.data.max(1)[1] != y).float().sum() / X.size(0)\n\n # print to logfile\n print(epoch, i, ce.data.item(), err, file=log)\n\n # measure accuracy and record loss\n losses.update(ce.data.item(), X.size(0))\n errors.update(err, X.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if verbose and i % verbose == 0:\n print('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Error {error.val:.3f} ({error.avg:.3f})'.format(\n i, len(loader), batch_time=batch_time, loss=losses,\n error=errors))\n log.flush()\n\n print(' * Error {error.avg:.3f}'\n .format(error=errors))\n return errors.avg\n","sub_path":"examples/trainer2.py","file_name":"trainer2.py","file_ext":"py","file_size_in_byte":3814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"4567100","text":"import numpy as np\nimport os\nimport pandas as pd\nimport cv2\nfrom scipy import ndimage\nimport skimage\nfrom sklearn.utils import shuffle\n\nSIMULATOR_HOME = \"../data/\"\nDRIVING_LOG_FILE = \"driving_log.csv\"\nDRIVING_LOG_FILE_PATH = os.path.join(SIMULATOR_HOME, DRIVING_LOG_FILE)\nIMAGE_PATH = os.path.join(SIMULATOR_HOME, \"IMG\")\ndriving_log = pd.read_csv(DRIVING_LOG_FILE_PATH)\ndriving_log.columns = [\"center\", \"left\", \"right\", \"steering\", \"throttle\", \"brake\", \"speed\"]\ndriving_log[\"new\"] = 0\n\nMY_DATA_HOME = \"../mydata/\"\nMY_LOG_FILE_PATH = os.path.join(MY_DATA_HOME, DRIVING_LOG_FILE)\nMY_IMAGE_PATH = os.path.join(MY_DATA_HOME, \"IMG\")\nmy_driving_log = pd.read_csv(MY_LOG_FILE_PATH)\nmy_driving_log.columns = [\"center\", \"left\", \"right\", \"steering\", \"throttle\", \"brake\", \"speed\"]\nmy_driving_log[\"new\"] = 1\n\nall_driving = pd.concat([driving_log,my_driving_log]).reset_index(drop=True)\n\n############################\n# Functions for Loading data\n############################\n\ndef load_data_from_frames():\n offset = 1.2\n dist = 100.0\n\n df_cn = all_driving.copy()[[\"center\", \"steering\", \"new\"]]\n df_cn.columns = [\"image_path\", \"angle\", \"new\"]\n\n df_lf = all_driving.copy()[[\"left\", \"steering\", \"new\"]]\n df_lf.columns = [\"image_path\", \"angle\", \"new\"]\n dsteering = -offset / dist * 360 / (2 * 
np.pi) / 25.0\n df_lf.angle += dsteering\n\n df_rh = all_driving.copy()[[\"right\", \"steering\", \"new\"]]\n df_rh.columns = [\"image_path\", \"angle\", \"new\"]\n dsteering = offset / dist * 360 / (2 * np.pi) / 25.0\n df_rh.angle -= dsteering\n\n df_all = pd.concat([df_cn, df_lf, df_rh]).reset_index(drop=True)\n return df_all\n\ndef load_training_validation_df(all_data):\n train_data = all_data.sample(frac=0.8, random_state=200123)\n validation_data = all_data.drop(train_data.index)\n return train_data, validation_data\n\n\ndef data_generator_for_vis(df, index=0, batch_size=1):\n m = np.random.randint(0, len(df.index))\n df_batch = df[m: m + batch_size]\n\n # Ignoring the last batch which is smaller than the requested batch size\n #if (df_batch.shape[0] == batch_size):\n X_batch = []\n y_batch = []\n for i , row in df_batch.iterrows():\n img = get_image(row) #row[\"image_path\"].strip()\n angle = row[\"angle\"]\n # Normal image\n X_batch.append(img)\n y_batch.append(angle)\n # Random brightness\n b_img = random_brightness(img)\n # Random Shadow\n sh_img = add_random_shadow(b_img)\n # Random Sheer\n s_img, s_angle = random_shear(sh_img, angle, shear_range=20)\n # Normal with random Translate\n t_img, t_angle = trans_image(s_img, s_angle)\n X_batch.append(t_img)\n y_batch.append(t_angle)\n # Flipped image\n f_img = get_flipped_image(img)\n # Flipped Random brightness\n fb_img = random_brightness(f_img)\n # Flipped Random Shadow\n fsh_img = add_random_shadow(fb_img)\n # Flipped Random Sheer\n fs_img, fs_angle = random_shear(fsh_img, -angle, shear_range=40)\n # Flipped Normal with random Translate\n ft_img, ft_angle = trans_image(fs_img, fs_angle)\n X_batch.append(ft_img)\n y_batch.append(ft_angle)\n\n #X_batch, batch_y = shuffle(X_batch, y_batch)\n\n #X_batch = np.array([get_image(row) for i, row in df_batch.iterrows()])\n #y_batch = np.array([row['angle'] for i, row in df_batch.iterrows()])\n return (np.array(X_batch), np.array(y_batch))\n\ndef data_generator(df, batch_size=128, is_training=1):\n n_rows = df.shape[0]\n while True:\n # Shuffle the data frame rows after every complete cycle through the data\n #df = df.sample(frac=1).reset_index(drop=True)\n\n for index in range(0, n_rows, batch_size):\n df_batch = df[index: index + batch_size]\n\n # Ignoring the last batch which is smaller than the requested batch size\n #if (df_batch.shape[0] == batch_size):\n X_batch = []\n y_batch = []\n for i , row in df_batch.iterrows():\n img = get_image(row) #row[\"image_path\"].strip()\n angle = row[\"angle\"]\n # Normal image\n X_batch.append(img)\n y_batch.append(angle)\n if is_training == 1:\n # Random brightness\n b_img = random_brightness(img)\n # Random Shadow\n sh_img = add_random_shadow(b_img)\n # Random Sheer\n s_img, s_angle = random_shear(sh_img, angle, shear_range=40)\n # Normal with random Translate\n t_img, t_angle = trans_image(s_img, s_angle)\n X_batch.append(t_img)\n y_batch.append(t_angle)\n # Flipped image\n f_img = get_flipped_image(img)\n # Flipped Random brightness\n fb_img = random_brightness(f_img)\n # Flipped Random Shadow\n fsh_img = add_random_shadow(fb_img)\n # Flipped Random Sheer\n fs_img, fs_angle = random_shear(fsh_img, -angle, shear_range=40)\n # Flipped Normal with random Translate\n ft_img, ft_angle = trans_image(fs_img, fs_angle)\n X_batch.append(ft_img)\n y_batch.append(ft_angle)\n\n X_batch, batch_y = shuffle(X_batch, y_batch)\n\n #X_batch = np.array([get_image(row) for i, row in df_batch.iterrows()])\n #y_batch = np.array([row['angle'] for i, row in 
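# Worked numeric check of the side-camera steering correction used in
# load_data_from_frames above: offset/dist is the lateral camera shift over
# the look-ahead distance, 360/(2*pi) converts the radian angle to degrees,
# and /25.0 maps degrees into the simulator's steering units.
import numpy as np

offset, dist = 1.2, 100.0
dsteering = offset / dist * 360 / (2 * np.pi) / 25.0
print(round(dsteering, 4))   # 0.0275 steering units of correction per side camera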
df_batch.iterrows()])\n yield (np.array(X_batch), np.array(y_batch))\n\ndef old(f_img,img,angle,X_batch,y_batch):\n # Flipped with random Translate and Rotate\n X_batch.append(translateImage(rotateImage(f_img)))\n y_batch.append(-angle)\n # blurred image\n b_img = get_blurred_image(img)\n X_batch.append(b_img)\n y_batch.append(angle)\n # blurred with random Translate and Rotate\n X_batch.append(translateImage(rotateImage(b_img)))\n y_batch.append(angle)\n # Flipped & Blurred image\n f_b_img = get_blurred_image(get_flipped_image(img))\n X_batch.append(f_b_img)\n y_batch.append(-angle)\n # Flipped & Blurred with random Translate and Rotate\n X_batch.append(translateImage(rotateImage(f_b_img)))\n y_batch.append(-angle)\n # Speckled image\n s_img = get_speckled_image(img)\n X_batch.append(s_img)\n y_batch.append(angle)\n # Speckled with random Translate and Rotate\n X_batch.append(translateImage(rotateImage(s_img)))\n y_batch.append(angle)\n # Flipped & Speckled image\n f_s_img = get_speckled_image(get_flipped_image(img))\n X_batch.append(f_s_img)\n y_batch.append(-angle)\n # Flipped & Speckled with random Translate and Rotate\n X_batch.append(translateImage(rotateImage(f_s_img)))\n y_batch.append(-angle)\n\n############################\n# Functions for Loading Images\n############################\n\ndef get_image(row):\n \"\"\"\n For a given row of the df,\n get the Augmented image based on the operations specified\n in it's name\n \"\"\"\n image_name = row[\"image_path\"].strip()\n\n #ops = get_ops(image_name)\n\n #image_name = ops[0]\n\n #ops = ops[1:]\n if row[\"new\"] == 0:\n #print(os.path.join(SIMULATOR_HOME, image_name))\n image = cv2.imread(os.path.join(SIMULATOR_HOME, image_name))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n return image\n else:\n #print(os.path.join(MY_DATA_HOME, image_name))\n image = cv2.imread(os.path.join(MY_DATA_HOME, image_name))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n return image\n\n #for op in ops:\n #if op == \"INV\":\n #image = get_flipped_image(image)\n\n #elif op == \"BLUR\":\n #image = get_blurred_image(image)\n\n #elif op == \"NOISE\":\n #image = get_speckled_image(image)\n\n #return image\n\ndef pre_process(image, top_prop=0.35, bottom_prop=0.1):\n \"\"\"\n - Crop the top `top_prop` and the bottom `bottom_prop` of the image\n - Resize the image to half of it's original size\n \"\"\"\n rows_to_crop_top = int(image.shape[0] * 0.4)\n rows_to_crop_bottom = int(image.shape[0] * 0.1)\n image = image[rows_to_crop_top:image.shape[0] - rows_to_crop_bottom, :]\n\n return cv2.resize(image, (0,0), fx=0.5, fy=0.5)\n\n#############################\n# Functions for Sampling Data\n#############################\n\ndef sampling_data(df,num_bins = 23):\n angles = df[\"angle\"]\n df_length = len(df.index)\n avg_samples_per_bin = df_length / num_bins\n hist, bins = np.histogram(angles, num_bins)\n keep_probs = []\n target = avg_samples_per_bin * .5\n for i in range(num_bins):\n if hist[i] < target:\n keep_probs.append(1.)\n else:\n keep_probs.append(1. 
/ (hist[i] / target))\n remove_list = []\n for i in range(df_length):\n for j in range(num_bins):\n if angles[i] > bins[j] and angles[i] <= bins[j + 1]:\n # delete from X and y with probability 1 - keep_probs[j]\n if np.random.rand() > keep_probs[j]:\n #df.drop(df.index[i], inplace=True)\n remove_list.append(i)\n df.drop(df.index[[idx for idx in remove_list]], inplace=True)\n df.reset_index(drop=True, inplace=True)\n return df\n #image_paths = np.delete(image_paths, remove_list, axis=0)\n #angles = np.delete(angles, remove_list)\n\n\n############################\n# Functions for Augmentation\n############################\n\ndef get_flipped_image(image):\n \"\"\"\n returns image which is flipped about the vertical axis\n \"\"\"\n return cv2.flip(image, 1)\n\n\ndef get_blurred_image(image):\n \"\"\"\n Performs a gaussian blur on the image and returns it\n \"\"\"\n return ndimage.gaussian_filter(image, sigma=1)\n\n\ndef get_speckled_image(image):\n \"\"\"\n Adds random noise to an image\n \"\"\"\n return skimage.img_as_ubyte(skimage.util.random_noise(image.astype(np.uint8), mode='gaussian'))\n\n\ndef translateImage(image):\n t_x = (np.random.randn(1)*.5)[0]\n t_y = (np.random.randn(1)*.5)[0]\n #print(t_x,t_y)\n rows,cols,_ = image.shape\n M = np.float32([[1,0,t_x],[0,1,t_y]])\n dst = cv2.warpAffine(image,M,(cols,rows))\n return dst\n\ndef rotateImage(image):\n theta = (np.random.randn(1)*5)[0]\n #print(theta)\n rows,cols,_ = image.shape\n M = cv2.getRotationMatrix2D((cols/2,rows/2),theta,1)\n dst = cv2.warpAffine(image,M,(cols,rows))\n return dst\n\n\ndef trans_image(image, steer, tx_range=32,ty_range=32):\n # Translation\n rows,cols,_ = image.shape\n tr_x = tx_range * np.random.uniform() - tx_range / 2\n steer_ang = steer + tr_x / tx_range * 2 * .2\n tr_y = ty_range * np.random.uniform() - ty_range / 2\n # tr_y = 0\n Trans_M = np.float32([[1, 0, tr_x], [0, 1, tr_y]])\n image_tr = cv2.warpAffine(image, Trans_M, (cols, rows))\n\n return image_tr, steer_ang\n\ndef add_random_shadow(image):\n top_y = 320*np.random.uniform()\n top_x = 0\n bot_x = 160\n bot_y = 320*np.random.uniform()\n image_hls = cv2.cvtColor(image,cv2.COLOR_RGB2HLS)\n shadow_mask = 0*image_hls[:,:,1]\n X_m = np.mgrid[0:image.shape[0],0:image.shape[1]][0]\n Y_m = np.mgrid[0:image.shape[0],0:image.shape[1]][1]\n shadow_mask[((X_m-top_x)*(bot_y-top_y) -(bot_x - top_x)*(Y_m-top_y) >=0)]=1\n #random_bright = .25+.7*np.random.uniform()\n if np.random.randint(2)==1:\n random_bright = .5\n cond1 = shadow_mask==1\n cond0 = shadow_mask==0\n if np.random.randint(2)==1:\n image_hls[:,:,1][cond1] = image_hls[:,:,1][cond1]*random_bright\n else:\n image_hls[:,:,1][cond0] = image_hls[:,:,1][cond0]*random_bright\n image = cv2.cvtColor(image_hls,cv2.COLOR_HLS2RGB)\n return image\n\n\ndef random_shear(image, steering, shear_range):\n rows, cols, ch = image.shape\n dx = np.random.randint(-shear_range, shear_range + 1)\n # print('dx',dx)\n random_point = [cols / 2 + dx, rows / 2]\n pts1 = np.float32([[0, rows], [cols, rows], [cols / 2, rows / 2]])\n pts2 = np.float32([[0, rows], [cols, rows], random_point])\n dsteering = dx / (rows / 2) * 360 / (2 * np.pi * 25.0) / 10.0\n M = cv2.getAffineTransform(pts1, pts2)\n image = cv2.warpAffine(image, M, (cols, rows), borderMode=1)\n steering += dsteering\n\n return image, steering\n\n\ndef random_brightness(image):\n image1 = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n random_bright = 1.0 + 0.1 * (2 * np.random.uniform() - 1.0)\n image1[:, :, 2] = image1[:, :, 2] * random_bright\n image1 = cv2.cvtColor(image1, 
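# Standalone check of the keep-probability rule in sampling_data earlier in
# this module: bins holding more than half the average count are downsampled
# with probability target/count, flattening the steering-angle histogram.
# The angle distribution below is illustrative.
import numpy as np

angles = np.array([0.0] * 80 + [0.2] * 15 + [-0.3] * 5)
hist, bins = np.histogram(angles, 3)
target = len(angles) / 3 * 0.5             # half the average samples per bin
keep_probs = [1.0 if h < target else target / h for h in hist]
print(hist, [round(p, 2) for p in keep_probs])
# [ 5 80 15] [1.0, 0.21, 1.0] -- the over-represented 0.0 bin keeps ~21% of its samples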
cv2.COLOR_HSV2RGB)\n    return image1","sub_path":"images_generator.py","file_name":"images_generator.py","file_ext":"py","file_size_in_byte":12694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"115734008","text":"# 5) Write a program. There are two variables, salary and bonus.\n# Salary is an integer, bonus is a boolean. If bonus is true, salary\n# must be multiplied by 10. If false, it is not.\n# 10000, True == '$100000'\n# 25000, True == '$250000'\n# 10000, False == '$10000'\n# 60000, False == '$60000'\nsalary = 5000\nbonus = False\nif bonus:\n    new_salary = salary * 10\n    print(f'{salary}, {bonus} == ${new_salary}')\nelse:\n    print(f'{salary}, {bonus} == ${salary}')","sub_path":"courses/lesson3_task5.py","file_name":"lesson3_task5.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"543251629","text":"from .. import yaml\nfrom .parser import subparsers\nfrom .utils import load_and_decrypt_file\n\ndecrypt_file_parser = subparsers.add_parser(\n    'decrypt-file',\n    description='Decrypt a Treehugger YAML file in-place.',\n)\ndecrypt_file_parser.add_argument('filename', type=str, help='The path to the file to decrypt')\n\n\ndef decrypt_file(args):\n    filename = args.filename\n\n    new_data = load_and_decrypt_file(filename)\n    yaml.save_file(filename, new_data)\n\n    print('Successfully decrypted')\n","sub_path":"treehugger/cli/decrypt_file.py","file_name":"decrypt_file.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"99891251","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport copy\n\nimport astropy.units as u\n\nfrom ._converter import _REGISTRY_FVALIDATORS, _register_validator\n\n__all__ = []\n\n\nclass Parameter:\n    r\"\"\"Cosmological parameter (descriptor).\n\n    Should only be used with a :class:`~astropy.cosmology.Cosmology` subclass.\n\n    Parameters\n    ----------\n    derived : bool (optional, keyword-only)\n        Whether the Parameter is 'derived', default `False`.\n        Derived parameters behave similarly to normal parameters, but are not\n        sorted by the |Cosmology| signature (probably not there) and are not\n        included in all methods. For reference, see ``Ode0`` in\n        ``FlatFLRWMixin``, which removes :math:`\\Omega_{de,0}`` as an\n        independent parameter (:math:`\\Omega_{de,0} \\equiv 1 - \\Omega_{tot}`).\n    unit : unit-like or None (optional, keyword-only)\n        The `~astropy.units.Unit` for the Parameter. If None (default) no\n        unit is assumed.\n    equivalencies : `~astropy.units.Equivalency` or sequence thereof\n        Unit equivalencies for this Parameter.\n    fvalidate : callable[[object, object, Any], Any] or str (optional, keyword-only)\n        Function to validate the Parameter value from instances of the\n        cosmology class.
If \"default\", uses default validator to assign units\n (with equivalencies), if Parameter has units.\n For other valid string options, see ``Parameter._registry_validators``.\n 'fvalidate' can also be set through a decorator with\n :meth:`~astropy.cosmology.Parameter.validator`.\n doc : str or None (optional, keyword-only)\n Parameter description.\n\n Examples\n --------\n For worked examples see :class:`~astropy.cosmology.FLRW`.\n \"\"\"\n\n def __init__(\n self,\n *,\n derived=False,\n unit=None,\n equivalencies=[],\n fvalidate=\"default\",\n doc=None,\n ):\n # attribute name on container cosmology class.\n # really set in __set_name__, but if Parameter is not init'ed as a\n # descriptor this ensures that the attributes exist.\n self._attr_name = self._attr_name_private = None\n\n self._derived = derived\n self.__doc__ = doc\n\n # units stuff\n self._unit = u.Unit(unit) if unit is not None else None\n self._equivalencies = equivalencies\n\n # Parse registered `fvalidate`\n self._fvalidate_in = fvalidate # Always store input fvalidate.\n if callable(fvalidate):\n pass\n elif fvalidate in _REGISTRY_FVALIDATORS:\n fvalidate = _REGISTRY_FVALIDATORS[fvalidate]\n elif isinstance(fvalidate, str):\n raise ValueError(\n f\"`fvalidate`, if str, must be in {_REGISTRY_FVALIDATORS.keys()}\"\n )\n else:\n raise TypeError(\n f\"`fvalidate` must be a function or {_REGISTRY_FVALIDATORS.keys()}\"\n )\n self._fvalidate = fvalidate\n\n def __set_name__(self, cosmo_cls, name):\n # attribute name on container cosmology class\n self._attr_name = name\n self._attr_name_private = \"_\" + name\n\n @property\n def name(self):\n \"\"\"Parameter name.\"\"\"\n return self._attr_name\n\n @property\n def unit(self):\n \"\"\"Parameter unit.\"\"\"\n return self._unit\n\n @property\n def equivalencies(self):\n \"\"\"Equivalencies used when initializing Parameter.\"\"\"\n return self._equivalencies\n\n @property\n def derived(self):\n \"\"\"Whether the Parameter is derived; true parameters are not.\"\"\"\n return self._derived\n\n # -------------------------------------------\n # descriptor and property-like methods\n\n def __get__(self, cosmology, cosmo_cls=None):\n # Get from class\n if cosmology is None:\n return self\n # Get from instance\n return getattr(cosmology, self._attr_name_private)\n\n def __set__(self, cosmology, value):\n \"\"\"Allows attribute setting once. 
Raises AttributeError subsequently.\"\"\"\n # Raise error if setting 2nd time.\n if hasattr(cosmology, self._attr_name_private):\n raise AttributeError(f\"can't set attribute {self._attr_name} again\")\n\n # Validate value, generally setting units if present\n value = self.validate(cosmology, copy.deepcopy(value))\n\n # Make the value read-only, if ndarray-like\n if hasattr(value, \"setflags\"):\n value.setflags(write=False)\n\n # Set the value on the cosmology\n setattr(cosmology, self._attr_name_private, value)\n\n # -------------------------------------------\n # validate value\n\n @property\n def fvalidate(self):\n \"\"\"Function to validate a potential value of this Parameter.\"\"\"\n return self._fvalidate\n\n def validator(self, fvalidate):\n \"\"\"Make new Parameter with custom ``fvalidate``.\n\n Note: ``Parameter.fvalidator`` must be the top-most descriptor decorator.\n\n Parameters\n ----------\n fvalidate : callable[[type, type, Any], Any]\n\n Returns\n -------\n `~astropy.cosmology.Parameter`\n Copy of this Parameter but with custom ``fvalidate``.\n \"\"\"\n return self.clone(fvalidate=fvalidate)\n\n def validate(self, cosmology, value):\n \"\"\"Run the validator on this Parameter.\n\n Parameters\n ----------\n cosmology : `~astropy.cosmology.Cosmology` instance\n value : Any\n The object to validate.\n\n Returns\n -------\n Any\n The output of calling ``fvalidate(cosmology, self, value)``\n (yes, that parameter order).\n \"\"\"\n return self.fvalidate(cosmology, self, value)\n\n @staticmethod\n def register_validator(key, fvalidate=None):\n \"\"\"Decorator to register a new kind of validator function.\n\n Parameters\n ----------\n key : str\n fvalidate : callable[[object, object, Any], Any] or None, optional\n Value validation function.\n\n Returns\n -------\n ``validator`` or callable[``validator``]\n if validator is None returns a function that takes and registers a\n validator. This allows ``register_validator`` to be used as a\n decorator.\n \"\"\"\n return _register_validator(key, fvalidate=fvalidate)\n\n # -------------------------------------------\n\n def _get_init_arguments(self, processed=False):\n \"\"\"Initialization arguments.\n\n Parameters\n ----------\n processed : bool\n Whether to more closely reproduce the input arguments (`False`,\n default) or the processed arguments (`True`). The former is better\n for string representations and round-tripping with ``eval(repr())``.\n\n Returns\n -------\n dict[str, Any]\n \"\"\"\n # The keys are added in this order because `repr` prints them in order.\n kw = {\n \"derived\": self.derived,\n \"unit\": self.unit,\n \"equivalencies\": self.equivalencies,\n # Validator is always turned into a function, but for ``repr`` it's nice\n # to know if it was originally a string.\n \"fvalidate\": self.fvalidate if processed else self._fvalidate_in,\n \"doc\": self.__doc__,\n }\n return kw\n\n def clone(self, **kw):\n \"\"\"Clone this `Parameter`, changing any constructor argument.\n\n Parameters\n ----------\n **kw\n Passed to constructor. The current values, eg. 
``fvalidate`` are\n used as the default values, so an empty ``**kw`` is an exact copy.\n\n Examples\n --------\n >>> p = Parameter()\n >>> p\n Parameter(derived=False, unit=None, equivalencies=[],\n fvalidate='default', doc=None)\n\n >>> p.clone(unit=\"km\")\n Parameter(derived=False, unit=Unit(\"km\"), equivalencies=[],\n fvalidate='default', doc=None)\n \"\"\"\n # Start with defaults, update from kw.\n kwargs = {**self._get_init_arguments(), **kw}\n # All initialization failures, like incorrect input are handled by init\n cloned = type(self)(**kwargs)\n # Transfer over the __set_name__ stuff. If `clone` is used to make a\n # new descriptor, __set_name__ will be called again, overwriting this.\n cloned._attr_name = self._attr_name\n cloned._attr_name_private = self._attr_name_private\n\n return cloned\n\n def __eq__(self, other):\n \"\"\"Check Parameter equality. Only equal to other Parameter objects.\n\n Returns\n -------\n NotImplemented or True\n `True` if equal, `NotImplemented` otherwise. This allows `other` to\n be check for equality with ``other.__eq__``.\n\n Examples\n --------\n >>> p1, p2 = Parameter(unit=\"km\"), Parameter(unit=\"km\")\n >>> p1 == p2\n True\n\n >>> p3 = Parameter(unit=\"km / s\")\n >>> p3 == p1\n False\n\n >>> p1 != 2\n True\n \"\"\"\n if not isinstance(other, Parameter):\n return NotImplemented\n # Check equality on all `_init_arguments` & `name`.\n # Need to compare the processed arguments because the inputs are many-\n # to-one, e.g. `fvalidate` can be a string or the equivalent function.\n return (self._get_init_arguments(True) == other._get_init_arguments(True)) and (\n self.name == other.name\n )\n\n def __repr__(self):\n \"\"\"String representation.\n\n ``eval(repr())`` should work, depending if contents like ``fvalidate``\n can be similarly round-tripped.\n \"\"\"\n return \"Parameter({})\".format(\n \", \".join(f\"{k}={v!r}\" for k, v in self._get_init_arguments().items())\n )\n","sub_path":"astropy/cosmology/parameter/_core.py","file_name":"_core.py","file_ext":"py","file_size_in_byte":9857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"138785646","text":"import re\nimport tkinter as tk\nfrom tkinter import ttk\n\n#TODO - see if there's a way to remember the previously focused tree item\n#when the tree is rebuilt to avoid reparsing everything on every step\n#idea: remember the focus item's name and line number, in the case of\n#any change see if there's an item with the same name on the same line,\n#or an item on any adjacent lines with nearly the same name (?)\n\n#TODO - see if there's a way to select or somehow mark the line\n#that contains the item that's double-clicked on\n\nclass OutlineView(ttk.Frame):\n def __init__(self, master, workbench):\n ttk.Frame.__init__(self, master)\n self._workbench = workbench\n self._workbench.get_editor_notebook().bind(\"<>\",self._update_frame_contents ,True)\n\n #init and place scrollbar\n self.vert_scrollbar = ttk.Scrollbar(self, orient=tk.VERTICAL)\n self.vert_scrollbar.grid(row=0, column=1, sticky=tk.NSEW)\n\n #init and place tree\n self.tree = ttk.Treeview(self, yscrollcommand=self.vert_scrollbar.set)\n self.tree.grid(row=0, column=0, sticky=tk.NSEW)\n self.vert_scrollbar['command'] = self.tree.yview\n\n #set single-cell frame\n self.columnconfigure(0, weight=1)\n self.rowconfigure(0, weight=1)\n\n #init tree events\n self.tree.bind(\"\", self.on_double_click, \"+\")\n\n #configure the only tree column\n self.tree.column('#0', anchor=tk.W, 
stretch=True)\n        self.tree.heading('#0', text='Item (type @ line)', anchor=tk.W)\n\n    def _update_frame_contents(self, event=None):\n        self._clear_tree()\n        if self._workbench.get_editor_notebook().get_current_editor():\n            self.parse_and_display_module(self._workbench.get_editor_notebook().get_current_editor()._code_view)\n        \n        module_contents = self.active_codeview.get_content()\n        nodes = [] #all nodes in format (parent, node_indent, node_children, name, type, linenumber)\n        root_node = (None, 0, []) #name, type and linenumber not needed for root\n        nodes.append(root_node)\n        active_node = root_node\n\n        lineno = 0\n        for line in module_contents.split('\n'):\n            lineno += 1\n            m = re.match('[ ]*[\w]{1}', line)\n            if m:\n                indent = len(m.group(0))\n                while indent <= active_node[1]:\n                    active_node = active_node[0]\n\n                t = re.match('[ ]*(?P<type>(def|class){1})[ ]+(?P<name>[\w]+)', line)\n                if t:\n                    current = (active_node, indent, [], t.group('name'), t.group('type'), lineno)\n                    active_node[2].append(current)\n                    active_node = current\n\n        self.module_data = nodes\n        self._display_content() #and now let's display the data\n\n    #displays the parsed content\n    def _display_content(self):\n        if not self.module_data or self.module_data == None:\n            return\n\n        #go over each item in the root node, which will recursively do the same for child nodes\n        for item in self.module_data[0][2]:\n            self._add_item_to_tree('', item)\n\n    #adds a single item to the tree, recursively calls itself to add any child nodes\n    def _add_item_to_tree(self, parent, item):\n        #create the text to be displayed for this item\n        item_text = item[3] + ' (' + item[4] + ' @ ' + str(item[5]) + ')'\n        \n        #insert the item, set lineno as a 'hidden' value\n        current = self.tree.insert(parent, 'end', text=item_text, values = item[5])\n\n        for child in item[2]:\n            self._add_item_to_tree(current, child)\n    \n    #clears the tree by deleting all items \n    def _clear_tree(self):\n        for child_id in self.tree.get_children():\n            self.tree.delete(child_id)\n\n    #called when a double-click is performed on any items\n    def on_double_click(self, event):\n        lineno = self.tree.item(self.tree.focus())['values'][0]\n        index = self.active_codeview.text.index(str(lineno) + '.0')\n        self.active_codeview.text.see(index) #make sure that the double-clicked item is visible\n        self._workbench.event_generate(\"OutlineDoubleClick\",\n                item_text=self.tree.item(self.tree.focus(), option='text'))\n\n    #called by editornotebook publisher to notify of changed tab\n    def notify_tab_changed(self):\n        if self.active_codeview is not None and self.active_codeview.modify_listeners is not None and self in self.active_codeview.modify_listeners:\n            self.active_codeview.modify_listeners.remove(self)\n        else: \n            self._clear_tree()\n\ndef load_plugin(workbench): \n    workbench.add_view(OutlineView, \"Outline\", \"ne\")\n","sub_path":"thonny/plugins/outline.py","file_name":"outline.py","file_ext":"py","file_size_in_byte":4726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"326439197","text":"# coding=utf-8\nimport requests\nimport parsedata\nimport Log\nimport logging\n\n# Guangxi historical trend data, hourly granularity\ndef main(sample):\n    datautil = parsedata.DataUtil()\n    url = 'http://sqmweb.itv.cmvideo.cn:18088/evqmaster/networkaction!returnAreaDetailByID.action'\n    params = 'paramData={\"id\":23,\"KPIUTCSec\":\"' + datautil.getDate() + '\",\"SampleInterval\":3600,\"type\":\"2\",\"realtime\":\"\"}'\n    \n    try:\n        st = requests.get(url, params).json()\n        paramData = st['resultData']\n        arealist = eval(paramData)\n        # print 
\"广西共有数据 :\", arealist['topTotal']\n datalist = arealist['arealist']\n # print len(datalist)\n for i in datalist :\n # print i,type(i)\n params = datautil.parseParams(i['id'], sample=sample, SampleInterval=3600)\n desc = i['location'], '---', i['parentid']\n datautil.parseData(params, desc=desc)\n \n except requests.HTTPError as e:\n logging.error(\"HTTPError :\" + str(e.reason))\n# 距离现在条数 \nmain(sample=48*300)\n","sub_path":"guangxi_historicaltrend_hour/crawler2.py","file_name":"crawler2.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"339205510","text":"# **************************************************************************** #\n# #\n# ::: :::::::: #\n# socket_bridge.py :+: :+: :+: #\n# +:+ +:+ +:+ #\n# By: pde-rent +#+ +:+ +#+ #\n# +#+#+#+#+#+ +#+ #\n# Created: 2018/06/28 22:07:27 by pde-rent #+# #+# #\n# Updated: 2018/07/02 20:47:08 by pde-rent ### ########.fr #\n# #\n# **************************************************************************** #\n\n# !/usr/bin/python3\n\nimport websocket\nimport socket\nimport time\nimport asyncio\nimport sys\nimport os\n\ns_uri = os.getenv(\"WS_HOST\", \"127.0.0.1\")\ns_port = os.getenv(\"WS_PORT\", \"8083\")\n# ws_uri = os.getenv(\"WS_HOST\", \"127.0.0.1\")\nws_port = os.getenv(\"WS_PORT\", \"8082\")\n\naddress = \"ws://\" + s_uri + \":\" + ws_port\n\n# Create a TCP/IP socket\n\n# Bind the socket to the port\nprint(\"Expecting data on %s:%s [TCP]\" % (s_uri, s_port))\nprint(\"Sending data to %s:%s [WebSocket]\" % (s_uri, ws_port))\n# Let's bind\n# sock.bind((s_uri,int(s_port)))\n# Listen for incoming connections\n# sock.listen(1)\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nwsock = websocket.create_connection(address)\nsock.bind((s_uri,int(s_port)))\nsock.listen(1)\nloop = asyncio.get_event_loop()\n\nasync def send_websocket(data):\n\tawait wsock.send(data)\n\nasync def play_corewar():\n\n\t#_dbg = 0\n\tpayload = None\n\t(new_socket, client_address) = sock.accept()\n\twhile 1:\n\t\tbuf = None\n\t\twhile 1:\n\t\t\tpayload = new_socket.recv(1).decode()\n\t\t\t# print(payload)\n\t\t\tif (not payload or (\"\" in payload)): #or len(payload) < 2):\n\t\t\t\tprint(\"End of payload.\\nWaiting for another game to begin!\")\n\t\t\t\tnew_socket.close()\n\t\t\t\tawait play_corewar()\n\t\t\t\treturn\n\t\t\t\t# continue\n\t\t\telif not \"$\" in payload:\n\t\t\t\tif buf:\n\t\t\t\t\tbuf += payload\n\t\t\t\telse:\n\t\t\t\t\tbuf = payload\n\t\t\t# print(\"payload:%s\" % payload)\n\t\t\tif \"$\" in payload:\n\t\t\t\t# print(\"%s\" % buf, end='')\n\t\t\t\t# print(\"Sending #%d\" % _dbg)\n\t\t\t\t#_dbg += 1\n\t\t\t\t# asyncio.ensure_future(send_websocket(buf))\n\t\t\t\t# time.sleep(0.05)\n\t\t\t\twsock.send(buf)\n\t\t\t\tbuf = None\n\t\t\t# if (\"\" in payload): #or len(payload) < 2):\n\t\t\t\t# print(\"End of payload.\\nWaiting for another game to begin!\")\n\t\t\t\t# sock.close()\n\t\t\t\t# wsock.close()\n\t\t\t\t# return\n\t\t\t\t# c = sys.stdin.read(1)\n\t\t\t\t# if (c == 'y' or c == 'Y'):\n\t\t\t\t# \tcontinue\n\t\t\t\t# else:\n\t\t\t\t# \tsock.close()\n\t\t\t\t# \twsock.close()\n\t\t\t\t# \tsys.exit(0)\n\t\t\t\t# \tbreak\n\n# loop.run_until_complete(play_corewar())\nasyncio.ensure_future(play_corewar())\nloop.run_forever()\n# loop.close()\n","sub_path":"vizu/socket_bridge.py","file_name":"socket_bridge.py","file_ext":"py","file_size_in_byte":2896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} 
+{"seq_id":"381247814","text":"#!/usr/bin/python2\n\"\"\"Switch part of the objects file in working set to (possible) bad ones.\n\nThe \"portion\" is defined by the file (which is passed as the only argument to\nthis script) content. Every line in the file is an object index, which will be\nset to good (mark as 0).\n\nThis switch script is made for the noincremental-prune test. This makes sure\nthat, after pruning starts (>1 bad item is found), that the number of args sent\nto the switch scripts is equals to the actual number of items (i.e. checking\nthat noincremental always holds).\n\nWarning: This switch script assumes the --file_args option\n\"\"\"\n\nfrom __future__ import print_function\n\nimport shutil\nimport sys\n\nimport common\n\n\ndef Main(argv):\n \"\"\"Switch part of the objects file in working set to (possible) bad ones.\"\"\"\n working_set = common.ReadWorkingSet()\n objects_file = common.ReadObjectsFile()\n object_index = common.ReadObjectIndex(argv[1])\n\n for oi in object_index:\n working_set[oi] = objects_file[oi]\n\n shutil.copy(argv[1], './noinc_prune_bad')\n\n common.WriteWorkingSet(working_set)\n\n return 0\n\n\nif __name__ == '__main__':\n retval = Main(sys.argv)\n sys.exit(retval)\n","sub_path":"app/src/main/java/com/syd/source/aosp/external/toolchain-utils/binary_search_tool/test/switch_to_bad_noinc_prune.py","file_name":"switch_to_bad_noinc_prune.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"41548009","text":"#!/usr/bin/env python3\n\nimport csv\nimport html\nimport argparse\nimport sys\n\nTEMPLATE_FILE = \"template.html\"\nEXCLUDE_COLUMNS = [\"User\", \"Team\", \"P\"]\nNAME_MAPPING = {\n \"Username\": \"Lietotājvārds\",\n \"User\": \"Vārds\",\n \"Team\": \"Skola\",\n \"Global\": \"Summa\"\n}\n\n\ndef convert_result_to_html(input_name, output, template, title, description):\n with open(template, \"r\") as templateFile:\n template_data = templateFile.read()\n table = \"\"\n with open(input_name, \"r\") as input_file:\n reader = csv.reader(input_file)\n rows = list(reader)\n columns = []\n table += \"\"\n global_column = -2\n for i, name in enumerate(rows[0]):\n if name in EXCLUDE_COLUMNS:\n continue\n if name == \"Global\":\n global_column = i\n columns.append(i)\n column_name = NAME_MAPPING.get(name, name)\n table += \"{0}\".format(html.escape(column_name))\n table += \"\\n\"\n results = sorted(rows[1:], reverse=True, key=lambda x: float(x[global_column]))\n for row in results:\n table += \"\"\n for col_id in columns:\n if col_id > 2: # score\n table += \"{0}\".format(html.escape(row[col_id]))\n else:\n table += \"{0}\".format(html.escape(row[col_id]))\n table += \"\\n\"\n args = {\n \"table\": table,\n \"title\": title,\n \"description\": description\n }\n result = template_data.format(**args)\n output.write(result)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-o\", \"--output\", help=\"Ouptut file\")\n parser.add_argument(\"-t\", \"--title\", default=\"Rezultāti\", help=\"Page title\")\n parser.add_argument(\"-d\", \"--description\", default=\"\")\n parser.add_argument(\"--template\", default=TEMPLATE_FILE, help=\"Template html.\")\n parser.add_argument('input', help=\"Input csv file\")\n args = parser.parse_args()\n\n if args.output:\n with open(args.output, \"w\") as out_file:\n convert_result_to_html(args.input, out_file, args.template, args.title, args.description)\n else:\n convert_result_to_html(args.input, sys.stdout, 
args.template, args.title, args.description)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"resultToHtml.py","file_name":"resultToHtml.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"540444446","text":"import random\nimport numpy\n\nclass Game:\n def __init__(self):\n self.suitcases = [\n 0.01, 1, 5, 10, 25, 50, 75, 100, 200, 300, 400, 500, 750, 1000, 5000, 10000, 25000, 50000, 75000, 100000, 200000, 300000, 400000, 500000, 750000, 1000000\n ]\n self.round = 6\n self.user_ammount = 0\n self.offer = 0\n self.case_array = []\n self.cases_left = 26 - self.suitcases.count(0)\n \n\n def game_start(self):\n random.shuffle(self.suitcases)\n user_case = int(input('Choose a case to start! '))\n self.user_ammount = self.suitcases[user_case]\n self.suitcases[user_case] = 0\n print(user_case)\n return self.user_ammount\n\n def remaining_cases(self):\n counter = 0\n self.case_array = []\n while counter < len(self.suitcases):\n if self.suitcases[counter] > 0:\n self.case_array.append(counter)\n counter += 1\n else:\n counter += 1\n print(self.case_array)\n return self.case_array\n\n # def banker(self):\n # '''\n # Banker's offer = $12,275.30 + \n # (.748 * expected value) +\n # (-2714.74 * number of cases left) +\n # ( -.040 * maximum value left ) +\n # (.0000006986 * expected value squared ) +\n # ( 32.623 * number of cases left squared ).\n # '''\n # ex_val = [i * 1/26 for i in self.suitcases]\n # self.offer = 12275.30 + (0.748 * sum(ex_val)) + (-2714.74 * self.cases_left) + (-0.040 * max(self.suitcases)) + (0.0000006986 * (sum(ex_val)*sum(ex_val))) + (32.623 * (self.cases_left*self.cases_left))\n # return self.offer \n def banker(self):\n base_offer = max(self.suitcases) - min(self.suitcases)\n self.offer = (base_offer /2) + min(self.suitcases)\n return self.offer\n\n def case_removal(self):\n count = self.round\n while count > 0:\n cases = self.remaining_cases()\n choose_case = int(input(f'Choose a case to remove! {count} cases left to remove!: '))\n print(self.suitcases[choose_case])\n self.suitcases[choose_case] = 0\n count -= 1\n return self.deal_no_deal()\n \n \n \n def deal_no_deal(self):\n banker_offer = self.banker()\n print(banker_offer)\n choice = input(\"Deal or No Deal? 
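# Worked example of the simplified banker() above: the offer is the midpoint
# between the smallest and largest amounts still in play. (In the class,
# opened cases are zeroed out, so once any case is opened min() becomes 0 and
# the offer collapses to max/2 -- a fuller version would exclude zeros.)
suitcases = [0.01, 1, 5, 100000, 1000000]
base_offer = max(suitcases) - min(suitcases)
offer = (base_offer / 2) + min(suitcases)
print(offer)   # 500000.005 -- halfway between $0.01 and $1,000,000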
\")\n if choice == 'Deal':\n print(banker_offer)\n return \"Thank you for playing!\"\n elif choice == 'No Deal' and self.round == 0:\n keep_case = input('Keep or trade?')\n if (keep_case == 'Keep'):\n return self.user_ammount\n else:\n return self.suitcases[self.remaining_cases[0]]\n return self.user_ammount\n else:\n self.round -= 1\n self.case_removal()\n \n# runner code\nnew_game = Game()\nnew_game.game_start()\nnew_game.case_removal()\nnew_game.deal_no_deal() ","sub_path":"deal_no_deal.py","file_name":"deal_no_deal.py","file_ext":"py","file_size_in_byte":2983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"361808141","text":"\r\nf = open(\"input.txt\")\r\ninputList = f.read().splitlines()\r\nf.close()\r\n\r\nanswers = []\r\ncount = 0\r\ntotal = 0\r\n\r\nfor x in inputList:\r\n if x != \"\":\r\n for i in range(len(x)):\r\n if x[i] not in answers:\r\n \r\n answers.append(x[i])\r\n count = count + 1\r\n else:\r\n answers.clear()\r\n total = total + count\r\n count = 0\r\nprint(total)","sub_path":"Advent of Code day 6.py","file_name":"Advent of Code day 6.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"620326474","text":"# 360_2019p1: 城市修建\n# author: kurumi\n\n\ndef getS(c):\n x, y = [], []\n for i in range(len(c)):\n x.append(c[i][0])\n y.append(c[i][1])\n l = max([max(x) - min(x), max(y) - min(y)])\n return l * l\n\n\nif __name__ == \"__main__\":\n n = int(input())\n coordinate = []\n for i in range(n):\n coordinate.append([int(i) for i in input().split()])\n s = getS(coordinate)\n print(s)\n\n\"\"\"\n有一个城市需要修建,给你N个民居的坐标X,Y,问把这么多民居全都包进城市的话,\n城市所需最小面积是多少(注意,城市为平行于坐标轴的正方形)\n\"\"\"\n","sub_path":"newCoder/360_2019p1.py","file_name":"360_2019p1.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"499763773","text":"import falcon\nfrom wsgiref.simple_server import make_server\n\n\nclass Resource:\n def on_get(self, req, res):\n res.body = '{\"message\": \"test\"}'\n print(req.params)\n res.status = falcon.HTTP_200\n\n# res.stream -- почитать что это\n\n\napi = falcon.API()\n\nr = Resource()\n\napi.add_route('/', r)\n\nserv = make_server('', 5001, api)\nserv.serve_forever()","sub_path":"classroom/falcone_example.py","file_name":"falcone_example.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"347098192","text":"import RPi.GPIO as GPIO \nfrom time import sleep\nimport requests\nfrom firebase import firebase\nimport datetime\n \nGPIO.setmode(GPIO.BOARD) # set up BOARD GPIO numbering, could also be BCM \nGPIO.setup(7, GPIO.IN) # set GPIO04 as input (raindrops module)\nGPIO.setup(18,GPIO.IN) # set GPIO24 as input (water sensor)\nGPIO.setup(3, GPIO.OUT) # set GPIO02 as an output (LED) \nGPIO.output(3, 0)\nfirebase = firebase.FirebaseApplication('https://android-things-group.firebaseio.com', None)\n \n#https://android-things-group.firebaseio.com\n#AIzaSyCQkt7jn96UStHuidROSlO4Y93SRFSt9_g\n \ntry: \n while True:\n print(\"Output raindrops \" + str(GPIO.input(7)-1) + \" water sensor \" + str(GPIO.input(18)))\n if GPIO.input(18) == 1 and GPIO.input(7) == 0:\n GPIO.output(3, 1)\n result = firebase.post('/history', {'water-sensor':str(datetime.datetime.now()), 'rain-sensor':str(datetime.datetime.now())})\n print(str(result))\n else:\n if GPIO.input(18): # if 
port 7 == 1 \n #print(\"Port 7 is 1/HIGH/True - LED ON\") \n GPIO.output(3, 1) # set port/pin value to 1/HIGH/True\n \n elif GPIO.input(7) == 0: \n #print(\"Port 7 is 0/LOW/False - LED OFF\")\n GPIO.output(3, 1) # set port/pin value to 0/LOW/False\n #result = firebase.post('/history', {'water-sensor':str(datetime.datetime.now())})\n #print(str(result))\n else:\n GPIO.output(3, 0) # set port/pin value to 0/LOW/False\n \n sleep(0.1) # wait 1 seconds (0.1) \n \nfinally: \n GPIO.cleanup()\n","sub_path":"mobileApplication/RPI-sensors.py","file_name":"RPI-sensors.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"196808993","text":"# formatTimes.py\n\nimport re\n\n# problems to address: \n\t# strings that are longer than 5 chars after whitespace, colon, period,\n\t# stripped are usually the result of a colon, period incorrectly \n\t# recognized as a digit - perhaps one way to deal with is to examine the\n\t# the index of where a colon, period should be post-strip\n\t# make this check at (***)\n\n# problems: 23:23.6, 44:53.7, 8:52.3, 21:39.7, 4:17.8, 23:29.8\n# t_3, t_4, t_6, t_10, t_14, t_18\n\ndef translate(raw):\n\n\tMAX_LEN = 5\n\tfmt_time = \"\"\n\tminutes = \"\"\n\tsec = \"\"\n\tms = \"\"\n\n\ttmp = raw.replace(\" \", \"\") # strip whitespace\n\ttmp = re.sub(r'[^0-9]', '', tmp) # strip colons, periods\n\t\n\t# (***) perform a check here before only taking 5 characters\n\n\tif len(tmp) > MAX_LEN: # take at most 5 characters\n\t\ttmp = tmp[:MAX_LEN]\n\n\tif len(tmp) == 4:\n\t\t# print('length 4')\n\t\t# char 1 -> minutes, char 2, 3 -> seconds, char 4 -> ms\n\t\tminutes = tmp[0]\n\t\tsec = tmp[1:3]\n\t\tms = tmp[3:MAX_LEN]\n\telse:\n\t\t# print('length 5')\t\t\t\n\t\t# char 1,2 -> minutes, char 3, 4 -> seconds, char 5 -> ms\n\t\tminutes = tmp[:2]\n\t\tsec = tmp[2:4]\n\t\tms = tmp[4:MAX_LEN+1]\n\n\tfmt_time = str.join('', (minutes, ':', sec, '.', ms))\n\t# print(str.join(' --- ', (fmt_time, exp[i])))\n\treturn(fmt_time)\n\n\t# end translate() function\n","sub_path":"formatTime.py","file_name":"formatTime.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"628034078","text":"from datetime import datetime\nimport pytz\nimport urllib.parse\nfrom io import StringIO\nfrom collections import OrderedDict\n\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import get_template\nfrom django.conf import settings\n\nfrom lots_admin.look_ups import DENIAL_REASONS, APPLICATION_STATUS\nfrom lots_admin.models import Review, Application\n\ndef create_email_msg(template_name, email_subject, email_to_address, context):\n html_template = get_template('emails/{}.html'.format(template_name))\n txt_template = get_template('emails/{}.txt'.format(template_name))\n\n html_content = html_template.render(context)\n txt_content = txt_template.render(context)\n\n msg = EmailMultiAlternatives(email_subject,\n txt_content,\n settings.EMAIL_HOST_USER,\n [email_to_address])\n\n msg.attach_alternative(html_content, 'text/html')\n\n return msg\n\ndef send_denial_email(request, application_status):\n context = {'app': application_status.application,\n 'lot': application_status.lot,\n 'review': Review.objects.filter(application=application_status).latest('id'),\n 'today': datetime.now().date(),\n 'DENIAL_REASONS': DENIAL_REASONS\n }\n\n msg = create_email_msg(\n 'denial_email',\n 'Notification from 
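# Worked examples of the digit-stripping step in translate() above, using the
# same regex; _strip is a hypothetical helper name used only for illustration.
import re

def _strip(raw):
    return re.sub(r'[^0-9]', '', raw.replace(' ', ''))

print(_strip('4:17.8'))     # '4178'  -> 4 digits, regrouped as 4:17.8
print(_strip('23:23.6'))    # '23236' -> 5 digits, regrouped as 23:23.6
print(_strip('8 : 52.3'))   # '8523'  -> whitespace is removed first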
LargeLots',\n application_status.application.email,\n context\n )\n\n msg.send()\n\ndef create_redirect_path_from_session(request):\n params = {k: request.session[k] for k in ('page', 'query', 'pilot') if request.session.get(k)}\n\n return '?' + urllib.parse.urlencode(params)\n\nclass InvalidStepError(Exception):\n pass\n\ndef step_from_status(description_key):\n '''\n Return step number as integer, given a step description key.\n '''\n key_list = list(APPLICATION_STATUS.keys())\n\n try:\n given_index = key_list.index(description_key)\n\n except ValueError:\n available_steps = ', '.join(key for key in key_list)\n message = '\"{0}\" is not in available step keys: {1}'.format(description_key,\n available_steps)\n raise InvalidStepError(message)\n\n else:\n return given_index + 2 # Our numbered steps begin at 2.\n\ndef application_steps():\n short_names = {\n 'deed': 'Deed check',\n 'location': 'Location check',\n 'multi': 'Multiple applicant check',\n 'letter': 'Alderman letter',\n 'lottery': 'Lottery',\n 'EDS_waiting': 'Submit EDS & PPF',\n 'EDS_submission': 'EDS & PPF submitted',\n 'city_council': 'Approved by City Council & Plan Commission',\n 'debts': 'Certified as debt free',\n 'sold': 'Sold',\n }\n\n steps = [(step_from_status(k), short_names[k])\n for k in APPLICATION_STATUS.keys()]\n\n return steps\n\ndef make_conditions(request, step):\n '''\n Convenience method for the `applications` view in the admin backend.\n '''\n query = request.GET.get('query', None)\n\n if step.isdigit():\n step = int(step)\n\n conditions = '''\n AND coalesce(deed_image, '') <> ''\n AND step = {0}\n '''.format(step)\n\n if request.GET.get('eds', None):\n conditions += 'AND app.eds_received = {} '.format(request.GET['eds'])\n\n if request.GET.get('ppf', None):\n conditions += 'AND app.ppf_received = {} '.format(request.GET['ppf'])\n\n elif step == 'denied':\n conditions = '''\n AND coalesce(deed_image, '') <> ''\n AND status.denied = TRUE\n '''\n\n elif step == 'all':\n conditions = ''\n\n if query:\n query_sql = \"plainto_tsquery('english', '{0}') @@ to_tsvector(app.first_name || ' ' || app.last_name || ' ' || address.ward)\".format(query)\n\n conditions += 'AND {0}'.format(query_sql)\n\n return conditions, step\n\ndef default_pilot_to_render():\n '''\n This method determines the default pilot to use in admin views. \n\n We assume that admins are reviewing applications from the previous pilot, \n while the site accepts applications for the current pilot. \n\n If the application process is open, then show the previous pilot \n in the admin view. 
Otherwise, show the most recent (or \"current\") pilot.\n '''\n timezone = pytz.timezone('America/Chicago')\n chicago_time = datetime.now(timezone)\n pilot_info = OrderedDict(reversed(sorted(settings.PILOT_INFO.items())))\n\n if settings.END_DATE > chicago_time:\n previous_pilot = list(pilot_info.keys())[1]\n return previous_pilot\n else:\n return settings.CURRENT_PILOT\n","sub_path":"lots_admin/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"499266872","text":"import pandas as pd\r\nfrom numpy import sqrt\r\n# 임시\r\ndef bmi(height , weight):\r\n bmi_h = height * height\r\n bmi_w = weight * 10000\r\n bmi = bmi_w / bmi_h\r\n returnDic = {}\r\n returnDic['bmi'] = bmi\r\n if bmi > 30:\r\n returnDic['bmi상태'] = (\"비만\")\r\n elif bmi >= 25:\r\n returnDic['bmi상태'] = (\"과체중\")\r\n elif bmi >= 18.5:\r\n returnDic['bmi상태'] = (\"정상\")\r\n else:\r\n returnDic['bmi상태'] = (\"저체중\")\r\n return returnDic\r\n\r\ndef encouraged(gender, age):\r\n # 남성: 1, 여성: 2 // 단위: ~세\r\n kcal_man1 = 1700\r\n kcal_man2 = 2100\r\n kcal_man3 = 2500\r\n kcal_man4 = 2700\r\n kcal_man5 = 2600\r\n kcal_man6 = 2400\r\n kcal_man7 = 2200\r\n kcal_man8 = 2000\r\n kcal_man9 = 2000\r\n\r\n kcal_woman1 = 1500\r\n kcal_woman2 = 1800\r\n kcal_woman3 = 2000\r\n kcal_woman4 = 2000\r\n kcal_woman5 = 2100\r\n kcal_woman6 = 1900\r\n kcal_woman7 = 1800\r\n kcal_woman8 = 1600\r\n kcal_woman9 = 1600\r\n\r\n protein_man1 = 30\r\n protein_man2 = 40\r\n protein_man3 = 55\r\n protein_man4 = 65\r\n protein_man5 = 65\r\n protein_man6 = 60\r\n protein_man7 = 60\r\n protein_man8 = 55\r\n protein_man9 = 55\r\n\r\n protein_woman1 = 25\r\n protein_woman2 = 40\r\n protein_woman3 = 50\r\n protein_woman4 = 50\r\n protein_woman5 = 55\r\n protein_woman6 = 50\r\n protein_woman7 = 50\r\n protein_woman8 = 45\r\n protein_woman9 = 45\r\n\r\n sodium_man1 = 2000\r\n sodium_man2 = 2000\r\n sodium_man3 = 2000\r\n sodium_man4 = 2000\r\n sodium_man5 = 2000\r\n sodium_man6 = 2000\r\n sodium_man7 = 2000\r\n sodium_man8 = 2000\r\n sodium_man9 = 2000\r\n\r\n sodium_woman1 = 2000\r\n sodium_woman2 = 2000\r\n sodium_woman3 = 2000\r\n sodium_woman4 = 2000\r\n sodium_woman5 = 2000\r\n sodium_woman6 = 2000\r\n sodium_woman7 = 2000\r\n sodium_woman8 = 2000\r\n sodium_woman9 = 2000\r\n\r\n potassium_man1 = 2600\r\n potassium_man2 = 3000\r\n potassium_man3 = 3500\r\n potassium_man4 = 3500\r\n potassium_man5 = 3500\r\n potassium_man6 = 3500\r\n potassium_man7 = 3500\r\n potassium_man8 = 3500\r\n potassium_man9 = 3500\r\n\r\n potassium_woman1 = 2600\r\n potassium_woman2 = 3000\r\n potassium_woman3 = 3500\r\n potassium_woman4 = 3500\r\n potassium_woman5 = 3500\r\n potassium_woman6 = 3500\r\n potassium_woman7 = 3500\r\n potassium_woman8 = 3500\r\n potassium_woman9 = 3500\r\n\r\n calcium_man1 = 700\r\n calcium_man2 = 800\r\n calcium_man3 = 1000\r\n calcium_man4 = 900\r\n calcium_man5 = 800\r\n calcium_man6 = 800\r\n calcium_man7 = 750\r\n calcium_man8 = 700\r\n calcium_man9 = 700\r\n\r\n calcium_woman1 = 700\r\n calcium_woman2 = 800\r\n calcium_woman3 = 900\r\n calcium_woman4 = 800\r\n calcium_woman5 = 700\r\n calcium_woman6 = 700\r\n calcium_woman7 = 800\r\n calcium_woman8 = 800\r\n calcium_woman9 = 800\r\n\r\n returnDic = {}\r\n\r\n if gender == 1:\r\n if age >= 75:\r\n returnDic = {\"칼로리\": kcal_man9,\r\n \"단백질\": protein_man9,\r\n \"단백질:\": protein_man9,\r\n \"나트륨\": sodium_man9,\r\n \"칼륨\": potassium_man9,\r\n \"칼슘\": calcium_man9}\r\n elif age >= 
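# Worked check of bmi() above and of the Harris-Benedict-style formula used
# by recommendCal() further below, for the sample person this module itself
# tests with (174 cm, 70 kg, 27 years, male, activity level 2).
height, weight, age = 174, 70, 27

bmi_value = weight * 10000 / (height * height)   # kg and cm -> kg/m**2
print(round(bmi_value, 2))                       # 23.12 -> in the 18.5-25 "normal" band

basal = 66.47 + (13.75 * weight) + (5 * height) - (6.76 * age)
activity = basal * 0.375                         # activity level 2 multiplier
print(round(basal, 2), round(activity, 5), round(basal + activity, 5))
# 1716.45 643.66875 2360.11875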
65:\r\n returnDic = {\"칼로리\": kcal_man8,\r\n \"단백질\": protein_man8,\r\n \"단백질:\": protein_man8,\r\n \"나트륨\": sodium_man8,\r\n \"칼륨\": potassium_man8,\r\n \"칼슘\": calcium_man8}\r\n\r\n elif age >= 50:\r\n returnDic = {\"칼로리\": kcal_man7,\r\n \"단백질\": protein_man7,\r\n \"단백질:\": protein_man7,\r\n \"나트륨\": sodium_man7,\r\n \"칼륨\": potassium_man7,\r\n \"칼슘\": calcium_man7}\r\n\r\n elif age >= 30:\r\n returnDic = {\"칼로리\": kcal_man6,\r\n \"단백질\": protein_man6,\r\n \"단백질:\": protein_man6,\r\n \"나트륨\": sodium_man6,\r\n \"칼륨\": potassium_man6,\r\n \"칼슘\": calcium_man6}\r\n\r\n elif age >= 19:\r\n returnDic = {\"칼로리\": kcal_man5,\r\n \"단백질\": protein_man5,\r\n \"단백질:\": protein_man5,\r\n \"나트륨\": sodium_man5,\r\n \"칼륨\": potassium_man5,\r\n \"칼슘\": calcium_man5}\r\n\r\n elif age >= 15:\r\n returnDic = {\"칼로리\": kcal_man4,\r\n \"단백질\": protein_man4,\r\n \"단백질:\": protein_man4,\r\n \"나트륨\": sodium_man4,\r\n \"칼륨\": potassium_man4,\r\n \"칼슘\": calcium_man4}\r\n\r\n elif age >= 12:\r\n returnDic = {\"칼로리\": kcal_man3,\r\n \"단백질\": protein_man3,\r\n \"단백질:\": protein_man3,\r\n \"나트륨\": sodium_man3,\r\n \"칼륨\": potassium_man3,\r\n \"칼슘\": calcium_man3}\r\n\r\n elif age >= 9:\r\n returnDic = {\"칼로리\": kcal_man2,\r\n \"단백질\": protein_man2,\r\n \"단백질:\": protein_man2,\r\n \"나트륨\": sodium_man2,\r\n \"칼륨\": potassium_man2,\r\n \"칼슘\": calcium_man2}\r\n\r\n else:\r\n returnDic = {\"칼로리\": kcal_man1,\r\n \"단백질\": protein_man1,\r\n \"단백질:\": protein_man1,\r\n \"나트륨\": sodium_man1,\r\n \"칼륨\": potassium_man1,\r\n \"칼슘\": calcium_man1}\r\n else:\r\n if age >= 75:\r\n returnDic = {\"칼로리\": kcal_woman9,\r\n \"단백질\": protein_woman9,\r\n \"단백질:\": protein_woman9,\r\n \"나트륨\": sodium_woman9,\r\n \"칼륨\": potassium_woman9,\r\n \"칼슘\": calcium_woman9}\r\n\r\n elif age >= 65:\r\n returnDic = {\"칼로리\": kcal_woman8,\r\n \"단백질\": protein_woman8,\r\n \"단백질:\": protein_woman8,\r\n \"나트륨\": sodium_woman8,\r\n \"칼륨\": potassium_woman8,\r\n \"칼슘\": calcium_woman8}\r\n\r\n\r\n elif age >= 50:\r\n returnDic = {\"칼로리\": kcal_woman7,\r\n \"단백질\": protein_woman7,\r\n \"단백질:\": protein_woman7,\r\n \"나트륨\": sodium_woman7,\r\n \"칼륨\": potassium_woman7,\r\n \"칼슘\": calcium_woman7}\r\n\r\n elif age >= 30:\r\n returnDic = {\"칼로리\": kcal_woman6,\r\n \"단백질\": protein_woman6,\r\n \"단백질:\": protein_woman6,\r\n \"나트륨\": sodium_woman6,\r\n \"칼륨\": potassium_woman6,\r\n \"칼슘\": calcium_woman6}\r\n\r\n\r\n elif age >= 19:\r\n returnDic = {\"칼로리\": kcal_woman5,\r\n \"단백질\": protein_woman5,\r\n \"단백질:\": protein_woman5,\r\n \"나트륨\": sodium_woman5,\r\n \"칼륨\": potassium_woman5,\r\n \"칼슘\": calcium_woman5}\r\n\r\n\r\n elif age >= 15:\r\n returnDic = {\"칼로리\": kcal_woman4,\r\n \"단백질\": protein_woman4,\r\n \"단백질:\": protein_woman4,\r\n \"나트륨\": sodium_woman4,\r\n \"칼륨\": potassium_woman4,\r\n \"칼슘\": calcium_woman4}\r\n\r\n\r\n elif age >= 12:\r\n returnDic = {\"칼로리\": kcal_woman3,\r\n \"단백질\": protein_woman3,\r\n \"단백질:\": protein_woman3,\r\n \"나트륨\": sodium_woman3,\r\n \"칼륨\": potassium_woman3,\r\n \"칼슘\": calcium_woman3}\r\n\r\n\r\n elif age >= 9:\r\n returnDic = {\"칼로리\": kcal_woman2,\r\n \"단백질\": protein_woman2,\r\n \"단백질:\": protein_woman2,\r\n \"나트륨\": sodium_woman2,\r\n \"칼륨\": potassium_woman2,\r\n \"칼슘\": calcium_woman2}\r\n else:\r\n returnDic = {\"칼로리\": kcal_woman1,\r\n \"단백질\": protein_woman1,\r\n \"단백질:\": protein_woman1,\r\n \"나트륨\": sodium_woman1,\r\n \"칼륨\": potassium_woman1,\r\n \"칼슘\": calcium_woman1 }\r\n\r\n return returnDic\r\n\r\ndef recommendCal(height,weight,age,gender,activity):\r\n# gender = int(input(\"* 성별을 입력하세요(남성: 1, 여성: 
2)\"))\r\n# age = int(input(\"* 나이를 입력하세요(단위: 살)\"))\r\n# height = int(input(\"* 키를 입력하세요(단위: cm)\"))\r\n# weight = int(input(\"* 몸무게를 입력하세,요(단위: kg)\"))\r\n# activity = int(\r\n# input(\"* 활동량을 입력하세요\\ndef getRecommendation(data, person, sim_function=sim_pearson):\"))\r\n returnDic = {}\r\n if gender == 1:\r\n basic1 = 66.47 + (13.75 * weight) + (5 * height) - (6.76 * age)\r\n\r\n if activity == 1:\r\n activity_A = basic1 * 0.2\r\n elif activity == 2:\r\n activity_A = basic1 * 0.375\r\n elif activity == 3:\r\n activity_A = basic1 * 0.555\r\n else:\r\n activity_A = basic1 * 0.725\r\n activity_all = basic1 + activity_A\r\n returnDic = {\"기초대사량\": basic1, \"활동대사량\": activity_A, \"권장칼로리\": activity_all}\r\n\r\n else:\r\n basic2 = 65.51 + (9.56 * weight) + (1.85 * height) - (4.68 * age)\r\n\r\n if activity == 1:\r\n activity_A2 = basic2 * 0.2\r\n elif activity == 2:\r\n activity_A2 = basic2 * 0.375\r\n elif activity == 3:\r\n activity_A2 = basic2 * 0.555\r\n else:\r\n activity_A2 = basic2 * 0.725\r\n\r\n activity_all2 = basic2 + activity_A2\r\n returnDic = {\"기초대사량\": basic2, \"활동대사량\": activity_A2, \"권장칼로리\": activity_all2}\r\n return returnDic\r\n\r\n\r\ndef healthMain(height , weight, age, gender,activity=2,dietTarget = 1):\r\n dic = {}\r\n tempDic = {}\r\n for i in bmi(height,weight):\r\n dic[i]=bmi(height,weight)[i]\r\n for i in encouraged(gender,age):\r\n dic[i]= encouraged(gender,age)[i]\r\n for i in recommendCal(height,weight,age,gender,activity):\r\n dic[i] = recommendCal(height,weight,age,gender,activity)[i]\r\n\r\n if(dietTarget==2):\r\n dic['권장칼로리'] *= 0.8\r\n elif(dietTarget==3):\r\n dic['권장칼로리'] *= 1.2\r\n return dic\r\n\r\n\r\n# 임시\r\n\r\ndef loadAsCsv(fileName='dft.csv'):\r\n tempDf = pd.read_csv(fileName, encoding='EUC-KR')\r\n alist = []\r\n\r\n for i in tempDf.index:\r\n tempDic = {}\r\n for col in tempDf:\r\n tempDic[col] = tempDf.at[i, col]\r\n alist.append(tempDic)\r\n return alist\r\n\r\ndef linkNutrient(filteredList,nutData = loadAsCsv(\"calories.csv\")):\r\n returnLink = []\r\n for fl in filteredList:\r\n try:\r\n for nd in nutData:\r\n if(fl==nd[\"음식이름\"]):\r\n returnLink.append(nd)\r\n except:\r\n print('에러',fl)\r\n continue\r\n return returnLink\r\n\r\n\r\ndef transList(givenList):\r\n returnDic = {}\r\n tempList = []\r\n for gl in givenList:\r\n tempList.append(gl['음식이름'])\r\n tempList = list(set(tempList))\r\n tempDic = {}\r\n for tl in tempList:\r\n for gl in givenList:\r\n if (tl == gl['음식이름']):\r\n tempDic[gl['영양성분']] = gl['함유량']\r\n returnDic[tl] = tempDic\r\n tempDic = {}\r\n\r\n\r\n return returnDic\r\n\r\ndef sim_pearson(data, name1, name2):\r\n sumX = 0 # X의 합\r\n sumY = 0 # Y의 합\r\n sumPowX = 0 # X 제곱의 합\r\n sumPowY = 0 # Y 제곱의 합\r\n sumXY = 0 # X*Y의 합\r\n count = 0 # 음식 개수\r\n\r\n for i in data[name1]: # i = key\r\n if i in data[name2]: # 같은 음식을 평가했을때만\r\n sumX += data[name1][i]\r\n sumY += data[name2][i]\r\n sumPowX += pow(data[name1][i], 2)\r\n sumPowY += pow(data[name2][i], 2)\r\n sumXY += data[name1][i] * data[name2][i]\r\n count += 1\r\n\r\n return (sumXY - ((sumX * sumY) / count)) / sqrt(\r\n (sumPowX - (pow(sumX, 2) / count)) * (sumPowY - (pow(sumY, 2) / count)))\r\n\r\n# 딕셔너리 돌면서 상관계수순으로 정렬\r\ndef top_match(data, name, index=20, sim_function=sim_pearson):\r\n li=[]\r\n for i in data: #딕셔너리를 돌고\r\n if name!=i: #자기 자신이 아닐때만\r\n li.append((sim_function(data,name,i),i)) #sim_function()을 통해 상관계수를 구하고 li[]에 추가\r\n li.reverse() #내림차순\r\n return li[:index]\r\ndef UserloadOnData(data,userDic):\r\n userDic['당질'] = userDic[\"탄수화물\"]\r\n data['사용자'] = 
userDic\r\n\r\n\r\ndef main(data={},healthDict={}):\r\n# {'bmi': 23.120623596247853, 'bmi상태': '정상', '칼로리': 2600, '단백질': 65,\r\n# '단백질:': 65, '나트륨': 2000, '칼륨': 3500, '칼슘': 800,\r\n# '기초대사량': 1716.45, '활동대사량': 643.66875, '권장칼로리': 2360.11875}\r\n\r\n\r\n dishNameList = []\r\n for d in data:\r\n dishNameList.append(d)\r\n linkedList = linkNutrient(dishNameList)\r\n transedDic = transList(linkedList)\r\n transedDic['사용자'] = healthDict\r\n\r\n topList = top_match(transedDic,'사용자',20)\r\n print(topList)\r\n return topList\r\n\r\n\r\ntempdata = [(0.03809703432192489, '겨울 샤브샤브'), (0.03739845102413832, '계란말이김밥'), (0.037229803192769, '두부두루치기'),\r\n (0.03706344527230439, '수제비'), (0.03706344527230439, '쇠고기 떡국'), (0.03706344527230439, '무초절임 쌈밥'),\r\n (0.03706344527230439, '두부 스테이크'), (0.036899325648329845, '해물 하이라이스'), (0.036899325648329845, '콩나물잡채'),\r\n (0.036899325648329845, '양배추 두부찜'), (0.036899325648329845, '무 굴국'), (0.036899325648329845, '깐쇼새우'),\r\n (0.036737394325205806, '탕국'), (0.036737394325205806, '두부전골'), (0.036737394325205806, '두부구이와 김양념장'),\r\n (0.036737394325205806, '굴국밥'), (0.036577602861251314, '해물칼국수'), (0.036577602861251314, '버섯전골'),\r\n (0.036577602861251314, '묵밥'), (0.036577602861251314, '멸치국수'), (0.036577602861251314, '매운탕'),\r\n (0.036577602861251314, '돌나물물김치'), (0.036577602861251314, '계란말이'), (0.03641990430707569, '큐브참치 주먹밥'),\r\n (0.03641990430707569, '케이준샐러드'), (0.03641990430707569, '케이준 치킨샐러드'), (0.03641990430707569, '카레라이스'),\r\n (0.03641990430707569, '참치 야채볶음'), (0.03641990430707569, '우럭매운탕'), (0.03641990430707569, '우럭 매운탕')]\r\n\r\ntemplist = [174, 70, 27, 1, 2, 3] # 테스트 덤프\r\n\r\nprint(main(tempdata,templist))\r\n","sub_path":"source/3. Let's have dinner/health analysis module/3.healthFilter.py","file_name":"3.healthFilter.py","file_ext":"py","file_size_in_byte":15579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"575388256","text":"#!/usr/bin/env python\n\nimport usb.core\nimport usb.util\nimport explorerhat\nimport time\n\nexplorerhat.light.red.on()\n\nUSB_VENDOR = 0x1997 # Rii\nUSB_PRODUCT = 0x2433 # Mini Wireless Keyboard\n\nUSB_IF = 0 # Interface\nUSB_TIMEOUT = 5 # Timeout in MS\n\nBTN_LEFT = 80\nBTN_RIGHT = 79\nBTN_DOWN = 81\nBTN_UP = 82\nBTN_STOP = 44 # Space\nBTN_EXIT = 41 # ESC\n\ndev = usb.core.find(idVendor=USB_VENDOR, idProduct=USB_PRODUCT)\nendpoint = dev[0][(0,0)][0]\n\nif dev.is_kernel_driver_active(USB_IF) is True:\n dev.detach_kernel_driver(USB_IF)\n\nusb.util.claim_interface(dev, USB_IF)\n\nexplorerhat.light.red.off()\nexplorerhat.light.green.on()\n\nwhile True:\n control = None\n try:\n control = dev.read(endpoint.bEndpointAddress, endpoint.wMaxPacketSize, USB_TIMEOUT)\n print(control)\n except:\n pass\n\n if control != None:\n if BTN_DOWN in control:\n explorerhat.motor.backwards()\n\n if BTN_UP in control:\n explorerhat.motor.forwards()\n\n if BTN_LEFT in control:\n explorerhat.motor.two.forwards()\n explorerhat.motor.one.backwards()\n\n if BTN_RIGHT in control:\n explorerhat.motor.two.backwards()\n explorerhat.motor.one.forwards()\n\n if BTN_STOP in control:\n explorerhat.motor.stop()\n\n if BTN_EXIT in control:\n exit()\n\n time.sleep(0.02)\n","sub_path":"keyboard.py","file_name":"keyboard.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"597137391","text":"\"\"\"\nThis provides connectivity to a message broker supporting the STOMP protocol. 
Both protocol\nversions 1.0 and 1.1 are supported.\n\nSee the GITHUB project page for more information.\n\nAuthor: Jason R Briggs\nLicense: http://www.apache.org/licenses/LICENSE-2.0\nProject Page: https://github.com/jasonrbriggs/stomp.py\n\"\"\"\n\nimport os\nimport sys\nsys.path.insert(0, os.path.split(__file__)[0])\n\nimport connect, listener, exception, transport, protocol\n\n__version__ = (4, 0, 2)\nConnection10 = connect.StompConnection10\nConnection11 = connect.StompConnection11\nConnection12 = connect.StompConnection12\nConnection = connect.StompConnection11\nConnectionListener = listener.ConnectionListener\nStatsListener = listener.StatsListener\n","sub_path":"stomp/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"586471523","text":"import libreria\n\ndef AgregarCapital():\n # 1. Pedir capital\n # 2. Pedir pais\n # 3. Guardadr datos en capitales.txt\n capital=libreria.pedir_capital(\"Ingrese capital: \")\n pais=libreria.pedir_nombre(\"ingrese pais:\")\n contenido = capital + \"-\" + pais + \"\\n\"\n libreria.agregar_datos(\"capitales.txt\", contenido,\"a\")\n print(\"se agrego una nueva capital\")\n\ndef MostrarCapital():\n # 1. Abrir el archivo capitales.txt y mostrar sus datos\n datos=libreria.obtener_datos_lista(\"capitales.txt\")\n # 2. Comprobar si hay datos\n if ( datos != \"\"):\n for item in datos:\n capital, pais= item.split(\"-\")\n msg=\" {} es la capital de {}\"\n capital=capital.replace(\"\\n\",\"\")\n pais=pais.replace(\"\\n\",\"\")\n print(msg.format(capital, pais))\n #fin_for\n else:\n print(\"No hay datos\")\n\n\n\nopc=\"\"\nmax=3\nwhile(opc!=max):\n print(\"######## MENU #############\")\n print(\"#1. Agregar Capital #\")\n print(\"#2. Mostrar Capital #\")\n print(\"#3. Salir #\")\n print(\"###########################\")\n opc=libreria.pedir_numero(\"ingrese opcion: \",1,3)\n\n if( opc==1):\n AgregarCapital()\n if(opc==2):\n MostrarCapital()\n #fin if\n#fin while\nprint(\"fin\")","sub_path":"damian/menu3.py","file_name":"menu3.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"70555171","text":"from rest_framework import routers\nfrom django.urls import include, path\n\nfrom . 
import views\nfrom .views import MangaViewSet, ChapterViewSet, PageViewSet, TagViewSet, CreateUserAPIView, ReportViewSet, ShowUserViewSet\n\nfrom rest_framework_simplejwt.views import (\n TokenObtainPairView,\n TokenRefreshView,\n TokenVerifyView\n)\n\nrouter = routers.DefaultRouter()\nrouter.register(r'manga', MangaViewSet, basename='manga')\nrouter.register(r'chapter', ChapterViewSet)\nrouter.register(r'page', PageViewSet)\nrouter.register(r'tag', TagViewSet)\nrouter.register(r'news', ReportViewSet)\nrouter.register(r'show_users', ShowUserViewSet)\n#router.register(r'manga/(?P[0-9]+)', MangaViewSet, basename='manga')\n#router.register(r'manga/', MangaUrlNameViewSet, basename='manga')\n#router.register(r'books/(?P[0-9]+)', MangaUrlNameViewSet, base_name='books')\n\nurlpatterns = [\n #$path('manga/url_name', views.MangaUrlNameViewSet.as_view(), name=mangaFiltered),\n path('userinfo/', views.GetUserInfo.as_view(), name='userinfo'),\n path('token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),\n path('token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),\n path('token/check', TokenVerifyView.as_view(), name='token_checker'),\n path('create/', CreateUserAPIView.as_view())\n]\n\nurlpatterns += router.urls\n\n","sub_path":"backend/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"563748748","text":"from Bio import SeqIO\nfrom matplotlib import pyplot as plt\nfrom collections import defaultdict\nimport numpy as np\nimport math\nfrom ladder_fit import convert_to_bp, convert_to_index, find_lower, find_upper\n\na = 'DATA1'\nb = 'DATA2'\nc = 'DATA3'\nd = 'DATA4'\ne = 'DATA105'\n\nchannels = [a, b, c, d, e]\ndye = [500, 490, 450, 400, 350, 340, 300, 250, 200, 160, 150, 139, 100, 75, 50, 35]\n\nrecord = SeqIO.read('A_COR_12_1_Hos.fsa', 'abi')\ntrace = defaultdict(list)\n\nfor c in channels:\n\ttrace[c] = record.annotations['abif_raw'][c]\n\n# plt.plot(trace[a], color='blue')\n# plt.plot(trace[b], color='red')\n# plt.plot(trace[c], color='green')\n# plt.plot(trace[d], color='yellow')\n# plt.plot(trace[e], color='black')\n\n# plt.show()\n# converted_bp = convert_to_bp(1370, record.annotations['abif_raw'][e], dye)\n# print(converted_bp)\n\n# converted_index = convert_to_index(240.27, record.annotations['abif_raw'][e])\n# print(converted_index)\nalelle = 288\nheight = []\nindex_of_peaks = []\ndata1 = list(record.annotations['abif_raw'][a])\n\nfor c in range(find_lower(record.annotations['abif_raw'][e], dye), find_upper(record.annotations['abif_raw'][e], dye)):\n\tconverted_bp = convert_to_bp(c, record.annotations['abif_raw'][e], dye)\n\tin_range = converted_bp >= alelle - 1 and converted_bp <= alelle + 1\n\tif in_range:\n\t\tindex_of_peaks.append(c)\n\t\theight.append(data1[c])\n\telif converted_bp > alelle:\n\t\tbreak\n\nprint(height)\nprint(index_of_peaks)\nprint(max(height))\nprint(convert_to_bp(1175, record.annotations['abif_raw'][e], dye))\n\n# Make negative values in array zero\n# data_105 = list(record.annotations['abif_raw'][e])\n# i = len(record.annotations['abif_raw'][e])\n\n# for x in range(0, i):\n# \tif data_105[x] < 0:\n# \t\tdata_105[x] = 0\n\n# indexes = findpeaks.findpeaks(data_105, spacing=50, limit=200)\n\n# ind = []\n# i = len(indexes) - 1\n# j = 0\n\n# while i >= 0:\n# \tind.append(indexes[i])\n# \tj += 1\n# \ti -= 1\n\n# alelle = 3453\n\n# for c in range (0, len(ind)-1):\n# \tif alelle > ind[c]:\n# \t\ty_pred = ((alelle - 
ind[c])/((ind[c-1]-ind[c])/(LIZ_500[c-1]-LIZ_500[c]))) + LIZ_500[c]\n# \t\tbreak\n\n# print (y_pred)\n\n# 1\n# 3024 - True(207 bp)\n# 3482 - True(248 bp)\n# 2037 - True(116 bp)\n# 6219 - True(484 bp)\n\n# 2\n# 2068 - True(116 bp)\n# 3077 - True(210 bp)\n# 3096 - True(212 bp)\n# 3629 - True(260 bp)\n\n# 4\n# 2996 - True(199 bp)\n# 3042 - True(203 bp)\n# 3256 - True(222 bp)\n# 3453 - True(240 bp)\n# 3736 - True(264 bp)\n# 6330 - True(484? bp)\n\n# 5\n# ","sub_path":"ian/archive/ladder_script.py","file_name":"ladder_script.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"201088830","text":"from django.db import models\nfrom django.utils import encoding\nfrom pages.models import BaseModule\nfrom pages.models import BasePanel\n\nfrom filer.fields.image import FilerImageField\n\n\nclass FAQModule(BaseModule):\n\t@property\n\tdef module_type(self):\n\t\treturn \"v-style-faq\"\n\n\nclass FAQPanel(BasePanel):\n\thylands_park_content = models.TextField(blank=True)\n\tweston_park_content = models.TextField(blank=True)\n\n\tPANEL_WIDTH_CHOICES = (\n\t\t(4, 4),\n\t)\n\tpanel_width = models.IntegerField(choices=PANEL_WIDTH_CHOICES, default=4)\n\n\tPANEL_TYPE_CHOICES = (\n\t\t(1, 1),\n\t)\n\tpanel_type = models.IntegerField(choices=PANEL_TYPE_CHOICES, default=1)\n\n\tmodule = models.ForeignKey(FAQModule, related_name=\"panel_set\", blank=True, null=True)\n\n\tdef save(self, *args, **kwargs):\n\t\t#self.hylands_park_content = encoding.smart_str(self.hylands_park_content, encoding='ascii', errors='ignore')\n\t\t#self.weston_park_content = encoding.smart_str(self.weston_park_content, encoding='ascii', errors='ignore')\n\t\tif (self.hylands_park_content == \"
\r\n\r\n    \"):\n\t\t\tself.hylands_park_content = \"\"\n\t\tif (self.weston_park_content == \"\r\n\r\n\r\n    \"):\n\t\t\tself.weston_park_content = \"\"\n\t\tsuper(FAQPanel, self).save(*args, **kwargs)","sub_path":"django/vfestival/apps/v_style_faqs/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
relationship)\n\n if objects:\n return self._resolveBrains(brains)\n return brains\n\n\ndef _optimizedQuery(self, uid, indexname, relationship):\n \"\"\"query reference catalog for object matching the info we are\n given, returns brains\n \"\"\"\n if not uid: # pragma: no cover\n return []\n\n _catalog = self._catalog\n indexes = _catalog.indexes\n\n # First get one or multiple record ids for the source/target uid index\n rids = indexes[indexname]._index.get(uid, None)\n if rids is None:\n return []\n elif isinstance(rids, int):\n rids = [rids]\n else:\n rids = list(rids)\n\n # As a second step make sure we only get references of the right type\n # The unindex holds data of the type: [(-311870037, 'relatesTo')]\n # The index holds data like: [('relatesTo', -311870037)]\n if relationship is None:\n result_rids = rids\n else:\n rel_unindex_get = indexes['relationship']._unindex.get\n result_rids = set()\n if isinstance(relationship, str):\n relationship = set([relationship])\n for r in rids:\n rels = rel_unindex_get(r, set())\n if isinstance(rels, str):\n rels = set([rels])\n if len(rels.intersection(relationship)) > 0:\n result_rids.add(r)\n\n # Create brains\n return LazyMap(_catalog.__getitem__,\n list(result_rids), len(result_rids))\n\n\ndef getSourceObject(self):\n return self._optimizedGetObject(self.sourceUID)\n\n\ndef getTargetObject(self):\n return self._optimizedGetObject(self.targetUID)\n\n\ndef _optimizedGetObject(self, uid):\n tool = getToolByName(self, 'uid_catalog', None)\n if tool is None: # pragma: no cover\n return ''\n tool = aq_inner(tool)\n traverse = aq_parent(tool).unrestrictedTraverse\n\n _catalog = tool._catalog\n rids = _catalog.indexes['UID']._index.get(uid, ())\n if isinstance(rids, int):\n rids = (rids, )\n\n for rid in rids:\n path = _catalog.paths[rid]\n obj = traverse(path, default=None)\n if obj is not None:\n return obj\n\n\ndef getRefs(self, relationship=None, targetObject=None):\n \"\"\"get all the referenced objects for this object\"\"\"\n tool = getToolByName(self, 'reference_catalog')\n brains = tool.getReferences(self, relationship, targetObject=targetObject,\n objects=False)\n if brains:\n return [_optimizedGetObject(self, b.targetUID) for b in brains]\n return []\n\n\ndef getBRefs(self, relationship=None, targetObject=None):\n \"\"\"get all the back referenced objects for this object\"\"\"\n tool = getToolByName(self, 'reference_catalog')\n brains = tool.getBackReferences(self, relationship,\n targetObject=targetObject, objects=False)\n if brains:\n return [_optimizedGetObject(self, b.sourceUID) for b in brains]\n return []\n\n\ndef apply():\n from Products.Archetypes.ReferenceEngine import ReferenceCatalog as rc\n\n rc._old_getReferences = rc.getReferences\n rc.getReferences = getReferences\n rc._old_getBackReferences = rc.getBackReferences\n rc.getBackReferences = getBackReferences\n rc._optimizedReferences = _optimizedReferences\n rc._optimizedQuery = _optimizedQuery\n\n from Products.Archetypes.ReferenceEngine import Reference as rf\n\n rf._old_getTargetObject = rf.getTargetObject\n rf.getTargetObject = getTargetObject\n rf._old_getSourceObject = rf.getSourceObject\n rf.getSourceObject = getSourceObject\n rf._optimizedGetObject = _optimizedGetObject\n\n from Products.Archetypes.Referenceable import Referenceable as ra\n\n ra._old_getRefs = ra.getRefs\n ra.getRefs = getRefs\n ra.getReferences = getRefs\n ra._old_getBRefs = ra.getBRefs\n ra.getBRefs = getBRefs\n ra.getBackReferences = 
getBRefs\n","sub_path":"experimental/atrefspeedup/patches.py","file_name":"patches.py","file_ext":"py","file_size_in_byte":5142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"9032773","text":"from ggame import App, RectangleAsset, ImageAsset, SoundAsset, TextAsset\nfrom ggame import LineStyle, Color, Sprite, Sound, Frame\n\nSCREEN_WIDTH = 1420\nSCREEN_HEIGHT = 810\nblack=Color(0x000000, 1.0)\nblue = Color(0x0000ff, 1.0)\nedge=LineStyle(1,black)\nbackground_asset4=TextAsset(\"Game Over.\", align='center', style='200px Arial', width=2000 )\nbackground_asset5=TextAsset(\"Press 'Return' to restart.\", align='center', style='40px Arial', width=1000)\nbackground_asset6=TextAsset(\"Congrats, You Won\", align='center', style='200px Arial', width=1300 )\nbackground4=Sprite(background_asset4, (200,0))\nbackground5=Sprite(background_asset5, (600,600))\nbackground6=Sprite(background_asset6, (200,0))\nbackground_asset1=RectangleAsset(1220,650, edge, blue)\nbackground_asset2=ImageAsset(\"images/Green.png\",)\nbackground_asset3=RectangleAsset(1420,810,edge, black)\nbackground1=Sprite(background_asset1, (70,30))\nbackground2=Sprite(background_asset2, (0,0))\nbackground3=Sprite(background_asset3, (0,0))\ncastle_asset = ImageAsset(\"images/castleyeah.png\",)\nfactory_asset = ImageAsset(\"images/Factory.png\",)\nfactory=Sprite(factory_asset,(100,100))\nfactory.scale=.25\npotato_asset = ImageAsset(\"images/door.jpg\",)\ncastle= Sprite(castle_asset, (850,200))\ncastle.scale=.1\ncastle.fxcenter = castle.fycenter = 0.5\nclass FactoryFloor(Sprite):\n factoryflr= floor_asset=ImageAsset(\"images/stonefloor.jpg\",)\n def __init__(self, position):\n super().__init__(FactoryFloor.factoryflr, position)\n self.scale=.15\n self.fxcenter = self.fycenter = 0.5\ndoes=[]\nfor x in range (0,14):\n does.append(FactoryFloor((1200-x*76,600)))\nfor x in range (0,14):\n does.append(FactoryFloor((1200-x*76,524)))\nfor x in range (0,6):\n does.append(FactoryFloor((212,448-76*x)))\nfor x in range (0,6):\n does.append(FactoryFloor((288,448-76*x)))\nfor x in range (0,12):\n does.append(FactoryFloor((364+76*x,68)))\nfor x in range (0,12):\n does.append(FactoryFloor((364+76*x,144)))\nspaceship_asset = ImageAsset(\"images/four_spaceship_by_albertov_with_thrust.png\", \n Frame(227,0,292-227,125), 4, 'vertical')\nspaceship = Sprite(spaceship_asset, (200, 200))\nspaceship.fxcenter = spaceship.fycenter = 0.5\nspaceship.scale=.6\n\nclass Wall1(Sprite):\n asset= wall_asset=ImageAsset(\"images/wall.png\",)\n def __init__(self, position):\n super().__init__(Wall1.asset, position)\n self.scale=.3\n self.fxcenter = self.fycenter = 0.5\n \nclass Wall2(Sprite):\n asset= wall_asset=ImageAsset(\"images/wall.png\",)\n def __init__(self, position):\n super().__init__(Wall2.asset, position)\n self.scale=.3\n self.fxcenter = self.fycenter = 0.5\n self.rotation=(3.14159265358979/2)\n\nuno=[]\nfor x in range(0,14):\n uno.append(Wall1((112+x*88,672)))\nfor x in range(0,14):\n uno.append(Wall1((112+x*88, 30)))\nfor x in range(0,7):\n uno.append(Wall2((81,87+x*88)))\nfor x in range(0,7):\n uno.append(Wall2((1287,87+x*88)))\nprint(uno)\npotato= Sprite(potato_asset, (300,675))\npotato.scale=.05\npotato.fxcenter = potato.fycenter = 0.5\nchips_asset=ImageAsset(\"images/dipsiedoodles.png\",)\nchips=Sprite(chips_asset, (1150,100))\nchips.scale=.2\n# Movement\nsun_asset = ImageAsset(\"images/sun.png\",)\nsun=Sprite(sun_asset, (1150, 500))\n#sun.scale=.5\nsun.center=.5\nspaceship.dir = 
3\nspaceship.bob=3\nspaceship.go = False\nspaceship.ygo= False\nspaceship.thrust = 0\nspaceship.thrustframe = 1\nbackground1.visible=True\nbackground2.visible=False\ncastle.visible=False\npotato.visible= True\nfactory.visible=False\nsun.visible=False\nbackground3.visible=False\nchips.visible=False\nwinning=False\nbackground4.visible=False\nbackground5.visible=False\nreset=False\nwon=False\nbackground6.visible=False\ndef tab(b):\n global reset\n if spaceship.visible==False:\n spaceship.visible=True\n reset=True\n print(\"working\")\n spaceship.x=400\n spaceship.y=300\ndef left(b):\n spaceship.dir=-4\ndef right(b):\n spaceship.dir=4\ndef up(b):\n spaceship.bob=-4\ndef down(b):\n spaceship.bob=4\ndef step():\n global reset\n global winning\n global won\n if reset==True:\n print('wub')\n background1.visible=True\n potato.visible=True\n for x in uno:\n x.visible=True\n background4.visible=False\n background5.visible=False\n background6.visible=False\n reset=False\n winning=False\n won=False\n print(\"oh god why\")\n if background1.visible==True and winning==True:\n spaceship.visible=False\n background6.visible=True\n background5.visible=True\n won=True\n if background1.visible==True:\n for x in does:\n if x.visible==True:\n x.visible=False\n if spaceship.visible==False:\n background2.visible=False\n background1.visible=False\n background3.visible=False\n background5.visible=True\n background4.visible=True\n if won==False:\n background4.visible=True\n else:\n background4.visible=False\n castle.visible=False\n potato.visible=False\n factory.visible=False\n sun.visible=False\n spaceship.x=1050\n spaceship.y=550\n chips.visible=False\n for x in uno:\n x.visible=False\n for x in does:\n x.visible=False\n if spaceship.collidingWith(chips) and chips.visible==True:\n sun.visible=True\n winning=True\n chips.visible=False\n if spaceship.collidingWith(sun) and sun.visible==True:\n background2.visible=True\n background3.visible=False\n castle.visible=True\n factory.visible=True\n sun.visible=False\n spaceship.x=100\n spaceship.y=300\n for x in does:\n x.visible=False\n if spaceship.collidingWith(factory) and castle.visible==True:\n background2.visible=False\n background1.visible=False\n background3.visible=True\n castle.visible=False\n potato.visible=False\n factory.visible=False\n sun.visible=False\n spaceship.x=1000\n spaceship.y=550\n chips.visible=True\n for x in uno:\n x.visible=False\n for x in does:\n x.visible=True\n if spaceship.collidingWith(castle) and castle.visible==True:\n background2.visible=False\n background1.visible=True\n castle.visible=False\n potato.visible=True\n spaceship.x=300\n spaceship.y=480\n factory.visible=False\n for x in uno:\n x.visible=True\n if spaceship.collidingWith(potato) and potato.visible==True:\n background2.visible=True\n background1.visible=False\n castle.visible =True\n potato.visible=False\n factory.visible=True\n spaceship.x=850\n spaceship.y=330\n for x in uno:\n x.visible=False\n reset=False\n if spaceship.go:\n spaceship.x += spaceship.dir\n if spaceship.x + spaceship.width > SCREEN_WIDTH:\n spaceship.x -= spaceship.dir\n if background3.visible==True: \n if spaceship.x<1300 and spaceship.x>400:\n if spaceship.y<800 and spaceship.y>400:\n spaceship.x+=spaceship.dir\n if spaceship.y<150 and spaceship.y:\n spaceship.x+=spaceship.dir\n if spaceship.y>150 and spaceship.y<520:\n spaceship.visible=False\n print(\"1\")\n if spaceship.x<320 and spaceship.x>250:\n if spaceship.y<800 and spaceship.y>30:\n spaceship.x+=spaceship.dir\n else:\n spaceship.visible=False\n 
print(\"2\")\n if spaceship.y<400 and spaceship.y>30:\n if spaceship.x<320 and spaceship.x>200:\n spaceship.x+=spaceship.dir\n if spaceship.x>320 and spaceship.x<200:\n spaceship.visible=False\n print(\"3\")\n if spaceship.x>1200:\n spaceship.visible=False\n print(\"4\")\n if spaceship.x<210:\n spaceship.visible=False\n print(\"5\")\n if spaceship.y>605:\n spaceship.visible=False\n print(\"6\")\n if spaceship.x +spaceship.width > 1280 and potato.visible==True:\n spaceship.x -= spaceship.dir\n if spaceship.x < 153 and potato.visible==True:\n spaceship.x -= spaceship.dir\n if spaceship.x < 60:\n spaceship.x -= spaceship.dir\n if spaceship.thrust == 1:\n spaceship.setImage(spaceship.thrustframe)\n spaceship.thrustframe += 1\n if spaceship.thrustframe == 4:\n spaceship.thrustframe = 1\n if spaceship.thrust == 0:\n spaceship.setImage(0)\n ystep()\n \ndef ystep():\n if spaceship.ygo:\n spaceship.y += spaceship.bob\n if spaceship.y +spaceship.height > SCREEN_HEIGHT+60:\n spaceship.y -= spaceship.bob\n spaceship.rotation=0\n if background3.visible==True: \n if spaceship.x<1300 and spaceship.x>400:\n if spaceship.y<605 and spaceship.y>520:\n spaceship.y+=spaceship.bob\n if spaceship.y>50 and spaceship.y<150:\n spaceship.y+=spaceship.bob\n if spaceship.y>150 and spaceship.y<520:\n spaceship.visible=False\n print(\"7\")\n if spaceship.x<320 and spaceship.x>180:\n if spaceship.y<800 and spaceship.y>30:\n spaceship.y+=spaceship.bob\n else:\n spaceship.visible=False\n print(\"8\")\n if spaceship.x>1200:\n spaceship.visible=False\n print(\"9\")\n if spaceship.x<210:\n spaceship.visible=False\n print(\"10\")\n if spaceship.y>605:\n spaceship.visible=False\n print(\"11\")\n if spaceship.y +spaceship.height > 722 and potato.visible==True:\n spaceship.y -= spaceship.bob\n if spaceship.y < 104 and potato.visible==True:\n spaceship.y-=spaceship.bob\n if spaceship.y < 60:\n spaceship.y -= spaceship.bob\n if spaceship.thrust == 1:\n spaceship.setImage(spaceship.thrustframe)\n spaceship.thrustframe += 1\n if spaceship.thrustframe == 4:\n spaceship.thrustframe = 1\n if spaceship.thrust == 0:\n spaceship.setImage(0)\n\ndef leftKey(event):\n spaceship.go = True\n spaceship.ygo= False\n spaceship.thrust = 1\n spaceship.rotation=(3.141592653589793238462643383/2)\n left(spaceship)\ndef leftUp(event):\n spaceship.go = False\n spaceship.ygo= False\n spaceship.thrust = 1\n left(spaceship)\n \n\ndef rightKey(event):\n spaceship.go = True\n spaceship.ygo=False\n spaceship.thrust = 1\n spaceship.rotation=(3.141592653589793238462643383*3)/2\n right(spaceship)\ndef rightUp(event):\n spaceship.go = False\n spaceship.ygo= False\n spaceship.thrust = 1\n right(spaceship)\n \n\ndef upKey(event):\n spaceship.ygo = True\n spaceship.go=False\n spaceship.thrust = 1\n spaceship.rotation=0\n up(spaceship)\ndef upUp(event):\n spaceship.go = False\n spaceship.ygo= False\n spaceship.thrust = 1\n up(spaceship)\n \n \ndef downKey (event):\n spaceship.ygo = True\n spaceship.go = False\n spaceship.thrust = 1\n spaceship.rotation=3.141592653589793238462643383\n down(spaceship)\ndef downUp(event):\n spaceship.go = False\n spaceship.ygo= False\n spaceship.thrust = 1\n down(spaceship)\n\ndef returnDown(event):\n tab(spaceship)\n\nmyapp = App(SCREEN_WIDTH, SCREEN_HEIGHT)\nmyapp.listenKeyEvent('keydown', 'a', leftKey)\nmyapp.listenKeyEvent('keyup', 'a', leftUp)\nmyapp.listenKeyEvent('keydown', 'd', rightKey)\nmyapp.listenKeyEvent('keyup', 'd', rightUp)\nmyapp.listenKeyEvent('keydown', 'w', upKey)\nmyapp.listenKeyEvent('keyup', 'w', 
upUp)\nmyapp.listenKeyEvent('keydown', 's', downKey)\nmyapp.listenKeyEvent('keyup', 's', downUp)\nmyapp.listenKeyEvent('keydown', 'tab', returnDown)\nmyapp.run(step)\n\n\n","sub_path":"WubAndaHalf.py","file_name":"WubAndaHalf.py","file_ext":"py","file_size_in_byte":12497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"200585702","text":"import csv\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom plot import printall, printmelody\nimport sklearn.metrics as me\nimport math\n\n\ndef get545():\n # k545 input\n k545 = np.array([[0, 0, 0, 0]])\n # print(k545)\n # Read in and grasp useful info\n with open('k545.csv', 'r', newline='') as f:\n reader = csv.reader(f, delimiter=',')\n for row in reader:\n nprow = np.asarray(row)\n if nprow[2] == ' Note_on_c':\n # or nprow[2] == ' Note_off_c':\n # print(nprow)\n useful_row = np.array(\n [[round((int(nprow[1]) - 1536) / 96), int(nprow[3]), int(nprow[4]), int(nprow[5])]])\n # print(useful_row)\n k545 = np.concatenate((k545, useful_row), axis=0)\n\n k545 = k545[1:]\n # print(k545)\n\n # Sort by time\n k545 = k545[k545[:, 0].argsort()]\n # print(k545)\n\n empty_roll = np.array([[0, 0, 0, 0, 0]])\n pianoroll_r = empty_roll # Five tracks for right hand\n pianoroll_l = empty_roll\n i = 0\n for j in range(2328):\n new_r_roll = np.copy(empty_roll)\n new_l_roll = np.copy(empty_roll)\n l_i = 0 # left hand index\n r_i = 0 # right hand index\n while i < len(k545) and k545[i][0] == j:\n if not k545[i][1]: # right hand\n new_r_roll[0][r_i] = k545[i][2]\n r_i += 1\n else: # left hand\n # print(i, l_i)\n new_l_roll[0][l_i] = k545[i][2]\n l_i += 1\n i += 1\n new_r_roll.sort()\n new_l_roll.sort()\n new_r_roll = np.fliplr(new_r_roll)\n new_l_roll = np.fliplr(new_l_roll)\n # print(j, new_roll)\n pianoroll_r = np.concatenate((pianoroll_r, new_r_roll), axis=0)\n pianoroll_l = np.concatenate((pianoroll_l, new_l_roll), axis=0)\n\n # Get clean data\n pianoroll_r = pianoroll_r[1:]\n pianoroll_l = pianoroll_l[1:]\n # print(pianoroll_r, pianoroll_l)\n # print(np.shape(pianoroll_r), np.shape(pianoroll_l))\n T, _ = np.shape(pianoroll_r) # Number of time steps\n\n T1 = 320 # plot time -- discrete\n # printall(pianoroll_r, pianoroll_l, T, T1)\n\n # Make it conti's\n for i in range(1, T):\n if (pianoroll_l[i] == empty_roll).all():\n pianoroll_l[i] = pianoroll_l[i - 1]\n if (pianoroll_r[i] == empty_roll).all():\n pianoroll_r[i] = pianoroll_r[i - 1]\n\n # print(pianoroll_l, pianoroll_r)\n\n # printall(pianoroll_r, pianoroll_l, T, T1)\n\n # Get melody\n M = pianoroll_r[:, 0]\n # printmelody(M)\n return M\n\n\ndef kfold545(K, N):\n \"\"\"K --- K fold cross validation; N --- Nth fold\"\"\"\n T = 2328\n t = int(T / K)\n M = get545()\n if N == 0:\n # first fold\n return M[t:], M[:t - 1]\n if N == K - 1:\n # last fold\n return M[:t * N - 1], M[t * N:]\n train = np.concatenate((M[:t * N - 1], M[t * (N + 1):]), axis=0)\n test = M[t * N: t * (N + 1) - 1]\n return train, test\n","sub_path":"k545.py","file_name":"k545.py","file_ext":"py","file_size_in_byte":3003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"465652726","text":"# coding:utf-8\r\nfrom RemoteCreditSystem import db,app\r\nfrom RemoteCreditSystem.config import logger\r\nimport RemoteCreditSystem.helpers as helpers\r\nimport datetime\r\nimport json\r\n\r\nfrom flask import Module, session, request, render_template, redirect, url_for, flash\r\nfrom flask.ext.login import current_user\r\n\r\nfrom 
RemoteCreditSystem.models import Rcs_Parameter_Tree\r\nfrom RemoteCreditSystem.models import Rcs_Parameter_Select\r\n\r\n\r\n# 模型参数管理(道德品质)\r\n@app.route('/parameter/model_ddpz', methods=['GET'])\r\ndef model_ddpz():\r\n return render_template(\"parameter/model_parameter_ddpz.html\")\r\n\r\n# 模型参数管理(生活状况)\r\n@app.route('/parameter/model_shzk', methods=['GET'])\r\ndef model_shzk():\r\n return render_template(\"parameter/model_parameter_shzk.html\")\r\n\r\n# 模型参数管理(经营状况)\r\n@app.route('/parameter/model_jyzk', methods=['GET'])\r\ndef model_jyzk():\r\n return render_template(\"parameter/model_parameter_jyzk.html\")\r\n\r\n#左侧加载树\r\n@app.route('/parameter/show_tree/', methods=['POST'])\r\ndef show_tree(param_type):\r\n\torgs = Rcs_Parameter_Tree.query.filter(\"param_type='\"+param_type+\"' and pid is null\").order_by(\"id\").all()\r\n\torgs_list = []\r\n\torgs_list+=orgs\r\n\tif orgs:\r\n\t\tfor obj in orgs:\r\n\t\t sql = \"FIND_IN_SET(id ,getParamList('\"+str(obj.id)+\"')) and create_user=\"+str(current_user.id)\r\n\t\t orgs_list += Rcs_Parameter_Tree.query.filter(sql).order_by(\"id\").all()\r\n\torgs_list = list(set(orgs_list))\r\n\tfor obj in orgs_list:\r\n\t\tobj.open = 1\r\n\torgs_json = helpers.show_result_content(list(set(orgs_list)))\r\n\torgs_json_obj = json.loads(orgs_json)\r\n\treturn json.dumps(orgs_json_obj)# 返回json\r\n\r\n#右边显示列表\r\n@app.route('/parameter/show_row/', methods=['GET'])\r\ndef get_project_docs(p_id):\r\n\tselect = Rcs_Parameter_Select.query.filter_by(tree_id=p_id).order_by(\"id\").all()\r\n\tif select:\r\n\t\tfor obj in select:\r\n\t\t\tobj.style = 2\r\n\t\treturn helpers.show_result_content(select) # 返回json\r\n\tparam = Rcs_Parameter_Tree.query.filter_by(pId=p_id,create_user=current_user.id).order_by(\"id\").all()\r\n\tfor obj in param:\r\n\t\tobj.style = 1\r\n\treturn helpers.show_result_content(param) # 返回json\r\n\r\n#新增模型项页面\r\n@app.route('/parameter/add_tree/', methods=['GET'])\r\ndef add_tree(p_id):\r\n\r\n\treturn render_template(\"parameter/model_tree_add.html\",p_id=p_id)\r\n\r\n#新增模型项页面save\r\n@app.route('/parameter/add_tree_save/', methods=['POST'])\r\ndef add_tree_save(p_id):\r\n\r\n\ttry:\r\n\t\tname = request.form[\"name\"]\r\n\t\tweight = request.form[\"weight\"]\r\n\t\ttree = Rcs_Parameter_Tree.query.filter_by(id=p_id).first()\r\n\t\tRcs_Parameter_Tree(tree.param_type,name,p_id,weight,int(tree.level_type)+1).add()\r\n\t\tdb.session.commit()\r\n\t\t# 消息闪现\r\n\t\tflash('保存成功','success')\r\n\texcept:\r\n\t # 回滚\r\n\t db.session.rollback()\r\n\t logger.exception('exception')\r\n\t # 消息闪现\r\n\t flash('保存失败','error')\r\n\r\n\treturn redirect(\"/parameter/model_\"+tree.param_type)\r\n\r\n#修改模型项页面\r\n@app.route('/parameter/edit_tree/', methods=['GET'])\r\ndef edit_tree(p_id):\r\n\ttree = Rcs_Parameter_Tree.query.filter_by(id=p_id).first()\r\n\treturn render_template(\"parameter/model_tree_edit.html\",tree=tree)\r\n\r\n#修改模型项页面save\r\n@app.route('/parameter/edit_tree_save/', methods=['POST'])\r\ndef edit_tree_save(p_id):\r\n\ttry:\r\n\t\tname = request.form[\"name\"]\r\n\t\tweight = request.form[\"weight\"]\r\n\t\ttree = Rcs_Parameter_Tree.query.filter_by(id=p_id).first()\r\n\t\ttree.name = name\r\n\t\ttree.weight = weight\r\n\t\tdb.session.commit()\r\n\t\t# 消息闪现\r\n\t\tflash('保存成功','success')\r\n\texcept:\r\n\t # 回滚\r\n\t db.session.rollback()\r\n\t logger.exception('exception')\r\n\t # 消息闪现\r\n\t flash('保存失败','error')\r\n\treturn redirect(\"/parameter/model_\"+tree.param_type)\r\n\r\n#新增模型值页面\r\n@app.route('/parameter/add_select/', methods=['GET'])\r\ndef 
add_select(p_id):\r\n\r\n\treturn render_template(\"parameter/model_select_add.html\",p_id=p_id)\r\n\r\n#新增模型值页面save\r\n@app.route('/parameter/add_select_save/', methods=['POST'])\r\ndef add_select_save(p_id):\r\n\ttry:\r\n\t\ttree = Rcs_Parameter_Tree.query.filter_by(id=p_id).first()\r\n\t\tname = request.form[\"name\"]\r\n\t\tscore = request.form[\"score\"]\r\n\t\tRcs_Parameter_Select(p_id,name,score).add()\r\n\t\tdb.session.commit()\r\n\t\t# 消息闪现\r\n\t\tflash('保存成功','success')\r\n\texcept:\r\n\t # 回滚\r\n\t db.session.rollback()\r\n\t logger.exception('exception')\r\n\t # 消息闪现\r\n\t flash('保存失败','error')\r\n\treturn redirect(\"/parameter/model_\"+tree.param_type)\r\n\r\n#修改模型值页面\r\n@app.route('/parameter/edit_select/', methods=['GET'])\r\ndef edit_select(p_id):\r\n\tselect = Rcs_Parameter_Select.query.filter_by(id=p_id).first()\r\n\treturn render_template(\"parameter/model_select_edit.html\",select=select)\r\n\r\n#修改模型值页面save\r\n@app.route('/parameter/edit_select_save/', methods=['POST'])\r\ndef edit_select_save(p_id):\r\n\t\r\n\tname = request.form[\"name\"]\r\n\tscore = request.form[\"score\"]\r\n\tselect = Rcs_Parameter_Select.query.filter_by(id=p_id).first()\r\n\tselect.name = name\r\n\tselect.score = score\r\n\ttree = Rcs_Parameter_Tree.query.filter_by(id=select.tree_id).first()\r\n\ttry:\r\n\t\tdb.session.commit()\r\n\t\t# 消息闪现\r\n\t\tflash('保存成功','success')\r\n\texcept:\r\n\t # 回滚\r\n\t db.session.rollback()\r\n\t logger.exception('exception')\r\n\t # 消息闪现\r\n\t flash('保存失败','error')\r\n\treturn redirect(\"/parameter/model_\"+tree.param_type)\r\n\r\n#判断是否存在子节点\r\n@app.route('/parameter/autoChild/', methods=['GET'])\r\ndef autoChild(p_id):\r\n\ttree = Rcs_Parameter_Tree.query.filter_by(pId=p_id).all()\r\n\tif tree:\r\n\t\treturn \"false\"\r\n\telse:\r\n\t\ttry:\r\n\t\t\tRcs_Parameter_Tree.query.filter_by(id=p_id).delete()\r\n\t\t\tdb.session.commit()\r\n\t\t\t# 消息闪现\r\n\t\t\tflash('保存成功','success')\r\n\t\texcept:\r\n\t\t # 回滚\r\n\t\t db.session.rollback()\r\n\t\t logger.exception('exception')\r\n\t\t # 消息闪现\r\n\t\t flash('保存失败','error')\r\n\t\treturn \"true\"\r\n","sub_path":"RemoteCreditSystem/views/parameter/rcs_parameter - 多用户_废弃.py","file_name":"rcs_parameter - 多用户_废弃.py","file_ext":"py","file_size_in_byte":6015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"536638250","text":"# -*- coding: utf-8 -*-\n\nimport message as msg\n\nclass ProjectVariables(object):\n \"\"\"\n ProjectVariables : defines all the variables to be used by the project\n \"\"\"\n out_deb_x86 = None\n out_deb_x64 = None\n out_rel_x86 = None\n out_rel_x64 = None\n out_deb = False\n out_rel = False\n\n def __init__(self, data):\n self.cmake = data['cmake']\n self.tree = data['vcxproj']['tree']\n self.ns = data['vcxproj']['ns']\n self.output = data['cmake_output']\n\n def define_variable(self):\n \"\"\"\n Variable : define main variables in CMakeLists.\n \"\"\"\n ProjectVariables.out_deb_x86 = self.tree.find(\n '//ns:PropertyGroup[@Condition=\"\\'$(Configuration)|$(Platform)\\'==\\'Debug|Win32\\'\"]/ns:OutDir',\n namespaces=self.ns)\n if ProjectVariables.out_deb_x86 is None:\n ProjectVariables.out_deb_x86 = self.tree.find(\n '//ns:PropertyGroup/ns:OutDir[@Condition=\"\\'$(Configuration)|$(Platform)\\'==\\'Debug|Win32\\'\"]',\n namespaces=self.ns)\n ProjectVariables.out_deb_x64 = self.tree.find(\n '//ns:PropertyGroup[@Condition=\"\\'$(Configuration)|$(Platform)\\'==\\'Debug|x64\\'\"]/ns:OutDir',\n namespaces=self.ns)\n if ProjectVariables.out_deb_x64 is 
None:\n ProjectVariables.out_deb_x64 = self.tree.find(\n '//ns:PropertyGroup/ns:OutDir[@Condition=\"\\'$(Configuration)|$(Platform)\\'==\\'Debug|x64\\'\"]',\n namespaces=self.ns)\n ProjectVariables.out_rel_x86 = self.tree.find(\n '//ns:PropertyGroup[@Condition=\"\\'$(Configuration)|$(Platform)\\'==\\'Release|Win32\\'\"]/ns:OutDir',\n namespaces=self.ns)\n if ProjectVariables.out_rel_x86 is None:\n ProjectVariables.out_rel_x86 = self.tree.find(\n '//ns:PropertyGroup/ns:OutDir[@Condition=\"\\'$(Configuration)|$(Platform)\\'==\\'Release|Win32\\'\"]',\n namespaces=self.ns)\n ProjectVariables.out_rel_x64 = self.tree.find(\n '//ns:PropertyGroup[@Condition=\"\\'$(Configuration)|$(Platform)\\'==\\'Release|x64\\'\"]/ns:OutDir',\n namespaces=self.ns)\n if ProjectVariables.out_rel_x64 is None:\n ProjectVariables.out_rel_x64 = self.tree.find(\n '//ns:PropertyGroup/ns:OutDir[@Condition=\"\\'$(Configuration)|$(Platform)\\'==\\'Release|x64\\'\"]',\n namespaces=self.ns)\n\n # CMake Minimum required.\n self.cmake.write('cmake_minimum_required(VERSION 3.0.0 FATAL_ERROR)\\n\\n')\n\n # Project Name\n projectname = self.tree.xpath('//ns:RootNamespace', namespaces=self.ns)[0]\n self.cmake.write('################### Variables. ####################\\n'\n '# Change if you want modify path or other values. #\\n'\n '###################################################\\n\\n')\n self.cmake.write('set(PROJECT_NAME ' + projectname.text + ')\\n')\n\n # Output DIR of artefacts\n self.cmake.write('# Output Variables\\n')\n output_deb_x86 = ''\n output_deb_x64 = ''\n output_rel_x86 = ''\n output_rel_x64 = ''\n if self.output is None:\n if ProjectVariables.out_deb_x86 is not None:\n output_deb_x86 = ProjectVariables.out_deb_x86.text.replace('$(ProjectDir)', '').replace('\\\\', '/')\n if ProjectVariables.out_deb_x64 is not None:\n output_deb_x64 = ProjectVariables.out_deb_x64.text.replace('$(ProjectDir)', '').replace('\\\\', '/')\n if ProjectVariables.out_rel_x86 is not None:\n output_rel_x86 = ProjectVariables.out_rel_x86.text.replace('$(ProjectDir)', '').replace('\\\\', '/')\n if ProjectVariables.out_rel_x64 is not None:\n output_rel_x64 = ProjectVariables.out_rel_x64.text.replace('$(ProjectDir)', '').replace('\\\\', '/')\n elif self.output:\n if self.output[-1:] == '/' or self.output[-1:] == '\\\\':\n build_type = '${CMAKE_BUILD_TYPE}'\n else:\n build_type = '/${CMAKE_BUILD_TYPE}'\n output_deb_x86 = self.output + build_type\n output_deb_x64 = self.output + build_type\n output_rel_x86 = self.output + build_type\n output_rel_x64 = self.output + build_type\n else:\n output_deb_x86 = ''\n output_deb_x64 = ''\n output_rel_x86 = ''\n output_rel_x64 = ''\n\n if output_deb_x64 != '':\n msg.send('Output Debug = ' + output_deb_x64, 'ok')\n self.cmake.write('set(OUTPUT_DEBUG ' + output_deb_x64 + ')\\n')\n ProjectVariables.out_deb = True\n elif output_deb_x86 != '':\n msg.send('Output Debug = ' + output_deb_x86, 'ok')\n self.cmake.write('set(OUTPUT_DEBUG ' + output_deb_x86 + ')\\n')\n ProjectVariables.out_deb = True\n else:\n msg.send('No Output Debug define.', '')\n\n if output_rel_x64 != '':\n msg.send('Output Release = ' + output_rel_x64, 'ok')\n self.cmake.write('set(OUTPUT_REL ' + output_rel_x64 + ')\\n')\n ProjectVariables.out_rel = True\n elif output_rel_x86 != '':\n msg.send('Output Release = ' + output_rel_x86, 'ok')\n self.cmake.write('set(OUTPUT_REL ' + output_rel_x86 + ')\\n')\n ProjectVariables.out_rel = True\n else:\n msg.send('No Output Release define.', '')\n\n def define_project(self):\n \"\"\"\n Define Cmake 
Project\n \"\"\"\n # Project Definition\n self.cmake.write('\\n')\n self.cmake.write('############## Define Project. ###############\\n'\n '# ---- This the main options of project ---- #\\n'\n '##############################################\\n\\n')\n self.cmake.write('project(${PROJECT_NAME} CXX)\\n\\n')\n\n def define_target(self):\n \"\"\"\n Define target release if not define.\n \"\"\"\n self.cmake.write('# Define Release by default.\\n'\n 'if(NOT CMAKE_BUILD_TYPE)\\n'\n ' set(CMAKE_BUILD_TYPE \"Release\")\\n'\n ' message(STATUS \"Build type not specified: defaulting to release.\")\\n'\n 'endif(NOT CMAKE_BUILD_TYPE)\\n\\n')\n\n def write_output(self):\n \"\"\"\n Set output for each target\n \"\"\"\n if ProjectVariables.out_deb or ProjectVariables.out_rel:\n self.cmake.write('############## Artefacts Output #################\\n')\n self.cmake.write('# Defines outputs , depending Debug or Release. #\\n')\n self.cmake.write('#################################################\\n\\n')\n if ProjectVariables.out_deb:\n self.cmake.write('if(CMAKE_BUILD_TYPE STREQUAL \"Debug\")\\n')\n self.cmake.write(' set(CMAKE_LIBRARY_OUTPUT_DIRECTORY \"${CMAKE_BINARY_DIR}/${OUTPUT_DEBUG}\")\\n')\n self.cmake.write(' set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY \"${CMAKE_BINARY_DIR}/${OUTPUT_DEBUG}\")\\n')\n self.cmake.write(' set(CMAKE_EXECUTABLE_OUTPUT_DIRECTORY \"${CMAKE_BINARY_DIR}/${OUTPUT_DEBUG}\")\\n')\n if ProjectVariables.out_rel:\n self.cmake.write('else()\\n')\n self.cmake.write(' set(CMAKE_LIBRARY_OUTPUT_DIRECTORY \"${CMAKE_BINARY_DIR}/${OUTPUT_REL}\")\\n')\n self.cmake.write(' set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY \"${CMAKE_BINARY_DIR}/${OUTPUT_REL}\")\\n')\n self.cmake.write(' set(CMAKE_EXECUTABLE_OUTPUT_DIRECTORY \"${CMAKE_BINARY_DIR}/${OUTPUT_REL}\")\\n')\n self.cmake.write('endif()\\n\\n')\n else:\n msg.send('No Output found or define. 
CMake will use default ouputs.', 'warn')\n","sub_path":"projectvariables.py","file_name":"projectvariables.py","file_ext":"py","file_size_in_byte":7758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"418648397","text":"from qiskit import QuantumCircuit\nfrom qiskit.circuit.library import RZXGate\nfrom qiskit.pulse import Schedule\nfrom qiskit import *\n\nqc_cal = QuantumCircuit(2)\nqc_cal.rzx(0.5, 0, 1)\nqc_cal.add_calibration(RZXGate, (0, 1), params=[0.5], schedule=Schedule())\n\nqc_cal = transpile(qc_cal, backend)\nprint(qc_cal.calibrations)\n\nqc = QuantumCircuit(2)\nqc.h(0)\n\nnew_circ_cal_on_lhs = qc_cal + qc\nprint(new_circ_cal_on_lhs.calibrations) #calibration information is lost\n\nnew_circ_cal_on_rhs = qc +qc_cal\nprint(new_circ_cal_on_rhs.calibrations) #calibration information is kept","sub_path":"Terra_3/Test_4/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"190394398","text":"#!/usr/bin/python\n\nimport os\n\nbuild_file = '''\napply plugin: 'java'\n\next.versions = [\n logback: \"1.0.13\",\n junit: \"4.11\",\n hamcrest: \"1.3\"\n]\n\nrepositories {\n mavenCentral()\n}\n\ndependencies {\n compile group: \"ch.qos.logback\", name: \"logback-classic\", version: versions.logback\n\n testCompile group: \"junit\", name: \"junit\", version: versions.junit\n testCompile group: \"org.hamcrest\", name: \"hamcrest-all\", version: versions.hamcrest\n}\n\ntask wrapper(type: Wrapper) {\n gradleVersion = '1.10'\n}\n\ntasks.withType(Compile) {\n options.encoding = 'UTF-8'\n}\n\ntasks.withType(Test) {\n systemProperties = System.getProperties()\n testLogging.showStandardStreams = true\n}\n\n/*\nOft-used, cut and past-ready stuff.\njersey: \"1.17\"\ntestCompile \"com.sun.jersey:jersey-core:${versions.jersey}\"\ntestCompile \"com.sun.jersey:jersey-json:${versions.jersey}\"\ntestCompile \"com.sun.jersey:jersey-server:${versions.jersey}\"\ntestCompile \"com.sun.jersey:jersey-servlet:${versions.jersey}\"\ntestCompile \"com.sun.jersey:jersey-client:${versions.jersey}\"\ntestCompile \"com.xoom.oss:feathercon:1.3.2\"\n*/\n'''\n\napp_java = '''\npackage _PACKAGE_;\n\npublic class App {\n}\n'''\n\napp_test = '''\npackage _PACKAGE_;\n\nimport org.junit.Test;\n\npublic class AppTest {\n @Test\n public void testApp() {\n }\n}\n'''\n\n\ndef writeToFile(file_name, string):\n directory = os.path.dirname(file_name)\n if ( not os.path.exists(directory)):\n os.makedirs(directory)\n F = open(file_name, \"w\")\n F.write(string)\n F.close()\n\n\ndef main(project_dir, package):\n source_tree = ['main', 'test']\n for source in source_tree:\n os.makedirs('%s/src/%s/resources' % (project_dir, source))\n package_directory = package.replace(\".\", \"/\")\n writeToFile(\"%s/build.gradle\" % project_dir, build_file)\n writeToFile(\"%s/src/main/java/%s/App.java\" % (project_dir, package_directory),\n app_java.replace(\"_PACKAGE_\", package))\n writeToFile(\"%s/src/test/java/%s/AppTest.java\" % (project_dir, package_directory),\n app_test.replace(\"_PACKAGE_\", package))\n\n\ndef parse_arguments():\n import argparse\n\n parser = argparse.ArgumentParser(description='Gradle project builder')\n parser.add_argument('--project', required=True, help='project name')\n parser.add_argument('--package', required=True, help='package name')\n args = parser.parse_args()\n return args\n\n\nif __name__ == \"__main__\":\n options = parse_arguments()\n 
main(options.project, options.package)\n","sub_path":"create-java.py","file_name":"create-java.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"589838720","text":"#!env/bin/python\nfrom flask import Flask, request, jsonify\nfrom config import WHITELIST\nimport youtube\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef hello():\n return \"Hello World!\"\n\n\n@app.route(\"/channel\")\ndef subscribers():\n channel_id = request.args.get(\"id\", type = str)\n device_id = request.args.get(\"dev\", type = str)\n for key in WHITELIST:\n if WHITELIST[key] == device_id:\n return jsonify(subCount=youtube.get_subscribers(channel_id))\n return jsonify(subcount=\"0\")\n\n@app.route(\"/channel/id\")\ndef get_id():\n search = request.args.get(\"search\", type = str)\n device_id = request.args.get(\"dev\", type = str)\n for key in WHITELIST:\n if WHITELIST[key] == device_id:\n return jsonify(channelId=youtube.get_channel_id(search))\n return jsonify(channelId=\"0\")\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000)\n","sub_path":"server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"210724011","text":"import time\r\nimport json as json2\r\n\r\nfrom flask_user import login_required\r\nfrom flask import Flask, render_template, Blueprint, g, request\r\nfrom flask_socketio import SocketIO, send\r\nfrom sqlalchemy import create_engine\r\nfrom sqlalchemy.orm import Session\r\n\r\nfrom WateringApp.Models import Settings, Widget\r\nfrom WateringApp.materialien.SoilSensor import SoilSensor\r\nfrom WateringApp.materialien.Motor import Motor\r\nfrom WateringApp.Fachwerte.Humidity import Humidity\r\nimport WateringApp.WateringSystem as wsys\r\nfrom WateringApp.config import DB_BASE_URI, SQLALCHEMY_DATABASE_URI\r\n\r\n\r\n\r\nsocketio = SocketIO()\r\n# uri = URI(DB_BASE_URI, DB_NAME, DB_USERNAME, DB_PASSWORD)\r\n# uri = uri.get_uri_string()\r\nengine = create_engine(SQLALCHEMY_DATABASE_URI)\r\nsession = Session(engine)\r\n\r\n\r\n\r\nwidget = Blueprint('widget', __name__)\r\nwidget_no_auth = Blueprint('widget_no_auth', __name__)\r\nactivate_pump = Blueprint('activate_pump', __name__)\r\nget_widget_state = Blueprint('get_widget_state', __name__)\r\ntoggle_auto_mode = Blueprint('toggle_auto_mode', __name__)\r\n\r\nupdate_activation_level = Blueprint('update_activation_level', __name__)\r\nget_activation_level = Blueprint('get_activation_level', __name__)\r\nget_json = Blueprint('get_json', __name__)\r\n\r\n\r\n@socketio.on('message')\r\ndef message_func(msg):\r\n\r\n engine = create_engine(SQLALCHEMY_DATABASE_URI)\r\n session = Session(engine)\r\n\r\n\r\n valArray = []\r\n average = 0\r\n activeAmount = 0\r\n results = {}\r\n channel = {}\r\n\r\n\r\n water_level = wsys.wsys.get_water_level()\r\n\r\n\r\n\r\n for i in range(SoilSensor.AMOUNT + 1):\r\n sensor = SoilSensor(i)\r\n humidity = sensor.getHumidity()\r\n json = humidity.toJSONString()\r\n with session as sess:\r\n reservoir_size = sess.query(Settings).first().reservoir_size\r\n\r\n if humidity.getValue() > 100:\r\n average += humidity.getValue()\r\n activeAmount += 1\r\n json[\"active\"] = 1\r\n\r\n else:\r\n json[\"active\"] = 0\r\n json[\"value\"] = \"-\"\r\n json[\"percent\"] = \"-\"\r\n json[\"percentString\"] = \"-\"\r\n json[\"channel\"] = i\r\n valArray.append(json)\r\n\r\n # print('activeAmount: ' + 
str(activeAmount))\r\n average = round(average / activeAmount)\r\n avg_humidity = Humidity.intToHumidity(average)\r\n channel[\"channel\"] = valArray\r\n results[\"results\"] = channel\r\n\r\n # TODO: do this calculation in fachwerte\r\n results['results']['water_level'] = water_level * (60/reservoir_size)\r\n channel[\"average\"] = avg_humidity.toJSONString()\r\n\r\n send(json2.dumps(results), broadcast=True)\r\n\r\n # print(msg)\r\n\r\n@widget.route(\"/widget/\")\r\n@login_required\r\ndef widget_func(sensor_nr):\r\n # TODO: for now initialize last_activation and current_water_level when the page is opened\r\n # should be only initialized once when the program starts in the future\r\n\r\n # values = SoilSensor(1).getHumidity()\r\n # values = values.inPercent()\r\n return render_template(\"widget.html\", sensor_nr=sensor_nr)\r\n\r\n\r\n@widget_no_auth.route(\"/widget_no_auth/\")\r\ndef widget_no_auth_func(sensor_nr):\r\n return render_template(\"widget_no_auth.html\", sensor_nr=sensor_nr)\r\n\r\n@activate_pump.route(\"/activatePump\")\r\n@login_required\r\ndef activate_pump_func():\r\n # TODO: update water level\r\n motor = Motor()\r\n motor.continuous(\"right\")\r\n motor.stop()\r\n with session as sess:\r\n wsys.wsys.update_water_level(sess)\r\n\r\n return \"Successfully started Motor\"\r\n\r\n\r\n@get_widget_state.route(\"/getWidgetState\")\r\ndef get_widget_state_func():\r\n with session as sess:\r\n widget_state = sess.query(Widget).first().widget_state\r\n\r\n wsys.wsys.set_state(widget_state)\r\n\r\n return str(widget_state)\r\n\r\n\r\n@toggle_auto_mode.route(\"/toggleAutoMode\")\r\n@login_required\r\ndef toggle_auto_mode_func():\r\n\r\n engine = create_engine(SQLALCHEMY_DATABASE_URI)\r\n session = Session(engine)\r\n\r\n with session as sess:\r\n if sess.query(Widget).first().widget_state:\r\n\r\n sess.query(Widget).first().widget_state = False\r\n sess.commit()\r\n STOP = True\r\n # daemon.stop = False\r\n wsys.wsys.set_state(False)\r\n\r\n else:\r\n sess.query(Widget).first().widget_state = True\r\n sess.commit()\r\n wsys.wsys.set_state(True)\r\n\r\n result = str(sess.query(Widget).first().widget_state)\r\n\r\n return result\r\n\r\n\r\n@update_activation_level.route(\"/updateActivationLevel\", methods=['POST'])\r\n@login_required\r\ndef update_activation_level_func():\r\n if request.method == \"POST\":\r\n activation_level = request.form['data']\r\n # print('activation_level: ' + str(activation_level))\r\n with session as sess:\r\n sess.query(Settings).first().activation_level = request.form['data']\r\n sess.commit()\r\n wsys.wsys.set_activation_level(int(activation_level))\r\n return 'updated Activation Level'\r\n\r\n@get_activation_level.route(\"/getActivationLevel\", methods=['POST'])\r\n@login_required\r\ndef getActivationLevel():\r\n if request.method == \"POST\":\r\n with session as sess:\r\n value = sess.query(Settings).first().activation_level\r\n\r\n\r\n\r\n return str(value)\r\n\r\n@get_json.route(\"/json\")\r\ndef get_json_func():\r\n\r\n engine = create_engine(SQLALCHEMY_DATABASE_URI)\r\n session = Session(engine)\r\n\r\n\r\n valArray = []\r\n average = 0\r\n activeAmount = 0\r\n results = {}\r\n channel = {}\r\n\r\n\r\n water_level = wsys.wsys.get_water_level()\r\n\r\n\r\n\r\n for i in range(SoilSensor.AMOUNT + 1):\r\n sensor = SoilSensor(i)\r\n humidity = sensor.getHumidity()\r\n json = humidity.toJSONString()\r\n with session as sess:\r\n reservoir_size = sess.query(Settings).first().reservoir_size\r\n\r\n if humidity.getValue() > 100:\r\n average += 
humidity.getValue()\r\n activeAmount += 1\r\n json[\"active\"] = 1\r\n\r\n else:\r\n json[\"active\"] = 0\r\n json[\"value\"] = \"-\"\r\n json[\"percent\"] = \"-\"\r\n json[\"percentString\"] = \"-\"\r\n json[\"channel\"] = i\r\n valArray.append(json)\r\n\r\n # print('activeAmount: ' + str(activeAmount))\r\n average = round(average / activeAmount)\r\n avg_humidity = Humidity.intToHumidity(average)\r\n channel[\"channel\"] = valArray\r\n results[\"results\"] = channel\r\n\r\n # TODO: do this calculation in fachwerte\r\n results['results']['water_level'] = water_level * (60/reservoir_size)\r\n channel[\"average\"] = avg_humidity.toJSONString()\r\n\r\n # print(results)\r\n # valArray.append(\"\\\"average\\\" :\" + \"\\\"\" + str(average) + \"\\\"\")\r\n\r\n # print(valArray)\r\n return json2.dumps(results)\r\n # return values\r\n","sub_path":"WateringApp/werkzeuge/WidgetWerkzeug.py","file_name":"WidgetWerkzeug.py","file_ext":"py","file_size_in_byte":6722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"161359931","text":"#!/usr/bin/env python\n\"\"\"Test the converters.\n\nThis verifies results from tests built into the _registerconverters module.\n\"\"\"\n\nimport unittest\nfrom pyobjcryst._pyobjcryst import getTestVector, getTestMatrix\nimport numpy\n\nclass TestConverters(unittest.TestCase):\n\n def testVector(self):\n tv = numpy.array(range(3), dtype=float)\n v = getTestVector()\n self.assertTrue( numpy.array_equal(tv, v) )\n return\n\n def testMatrix(self):\n tm = numpy.array(range(6), dtype=float).reshape(3,2)\n m = getTestMatrix()\n self.assertTrue( numpy.array_equal(tm, m) )\n return\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"pyobjcryst/tests/testconverters.py","file_name":"testconverters.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"551143766","text":"from bs4 import BeautifulSoup as soup\nfrom urllib.request import urlopen as uReq\n\nweb_url = 'https://www.flipkart.com/search?q=iphone&otracker=start&as-show=on&as=off'\n\nuClient = uReq(web_url) # uReq -> connection open and stored on variable uClient\npage_html = uClient.read() # variable send to read function then gather all data that send to variable page_html\nuClient.close() # connection is closed\npage_soup = soup (page_html, \"html.parser\") # It's large html file of webpage\n\ncontainers = page_soup.findAll(\"div\", {\"class\": \"_13oc-S\"}) # get from main class from web that hold all the contentainers it also find div tag with the class\n# print (len(containers)) # print the length of the containers\n\n# print(soup.prettify(containers[0]))\n\n\ncontainer = containers[0]\n# print(container.div.img[\"alt\"])\n\n\nprice=container.findAll(\"div\",{\"class\":\"_4921Z t0pPfW\"}) # price tag get from class\nprint(price[0].text)\n\n\n\nratings = container.findAll(\"div\",{\"class\":\"niHOFQ\"}) # tag show the rating from web\nprint(ratings[0].text)\n\nfilename = \"products.csv\" # Creating file\nf = open(filename,\"w\") # Normal convention of f title\n\nheaders= \"Product_Name, Pricing, Ratings\\n\" # CSV have headers, so just created manually that hold all info\nf.write(headers)\n\nfor contrainer in containers: # for loop\n product_name = contrainer.div.img[\"alt\"] # Get product name\n\n price_container = container.findAll(\"div\", {\"class\":\"col col-5-12 _2o7WAb\"}) # set tag price\n price = price_container[0].text.strip() # get price of 
product\n\n    rating_container = container.findAll(\"div\", {\"class\":\"niHOFQ\"}) # set the rating tag\n    rating = rating_container[0].text # know the rating of product\n\n    # print(\"product_name:\" + product_name)\n    # print(\"price:\" + price)\n    # print(\"ratings:\" + rating)\n\n    #string parsing\n\n    trim_price = ''.join(price.split(',')) # splitting the price\n    rm_USD = trim_price.split(\"$\")\n    add_USD_price = \"USD.\" + rm_USD[1] # price in USD\n    split_price = add_USD_price.split('E') # If provide EMI option then need to setup E\n    final_price = split_price[0]\n\n    split_rating = rating.split(\" \")\n    final_rating = split_rating[0] #\n\n    print(product_name.replace(\",\", \"|\") + \",\" + final_price + \",\" + final_rating + \"\\n\") # replace function set the comma\n    f.write(product_name.replace(\",\", \"|\") + \",\" + final_price + \",\" + final_rating + \"\\n\") # concatenating and file save to the folder\n\nf.close()\n","sub_path":"Web-Scraping.py","file_name":"Web-Scraping.py","file_ext":"py","file_size_in_byte":2420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"79799926","text":"\"\"\"updadte bio\n\nRevision ID: 9b3bc308cd93\nRevises: 74cce18ddfd4\nCreate Date: 2021-06-14 18:46:32.558966\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = '9b3bc308cd93'\ndown_revision = '74cce18ddfd4'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('pitches', sa.Column('date_posted', sa.DateTime(), nullable=True))\n    op.add_column('pitches', sa.Column('category', sa.String(length=255), nullable=False))\n    op.drop_index('ix_pitches_categories', table_name='pitches')\n    op.create_index(op.f('ix_pitches_category'), 'pitches', ['category'], unique=False)\n    op.drop_column('pitches', 'categories')\n    op.drop_column('pitches', 'time')\n    op.add_column('users', sa.Column('bio', sa.String(length=255), nullable=True))\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('users', 'bio')\n op.add_column('pitches', sa.Column('time', postgresql.TIMESTAMP(), autoincrement=False, nullable=True))\n op.add_column('pitches', sa.Column('categories', sa.VARCHAR(length=255), autoincrement=False, nullable=False))\n op.drop_index(op.f('ix_pitches_category'), table_name='pitches')\n op.create_index('ix_pitches_categories', 'pitches', ['categories'], unique=False)\n op.drop_column('pitches', 'category')\n op.drop_column('pitches', 'date_posted')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/9b3bc308cd93_updadte_bio.py","file_name":"9b3bc308cd93_updadte_bio.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"150144690","text":"#i = 6\n#while i>5 and i<19:\n# i+=1\n# print(i)\n\n\n#even numbers between 12 and 20\ni = 12\nwhile i>11 and i<20:\n i+=1\n if i%2==0:\n print(i)","sub_path":"whiie_loop.py","file_name":"whiie_loop.py","file_ext":"py","file_size_in_byte":154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"343507938","text":"#\n# gtkui.py\n#\n# Copyright (C) 2009 Thibault Person \n#\n# Basic plugin template created by:\n# Copyright (C) 2008 Martijn Voncken \n# Copyright (C) 2007-2009 Andrew Resch \n# Copyright (C) 2009 Damien Churchill \n#\n# Deluge is free software.\n#\n# You may redistribute it and/or modify it under the terms of the\n# GNU General Public License, as published by the Free Software\n# Foundation; either version 3 of the License, or (at your option)\n# any later version.\n#\n# deluge is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n# See the GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with deluge. If not, write to:\n# \tThe Free Software Foundation, Inc.,\n# \t51 Franklin Street, Fifth Floor\n# \tBoston, MA 02110-1301, USA.\n#\n# In addition, as a special exception, the copyright holders give\n# permission to link the code of portions of this program with the OpenSSL\n# library.\n# You must obey the GNU General Public License in all respects for all of\n# the code used other than OpenSSL. If you modify file(s) with this\n# exception, you may extend this exception to your version of the file(s),\n# but you are not obligated to do so. If you do not wish to do so, delete\n# this exception statement from your version. 
If you delete this exception\n# statement from all source files in the program, then also delete it here.\n#\n\nimport gtk\n\nfrom deluge.log import LOG as log\nfrom deluge.ui.client import client\nfrom deluge.plugins.pluginbase import GtkPluginBase\nimport deluge.component as component\nimport deluge.common\n\nfrom common import get_resource\n\n\nclass TrackerDialog(gtk.Dialog):\n\tdef __init__(self, parent, tracker=\"\", dest=\"\", command=\"\"):\n\t\tgtk.Dialog.__init__(self, \"Tracker rule edit\" , parent, 0,\n\t\t\t(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,\n\t\t\tgtk.STOCK_OK, gtk.RESPONSE_OK))\n\t\tself.set_default_size(400,150)\n\t\tbox = self.get_content_area()\n\t\tvbox = gtk.VBox()\n\n\t\thbox_url = gtk.HBox()\n\t\tlbl_url = gtk.Label(\"Tracker URL*: \")\n\t\tself.txt_url = gtk.Entry()\n\t\tself.set_tracker(tracker)\n\t\tself.txt_url.connect(\"changed\", self.entrychanged)\n\t\thbox_url.pack_start(lbl_url,False, False, 5)\n\t\thbox_url.pack_start(self.txt_url,True, True, 5)\n\t\tvbox.pack_start(hbox_url,False, False, 5)\n\n\t\thbox_dst = gtk.HBox()\n\t\tlbl_dst = gtk.Label(\"Destination folder*: \")\n\t\tself.txt_dst = gtk.Entry()\n\t\tself.set_destination(dest)\n\t\tself.txt_dst.connect(\"changed\", self.entrychanged)\n\t\thbox_dst.pack_start(lbl_dst,False, False, 5)\n\t\thbox_dst.pack_start(self.txt_dst,True, True, 5)\n\t\tvbox.pack_start(hbox_dst,False, False, 5)\n\n\t\thbox_cmd = gtk.HBox()\n\t\tlbl_cmd = gtk.Label(\"Command: \")\n\t\tself.txt_cmd = gtk.Entry()\n\t\tself.set_command(command)\n\t\thbox_cmd.pack_start(lbl_cmd,False, False, 5)\n\t\thbox_cmd.pack_start(self.txt_cmd,True, True, 5)\n\t\tvbox.pack_start(hbox_cmd,False, False, 5)\n\n\t\tbtn = self.get_widget_for_response(gtk.RESPONSE_OK)\n\t\tbtn.set_sensitive(self.entryfilled())\n\n\n\t\tbox.add(vbox)\n\t\tself.show_all()\n\n\n\tdef entryfilled(self):\n\t\treturn (self.txt_url.get_text_length()>0) and (self.txt_dst.get_text_length()>0)\n\n\n\tdef entrychanged(self, entry):\n\t\tbtn = self.get_widget_for_response(gtk.RESPONSE_OK)\n\t\tbtn.set_sensitive (self.entryfilled())\n\n\tdef get_tracker(self):\n\t\treturn self.txt_url.get_text()\n\n\tdef set_tracker(self, tracker):\n\t\tself.txt_url.set_text(tracker)\n\n\n\tdef get_destination(self):\n\t\treturn self.txt_dst.get_text()\n\n\tdef set_destination(self, dst):\n\t\tself.txt_dst.set_text(dst)\n\n\tdef get_command(self):\n\t\treturn self.txt_cmd.get_text()\n\n\tdef set_command(self, cmd):\n\t\tself.txt_cmd.set_text(cmd)\n\n\nclass GtkUI(GtkPluginBase):\n\tdef enable(self):\n\t\tlog.info(\"applying prefs for automove\")\n\n\t\tcomponent.get(\"PluginManager\").register_hook(\"on_apply_prefs\", self.on_apply_prefs)\n\t\tcomponent.get(\"PluginManager\").register_hook(\"on_show_prefs\", self.on_show_prefs)\n\t\tself.load_ui()\n\t\tself.dirty = False\n\n\n\n\n\tdef disable(self):\n\t\tlog.info(\"applying prefs for automove\")\n\t\tcomponent.get(\"Preferences\").remove_page(\"automove\")\n\n\n\n\tdef load_ui(self):\n\t\tmainWindow = gtk.Frame()\n\t\tself.window = mainWindow\n\t\tbtnAdd = gtk.Button(stock=gtk.STOCK_ADD)\n\t\tbtnAdd.connect(\"clicked\", self.on_add_tracker)\n\t\tself.btnEdit = gtk.Button(stock=gtk.STOCK_EDIT)\n\t\tself.btnEdit.connect(\"clicked\", self.on_edit_tracker)\n\t\tself.btnDelete = gtk.Button(stock=gtk.STOCK_DELETE)\n\t\tself.btnDelete.connect(\"clicked\", self.on_delete_tracker)\n\n\t\tvBox = gtk.VBox(homogeneous=False, spacing=6)\n\t\thBox = gtk.HBox(homogeneous=False, spacing=6)\n\n\t\tvBox.pack_start(hBox, False, False, 
0)\n\t\thBox.pack_end(self.btnDelete, False, False, 5)\n\t\thBox.pack_end(self.btnEdit, False, False, 5)\n\t\thBox.pack_end(btnAdd, False, False, 5)\n\n\t\tself.liststore= gtk.ListStore (str,str,str);\n\t\tself.treeview = gtk.TreeView(self.liststore)\n\t\t#self.treeview.connect(\"cursor-changed\", self.treeviewselected)\n\t\tcol_url = gtk.TreeViewColumn('Tracker')\n\t\tcol_dst = gtk.TreeViewColumn('Destination')\n\t\tcol_cmd = gtk.TreeViewColumn('Command')\n\n\t\t# add columns to treeview\n\t\tself.treeview.append_column(col_url)\n\t\tself.treeview.append_column(col_dst)\n\t\tself.treeview.append_column(col_cmd)\n\t\tcell_url = gtk.CellRendererText()\n\t\tcell_url.editable = True\n\t\tcol_url.pack_start(cell_url, True)\n\t\tcol_url.add_attribute(cell_url, \"text\", 0)\n\t\tcell_dst = gtk.CellRendererText()\n\t\tcol_dst.pack_start(cell_dst, True)\n\t\tcol_dst.add_attribute(cell_dst, \"text\", 1)\n\t\tcell_cmd = gtk.CellRendererText()\n\t\tcol_cmd.pack_start(cell_cmd, True)\n\t\tcol_cmd.add_attribute(cell_cmd, \"text\", 2)\n\n\t\tvBox.pack_end(self.treeview)\n\t\tmainWindow.add(vBox)\n\t\tmainWindow.show_all()\n\t\tcomponent.get(\"Preferences\").add_page(\"automove\", self.window)\n\n\tdef on_add_tracker(self, widget):\n\t\tdialog = TrackerDialog(None)\n\t\tresponse = dialog.run()\n\t\tif response == gtk.RESPONSE_OK:\n\t\t\tself.liststore.append(row=[dialog.get_tracker(),\n\t\t\t\tdialog.get_destination(), dialog.get_command()])\n\t\t\tself.dirty = True\n\t\tdialog.destroy()\n\n\tdef on_edit_tracker(self, widget):\n\t\tmodel, it = self.treeview.get_selection().get_selected()\n\t\tif it:\n\t\t\tu= model.get_value(it, 0)\n\t\t\td= model.get_value(it, 1)\n\t\t\tc= model.get_value(it, 2)\n\t\t\tdialog = TrackerDialog(None, u, d, c)\n\t\t\tresponse = dialog.run()\n\t\t\tif response == gtk.RESPONSE_OK:\n\t\t\t\tself.liststore.set_value(it, 0, dialog.get_tracker())\n\t\t\t\tself.dirty = True\n\t\t\tdialog.destroy()\n\n\n\tdef on_delete_tracker(self, widget):\n\t\tmodel, it = self.treeview.get_selection().get_selected()\n\t\tif it:\n\t\t\tmodel.remove(it)\n\n\tdef populate_list(self):\n\t\tif self.dirty :\n\t\t\tlog.info(\"List in dirty state, don't reload prefs\")\n\t\t\treturn\n\t\tself.liststore.clear()\n\t\tfor t in self.config[\"trackers\"]:\n\t\t\tself.liststore.append(row=[ t[\"url\"], t[\"dst\"], t[\"cmd\"] ])\n\n\tdef on_apply_prefs(self):\n\t\tlog.info(\"applying prefs for automove\")\n\t\t#dump the list\n\t\ttl = []\n\t\tfor row in self.liststore:\n\t\t\ttl.append({\"url\": row[0], \"dst\": row[1], \"cmd\": row[2] })\n\n\t\tself.config[\"trackers\"] = tl\n\t\tclient.automove.set_config(self.config)\n\t\tself.dirty = False\n\n\tdef on_show_prefs(self):\n\t\tclient.automove.get_config().addCallback(self.cb_get_config)\n\t\tself.populate_list()\n\n\tdef cb_get_config(self, config):\n\t\t\"callback for on show_prefs\"\n\t\tself.config = config\n","sub_path":"automove/gtkui.py","file_name":"gtkui.py","file_ext":"py","file_size_in_byte":7368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"205571435","text":"# coding: utf-8\n\n\"\"\"Collection of Functions to convert API responses into python objects\nand vice versa.\n\"\"\"\nimport base64\nfrom functools import wraps\nfrom inspect import signature\nimport zlib\n\n\nimport pandas as pd\nimport pyarrow as pa\n\n\nfrom solarforecastarbiter import datamodel\n\n\ndef _dataframe_to_json(payload_df):\n payload_df.index.name = 'timestamp'\n json_vals = 
payload_df.tz_convert(\"UTC\").reset_index().to_json(\n orient=\"records\", date_format='iso', date_unit='s')\n return '{\"values\":' + json_vals + '}'\n\n\ndef observation_df_to_json_payload(\n observation_df, default_quality_flag=None):\n \"\"\"Extracts a variable from an observation DataFrame and formats it\n into a JSON payload for posting to the Solar Forecast Arbiter API.\n\n Parameters\n ----------\n observation_df : DataFrame\n Dataframe of observation data. Must contain a tz-aware DateTimeIndex\n and a 'value' column. May contain a column of data quality\n flags labeled 'quality_flag'.\n default_quality_flag : int\n If 'quality_flag' is not a column, the quality flag for each row is\n set to this value.\n\n Returns\n -------\n string\n SolarForecastArbiter API JSON payload for posting to the observation\n endpoint. See Notes section for example.\n\n Notes\n -----\n Function returns an object in the following format:\n\n .. code::\n\n {\n 'values': [\n {\n “timestamp”: “2018-11-22T12:01:48Z”, # ISO 8601 datetime in UTC\n “value”: 10.23, # floating point value of observation\n “quality_flag”: 0\n },...\n ]\n }\n\n Raises\n ------\n KeyError\n When 'value' is missing from the columns or 'quality_flag'\n is missing and default_quality_flag is None\n \"\"\"\n if default_quality_flag is None:\n payload_df = observation_df[['value', 'quality_flag']]\n else:\n payload_df = observation_df[['value']]\n payload_df['quality_flag'] = int(default_quality_flag)\n return _dataframe_to_json(payload_df)\n\n\ndef forecast_object_to_json(forecast_series):\n \"\"\"\n Converts a forecast Series to JSON to post to the\n SolarForecastArbiter API.\n\n Parameters\n ----------\n forecast_series : pandas.Series\n The series that contains the forecast values with a\n datetime index.\n\n Returns\n -------\n string\n The JSON encoded forecast values dict\n \"\"\"\n payload_df = forecast_series.to_frame('value')\n return _dataframe_to_json(payload_df)\n\n\ndef _json_to_dataframe(json_payload):\n # in the future, might worry about reading the response in chunks\n # to stream the data and avoid having it all in memory at once,\n # but 30 days of 1 minute data is probably ~4 MB of text. 
A better\n # approach would probably be to switch to a binary format.\n vals = json_payload['values']\n if len(vals) == 0:\n df = pd.DataFrame([], columns=['value', 'quality_flag'],\n index=pd.DatetimeIndex([], name='timestamp'))\n else:\n df = pd.DataFrame.from_dict(json_payload['values'])\n df.index = pd.to_datetime(df['timestamp'], utc=True,\n infer_datetime_format=True)\n return df\n\n\ndef json_payload_to_observation_df(json_payload):\n \"\"\"\n Convert the JSON payload dict as returned by the SolarForecastArbiter API\n observations/values endpoint into a DataFrame\n\n Parameters\n ----------\n json_payload : dict\n Dictionary as returned by the API with a \"values\" key which is a list\n of dicts like {'timestamp': , 'value': ,\n 'quality_flag': }\n\n Returns\n -------\n pandas.DataFrame\n With a tz-aware DatetimeIndex and ['value', 'quality_flag'] columns\n \"\"\"\n df = _json_to_dataframe(json_payload)\n return df[['value', 'quality_flag']]\n\n\ndef json_payload_to_forecast_series(json_payload):\n \"\"\"\n Convert the JSON payload dict as returned by the SolarForecastArbiter API\n forecasts/values endpoing into a Series\n\n Parameters\n ----------\n json_payload : dict\n Dictionary as returned by the API with a \"values\" key which is a list\n of dicts like {'timestamp': , 'value': }\n\n Returns\n -------\n pandas.Series\n With a tz-aware DatetimeIndex\n \"\"\"\n\n df = _json_to_dataframe(json_payload)\n return df['value']\n\n\ndef adjust_start_end_for_interval_label(interval_label, start, end,\n limit_instant=False):\n \"\"\"\n Adjusts the start and end times depending on the interval_label.\n\n Parameters\n ----------\n interval_label : str or None\n The interval label for the the object the data represents\n start : pandas.Timestamp\n Start time to restrict data to\n end : pandas.Timestamp\n End time to restrict data to\n limit_instant : boolean\n If true, an interval label of 'instant' will remove a nanosecond\n from end to ensure forecasts do not overlap. If False, instant\n returns start, end unmodified\n\n Returns\n -------\n start, end\n Return the adjusted start and end\n\n Raises\n ------\n ValueError\n If an invalid interval_label is given\n\n Examples\n --------\n .. testsetup::\n\n from solarforecastarbiter.io.utils import *\n\n Define input start/end:\n\n >>> start = pd.Timestamp('20190101 1200Z')\n >>> end = pd.Timestamp('20190101 1300Z')\n\n Beginning:\n\n >>> adjust_start_end_for_interval_label('beginning', start, end)\n (Timestamp('2019-01-01 12:00:00+0000', tz='UTC'), Timestamp('2019-01-01 12:59:59.999999999+0000', tz='UTC'))\n\n Ending:\n\n >>> adjust_start_end_for_interval_label('ending', start, end)\n (Timestamp('2019-01-01 12:00:00.000000001+0000', tz='UTC'), Timestamp('2019-01-01 13:00:00+0000', tz='UTC'))\n\n Instantaneous:\n\n >>> adjust_start_end_for_interval_label('instant', start, end)\n (Timestamp('2019-01-01 12:00:00+0000', tz='UTC'), Timestamp('2019-01-01 13:00:00+0000', tz='UTC'))\n\n >>> adjust_start_end_for_interval_label('instant', start, end,\n ... 
limit_instant=True)\n (Timestamp('2019-01-01 12:00:00+0000', tz='UTC'), Timestamp('2019-01-01 12:59:59.999999999+0000', tz='UTC'))\n\n \"\"\" # NOQA\n\n if (\n interval_label is not None and\n interval_label not in ('instant', 'beginning', 'ending')\n ):\n raise ValueError('Invalid interval_label')\n\n if (\n interval_label == 'beginning' or\n (interval_label == 'instant' and limit_instant)\n ):\n end -= pd.Timedelta(1, unit='nano')\n elif interval_label == 'ending':\n start += pd.Timedelta(1, unit='nano')\n return start, end\n\n\ndef adjust_timeseries_for_interval_label(data, interval_label, start, end):\n \"\"\"\n Adjusts the index of the data depending on the interval_label, start,\n and end. Will always return the data located between start, end.\n\n Parameters\n ----------\n data : pandas.Series or pandas.DataFrame\n The data with a localized DatetimeIndex\n interval_label : str or None\n The interval label for the the object the data represents\n start : pandas.Timestamp\n Start time to restrict data to\n end : pandas.Timestamp\n End time to restrict data to\n\n Returns\n -------\n pandas.Series or pandas.DataFrame\n Return data between start and end, in/excluding the endpoints\n depending on interval_label\n\n Raises\n ------\n ValueError\n If an invalid interval_label is given or data is not localized.\n \"\"\"\n start, end = adjust_start_end_for_interval_label(interval_label, start,\n end)\n data = data.sort_index(axis=0)\n # pandas >= 0.25.1 requires start, end to have same tzinfo.\n # unexpected behavior when data is not localized, so prevent that\n if data.empty:\n return data\n if data.index.tzinfo is None:\n raise ValueError('data must be localized')\n start = start.tz_convert(data.index.tzinfo)\n end = end.tz_convert(data.index.tzinfo)\n return data.loc[start:end]\n\n\ndef serialize_data(values):\n serialized_buf = pa.serialize(values).to_buffer()\n compressed_bytes = zlib.compress(serialized_buf)\n encoded = base64.b64encode(compressed_bytes)\n return encoded.decode('ascii') # bytes to str\n\n\ndef deserialize_data(data):\n compressed = base64.b64decode(data)\n serialized = zlib.decompress(compressed)\n values = pa.deserialize(serialized)\n return values\n\n\ndef serialize_raw_report(raw):\n bundle = {'metrics': raw.metrics,\n 'template': raw.template,\n 'metadata': raw.metadata.to_dict(),\n 'processed_forecasts_observations': [\n pfx.to_dict() for pfx in\n raw.processed_forecasts_observations]}\n return serialize_data(bundle)\n\n\ndef deserialize_raw_report(encoded_bundle, version=0):\n bundle = deserialize_data(encoded_bundle)\n return datamodel.RawReport.from_dict(bundle)\n\n\nclass HiddenToken:\n \"\"\"\n Obscure the representation of the input string `token` to avoid saving\n or displaying access tokens in logs.\n \"\"\"\n def __init__(self, token):\n self.token = str(token) # make sure it isn't a localproxy\n\n def __repr__(self):\n return '****ACCESS*TOKEN****'\n\n\ndef ensure_timestamps(*time_args):\n \"\"\"\n Decorator that converts the specified time arguments of the wrapped\n function to pandas.Timestamp objects\n\n Parameters\n ----------\n strings\n Function arguments to convert to pandas.Timestamp before\n executing function\n\n Raises\n ------\n ValueError\n If any of time_args cannot be converted to pandas.Timestamp\n\n Examples\n --------\n .. testsetup::\n\n import datetime as dt\n from solarforecastarbiter.io.utils import *\n\n >>> @ensure_timestamps('start', 'end')\n ... def get_values(start, end, other_arg):\n ... 
# do stuff with start, end assumed to be pandas.Timestamps\n ... if isinstance(start, pd.Timestamp):\n ... return True\n\n >>> get_values('2019-01-01T00:00Z', dt.datetime(2019, 1, 2, 12), 'other')\n True\n \"\"\"\n def decorator(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n sig = signature(f)\n inds = {k: None for k in time_args}\n for i, k in enumerate(sig.parameters.keys()):\n if k in inds:\n inds[k] = i\n nargs = list(args)\n for k, ind in inds.items():\n if k in kwargs:\n kwargs[k] = pd.Timestamp(kwargs[k])\n elif ind is not None:\n nargs[ind] = pd.Timestamp(args[ind])\n return f(*nargs, **kwargs)\n return wrapper\n return decorator\n","sub_path":"solarforecastarbiter/io/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"409038625","text":"#!/usr/bin/env python\n# Display a runtext with double-buffering.\nfrom samplebase import SampleBase\nfrom rgbmatrix import graphics\nfrom bs4 import BeautifulSoup\nfrom operator import itemgetter\nimport datetime\nimport time\nimport re\nimport urllib3\n\ndef station_to_num(destination):\n station_name_dictionary = {\"北浦和\" : 0, \"南与野\" : 1}\n station_name = re.findall(\"北浦和|南与野\", destination)\n return station_name_dictionary[station_name[0]]\n\ndef num_to_station(num):\n station_name_dictionary = {0: \"北浦和\", 1: \"南与野\"}\n return station_name_dictionary[num]\n\nclass RunText(SampleBase):\n def __init__(self, *args, **kwargs):\n super(RunText, self).__init__(*args, **kwargs)\n self.parser.add_argument(\"-t\", \"--text\", help=\"The text to scroll on the RGB LED panel\", default=\"Hello world!\")\n\n def run(self):\n start = time.time()\n print(start)\n offscreen_canvas = self.matrix.CreateFrameCanvas()\n #Number Font 7x14 font\n time_font = graphics.Font()\n time_font.LoadFont(\"../../fonts/7x14.bdf\")\n #Kanji&Hiragana Font <= Incomplete version\n kh_font = graphics.Font()\n kh_font.LoadFont(\"../../fonts/KH-Dot-kagurazakautf-12.bdf\")\n #Schedule Time Color : Chartreuse\n sch_textColor = graphics.Color(127, 220, 0)\n #Expected Time Color : Dark Orange\n exp_textColor = graphics.Color(230, 140, 0)\n #Destination Color : Yellow\n des_textColor = graphics.Color(200, 200, 0)\n pos = offscreen_canvas.width\n # Prepare\n http = urllib3.PoolManager()\n e_schedule = []\n e_expect = []\n e_delay = []\n e_destination = []\n p_schedule = []\n p_expect = []\n p_delay = []\n p_destination = []\n print(\"Into While Loop\")\n print(time.time()-start)\n while True:\n start = time.time()\n schedule = []\n expect = []\n destination = []\n company = []\n error = []\n e_schedule = []\n e_expect = []\n e_delay = []\n e_destination = []\n print(\"array prepared\")\n print(time.time()-start)\n # The same action as seibu.\n try:\n kokusai = http.request(\"GET\",\n \"\"\"http://www.kokusaibus.com/blsys/loca?VID=ldt&EID=nt&DSMK=15&DK=f_2gi_krib2u-f_2gi_kriau3-f_2gi_kriati-f_2gi_krib26-f_2gi_kriaub-f_2gi_1d0-f_2gi_krib06\"\"\")\n kokusai_soup = BeautifulSoup(kokusai.data.decode('Shift-JIS'), \"lxml\")\n kokusai_div = kokusai_soup.find(\"div\", {\"id\": \"mainContents\"})\n kokusai_table = kokusai_div.find(\"table\",\n {\"border\": \"0\", \"cellpadding\": \"0\", \"cellspacing\": \"1\",\n \"class\": \"R_Table\",\n \"width\": \"650\"})\n kokusai_tbody = kokusai_table.find(\"tbody\")\n kokusai_td = kokusai_tbody.find_all(\"td\") \n except AttributeError:\n # This error happens when bus is out of service.\n error.append(\"Kokusai Kougyou bus is 
out of service\")\n \n except (urllib3.exceptions.MaxRetryError, urllib3.exceptions.NewConnectionError):\n # These error happen when device is disconnected with internet.\n error.append(\"Internet disconnected\")\n \n except Exception as e:\n # This except syntax covers all errors except fot AttirbuteErrpr and (urllib3.exceptions...Error).\n error.append([\"In\" + company + \"Error\", type(e), str(e.args), str(e)])\n \n else:\n for i in range(0, len(kokusai_td), 6):\n schedule.append(kokusai_td[i].get_text())\n expect.append(kokusai_td[i + 1].get_text())\n destination.append(station_to_num(kokusai_td[i + 3].get_text()))\n company.append(\"K\")\n error.append(\"None\")\n print(\"Finished to fetch kokusai bus data\")\n print(time.time()-start)\n start = time.time()\n # Try to fetch data of running buses currently from seibu bus.\n try:\n seibu = http.request(\"GET\",\n \"\"\"http://loca.seibubus.co.jp/seibuloca/navi?VID=ldt&EID=nt&UKD=1&DSMK=120179&DK=3lbj_3e0_1705mm-3lbj_3e0_1705ji-3lbj_3e0_1705ge\"\"\")\n seibu_soup = BeautifulSoup(seibu.data, \"lxml\")\n seibu_table = seibu_soup.find(\"table\",\n {\"width\": \"760\", \"cellpadding\": \"0\", \"cellspacing\": \"0\", \"class\": \"src-dia\"})\n seibu_td = seibu_table.find_all(\"td\")\n \n # If try section got this error, execute this excepts.\n except AttributeError:\n # This error happens when bus is out of service.\n error.append(\"Seibu bus is out of service\")\n \n # If try section got error, executes this excepts.\n except (urllib3.exceptions.MaxRetryError, urllib3.exceptions.NewConnectionError):\n # These error happen when device is disconnected with internet.\n error.append(\"Internet disconnected\")\n \n # If try section got unexpected error, record the error.\n except Exception as e:\n # This except syntax covers all errors except fot AttirbuteErrpr and (urllib3.exceptions...Error).\n error.append([\"In\" + company + \"Error\", type(e), str(e.args), str(e)])\n # If try section got no error, execute following section.\n else:\n # print(seibu_td)\n for i in range(0, len(seibu_td), 7):\n schedule.append(seibu_td[i].get_text())\n expect.append(seibu_td[i + 1].get_text())\n destination.append(station_to_num(seibu_td[i + 5].get_text()))\n company.append(\"S\")\n error.append(\"None\")\n \n for i in range(0, len(schedule)):\n schedule_string = re.findall(\"\\d[0-9]\", schedule[i])\n expect_string = re.findall(\"\\d[0-9]\", expect[i])\n if len(expect_string) == 0:\n schedule[i] = datetime.time(hour=int(schedule_string[0]), minute=int(schedule_string[1]), second=0,\n microsecond=0)\n expect[i] = None\n else:\n schedule[i] = datetime.time(hour=int(schedule_string[0]), minute=int(schedule_string[1]), second=0,\n microsecond=0)\n expect[i] = datetime.time(hour=int(expect_string[0]), minute=int(expect_string[1]), second=0, microsecond=0)\n print(\"Finished to fetch seibu bus data\")\n print(time.time()-start)\n start = time.time()\n try:\n all_array = list(zip(schedule, expect, destination, company, error))\n all_array.sort(key=itemgetter(0))\n schedule, expect, destination, company, error = zip(*all_array)\n except ValueError:\n pass\n print(\"Finished to sort data\")\n print(time.time()-start)\n start = time.time()\n # For faster data procession in extracting data, this program obays following rule.\n # 1. destination_name\n # Kitaurawa : 0 (K, in variable name)\n # Minamiyono : 1 (M, in variable name)\n # 2. 
bus_company_name\n # Kokusaikougyou : K\n # Seibu : S\n fdd0t_or_f = 0 in destination\n fdd1t_or_f = 1 in destination\n \n if fdd0t_or_f and fdd1t_or_f:\n fdd0 = destination.index(0)\n fdd1 = destination.index(1)\n if fdd0 < fdd1:\n e_schedule.append(schedule[fdd0])\n e_expect.append(expect[fdd0])\n e_destination.append(num_to_station(destination[fdd0]))\n e_schedule.append(schedule[fdd1])\n e_expect.append(expect[fdd1])\n e_destination.append(num_to_station(destination[fdd1]))\n else:\n e_schedule.append(schedule[fdd1])\n e_expect.append(expect[fdd1])\n e_destination.append(num_to_station(destination[fdd1]))\n e_schedule.append(schedule[fdd0])\n e_expect.append(expect[fdd0])\n e_destination.append(num_to_station(destination[fdd0]))\n else:\n for i in range(0, 2):\n e_schedule.append(schedule[i])\n e_expect.append(expect[i])\n e_destination.append(num_to_station(destination[i]))\n TF_schedule = (p_schedule == e_schedule)\n TF_expect = (p_expect == e_expect)\n TF_destination = (p_destination == e_destination)\n if all([TF_destination, TF_expect, TF_schedule]):\n pass\n else:\n p_schedule = e_schedule\n p_expect = e_expect\n p_destination = e_destination\n p_delay = e_delay\n time.sleep(5)\n #schedule\n sche1 = e_schedule[0].strftime(\"%H:%M\") + \" \"\n sche2 = e_schedule[1].strftime(\"%H:%M\") + \" \"\n #expectation\n expt1 = e_expect[0].strftime(\"%H:%M\") + \" \"\n expt2 = e_expect[1].strftime(\"%H:%M\") + \" \"\n #destination\n dest1 = str(e_destination[0]) + \" \"\n dest2 = str(e_destination[1]) + \" \"\n offscreen_canvas.Clear()\n for i in range(0,60):\n len_dest1 = graphics.DrawText(offscreen_canvas, kh_font, 0, 14, des_textColor, dest1)\n len_dest2 = graphics.DrawText(offscreen_canvas, kh_font, 0, 28, des_textColor, dest2)\n pos -= 1\n time.sleep(3.0)\n offscreen_canvas = self.matrix.SwapOnVSync(offscreen_canvas)\n TORF = True\n time.sleep(1.0)\n while TORF:\n offscreen_canvas.Clear()\n #len1 = graphics.DrawText(offscreen_canvas, font, pos, 14, textColor, my_text)\n #len2 = graphics.DrawText(offscreen_canvas, font, pos, 28, textColor, my_text)\n len_sche1 = graphics.DrawText(offscreen_canvas, time_font, pos+len_dest1, 14, sch_textColor, sche1)\n len_sche2 = graphics.DrawText(offscreen_canvas, time_font, pos+len_dest2, 28, sch_textColor, sche2)\n len_expt1 = graphics.DrawText(offscreen_canvas, time_font, pos+len_dest1+len_sche1, 14, exp_textColor, expt1)\n len_expt2 = graphics.DrawText(offscreen_canvas, time_font, pos+len_dest2+len_sche2, 28, exp_textColor, expt2)\n pos -= 1\n if (pos + len_sche1 + len_expt1 + len_dest1 < 0 and pos + len_sche2 + len_expt2 + len_dest2 < 0):\n pos = offscreen_canvas.width\n time.sleep(0.08)\n offscreen_canvas = self.matrix.SwapOnVSync(offscreen_canvas)\n TORF = (pos + len_sche1 + len_expt1 + len_dest1 > 0) or (pos + len_sche2 + len_expt2 + len_dest2 > 0)\n time.sleep(2)\n\n# Main function\nif __name__ == \"__main__\":\n run_text = RunText()\n if (not run_text.process()):\n run_text.print_help()\n","sub_path":"runtext1.py","file_name":"runtext1.py","file_ext":"py","file_size_in_byte":11680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"459538288","text":"import random\r\nimport items\r\n\r\nclass root_of_monsters:\r\n def item_drop(self):\r\n rand = random.randrange(1,1000,1)\r\n dropped_list=[]\r\n \r\n for queue in self.drop_list:\r\n if rand%queue[0] == 0:\r\n dropped_list.append(queue)\r\n return dropped_list\r\n\r\nclass duck(root_of_monsters):\r\n def __init__(self):\r\n self.name = 
\"duck\"\r\n self.health = 45\r\n self.power = 4\r\n self.monster_level = 1\r\n\r\n #drop_rate, type, name\r\n self.drop_list = [\r\n [5, 'food', 'weak_meat'],\r\n [13, 'coins', 'duck_coin'],\r\n [47, 'wearable', 'old_maul'],\r\n [49, 'wearable', 'old_staff'],\r\n [3, 'junk', 'dirth']\r\n ]\r\n\r\n\r\nclass wild_bear(root_of_monsters):\r\n def __init__(self):\r\n self.name = \"wild_bear\"\r\n self.health = 60\r\n self.power = 6\r\n self.monster_level = 1\r\n\r\n #drop_rate, type, name\r\n self.drop_list = [\r\n [5, 'food', 'weak_meat'],\r\n [13, 'coins', 'wild_coin'],\r\n [43, 'wearable', 'old_maul'],\r\n [41, 'wearable', 'old_staff'],\r\n [3, 'junk', 'dirth'],\r\n [4, 'elixir', 'weak_potion']\r\n ]\r\n\r\nfirst = duck()\r\nprint(first.item_drop())\r\n","sub_path":"monsters.py","file_name":"monsters.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"475709635","text":"import random\n\n\ndef splitDataset(dataset: list) -> (list, list):\n random.shuffle(dataset)\n div = int(len(dataset) * 0.7)\n return dataset[:div], dataset[div:]\n\n\ndef getProbByKeyValue(dataset, key, value):\n count = 0\n for data in dataset:\n if data[key] == value:\n count += 1\n return count / len(dataset)\n\n\ndef getProbPrevKeyValue(dataset, key1, value1, key2, value2):\n # get P(key2|key1)\n count1 = 0\n count2 = 0\n for data in dataset:\n if data[key1] > value1:\n count1 += 1\n if data[key2] > value2:\n count2 += 1\n return count2 / count1\n\n\nif __name__ == \"__main__\":\n with open('./src/others/titanic.dat', 'r') as f:\n dataset = [\n dict(zip(\n ['Class', 'Age', 'Sex', 'Survived'],\n [float(x) for x in line.split(',')]\n )) for line in f if '@' not in line\n ]\n\n for i in range(10):\n training, testing = splitDataset(dataset)\n prob_y = getProbByKeyValue(training, 'Survived', 1)\n prob_n = getProbByKeyValue(training, 'Survived', -1)\n for test in testing:\n for key, value in test:\n prob_y *= getProbPrevKeyValue(training, )\n","sub_path":"src/others/native-bayes.py","file_name":"native-bayes.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"188136093","text":"#! 
python \n# @Time    : 17-9-26\n# @Author  : kay\n# @File    : eva_template_standard.py\n# @E-mail  : 861186267@qq.com\n# @Function:\n\nfrom glob import iglob\nimport sys\n\nsys.path.append('../')\nsys.dont_write_bytecode = True\n\nimport datetime\nfrom utils.utils import *\nfrom model.ori_bilstm import ExtractorLSTM\nfrom tensorflow.python.platform import flags\nfrom decoder.ctc_beam_search import *\nfrom decoder.edit_distance import *\nfrom model.rew_bilstm import *\n\n\nnp.set_printoptions(threshold=10000000000, linewidth=10000000000000)\n\nflags.DEFINE_string('utter_dataset', 'utterances', 'set the template dataset')\n\nflags.DEFINE_string('level', 'phn', 'set the task level, phn, cha, or seq2seq, seq2seq will be supported soon')\nflags.DEFINE_string('model', 'ExtractorLSTM', 'set the model to use, DBiRNN, BiRNN, ResNet..')\n\nflags.DEFINE_integer('batch_size', 1, 'set the batch size')\nflags.DEFINE_integer('num_hidden', 128, 'set the hidden size of rnn cell')\nflags.DEFINE_integer('num_feature', 40, 'set the size of input feature')\nflags.DEFINE_integer('num_classes', 70, 'set the number of output classes')\n\nflags.DEFINE_string('datadir', '/home/kay/Desktop/ctc/resource/code/python/data', 'set the pos root directory')\nflags.DEFINE_string('logdir', '/home/kay/Desktop/ctc/resource/code/python/data', 'set the log directory')\n\nFLAGS = flags.FLAGS\n\nutter_dataset = FLAGS.utter_dataset\nlevel = FLAGS.level\nmodel_fn = ExtractorLSTM\n\nbatch_size = FLAGS.batch_size\nnum_hidden = FLAGS.num_hidden\nnum_feature = FLAGS.num_feature\nnum_classes = FLAGS.num_classes\ndatadir = FLAGS.datadir\n\nlogdir = FLAGS.logdir\nsavedir = os.path.join(logdir, level, 'save')\nresultdir = os.path.join(logdir, level, 'result')\nloggingdir = os.path.join(logdir, level, 'logging')\ncheck_path_exists([logdir, savedir, resultdir, loggingdir])\n\nlogfile = os.path.join(loggingdir, str(datetime.datetime.strftime(datetime.datetime.now(),\n                                                                  '%Y-%m-%d %H:%M:%S') + '.txt').replace(' ',\n                                                                                                         '').replace(\n    '/', ''))\n\n\ndef get_templates():\n    \"\"\"\n    function: get templates saved by users\n    :return: templates\n    \"\"\"\n    template_dir = os.path.join(datadir, level, 'templates_standard')\n    templates = []\n    for index, temp_path in enumerate(iglob(os.path.join(template_dir, '**/**.npy'), recursive=True)):\n        temp = np.load(temp_path)\n        templates.append(temp)\n\n    return templates\n\n\ndef get_utterances(datadir, level, utter_dataset):\n    \"\"\"\n    function: get utterances going to test\n    :param datadir: the directory of utterances\n    :param level: the default is phn\n    :param utter_dataset: the subdirectory of utterances \n    :return: the path list of utterances feature and labels\n    \"\"\"\n    feature_dirs = []\n    label_dirs = []\n    for idx, wav_path in enumerate(\n            iglob(os.path.join(datadir, level, utter_dataset, 'feature', '**/**.npy'), recursive=True)):\n        feature_dirs.append(wav_path)\n        lab_path = wav_path.replace('feature', 'label')\n        label_dirs.append(lab_path)\n\n    return os.path.join(datadir, level, utter_dataset, 'feature', '**/**.npy'), os.path.join(datadir, level,\n                                                                                             utter_dataset, 'label',\n                                                                                             '**/**.npy')\n\n\nclass Runner(object):\n    def _default_configs(self):\n        return {'model_fn': model_fn,\n                'batch_size': batch_size,\n                'num_hidden': num_hidden,\n                'num_feature': num_feature,\n                'num_classes': num_classes}\n\n    def run(self):\n        # load pos\n        args_dict = self._default_configs()\n        args = dotdict(args_dict)\n        model = model_fn(args, 1)\n\n        feature_dir = os.path.join(datadir, level, utter_dataset, 'feature')\n        label_dir = 
os.path.join(datadir, level, utter_dataset, 'label')\n\n print('feature_dir:', feature_dir)\n print('label_dir:', label_dir)\n\n batchedData, maxTimeSteps, totalN = load_batched_data(feature_dir, label_dir, batch_size, level)\n print('len(batchedData):', len(batchedData))\n\n feva_result = os.path.join(resultdir, 'eva_utter.txt')\n if os.path.exists(feva_result):\n os.remove(feva_result)\n\n passcount = 0\n standard = 0\n phnslist = get_phns_list(num_classes)\n\n with tf.Session(graph=model.graph) as sess:\n for batch in batchedData:\n batchInputs, batchTargetSparse, batchSeqLengths = batch\n\n # params_path = os.path.join(os.getcwd(), '../', 'parameter', 'ctc_parameters.txt')\n # print('params_path:', params_path)\n # fparams = open(params_path, 'r')\n # params = []\n # for param in fparams.readlines():\n # print('param:', np.shape(param))\n # params.append(param)\n\n ckpt = tf.train.get_checkpoint_state(savedir)\n model.saver.restore(sess, ckpt.model_checkpoint_path)\n\n params = sess.run(model.var_trainable_op)\n logits2d = QbyENetwork(batchInputs, params, num_hidden, num_classes)\n\n beam_result_log = ctc_beam_search_decoder_log(\n probs_seq=logits2d,\n beam_size=1,\n vocabulary=phnslist,\n blank_id=len(phnslist),\n cutoff_prob=1.0)\n\n print(beam_result_log)\n\n pres_ = [int(item) for item in beam_result_log[0][1].split('_')[1:]]\n pres_list = list(pres_)\n\n print('pres_:', pres_)\n print('pres_list:', pres_list)\n\n spos = 0 # start pos remove silence\n epos = len(pres_list) # end pos remove silence\n if epos == 0:\n print('pres_list is null')\n continue\n\n if int(pres_list[0]) == 0:\n spos = 1\n if int(pres_list[epos - 1]) == 0:\n epos -= 1\n\n pre_ori = pres_[spos:epos]\n pre_list_ori = pres_list[spos:epos]\n pre_len = len(pre_list_ori)\n\n whone = 0\n tolerate = 0.35 # the toleration degree of length of prediction\n err = 0\n success = False\n templates = get_templates()\n for tag, temp in enumerate(templates):\n print('passcount:', passcount)\n # for temp in templates:\n whone = tag\n\n temp_ = temp\n temp_list = temp\n\n spos_ = 0 # start pos remove silence\n epos_ = len(temp_list) # end pos remove silence\n\n print('spos_:', spos_)\n print('epos_:', epos_)\n print('temp_list:', temp_list)\n\n if int(temp_list[0]) == 0:\n spos_ = 1\n\n if int(temp_list[epos_ - 1]) == 0:\n epos_ -= 1\n\n tmp_ori = temp_[spos_:epos_]\n tmp_list_ori = temp_list[spos_:epos_]\n tmp_len = len(tmp_list_ori)\n\n # standard is the threshold\n tole_len = np.ceil(tmp_len / 2) - 1\n standard = tole_len / tmp_len # + 0.001\n\n # the prediction is far shorter than template over the toleration\n if pre_len <= tmp_len * tolerate:\n continue\n\n # the predication is a bit shorter than template under the toleration\n elif pre_len <= tmp_len:\n err = edit_distance(pre_ori, tmp_ori)\n if err < standard:\n success = True\n passcount += 1\n break\n else:\n continue\n\n # the prediction is longer than template\n else:\n # whether target is in prediction or not\n if (''.join(map(repr, tmp_list_ori)) in ''.join(map(repr, pre_list_ori))):\n passcount += 1\n break\n\n else:\n # find the same element in prediction and template, then do comparison\n for i, atom in enumerate(tmp_list_ori):\n try:\n idx = pre_list_ori.index(atom)\n start = idx - i\n if start < 0:\n start = 0\n\n end = idx + tmp_len - i\n if end > pre_len:\n end = pre_len\n\n err = edit_distance(pres_[start + 1:end + 1], temp_[1:tmp_len + 1])\n\n if err < standard:\n success = True\n passcount += 1\n break\n else:\n continue\n\n except ValueError:\n 
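# template phone not found in the prediction, so move on to the next anchor phone\n                                    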
continue\n\n                if success:\n                    break\n\n            with open(feva_result, 'a') as result:\n                result.write(output_to_sequence(templates[whone], type=level) + '\\n')\n                result.write(output_to_sequence(pres_list, type=level) + '\\n')\n                result.write('standard:' + str(standard) + ', pErr:' + str(err) + ' ' + str(success) + '\\n')\n                result.write('\\n')\n                result.close()\n\n        sess.close()\n        print('passcount:', passcount)\n        print('totalN:', totalN)\n        print('percent:', passcount / totalN)\n\n\nif __name__ == '__main__':\n    runner = Runner()\n    runner.run()\n","sub_path":"code/evaluation/eva_template_standard.py","file_name":"eva_template_standard.py","file_ext":"py","file_size_in_byte":10345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"583828888","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[4]:\n\n\nimport pandas as pd\nimport numpy as np\nimport gensim\nimport codecs\n\n\n# In[64]:\n\n\nfrom sklearn import svm\nfrom sklearn import metrics\nfrom sklearn.externals import joblib\n\n\n# In[6]:\n\n\ndef getWordVecs(wordList,model):\n    vecs = []\n    for word in wordList:\n        word = word.replace('\\n','')\n        try:\n            vecs.append(model[word])\n        except KeyError:\n            continue\n    return np.array(vecs,dtype='float')\n\ndef buildVecs(data,model):\n    new_vec = []\n    for line in data:\n        vecs = getWordVecs(line,model)\n        if len(vecs) > 0:\n            vecsArray = sum(np.array(vecs)) / len(vecs)\n            new_vec.append(vecsArray)\n    return new_vec\n\n\n# In[35]:\n\n\ndf = pd.read_csv('./data.csv')\ncontent = df['content'].tolist()\nsents = [eval(cont) for cont in content]\nprint(len(sents))\nlabels = df['label'].tolist()\nprint(len(labels))\n\n\n# In[33]:\n\n\nmodel = gensim.models.KeyedVectors.load_word2vec_format('semi.txt',binary=False)\n\n\n# In[45]:\n\n\ndata_vec = []\ndata_label = []\nfor i in range(len(sents)):\n    senl= []\n    sent = sents[i]\n    for word in sent:\n        try:\n            senl.append(model[word])\n        except KeyError:\n            continue\n    \n    sen_arr = np.array(senl,dtype='float')\n#    print(sen_arr.shape)\n    if sen_arr.shape[0] > 0:\n        sen_mean = sum(np.array(sen_arr)) / len(sen_arr)\n        data_vec.append(sen_mean)\n        data_label.append(labels[i])\n\n\n# In[46]:\n\n\nprint(len(data_vec))\nprint(len(data_label))\n\n\n# In[47]:\n\n\ndata_vec[0]\n\n\n# In[48]:\n\n\ndata_label[0]\n\n\n# In[51]:\n\n\nclf = svm.SVC(C=2,probability=True)\nclf.fit(data_vec,data_label)\n\n\n# In[52]:\n\n\nclf.score(data_vec,data_label)\n\n\n# In[53]:\n\n\nimport matplotlib.pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[63]:\n\n\nprint(np.array(data_vec).shape)\npred_probas = clf.predict_proba(data_vec)[:,1]\nprint(pred_probas.shape)\nfpr,tpr,_ = metrics.roc_curve(data_label,pred_probas)\nroc_auc = metrics.auc(fpr,tpr)\nplt.plot(fpr, tpr, label = 'area = %.2f' % roc_auc)\nplt.plot([0, 1], [0, 1], 'k--')\nplt.xlim([0.0, 1.0])\nplt.ylim([0.0, 1.05])\nplt.legend(loc = 'lower right')\nplt.show()\n\n\n# In[65]:\n\n\njoblib.dump(clf,'semi_mode.m')\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"641957845","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass KMeans(object):\n    def __init__(self, num_cluster):\n        self.num_cluster = num_cluster\n\n    def init(self, vectors):\n        self.vectors = vectors\n        self.num_vectors, self.dim = vectors.shape\n        self.membership = np.random.randint(0, self.num_cluster, size=self.num_vectors)\n        
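# allocate one centroid per cluster; update_centroid() below fills them in from the random membership\n        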
self.centroids = np.zeros(shape=(self.num_cluster, self.dim))\n self.update_centroid()\n\n def update_centroid(self):\n for k in range(self.num_cluster):\n self.centroids[k] = np.mean(self.get_vecs_clsuter_k(k), axis=0)\n\n def calc_loss(self):\n ret = 0\n for k in range(self.num_cluster):\n ret += np.sum((self.get_vecs_clsuter_k(k) - self.centroids[k]) ** 2)\n return ret\n\n def get_vecs_clsuter_k(self, k):\n return self.vectors[self.membership == k]\n\n def plot(self, ax):\n colormap = ['b', 'g', 'r', 'c', 'm', 'y', 'k']\n for k in range(self.num_cluster):\n ret =self.get_vecs_clsuter_k(k)\n x = ret[:, 0]\n y = ret[:, 1]\n ax.scatter(x, y, color=colormap[k], s=4)\n\n\n def iter_once(self):\n for n in range(self.num_vectors):\n ret = np.sum((self.centroids - self.vectors[n]) ** 2, axis=1)\n self.membership[n] = np.argmin(ret)\n self.update_centroid()\n\n def iterate(self, num_iter):\n fig, axes = plt.subplots(1, num_iter)\n for iter in range(num_iter):\n self.iter_once()\n print(\"loss: %0.2f\" % self.calc_loss())\n self.plot(axes[iter])\n plt.show()\n\ndef get_random_samples(size=5000):\n half_size = size // 2\n r = np.random.random()\n x_0 = np.random.normal(-3, 1, size=half_size)\n x_1 = np.random.normal(3, 1, size=half_size)\n y_0 = np.random.normal(-3, 1, size=half_size)\n y_1 = np.random.normal(3, 1, size=half_size)\n x = np.hstack([x_0, x_1])\n np.random.shuffle(x)\n y = np.hstack([y_0, y_1])\n np.random.shuffle(y)\n return np.vstack([x, y]).T\n\ndef main():\n \"\"\"\n import pickle\n with open(\"../data/item_factors.pkl\", \"rb\") as f:\n item_factors = pickle.load(f)\n \"\"\"\n data = get_random_samples(10000)\n plt.scatter(data[:, 0], data[:, 1])\n plt.show()\n print(data.shape)\n model = KMeans(4)\n model.init(data)\n model.iterate(5)\nif __name__ == \"__main__\":\n main()\n","sub_path":"clustering/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"351289879","text":"# Stat with a class\nclass Person():\n def __init__(self, fornavn, efternavn, telefon):\n self.fornavn = fornavn\n self.efternavn = efternavn\n self.telefon = telefon\n\n# The list can be created and objects added at the same time\npersoner = list([Person(\"Hans\", \"Petersen\", \"123123123\"),\n Person(\"Ole\", \"Nielsen\", \"456456456\")])\n\n# Append another object to the list\npersoner.append(Person(\"Svend\", \"Olsen\", \"234234234\"))\n\n# Create a object and then append to the list\nperson = Person(\"Ib\", \"Clausen\", \"765765765\")\npersoner.append(person)\n\n# Lets see what we got - nicely formatted in columns\nfor p in personer:\n print(\"{: <10}{: <10}{: <10}\".format(p.fornavn, p.efternavn, p.telefon))","sub_path":"Apps/List_of_objects.py","file_name":"List_of_objects.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"192521165","text":"\n\n\n\npoint_could=\"point_cloud.xyz\"\n\n\n\nfile=open(point_could,\"r\")\n\ndata=file.readlines()\n\narray=[]\nfor q in data:\n temp=q.split(\" \")\n x=float(temp[0])\n y=float(temp[1])\n z=float(temp[2])\n if z>0.2:\n\n array.append((x,y,z))\n\n\n\nvaule_found=[]\ndata=[[]]\ncount=0\n\nset_hight=array[0][2]\n\n\nfor q in array:\n if q[2]==set_hight:\n data[count].append(q)\n else:\n data.append([])\n count+=1\n set_hight=q[2]\n data[count].append(q)\n vaule_found.append(q[2])\n\n\n\nprint(data[0])\n\ncenter_point=0,0\n\n#max x point from 
center\n\nslice_points = []\nfor loop1 in data:\n layer_1=loop1\n max_x=[0,0,0]\n min_x=[999999999999999999999999999,0,0]\n\n\n\n for q in layer_1:\n\n x=q[0]\n y=q[1]\n z=q[2]\n\n if x > max_x[0]:\n max_x[0]=x\n max_x[1]=y\n max_x[2]=z\n\n if x< min_x[0]:\n min_x[0]=x\n min_x[1]=y\n min_x[2]=z\n\n slice_points.append(max_x)\n slice_points.append(min_x)\n\n\n\nslice_size=0.1\n#bott add in\nfor q in data[0]:\n x=q[0]\n y=q[1]\n z=q[2]\n\n\n if x >min_x[0] and xmin_x[1]-slice_size and ymin_x[0] and xmin_x[1]-slice_size and yslioet_max_x:\n slioet_max_x=x\n\n if xslioet_max_z:\n slioet_max_z=z\n\n if z< siloet_min_z:\n siloet_min_z=z\n\n\nbox_bond=((siloet_min_x, siloet_min_z), (slioet_max_x, slioet_max_z))\n\nprint(\"box bondersy \",box_bond)\n\ndef for_loop_2(v1,v2,step):\n data=[]\n if v1 >v2:\n temp=v2\n v2=v1\n v1=temp\n\n while(v1 Subject:\n return self.extended_query_subject\n","sub_path":"query_rewriter/ui/tabs/ExtensionTab.py","file_name":"ExtensionTab.py","file_ext":"py","file_size_in_byte":4204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"132030663","text":"import scrapy\nfrom scrapy.spiders import Rule, CrawlSpider\nfrom scrapy.linkextractor import LinkExtractor\nfrom ..items import MsuItem\nimport re\nfrom bs4 import BeautifulSoup as BS\nimport uuid\ndef response_to_text(response):\n soup = BS(response._get_body().decode(\"utf-8\"), \"html.parser\")\n for child in soup.body.children:\n if child.name == 'script':\n child.decompose() \n res = soup.body.get_text()\n res = re.sub(r\"\\W\", \" \", res)\n # res = re.sub(r\"\\d\", \" \", res)\n res = re.sub(r\"\\s+\", \" \", res)\n return res\n\n\nclass MsuSpider(CrawlSpider):\n name = \"msu\"\n allowed_domains = [\"msu.ru\",\"www.msu.ru\"]\n start_urls = [\"https://www.msu.ru/\"]\n rules = [\n Rule(\n LinkExtractor(\n canonicalize=True,\n unique=True,\n ),\n follow=True,\n callback=\"parse_items\"\n )\n ]\n path = \"./msu_files/\"\n def start_requests(self):\n for url in self.start_urls:\n yield scrapy.Request(url, callback=self.parse, dont_filter=True)\n\n def parse_items(self, response):\n item = MsuItem()\n item['url'] = response.url\n filename = str(uuid.uuid4())\n with open(\"%s%s.txt\" % (self.path, filename), \"w\", encoding=\"utf-8\") as rf:\n rf.write(response_to_text(response))\n item['f_n'] = filename\n return item\n","sub_path":"thrdmodule/spiders/msu.py","file_name":"msu.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"216719441","text":"import os, sys, getopt\nimport os.path\nimport numpy as np\nimport multiprocessing\nfrom random import shuffle\nfrom collections import namedtuple, OrderedDict, defaultdict\nimport gensim\nfrom gensim.models import Doc2Vec\nimport gensim.models.doc2vec\nfrom gensim.test.test_doc2vec import ConcatenatedDoc2Vec\n# for timing\nfrom contextlib import contextmanager\nfrom timeit import default_timer\nimport time \nimport datetime\n\n\nopts,args=getopt.getopt(sys.argv[1:],'i:d:q:o:')\nfor opt,arg in opts:\n if opt in ('-d','--datadir'):\n datadir=str(arg)\n if opt in ('-i','--lineno2cwid'):\n cwiddir=str(arg)\n if opt in ('-q','--qid'):\n qid=str(arg)\n if opt in ('-o','--outdir'):\n outdir=str(arg)\n\n\ndatafile=datadir + \"/\" + str(qid) + \"/part-00000\"\nlineno2cwidf=cwiddir + \"/\" + str(qid) + \"/part-00000\"\nCWDocument = namedtuple('CWDocument', 'words tags')\nalldocs = [] # will hold all docs in original order\nalltags = 
[]\nwith open(datafile) as data, open(lineno2cwidf) as cwid:\n for line_no, line in zip(cwid,data):\n docid=line_no.rstrip().split(\" \")[1]\n words = gensim.utils.to_unicode(line.rstrip(), errors='strict').split()\n tags = [docid] # `tags = [tokens[0]]` would also work at extra memory cost\n alldocs.append(CWDocument(words, tags))\n alltags.append(docid)\ndoc_list = alldocs[:] # for reshuffling per pass\n\nprint('Input %d docs for query %s ' % (len(doc_list), qid))\n\n\n\ncores = multiprocessing.cpu_count()\nassert gensim.models.doc2vec.FAST_VERSION > -1, \"this will be painfully slow otherwise\"\n\nsimple_models = [\n # PV-DM w/concatenation - window=5 (both sides) approximates paper's 10-word total window size\n Doc2Vec(dm=1, dm_concat=1, size=100, window=5, negative=5, hs=0, min_count=2, workers=cores),\n # PV-DBOW \n Doc2Vec(dm=0, size=100, negative=5, hs=0, min_count=2, workers=cores),\n # PV-DM w/average\n Doc2Vec(dm=1, dm_mean=1, size=100, window=10, negative=5, hs=0, min_count=2, workers=cores),\n]\n\n# speed setup by sharing results of 1st model's vocabulary scan\nsimple_models[0].build_vocab(alldocs) # PV-DM/concat requires one special NULL word so it serves as template\n#print(simple_models[0])\nfor model in simple_models[1:]:\n model.reset_from(simple_models[0])\n #print(model)\n\nmodels_by_name = OrderedDict((str(model), model) for model in simple_models)\n\nmodels_by_name['dbow+dmm'] = ConcatenatedDoc2Vec([simple_models[1], simple_models[2]])\nmodels_by_name['dbow+dmc'] = ConcatenatedDoc2Vec([simple_models[1], simple_models[0]])\n\nmanualModelName=['dmc','dbow','dmm','dbow+dmm','dbow+dmc']\n\n@contextmanager\ndef elapsed_timer():\n start = default_timer()\n elapser = lambda: default_timer() - start\n yield lambda: elapser()\n end = default_timer()\n elapser = lambda: end-start\n\ndef cwidvec2str(cwid, vec):\n line=list()\n line.append(cwid)\n for idx, val in enumerate(vec):\n line.append(str(idx) + \":\" + '%.6f'%val)\n return ' '.join(line)\n\n\nalpha, min_alpha, passes = (0.025, 0.001, 20)\nalpha_delta = (alpha - min_alpha) / passes\n\nprint(\"START query %s at %s\" % (qid, datetime.datetime.now()))\n\nfor epoch in range(passes):\n shuffle(doc_list) # shuffling gets best results\n for name, train_model in models_by_name.items():\n # train\n duration = 'na'\n train_model.alpha, train_model.min_alpha = alpha, alpha\n with elapsed_timer() as elapsed:\n train_model.train(doc_list)\n duration = '%.1f' % elapsed()\n #print(\"%i passes : %s %ss\" % (epoch + 1, name, duration))\n if (epoch + 1) % 5 == 0:\n print('%s: completed pass %i at alpha %f' % (qid, epoch + 1, alpha))\n alpha -= alpha_delta\n\n\ni=0\nfor name, model in models_by_name.items():\n lines=list()\n subdir = outdir + \"/\" + manualModelName[i]\n if not os.path.exists(subdir):\n os.makedirs(subdir)\n outf = open(subdir + \"/\" + qid,'w')\n i += 1\n for cwid in alltags:\n lines.append(cwidvec2str(cwid, model.docvecs[cwid]))\n outf.write('\\n'.join(lines))\n outf.close()\n\nprint(\"Finished query %s at %s\" % (qid, str(datetime.datetime.now())))\n","sub_path":"src/main/resources/python/para2vec/originPara2vec/clueweb.py","file_name":"clueweb.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"552871477","text":"#!/usr/bin/python3\n\"\"\"\nStart with your class from Exercise 9-1 Create three\ndifferent instances from the class, and call describe_restaurant() for each\ninstance\n\"\"\"\n\nclass Restaurant():\n \"\"\"A class 
that represents a restaurant\"\"\"\n\n def __init__(self, name, cuisine_type):\n \"\"\"Initilizes the restaurant\"\"\"\n self.name = name.title()\n self.cuisine_type = cuisine_type\n\n def describe_restaurant(self):\n \"\"\"A method that displays a summary of the restaurant\"\"\"\n msg = self.name + \" serves delicious \" + self.cuisine_type + \".\"\n print(\"\\n\" + msg)\n\n def open_restaurant(self):\n \"\"\"Displays a message saying that the restaurant is open\"\"\"\n msg = self.name + \" is open. Yokouso!\"\n print(\"\\n\" + msg)\n\nhamazushi = Restaurant('Hamazushi', 'sushi')\nhamazushi.describe_restaurant()\n\nfutomichi = Restaurant(\"futomichi\", \"ramen\")\nfutomichi.describe_restaurant()\n\nyakinikuking = Restaurant(\"Yakiniku King\", \"yakiniku\")\nyakinikuking.describe_restaurant()\n","sub_path":"09-Classes/2-three_restaurants.py","file_name":"2-three_restaurants.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"609128986","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom PyQt5 import QtCore\r\nimport os\r\nimport shutil\r\nimport sys\r\nimport numpy as np\r\nimport configparser\r\nsys.path.append('..')\r\nfrom pretreatment import vad, feature_extract\r\nfrom gmm import ubm, gmm\r\nfrom group import group\r\n\r\n\r\nclass MainThread(QtCore.QThread):\r\n def __init__(self, inputdir, outputdir):\r\n self.inputdir = inputdir\r\n self.outputdir = outputdir\r\n super(MainThread, self).__init__()\r\n\r\n update_text_signal = QtCore.pyqtSignal(str)\r\n is_finished_signal = QtCore.pyqtSignal(bool)\r\n change_color_signal = QtCore.pyqtSignal(bool)\r\n\r\n def run(self):\r\n cf = configparser.ConfigParser()\r\n root = os.path.dirname(os.path.dirname(__file__))\r\n cf.read(os.path.join(root, 'configure.ini'))\r\n inputdir = self.inputdir\r\n outputdir = self.outputdir\r\n \r\n self.update_text_signal.emit('正在进行端点检测...\\n')\r\n vadpath = os.path.join(outputdir, 'vad')\r\n if os.path.exists(vadpath):\r\n shutil.rmtree(vadpath)\r\n os.mkdir(vadpath)\r\n vad.main(inputdir, vadpath,\r\n max_interval=cf.getfloat('vad', 'max_interval'))\r\n self.update_text_signal.emit('端点检测完成\\n')\r\n\r\n self.update_text_signal.emit('正在进行特征提取...\\n')\r\n featurepath = os.path.join(outputdir, 'feature')\r\n if os.path.exists(featurepath):\r\n shutil.rmtree(featurepath)\r\n os.mkdir(featurepath)\r\n feature_extract.main(vadpath, featurepath,\r\n dim=cf.getint('feature', 'dim_mfcc'))\r\n self.update_text_signal.emit('特征提取完成\\n')\r\n\r\n self.update_text_signal.emit('正在训练模型....\\n')\r\n ubmpath = os.path.join(outputdir, 'ubm')\r\n ubm.train_ubm(featurepath, ubmpath,\r\n n_components=cf.getint('model', 'n_components'))\r\n\r\n ubmmodel = np.load(ubmpath + '.npy')[0]\r\n weights_init = ubmmodel.weights_init\r\n means_init = ubmmodel.means_init\r\n precisions_init = ubmmodel.precisions_init\r\n\r\n gmmpath = os.path.join(outputdir, 'gmm')\r\n if os.path.exists(gmmpath):\r\n shutil.rmtree(gmmpath)\r\n os.mkdir(gmmpath)\r\n gmm.main(featurepath, gmmpath,\r\n weights_init, means_init, precisions_init,\r\n n_components=cf.getint('model', 'n_components'))\r\n self.update_text_signal.emit('模型训练完成\\n')\r\n\r\n self.update_text_signal.emit('正在分组...\\n')\r\n relation = group.relation(gmmpath, featurepath, inputdir,\r\n dim=3*(cf.getint('feature', 'dim_mfcc')-1))\r\n np.save(os.path.join(outputdir, 'relation'), relation)\r\n union_relation = group.union(relation)\r\n\r\n resultpath = os.path.join(outputdir, 'result')\r\n 
np.save(resultpath, union_relation)\r\n\r\n if os.path.exists(resultpath + '.txt'):\r\n os.remove(resultpath + '.txt')\r\n f = open(os.path.join(outputdir, 'result.txt'), 'a+')\r\n for r in union_relation:\r\n f.writelines(str(r) + '\\n')\r\n f.close() \r\n self.update_text_signal.emit('分组完成\\n')\r\n self.update_text_signal.emit('结束!\\n')\r\n self.update_text_signal.emit('检测结果:\\n')\r\n err = 0\r\n for r in union_relation:\r\n tmp = set()\r\n for s in r:\r\n tmp.add(s.split('\\\\')[-2])\r\n if len(tmp) != 1:\r\n self.change_color_signal.emit(True)\r\n self.update_text_signal.emit('以下文件可能出自同一人:')\r\n for p in r:\r\n self.update_text_signal.emit(p)\r\n self.update_text_signal.emit('\\n')\r\n err += 1 \r\n if err == 0:\r\n self.update_text_signal.emit('无异常') \r\n self.is_finished_signal.emit(True)\r\n","sub_path":"voiceprint/gui/mainthread.py","file_name":"mainthread.py","file_ext":"py","file_size_in_byte":3931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"58582341","text":"import pandas\nimport re\nimport string\nimport tensorflow as tf\nfrom collections import Counter\nfrom nltk.stem.porter import PorterStemmer\nfrom nltk.corpus import stopwords\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.layers import Flatten\nfrom keras.layers import Embedding\nfrom keras.layers.convolutional import Conv1D\nfrom keras.layers.convolutional import MaxPooling1D\n\n#---------------------------------\n# BUILT-IN FUNCTIONS \n#---------------------------------\ndef clean_text(txt):\n \"\"\"Preprocessing - Turning texts into clean tokens\n \"\"\"\n # Ensure lowercase text encoding\n txt = str(txt).lower()\n # split tokens by white space\n tokens = txt.split()\n # remove tokens not encoded in ascii\n isascii = lambda s: len(s) == len(s.encode())\n tokens = [w for w in tokens if isascii(w)]\n # regex for punctuation filtering\n re_punc = re.compile('[%s]' % re.escape(string.punctuation))\n # remove punctuation from each word\n tokens = [re_punc.sub('', w) for w in tokens]\n # remove tokens that aren't alphanumeric\n tokens = [w for w in tokens if w.isalnum()]\n # regex for digits filtering\n re_digt = re.compile('[%s]' % re.escape(string.digits)) \n # remove digits from each word\n tokens = [re_digt.sub('', w) for w in tokens] \n # filter out stop words\n stop_words = set(stopwords.words('english'))\n tokens = [w for w in tokens if not w in stop_words]\n # filter out long tokens\n tokens = [w for w in tokens if len(w) < 30]\n # filter out short tokens\n tokens = [w for w in tokens if len(w) > 1]\n # stemming of words\n porter = PorterStemmer()\n tokens = [porter.stem(w) for w in tokens]\n return tokens\n\ndef token_to_line(txt, vocab):\n \"\"\"Clean text and return line of tokens\n dependency: clean_text\n \"\"\"\n # clean text\n tokens = clean_text(txt)\n # filter by vocabulary\n tokens = [w for w in tokens if w in vocab]\n return ' '.join(tokens)\n\ndef process_texts(texts, vocab):\n \"\"\"Clean texts to only contain tokens present in the vocab\n dependency: token_to_line\n \"\"\"\n lines = list() \n for txt in texts:\n # load and clean the doc\n line = token_to_line(txt, vocab)\n # add to list\n lines.append(line)\n return lines\n\ndef save_vocab(lines, filename):\n \"\"\"Saving a list of items to a file; line-by-line\n \"\"\"\n data = '\\n'.join(lines)\n file = 
open(filename, 'w')\n    file.write(data)\n    file.close()\n\ndef load_vocab(filename):\n    \"\"\"Load doc into memory\n    \"\"\"\n    # open the file as read only\n    file = open(filename, 'r')\n    # read all text\n    text = file.read()\n    # close the file\n    file.close()\n    return text\n\ndef add_tokens_vocab(txt, vocab):\n    \"\"\"Creating vocabulary containing unique tokens from all texts\n    dependency: clean_text\n    \"\"\"\n    tokens = clean_text(txt) \n    vocab.update(tokens) \n    \ndef build_vocab(texts):\n    \"\"\"Creating vocabulary and saving output to a text file\n    dependency: add_tokens_vocab\n    \"\"\"\n    vocab = Counter()\n    for txt in texts:\n        add_tokens_vocab(txt, vocab) \n    # save tokens to a vocabulary file; for later access in model build/predict \n    save_vocab(vocab, \"vocab.txt\")\n    \ndef create_tokenizer(lines):\n    \"\"\" Defining a tokenizer\n    dependency: from keras.preprocessing.text import Tokenizer\n    \"\"\" \n    tokenizer = Tokenizer()\n    tokenizer.fit_on_texts(lines)\n    return tokenizer\n    \ndef encode_docs(tokenizer, max_length, docs):\n    \"\"\" Encode each 'cleaned' string as a sequence of integers\n    dependency: create_tokenizer\n    \"\"\" \n    # integer encode \n    encoded = tokenizer.texts_to_sequences(docs)\n    # pad sequences to ensure that all strings have the same length\n    # max_length is the length of the longest string\n    padded = pad_sequences(encoded, maxlen = max_length, padding='post') \n    return padded\n\ndef tf_auc_roc(y_true, y_pred):\n    \"\"\" Defining AUC ROC metrics for model performance from tensorflow package since AUC isn't available in Keras\n    dependency: import tensorflow as tf\n    \"\"\" \n    # any tensorflow metric\n    value, update_op = tf.contrib.metrics.streaming_auc(y_pred, y_true)\n    # find all variables created for this metric\n    metric_vars = [i for i in tf.local_variables() if 'auc_roc' in i.name.split('/')[1]]\n    # Add metric variables to GLOBAL_VARIABLES collection.\n    for v in metric_vars:\n        tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, v)\n    # force to update metric values\n    with tf.control_dependencies([update_op]):\n        value = tf.identity(value)\n    return value\n\ndef define_model(vocab_size, max_length): \n    \"\"\" Defining the neural network model\n    \"\"\" \n    model = Sequential()\n    # embedding part: 150-dimensional vector space (explicit assignment; experimental)\n    model.add(Embedding(vocab_size, 150, input_length = max_length))\n    # add a CNN layer with 32 filters (parallel fields for processing words)\n    # and a kernel size of 8 with a rectified linear (relu) activation function.\n    model.add(Conv1D(filters = 32, kernel_size = 8, activation='relu')) \n    # add pooling layer to reduce the output of the CNN layer\n    # pool_size = 2 to reduce by half\n    model.add(MaxPooling1D(pool_size=2))\n    # flatten the CNN output to one long 2D vector representing features extracted by CNN\n    model.add(Flatten())\n    # add a standard MLP layer to interpret the CNN features\n    model.add(Dense(30, activation='relu'))\n    # use a sigmoid activation function in the output layer to return a value between 0 and 1 (binary classification)\n    model.add(Dense(1, activation='sigmoid'))\n    return model\n\n    \n#---------------------------------\n# MAIN \n#---------------------------------\n#data_path = \"/Data\"\n\nprint(\"Loading data sets into Memory...\")\ntrain_df = pandas.read_csv(\"train.csv\", quotechar='\"', skipinitialspace=True, encoding='utf-8')\nprint(\"...training data dimension: \" + str(train_df.shape))\ntest_df = pandas.read_csv(\"test.csv\", quotechar='\"', skipinitialspace=True, 
encoding='utf-8')\nprint(\"...test data (rows for prediction): \" + str(test_df.shape[0]))\n\n#----\n# Data prep to Model Build \n#----\n\nprint(\"Shuffling the training data row-wise...\")\n# Shuffle the data frame row-wise\n# useful during model fit since keras is getting only the last n% of data (w/o randomization)\n# in defining the validation set\ntrain_df = train_df.sample(frac=1).reset_index(drop=True)\n\nprint(\"Building the neural network inputs...\")\n# get target\nytrain = train_df.label\n \n# create vocabulary file from the train data\nbuild_vocab(train_df.tweet) \n\n# load the vocabulary\ntokens = load_vocab(\"vocab.txt\")\n\n# process strings to contain only clean tokens\ntexts = process_texts(train_df.tweet, vocab = tokens)\n\n# identify the maximum string word length\nmax_length = max([len(t.split()) for t in texts])\n\n# instantiate the tokenizer\ntokenizer = create_tokenizer(texts)\n\n# identify the size of the full vocabulary\n# add +1 for unknown words\nvocab_size = len(tokenizer.word_index) + 1\n\n# prepare train and test sets for network processing\nxtrain = encode_docs(tokenizer, max_length, texts)\nxtest = encode_docs(tokenizer, max_length, process_texts(test_df.tweet, vocab = tokens))\n\nprint(\"Defining the neural network model...\")\n# define the neural network model\nmodel = define_model(vocab_size, max_length)\n\n# compile network\n# use binary cross entropy loss function for classification problem\n# use the 'adam' implementation of stochastic gradient descent\n# keep track of AUC ROC in addition to loss during training\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=[tf_auc_roc]) \n\n# summarize the defined network\nmodel.summary()\n\n# model checkpoint\n# checkpointing to ensure that each time the model performance improves on the validation set during model build, \n# the model is saved to file.\n# performance is evaluated based on the defined AUC ROC (monitor='val_tf_auc_roc')\nfilepath = \"weights.bestmodel.hdf5\"\ncheckpoint = ModelCheckpoint(filepath, monitor='val_tf_auc_roc', verbose=1, save_best_only=True, mode='max')\ncallbacks_list = [checkpoint]\n\nprint(\"Running model Build...\")\n# fit network\n# 30 epochs to cycle through the training data (can be configured differently)\n# set the last 10% of training data as the validation set (can be configured differently)\n# use pre-defined callback_list to save the best model in the cycle\n# Assign class_weight to handle data imbalance\nclass_weight = {0 : 1000., 1: 75.}\nmodel.fit(xtrain, ytrain, epochs = 30, validation_split = 0.10, verbose = 0, callbacks=callbacks_list, class_weight = class_weight)\nprint(\"...Model build process:COMPLETED\")\n\n#----\n# Test file prediction to writing a .csv submission file\n#----\n\n# redefine the network structure (can be skipped if model build is active in the current session)\nmodel = define_model(vocab_size, max_length)\nmodel.load_weights(\"weights.bestmodel.hdf5\")\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=[tf_auc_roc])\n\nprint(\"Running test set predictions...\")\n# run predictions\nresults = pandas.DataFrame(model.predict(xtest, verbose=0))\n\nprint(\"writing predictions to a submission file...\")\n# write a submission file\ntest_df[\"label\"] = results.iloc[:,0]\ntest_df[\"label\"] = round(test_df[\"label\"])\ntest_df = test_df[[\"id\", \"label\"]]\ntest_df.to_csv(\"test_predictions.csv\", encoding='utf-8',index=False)\nprint(\"...Test set prediction 
process:COMPLETED\")\n\n\n#---End-Of-File\n\n\n\n\n\n\n","sub_path":"Hate_Speech_Classification.py","file_name":"Hate_Speech_Classification.py","file_ext":"py","file_size_in_byte":9660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"53189644","text":"# DP solution -> obstacles indicate no acceptable path \nclass Solution:\n def uniquePathsWithObstacles(self, obstacleGrid: List[List[int]]) -> int:\n if not (obstacleGrid and obstacleGrid[0]):\n return 1\n elif obstacleGrid[0][0] != 1:\n obstacleGrid[0][0] = 1\n else:\n return 0\n \n for i in range(len(obstacleGrid)):\n for j in range(len(obstacleGrid[0])):\n if obstacleGrid[i][j] == 1 and not i == j == 0:\n obstacleGrid[i][j] = 0\n elif i > 0 and j > 0:\n obstacleGrid[i][j] = obstacleGrid[i-1][j] + obstacleGrid[i][j-1]\n elif i > 0:\n obstacleGrid[i][j] = obstacleGrid[i-1][j]\n elif j > 0:\n obstacleGrid[i][j] = obstacleGrid[i][j-1]\n return obstacleGrid[-1][-1]\n\nclass Solution:\n def uniquePathsWithObstacles(self, obstacleGrid: List[List[int]]) -> int:\n if not obstacleGrid:\n return 0\n elif len(obstacleGrid[0]) == 0:\n return 1\n cache = {(0,0): int(obstacleGrid[0][0] == 0)}\n def helper(row, col):\n if obstacleGrid[row][col] == 1 or row < 0 or col < 0:\n cache[(row, col)] = 0\n return 0\n elif (row, col) in cache:\n return cache[(row, col)]\n else:\n cache[(row, col)] = helper(row-1, col) + helper(row, col-1)\n return cache[(row, col)]\n return helper(len(obstacleGrid)-1,len(obstacleGrid[0])-1)\n","sub_path":"python/63_UniquePathsII.py","file_name":"63_UniquePathsII.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"623276998","text":"def get_nuisance_mask(input, pathSPM, deformation, path_output, nerode_white=1, nerode_csf=1, \n segmentation=True, cleanup=True):\n \"\"\"\n This function calculates WM and CSF masks in space of the functional time series. It uses SPM\n to compute WM and CSF probability maps. 
These maps are masked with a skullstrip mask and \n transformed to native epi space.\n Inputs:\n *input: input anatomy (orig.mgz).\n *pathSPM: path to spm toolbox.\n *deformation: coordinate mapping for ana to epi transformation.\n *path_output: path where output is saved.\n *nerode_white: number of wm mask eroding steps.\n *nerode_csf: number of csf mask eroding steps.\n *segmentation: do not calculate new masks to not rerun everything.\n *cleanup: delete intermediate files.\n\n created by Daniel Haenelt\n Date created: 01-03-2019\n Last modified: 01-03-2019\n \"\"\"\n import os\n import shutil as sh\n import nibabel as nb\n from scipy.ndimage.morphology import binary_erosion\n from nipype.interfaces.fsl import BET\n from nipype.interfaces.freesurfer.preprocess import MRIConvert\n from nighres.registration import apply_coordinate_mappings\n from lib.skullstrip.skullstrip_spm12 import skullstrip_spm12\n\n # make output folder\n if not os.path.exists(path_output):\n os.mkdir(path_output)\n\n # get filename without file extension of input file\n file = os.path.splitext(os.path.basename(input))[0]\n\n # convert to nifti format\n mc = MRIConvert()\n mc.inputs.in_file = input\n mc.inputs.out_file = os.path.join(path_output,file + \".nii\")\n mc.inputs.out_type = \"nii\"\n mc.run()\n\n # bet skullstrip mask\n btr = BET()\n btr.inputs.in_file = os.path.join(path_output,file + \".nii\")\n btr.inputs.frac = 0.5\n btr.inputs.mask = True\n btr.inputs.no_output = True\n btr.inputs.out_file = os.path.join(path_output,\"bet\")\n btr.inputs.output_type = \"NIFTI\"\n btr.run() \n\n # segmentation\n if segmentation:\n skullstrip_spm12(os.path.join(path_output,file + \".nii\"), \n pathSPM, \n path_output)\n\n # load tissue maps\n wm_array = nb.load(os.path.join(path_output,\"skull\",\"c2\" + file + \".nii\")).get_fdata()\n csf_array = nb.load(os.path.join(path_output,\"skull\",\"c3\" + file + \".nii\")).get_fdata()\n mask_array = nb.load(os.path.join(path_output,\"bet_mask.nii\")).get_fdata()\n\n # binarize\n wm_array[wm_array > 0] = 1\n csf_array[csf_array > 0] = 1\n\n # apply brain mask\n wm_array = wm_array * mask_array\n csf_array = csf_array * mask_array\n\n # erode wm\n wm_array = binary_erosion(\n wm_array, \n structure=None, \n iterations=nerode_white,\n mask=None, \n output=None, \n border_value=0, \n origin=0, \n brute_force=False,\n )\n\n # erode csf\n csf_array = binary_erosion(\n csf_array, \n structure=None, \n iterations=nerode_csf,\n mask=None, \n output=None, \n border_value=0, \n origin=0, \n brute_force=False,\n )\n\n # write wm and csf mask\n data_img = nb.load(input)\n wm_out = nb.Nifti1Image(wm_array, data_img.affine, data_img.header)\n nb.save(wm_out, os.path.join(path_output,\"wm_mask_orig.nii\"))\n csf_out = nb.Nifti1Image(csf_array, data_img.affine, data_img.header)\n nb.save(csf_out, os.path.join(path_output,\"csf_mask_orig.nii\"))\n\n # apply deformation to mask\n apply_coordinate_mappings(os.path.join(path_output,\"wm_mask_orig.nii\"), # input \n deformation, # cmap\n interpolation = \"nearest\", # nearest or linear\n padding = \"zero\", # closest, zero or max\n save_data = True, # save output data to file (boolean)\n overwrite = True, # overwrite existing results (boolean)\n output_dir = path_output, # output directory\n file_name = \"wm_mask\" # base name with file extension for output\n )\n\n apply_coordinate_mappings(os.path.join(path_output,\"csf_mask_orig.nii\"), # input \n deformation, # cmap\n interpolation = \"nearest\", # nearest or linear\n padding = \"zero\", # 
closest, zero or max\n save_data = True, # save output data to file (boolean)\n overwrite = True, # overwrite existing results (boolean)\n output_dir = path_output, # output directory\n file_name = \"csf_mask\" # base name with file extension for output\n )\n \n # rename transformed masks\n os.rename(os.path.join(path_output,\"wm_mask_def-img.nii.gz\"),\n os.path.join(path_output,\"wm_mask.nii.gz\"))\n os.rename(os.path.join(path_output,\"csf_mask_def-img.nii.gz\"),\n os.path.join(path_output,\"csf_mask.nii.gz\"))\n\n # cleanup\n if cleanup:\n os.remove(os.path.join(path_output,\"bet_mask.nii\"))\n os.remove(os.path.join(path_output,\"csf_mask_orig.nii\"))\n os.remove(os.path.join(path_output,\"wm_mask_orig.nii\"))\n os.remove(os.path.join(path_output,\"orig.nii\"))\n sh.rmtree(os.path.join(path_output,\"skull\"), ignore_errors=True)\n","sub_path":"lib/preprocessing/get_nuisance_mask.py","file_name":"get_nuisance_mask.py","file_ext":"py","file_size_in_byte":5529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"174207723","text":"from .core import *\r\nfrom .utils import attributeerror_wrapper\r\nfrom .vparsers import *\r\n \r\n\r\nclass MetroTargowekParser(SingleWebpageParser):\r\n url = \"https://www.rsmpraga.pl/inwestycje/metro-targowek/\"\r\n method = \"GET\"\r\n \r\n schema = [\r\n DataUnit(label=\"Numer\", parser=DOMTextExtractor(), id=\"number\"),\r\n DataUnit(label=\"Pow.\", parser=AreaParser(DOMTextExtractor()), id=\"area\"),\r\n DataUnit(label=\"Piętro\", parser=IntParser(DOMTextExtractor()), id=\"floor\"),\r\n DataUnit(label=\"Pokoje\", parser=IntParser(DOMTextExtractor()), id=\"rooms\"),\r\n DataUnit(label=\"Benefity\", parser=NoneParser(), id=\"benefits_none\"),\r\n DataUnit(label=\"Plan\", parser=LinkParser(DOMElementExtractor(\"a\")), id=\"plan\"),\r\n DataUnit(label=\"Status\", parser=StatusParser(DOMTextExtractor()), id=\"status\")\r\n ]\r\n \r\n @attributeerror_wrapper(return_value=[])\r\n def find_records(self, soup):\r\n return soup.find(\"div\", {\"id\": \"invest-offer\"}).find(\"table\")\\\r\n .find(\"tbody\").find_all(\"tr\")\r\n \r\n def split_record(self, record):\r\n return record.find_all(\"td\")\r\n \r\n def modify_record(self, record, soup=None):\r\n record[\"number\"] = \"{floor}/{number}\".format(**record)\r\n record[\"fid\"] = record[\"number\"]\r\n if record[\"status\"] is None:\r\n record[\"status\"] = StatusParser.AVAILABLE\r\n return record\r\n","sub_path":"parsers/metrotargowek.py","file_name":"metrotargowek.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"100118928","text":"from typing import Any, Dict, Optional\n\nfrom ..._utils import ListPage\nfrom ..base import ResourceCollectionClient\n\n\nclass KeyValueStoreCollectionClient(ResourceCollectionClient):\n \"\"\"Sub-client for manipulating key-value stores.\"\"\"\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Initialize the KeyValueStoreCollectionClient with the passed arguments.\"\"\"\n resource_path = kwargs.pop('resource_path', 'key-value-stores')\n super().__init__(*args, resource_path=resource_path, **kwargs)\n\n def list(\n self,\n *,\n unnamed: Optional[bool] = None,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n desc: Optional[bool] = None,\n ) -> ListPage:\n \"\"\"List the available key-value stores.\n\n 
https://docs.apify.com/api/v2#/reference/key-value-stores/store-collection/get-list-of-key-value-stores\n\n Args:\n unnamed (bool, optional): Whether to include unnamed key-value stores in the list\n limit (int, optional): How many key-value stores to retrieve\n offset (int, optional): What key-value store to include as first when retrieving the list\n desc (bool, optional): Whether to sort the key-value stores in descending order based on their modification date\n\n Returns:\n ListPage: The list of available key-value stores matching the specified filters.\n \"\"\"\n return self._list(unnamed=unnamed, limit=limit, offset=offset, desc=desc)\n\n def get_or_create(self, *, name: Optional[str] = None) -> Dict:\n \"\"\"Retrieve a named key-value store, or create a new one when it doesn't exist.\n\n https://docs.apify.com/api/v2#/reference/key-value-stores/store-collection/create-key-value-store\n\n Args:\n name (str, optional): The name of the key-value store to retrieve or create.\n\n Returns:\n dict: The retrieved or newly-created key-value store.\n \"\"\"\n return self._get_or_create(name=name)\n","sub_path":"src/apify_client/clients/resource_clients/key_value_store_collection.py","file_name":"key_value_store_collection.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"230601249","text":"\"\"\"Test for remote_services.\"\"\"\nimport datetime\nfrom test import (G31_VIN, MESSAGE_DATA, MESSAGE_REQUEST, POI_DATA,\n POI_REQUEST, TEST_PASSWORD, TEST_REGION, TEST_USERNAME,\n BackendMock, load_response_json)\nfrom unittest import mock, TestCase\n\nfrom requests.exceptions import HTTPError\n\nfrom bimmer_connected import remote_services\nfrom bimmer_connected.account import ConnectedDriveAccount\nfrom bimmer_connected.remote_services import (ExecutionState, Message,\n PointOfInterest,\n RemoteServiceStatus)\n\n\n_RESPONSE_LEGACY_UNKNOWN = 'remote_services/legacy_flash_unknown.json'\n_RESPONSE_LEGACY_INITIATED = 'remote_services/legacy_flash_initiated.json'\n_RESPONSE_LEGACY_PENDING = 'remote_services/legacy_flash_pending.json'\n_RESPONSE_LEGACY_DELIVERED = 'remote_services/legacy_flash_delivered.json'\n_RESPONSE_LEGACY_EXECUTED = 'remote_services/legacy_flash_executed.json'\n_MSG_EXECUTED = 'remote_services/legacy_msg_executed.json'\n\n_RESPONSE_EADRAX_INITIATED = 'remote_services/eadrax_service_initiated.json'\n_RESPONSE_EADRAX_PENDING = 'remote_services/eadrax_service_pending.json'\n_RESPONSE_EADRAX_DELIVERED = 'remote_services/eadrax_service_delivered.json'\n_RESPONSE_EADRAX_EXECUTED = 'remote_services/eadrax_service_executed.json'\n\n\nclass TestRemoteServices(TestCase):\n \"\"\"Test for remote_services.\"\"\"\n\n # pylint: disable=protected-access\n\n def test_parse_timestamp(self):\n \"\"\"Test parsing the timestamp format.\"\"\"\n timestamp = RemoteServiceStatus._parse_timestamp(\"2018-02-11T15:10:39.465+01\")\n expected = datetime.datetime(year=2018, month=2, day=11, hour=15, minute=10, second=39, microsecond=465000)\n self.assertEqual(expected, timestamp)\n\n def test_states(self):\n \"\"\"Test parsing the different response types.\"\"\"\n rss = RemoteServiceStatus(load_response_json(_RESPONSE_LEGACY_UNKNOWN))\n self.assertEqual(ExecutionState.UNKNOWN, rss.state)\n\n rss = RemoteServiceStatus(load_response_json(_RESPONSE_LEGACY_INITIATED))\n self.assertEqual(ExecutionState.INITIATED, rss.state)\n\n rss = RemoteServiceStatus(load_response_json(_RESPONSE_LEGACY_PENDING))\n 
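# (added note) test_states walks the remote-service status ladder in order:\n        # UNKNOWN -> INITIATED -> PENDING -> DELIVERED -> EXECUTED, one canned response file per state.\n        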
self.assertEqual(ExecutionState.PENDING, rss.state)\n\n rss = RemoteServiceStatus(load_response_json(_RESPONSE_LEGACY_DELIVERED))\n self.assertEqual(ExecutionState.DELIVERED, rss.state)\n\n rss = RemoteServiceStatus(load_response_json(_RESPONSE_LEGACY_EXECUTED))\n self.assertEqual(ExecutionState.EXECUTED, rss.state)\n\n def test_trigger_remote_services(self):\n \"\"\"Test executing a remote light flash.\"\"\"\n remote_services._POLLING_CYCLE = 0\n remote_services._UPDATE_AFTER_REMOTE_SERVICE_DELAY = 0\n\n services = [\n ('LIGHT_FLASH', 'trigger_remote_light_flash', False),\n ('DOOR_LOCK', 'trigger_remote_door_lock', True),\n ('DOOR_UNLOCK', 'trigger_remote_door_unlock', True),\n ('CLIMATE_NOW', 'trigger_remote_air_conditioning', True),\n ('VEHICLE_FINDER', 'trigger_remote_vehicle_finder', True),\n ('HORN_BLOW', 'trigger_remote_horn', False),\n ('SEND_MESSAGE', 'trigger_send_message', False),\n ('SEND_POI', 'trigger_send_poi', False),\n ]\n\n for service, call, triggers_update in services:\n backend_mock = BackendMock()\n backend_mock.setup_default_vehicles()\n\n backend_mock.add_response(\n r'https://.+/eadrax-vrccs/v2/presentation/remote-commands/{vin}/.+$'.format(vin=G31_VIN),\n data_files=[_RESPONSE_EADRAX_INITIATED])\n\n backend_mock.add_response(\n r'https://.+/eadrax-vrccs/v2/presentation/remote-commands/eventStatus\\?eventId=.+',\n data_files=[\n _RESPONSE_EADRAX_PENDING,\n _RESPONSE_EADRAX_DELIVERED,\n _RESPONSE_EADRAX_EXECUTED])\n\n backend_mock.add_response(r'https://.+/webapi/v1/user/vehicles/{vin}/executeService'.format(vin=G31_VIN),\n data_files=[_RESPONSE_LEGACY_INITIATED])\n\n backend_mock.add_response(\n r'https://.+/webapi/v1/user/vehicles/{vin}/serviceExecutionStatus\\?serviceType={service_type}'.format(\n vin=G31_VIN, service_type=service),\n r'https://.+/webapi/v1/user/vehicles/{vin}/status'.format(\n vin=G31_VIN),\n data_files=[\n _RESPONSE_LEGACY_UNKNOWN,\n _RESPONSE_LEGACY_PENDING,\n _RESPONSE_LEGACY_DELIVERED,\n _RESPONSE_LEGACY_EXECUTED])\n\n # backend_mock.add_response(\n # r'https://.+/webapi/v1/user/vehicles/{vin}/status'.format(\n # vin=G31_VIN),\n # data_files=[_RESPONSE_LEGACY_EXECUTED])\n\n backend_mock.add_response(\n r'https://.+/eadrax-dcs/v1/send-to-car/send-to-car',\n data_files=[_MSG_EXECUTED],\n status_code=204)\n\n backend_mock.add_response(\n r'https://.+/webapi/v1/user/vehicles/{vin}/sendpoi'.format(\n vin=G31_VIN),\n data_files=[_MSG_EXECUTED],\n status_code=204)\n\n with mock.patch('bimmer_connected.account.requests', new=backend_mock):\n account = ConnectedDriveAccount(TEST_USERNAME, TEST_PASSWORD, TEST_REGION)\n mock_listener = mock.Mock(return_value=None)\n account.add_update_listener(mock_listener)\n vehicle = account.get_vehicle(G31_VIN)\n\n if service == 'SEND_MESSAGE':\n if account.server_url_eadrax:\n with self.assertRaises(NotImplementedError):\n response = getattr(vehicle.remote_services, call)(MESSAGE_DATA)\n response = RemoteServiceStatus({\"eventStatus\": \"EXECUTED\"})\n else:\n response = getattr(vehicle.remote_services, call)(MESSAGE_DATA)\n elif service == 'SEND_POI':\n response = getattr(vehicle.remote_services, call)(POI_DATA)\n else:\n response = getattr(vehicle.remote_services, call)()\n self.assertEqual(ExecutionState.EXECUTED, response.state)\n\n if triggers_update:\n mock_listener.assert_called_once_with()\n else:\n mock_listener.assert_not_called()\n\n def test_get_remote_service_status(self):\n \"\"\"Test get_remove_service_status method.\"\"\"\n backend_mock = BackendMock()\n\n with 
mock.patch('bimmer_connected.account.requests', new=backend_mock):\n account = ConnectedDriveAccount(TEST_USERNAME, TEST_PASSWORD, TEST_REGION)\n vehicle = account.get_vehicle(G31_VIN)\n\n if account.server_url_eadrax:\n backend_mock.add_response(\n r'https://.+/eadrax-vrccs/v2/presentation/remote-commands/eventStatus\\?eventId=None',\n status_code=500,\n data='[]'\n )\n with self.assertRaises(HTTPError):\n vehicle.remote_services._get_remote_service_status(remote_services._Services.REMOTE_LIGHT_FLASH)\n\n backend_mock.add_response(\n r'https://.+/eadrax-vrccs/v2/presentation/remote-commands/eventStatus\\?eventId=.+',\n data_files=[_RESPONSE_EADRAX_EXECUTED])\n\n status = vehicle.remote_services._get_remote_service_status(event_id=\"000000\")\n self.assertEqual(ExecutionState.EXECUTED, status.state)\n\n else:\n with self.assertRaises(IOError):\n vehicle.remote_services._get_remote_service_status(remote_services._Services.REMOTE_LIGHT_FLASH)\n\n backend_mock.add_response(\n r'https://.+/webapi/v1/user/vehicles/{vin}/serviceExecutionStatus\\?.+'.format(vin=G31_VIN),\n data_files=[_RESPONSE_LEGACY_EXECUTED])\n\n status = vehicle.remote_services._get_remote_service_status(\n remote_services._Services.REMOTE_LIGHT_FLASH\n )\n self.assertEqual(ExecutionState.EXECUTED, status.state)\n\n def test_parsing_of_poi_min_attributes(self):\n \"\"\"Check that a PointOfInterest can be constructed using only latitude & longitude.\"\"\"\n poi = PointOfInterest(POI_DATA[\"lat\"], POI_DATA[\"lon\"])\n msg = Message.from_poi(poi)\n self.assertEqual(msg.as_server_request, POI_REQUEST[\"min\"])\n\n def test_parsing_of_poi_all_attributes(self):\n \"\"\"Check that a PointOfInterest can be constructed using all attributes.\"\"\"\n poi = PointOfInterest(POI_DATA[\"lat\"], POI_DATA[\"lon\"], name=POI_DATA[\"name\"],\n additional_info=POI_DATA[\"additional_info\"], street=POI_DATA[\"street\"],\n city=POI_DATA[\"city\"], postal_code=POI_DATA[\"postal_code\"],\n country=POI_DATA[\"country\"], website=POI_DATA[\"website\"],\n phone_numbers=POI_DATA[\"phone_numbers\"])\n msg = Message.from_poi(poi)\n self.assertEqual(msg.as_server_request, POI_REQUEST[\"all\"])\n\n def test_parsing_of_message_min_attributes(self):\n \"\"\"Check that a Message can be constructed using text.\"\"\"\n msg = Message.from_text(MESSAGE_DATA[\"text\"])\n self.assertEqual(msg.as_server_request, MESSAGE_REQUEST[\"min\"])\n\n def test_parsing_of_message_all_attributes(self):\n \"\"\"Check that a Message can be constructed using text.\"\"\"\n msg = Message.from_text(MESSAGE_DATA[\"text\"], MESSAGE_DATA[\"subject\"])\n self.assertEqual(msg.as_server_request, MESSAGE_REQUEST[\"all\"])\n","sub_path":"test/test_remote_services.py","file_name":"test_remote_services.py","file_ext":"py","file_size_in_byte":9965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"374176745","text":"import string\nimport random\n\nname = \"Баобаб\"\ntext = \" \"\nn = int(input())\nfor i in range(1, 1001):\n text += (random.choice(string.ascii_letters))\nprint((\"{0:^\" + \"%s\" % n + \"s}\").format(name))\na = 0\nfor i in range(1, 1001):\n if i % n == 0:\n print(text[a:i])\n a = i\n","sub_path":"1510_1/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"608271318","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 20 19:06:57 2017\n\n@author: wf\n\"\"\"\nimport numpy as np\nimport pandas as 
pd\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\nfrom sklearn import neighbors\nfrom sklearn.model_selection import train_test_split\nfrom featurepossess import generate\nfrom sklearn.externals import joblib\n\nsql_matrix=generate(\"./data/sqlnew.csv\",\"./data/sql_matrix.csv\",1)\nnor_matrix=generate(\"./data/normal_less.csv\",\"./data/nor_matrix.csv\",0)\n\ndf = pd.read_csv(sql_matrix)\ndf.to_csv(\"./data/all_matrix.csv\",encoding=\"utf_8_sig\",index=False)\ndf = pd.read_csv( nor_matrix)\ndf.to_csv(\"./data/all_matrix.csv\",encoding=\"utf_8_sig\",index=False, header=False, mode='a+')\n\nfeature_max = pd.read_csv('./data/all_matrix.csv')\narr=feature_max.values\ndata = np.delete(arr, -1, axis=1) # drop the last column\n#print(arr)\ntarget=arr[:,7]\n# randomly split into training and test sets\ntrain_data,test_data,train_target,test_target = train_test_split(data,target,test_size=0.3,random_state=3)\n# model\nclf=neighbors.KNeighborsClassifier(algorithm='ball_tree')# create the classifier object\nclf.fit(train_data,train_target)# train the model\njoblib.dump(clf, './file/knn.model')\nprint(\"knn.model has been saved to 'file/knn.model'\")\n#clf = joblib.load('svm.model')\ny_pred=clf.predict(test_data)# predict\nprint(\"y_pred:%s\"%y_pred)\nprint(\"test_target:%s\"%test_target)\n#Verify\nprint('Precision:%.3f' %metrics.precision_score(y_true=test_target,y_pred=y_pred))# precision\nprint('Recall:%.3f' %metrics.recall_score(y_true=test_target,y_pred=y_pred))# recall\nprint(metrics.confusion_matrix(y_true=test_target,y_pred=y_pred))# confusion matrix\n\n\n\n","sub_path":"Homework/2019/Task5/1/code/sqlkNN.py","file_name":"sqlkNN.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"119431443","text":"from batou import FileLockedError\nfrom configupdater import ConfigUpdater\nimport fcntl\nimport glob\nimport io\nimport os\nimport shlex\nimport subprocess\nimport tempfile\n\n# https://thraxil.org/users/anders/posts/2008/03/13/Subprocess-Hanging-PIPE-is-your-enemy/\nNULL = tempfile.TemporaryFile()\n\nNEW_FILE_TEMPLATE = \"\"\"\\\n[batou]\nmembers =\n\"\"\"\n\n\nclass EncryptedFile(object):\n    \"\"\"Basic encryption methods - key management handled externally.\"\"\"\n\n    lockfd = None\n    cleartext = None\n\n    GPG_BINARY_CANDIDATES = [\"gpg\", \"gpg2\"]\n\n    def __init__(self, encrypted_filename, write_lock=False, quiet=False):\n        \"\"\"Context manager that opens an encrypted file.\n\n        Use the read() and write() methods in the subordinate \"with\"\n        block to manipulate cleartext content. If the cleartext content\n        has been replaced, the encrypted file is updated.\n\n        `write_lock` must be set True if a modification of the file is\n        intended.\n        \"\"\"\n        self.encrypted_filename = encrypted_filename\n        self.write_lock = write_lock\n        self.quiet = quiet\n        self.recipients = []\n\n    def __enter__(self):\n        self._lock()\n        return self\n\n    def __exit__(self, _exc_type=None, _exc_value=None, _traceback=None):\n        self.lockfd.close()\n\n    def gpg(self):\n        with tempfile.TemporaryFile() as null:\n            for gpg in self.GPG_BINARY_CANDIDATES:\n                try:\n                    subprocess.check_call([gpg, \"--version\"],\n                                          stdout=null,\n                                          stderr=null)\n                except (subprocess.CalledProcessError, OSError):\n                    pass\n                else:\n                    return gpg\n        raise RuntimeError(\"Could not find gpg binary.\"\n                           \" Is GPG installed? 
I tried looking for: {}\".format(\n                               \", \".join(\"`{}`\".format(x)\n                                         for x in self.GPG_BINARY_CANDIDATES)))\n\n    def read(self):\n        \"\"\"Read encrypted data into cleartext - if not read already.\"\"\"\n        if self.cleartext is None:\n            if os.path.exists(self.encrypted_filename):\n                self.cleartext = self._decrypt()\n            else:\n                self.cleartext = ''\n        return self.cleartext\n\n    def write(self):\n        \"\"\"Encrypt cleartext and write it into the destination file.\"\"\"\n        if not self.write_lock:\n            raise RuntimeError(\"write() needs a write lock\")\n        self._encrypt(self.cleartext)\n\n    def _lock(self):\n        self.lockfd = open(self.encrypted_filename, self.write_lock and \"a+\"\n                           or \"r+\")\n        try:\n            fcntl.lockf(\n                self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB |\n                (fcntl.LOCK_EX if self.write_lock else fcntl.LOCK_SH))\n        except BlockingIOError:\n            raise FileLockedError(self.encrypted_filename)\n\n    def _decrypt(self):\n        args = [self.gpg()]\n        if self.quiet:\n            args += ['-q', '--no-tty', '--batch']\n        args += ['--decrypt', self.encrypted_filename]\n        return subprocess.check_output(args, stderr=NULL).decode(\"utf-8\")\n\n    def _encrypt(self, data):\n        if not self.recipients:\n            raise ValueError('Need at least one recipient.')\n        os.rename(self.encrypted_filename, self.encrypted_filename + \".old\")\n        args = [self.gpg(), '--encrypt']\n        for r in self.recipients:\n            args.extend(['-r', r.strip()])\n        args.extend(['-o', self.encrypted_filename])\n        try:\n            gpg = subprocess.Popen(args, stdin=subprocess.PIPE)\n            gpg.communicate(data.encode(\"utf-8\"))\n            if gpg.returncode != 0:\n                raise RuntimeError(\"GPG returned non-zero exit code.\")\n        except Exception:\n            os.rename(self.encrypted_filename + \".old\",\n                      self.encrypted_filename)\n            raise\n        else:\n            os.unlink(self.encrypted_filename + \".old\")\n\n\nclass EncryptedConfigFile(object):\n    \"\"\"Wrap encrypted config files.\n\n    Manages keys based on the data in the configuration. 
Also allows\n management of additional files with the same keys.\n\n \"\"\"\n\n def __init__(self,\n encrypted_file,\n subfile_pattern=None,\n write_lock=False,\n quiet=False):\n self.subfile_pattern = subfile_pattern\n self.write_lock = write_lock\n self.quiet = quiet\n self.files = {}\n\n self.main_file = self.add_file(encrypted_file)\n\n # Add all existing files to the session\n if self.subfile_pattern:\n for other_filename in glob.iglob(self.subfile_pattern):\n self.add_file(other_filename)\n\n def add_file(self, filename):\n if filename not in self.files:\n self.files[filename] = f = EncryptedFile(filename, self.write_lock,\n self.quiet)\n f.read()\n return self.files[filename]\n\n def __enter__(self):\n self.main_file.__enter__()\n return self\n\n def __exit__(self, _exc_type=None, _exc_value=None, _traceback=None):\n self.main_file.__exit__()\n\n def read(self):\n self.main_file.read()\n if not self.main_file.cleartext:\n self.main_file.cleartext = NEW_FILE_TEMPLATE\n self.config = ConfigUpdater()\n self.config.read_string(self.main_file.cleartext)\n self.set_members(self.get_members())\n\n def write(self):\n s = io.StringIO()\n self.config.write(s)\n self.main_file.cleartext = s.getvalue()\n for file in self.files.values():\n file.recipients = self.get_members()\n file.write()\n\n def get_members(self):\n if 'batou' not in self.config:\n self.config.add_section('batou')\n try:\n members = self.config.get(\"batou\", \"members\").value.split(\",\")\n except Exception:\n return []\n members = [x.strip() for x in members]\n members = [_f for _f in members if _f]\n members.sort()\n return members\n\n def set_members(self, members):\n # The whitespace here is exactly what\n # \"members = \" looks like in the config file so we get\n # proper indentation.\n members = \",\\n \".join(members)\n self.config.set(\"batou\", \"members\", members)\n","sub_path":"src/batou/secrets/encryption.py","file_name":"encryption.py","file_ext":"py","file_size_in_byte":6438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"173152583","text":"import sqlite3\n\ndef setup():\n c = conn.cursor()\n\n # Create tables\n c.execute(\"CREATE TABLE executions(\"\n \"id integer primary key autoincrement,\"\n \"timestamp text)\")\n\n c.execute(\"CREATE TABLE configs(\"\n \"id integer primary key autoincrement,\"\n \"execution_id integer,\"\n \"n_hidden_recurrent integer,\"\n \"n_hidden integer,\"\n \"use_pretrained_rbm_weights numeric,\"\n \"rbm_pretrained_weights_filename text,\"\n \"initialize_weight_Wuh numeric,\"\n \"initialize_weight_Wuv numeric,\"\n \"initialize_weight_Wvu numeric,\"\n \"initialize_weight_Wuu numeric,\"\n \"use_momentum numeric,\"\n \"momentum_amount real,\"\n \"use_nesterov_momentum numeric,\"\n \"nesterov_momentum_amount real,\"\n \"learning_rate real,\"\n \"learning_rate_decay real,\"\n \"use_L1_regularization numeric,\"\n \"lambda_1 real,\"\n \"use_L2_regularization numeric,\"\n \"lambda_2 real,\"\n \"num_epochs integer,\"\n \"FOREIGN KEY(execution_id) references executions(id))\")\n\n c.execute(\"CREATE TABLE results(\"\n \"id integer primary key autoincrement,\"\n \"execution_id integer,\"\n \"training_log_likelihood real,\"\n \"validation_log_likelihood real,\"\n \"timestamp text,\"\n \"FOREIGN KEY(execution_id) references executions(id))\")\n\n conn.commit()\n\ndef insertExecutions():\n c = conn.cursor()\n\n # Insert a row of data\n c.execute(\"INSERT INTO executions (timestamp) VALUES (strftime('%Y-%m-%d %H:%M:%f','now'))\")\n\n # Save 
(commit) the changes\n    conn.commit()\n\n    # We can also close the connection if we are done with it.\n    # Just be sure any changes have been committed or they will be lost.\n\n\ndef insert():\n    c = conn.cursor()\n\n    # Insert a row of data\n    c.execute(\"INSERT INTO results (execution_id,training_log_likelihood,validation_log_likelihood,timestamp) VALUES ('10','0.4','0.5',strftime('%Y-%m-%d %H:%M:%f','now'))\")\n\n    # Save (commit) the changes\n    conn.commit()\n\n    # We can also close the connection if we are done with it.\n    # Just be sure any changes have been committed or they will be lost.\n\n\ndef testSelect():\n    c = conn.cursor()\n\n    c.execute(\"SELECT max(id) FROM results\")\n    (m,) = c.fetchall()[0]\n    print(m)\n\n    conn.commit()\n\ndef delete():\n    c = conn.cursor()\n\n    c.execute(\"delete from executions\")\n\n    conn.commit()\n\n\nconn = sqlite3.connect('example.db')\nc = conn.cursor()\n#setup()\n#insertExecutions()\ninsert()\ntestSelect()\n#delete()\nconn.close()\n","sub_path":"implementation/temp/dbconnection.py","file_name":"dbconnection.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"653402193","text":"#-*- coding:utf-8 -*-\n\nfrom django.shortcuts import get_object_or_404, render\nfrom django.http import *\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom django.views import generic\nfrom django.shortcuts import render_to_response\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom django.core.paginator import Paginator,InvalidPage,EmptyPage,PageNotAnInteger\nimport time,datetime\nfrom django.db.models import Q\nfrom django.db import connection\nfrom django.template import RequestContext \nfrom django.contrib.auth.models import User\nfrom django.views.generic.base import TemplateView\nfrom django.contrib.auth.decorators import login_required\n\nfrom UUBlog.common import pub,utility\nfrom UUBlog.apps.accounts.models import UserProfile\nfrom UUBlog.apps.accounts.views import viewaccounts\nfrom UUBlog.apps.blog.models import *\nfrom UUBlog.apps.blog import modules\nfrom UUBlog.apps.blog.views.baseblogview import *\n\nclass IndexView(UBaseBlogView):\n\n\n    def GetContext(self, **kwargs):\n        uid=int(kwargs.get(\"uid\",0))\n        cid=int(kwargs.get(\"cid\",0))\n        c2id=int(kwargs.get(\"c2id\",0))\n\n        channelList=Channel.objects.filter(parent_id=0)\n        parentChannel=Channel.objects.get(id=cid)\n        childrenChannel=Channel.objects.filter(parent_id=cid)\n        listenChannelId=cid\n\n        try:\n            childChannel=Channel.objects.get(id=c2id)\n            listenChannelId=c2id\n\n            articleList=Article.objects.order_by(\"-createtime\").filter(channel2_id=c2id)\n\n        except:\n            childChannel=None\n            articleList=Article.objects.order_by(\"-createtime\").filter(channel1_id=cid)\n        \n        \n\n        myChannelList=[]\n        hasListened=False\n\n        if self.currentBlog:\n            dot=self.currentBlog.listenchannels.find(\"%s,\" %cid)\n            hasListened=True if dot>-1 else False\n\n            myChannelArray=self.currentBlog.listenchannels.split(\",\")\n            for tempCId in myChannelArray:\n                if tempCId!=\"\":\n                    myChannelList.append(Channel.objects.get(id=tempCId))\n\n        \n\n        self.template_name=\"blog/channel.html\"\n\n        return locals()\n\ndef popular(request,cid=-1):\n    userInfos=viewaccounts.UsersMeta(request,-1)\n\n    myModules=[\"newuserlist\",\"hotarticlelist\",\"newarticlelist\"]\n    moduleParams={}\n    for myModule in myModules:\n        moduleParams.setdefault(myModule,{})\n\n    moduleList=modules.GetModuleList(moduleParams)\n\n    
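# Illustrative aside (added): Paginator, EmptyPage and InvalidPage are imported at the\n    # top of this module but unused in these views; a minimal, hypothetical way to page the\n    # article queryset built just below (20 items per page) would be:\n    #\n    #     paginator = Paginator(articleList, 20)\n    #     try:\n    #         page_no = int(request.GET.get('page', 1))\n    #     except ValueError:\n    #         page_no = 1\n    #     try:\n    #         articles = paginator.page(page_no)\n    #     except (EmptyPage, InvalidPage):\n    #         articles = paginator.page(paginator.num_pages)\n    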
articleList=Article.objects.order_by(\"-createtime\").filter(channel1_id=cid)\n\n\n channelList=Channel.objects.filter(parent_id=0)\n channelListPopular=Channel.objects.all()\n\n return pub.my_render_to_response(request,\"blog/channelpopular.html\",locals())\n\ndef my(request,cid=-1):\n userInfos=viewaccounts.UsersMeta(request,-1)\n\n myModules=[\"newuserlist\",\"hotarticlelist\",\"newarticlelist\"]\n moduleParams={}\n for myModule in myModules:\n moduleParams.setdefault(myModule,{})\n\n moduleList=modules.GetModuleList(moduleParams)\n\n articleList=Article.objects.order_by(\"-createtime\").filter(channel1_id=cid)\n\n\n channelList=Channel.objects.filter(parent_id=0)\n channelListPopular=Channel.objects.all()\n\n return pub.my_render_to_response(request,\"blog/channelpopular.html\",locals())\n\n\n\n","sub_path":"UUBlog/apps/blog/views/viewchannel.py","file_name":"viewchannel.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"24228494","text":"# project/server/admin/bestlap/views.py\n\nimport sys, datetime\nfrom flask import render_template, Blueprint, url_for, \\\n redirect, flash, request\nfrom flask_login import login_required, current_user\n\nfrom project.server import bcrypt, db\nfrom project.server.models import BestLap\nfrom project.server.dataservices import DataServices\nfrom project.server.admin.bestlap.forms import BestLapForm\n\n# Blueprints\nadmin_bestlap_blueprint = Blueprint('admin_bestlap', __name__,)\n\n# Helper Functions\n\n\ndef get_pghead():\n return 'BestLap'\n\n# Route Handlers\n\n# Best Lap\n@admin_bestlap_blueprint.route('/bestlap/main')\n@login_required\ndef main():\n if current_user.is_admin():\n return render_template('admin/bestlap/main.html', bestlaps=DataServices.get_model(BestLap), pghead=get_pghead())\n else:\n flash('You are not an admin!', 'danger')\n return redirect(url_for(\"user.members\"))\n\n@admin_bestlap_blueprint.route('/bestlap/create', methods=['GET', 'POST'])\n@login_required\ndef create():\n if current_user.is_admin():\n form = BestLapForm(request.form)\n form.racer.choices = DataServices.get_availableRacers()\n form.raceclass.choices = DataServices.get_modelChoices('RaceClass', 'name')\n form.event.choices = DataServices.get_modelChoices('Event', 'name')\n\n if form.validate_on_submit():\n bestlap = BestLap(\n time=form.time.data,\n lap_date=form.lap_date.data\n )\n if form.is_best.data == True:\n bestlap.is_best=1\n else:\n bestlap.is_best=0\n\n bestlap.racer_id = form.racer.data\n bestlap.raceclass_id = form.raceclass.data\n bestlap.event_id = form.event.data\n db.session.add(bestlap)\n db.session.commit()\n\n flash('New best lap created.', 'success')\n return redirect(url_for(\"admin_bestlap.main\", pghead=get_pghead()))\n return render_template('admin/bestlap/create.html', form=form, pghead=get_pghead())\n else:\n flash('You are not an admin!', 'danger') \n return redirect(url_for(\"user.members\"))\n\n@admin_bestlap_blueprint.route('/bestlap/update//', methods=['GET', 'POST'])\n@login_required\ndef update(bestlap_id):\n if current_user.is_admin():\n bestlap = DataServices.get_filterbyFirstQuery('BestLap', 'id', 'bestlap_id')\n form = BestLapForm(request.form)\n form.racer.choices = DataServices.get_availableRacers()\n form.raceclass.choices = DataServices.get_modelChoices('RaceClass', 'name')\n form.event.choices = DataServices.get_modelChoices('Event', 'name')\n\n \n if form.validate_on_submit():\n bestlap.racer_id = form.racer.data\n 
bestlap.raceclass_id = form.raceclass.data\n            bestlap.event_id = form.event.data\n            bestlap.time = form.time.data\n            bestlap.lap_date = form.lap_date.data\n            if form.is_best.data == True:\n                bestlap.is_best=1\n            else:\n                bestlap.is_best=0\n\n            bestlap.updated_date = datetime.datetime.now()\n            db.session.commit()\n\n            flash('Best Lap Updated.', 'success')\n            return redirect(url_for(\"admin_bestlap.main\", pghead=get_pghead()))\n        \n        if bestlap:\n            form.racer.data = bestlap.racer\n            form.raceclass.data = bestlap.raceclass\n            form.event.data = bestlap.event\n            form.time.data = bestlap.time\n            form.lap_date.data = bestlap.lap_date\n            form.is_best.data = bestlap.is_best\n\n        return render_template('admin/bestlap/update.html', bestlap=bestlap, form=form, pghead=get_pghead())\n    else:\n        flash('You are not an admin!', 'danger')\n        return redirect(url_for(\"user.members\"))\n\n@admin_bestlap_blueprint.route('/bestlap/delete//')\n@login_required\ndef delete(bestlap_id):\n    if current_user.is_admin():\n        bestlap = DataServices.get_filterbyFirstQuery('BestLap', 'id', 'bestlap_id')\n        bestlap.delete()\n        db.session.commit()\n        flash('The best lap was deleted.', 'success')\n        return redirect(url_for('admin_bestlap.main', pghead=get_pghead()))\n    else:\n        flash('You are not an admin!', 'danger')\n        return redirect(url_for(\"user.members\"))","sub_path":"project/server/admin/bestlap/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"31707119","text":"principal_amount = int(input(\"What is the principal amount? \"))\nrate_of_interest = float(input(\"What is the rate? \"))\nyears_of_investment = int(input(\"What is the number of years? \"))\nnumber_of_periods = int(input(\"What is the number of times the interest is compounded per year? \"))\n\nvalue_of_investment = principal_amount * (1 + ((rate_of_interest / 100) / number_of_periods))**(years_of_investment * number_of_periods)\n\nrate_converted_to_str = str(rate_of_interest)\n\nprint('${:d} invested at {:s}% for {:d} years\\ncompounded {:d} times per year is ${:,.2f}.'.format(principal_amount, rate_converted_to_str, years_of_investment, number_of_periods, value_of_investment))","sub_path":"determining_compound_interest/python/determining_compound_interest.py","file_name":"determining_compound_interest.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"410118524","text":"\"\"\"\n1. map() function is a built-in function that allows you to process and transform all the items in an iterable without\nusing an explicit for loop, a technique commonly known as mapping.\n\n2. map() maps a function over an iterable. The function transforms each item of the iterable/iterator, and map() returns a new map object\nwhich is an iterator.\n\n3. General Syntax of map() function. map(function, iterable[, iterable1, iterable2,..., iterableN])\n\n4. map() applies the function to each item in the iterable and returns a new iterator, which you can feed to the next() function.\n\n5. The first argument to the map function is a callable. This includes built-in functions, classes, methods, lambda expressions/functions.\n\n6. The advantage of the map function is that it returns an iterator object and not a list, so the memory consumption is lower. Each item inside the map\nobject can be obtained on demand.\n\"\"\"\n\n# Square Numbers in the list. 
Using map function\ndef squares(item):\n    return item ** 2\n\nnums = [1, 2, 3, 4, 5]\n\nsquared_numbers = map(squares, nums) # map returns a map object\n\n# Using lambda function\nsquared_numbers = map(lambda item: item ** 2, nums)\n\n# Iterate over the map object\nfor number in squared_numbers:\n    print(number)\n\n# Even numbers in the range 1-50 (note: map does not filter, so odd items map to None; filter() would be the better tool here)\ndef evens(item):\n    if item % 2 == 0:\n        return item\neven_numbers = map(evens, range(1, 51))\n\n# Build a list of tuples with string and its length pair\nnames = ['apple', 'google', 'yahoo', 'facebook', 'yelp', 'flipkart', 'gmail', 'instagram', 'microsoft']\n\ndef len_item_pair(item):\n    return (item, len(item))\n\npairs = map(len_item_pair, names)\n\nprint(list(pairs))\n\n# Type Conversion\nstr_nums = [\"1\", \"2\", \"3\", \"4\", \"5\"]\nint_items = map(int, str_nums)\nprint(list(int_items))\n\n# built-in abs func\nnumbers = [-1, -2, 4, 5, -6]\nabs_values = map(abs, numbers)\n\n# Different precisions of pi values using map func\nfrom math import pi\npi_values = map(round, [pi, pi, pi, pi], [1, 2, 3, 4])\nprint(list(pi_values))\n\n# passing two arguments to a user-defined function.\nexp = map(lambda x, y: 2*x + 3*y, [1, 2, 3, 4], [5, 6, 7, 8])\nprint(list(exp))\n\n\"\"\"\nNOTE: If we pass two iterables with different lengths, the iteration\nstops at the shortest length\n\"\"\"\n\n# Convert to upper case\nsentence = \"This is bunch of words\"\nucase = map(str.upper, sentence.split())\n\n# Using strip function.\nwords = ['This ', ' is', ' Python', ' Programming ', ' Language ']\nstripped = map(str.strip, words)\nprint(list(stripped))\n\n# Factorials of numbers\nfrom math import factorial\nf = map(factorial, [1, 2, 3, 4])\nprint(list(f))\n\n# Passing a callable class instance to the map function\nclass Squares:\n    def __call__(self, item):\n        return item ** 2\n\ns = Squares()\nm = map(s, [1, 2, 3, 4, 5])\nprint(list(m))\n","sub_path":"4_Comprehensions/_maps.py","file_name":"_maps.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"227158305","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom random import sample\nimport numpy as np\nimport collections\nimport os\nimport pickle\nfrom scipy import special\nfrom scipy.stats import entropy\nimport itertools\nimport sys\nimport rospkg\n\nsys.path.append(os.path.join(rospkg.RosPack().get_path(\"simulators\"), \"scripts\"))\n\nfrom adaptive_assistance_sim_utils import TRUE_ACTION_TO_COMMAND, LOW_LEVEL_COMMANDS\nfrom adaptive_assistance_sim_utils import (\n    AssistanceType,\n    TRUE_TASK_ACTION_TO_INTERFACE_ACTION_MAP,\n    TRUE_INTERFACE_ACTION_TO_TASK_ACTION_MAP,\n    INTERFACE_LEVEL_ACTIONS,\n    TASK_LEVEL_ACTIONS,\n    INTERFACE_LEVEL_ACTIONS_TO_NUMBER_ID,\n    CARTESIAN_MODE_SET_OPTIONS,\n)\n\n\nclass DiscreteMIDisambAlgo(object):\n    def __init__(self, env_params, subject_id):\n\n        self.env_params = env_params\n        assert self.env_params is not None\n\n        assert \"mdp_list\" in self.env_params\n        assert \"spatial_window_half_length\" in self.env_params\n\n        self.mdp_env_params = self.env_params[\"all_mdp_env_params\"]\n        self.mdp_list = self.env_params[\"mdp_list\"]\n        assert self.mdp_list is not None\n        assert len(self.mdp_list) > 0\n\n        self.subject_id = subject_id\n        self.num_goals = len(self.mdp_list)\n        self.SPATIAL_WINDOW_HALF_LENGTH = self.env_params[\"spatial_window_half_length\"]\n        self.P_PHI_GIVEN_A = None\n        self.P_PHM_GIVEN_PHI = None\n        self.PHI_SPARSE_LEVEL = 0.0\n        self.PHM_SPARSE_LEVEL = 0.0\n        
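# Note (added): the *_SPARSE_LEVEL values are the probability that a sampled\n        # interface-level action degenerates to 'None' (see sample_phi_given_a and\n        # sample_phm_given_phi below); conceptually:\n        #\n        #     phi = 'None' if np.random.rand() < PHI_SPARSE_LEVEL else <sample from p(phi|a)>\n        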
self.DEFAULT_PHI_GIVEN_A_NOISE = 0.1\n self.DEFAULT_PHM_GIVEN_PHI_NOISE = 0.1\n\n self.num_sample_trajectories = self.env_params.get(\"num_sample_trajectories\", 250)\n self.mode_set_type = self.env_params[\"mode_set_type\"]\n self.robot_type = self.env_params[\"robot_type\"]\n self.mode_set = CARTESIAN_MODE_SET_OPTIONS[self.robot_type][self.mode_set_type]\n self.num_modes = len(self.mode_set)\n self.num_discrete_orientations = self.mdp_env_params[\"num_discrete_orientations\"]\n\n self.num_modes = self.env_params.get(\"num_modes\", 3)\n self.kl_coeff = self.env_params.get(\"kl_coeff\", 0.8)\n self.dist_coeff = self.env_params.get(\"dist_coeff\", 0.2)\n\n self.avg_mi_for_valid_states = collections.OrderedDict()\n self.avg_dist_for_valid_states_from_goals = collections.OrderedDict()\n self.avg_total_reward_for_valid_states = collections.OrderedDict()\n\n self.distribution_directory_path = os.path.join(\n os.path.dirname(os.path.dirname(__file__)), \"se2_personalized_distributions\"\n )\n # unify the initialization of these distribution between different classes\n # init all distributions from file\n if os.path.exists(os.path.join(self.distribution_directory_path, str(self.subject_id) + \"_p_phi_given_a.pkl\")):\n print(\"LOADING PERSONALIZED P_PHI_GIVEN_A\")\n with open(\n os.path.join(self.distribution_directory_path, str(self.subject_id) + \"_p_phi_given_a.pkl\"), \"rb\"\n ) as fp:\n self.P_PHI_GIVEN_A = pickle.load(fp)\n else:\n self.P_PHI_GIVEN_A = collections.OrderedDict()\n self.init_P_PHI_GIVEN_A()\n\n if os.path.exists(\n os.path.join(self.distribution_directory_path, str(self.subject_id) + \"_p_phm_given_phi.pkl\")\n ):\n print(\"LOADING PERSONALIZED P_PHM_GIVEN_PHI\")\n with open(\n os.path.join(self.distribution_directory_path, str(self.subject_id) + \"_p_phm_given_phi.pkl\"), \"rb\"\n ) as fp:\n self.P_PHM_GIVEN_PHI = pickle.load(fp)\n else:\n self.P_PHM_GIVEN_PHI = collections.OrderedDict()\n self.init_P_PHM_GIVEN_PHI()\n\n print(\"Finished initializing DISAMB CLASS\")\n\n def get_local_disamb_state(self, prior, current_state):\n # compute window around current_state\n states_in_local_spatial_window = self._compute_spatial_window_around_current_state(current_state)\n print(states_in_local_spatial_window)\n print(len(states_in_local_spatial_window))\n # # perform mi computation for all states in spatial window\n self._compute_mi(prior, states_in_local_spatial_window)\n # # pick argmax among this list\n max_disamb_state = self._max_disambiguating_state()\n return max_disamb_state\n\n def _max_disambiguating_state(self):\n rewards = self.avg_total_reward_for_valid_states.values()\n amax = np.argmax(rewards)\n max_disamb_state = list(self.avg_total_reward_for_valid_states.keys())[amax]\n return max_disamb_state\n\n def _compute_mi(self, prior, states_for_disamb_computation=None):\n self.avg_mi_for_valid_states = collections.OrderedDict()\n self.avg_dist_for_valid_states_from_goals = collections.OrderedDict()\n self.avg_total_reward_for_valid_states = collections.OrderedDict()\n\n assert len(prior) == self.num_goals\n\n for i, vs in enumerate(states_for_disamb_computation):\n # print(\"Computing MI for \", vs)\n traj_list = collections.defaultdict(list)\n for t in range(self.num_sample_trajectories):\n sampled_goal_index = np.random.choice(self.num_goals)\n mdp_for_sampled_goal = self.mdp_list[sampled_goal_index]\n # sub optimal a_sampled\n a_sampled = mdp_for_sampled_goal.get_optimal_action(vs, return_optimal=False)\n # sampled corrupted interface level action corresponding to 
task-level action, could be None\n phi = self.sample_phi_given_a(a_sampled)\n # corrupted interface level action, could be None\n phm = self.sample_phm_given_phi(phi)\n if phm != \"None\":\n applied_a = TRUE_INTERFACE_ACTION_TO_TASK_ACTION_MAP[phm]\n else:\n applied_a = \"None\"\n\n next_state = mdp_for_sampled_goal.get_next_state_from_state_action(vs, applied_a)\n traj_tuple = (vs, a_sampled, phi, phm, applied_a, next_state)\n traj_list[sampled_goal_index].append(traj_tuple)\n\n p_phm_g_s0 = collections.defaultdict(list) # p(phm | g, s0)\n for g in traj_list.keys():\n for traj_g in traj_list[g]:\n (vs, a_sampled, phi, phm, applied_a, next_state) = traj_g\n p_phm_g_s0[g].append(INTERFACE_LEVEL_ACTIONS_TO_NUMBER_ID[phm])\n\n # p(phm|s). is a list instead of defaultdict(list) because all actions are just combinaed\n p_phm_s0 = []\n for g in p_phm_g_s0.keys():\n p_phm_s0.extend(p_phm_g_s0[g])\n\n ph_actions_ids = INTERFACE_LEVEL_ACTIONS_TO_NUMBER_ID.values()\n\n # histogram\n p_phm_s0_hist = collections.Counter(p_phm_s0)\n # to make sure that all interface level actions are present in the histogram\n for ph_action_id in ph_actions_ids:\n if ph_action_id not in p_phm_s0_hist.keys():\n p_phm_s0_hist[ph_action_id] = 0\n\n p_phm_s = np.array(p_phm_s0_hist.values(), dtype=np.float32)\n p_phm_s = p_phm_s / np.sum(p_phm_s)\n kl_list = []\n for g in p_phm_g_s0.keys():\n p_phm_g_s_hist = collections.Counter(p_phm_g_s0[g])\n for ph_action_id in ph_actions_ids:\n if ph_action_id not in p_phm_g_s_hist.keys():\n p_phm_g_s_hist[ph_action_id] = 0\n\n assert len(p_phm_g_s_hist) == len(p_phm_s)\n p_phm_g_s = np.array(p_phm_g_s_hist.values(), dtype=np.float32)\n p_phm_g_s = p_phm_g_s / np.sum(p_phm_g_s)\n kl = np.sum(special.rel_entr(p_phm_g_s, p_phm_s))\n kl_list.append(kl)\n\n self.avg_mi_for_valid_states[vs] = np.mean(kl_list) # averaged over goals.\n self.avg_total_reward_for_valid_states[vs] = self.kl_coeff * (self.avg_mi_for_valid_states[vs])\n # normalized to grid dimensions\n # avg_dist_of_vs_from_goals = np.mean(\n # np.linalg.norm(\n # (np.array(self.mdp_env_params[\"all_goals\"]) - np.array(vs[:2]))\n # / np.array([GRID_WIDTH, GRID_HEIGHT], dtype=np.float32),\n # axis=1,\n # )\n # )\n\n def _compute_spatial_window_around_current_state(self, current_state):\n current_grid_loc = np.array(current_state[0:2]) # (x,y)\n states_in_local_spatial_window = []\n current_orientation = current_state[2]\n\n # Add todo to ensure that self.mdp list is not None\n all_state_coords = self.mdp_list[0].get_all_state_coords()\n window_coordinates = itertools.product(\n range(-self.SPATIAL_WINDOW_HALF_LENGTH + 1, self.SPATIAL_WINDOW_HALF_LENGTH),\n range(-self.SPATIAL_WINDOW_HALF_LENGTH + 1, self.SPATIAL_WINDOW_HALF_LENGTH),\n )\n for wc in window_coordinates:\n vs = current_grid_loc + np.array(wc) # 2d grid loc\n for mode in range(self.num_modes): #\n vs_mode = (vs[0], vs[1], current_orientation, mode + 1)\n if vs_mode in all_state_coords:\n states_in_local_spatial_window.append(vs_mode)\n\n return states_in_local_spatial_window\n\n def sample_phi_given_a(self, a): # sample from p(phii|a)\n d = np.random.rand()\n\n if d < self.PHI_SPARSE_LEVEL:\n phi = \"None\"\n else:\n p_vector = self.P_PHI_GIVEN_A[a].values() # list of probabilities for phii\n # sample from the multinomial distribution with distribution p_vector\n phi_index_vector = np.random.multinomial(1, p_vector)\n phi_index = np.nonzero(phi_index_vector)[0][\n 0\n ] # grab the index of the index_vector which had a nonzero entry\n phi = 
 def _compute_spatial_window_around_current_state(self, current_state):\n current_grid_loc = np.array(current_state[0:2]) # (x,y)\n states_in_local_spatial_window = []\n current_orientation = current_state[2]\n\n # TODO ensure that self.mdp_list is not None\n all_state_coords = self.mdp_list[0].get_all_state_coords()\n window_coordinates = itertools.product(\n range(-self.SPATIAL_WINDOW_HALF_LENGTH + 1, self.SPATIAL_WINDOW_HALF_LENGTH),\n range(-self.SPATIAL_WINDOW_HALF_LENGTH + 1, self.SPATIAL_WINDOW_HALF_LENGTH),\n )\n for wc in window_coordinates:\n vs = current_grid_loc + np.array(wc) # 2d grid loc\n for mode in range(self.num_modes):\n vs_mode = (vs[0], vs[1], current_orientation, mode + 1)\n if vs_mode in all_state_coords:\n states_in_local_spatial_window.append(vs_mode)\n\n return states_in_local_spatial_window\n\n def sample_phi_given_a(self, a): # sample from p(phi|a)\n d = np.random.rand()\n\n if d < self.PHI_SPARSE_LEVEL:\n phi = \"None\"\n else:\n p_vector = list(self.P_PHI_GIVEN_A[a].values()) # list of probabilities for phi; list() is needed because np.random.multinomial cannot consume a dict view\n # sample from the multinomial distribution with distribution p_vector\n phi_index_vector = np.random.multinomial(1, p_vector)\n phi_index = np.nonzero(phi_index_vector)[0][\n 0\n ] # grab the index of the index_vector which had a nonzero entry\n phi = list(self.P_PHI_GIVEN_A[a].keys())[phi_index] # retrieve phi using phi_index; dict views are not indexable in Python 3\n # will be not None\n\n return phi\n\n def sample_phm_given_phi(self, phi): # sample from p(phm|phi)\n d = np.random.rand()\n if phi != \"None\":\n if d < self.PHM_SPARSE_LEVEL:\n phm = \"None\"\n else:\n p_vector = list(self.P_PHM_GIVEN_PHI[phi].values()) # list of probabilities for phm given phi\n phm_index_vector = np.random.multinomial(1, p_vector) # sample from the multinomial distribution\n # grab the index of the index_vector which had a nonzero entry\n phm_index = np.nonzero(phm_index_vector)[0][0]\n phm = list(self.P_PHM_GIVEN_PHI[phi].keys())[phm_index] # retrieve phm\n else:\n print(\"Sampled phi is None, therefore phm is None\")\n phm = \"None\"\n\n return phm\n\n # TODO consolidate the following two functions so that both goal inference and\n # goal disamb both have the same set of information regarding interface noise\n def init_P_PHI_GIVEN_A(self):\n # only to be done at the beginning of a session for a subject. No updating between trials\n self.P_PHI_GIVEN_A = collections.OrderedDict()\n for k in TRUE_TASK_ACTION_TO_INTERFACE_ACTION_MAP.keys(): # task level action\n self.P_PHI_GIVEN_A[k] = collections.OrderedDict()\n for u in INTERFACE_LEVEL_ACTIONS:\n if u == TRUE_TASK_ACTION_TO_INTERFACE_ACTION_MAP[k]:\n # try to weight the true command more for realistic purposes. Can be offset by using a high PHI_GIVEN_A_NOISE\n self.P_PHI_GIVEN_A[k][u] = 1.0\n else:\n self.P_PHI_GIVEN_A[k][u] = 0.0\n\n delta_dist = np.array(list(self.P_PHI_GIVEN_A[k].values()))\n uniform_dist = (1.0 / len(INTERFACE_LEVEL_ACTIONS)) * np.ones(len(INTERFACE_LEVEL_ACTIONS))\n blended_dist = (\n 1 - self.DEFAULT_PHI_GIVEN_A_NOISE\n ) * delta_dist + self.DEFAULT_PHI_GIVEN_A_NOISE * uniform_dist # np.array\n for index, u in enumerate(INTERFACE_LEVEL_ACTIONS):\n self.P_PHI_GIVEN_A[k][u] = blended_dist[index]\n\n def init_P_PHM_GIVEN_PHI(self):\n \"\"\"\n Generates a random p(um|ui). key = ui, subkey = um\n \"\"\"\n self.P_PHM_GIVEN_PHI = collections.OrderedDict()\n for i in INTERFACE_LEVEL_ACTIONS: # ui\n self.P_PHM_GIVEN_PHI[i] = collections.OrderedDict()\n for j in INTERFACE_LEVEL_ACTIONS: # um\n if i == j:\n # try to weight the true command more for realistic purposes. 
Can be offset by using a high UM_GIVEN_UI_NOISE\n self.P_PHM_GIVEN_PHI[i][j] = 1.0\n else:\n # P_PHM_GIVEN_PHI[i][j] = np.random.random()*UM_GIVEN_UI_NOISE#IF UM_GIVEN_UI_NOISE is 0, then the p(um|ui) is a deterministic mapping\n self.P_PHM_GIVEN_PHI[i][j] = 0.0\n\n delta_dist = np.array(list(self.P_PHM_GIVEN_PHI[i].values()))\n uniform_dist = (1.0 / len(INTERFACE_LEVEL_ACTIONS)) * np.ones(len(INTERFACE_LEVEL_ACTIONS))\n blended_dist = (\n 1 - self.DEFAULT_PHM_GIVEN_PHI_NOISE\n ) * delta_dist + self.DEFAULT_PHM_GIVEN_PHI_NOISE * uniform_dist # np.array\n for index, j in enumerate(INTERFACE_LEVEL_ACTIONS):\n self.P_PHM_GIVEN_PHI[i][j] = blended_dist[index]\n","sub_path":"src/disamb_algo/src/disamb_algo/discrete_mi_disamb_algo.py","file_name":"discrete_mi_disamb_algo.py","file_ext":"py","file_size_in_byte":13438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"187621721","text":"from sys import stdin\n\n\ndelta = [(1,0), (0,1), (1,1), (-1,0), (0,-1), (-1,-1), (1,-1), (-1,1)]\n\n\ndef dfs(grid, visited, ini):\n global delta\n visited[ini[0]][ini[1]], stack, ans = True, [(ini[0],ini[1])], 1\n\n while len(stack) != 0:\n i, j = stack.pop()\n\n for deltaI, deltaJ in delta:\n di, dj = i+deltaI, j+deltaJ \n if(0 <= di < len(grid) and 0 <= dj < len(grid[0]) and not visited[di][dj] and grid[di][dj] == '1'):\n ans += 1\n stack.append((di,dj))\n visited[di][dj] = True\n return ans\n\n \n\ndef main():\n global delta\n cases, first = int(stdin.readline()), True; stdin.readline()\n\n for case in range(cases): \n if(not first): print(\"\")\n line, grid, first = stdin.readline()[:-1], list(), False\n\n while(line != \"\"):\n grid.append(line)\n line = stdin.readline()[:-1]\n visited = [[False for _ in range(len(grid[0]))] for _ in range(len(grid))]\n\n i, maximum = 0, 0\n while(i < len(grid)):\n j = 0\n while(j < len(grid[0])):\n if(not visited[i][j] and grid[i][j] == '1'):\n visited[i][j] = True\n maximum = max(maximum, dfs(grid, visited, (i, j)))\n j += 1 \n i += 1\n\n print(maximum)\n\nmain()","sub_path":"871.py","file_name":"871.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"134634114","text":"import sys\nimport collections\n\ndef findind(line, windowsize = 4):\n def all_unique(d):\n return len(set(d)) == len(d)\n\n window = collections.deque()\n for i in range(len(line)):\n if len(window) < windowsize:\n window.append(line[i])\n continue\n\n if all_unique(window):\n return i\n\n window.popleft()\n window.append(line[i])\n\ndef main():\n for line in sys.stdin.readlines():\n line = line.strip()\n print(findind(line))\n\nmain()\n","sub_path":"advent_of_code/2022/06/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"301300725","text":"import inspect\nfrom typing import Any\nfrom typing import List\n\nfrom django.http.response import HttpResponseBase\nfrom drf_yasg import openapi\n\nfrom winter.core import ComponentMethod\nfrom winter.web.default_response_status import get_default_response_status\nfrom winter.web.exceptions import MethodExceptionsManager\nfrom winter.web.exceptions import exception_handlers_registry\nfrom winter.web.routing import Route\nfrom .route_parameters_inspector import get_route_parameters_inspectors\nfrom .type_inspection import InspectorNotFound\nfrom .type_inspection import inspect_type\n\n\nclass 
CanNotInspectReturnType(Exception):\n\n def __init__(\n self,\n method: ComponentMethod,\n return_type: Any,\n message: str,\n ):\n self._return_type = return_type\n self._message = message\n self._method = method\n\n def __repr__(self):\n return f'{self.__class__.__name__}({self})'\n\n def __str__(self):\n component_cls = self._method.component.component_cls\n method_path = f'{component_cls.__module__}.{self._method.full_name}'\n return f'{method_path}: -> {self._return_type}: {self._message}'\n\n\ndef build_responses_schemas(route: Route):\n responses = {}\n http_method = route.http_method\n response_status = str(get_default_response_status(http_method, route.method))\n\n responses[response_status] = build_response_schema(route.method)\n method_exceptions_manager = MethodExceptionsManager(route.method)\n\n for exception_cls in method_exceptions_manager.declared_exception_classes:\n handler = method_exceptions_manager.get_handler(exception_cls)\n if handler is None:\n handler = exception_handlers_registry.get_handler(exception_cls)\n if handler is None:\n continue\n handle_method = ComponentMethod.get_or_create(handler.__class__.handle)\n response_status = str(get_default_response_status(http_method, handle_method))\n responses[response_status] = build_response_schema(handle_method)\n return responses\n\n\ndef build_response_schema(method: ComponentMethod):\n return_value_type = method.return_value_type\n\n if (\n return_value_type in (None, type(None))\n or (inspect.isclass(return_value_type) and issubclass(return_value_type, HttpResponseBase))\n ):\n return openapi.Response(description='')\n\n try:\n type_info = inspect_type(return_value_type)\n except InspectorNotFound as e:\n raise CanNotInspectReturnType(method, return_value_type, str(e))\n return type_info.get_openapi_schema(output=True)\n\n\ndef get_route_parameters(route: Route) -> List['openapi.Parameter']:\n parameters = []\n for inspector in get_route_parameters_inspectors():\n parameters += inspector.inspect_parameters(route)\n return parameters\n","sub_path":"winter_openapi/generation.py","file_name":"generation.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"86226179","text":"\"\"\"\nCopyright 2020 Christopher Andrews\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport os\nimport csv\nfrom urllib.parse import urlparse\n\nclass CSVLoader:\n def __init__(self, input_filepath, has_header, uri_column, filename_column):\n \"\"\" Loader class that helps to load CSV files\n :param input_filepath: Path of CSV file\n :param has_header: Whether the CSV file has a header or not\n :param uri_column: The column name or index of the column that contains the URIs\n :param filename_column: The column name or index of the column that contains the filenames, can be None\n \"\"\"\n\n # User must pass a valid csv file as the input_filepath argument as type str\n if input_filepath != \"\" and input_filepath != None and isinstance(input_filepath, 
str):\n # check the extension first so that a non-CSV path fails with a clear message\n if not input_filepath.lower().endswith(\".csv\"):\n raise Exception(\"input_filepath must be a valid *.csv file\")\n if os.path.isfile(input_filepath):\n self.input_filepath = input_filepath\n else:\n raise FileNotFoundError(\"input_filepath %s is not a file!\" % input_filepath)\n else:\n raise TypeError(\"input_filepath must be of type (str) and cannot be empty or None\")\n\n # Check if file has a header or not and represent with bool\n # TODO: Use csv.Sniffer().has_header as a fallback method if chosen\n if has_header is True or has_header is False:\n self.has_header = has_header\n else:\n raise TypeError(\"has_header must be of type (bool)\")\n\n # Allow users to pass the name of the column or the index of the column that contains the uri list, else raise exception\n # TODO: Add regex for detecting valid URI\n if uri_column != \"\" and uri_column != None and isinstance(uri_column, str):\n self.uri_column = self._translate_column_to_index(uri_column)\n elif isinstance(uri_column, int):\n self.uri_column = uri_column\n else:\n raise TypeError(\"uri_column must be either column name of type (str) or index of column of type (int)\")\n\n # Check if filename column is given, if empty or None, then assume that the filename is included in the uri\n if filename_column != \"\" and filename_column != None and isinstance(filename_column, str):\n self.filename_column = self._translate_column_to_index(filename_column)\n elif isinstance(filename_column, int):\n self.filename_column = filename_column\n else:\n self.filename_column = None\n\n # Create an empty dict\n self.uri_dict = {}\n\n def _translate_column_to_index(self, column_name):\n \"\"\" Takes a column name (str) and attempts to find the index (int), if not found, raise exception\n :param column_name: The column header name as a (str) for example: my_url_row\n :return: int if index was found\n \"\"\"\n if self.has_header == True:\n with open(self.input_filepath, 'r', encoding='utf-8') as in_file:\n reader = csv.reader(in_file)\n # scan the header row itself; checking line[i] of row i can only ever\n # match a column whose index equals its row number\n header = next(reader)\n for i, col in enumerate(header):\n if column_name == col:\n return i\n raise Exception(\"Could not locate column: %s\" % column_name)\n else:\n raise Exception(\"Cannot convert column name string to index, input_file does not have a header!\")\n\n def _set_uri_dict(self):\n \"\"\" Setter method for loading and parsing the CSV File\n \"\"\"\n with open(self.input_filepath, 'r', encoding='utf-8') as in_file:\n uri_dict_temp = {}\n reader = csv.reader(in_file)\n\n # If the filename is not specified, use netloc as filename + index of iteration\n if self.filename_column == None:\n # Exclude header if has_header is True\n if self.has_header == True:\n next(reader)\n for i, line in enumerate(reader):\n parsed_uri = urlparse(line[self.uri_column]).netloc.replace(\".\", \"_\").replace(':', \"_\")\n uri_dict_temp[parsed_uri + \"_%i\" %i] = line[self.uri_column]\n else:\n for i, line in enumerate(reader):\n parsed_uri = urlparse(line[self.uri_column]).netloc.replace(\".\", \"_\").replace(':', \"_\")\n uri_dict_temp[parsed_uri + \"_%i\" %i] = line[self.uri_column]\n\n # Exclude header if has_header is True\n elif isinstance(self.filename_column, int):\n if self.has_header == True:\n next(reader)\n for line in reader:\n uri_dict_temp[line[self.filename_column]] = line[self.uri_column]\n else:\n for line in reader:\n uri_dict_temp[line[self.filename_column]] = line[self.uri_column]\n \n # Replace dict with temp dict\n self.uri_dict = uri_dict_temp\n
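    # --- Editor's note: illustrative usage sketch, not part of the original record. ---
    # "links.csv" and its column names are made up for this demo:
    #
    #     name,url
    #     home,https://example.com/
    #     docs,https://example.com/docs
    #
    # loader = CSVLoader("links.csv", has_header=True, uri_column="url", filename_column="name")
    # print(loader.get_uri_dict())
    # # -> {'home': 'https://example.com/', 'docs': 'https://example.com/docs'}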
\n # Get the uri dict\n def get_uri_dict(self):\n self._set_uri_dict()\n return self.uri_dict","sub_path":"pywebcapture/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":5713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"115582632","text":"import collections\nimport re\n\n\ndef main():\n with open(\"input.txt\") as fh:\n data = fh.read()\n\n fabric = collections.defaultdict(int)\n pattern = re.compile(r\"#\\d+\\s@\\s(\\d+),(\\d+):\\s(\\d+)x(\\d+)\")\n for (xpos, ypos, w, h) in map(lambda i: map(int, i), pattern.findall(data)):\n for y in range(ypos, ypos + h):\n for x in range(xpos, xpos + w):\n fabric[(x, y)] += 1\n print(len(list(filter(lambda claim: claim[1] >= 2, fabric.items()))))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"project03/sliceit.py","file_name":"sliceit.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"537203833","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nProcesses and threads are operated on in basically the same way.\n\nData is independent between processes.\nTo share data between processes you must use the special process queue -- unlike the queue in the queue module, the queue meant here is the process-specific one.\n\"\"\"\n\n\"\"\"\n# demonstrate that data is independent between processes\nfrom multiprocessing import Process\n\ndef f(arg,l):\n l.append(arg)\n print(arg,l)\n\n\nif __name__ == '__main__':\n li = [] # data is independent between processes, so each process appending to the list does not affect the other processes' lists\n for i in range(10):\n p = Process(target=f,args=(i,li))\n p.start()\n print(li)\n\"\"\"\n\n\"\"\"\n# way one to share data between processes\nfrom multiprocessing import Process\nfrom multiprocessing import queues\nimport multiprocessing\n\ndef f(arg,q):\n q.put(arg)\n print(arg,'count:',q.qsize())\n\n\nif __name__ == '__main__':\n q = queues.Queue(20,ctx=multiprocessing) # a ctx argument must be passed so the process lock can be used -- the class the process module uses for locks is multiprocessing (multiprocessing.Lock())\n for i in range(10):\n p = Process(target=f,args=(i,q))\n p.start()\n\n\n\n\n# way two to share data between processes: use an Array -- rarely used\nfrom multiprocessing import Process\nfrom multiprocessing import Array\nimport multiprocessing\n\ndef f(arg,arr):\n arr[arg] += arg + 100\n for item in arr:\n print(item)\n print('======================')\n\n\nif __name__ == '__main__':\n arr = Array('i',10)\n for i in range(10):\n p = Process(target=f,args=(i,arr))\n p.start()\n\n\"\"\"\n# way three to share data between processes: the special dict\n# the dict is created in the main process; once the main process finishes executing, it disconnects from the child processes, and a child process can then no longer find the dict defined in the main process and raises an error\nfrom multiprocessing import Process\nfrom multiprocessing import Manager\n\n\ndef f(arg,dic):\n dic[arg] = arg + 100\n print(dic.values())\n\n\nif __name__ == '__main__':\n obj = Manager()\n dic = obj.dict()\n for i in range(10):\n p = Process(target=f,args=(i,dic))\n p.start()\n p.join()","sub_path":"day11/10-进程.py","file_name":"10-进程.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"430858330","text":"# Write the \"get_find\" function.\n# Given a character and a string, the \"get_find\" function returns the first position at which the given character appears in the given string.\n# Notes:\n# The first character of the string has index 0.\n# If the character appears several times in the string, the first position at which it appears must be returned.\n# If the character does not appear in the string, -1 must be returned.\n# Do not use the find function.\n# output = get_find('a', 'I am a hacker')\n# print(output) # --> 2\n\n\ndef get_find(x, s):\n for i in range(len(s)):\n if s[i] == x:\n return i \n return -1\n \n\nprint(get_find('a', 'I am a hacker'))\nprint(get_find('a','la')) \nprint(get_find('c','abrakadabra'))\n\n\n# Complete the function so that it finds the longest word among the words in the given list.\ndef find_longest_word(s):\n return max(s, key=len)\n\nprint(find_longest_word([\"PHP\", \"Exercises\", \"Backend\"]))","sub_path":"replit 
quiz1.py","file_name":"replit quiz1.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"147727345","text":"import sys\n\nimport pygame as pg\n\nimport sounds\n\nsetBtn = 1\ncolor = 'grey'\n\n\ndef checkEvents(setting, screen, stats, sb, playBtn, greyBtn, redBtn, blueBtn, quitBtn, menuBtn, sel, ship, aliens,\n bullets, eBullets):\n \"\"\"Respond to keypresses and mouse events.\"\"\"\n global setBtn, color\n for event in pg.event.get():\n # Check for quit event\n if event.type == pg.QUIT:\n sys.exit()\n # Check for key down has been pressed\n elif event.type == pg.KEYDOWN:\n # Check if down, up, enter, esc is pressed\n if event.key == pg.K_DOWN:\n if setBtn < 5:\n sounds.control_menu.play()\n setBtn += 1\n sel.rect.y += 50\n if event.key == pg.K_UP:\n if setBtn > 1:\n sounds.control_menu.play()\n setBtn -= 1\n sel.rect.y -= 50\n if event.key == pg.K_RETURN:\n if setBtn == 1:\n # default mode\n sounds.start_game.play()\n color = 'grey'\n ship.image = pg.image.load(checkColor())\n stats.mainMenu = False\n stats.mainGame = True\n stats.playMenu = False\n stats.twoPlayer = False\n stats.mainAbout = False\n setBtn = 1\n sel.rect.centery = playBtn.rect.centery\n elif setBtn == 2:\n sounds.start_game.play()\n color = 'red'\n ship.image = pg.image.load(checkColor())\n stats.mainMenu = False\n stats.mainGame = True\n stats.playMenu = False\n stats.twoPlayer = False\n stats.mainAbout = False\n setBtn = 1\n sel.rect.centery = playBtn.rect.centery\n elif setBtn == 3:\n sounds.start_game.play()\n color = 'blue'\n ship.image = pg.image.load(checkColor())\n stats.mainMenu = False\n stats.mainGame = True\n stats.playMenu = False\n stats.twoPlayer = False\n stats.mainAbout = False\n setBtn = 1\n sel.rect.centery = playBtn.rect.centery\n elif setBtn == 4:\n # menu btn\n sounds.select_menu.play()\n stats.mainMenu = True\n stats.mainGame = False\n stats.playMenu = False\n stats.twoPlayer = False\n stats.mainAbout = False\n setBtn = 1\n sel.rect.centery = playBtn.rect.centery\n elif setBtn == 5:\n sys.exit()\n if event.key == pg.K_ESCAPE:\n sys.exit()\n\n\ndef drawMenu(setting, screen, sb, greyBtn, redBtn, blueBtn, menuBtn, quitBtn, sel):\n \"\"\"Draw the menu and all of its elements\"\"\"\n global image, rect\n screen.fill(setting.bgColor)\n menuBtn.rect.y = 350\n menuBtn.msgImageRect.y = 350\n quitBtn.rect.y = 400\n quitBtn.msgImageRect.y = 400\n menuBtn.drawBtn()\n quitBtn.drawBtn()\n greyBtn.drawBtn()\n redBtn.drawBtn()\n blueBtn.drawBtn()\n sel.blitme()\n pg.display.flip()\n\n\ndef checkColor():\n return 'gfx/player_' + color + '.bmp'\n","sub_path":"playMenu.py","file_name":"playMenu.py","file_ext":"py","file_size_in_byte":3524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"575196954","text":"from watson_developer_cloud import TextToSpeechV1\nfrom credential.watsonC import ApiKeyTTS as watsonApiKey\nfrom robots.state import saveContent,loadContent\n\ndef robotVoice():\n text_to_speech = TextToSpeechV1(\n iam_apikey= watsonApiKey['iam_apikey'],\n url=watsonApiKey['url']\n )\n def sentencesToVoice(sentence,filename):\n output = './content/{}-audio.wav'.format(filename)\n try:\n with open(output, 'wb') as audio_file:\n audio_file.write(\n text_to_speech.synthesize(\n sentence,\n voice='en-US_AllisonVoice',\n accept='audio/wav' \n ).get_result().content)\n return True\n except:\n return False\n \n \n def fetchVoicesOfAllSentences(content):\n print('> 
Fetching voices of all sentences...')\n for i,item in enumerate(content['sentences']):\n content['sentences'][i]['audio'] = sentencesToVoice(content['sentences'][i]['text'],i)\n print('> Fetched voices of all sentences')\n return content\n \n content = loadContent()\n fetchVoicesOfAllSentences(content)\n saveContent(content)","sub_path":"robots/voice.py","file_name":"voice.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"187621721","text":"from pandocfilters import toJSONFilter, Header, attributes\n\ndef cscify(key, value, format, meta):\n # image location depends on the theme\n try:\n theme = meta['theme']['c'][0]['c']\n except (KeyError, IndexError, TypeError):\n theme = 'default'\n template = u'theme/{0}/img/%s.png'.format(theme)\n # markdown: special class names trigger loading of a data background image\n # and replacement with a corresponding generic class name\n if key == 'Header' and value[0] == 1:\n if 'data-background' not in [x[0] for x in value[1][2]]:\n for cls in ['title-en', 'title-fi', 'author', 'section']:\n if cls in value[1][1]:\n value[1][1].remove(cls)\n value[1][2].append([u'data-background', template % cls])\n if cls == 'author':\n value[1][1].append(u'author-slide')\n elif cls == 'section':\n value[1][1].append(u'section-slide')\n else:\n value[1][1].append(u'title-slide')\n return Header(value[0], value[1], value[2])\n # reST: special class name in a container Div triggers the same as above,\n # but only the modified Header is returned\n elif key == 'Div' and value[1][0]['t'] == 'Header':\n for cls in ['title-en', 'title-fi', 'author', 'section']:\n if cls in value[0][1]:\n header = value[1][0]['c']\n header[1][2].append([u'data-background', template % cls])\n if cls == 'author':\n header[1][1].append(u'author-slide')\n elif cls == 'section':\n header[1][1].append(u'section-slide')\n else:\n header[1][1].append(u'title-slide')\n return Header(header[0], header[1], header[2])\n\nif __name__ == '__main__':\n toJSONFilter(cscify)\n","sub_path":"filter/background-image.py","file_name":"background-image.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"27505352","text":"#!D:\\Python27\n# coding:utf-8\n\nimport os\nimport logging\n\n\"\"\"\nLog level ordering: CRITICAL > ERROR > WARNING > INFO > DEBUG > NOTSET\n\nThe logging module provides logger, handler, filter and formatter objects.\nlogger: provides the logging interface. The two most common operations on a logger are configuring it and sending log messages.\n A logger object is obtained via logging.getLogger(name); if no name is given, the root logger is returned,\n and calling getLogger repeatedly with the same name returns the same logger object.\nhandler: sends a log record to a suitable destination, such as a file or a socket.\n A logger object can add zero or more handlers via the addHandler method,\n and each handler can define its own log level, so that records are filtered and displayed per level.\nfilter: provides an elegant way to decide whether a log record is sent to a handler.\nformatter: specifies the concrete output format of a log record.\n A formatter defines the order, structure and content of the log message, e.g. '[%(asctime)s] [%(levelname)s] %(message)s'\n %(name)s name of the logger\n %(levelname)s log level as text\n %(message)s the message supplied by the user\n %(asctime)s current time as a string. The default format is \"2003-07-08 16:49:45,896\"; the digits after the comma are milliseconds\n %(levelno)s log level as a number\n %(pathname)s full path of the module making the logging call\n %(filename)s file name of the module making the logging call\n %(module)s name of the module making the logging call\n %(funcName)s name of the function making the logging call\n %(lineno)d line number of the statement making the logging call\n %(created)f current time as a standard UNIX float timestamp\n %(relativeCreated)d milliseconds since the Logger was created, at the moment the message is emitted\n %(thread)d thread ID (may be absent)\n %(threadName)s thread name (may be absent)\n %(process)d process ID (may be absent)\n\nSeveral kinds of Handler are available:\nlogging.StreamHandler can write to any file object such as sys.stdout or sys.stderr\nlogging.FileHandler writes log messages to a file\nlogging.handlers.RotatingFileHandler like FileHandler, but it manages the file size: once the file reaches a certain size, it automatically renames the current log file and creates a new file with the same name to keep writing to\nlogging.handlers.TimedRotatingFileHandler like RotatingFileHandler, except that it does not decide when to create a new log file by checking the file size, but instead creates a new log file automatically at fixed time intervals\nlogging.handlers.SocketHandler sends log messages over the network using TCP\nlogging.handlers.DatagramHandler sends log messages over the network using UDP\nlogging.handlers.SysLogHandler writes log output to syslog\nlogging.handlers.NTEventLogHandler writes remote log output to the Windows NT/2000/XP event log\nlogging.handlers.SMTPHandler sends remote log output to an e-mail address\nlogging.handlers.MemoryHandler writes log output to a specified buffer in memory\nlogging.handlers.HTTPHandler sends remote log output to an HTTP server via \"GET\" or \"POST\"\nThe specific usage of each Handler is described in the reference manual:\nhttps://docs.python.org/2/library/logging.handlers.html#module-logging.handlers\n\"\"\"\n\n
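# --- Editor's note: illustrative sketch, not part of the original record. ---
# The docstring above lists RotatingFileHandler but the module never shows it;
# a minimal commented sketch (the file name and limits are made up):
#
# import logging.handlers
#
# rotating_logger = logging.getLogger("rotating_demo")
# rotating_logger.setLevel(logging.DEBUG)
# # keep at most 3 backups of ~1 KB each: demo.log, demo.log.1, demo.log.2, demo.log.3
# rh = logging.handlers.RotatingFileHandler("demo.log", maxBytes=1024, backupCount=3)
# rh.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s : %(message)s"))
# rotating_logger.addHandler(rh)
# for n in range(100):
#     rotating_logger.info("message %d", n)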
# log to both the console and a file, at a different level for each\ndef log_console_file():\n logger = logging.getLogger(\"log_console_file\")\n logger.setLevel(logging.DEBUG)\n\n # create a FileHandler to record the log in a file, at level DEBUG and above\n fh = logging.FileHandler('debug.log', mode='w')\n fh.setLevel(logging.DEBUG)\n\n # create a StreamHandler to print the log to the console, at level ERROR and above\n ch = logging.StreamHandler()\n ch.setLevel(logging.ERROR)\n\n formatter = logging.Formatter(fmt=\"%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s : %(message)s\",\n datefmt='%Y-%m-%d %H:%M:%S')\n\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n\n logger.addHandler(fh)\n logger.addHandler(ch)\n\n # write some log messages\n logger.debug(\"debug message\")\n logger.info(\"info message\")\n logger.warning(\"warn message\")\n logger.error(\"error message\")\n logger.critical(\"critical message\")\n\n\nclass StudyLogger:\n def __init__(self, log_file):\n self.logger = logging.getLogger(log_file)\n self.logger.setLevel(logging.DEBUG)\n\n def format_log_info(self, log_file, console_level=logging.ERROR, file_level=logging.DEBUG):\n formatter = logging.Formatter(fmt=\"%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s : %(message)s\",\n datefmt='%Y-%m-%d %H:%M:%S')\n # create a FileHandler to record the log in a file (file_level and above)\n fh = logging.FileHandler(log_file, mode='w')\n fh.setLevel(file_level)\n\n # create a StreamHandler to print the log to the console (console_level and above)\n ch = logging.StreamHandler()\n ch.setLevel(console_level)\n\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n\n self.logger.addHandler(fh)\n self.logger.addHandler(ch)\n\n def debug(self, message):\n self.logger.debug(message)\n\n def info(self, message):\n self.logger.info(message)\n\n def warn(self, message):\n self.logger.warning(message)\n\n def error(self, message):\n self.logger.error(message)\n\n def critical(self, message):\n self.logger.critical(message)\n\n\nif __name__ == \"__main__\":\n # basic usage; the logger name is root\n logging.basicConfig(level=logging.DEBUG,\n format=\"%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s : %(message)s\",\n datefmt='%a, %d %b %Y %H:%M:%S',\n filename='/tmp/test.log',\n filemode='w')\n\n logging.debug('debug message')\n logging.info('info message')\n logging.warning('warning message')\n logging.error('error message')\n logging.critical('critical message')\n\n # log_console_file()\n s_log = StudyLogger('debug.log')\n s_log.format_log_info('debug.log')\n\n s_log.debug('a debug message')\n s_log.info('an info message')\n s_log.warn('a warning message')\n s_log.error('an error message')\n s_log.critical('a fatal critical message')\n\n s_log2 = StudyLogger('debug.log')\n s_log2.info('info message')\n s_log2.warn('warning message')\n s_log2.critical('critical message')\n s_log2.critical('critical2 message')\n\n # create a logger\n logger = logging.getLogger()\n\n logger1 = logging.getLogger('mylogger')\n logger1.setLevel(logging.DEBUG)\n\n logger2 = logging.getLogger('mylogger')\n logger2.setLevel(logging.INFO)\n\n
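    # --- Editor's note: illustrative sketch, not part of the original record. ---
    # logger1 and logger2 above are the *same* object: getLogger returns one
    # logger per name, so the second setLevel simply overwrites the first.
    # Dotted names form a hierarchy, and child records propagate to ancestors.
    #
    # a = logging.getLogger('mylogger')
    # b = logging.getLogger('mylogger')
    # assert a is b                          # one logger object per name
    # c = logging.getLogger('mylogger.child1')
    # assert c.parent is a                   # 'mylogger' is the parent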
logger3 = logging.getLogger('mylogger.child1')\n logger3.setLevel(logging.WARNING)\n\n logger4 = logging.getLogger('mylogger.child1.child2')\n logger4.setLevel(logging.DEBUG)\n\n logger5 = logging.getLogger('mylogger.child1.child2.child3')\n logger5.setLevel(logging.DEBUG)\n\n # create a handler that writes to a log file\n fh = logging.FileHandler('/tmp/test.log')\n\n # create another handler that writes to the console\n ch = logging.StreamHandler()\n\n # define the output formatter for the handlers\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n\n # define a filter\n # filter = logging.Filter('mylogger.child1.child2')\n # fh.addFilter(filter)\n\n # add the handlers to the loggers\n # logger.addFilter(filter)\n logger.addHandler(fh)\n logger.addHandler(ch)\n\n # logger1.addFilter(filter)\n logger1.addHandler(fh)\n logger1.addHandler(ch)\n\n logger2.addHandler(fh)\n logger2.addHandler(ch)\n\n # logger3.addFilter(filter)\n logger3.addHandler(fh)\n logger3.addHandler(ch)\n\n # logger4.addFilter(filter)\n logger4.addHandler(fh)\n logger4.addHandler(ch)\n\n logger5.addHandler(fh)\n logger5.addHandler(ch)\n\n # emit log messages\n logger.debug('logger debug message')\n logger.info('logger info message')\n logger.warning('logger warning message')\n logger.error('logger error message')\n logger.critical('logger critical message')\n\n logger1.debug('logger1 debug message')\n logger1.info('logger1 info message')\n logger1.warning('logger1 warning message')\n logger1.error('logger1 error message')\n logger1.critical('logger1 critical message')\n\n logger2.debug('logger2 debug message')\n logger2.info('logger2 info message')\n logger2.warning('logger2 warning message')\n logger2.error('logger2 error message')\n logger2.critical('logger2 critical message')\n\n logger3.debug('logger3 debug message')\n logger3.info('logger3 info message')\n logger3.warning('logger3 warning message')\n logger3.error('logger3 error message')\n logger3.critical('logger3 critical message')\n\n logger4.debug('logger4 debug message')\n logger4.info('logger4 info message')\n logger4.warning('logger4 warning message')\n logger4.error('logger4 error message')\n logger4.critical('logger4 critical message')\n\n logger5.debug('logger5 debug message')\n logger5.info('logger5 info message')\n logger5.warning('logger5 warning message')\n logger5.error('logger5 error message')\n logger5.critical('logger5 critical message')\n","sub_path":"Code/built_in/Log/study_logging.py","file_name":"study_logging.py","file_ext":"py","file_size_in_byte":9376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
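# --- Editor's note: illustrative addition between records, not part of the dataset. ---
# The record above leaves its logging.Filter lines commented out; this
# self-contained sketch shows what they would do: a Filter named
# 'mylogger.child1.child2' lets through only that logger and its descendants.
import logging
import sys

handler = logging.StreamHandler(sys.stdout)
handler.addFilter(logging.Filter('mylogger.child1.child2'))
root = logging.getLogger()
root.setLevel(logging.DEBUG)
root.addHandler(handler)

logging.getLogger('mylogger.child1').warning('dropped by the filter')
logging.getLogger('mylogger.child1.child2').warning('passes the filter')
logging.getLogger('mylogger.child1.child2.child3').warning('passes too (a descendant)')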
{"seq_id":"134932830","text":"\"\"\"mycontrollerauto controller.\"\"\"\n\n# You may need to import some classes of the controller module. Ex:\n# from controller import Robot, LED, DistanceSensor\nfrom controller import DifferentialWheels, LED, DistanceSensor, Camera, LightSensor\n\n# create the Robot instance.\nrobot = DifferentialWheels()\nprint(\"Vural's Robot is running\")\ntimestep = 40\n\n\ndef searchred(leftspeed, rightspeed):\n # searching for red\n print('searching red')\n # We ordered the cases by the likelihood of each situation. The statements below\n # handle black-line following and obstacle avoiding.\n if (right0 >= 100 and left7 >= 100 and gRight < 750 or (left7 >= 100 and gRight < 750)):\n rightspeed += 1000\n leftspeed -= 1000\n elif (right0 >= 100 and left7 >= 100 and gLeft < 750 or (left7 >= 100 and gLeft < 750)):\n rightspeed -= 1000\n leftspeed += 1000\n elif (right0 >= 100 and left7 >= 100):\n rightspeed += 1000\n leftspeed -= 1000\n # if an obstacle is on the right side, the right wheel's speed increases.\n elif (right0 >= 100 or right1 >= 100 or right2 >= 100):\n rightspeed += 1000\n leftspeed -= 1000\n\n # if an obstacle is on the left side, the left wheel's speed increases.\n elif (left7 >= 100 or left6 >= 100 or left5 >= 100):\n rightspeed -= 1000\n leftspeed += 1000\n # these ground sensors increase speed when they detect a black line.\n # we split this into four cases; the speed should stay within the e-puck's maximum.\n # we chose a speed delta of 1500 instead of 1000, which helps the robot pull away from the line\n elif (gLeft < 600):\n rightspeed -= 1500\n leftspeed += 1500\n elif (gRight < 600):\n rightspeed += 1500\n leftspeed -= 1500\n elif (gLeft < 750 and gCentre < 600):\n rightspeed -= 1500\n leftspeed += 1500\n elif (gRight < 750 and gCentre < 600):\n\n rightspeed += 1500\n leftspeed -= 1500\n\n robot.setSpeed(leftspeed, rightspeed)\n\n\ndef searchyellow(leftspeed, rightspeed):\n print('searching yellow')\n # the search logic is the same; the only difference is that this time it searches for yellow.\n if (right0 >= 100 and left7 >= 100 and gRight < 750 or (left7 >= 100 and gRight < 750)):\n rightspeed += 1000\n leftspeed -= 1000\n elif (right0 >= 100 and left7 >= 100 and gLeft < 750 or (left7 >= 100 and gLeft < 750)):\n rightspeed -= 1000\n leftspeed += 1000\n elif (right0 >= 100 and left7 >= 100):\n rightspeed += 1000\n leftspeed -= 1000\n # if an obstacle is on the right side, the right wheel's speed increases.\n elif (right0 >= 100 or right1 >= 100 or right2 >= 100):\n rightspeed += 1000\n leftspeed -= 1000\n\n # if an obstacle is on the left side, the left wheel's speed increases.\n elif (left7 >= 100 or left6 >= 100 or left5 >= 100):\n rightspeed -= 1000\n leftspeed += 1000\n # these ground sensors increase speed when they detect a black line.\n # we split this into four cases.\n elif (gLeft < 600):\n rightspeed -= 1500\n leftspeed += 1500\n elif (gRight < 600):\n rightspeed += 1500\n leftspeed -= 1500\n elif (gLeft < 750 and gCentre < 600):\n rightspeed -= 1500\n leftspeed += 1500\n elif (gRight < 750 and gCentre < 600):\n # we chose a speed delta of 1500 instead of 1000, which helps the robot pull away from the line\n rightspeed += 1500\n leftspeed -= 1500\n\n robot.setSpeed(leftspeed, rightspeed)\n\n\n# the \"found\" functions lock onto the target; the e-puck approaches the target\n# slowly until it is close to it.\ndef foundtrash(k, l):\n print(\"trash is detected\")\n\n robot.setSpeed(k, l)\n # the condition below checks that the target has been reached.\n # k and l are not strictly required; they just make sure the condition holds.\n # When the robot comes close, it must detect the object's presence to run the next step,\n # so left7 and right0 tell the e-puck it is near the target,\n # and the target colours are confirmed as well.\n\n if ((k >= 190 and l >= 190) and (left7 >= 140 or right0 >= 140) and (\n red[20] >= 100 and green[20] >= 100 and blue[20] <= 10)):\n robot.setSpeed(0, 0)\n led[0].set(1)\n print(\"I am next to trash\")\n
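# --- Editor's note: illustrative sketch, not part of the original record. ---
# The colour tests in this controller are plain RGB thresholds on a single
# pixel of the camera scanline: yellow needs strong red AND green with little
# blue, red needs strong red alone. Commented out, with made-up pixel values:
#
# def looks_yellow(r, g, b):
#     return r >= 120 and g >= 120 and b <= 30
#
# def looks_red(r, g, b):
#     return r >= 120 and g <= 10 and b <= 10
#
# assert looks_yellow(200, 180, 5) and not looks_red(200, 180, 5)
# assert looks_red(200, 5, 5) and not looks_yellow(200, 5, 5)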
# this function is similar to the one above; it locks onto the bin instead.\ndef foundbin(k, l):\n print(\"bin is detected\")\n robot.setSpeed(k, l)\n # same as the \"foundtrash\" method.\n if ((k >= 190 and l >= 190) and (left7 >= 140 or right0 >= 140) and (\n red[20] >= 100 and green[20] <= 10 and blue[20] <= 10)):\n led[0].set(0)\n print(\" I am next to bin\")\n\n\n# enable camera.\ncamera = Camera(\"camera\")\ncamera.enable(timestep * 2)\nprint(\"Camera width = \", camera.getWidth(), \"Camera height =\", camera.getHeight())\n\n# enable LEDs\nled = [0] * 8\ncount = 0\nfor i in range(8):\n name = \"led\" + str(i)\n led[i] = LED(name)\n\nrobot.enableEncoders(timestep)\n# enable distance and ground sensors\nirLeft7 = DistanceSensor(\"ps7\")\nirLeft6 = DistanceSensor(\"ps6\")\nirLeft5 = DistanceSensor(\"ps5\")\nirRight0 = DistanceSensor(\"ps0\")\nirRight1 = DistanceSensor(\"ps1\")\nirRight2 = DistanceSensor(\"ps2\")\nirLeft7.enable(timestep)\nirLeft6.enable(timestep)\nirLeft5.enable(timestep)\nirRight0.enable(timestep)\nirRight1.enable(timestep)\nirRight2.enable(timestep)\ngsLeft = DistanceSensor(\"gs0\")\ngsCentre = DistanceSensor(\"gs1\")\ngsRight = DistanceSensor(\"gs2\")\ngsLeft.enable(timestep)\ngsCentre.enable(timestep)\ngsRight.enable(timestep)\n\n# Create arrays that hold the camera pixel colour values\nred = [0] * 40\nblue = [0] * 40\ngreen = [0] * 40\n\nwhile robot.step(timestep) != -1:\n\n # read the sensor values\n left7 = irLeft7.getValue()\n left6 = irLeft6.getValue()\n left5 = irLeft5.getValue()\n\n right0 = irRight0.getValue()\n right1 = irRight1.getValue()\n right2 = irRight2.getValue()\n\n gRight = gsRight.getValue()\n gLeft = gsLeft.getValue()\n gCentre = gsCentre.getValue()\n\n # fetch the components of each pixel\n image = camera.getImageArray()\n\n # get the colour component of the pixel x (0,40) y(5,6)\n for x in range(0, camera.getWidth()):\n for y in range(5, 6):\n # we fill our arrays with the colour values.\n red[x] = image[x][y][0]\n green[x] = image[x][y][1]\n blue[x] = image[x][y][2]\n\n # print the arrays to the console.\n print('r=' + str(red))\n print('g=' + str(green))\n print('b=' + str(blue))\n\n print(\"Left Encoder=\", robot.getLeftEncoder(),\n \"Right Encoder=\", robot.getRightEncoder())\n print(\"IR Distances: Left=\", irLeft7.getValue(),\n \" Right =\", irRight0.getValue())\n print(\"Line sensors: Left=\", gsLeft.getValue(), \"Centre = \",\n gsCentre.getValue(), \"Right=\", gsRight.getValue())\n # read the two encoders; they let us re-run the random scan periodically.\n tick1 = robot.getLeftEncoder()\n tick2 = robot.getRightEncoder()\n # get value from first LED\n ledx = led[0].get()\n print(\"led\", ledx)\n
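    # --- Editor's note: illustrative sketch, not part of the original record. ---
    # The rescan condition below fires for the last 1000 ticks of every
    # 20000-tick encoder window, i.e. abs(tick1) % 20000 >= 19000:
    #
    # for tick in (500, 18999, 19000, 19999, 20000, 39500):
    #     print(tick, tick % 20000 >= 19000)
    # # 500 False, 18999 False, 19000 True, 19999 True, 20000 False, 39500 True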
# this is a random search method: the robot scans the environment first, then searches.\n # It also rescans once per 20000 encoder ticks.\n if (tick1 <= 700 and tick2 >= -700) or abs(tick1) % 20000 >= 19000:\n robot.setSpeed(100.5, -100.5)\n\n # The behaviour depends on the LED state.\n if (ledx == 0):\n\n # We only look at the middle of the screen, index \"20\"; therefore we compare element [20]\n if (100 <= green[20] and red[20] >= 100 and blue[20] <= 10):\n robot.setSpeed(0, 0)\n robot.setEncoders(0, 0)\n # it resets the encoders to scan again after detecting.\n k = 200\n l = 200\n foundtrash(k, l)\n\n if (ledx == 1):\n if (green[20] <= 10 and red[20] >= 100 and blue[20] <= 10):\n robot.setSpeed(0, 0)\n robot.setEncoders(0, 0)\n # it resets the encoders to scan again after detecting.\n k = 200\n l = 200\n foundbin(k, l)\n # if scanning cannot find anything, the e-puck starts to search by checking the LED.\n # If the first LED is off, search for trash.\n # If the first LED is lit, search for bin.\n else:\n if (ledx == 0):\n\n rightspeed = 400\n leftspeed = 400\n searchyellow(leftspeed, rightspeed)\n # if led[0] is off, the colours of the pixel at width 20 are compared with the conditions below.\n # For the height we only chose one value, i.e. the pixel 20x5.\n # If it detects, it locks onto the yellow ball.\n # That means the desired item has been found, and the \"found\" function runs.\n\n if (green[20] >= 120 and red[20] >= 120 and blue[20] <= 30):\n # k and l determine the initial speed when locking onto the target.\n k = 200\n l = 200\n foundtrash(k, l)\n\n if (ledx == 1):\n\n rightspeed = 400\n leftspeed = 400\n searchred(leftspeed, rightspeed)\n\n if (green[20] <= 10 and red[20] >= 120 and blue[20] <= 10):\n # if led[0] is lit and red is detected, it works the same way as above but calls \"foundbin\" instead,\n # locking onto the red ball (the trash can).\n k = 200\n l = 200\n foundbin(k, l)\n","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":9548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"447971114","text":"# -----------------------------------------------------------------------\n# Name: Veronica Salm\n# CCID: vsalm\n# File: DataManager.py\n#\n# Description: Reads and parses a json file, and manages the resulting\n# \t\t\t object.\n#--------------------------------------------------------------------\nimport os, sys, csv\n\nfrom naive_bayes.normalizer import normalize, tokenize\n\nROW_ID = 0\nTOKENS = 1\nRELATION = 2\nLANGUAGE = 3\n\nclass DataManager():\n\n def __init__(self, input_path):\n \"\"\" Read the index file from the current directory. Returns the index file object if successful. 
\"\"\"\n\n # try to open the index file in the given directory\n try:\n file = open(input_path, encoding='utf-8')\n except FileNotFoundError:\n # if we get here, print an error message and quit\n print(\"Error: Could not find data file '{}'.\".format(input_path))\n sys.exit();\n\n self.in_file = open(input_path, \"r\")\n self.in_reader = csv.reader(self.in_file, delimiter=\",\")\n\n # skip header\n self.header = next(self.in_reader)\n\n self.input_path = input_path\n self.data = []\n\n while True:\n try:\n r = next(self.in_reader)\n self.data.append(r)\n except StopIteration:\n break\n self._all_docs = None\n\n def _check_index(self, idx):\n \"\"\" Check that the given index is within a valid range,\n 0 <= idx <= len(self.data) \"\"\"\n if idx < 0 or idx >= len(self.data):\n print(\"Error: Attempted to index into '{}' out of bounds using index {}.\".format(self.input_path, idx))\n sys.exit()\n\n def get_tokens(self, idx):\n \"\"\" Given an index 0 <= idx < len(data), return the text (tokens)\n of the data at that position. \"\"\"\n self._check_index(idx)\n return self.data[idx][TOKENS]\n\n def get_language(self, idx):\n \"\"\" Given an index 0 <= idx < len(data), return the language\n of the tweet at that position. \"\"\"\n self._check_index(idx)\n return self.data[idx][LANGUAGE]\n\n def get_id(self, idx):\n \"\"\" Given an index 0 <= idx < len(data), return the id\n of the data at that position. \"\"\"\n self._check_index(idx)\n return self.data[idx][ROW_ID]\n\n def get_relation(self, idx):\n \"\"\" Given an index 0 <= idx < len(data), return the relation (label)\n of the data at that position. \"\"\"\n self._check_index(idx)\n return self.data[idx][RELATION]\n\n def get_document_tokens(self, idx, p=None):\n \"\"\" Return all normalized tokens from the document represented by the given\n index. 
\"\"\"\n return normalize(tokenize(self.get_tokens(idx)))\n\n def num_docs_in_corpus(self):\n \"\"\" Returns the number of documents in the data set.\"\"\"\n return len(self.data)\n\n def all_tokens(self):\n tokens = []\n for i in range(self.num_docs_in_corpus()):\n tokens += self.get_document_tokens(i)\n return tokens\n\n def all_docs(self):\n if self._all_docs:\n return self._all_docs\n else:\n docs = []\n for i in range(self.num_docs_in_corpus()):\n docs.append(self.get_document_tokens(i))\n self._all_docs = docs\n return self.all_docs()\n\n def __len__(self):\n return self.num_docs_in_corpus()\n","sub_path":"mask_classification/naive_bayes/DataManager.py","file_name":"DataManager.py","file_ext":"py","file_size_in_byte":3408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"623923556","text":"import math,string,itertools,fractions,heapq,collections,re,array,bisect,random\n\nclass FoxAndClassroom:\n def is_full(self, i, j):\n \n while (self.board[i][j] == None):\n self.board[i][j] = 1\n i = (i + 1) % self.rows\n j = (j + 1) % self.cols\n\n return all([all(row) for row in self.board])\n\n board = []\n rows = 0\n cols = 0\n def ableTo(self, n, m):\n self.rows = n\n self.cols = m\n for i in range(n):\n for j in range(m):\n self.board = [[None for j in range(m)] for i in range(n)]\n if self.is_full(i,j):\n return \"Possible\"\n\n return \"Impossible\"\n\n# BEGIN KAWIGIEDIT TESTING\n# Generated by KawigiEdit-pf 2.3.0\nimport sys\nimport time\ndef KawigiEdit_RunTest(testNum, p0, p1, hasAnswer, p2):\n\tsys.stdout.write(str(\"Test \") + str(testNum) + str(\": [\") + str(p0) + str(\",\") + str(p1))\n\tprint(str(\"]\"))\n\tobj = FoxAndClassroom()\n\tstartTime = time.clock()\n\tanswer = obj.ableTo(p0, p1)\n\tendTime = time.clock()\n\tres = True\n\tprint(str(\"Time: \") + str((endTime - startTime)) + str(\" seconds\"))\n\tif (hasAnswer):\n\t\tprint(str(\"Desired answer:\"))\n\t\tprint(str(\"\\t\") + str(\"\\\"\") + str(p2) + str(\"\\\"\"))\n\t\n\tprint(str(\"Your answer:\"))\n\tprint(str(\"\\t\") + str(\"\\\"\") + str(answer) + str(\"\\\"\"))\n\tif (hasAnswer):\n\t\tres = answer == p2\n\t\n\tif (not res):\n\t\tprint(str(\"DOESN'T MATCH!!!!\"))\n\telif ((endTime - startTime) >= 2):\n\t\tprint(str(\"FAIL the timeout\"))\n\t\tres = False\n\telif (hasAnswer):\n\t\tprint(str(\"Match :-)\"))\n\telse:\n\t\tprint(str(\"OK, but is it right?\"))\n\t\n\tprint(str(\"\"))\n\treturn res\n\nall_right = True\ntests_disabled = False\n\n\n# ----- test 0 -----\ndisabled = False\np0 = 2\np1 = 3\np2 = \"Possible\"\nall_right = (disabled or KawigiEdit_RunTest(0, p0, p1, True, p2) ) and all_right\ntests_disabled = tests_disabled or disabled\n# ------------------\n\n# ----- test 1 -----\ndisabled = False\np0 = 2\np1 = 2\np2 = \"Impossible\"\nall_right = (disabled or KawigiEdit_RunTest(1, p0, p1, True, p2) ) and all_right\ntests_disabled = tests_disabled or disabled\n# ------------------\n\n# ----- test 2 -----\ndisabled = False\np0 = 4\np1 = 6\np2 = \"Impossible\"\nall_right = (disabled or KawigiEdit_RunTest(2, p0, p1, True, p2) ) and all_right\ntests_disabled = tests_disabled or disabled\n# ------------------\n\n# ----- test 3 -----\ndisabled = False\np0 = 3\np1 = 6\np2 = \"Impossible\"\nall_right = (disabled or KawigiEdit_RunTest(3, p0, p1, True, p2) ) and all_right\ntests_disabled = tests_disabled or disabled\n# ------------------\n\n# ----- test 4 -----\ndisabled = False\np0 = 5\np1 = 7\np2 = \"Possible\"\nall_right = (disabled or KawigiEdit_RunTest(4, p0, p1, 
True, p2) ) and all_right\ntests_disabled = tests_disabled or disabled\n# ------------------\n\n# ----- test 5 -----\ndisabled = False\np0 = 10\np1 = 10\np2 = \"Impossible\"\nall_right = (disabled or KawigiEdit_RunTest(5, p0, p1, True, p2) ) and all_right\ntests_disabled = tests_disabled or disabled\n# ------------------\n\nif (all_right):\n\tif (tests_disabled):\n\t\tprint(str(\"You're a stud (but some test cases were disabled)!\"))\n\telse:\n\t\tprint(str(\"You're a stud (at least on given cases)!\"))\n\t\nelse:\n\tprint(str(\"Some of the test cases had errors.\"))\n\n# PROBLEM STATEMENT\n# Fox Ciel is now in high school.\n# The seats in her classroom are arranged into an n by m matrix.\n# The rows are numbered from 0 to n-1 (front to back) and the columns from 0 to m-1 (left to right).\n# \n# \n# \n# At the beginning, Ciel can choose any of the seats.\n# Then, at the end of each week Ciel will shift one row to the back and one column to the right, wrapping around whenever necessary.\n# Formally, if her current seat is in row r and column c, then her seat next week will be the one in row ((r+1) modulo n) and column ((c+1) modulo m).\n# \n# \n# \n# Fox Ciel now wonders whether she can sit in all the seats in the classroom if she follows the above procedure.\n# As we already mentioned, she can start in any of the seats.\n# Also, she can attend the school for as many weeks as she wants to.\n# Return \"Possible\" if she can sit in all the seats and \"Impossible\" otherwise.\n# \n# DEFINITION\n# Class:FoxAndClassroom\n# Method:ableTo\n# Parameters:integer, integer\n# Returns:string\n# Method signature:def ableTo(self, n, m):\n# \n# \n# CONSTRAINTS\n# -n will be between 2 and 10, inclusive.\n# -m will be between 2 and 10, inclusive.\n# \n# \n# EXAMPLES\n# \n# 0)\n# 2\n# 3\n# \n# Returns: \"Possible\"\n# \n# We will use (r,c) to denote the chair at row r, column c.\n# Suppose Ciel starts at (1,0).\n# In the following weeks she will then sit at (0,1), (1,2), (0,0), (1,1), (0,2), (1,0) again, (0,1) again, and so on.\n# We can see that already after 6 weeks Ciel sat in all the seats.\n# \n# 1)\n# 2\n# 2\n# \n# Returns: \"Impossible\"\n# \n# Suppose that she starts at (0,0).\n# Then the next week she will sit at (1,1) and the week after that she will be back at (0,0).\n# She would never sit at (0,1) and (1,0).\n# Similarly we can show that none of the other starting positions work.\n# \n# 2)\n# 4\n# 6\n# \n# Returns: \"Impossible\"\n# \n# \n# \n# 3)\n# 3\n# 6\n# \n# Returns: \"Impossible\"\n# \n# \n# \n# 4)\n# 5\n# 7\n# \n# Returns: \"Possible\"\n# \n# \n# \n# 5)\n# 10\n# 10\n# \n# Returns: \"Impossible\"\n# \n# \n# \n# END KAWIGIEDIT TESTING\n#Powered by KawigiEdit-pf 2.3.0!\n","sub_path":"594_div2/FoxAndClassroom.py","file_name":"FoxAndClassroom.py","file_ext":"py","file_size_in_byte":5214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"98546551","text":"#===============================================================================\n# Created on 1 déc. 
2016\n# @author: Matthieu\n#===============================================================================\n\nfrom rest_framework import permissions\n\n\nclass IsAdminOrReadOnly(permissions.IsAdminUser):\n \"\"\"\n Custom permission to only allow ADMIN members to edit objects\n \"\"\"\n def has_permission(self, request, view):\n \n # Read permission are allowed to any request,\n # so we'll always allow GET, HEAD or OPTIONS requests.\n if request.method in permissions.SAFE_METHODS:\n return True\n \n # Python3: is_admin = super().has_permission(request, view)\n is_admin = super(IsAdminOrReadOnly, self).has_permission(request, view)\n \n return is_admin","sub_path":"wog_exercise/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"402109169","text":"import jax\nfrom jax import numpy as jnp\nfrom datetime import datetime\nimport pandas as pd\n\n\n@jax.jit\ndef beale(x: jnp.array) -> jnp.array:\n return jnp.power(1.5 - x[0] + x[0] * x[1], 2) + \\\n jnp.power(2.25 - x[0] + x[0] * x[1] * x[1], 2) + \\\n jnp.power(2.625 - x[0] + x[0] * x[1] * x[1] * x[1], 2)\n\n\n@jax.jit\ndef rosenbrock(x: jnp.array) -> jnp.array:\n return jnp.power(1. - x[0], 2) + \\\n 100. * jnp.power(x[1] - x[0] * x[0], 2)\n\n\ndef fwd_grad(f, x0, key):\n t = jax.random.normal(key, shape=x0.shape)\n return jax.jvp(f, (x0,), (t,))[1] * t\n\n\ndef run_test(f, f_grad, x0: jnp.array, n_iter: int, n_trials: int, learning_rate: float) -> pd.DataFrame:\n data = []\n for j in range(n_trials):\n key = jax.random.PRNGKey(j)\n start = datetime.now()\n x = x0\n for i in range(n_iter):\n key, key_grad = jax.random.split(key)\n data.append(\n [j, i, (datetime.now() - start).total_seconds(), float(f(x))])\n x = x - learning_rate * f_grad(x, key_grad)\n df = pd.DataFrame(data, columns=['trial', 'iter', 'time', 'f'])\n return df\n\n\nif __name__ == '__main__':\n\n x0 = jnp.array([0., 0.5])\n n_iter = 1000\n learning_rate = 0.01\n df_fwd = run_test(beale, lambda x, key: fwd_grad(\n beale, x0=x, key=key), x0, n_iter, 10, learning_rate)\n df_bwd = run_test(beale, lambda x, key: jax.grad(beale)\n (x), x0, n_iter, 1, learning_rate)\n df_fwd['kind'] = 'fwd'\n df_bwd['kind'] = 'bwd'\n df = pd.concat([df_fwd, df_bwd])\n df.to_csv('./logs/fmin_beale.csv')\n\n x0 = jnp.array([-1., 0.])\n n_iter = 25000\n learning_rate = 5 * 10 ** -4\n df_fwd = run_test(rosenbrock, lambda x, key: fwd_grad(\n rosenbrock, x0=x, key=key), x0, n_iter, 10, learning_rate)\n df_bwd = run_test(rosenbrock, lambda x, key: jax.grad(\n rosenbrock)(x), x0, n_iter, 1, learning_rate)\n df_fwd['kind'] = 'fwd'\n df_bwd['kind'] = 'bwd'\n df = pd.concat([df_fwd, df_bwd])\n df.to_csv('./logs/fmin_rosenbrock.csv')\n","sub_path":"benchmarks/function_minimization.py","file_name":"function_minimization.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"134932830","text":"\"\"\"\nccextractor-web | parsers.py\n\nAuthor : Saurabh Shrivastava\nEmail : saurabh.shrivastava54+ccextractorweb[at]gmail.com\nLink : https://github.com/saurabhshri\n\n\"\"\"\n\nimport json\n\n\nclass ParseJob():\n def __init__(self, job_file):\n self.job_config = {}\n\n with open(job_file, 'r', encoding=\"utf-8\") as f:\n self.job_config = json.load(f)\n\n self.ccextractor_executable = self.job_config['executable_path']\n self.filename = self.job_config['filename']\n self.job_number = 
self.job_config['job_number']\n self.parameters = self.job_config['parameters']\n self.platform = self.job_config['platform']\n self.token = self.job_config['token']\n self.output_file_extension = self.job_config['output_file_extension']\n\n def get_job_config(self):\n return self.job_config\n\n\nclass ParseParameters():\n def __init__(self, argv):\n self.paramters = {}\n\n while argv:\n if argv[0][0] == '-':\n self.paramters[argv[0]] = argv[1]\n argv = argv[1:]\n\n self.job_dir = self.paramters['-jobDir']\n self.output_dir = self.paramters['-outputDir']\n self.archive_dir = self.paramters['-archiveDir']\n self.ccextractor_binaries_dir = self.paramters['-ccextractorBinariesDir']\n self.log_dir = self.paramters['-logDir']\n self.report_url = self.paramters['-reportURL']\n\n def get_raw_parameters(self):\n return self.paramters\n\n\nclass ParseCCExtractorParameters():\n def __init__(self, params):\n params = json.loads(params)\n self.params_list = []\n\n for key, value in params.items():\n self.params_list.append(key)\n if value:\n self.params_list.append(value)\n","sub_path":"daemon/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"176881455","text":"#!/usr/bin/python\n\nimport sys\nimport os\nimport array\nimport struct\nimport pyaudio\nimport wave\n\nTHRESHOLD = 1000\nRATE = 44100\nCHUNK_TIME = 100\nCHUNK_SIZE = int(RATE/(1000./CHUNK_TIME))\nSILENT_TIME, MINIMUM_TIME, MAXIMUM_TIME, ENDS_TIME = 2000, 1000, 55000, 200\nFORMAT = pyaudio.paInt16\nOUTPUT_FOLDER, FILENAME, EXTENSION = '.', 'demo', '.wav'\n\n\ndef is_silent(snd_data):\n \"Returns 'True' if below the 'silent' threshold\"\n #return max(snd_data) < THRESHOLD\n return sum([abs(x) for x in snd_data])/len(snd_data) < THRESHOLD\n\ndef normalize(snd_data):\n \"Average the volume out\"\n MAXIMUM = 16384\n times = float(MAXIMUM)/max(abs(i) for i in snd_data)\n r = array.array('h')\n for i in snd_data:\n r.append(int(i*times))\n return r\n\ndef trim(snd_data):\n \"Trim the blank spots at the start and end\"\n def _trim(snd_data):\n snd_started = False\n r = array.array('h')\n for i in snd_data:\n if not snd_started and abs(i)>THRESHOLD:\n snd_started = True\n r.append(i)\n\n elif snd_started:\n r.append(i)\n return r\n # Trim to the left\n snd_data = _trim(snd_data)\n # Trim to the right\n snd_data.reverse()\n snd_data = _trim(snd_data)\n snd_data.reverse()\n return snd_data\n\ndef add_silence(snd_data, seconds):\n \"Add silence to the start and end of 'snd_data' of length 'seconds' (float)\"\n r = array.array('h', [0 for i in range(int(seconds*RATE))])\n r.extend(snd_data)\n r.extend([0 for i in range(int(seconds*RATE))])\n return r\n\ndef record():\n \"\"\"\n Record a word or words from the microphone and\n return the data as an array of signed shorts.\n Normalizes the audio, trims silence from the\n start and end, and pads with 0.5 seconds of\n blank sound to make sure VLC et al can play\n it without getting chopped off.\n \"\"\"\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=1, rate=RATE, input=True, output=True, frames_per_buffer=CHUNK_SIZE)\n num_silent, num_audio = 0, 0\n to_exit, snd_started = False, False\n r = array.array('h')\n while 1:\n # little endian, signed short\n snd_data = array.array('h', stream.read(CHUNK_SIZE))\n if sys.byteorder == 'big':\n snd_data.byteswap()\n silent = is_silent(snd_data)\n #print(max(snd_data),int(sum([abs(x) for x in snd_data])/len(snd_data)))\n 
if not snd_started:\n if silent:\n num_silent += 1\n else:\n print('\\tStarted.')\n snd_started = True\n num_silent = 0\n num_audio = 1\n r.extend(snd_data)\n else:\n num_audio += 1\n r.extend(snd_data)\n if silent:\n num_silent += 1\n if num_silent*CHUNK_TIME>=SILENT_TIME:\n to_exit = True\n else:\n if num_audio*CHUNK_TIME>=MAXIMUM_TIME:\n to_exit = True\n if num_audio*CHUNK_TIME>=MINIMUM_TIME and to_exit:\n print('\\tStopped.')\n break\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n #r = normalize(r)\n #r = trim(r)\n #r = add_silence(r, 0.5)\n return sample_width, r\n\ndef record_to_file(path):\n \"Records from the microphone and outputs the resulting data to 'path'\"\n sample_width, data = record()\n t = float(len(data))/RATE\n data = struct.pack('<' + ('h'*len(data)), *data)\n wf = wave.open(path, 'wb')\n wf.setnchannels(1)\n wf.setsampwidth(sample_width)\n wf.setframerate(RATE)\n wf.writeframes(data)\n wf.close()\n return t\n\ndef main():\n # declare the module-level names; otherwise the conditional assignment below\n # would make them locals, and FILENAME could be referenced before assignment\n global OUTPUT_FOLDER, FILENAME\n if __name__ == '__main__':\n if len(sys.argv)>2:\n OUTPUT_FOLDER = sys.argv[1]\n FILENAME = sys.argv[2]\n filepath=None\n if EXTENSION in FILENAME:\n filepath=OUTPUT_FOLDER+'/'+FILENAME\n else:\n filepath=OUTPUT_FOLDER+'/'+FILENAME+EXTENSION\n t=record_to_file(filepath)\n print('\\tDone - '+filepath+'({:.2f}s).'.format(t))\n\n\ndef demo():\n if __name__ == '__main__':\n try:\n print('Start recording.')\n files = os.listdir(OUTPUT_FOLDER)\n for file in files:\n if file.endswith(EXTENSION):\n # join with the folder, since the listing came from OUTPUT_FOLDER\n os.remove(os.path.join(OUTPUT_FOLDER, file))\n file_idx=0\n while True:\n file_idx += 1\n t=record_to_file(OUTPUT_FOLDER+'/'+FILENAME+str(file_idx)+EXTENSION)\n print('\\tDone - '+OUTPUT_FOLDER+'/'+FILENAME+str(file_idx)+EXTENSION+'({:.2f}s).'.format(t))\n except KeyboardInterrupt:\n print('Stop recording.')\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0)\n\n\nmain()\n","sub_path":"detect_voice.py","file_name":"detect_voice.py","file_ext":"py","file_size_in_byte":4831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"262384586","text":"# See discussion and more examples at http://packages.python.org/pymqi/examples.html\n# or in doc/sphinx/examples.rst in the source distribution.\n\nimport pymqi\nimport CMQXC\n\nqueue_manager = \"QM01\"\nchannel = \"SVRCONN.1\"\nhost = \"192.168.1.135\"\nport = \"1434\"\nqueue_name = \"TEST.1\"\nmessage = \"Hello from Python!\" * 10000\nconn_info = \"%s(%s)\" % (host, port)\n\ncd = pymqi.CD()\ncd.MsgCompList[1] = CMQXC.MQCOMPRESS_ZLIBHIGH\n\nqmgr = pymqi.connect(queue_manager, channel, conn_info)\n\nqueue = pymqi.Queue(qmgr, queue_name)\nqueue.put(message)\nqueue.close()\n\nqmgr.disconnect()\n","sub_path":"code/examples/channel_compression.py","file_name":"channel_compression.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
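# --- Editor's note: illustrative addition between records, not part of the dataset. ---
# A matching consumer sketch for the pymqi record above; the connection details
# repeat that example's made-up values. Channel compression is negotiated per
# channel, so reading the message back needs no extra options.
import pymqi

queue_manager = "QM01"
channel = "SVRCONN.1"
conn_info = "192.168.1.135(1434)"
queue_name = "TEST.1"

qmgr = pymqi.connect(queue_manager, channel, conn_info)
queue = pymqi.Queue(qmgr, queue_name)
print(len(queue.get()))  # length of the message put by the example above
queue.close()
qmgr.disconnect()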
All rights reserved.\n# This file is part of TBD (see TBD).\n# See the LICENSE file for licensing terms (TBD).\n#\n\nimport torch\nfrom ocropus import ocrorec\nfrom ocropus import slog\nfrom ocropus import models\n\ndef test_linetrainer():\n with open(\"models/linelstm.py\") as stream:\n text = stream.read()\n mmod = slog.load_module(\"mmod\", text)\n model = mmod.make_model(96)\n trainer = ocrorec.TextTrainer(model)\n trainer.set_lr(1e-3)\n xs = torch.zeros((1, 1, 48, 277))\n ys = [torch.tensor([0, 1, 0])]\n trainer.train_batch(xs, ys)\n\n\ndef test_linetrainer_210910():\n model = models.text_model_210910()\n trainer = ocrorec.TextTrainer(model)\n trainer.set_lr(1e-3)\n xs = torch.zeros((1, 1, 48, 277))\n ys = [torch.tensor([0, 1, 0])]\n trainer.train_batch(xs, ys)","sub_path":"ocropus/tests/test_ocrorec.py","file_name":"test_ocrorec.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"331648389","text":"import os\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\nSQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')\nCSRF_ENABLED = True\nSECRET_KEY = b'}?\\x06$\\xa5\\xcf]\\xd8\\xe1\\x8e[\\xcdX\\x0c\\x85m\\xee\\xd1\\xf2\\x8b\\xa2\\x1c+\\xd4'\nDEBUG = False\nDEBUG_TB_INTERCEPT_REDIRECTS = False\n\n\n'''\nIn the future, generation with the help of services could be added:\n{\n \"name\": \"app_name\",\n \"title\": \"app_title\",\n \"service\": {\n \"name\": \"service_name\",\n # a plain GET request to the service (http://ip:port/my/path)\n \"use_get\": True # or \"method\": \"service_method\"\n },\n \"routes\": {\n \"some/path1\": \"apps/app_name/file1.html\n ...\n }\n}\n'''\n\n\nAPPS = [\n {\n \"category\": \"dev\",\n \"name\": \"onlide\",\n \"title\": \"Onlide online development environment\",\n \"index_file_path\": \"index.html\",\n \"need_auth\": True\n },\n\n {\n \"category\": \"dev\",\n \"name\": \"empty\",\n \"title\": \"Empty application\",\n \"index_file_path\": \"index.html\",\n \"need_auth\": True\n },\n\n {\n \"category\": \"dev\",\n \"name\": \"empty\",\n \"title\": \"Empty application\",\n \"index_file_path\": \"index.html\",\n \"need_auth\": True\n },\n\n {\n \"category\": \"dev\",\n \"name\": \"empty\",\n \"title\": \"Empty application\",\n \"index_file_path\": \"index.html\",\n \"need_auth\": True\n },\n\n {\n \"category\": \"dev\",\n \"name\": \"empty\",\n \"title\": \"Empty application\",\n \"index_file_path\": \"index.html\",\n \"need_auth\": True\n }\n]\n\nSERVICES = {\n \"storage\": {\n \"url\": \"http://127.0.0.1:9494\",\n \"type\": \"jsonrpc2\",\n \"add_login\": True,\n \"on_add_user\": \"on_add_user\",\n \"on_delete_user\": \"on_delete_user\",\n \"on_update_user\": \"on_update_user\"\n },\n\n \"rcr\": {\n \"url\": \"http://127.0.0.1:9090\",\n \"type\": \"jsonrpc2\",\n \"add_login\": True,\n \"key\": \"lw-r2=2+=qKp[w[/',views.PostDetailView.as_view(template_name='post_detail.html'),name='post_detail'),\n path('post/new/',views.CreatePostView.as_view(),name='post_new'),\n path('post/<int:pk>/edit/',views.PostUpdateView.as_view(template_name='post_form.html'),name='post_edit'),\n path('post/<int:pk>/remove/',views.PostDeleteView.as_view(template_name='post_confirm_delete.html'),name='post_remove'),\n path('drafts/',views.DraftListView.as_view(),name='post_draft_list'),\n path('post/<int:pk>/comment/',views.add_comment_to_post, name='add_comment_to_post'),\n path('comment/<int:pk>/approve/',views.comment_approve, name='comment_approve'),\n path('comment/<int:pk>/remove/',views.comment_remove, name='comment_remove'),\n 
path('comment/<int:pk>/publish/',views.post_publish, name='post_publish'),\n]","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"285476384","text":"import os\nimport discord\nfrom discord.ext import commands\nimport json\nimport random\nfrom discord.ext.commands import Bot\n\nwith open('C:\\\\Discord Bot\\\\Project_1\\\\a1itt1eB0t\\\\setting.json','r',encoding='utf8') as jfile:\n jdata = json.load(jfile)\n\nintents = discord.Intents.all()\n\nbot = commands.Bot(command_prefix='?',intents=intents)\n\n@bot.event\nasync def on_ready():\n print(\"on_ready\")\n channel = bot.get_channel(int(jdata['A1itt1eB0t']))\n await channel.send(\"a1itt1eB0t just woke up!!!\")\n\nfor filename in os.listdir('C:\\\\Discord Bot\\\\Project_1\\\\a1itt1eB0t\\\\cmds'):\n if filename.endswith('.py'):\n bot.load_extension(f'cmds.{filename[:-3]}')\n\nfor filename in os.listdir('C:\\\\Discord Bot\\\\Project_1\\\\a1itt1eB0t\\\\onmsg'):\n if filename.endswith('.py'):\n bot.load_extension(f'onmsg.{filename[:-3]}')\n\nif __name__ == \"__main__\":\n bot.run(jdata['TOKEN'])","sub_path":"a1itt1eb0t.py","file_name":"a1itt1eb0t.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"436049144","text":"import db_ops # assumed project-local helper providing dbncol()\nfrom pymongo.errors import DuplicateKeyError\n\ndef load_weather(data, client, database, collection):\n ''' Load data to specified database collection. This determines the\n appropriate way to process the load depending on the collection to which it\n should be loaded. Data is expected to be a weather-type dictionary. When\n the collection is \"instants\" the data is appended to the specified object's\n forecasts array in the instants collection; when the collection is either\n \"forecasted\" or \"observed\" the object is inserted uniquely to the\n specified collection. 
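For example (an illustrative sketch only; the client and the database\n name here are assumptions, not part of this module):\n\n from pymongo import MongoClient\n client = MongoClient()\n cast = {'zipcode': '27606', 'instant': 1590000000, 'temp': 290.1}\n load_weather(cast, client, database='weather', collection='forecasted')\n\n 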
Also checks for a preexisting document with the same\n instant and zipcode, then updates it in the case that there was already\n one there.\n\n :param data: the dictionary created from the api calls\n :type data: dict\n :param client: a MongoClient instance\n :type client: pymongo.MongoClient\n :param database: the database to be used\n :type database: str\n :param collection: the database collection to be used\n :type collection: str\n ''' \n col = db_ops.dbncol(client, collection, database=database)\n # decide how to handle the loading process depending on where the document\n # will be loaded.\n if collection == 'instant' or collection == 'test_instants' or collection == 'instant_temp':\n \n # set the appropriate database collections, filters and update types\n if \"Weather\" in data:\n filters = {'zipcode':data['Weather'].pop('zipcode'),\n 'instant':data['Weather'].pop('instant')}\n updates = {'$set': {'weather': data['Weather']}}\n else:\n filters = {'zipcode':data.pop('zipcode'),\n 'instant':data.pop('instant')}\n updates = {'$push': {'forecasts': data}} # append to forecasts list\n \n # Now attempt to load the data using the filters and updates.\n try:\n col.find_one_and_update(filters, updates, upsert=True)\n except DuplicateKeyError:\n return(f'DuplicateKeyError, could not insert data to {collection}')\n \n elif collection == 'observed'\\\n or collection == 'forecasted'\\\n or collection == 'obs_temp'\\\n or collection == 'cast_temp':\n try:\n col.insert_one(data)\n except DuplicateKeyError:\n return(f'DuplicateKeyError, could not insert data to {collection}')\n","sub_path":"cron/request_and_load.py","file_name":"request_and_load.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"524758542","text":"import os\nimport random\nimport logging\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nimport ipyvolume as ipv\nimport ipywidgets as widgets\n\nfrom config import config\n# from preprocess import match_hits_and_particles\ndef match_hits_and_particles(hits, particles, truth):\n particles = particles[particles.particle_id != 0]\n\n particle_ids = particles.particle_id.tolist()\n\n truth_of_particles = truth[truth.particle_id.isin(particle_ids)]\n\n hits_and_particles = pd.merge(truth_of_particles, hits, on='hit_id', how='inner')\n\n return hits_and_particles\n\ndisplay = \"DISPLAY\" in os.environ\nif not display:\n print('No display server, will save files')\n matplotlib.use('Agg')\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndef display_img(name):\n plt.title(name)\n if display:\n plt.show()\n else:\n plt.savefig(os.path.join('./img', name))\n\n plt.close()\n\ndef ground_truth_tracks(df):\n logging.info('Plotting ground truth tracks') \n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n\n ax.set_xlim3d(-1000, 1000)\n ax.set_ylim3d(-1000, 1000)\n ax.set_zlim3d(0, 6000)\n\n for particle in df['particle_id'].unique():\n hit = df[df['particle_id'] == particle]\n \n ax.scatter(hit.x, hit.y, hit.z, marker='o')\n ax.plot(hit.x, hit.y, hit.z)\n\n ax.set_xlabel('X Label')\n ax.set_ylabel('Y Label')\n ax.set_zlabel('Z Label')\n display_img('ground-truth-tracks')\n\ndef predicted_tracks(df, preds):\n logging.info('Plotting predicted tracks') \n\n df['track_id'] = preds\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n\n ax.set_xlim3d(-1000, 1000)\n ax.set_ylim3d(-1000, 1000)\n ax.set_zlim3d(0, 6000)\n\n for i, 
track in enumerate(df['track_id'].unique()):\n hit = df[df['track_id'] == track]\n \n if len(hit) > 1:\n ax.scatter(hit.x, hit.y, hit.z, marker='o')\n ax.plot(hit.x, hit.y, hit.z)\n\n ax.set_xlabel('X Label')\n ax.set_ylabel('Y Label')\n ax.set_zlabel('Z Label')\n display_img('predicted-tracks')\n\ndef cylindrical_ground_truth_tracks(df):\n logging.info('Plotting tracks in cylindrical coordinates') \n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n\n for particle in df['particle_id'].unique():\n hit = df[df['particle_id'] == particle]\n\n ax.scatter(hit.rho, hit.s, hit.c, marker='o')\n ax.plot(hit.rho, hit.s, hit.c)\n\n ax.set_xlabel('Rho')\n ax.set_ylabel('S')\n ax.set_zlabel('C ')\n\n display_img('cylindrical-ground-truth-tracks')\n\ndef cylindrical_flattened_ground_truth_tracks(df):\n logging.info('Plotting flattened tracks in cylindrical coordinates') \n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n for particle in df['particle_id'].unique():\n hit = df[df['particle_id'] == particle]\n\n ax.scatter(hit.rho, hit.phi, marker='o')\n ax.plot(hit.rho, hit.phi)\n\n ax.set_xlabel('Rho')\n ax.set_ylabel('Phi')\n\n display_img('cylindrical-flattened-ground-truth-tracks')\n\ndef triples(data, mode):\n logging.info('Plotting ' + mode + ' from triples')\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n if mode == 'true_tracks':\n data = data[data['true_track'] == 1]\n elif mode == 'false_tracks':\n data = data[data['true_track'] == 0]\n else:\n logging.info('You must choose a mode')\n\n for i in range(len(data)):\n track = data.iloc[i]\n\n ax.scatter(track['rho-1'], track['phi-1'], marker='o')\n ax.scatter(track['rho-2'], track['phi-2'], marker='o')\n ax.scatter(track['rho-3'], track['phi-3'], marker='o')\n ax.plot([track['rho-1'], track['rho-2'], track['rho-3']], [track['phi-1'], track['phi-2'], track['phi-3']])\n\n ax.set_xlabel('Rho')\n ax.set_ylabel('Phi')\n\n display_img('training-data-' + mode) \n\ndef visualize(hits, particles, truth, data, preds=np.array([])):\n hits_and_particles = match_hits_and_particles(hits, particles, truth)\n\n logging.info('-' * 50)\n logging.info('Visualizing')\n\n ground_truth_tracks(hits_and_particles)\n cylindrical_ground_truth_tracks(hits_and_particles)\n cylindrical_flattened_ground_truth_tracks(hits_and_particles)\n triples(data, 'true_tracks')\n triples(data, 'false_tracks')\n \n if preds.size != 0:\n predicted_tracks(hits, preds)","sub_path":"old/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":4403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"479346138","text":"import os\nimport pickle\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport uabRepoPaths\nimport sis_utils\n\nrun_ids = [0, 1, 2, 3, 4]\nbatch_sizes = [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]\npatch_sizes = [232, 248, 264, 276, 300, 321, 368, 424, 520, 736]\nresult_all = np.zeros((len(batch_sizes), len(run_ids)))\ncity_res = np.zeros((len(batch_sizes), 5, len(run_ids)))\ncity_dict = {'austin':0, 'chicago':1, 'kitsap':2, 'tyrol-w':3, 'vienna':4}\n\n\ndef get_results(file_str):\n for cnt_1, run_id in enumerate(run_ids):\n for cnt_2, (batch_size, patch_size) in enumerate(zip(batch_sizes, patch_sizes)):\n model_name = \\\n 'DeeplabV3_res101_inria_aug_psbs_{}_PS({}, {})_BS{}_EP100_LR1e-05_DS40_DR0.1_SFN32'.\\\n format(run_id, patch_size, patch_size, batch_size)\n res_path = os.path.join(uabRepoPaths.evalPath, file_str, model_name, 'inria', 'result.txt')\n with 
open(res_path, 'r') as f:\n results = f.readlines()\n\n mean_iou = 0\n for item in results:\n city_name = item.split(' ')[0]\n if len(item.split(' ')) == 1:\n mean_iou = float(item) * 100\n continue\n A, B = item.split('(')[1].strip().strip(')').split(',')\n iou = float(A)/float(B) * 100\n city_res[cnt_2, city_dict[city_name[:-1]], cnt_1] = iou\n result_all[cnt_2, cnt_1] = mean_iou\n result_mean = np.mean(result_all, axis=1)\n result_var = np.var(result_all, axis=1)\n result_up = np.max(result_all, axis=1)\n result_down = np.min(result_all, axis=1)\n return result_mean, result_var, result_up, result_down\n\n\nmatplotlib.rcParams.update({'font.size': 18})\nfig = plt.figure(figsize=(12, 5))\nind = np.arange(len(batch_sizes))\n\nax1 = plt.subplot()\n#result_mean, result_var, result_up, result_down = get_results('fix_pixel_fix_test')\n#ax1.errorbar(patch_sizes, result_mean[::-1], yerr=result_var, uplims=result_up, lolims=result_down, label='test size=496')\nresult_mean, result_var, result_up, result_down = get_results('fix_pixel')\nax1.errorbar(patch_sizes, result_mean[::-1], yerr=result_var, uplims=result_up, lolims=result_down, label='test size=496')\n\nax2 = ax1.twinx()\nax2.plot(patch_sizes, np.array(batch_sizes)*(np.array(patch_sizes)/4)**2, 'g.--')\nax2.set_ylim(31000, 36000)\nax2.tick_params('y', colors='g')\nax2.set_ylabel('#pixels', color='g')\n\nplt.grid('on')\nplt.xticks(patch_sizes, patch_sizes)\nax1.tick_params(axis='x', labelsize=14)\nfig.autofmt_xdate(rotation=40)\nax1.set_xlabel('Patch Size')\nax1.set_ylabel('Mean IoU')\nplt.title('DeepLab on Inria')\nax1.legend()\nplt.tight_layout()\n\nimg_dir, task_dir = sis_utils.get_task_img_folder()\nwith open(os.path.join(task_dir, 'deeplab_inria_fixpixel.npy'), 'wb') as pk:\n pickle.dump([result_mean[::-1], result_var, result_up, result_down, batch_sizes, patch_sizes], pk)\nplt.savefig(os.path.join(img_dir, 'deeplab_inria_fixpixel.png'))\n\nplt.show()\n","sub_path":"]tasks/2018.01.23.score_results/plot_inria_deeplab_fix_pixel.py","file_name":"plot_inria_deeplab_fix_pixel.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"474912502","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/ScoutSuite/providers/aws/facade/ses.py\n# Compiled at: 2020-04-02 05:37:10\n# Size of source mod 2**32: 2173 bytes\nfrom ScoutSuite.core.console import print_exception\nfrom ScoutSuite.providers.aws.facade.basefacade import AWSBaseFacade\nfrom ScoutSuite.providers.aws.facade.utils import AWSFacadeUtils\nfrom ScoutSuite.providers.utils import map_concurrently\nfrom ScoutSuite.providers.utils import run_concurrently\n\nclass SESFacade(AWSBaseFacade):\n\n async def get_identities(self, region: str):\n try:\n identity_names = await AWSFacadeUtils.get_all_pages('ses', region, self.session, 'list_identities', 'Identities')\n return await map_concurrently((self._get_identity_dkim_attributes), identity_names, region=region)\n except Exception as e:\n try:\n print_exception('Failed to get SES identities: {}'.format(e))\n return []\n finally:\n e = None\n del e\n\n async def _get_identity_dkim_attributes(self, identity_name: str, region: str):\n ses_client = AWSFacadeUtils.get_client('ses', self.session, region)\n try:\n dkim_attributes = await run_concurrently(lambda : 
ses_client.get_identity_dkim_attributes(Identities=[identity_name])['DkimAttributes'][identity_name])\n except Exception as e:\n try:\n print_exception('Failed to get SES DKIM attributes: {}'.format(e))\n raise\n finally:\n e = None\n del e\n\n return (\n identity_name, dkim_attributes)\n\n async def get_identity_policies(self, region: str, identity_name: str):\n ses_client = AWSFacadeUtils.get_client('ses', self.session, region)\n try:\n policy_names = await run_concurrently(lambda : ses_client.list_identity_policies(Identity=identity_name)['PolicyNames'])\n except Exception as e:\n try:\n print_exception('Failed to list SES policies: {}'.format(e))\n policy_names = []\n finally:\n e = None\n del e\n\n if len(policy_names) == 0:\n return {}\n try:\n return await run_concurrently(lambda : ses_client.get_identity_policies(Identity=identity_name, PolicyNames=policy_names)['Policies'])\n except Exception as e:\n try:\n print_exception('Failed to get SES policies: {}'.format(e))\n return\n finally:\n e = None\n del e","sub_path":"pycfiles/ScoutSuite-5.8.1-py3.7/ses.cpython-37.py","file_name":"ses.cpython-37.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"153388005","text":"import sys\n\nsys.stdin = open(\"input.txt\", \"r\")\n\n# 0: 위 1: 아래 2: 좌 3: 우\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\n\ndirection = [\n [0, 0, 0, 0],\n [1, 3, 0, 2],\n [3, 0, 1, 2],\n [2, 0, 3, 1],\n [1, 2, 3, 0],\n [1, 0, 3, 2]\n]\n\n\ndef dfs(x, y, d):\n start, end = x, y\n nx, ny = x, y\n cnt = 0\n while True:\n nx, ny = nx + dx[d], ny + dy[d]\n if (nx == start and ny == end) or ball_map[nx][ny] == -1: return cnt\n if ball_map[nx][ny] == 0: continue\n if 1 <= ball_map[nx][ny] <= 5:\n d = direction[ball_map[nx][ny]][d]\n cnt += 1\n elif ball_map[nx][ny] >= 6:\n h_idx = ball_map[nx][ny] - 6\n if hall[h_idx][0][0] == nx and hall[h_idx][0][1] == ny:\n nx, ny = hall[h_idx][1]\n else:\n nx, ny = hall[h_idx][0]\n\n\nfor tc in range(1, int(input()) + 1):\n N = int(input())\n ball_map = [[5] * (N + 2) for _ in range(N + 2)]\n hall = [[] * 2 for _ in range(5)]\n for i in range(1, N + 1):\n temp = list(map(int, input().rstrip().split()))\n for j in range(N):\n ball_map[i][j + 1] = temp[j]\n if temp[j] >= 6:\n hall[temp[j] - 6].append([i, j + 1])\n ans = 0\n\n for i in range(1, N+1):\n for j in range(1, N+1):\n if ball_map[i][j] == 0:\n for d in range(4):\n ans = max(ans, dfs(i, j, d))\n\n print(f'#{tc} {ans}')\n","sub_path":"study_0529/5650_pinball.py","file_name":"5650_pinball.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"297019084","text":"import matplotlib.pylab as plt\nimport numpy as np\nimport music21 as M2\n# cond_file = r\"E:\\Datasets\\classical-music-midi\\mozart\\mz_332_2.mid\"\n# mid = pretty_midi.PrettyMIDI(midi_file=cond_file)\n# ps_roll = mid.get_piano_roll()\nfs = 1 / 100.0\ndef plot_piano_roll(ps_roll, fs = 1/100.0):\n ps_uniq = ps_roll.nonzero()[0]\n minps = ps_uniq.min()\n maxps = ps_uniq.max()\n minps = np.int(np.floor(minps / 12)) * 12\n maxps = np.int(np.ceil(maxps / 12)) * 12\n maxT = ps_roll.shape[1] * fs\n # octv_ticks = list(range(int(minps), int(maxps), 12))\n octv_ticks = list(range(int(0), int(120), 12))\n T_ticks = list(range(0, int(maxT), 10))\n figh = plt.figure(figsize=[0.15*maxT, 7 / 128 * (maxps - minps)])\n plt.imshow(ps_roll[:, :], cmap='gray', aspect='auto')\n plt.hlines(octv_ticks, 
plt.xlim()[0], plt.xlim()[1], alpha=0.30, colors='white')\n plt.gca().invert_yaxis()\n plt.yticks(octv_ticks, [M2.pitch.Pitch(p).nameWithOctave for p in octv_ticks])\n plt.xticks([t / fs for t in T_ticks], T_ticks)\n figh.gca().set_ylim(minps, maxps)\n #figh.show()\n return figh","sub_path":"visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"590314163","text":"#script arguments, path\nimport sys\nimport os\n\n#import data\nimport pandas as pd\n\n#numpy for array, matrix, ...\nimport numpy as np\n\n#For plotting results\nimport matplotlib.pyplot as plt \nimport matplotlib as mpl\nfrom matplotlib.ticker import FormatStrFormatter\nmpl.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\nmpl.rc('text', usetex=True)\n\n#local lib\nimport mplio\n\n'''\n\n'''\nclass Dat(object):\n filename=\"\"\n df=[]\n dat={}\n configName=''\n\n def __init__(self, filename):\n self.filename=filename\n self.name=os.path.splitext(os.path.basename(filename))[0]\n self.df=pd.read_csv(self.filename, sep=',',header=None)\n\n#open dataset\ndatas=[]\ndepths=np.array(0,float)\nfor el in sys.argv[1::]:\n datas.append(Dat(el))\n\n#Print simple histogram for datas[0]\ndat=datas[0].df.values[1:,1].astype(int)\nnbins=dat.ptp()\n#np.unique(x).size\nfig, ax = plt.subplots()\ncounts, bins, patches = plt.hist(dat, nbins, density=True, facecolor='g', alpha=0.75)\n\nplt.xlabel('Number of links in path')\nplt.ylabel('Relative amount')\nplt.title('Histogram of SSSP path size')\nplt.grid(True)\n\n# Set the ticks to be at the edges of the bins.\nax.set_xticks(bins)\nax.xaxis.set_ticks(np.arange(bins.min(),bins.max(),50))#bins.ptp()/25))\nax.yaxis.set_ticks(np.arange(0, 1, 0.05))\n# Set the xaxis's tick labels to be formatted with 1 decimal place...\nax.xaxis.set_major_formatter(FormatStrFormatter('%0.1f'))\n\n# Label the raw counts and the percentages below the x-axis...\n#bin_centers = 0.5 * np.diff(bins) + bins[:-1]\nperc = (counts.astype(float) / counts.sum())\nperc = perc[::-1].cumsum()[::-1]\nax2 = ax.twiny()\nax2.set_xticklabels([])\nax2.plot(perc, 'C1', label=\"Cumulative sum\")\n#ax2.tick_params('Relative amount (cumulative)', colors='C1')\n#for count, x, percent in zip(counts, bin_centers, perc):\n # Label the raw counts\n #ax.annotate(str(count), xy=(x, 0), xycoords=('data', 'axes fraction'),\n # xytext=(0, -18), textcoords='offset points', va='top', ha='center')\n\n # Label the percentages\n# ax.annotate('{:.1f}\\%'.format(percent), xy=(x, 0), xycoords=('data', 'axes fraction'),\n# xytext=(0, -32), textcoords='offset points', va='top', ha='center')\n\n\n# Give ourselves some more room at the bottom of the plot\n#plt.subplots_adjust(bottom=0.15)\nplt.legend()\nplt.show()\n","sub_path":"scripts/plotPathHistogram.py","file_name":"plotPathHistogram.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"22587926","text":"# num-of-ch.py\n\n'''\nTwo strings str1 and str2 are given. Find how many times each of the characters contained in str1 appears in str2, \nand write a program that prints the count of the most frequent such character.\nFor example, with str1 = “ABCA” and str2 = “ABABCA”, the A of str1 occurs 3 times in str2, \nso it is the most frequent character and 3 is printed.\nIn Python, a dictionary can be used.\n\n[Input]\n\nThe first line contains the number of test cases T. 1≤T≤50\nFrom the next line on, for each test case, a string str1 of length N and a string str2 of length M are given on separate lines. 
5≤N≤100, 10≤M≤1000, N≤M\n\n[Output]\n\nFor each line, print \"#T\" (where T is the test case number), then print the answer.\n'''\n\nimport sys\nsys.stdin = open('sample_input2.txt', 'r')\n\nT = int(input())\nfor testCase in range(1, T+1) :\n str1_set = set(input())\n str2_list = list(input())\n \n str_dict = {ch: str2_list.count(ch) for ch in str1_set}\n answer = max(str_dict.values())\n \n print('#{0} {1}'.format(testCase, answer))\n \n ","sub_path":"python/sw-academy-intermediate/string/num-of-ch.py","file_name":"num-of-ch.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"152193179","text":"'''\nExamples\ninputCopy\n5\na\naba\nabacaba\nba\naba\noutputCopy\nYES\na\nba\naba\naba\nabacaba\n'''\nall_str = []\nfor i in range(int(input())):\n all_str.append(input())\nall_str.sort(key = lambda s: len(s))\nans = \"YES\"\nfor i in range(1, len(all_str)):\n if not all_str[i-1] in all_str[i]:\n ans = \"NO\"\n break\nprint(ans)\nif ans == \"YES\":print(\"\\n\".join(all_str))\n\n\n\n\n\n","sub_path":"CodeForces/486_3_B_substring_sort.py","file_name":"486_3_B_substring_sort.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"568422255","text":"import os, sys, Character, GameState, Field, DataType, Terminal, time, Key\n\nif not any(\"SunCat\" in s for s in sys.path):\n\tsys.path.append(os.getcwd() + \"\\SunCat\")\n\ntry:\n\timport SunCat, SCHotkey, SCLib\nexcept:\n\tprint(\"Couldn't find SunCat module\")\n\n#Version V018\n#################################################################################################################################################\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#\n#\t\t\t\t\t\t\tBoldmold @ Gamekillers forums, Be sure to leave a like if you enjoy the script\t\t\t\t\t\t\t\t\t\t#\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#\n#################################################################################################################################################\n\n###NPC ChatKey, Key for harvesting###\thttps://docs.microsoft.com/nb-no/windows/desktop/inputdev/virtual-key-codes\nNpcChat = 0x20 #(Spacebar Default)\n\n###Enter the maps you want to check###\nmaps = [450005220,450005230,450005241] #List all the maps you want to check, You can list as many as you want.\nLastMapID = 450005241 #the MapID of the last map in the list above\n\n####Enter the IDs of the Herbs/Ores you want the script to look for and harvest.###\ncollectID = [200000,200001,200002,200003,200004,200005,200006,200007,200008,200009,200010,200011,200012,200013,100000,100001,100002,100003,100004,100005,100006,100007,100008,100009,100010,100011,100012,100013] #Enter CollectID's you want to harvest\n\n###list of maps, Add maps of your liking###\n#Arcana maps [450005530,450005550,450005520,450005510,450005500,450005440,450005431,450005432,450005430,450005420,450005412,450005411,450005410]\n#Temple of time maps [270010100,270010200,270010300,270010400,270010500,270020100,270020200,270020300,270020400,270020500,270030100,270030200,270030300,270030400,270030500]\n#Vanishing Journey maps [450001111,450001110,450001112,450001114,450001261,450001113,450001210,450001215,450001218,450001216,450001217,450001211,450001212,450001213,450001214,450001010,450001011,450001012,450001013,450001014,450001015,450001016,450001260]\n#Kerning Tower 5,6 floor 
[103041140,103041145,103041147,103041150,103041155,103041157]\n#Expert Harvesting Farm [910001014]\n\n\n###ID LIST###\n###Ores###\n#Silver Vein: 200000\n#Magenta Vein: 200001\n#Blue Vein: 200002\n#Brown Vein: 200003\n#Emerald Vein: 200004\n#Gold Vein: 200005\n#Aquamarine Vein: 200006\n#Red Vein: 200007\n#Black Vein: 200008\n#Purple Vein: 200009\n#Vein: 200010\n#Heartstone Vein: 200011\n#Mysterious : 200012\n#Legendary : 200013\n\n###Herbs###\n#Silver Herb: 100000\n#Magenta Herb: 100001\n#Blue Herb: 100002\n#Brown Herb: 100003\n#Emerald Herb: 100004\n#Gold Herb: 100005\n#Aquamarine Herb: 100006\n#Red Herb: 100007\n#Black Herb: 100008\n#Purple Herb: 100009\n#Herb: 100010\n#Heart Herb: 100011\n#Mysterious: 100012\n#Legendary: 100013\n\n###Expand the list for easier changes :)\n\n\n#########################################################################################################################################\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#\n#\t\t\t\t\t\t\t\t\t\t\t\tDo not change anything below this line!\t\t\t\t\t\t\t\t\t\t\t\t\t#\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#\n#########################################################################################################################################\n\n\nSCLib.PersistVar(\"noReactorInMap\", False)\nSCLib.PersistVar(\"MapNumber\", 0)\nSCLib.PersistVar(\"TeleportAttempt\", 0)\nSCLib.PersistVar(\"HarvestAttempt\", 0)\nSCLib.PersistVar(\"TeleportCount\",0)\nSCLib.StartVars(20)\nHasLooted = SCLib.GetVar(\"noReactorInMap\")\nTeleportAttempt = SCLib.GetVar(\"TeleportAttempt\")\nHarvestAttempt = SCLib.GetVar(\"HarvestAttempt\")\nfieldID = Field.GetID()\npos = Character.GetPos()\nCurrentChannel = GameState.GetChannel()\nNewChannel = CurrentChannel +1\n\ndef ChangeChannels():\n\tif CurrentChannel == 20:\n\t\tif fieldID == LastMapID:\n\t\t\tprint(\"Changing Channel to 1\")\n\t\t\ttime.sleep(0.5)\n\t\t\tTerminal.ChangeChannel(1)\n\t\t\ttime.sleep(3)\n\t\t\tprint(\"Resetting back to first map\")\n\t\t\tSCLib.UpdateVar(\"MapNumber\", 0)\n\t\t\ttime.sleep(3)\n\t\telse:\n\t\t\tprint(\"Changing channel to 1\")\n\t\t\ttime.sleep(0.5)\n\t\t\tTerminal.ChangeChannel(1)\n\t\t\ttime.sleep(3)\n\t\t\tSCLib.UpdateVar(\"MapNumber\", SCLib.GetVar(\"MapNumber\")+1)\n\t\t\tprint(\"Changing map to {0}\".format(SCLib.GetVar(\"MapNumber\")))\n\telse:\n\t\tprint(\"Changing channel to {0}\".format(NewChannel))\n\t\ttime.sleep(0.5)\n\t\tTerminal.ChangeChannel(NewChannel)\n\t\ttime.sleep(3)\ndef ResetTeleportAttempt():\n\tSCLib.UpdateVar(\"TeleportAttempt\", 0)\ndef ResetHarvestAttempt():\n\tSCLib.UpdateVar(\"HarvestAttempt\", 0)\nif GameState.IsInGame() and not Terminal.IsRushing():\n\ttime.sleep(1)\n\t\n\t\n\tfor harvest in collectID:\n\t\therbore = Field.FindReactor(harvest)\n\t\tif herbore.valid:\n\t\t\tprint(\"Found herb/Ore with ID {0}\".format(herbore.id))\n\t\t\tSCLib.UpdateVar(\"noReactorInMap\", False)\n\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"Did not find any herb/Ore with ID {0}\".format(harvest))\n\t\t\tSCLib.UpdateVar(\"noReactorInMap\", True)\n\tif herbore.valid:\n\t\tif Field.GetCharacterCount() != 0:\n\t\t\tChangeChannels()\n\t\telse:\n\t\t\tmaxX = herbore.x -1\n\t\t\tminX = herbore.x -60\n\t\t\tmaxY = herbore.y +10\n\t\t\tminY = herbore.y -10\n\t\t\tnewX = herbore.x -1\n\t\t\tnewY = herbore.y -9\n\t\t\tif pos.x < minX or pos.x > maxX or pos.y < minY or pos.y > maxY:\n\t\t\t\tif TeleportAttempt < 3:\n\t\t\t\t\tResetHarvestAttempt()\n\t\t\t\t\tprint(\"Teleporting Attempt 
{0}\".format(SCLib.GetVar(\"TeleportAttempt\")+1))\n\t\t\t\t\tCharacter.Teleport(newX, newY)\n\t\t\t\t\tSCLib.UpdateVar(\"TeleportAttempt\", SCLib.GetVar(\"TeleportAttempt\")+1)\n\t\t\t\t\tSCLib.UpdateVar(\"TeleportCount\", SCLib.GetVar(\"TeleportCount\")+1)\n\t\t\t\t\tprint(\"Has already teleported {} times in this map\".format(SCLib.GetVar(\"TeleportCount\")))\n\t\t\t\telse:\n\t\t\t\t\tif Terminal.GetCheckBox(\"Pet Item Teleport\"):\n\t\t\t\t\t\tTerminal.SetCheckBox(\"Pet Item Teleport\", False)\n\t\t\t\t\telse:\n\t\t\t\t\t\tResetTeleportAttempt()\n\t\t\t\t\t\ttime.sleep(0.5)\n\t\t\t\t\t\tChangeChannels()\n\t\t\telse:\n\t\t\t\tif not Terminal.GetCheckBox(\"Pet Item Teleport\"):\n\t\t\t\t\tTerminal.SetCheckBox(\"Pet Item Teleport\", True)\n\t\t\t\telse:\n\t\t\t\t\tResetTeleportAttempt()\n\t\t\t\t\tif HarvestAttempt < 4:\n\t\t\t\t\t\tprint(\"Harvesting attempt {0}\".format(SCLib.GetVar(\"HarvestAttempt\")+1))\n\t\t\t\t\t\ttime.sleep(0.5)\n\t\t\t\t\t\tKey.Press(NpcChat)\n\t\t\t\t\t\ttime.sleep(4)\n\t\t\t\t\t\tSCLib.UpdateVar(\"HarvestAttempt\", SCLib.GetVar(\"HarvestAttempt\")+1)\n\t\t\t\t\telse:\n\t\t\t\t\t\tChangeChannels()\n\telse:\n\t\tif HasLooted:\n\t\t\tResetHarvestAttempt()\n\t\t\tif Terminal.GetCheckBox(\"Pet Item Teleport\"):\n\t\t\t\tTerminal.SetCheckBox(\"Pet Item Teleport\", False)\n\t\t\telse:\n\t\t\t\tif fieldID != maps[SCLib.GetVar(\"MapNumber\")]:\n\t\t\t\t\tTerminal.Rush(maps[SCLib.GetVar(\"MapNumber\")])\n\t\t\t\t\tSCLib.UpdateVar(\"TeleportCount\",0)\n\t\t\t\telse:\n\t\t\t\t\tChangeChannels()\n\t\t\t\t\tSCLib.UpdateVar(\"TeleportCount\",0)\n\tif SCLib.GetVar(\"TeleportCount\")>=6:\n\t\tResetHarvestAttempt()\n\t\tprint(\"Reached teleporting limit\")\n\t\tif Terminal.GetCheckBox(\"Pet Item Teleport\"):\n\t\t\tTerminal.SetCheckBox(\"Pet Item Teleport\", False)\n\t\telse:\n\t\t\tif fieldID != maps[SCLib.GetVar(\"MapNumber\")]:\n\t\t\t\tTerminal.Rush(maps[SCLib.GetVar(\"MapNumber\")])\n\t\t\t\tSCLib.UpdateVar(\"TeleportCount\",0)\n\t\t\telse:\n\t\t\t\tChangeChannels()\n\t\t\t\tSCLib.UpdateVar(\"TeleportCount\",0)\nif GameState.IsInGame() and Terminal.IsRushing():\n\tif fieldID == 270000200:\n\t\tTerminal.StopRush()","sub_path":"BMAutoMiner-HerberV018.py","file_name":"BMAutoMiner-HerberV018.py","file_ext":"py","file_size_in_byte":7088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"164406569","text":"# requires an instance of pelias running at localhost:4000\n\nimport requests\nfrom mec.models import Address\nfrom django.contrib.gis.geos import Point\nfrom django.core.management.base import BaseCommand, CommandError\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n ungeocoded = Address.objects.filter(coordinates__isnull=True)\n for addr in ungeocoded.iterator():\n self.stdout.write(str(addr))\n response = requests.get(f'http://localhost:4000/v1/search/structured?address=\"{addr.address1}\"&locality=\"{addr.city}\"®ion=\"{addr.state}\"&postalcode=\"{addr.zip}\"').json()\n if len(response['features']) == 1:\n first_result = response['features'][0]\n #postalcode = first_result['properties'].get('postalcode')\n #locality = first_result['properties'].get('locality')\n geocode = first_result['geometry']['coordinates']\n # if postalcode==str(addr.zip):\n # addr.coordinates=Point(geocode)\n # addr.save()\n # self.stdout.write('successful')\n # elif not postalcode:\n # addr.coordinates=Point(geocode)\n # addr.save()\n # self.stdout.write('successful')\n # # elif locality = addr.city\n # else: \n # 
self.stdout.write('failed')\n addr.coordinates=Point(geocode)\n addr.save()\n self.stdout.write('successful')\n else:\n self.stdout.write('failed')\n","sub_path":"mec/management/commands/geocode_all_addr.py","file_name":"geocode_all_addr.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"234286716","text":"from django.shortcuts import render\n\nfrom django.http import JsonResponse\nfrom app.models import Data\n\ndef search(request):\n input_word = request.GET.get('word')\n results = Data.objects.filter(word__contains=input_word).order_by(\"-count\").values(\"word\", \"count\")\n exact_match = []\n starts_with = []\n remaining_words = []\n for r in results:\n _word = r.get('word')\n if _word == input_word:\n exact_match.append(r)\n elif _word.startswith(input_word):\n starts_with.append(r)\n else:\n remaining_words.append(r)\n \n total_results = exact_match + starts_with + remaining_words\n\n return JsonResponse({\"result\": total_results[:25]})\n","sub_path":"web/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"124714711","text":"from django.shortcuts import render\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\nfrom taggit.models import Tag\n\nfrom .models import Post\n\n#local functions\ndef _paginate_posts(request, posts):\n\tpaginator = Paginator(posts, 3)\n\t#providing page with current\n\tpage = request.GET.get('page', 1)\n\ttry:\n\t\tposts = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tposts = paginator.page(1)\n\texcept EmptyPage:\n\t\tposts = paginator.page(paginator.num_pages)\n\treturn posts\n\n# Create your views here.\ndef post_list(request):\n\tposts = Post.objects.filter(published_date__isnull=False).order_by('-published_date')\n\t#return render(request, 'blog/post_list.html', {'posts': posts})\n\tpaginated = _paginate_posts(request, posts)\n\treturn render(request, 'blog/post_list.html', {\n\t\t'posts': paginated,\n\t\t'tags': Tag.objects.all(),\n\t})\n\ndef post_detail(request, slug=\"\"):\n\ttry:\n\t\tpost = Post.objects.get(slug=slug.lower())\n\texcept Post.DoesNotExist:\n\t\treturn render(request, 'blog/base.html')\n\treturn render(request, 'blog/post_detail.html', {\n\t\t'post': post,\n\t})\n\ndef posts_by_tag(request, slug=\"\"):\n\tposts = Post.objects.filter(tags__slug=slug.lower())\n\t#if posts.count() == 0:\n\t#\traise Exception(\"whatever\")\n\tpaginated = _paginate_posts(request, posts)\n\treturn render(request, 'blog/post_list.html', {\n\t\t'posts': paginated,\n\t\t'slug': slug,\n\t\t'tags': Tag.objects.all(),\n\t})","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"399978595","text":"from fishlifeexoncapture.fileHandler import TollCheck\n\n# path = \".\"\n# forstep = None\n# npart = 2\n\nMETADATAFILE = \".ignoreFishLifeExonCapture_part{}\"\n\ndef simplepartition(path, npart):\n\n if npart < 2:\n exit()\n\n tc_class = TollCheck(path = path)\n mydict = tc_class.pickleIt\n mykeys = list(mydict.keys())\n\n window = len(mykeys)/npart if len(mykeys) >= npart else 1\n\n init = 0\n done = []\n for i in range(0, npart):\n outhiddenfile = METADATAFILE.format(i)\n # print(outhiddenfile)\n names = mykeys[round(init): round(init + window)]\n # print(init, init + 
window)\n # print(round(init), round(init + window))\n # out = {i: '' for i in names}\n out = { i:mydict[i] for i in names}\n tc_class.__save_obj__(out, outhiddenfile)\n \n done += names\n if len(done) == len(mykeys):\n break\n \n init += window\n","sub_path":"src/fishmanager/split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"231227750","text":"\nimport matplotlib.pyplot as plt\nfrom pylab import mpl\n#plt.rcParams['font.sans-serif'] = ['YaHei Consolas Hybrid'] # 用来正常显示中文标签\n#plt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号\nimport mpl_toolkits.axisartist.axislines as axislines\n\nfig = plt.figure(1, figsize=(10, 6))\nfig.subplots_adjust(bottom=0.2)\n\n# 子图1\nax1 = axislines.Subplot(fig, 131)\nfig.add_subplot(ax1)\n# for axis in ax1.axis.values():\n# axis.major_ticks.set_tick_out(True) # 标签全部在外部\nax1.axis[:].major_ticks.set_tick_out(True) # 这句和上面的for循环功能相同\nax1.axis[\"left\"].label.set_text(\"子图1 -left标签\") # 显示在左边\n# 设置刻度\nax1.set_yticks([2,4,6,8])\nax1.set_xticks([0.2,0.4,0.6,0.8])\n\n# 子图2\nax2 = axislines.Subplot(fig, 132)\nfig.add_subplot(ax2)\nax2.set_yticks([1,3,5,7])\nax2.set_yticklabels(('one','two','three', 'four', 'five')) # 不显示‘five’\nax2.set_xlim(5, 0) # X轴刻度\nax2.axis[\"left\"].set_axis_direction(\"right\")\nax2.axis[\"left\"].label.set_text(\"子图2 -left标签\") # 显示在右边\nax2.axis[\"bottom\"].set_axis_direction(\"top\")\nax2.axis[\"right\"].set_axis_direction(\"left\")\nax2.axis[\"top\"].set_axis_direction(\"bottom\")\n\n# 子图3\nax3 = axislines.Subplot(fig, 133)\nfig.add_subplot(ax3)\n# 前两位表示X轴范围,后两位表示Y轴范围\nax3.axis([40, 160, 0, 0.03])\nax3.axis[\"left\"].set_axis_direction(\"right\")\nax3.axis[:].major_ticks.set_tick_out(True)\n\nax3.axis[\"left\"].label.set_text(\"Long Label Left\")\nax3.axis[\"bottom\"].label.set_text(\"Label Bottom\")\nax3.axis[\"right\"].label.set_text(\"Long Label Right\")\nax3.axis[\"right\"].label.set_visible(True)\nax3.axis[\"left\"].label.set_pad(0)\nax3.axis[\"bottom\"].label.set_pad(20)\n\nplt.show()\n","sub_path":"code.org/plotExercise.py","file_name":"plotExercise.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"242582452","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/fritz/github/posterior/treecat/treecat/config.py\n# Compiled at: 2017-08-14 22:48:52\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nDEFAULT_CONFIG = {'seed': 0, \n 'model_num_clusters': 32, \n 'model_ensemble_size': 8, \n 'learning_init_epochs': 100, \n 'learning_full_epochs': 1, \n 'learning_estimate_tree': True, \n 'learning_sample_tree_rate': 3}\n\ndef make_config(**options):\n \"\"\"Create a new global config dict with default values.\"\"\"\n config = DEFAULT_CONFIG.copy()\n for key, value in options.items():\n if key not in config:\n raise ValueError(('Unknown option: {}. 
Try one of:\\n {}').format(key, ('\\n ').join(sorted(config.keys()))))\n config[key] = int(value)\n\n return config","sub_path":"pycfiles/pytreecat-0.1.9.tar/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"297356746","text":"\"\"\"Development automation\n\"\"\"\nimport os\nimport tempfile\n\nimport nox\n\nPACKAGE_NAME = \"furo\"\nnox.options.sessions = [\"lint\", \"test\"]\n\n\n#\n# Helpers\n#\ndef _install_this_project_with_flit(session, *, extras=None, editable=False):\n session.install(\"flit\")\n args = []\n if extras:\n args.append(\"--extras\")\n args.append(\",\".join(extras))\n if editable:\n args.append(\"--pth-file\" if os.name == \"nt\" else \"--symlink\")\n\n session.run(\"flit\", \"install\", \"--deps=production\", *args, silent=True)\n\n\n#\n# Development Sessions\n#\n@nox.session(name=\"docs-live\", python=\"3.8\")\ndef docs_live(session):\n if session.posargs:\n docs_dir = session.posargs[0]\n additional_dependencies = session.posargs[1:]\n else:\n docs_dir = \"docs/\"\n additional_dependencies = ()\n\n build_command = \"./node_modules/.bin/gulp build\"\n _install_this_project_with_flit(session, extras=[\"doc\"], editable=True)\n session.install(\"sphinx-autobuild\", *additional_dependencies)\n\n with tempfile.TemporaryDirectory() as destination:\n session.run(\n \"sphinx-autobuild\",\n # for sphinx-autobuild\n \"--port=0\",\n \"--watch=src/\",\n f\"--pre-build={build_command}\",\n r\"--re-ignore=src/.*/theme/static/.*\\.(css|js)\", # ignore the generated files\n \"--open-browser\",\n # for sphinx\n \"-a\",\n docs_dir,\n destination,\n )\n\n\n@nox.session(python=\"3.8\", reuse_venv=True)\ndef docs(session):\n # Generate relevant files prior to installation\n session.run(\"gulp\", \"build\", external=True)\n\n _install_this_project_with_flit(session, extras=[\"doc\"], editable=False)\n\n # Generate documentation into `build/docs`\n session.run(\"sphinx-build\", \"-b\", \"html\", \"-v\", \"docs/\", \"build/docs\")\n\n\n@nox.session(python=\"3.8\", reuse_venv=True)\ndef lint(session):\n session.install(\"pre-commit\")\n\n args = list(session.posargs)\n args.append(\"--all-files\")\n if \"CI\" in os.environ:\n args.append(\"--show-diff-on-failure\")\n\n session.run(\"pre-commit\", \"run\", \"--all-files\", *args)\n\n\n@nox.session(python=\"3.6\")\ndef test(session):\n _install_this_project_with_flit(session, extras=[\"test\"])\n\n args = session.posargs or [\"-n\", \"auto\", \"--cov\", PACKAGE_NAME]\n session.run(\"pytest\", *args)\n","sub_path":"noxfile.py","file_name":"noxfile.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"159246475","text":"import core.module as module\nimport core.worker as worker\nimport mcstatus as mc\n\nclass ServerInfoModule(module.Module):\n \n def __init__(self):\n print('ServerInfoModule initialized...')\n\n '''\n This method gets called when a command arrives that passed this module's filter\n This function can return a string which will be the bot's response.\n '''\n async def handle_message(self, message):\n if not message.channel.name == module.chat_default.name:\n return\n\n args = message.content.split(' ')\n\n if len(args) == 2:\n if args[1] == 'minecraft':\n worker.queue_function(self.minecraft_status)\n return\n\n if await super().handle_message(message):\n return\n\n '''\n This method gets called when help is 
called on this module.\n It should return a string explaining the usage of this module\n '''\n def help_message(self):\n msg = 'ServerInfoModule help:\\r\\n'\n msg += 'This module allows you to get info on the status of the l3am server, note that it is still a work in progress at this time.\\r\\n\\r\\n'\n msg += 'Commands:\\r\\n'\n msg += ' \"!info minecraft\": Shows the online status of the minecraft server\\r\\n'\n return msg\n\n ''' Status in 1 line (running! or error etc..) '''\n def short_status(self):\n return self.name() + ': ready...'\n\n '''\n This method gets called when status is called on this module. \n It should return a string explaining the runtime status of this module.\n '''\n def status(self):\n msg = 'ServerInfoModule status: ok!\\r\\n'\n msg += self.minecraft_status()\n return msg\n\n ''' This method gets called once every second for time based operations. '''\n async def update(self):\n pass\n\n def minecraft_status(self):\n msg = ''\n \n try:\n server_1 = mc.MinecraftServer('minecraft.wavycolt.com')\n status = server_1.status()\n\n msg += 'Server 1: \"{}\" has {} players online and replied in {} ms'.format(status.description['text'], status.players.online, status.latency)\n if status.players.online > 0:\n msg += ', online players:\\r\\n'\n for x in status.players.sample:\n msg += ' - {}\\r\\n'.format(x.name)\n else:\n msg += '.'\n except Exception as e:\n print(e)\n msg += 'Server 1: offline...'\n\n try:\n server_1 = mc.MinecraftServer('minecraft.wavycolt.com', 25570)\n status = server_1.status()\n\n msg += '\\r\\n\\r\\nServer 2: \"{}\" has {} players online and replied in {} ms'.format(status.description['text'], status.players.online, status.latency)\n if status.players.online > 0:\n msg += ', online players:\\r\\n'\n for x in status.players.sample:\n msg += ' - {}\\r\\n'.format(x.name)\n else:\n msg += '.'\n except Exception as e:\n print(e)\n msg += '\\r\\nServer 2: offline...'\n\n try:\n server_1 = mc.MinecraftServer('minecraft.wavycolt.com', 25575)\n status = server_1.status()\n\n msg += '\\r\\n\\r\\nServer 3: \"{}\" has {} players online and replied in {} ms'.format(status.description['text'], status.players.online, status.latency)\n if status.players.online > 0:\n msg += ', online players:\\r\\n'\n for x in status.players.sample:\n msg += ' - {}\\r\\n'.format(x.name)\n else:\n msg += '.'\n except Exception as e:\n print(e)\n msg += '\\r\\nServer 3: offline...'\n\n module.send_message_nowait(module.chat_default, msg)\n","sub_path":"botv3/modules/ServerInfoModule.py","file_name":"ServerInfoModule.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"457418664","text":"from patterns import PlogLine, PlogBlock\r\n\r\nclass CDPBlock(object):\r\n\t'''Given as a 'block' through PlugBlockMixin.add_block.\r\n\tThe add_block method looks for the `block` attribute and appends\r\n\tit to the running blocks.\r\n\r\n\tLines represent each attribute to dicover within the _Start_ and _stop_\r\n\tcontent of a block. 
Each `PlogLine` extracts an explicitly designed\r\n\tvalue, adding it to the 'lines' of a block.\r\n\tA block returns the line value as a dictionary attribute in `Plug.data_block`.\r\n\t'''\r\n\r\n\t# Define a PlogBlock and its starting value.\r\n\tblock = PlogBlock('Device ID:', ref='Device')\r\n\tblock.header.ref='device_id'\r\n\r\n\tblock.footer = PlogLine('----------', ref='footer').anything()\r\n\r\n\tlines = {}\r\n\tlines['entry_address'] = PlogLine('IP address:')\r\n\tlines['platform'] = PlogLine('Platform:')\r\n\tlines['interface'] = PlogLine('Interface:')\r\n\tlines['hold_time'] = PlogLine('Holdtime').maybe(' ').then(':')\r\n\tlines['version'] = PlogLine('Version').maybe(' ').then(':').multiline()\r\n\tlines['version'] = PlogLine('advertisement version:')\r\n\tlines['duplex'] = PlogLine('Duplex:')\r\n\tlines['power_drawn'] = PlogLine('Power drawn:')\r\n\tlines['power_request_id'] = PlogLine('Power request id:')\r\n\tlines['power_management_id'] = PlogLine('Power management id:')\r\n\tlines['power_request_levels'] = PlogLine('Power request levels are:')\r\n\r\n\tblock.add_lines(**lines)\r\n","sub_path":"src/plog/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"653708811","text":"# -*- coding: utf-8 -*-\n# This code was copied from the pika documentation\nimport time\nimport pika\nimport pickle\nimport logging\nimport functools\nfrom threading import Thread\nfrom queue import Queue as ThreadQueue\nfrom cryptography.fernet import Fernet\nfrom threading import Event as ThreadEvent\n\nLOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '\n '-35s %(lineno) -5d: %(message)s')\nLOGGER = logging.getLogger(__name__)\n\nclass PID():\n def __init__(self, kp, kd, ki, i_max, i_min, dt):\n self.err = [0, 0]\n self.int = [0, 0]\n self.der = 0\n self.i_max = i_max\n self.i_min = i_min\n self.kp = kp\n self.kd = kd\n self.ki = ki\n self.dt = dt\n\n def next(self, x):\n '''A simple PID'''\n # err[0] = ref - x\n # err[1] = err[0] from the last sample\n # int[0] = int[1] + err[0]\n # int[1] = int[0] from the last sample\n # der = err[1] + err[0]\n # dt = sample period in sec\n # output = kp*err[0] + (ki*int[0]*dt) + (kd*der/dt)\n output = None\n ref = 0\n self.err[0] = ref - x\n self.int[0] = self.int[1] + self.err[0]\n self.der = self.err[1] + self.err[0]\n\n self.int[0] = self.i_max if self.int[0] > self.i_max else self.int[0]\n self.int[0] = self.i_min if self.int[0] < self.i_min else self.int[0]\n\n output = self.kp * self.err[0]\n output += self.ki * self.int[0] * self.dt\n\n if self.dt != 0:\n output += self.kd * self.der / self.dt\n\n self.output = output\n self.err[1] = self.err[0]\n self.int[1] = self.int[0]\n return output\n\n def reset(self):\n self.err = [0, 0]\n self.int = [0, 0]\n self.der = 0\n\n\nclass QueueToSampleTimeControl(PID):\n def __init__(self, i_max, dt):\n super().__init__(kp=0.07, kd=0.05, ki=0.4, i_max=i_max, i_min=-1 * i_max, dt=dt)\n if i_max != 0:\n self.min_tempo = 1 / i_max\n else:\n self.min_tempo = 0.000001\n\n def next(self, x):\n '''Invert the output of our PID -> large amounts of control need to express\n short durations in time'''\n output = super().next(x)\n\n # if the controller is working accelerate the wind-down of the integrator\n # (the queue can't be negative, so help it out)\n if output <= 0:\n self.int[1] /= 1.1\n self.err[1] /= 1.1\n output = 1 / self.dt\n\n # start with the baseline tempo\n 
time_recommendation = self.dt\n\n if output != 0:\n time_recommendation = 1 / output\n\n # clamps\n time_recommendation = \\\n self.min_tempo if time_recommendation < self.min_tempo else time_recommendation\n\n time_recommendation = \\\n self.dt if time_recommendation > self.dt else time_recommendation\n\n return time_recommendation\n\nclass SimplePikaTopicPublisher():\n '''\n This is a pika (Python-RabbitMq) message publisher heavily based on the\n asychronous example provided in the pika documentation. It should handle\n unexpected interactions with RabbitMQ such as channel and connection closures.\n\n If RabbitMQ closes the connection, an object of this class should reopen it.\n (You should look at the output, as there are limited reasons why the connection\n may be closed, which usually are tied to permission related issues or socket\n timeouts.)\n\n Example:\n # set a callback mechanism to sample the task's input queue every 1.5 seconds\n # name the exchange in the RabbitMq server at the url to 'g_pika_producer_exchange'\n # name the RabbitMq queue on the server at the url to 'g_queue'\n # set the topic routing key to 'pub_thread.text'\n\n publisher = \\\n SimplePikaTopicPublisher(\n amqp_url='amqp://bob:dobbs@192.168.1.69:5672/%2F?connection_attempts=3&heartbeat_interval=3600',\n publish_tempo_sec=1.5,\n exchange_name='g_pika_producer_exchange',\n routing_key='pub_thread.text',\n )\n\n # to start the thread so pika won't block your code:\n publisher.start_thread()\n\n # to actually write messages (publish) to the amqp_url:\n publish.post_fifo(\"Some Message\")\n\n # to stop the thread but keep the connection\n publisher.start_thread()\n\n # to start the thread again\n publisher.start_thread()\n\n # to stop the connection and the thread\n publisher.stop()\n\n # to reconnect and start the thread\n publisher.start_thread()\n\n Notes:\n It uses delivery confirmations and illustrates one way to keep track of\n messages that have been sent and if they've been confirmed by RabbitMQ.\n This confirmation mechanism will not work if message tempo exceeds the\n publish_tempo (the messages will get through but the confirmation mechanism\n will indicate there is a problem when there isn't one.)\n\n If the input queue has more than one item they will all be sent out to the\n network and the queue sampler callback's frequency will temporarily\n increase to deal with queue bursting.\n\n '''\n EXCHANGE_TYPE = 'topic'\n PUBLISH_FAST_INTERVAL_SEC = 0.000001 # right now\n PRODUCER_VERSION = u'1.0'\n\n def __init__(self,\n amqp_url,\n routing_key,\n publish_tempo_sec,\n exchange_name):\n '''Setup the example publisher object, passing in the URL we will use\n to connect to RabbitMQ.\n\n :param str amqp_url: The URL for connecting to RabbitMQ\n\n '''\n self._channel = None\n self._connection = None\n\n self._acked = 0\n self._nacked = 0\n self._deliveries = []\n self._message_number = 0\n\n self._closing = False\n self._stopping = False\n self.connect_error = False\n\n self._amqp_url = amqp_url\n self._task_run_event = ThreadEvent()\n self._publish_tempo_sec = publish_tempo_sec\n self._thread_queue = ThreadQueue(maxsize=500)\n\n self._tempo_controller = QueueToSampleTimeControl(\n i_max=1 / self.PUBLISH_FAST_INTERVAL_SEC,\n dt = publish_tempo_sec)\n\n # will set the exchange, queue and routing_keys names for the RabbitMq\n # server running on amqp_url\n self._rabbit_exchange_name = exchange_name\n self._rabbit_routing_key = routing_key\n\n def connect(self):\n '''This method connects to RabbitMQ, returning 
the connection handle.\n When the connection is established, the on_connection_open method\n will be invoked by pika. If you want the reconnection to work, make\n sure you set stop_ioloop_on_close to False, which is not the default\n behavior of this adapter.\n\n :rtype: pika.SelectConnection\n\n '''\n LOGGER.info('Connecting to %s', self._amqp_url)\n return pika.SelectConnection(pika.URLParameters(self._amqp_url),\n self.on_connection_open,\n stop_ioloop_on_close=False)\n\n def on_connection_open(self, unused_connection):\n '''This method is called by pika once the connection to RabbitMQ has\n been established. It passes the handle to the connection object in\n case we need it, but in this case, we'll just mark it unused.\n\n :type unused_connection: pika.SelectConnection\n\n '''\n LOGGER.info('Connection opened')\n self.add_on_connection_close_callback()\n self.open_channel()\n\n def add_on_connection_close_callback(self):\n '''This method adds an on close callback that will be invoked by pika\n when RabbitMQ closes the connection to the publisher unexpectedly.\n\n '''\n LOGGER.info('Adding connection close callback')\n self._connection.add_on_close_callback(self.on_connection_closed)\n\n def on_connection_closed(self, connection, reply_code, reply_text):\n '''This method is invoked by pika when the connection to RabbitMQ is\n closed unexpectedly. Since it is unexpected, we will reconnect to\n RabbitMQ if it disconnects.\n\n :param pika.connection.Connection connection: The closed connection obj\n :param int reply_code: The server provided reply_code if given\n :param str reply_text: The server provided reply_text if given\n\n '''\n self._channel = None\n if self._closing:\n self._connection.ioloop.stop()\n else:\n LOGGER.warning('Connection closed, reopening in 5 seconds: (%s) %s',\n reply_code, reply_text)\n self._connection.add_timeout(5, self.reconnect)\n\n def reconnect(self):\n '''Will be invoked by the IOLoop timer if the connection is\n closed. See the on_connection_closed method.\n\n '''\n self._deliveries = []\n self._acked = 0\n self._nacked = 0\n self._message_number = 0\n\n # This is the old connection IOLoop instance, stop its ioloop\n self._connection.ioloop.stop()\n\n # Create a new connection\n self._connection = self.connect()\n\n # There is now a new connection, needs a new ioloop to run\n self._connection.ioloop.start()\n\n def open_channel(self):\n '''This method will open a new channel with RabbitMQ by issuing the\n Channel.Open RPC command. 
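For reference, the full startup sequence these callbacks implement is:\n\n connect -> on_connection_open -> open_channel -> on_channel_open ->\n setup_exchange -> on_exchange_declareok -> start_publishing\n\n 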
When RabbitMQ confirms the channel is open\n by sending the Channel.OpenOK RPC reply, the on_channel_open method\n will be invoked.\n\n '''\n LOGGER.info('Creating a new channel')\n self._connection.channel(on_open_callback=self.on_channel_open)\n\n def on_channel_open(self, channel):\n '''This method is invoked by pika when the channel has been opened.\n The channel object is passed in so we can make use of it.\n\n Since the channel is now open, we'll declare the exchange to use.\n\n :param pika.channel.Channel channel: The channel object\n\n '''\n LOGGER.info('Channel opened')\n self._channel = channel\n self.add_on_channel_close_callback()\n self.setup_exchange(self._rabbit_exchange_name)\n\n def add_on_channel_close_callback(self):\n '''This method tells pika to call the on_channel_closed method if\n RabbitMQ unexpectedly closes the channel.\n\n '''\n LOGGER.info('Adding channel close callback')\n self._channel.add_on_close_callback(self.on_channel_closed)\n\n def on_channel_closed(self, channel, reply_code, reply_text):\n '''Invoked by pika when RabbitMQ unexpectedly closes the channel.\n Channels are usually closed if you attempt to do something that\n violates the protocol, such as re-declare an exchange or queue with\n different parameters. In this case, we'll close the connection\n to shutdown the object.\n\n :param pika.channel.Channel: The closed channel\n :param int reply_code: The numeric reason the channel was closed\n :param str reply_text: The text reason the channel was closed\n\n '''\n LOGGER.warning('Channel was closed: (%s) %s', reply_code, reply_text)\n if not self._closing:\n self._connection.close()\n\n def setup_exchange(self, exchange_name):\n '''Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC\n command. When it is complete, the on_exchange_declareok method will\n be invoked by pika.\n\n :param str|unicode exchange_name: The name of the exchange to declare\n\n '''\n LOGGER.info('Declaring exchange %s', exchange_name)\n self._channel.exchange_declare(\n callback=self.on_exchange_declareok,\n exchange=exchange_name,\n exchange_type=self.EXCHANGE_TYPE,\n durable=False)\n\n def on_exchange_declareok(self, unused_frame):\n '''Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC\n command.\n\n :param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame\n\n '''\n LOGGER.info('Exchange declared')\n\n self.start_publishing()\n\n\n def start_publishing(self):\n '''This method will enable delivery confirmations and schedule the\n first message to be sent to RabbitMQ\n\n '''\n LOGGER.info('Issuing consumer related RPC commands')\n self.enable_delivery_confirmations()\n self.schedule_next_producer_heart_beat(self._publish_tempo_sec)\n\n def enable_delivery_confirmations(self):\n '''Send the Confirm.Select RPC method to RabbitMQ to enable delivery\n confirmations on the channel. 
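Example (an illustrative sketch of the flow, mirroring the call below):\n\n self._channel.confirm_delivery(self.on_delivery_confirmation)\n # pika later invokes the callback with Basic.Ack / Basic.Nack frames\n # whose method.delivery_tag matches a published message number\n\n 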
The only way to turn this off is to close\n    the channel and create a new one.\n\n    When the message is confirmed from RabbitMQ, the\n    on_delivery_confirmation method will be invoked passing in a Basic.Ack\n    or Basic.Nack method from RabbitMQ that will indicate which messages it\n    is confirming or rejecting.\n\n    '''\n    LOGGER.info('Issuing Confirm.Select RPC command')\n    self._channel.confirm_delivery(self.on_delivery_confirmation)\n\n  def on_delivery_confirmation(self, method_frame):\n    '''Invoked by pika when RabbitMQ responds to a Basic.Publish RPC\n    command, passing in either a Basic.Ack or Basic.Nack frame with\n    the delivery tag of the message that was published. The delivery tag\n    is an integer counter indicating the message number that was sent\n    on the channel via Basic.Publish. Here we're just doing housekeeping\n    to keep track of stats and remove message numbers that we expect\n    a delivery confirmation of from the list used to keep track of messages\n    that are pending confirmation.\n\n    :param pika.frame.Method method_frame: Basic.Ack or Basic.Nack frame\n\n    '''\n    confirmation_type = method_frame.method.NAME.split('.')[1].lower()\n    LOGGER.info('Received %s for delivery tag: %i',\n                confirmation_type,\n                method_frame.method.delivery_tag)\n    if confirmation_type == 'ack':\n      self._acked += 1\n    elif confirmation_type == 'nack':\n      self._nacked += 1\n\n    item = method_frame.method.delivery_tag\n    # only remove items that exist in our list (if a previous thread was\n    # canceled and this one was started we would receive delivery_tags which we\n    # didn't send - this could cause the remove method to crash the producer)\n    if item in self._deliveries:\n      self._deliveries.remove(method_frame.method.delivery_tag)\n      LOGGER.info('Published %i messages, %i have yet to be confirmed, '\n                  '%i were acked and %i were nacked',\n                  self._message_number, len(self._deliveries),\n                  self._acked, self._nacked)\n    else:\n      LOGGER.info('Received delivery tag for something we did not send')\n\n  def schedule_next_producer_heart_beat(self, timeout):\n    '''If we are not closing our connection to RabbitMQ, schedule another\n    message to be delivered in self._publish_tempo_sec seconds.\n\n    '''\n    if self._stopping:\n      return\n\n    # Scheduling next Task queue check\n    LOGGER.info('Task queue check in %0.4f seconds', timeout)\n    self._connection.add_timeout(timeout, self.producer_heart_beat)\n\n  def publish_message(self, message):\n    '''If the class is not stopping, publish a message to RabbitMQ,\n    appending a list of deliveries with the message number that was sent.\n    This list will be used to check for delivery confirmations in the\n    on_delivery_confirmation method.\n\n    Example:\n      # get the message from somewhere\n      message = self._thread_queue.get()\n\n      # use a partial of this method to make a custom callback with your message as an input\n      cb = functools.partial(self.publish_message, message=message)\n\n      # then load it into a timer\n      self._connection.add_timeout(self.PUBLISH_FAST_INTERVAL_SEC, cb)\n    '''\n    if self._stopping:\n      return\n    properties = pika.BasicProperties(app_id='miros-rabbitmq-publisher',\n                                      content_type='application/json',\n                                      headers={u'version': self.PRODUCER_VERSION})\n\n    self._channel.basic_publish(self._rabbit_exchange_name, self._rabbit_routing_key,\n                                message,\n                                properties)\n\n    self._message_number += 1\n    self._deliveries.append(self._message_number)\n    LOGGER.info('Published message # %i', self._message_number)\n\n  def close_channel(self):\n    '''Invoke this command to close the channel with RabbitMQ by sending\n    the 
Channel.Close RPC command.'''\n    LOGGER.info('Closing the channel')\n    if self._channel:\n      self._channel.close()\n\n  def run(self):\n    '''Run the example code by connecting and then starting the IOLoop.'''\n    self._connection = self.connect()\n    self._connection.ioloop.start()\n\n  def stop(self):\n    '''Stop the example by closing the channel and connection and releasing the\n    thread. We set a flag here so that we stop scheduling new messages to be\n    published. The IOLoop is started because this method is\n    invoked by the try/except below when KeyboardInterrupt is caught.\n    Starting the IOLoop again will allow the publisher to cleanly\n    disconnect from RabbitMQ.\n    '''\n    LOGGER.info('Stopping')\n    self._stopping = True\n    self.close_channel()\n    self.close_connection()\n    self._task_run_event.clear()\n    self._connection.ioloop.start()\n    LOGGER.info('Stopped')\n\n  def close_connection(self):\n    '''This method closes the connection to RabbitMQ.'''\n    LOGGER.info('Closing connection')\n    self._closing = True\n    self._connection.close()\n\n  def producer_heart_beat(self):\n    '''This is the callback that is called every publish_tempo_sec to check\n    whether something is in the thread_queue. If there are items in this queue\n    it schedules other callbacks to send out the messages, and temporarily\n    increases its frequency to deal with queue bursting.\n    '''\n    if self._task_run_event.is_set():\n      if self._stopping:\n        return\n      # messages tend to bunch up, they are bursty, so speed up our\n      # producer_heart_beat if there were messages in our queue\n      queue_length = self._thread_queue.qsize()\n      new_tempo_period_sec = self._tempo_controller.next(queue_length)\n      self.schedule_next_producer_heart_beat(new_tempo_period_sec)\n\n      # send out all messages in the queue\n      if queue_length >= 1:\n        for i in range(queue_length):\n          message = self._thread_queue.get()\n          cb = functools.partial(self.publish_message, message=message)\n          self._connection.add_timeout(self.PUBLISH_FAST_INTERVAL_SEC, cb)\n          LOGGER.info('Scheduling next output message in %0.6f seconds', self.PUBLISH_FAST_INTERVAL_SEC)\n\n  def post_fifo(self, message):\n    '''Use this to post messages to the network.'''\n    self._thread_queue.put(message)\n\n  def start_thread(self):\n    '''Add a thread so that the run method doesn't steal our program control.'''\n    self._task_run_event.set()\n    self._stopping = False\n\n    def thread_runner(self):\n      # The run method will turn on pika's callback hell.\n      # To see how this is turned off look at the producer_heart_beat\n      try:\n        self.run()\n      except Exception:\n        self.stop_thread()\n        self.connect_error = True\n\n    thread = Thread(target=thread_runner, args=(self,), daemon=True)\n    thread.start()\n\n  def stop_thread(self):\n    '''Stop the thread, but keep the connection open. 
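producer_heart_beat above drains the thread queue by binding each message into a zero-argument callback with functools.partial before handing it to the connection timer. The pattern in isolation, with plain Python and no pika (the names below are illustrative):

    import functools

    def publish_message(message):
        print('publishing', message)   # stand-in for the class method above

    cb = functools.partial(publish_message, message='hello')
    cb()   # this zero-argument call is what add_timeout fires later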
To close the connection\n and stop the thread, use the 'stop' api'''\n self._task_run_event.clear()\n\nclass PikaTopicPublisher(SimplePikaTopicPublisher):\n '''This is subclass of SimplePikaTopicPublisher which extends its capabilities.\n\n It can serialize and encrypt messages before it transmits them.\n While constructing it, you provide it with a\n symmetric encryption key, and optional functions for encrypting and\n serializing messages.\n\n Example:\n publisher = \\\n PikaTopicPublisher(\n amqp_url='amqp://bob:dobbs@192.168.1.69:5672/%2F?connection_attempts=3&heartbeat_interval=3600',\n routing_key='pub_thread.text',\n publish_tempo_sec=1.5,\n exchange_name='sex_change',\n encryption_key=b'u3Uc-qAi9iiCv3fkBfRUAKrM1gH8w51-nVU8M8A73Jg='\n )\n\n publisher.start_thread()\n publisher.post_fifo(\"Publish a Message\")\n publisher.stop_thread()\n '''\n def __init__(self,\n amqp_url,\n routing_key,\n publish_tempo_sec,\n exchange_name,\n encryption_key,\n encryption_function=None,\n serialization_function=None):\n\n super().__init__(\n amqp_url,\n routing_key,\n publish_tempo_sec,\n exchange_name)\n\n self._encryption_key = encryption_key\n self._rabbit_user = self.get_rabbit_user(amqp_url)\n self._rabbit_password = self.get_rabbit_password(amqp_url)\n\n # saved encryption function\n self._sef = None\n\n def default_encryption_function(message, encryption_key):\n return Fernet(encryption_key).encrypt(message)\n\n def default_serialization_function(obj):\n return pickle.dumps(obj)\n\n if encryption_function is None:\n self._sef = default_encryption_function\n else:\n self._sef = encryption_function\n\n self._encryption_function = functools.partial(self._sef,\n encryption_key=encryption_key)\n\n if serialization_function is None:\n self._serialization_function = default_serialization_function\n else:\n self._serialization_function = serialization_function\n\n def change_encryption_key(self, encryption_key):\n self.stop_thread()\n self._encryption_function = functools.partial(self._sef, encryption_key=encryption_key)\n self.start_thread()\n\n def get_rabbit_user(self, url):\n user = url.split(':')[1][2:]\n return user\n\n def get_rabbit_password(self, url):\n password = url.split(':')[2].split('@')[0]\n return password\n\n def encrypt(self, item):\n return self._encryption_function(item)\n\n def serialize(self, item):\n return self._serialization_function(item)\n\n def post_fifo(self, item):\n xsitem = self.encrypt(self.serialize(item))\n super().post_fifo(xsitem)\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)\n # send to the raspberry pi\n pub_thread1 = \\\n PikaTopicPublisher(\n amqp_url='amqp://bob:dobbs@192.168.1.69:5672/%2F?connection_attempts=3&heartbeat_interval=3600',\n routing_key='pub_thread.text',\n publish_tempo_sec=0.5,\n exchange_name='sex_change',\n encryption_key=b'u3Uc-qAi9iiCv3fkBfRUAKrM1gH8w51-nVU8M8A73Jg='\n )\n pub_thread2 = \\\n PikaTopicPublisher(\n amqp_url='amqp://bob:dobbs@192.168.1.69:5672/%2F?connection_attempts=3&heartbeat_interval=3600',\n routing_key='pub_thread.text',\n publish_tempo_sec=0.5,\n exchange_name='sex_change',\n encryption_key=b'u3Uc-qAi9iiCv3fkBfRUAKrM1gH8w51-nVU8M8A73Jg='\n )\n pub_thread3 = \\\n PikaTopicPublisher(\n amqp_url='amqp://bob:dobbs@127.0.0.1:5672/%2F?connection_attempts=3&heartbeat_interval=3600',\n routing_key='pub_thread.text',\n publish_tempo_sec=0.5,\n exchange_name='sex_change',\n encryption_key=b'u3Uc-qAi9iiCv3fkBfRUAKrM1gH8w51-nVU8M8A73Jg='\n )\n pub_thread1.start_thread()\n 
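The default_encryption_function and default_serialization_function above mean PikaTopicPublisher pickles first and Fernet-encrypts second. A minimal round trip of that pipeline, assuming the cryptography package is installed (the key is freshly generated here rather than the hard-coded demo key):

    import pickle
    from cryptography.fernet import Fernet

    key = Fernet.generate_key()
    token = Fernet(key).encrypt(pickle.dumps({'n': 1}))      # serialize, then encrypt
    assert pickle.loads(Fernet(key).decrypt(token)) == {'n': 1}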
pub_thread2.start_thread()\n  pub_thread3.start_thread()\n\n  time.sleep(2)\n  for i in range(100):\n    pub_thread1.post_fifo(\"Janice Library {}\".format(i))\n    pub_thread1.post_fifo(\"Janice Library {}\".format(i))\n    pub_thread1.post_fifo(\"Janice Library {}\".format(i))\n    pub_thread1.post_fifo(\"Janice Library {}\".format(i))\n    pub_thread1.post_fifo(\"Janice Library {}\".format(i))\n    if i != 0 and i % 40 == 0:\n      time.sleep(10)\n    pub_thread1.post_fifo(\"Janice Library {}\".format(i))\n    pub_thread1.post_fifo(\"Janice Library {}\".format(i))\n    pub_thread2.post_fifo(\"Mervin Burr {}\".format(i))\n    pub_thread3.post_fifo(\"Scott Slow {}\".format(i))\n    pub_thread3.post_fifo(\"Jessica Fast {}\".format(i))\n    time.sleep(0.2)\n\n  pub_thread1.stop_thread()\n  pub_thread2.stop_thread()\n  pub_thread3.stop()\n  time.sleep(3)\n\n  pub_thread1.start_thread()\n  pub_thread2.start_thread()\n  pub_thread3.start_thread()\n  time.sleep(1)\n\n  pub_thread1.post_fifo(\"Last Message on 1\")\n  pub_thread2.post_fifo(\"Last Message on 2\")\n  print(\"trying to publish in the new thread runner\")\n  pub_thread3.post_fifo(\"Last Message on 3\")\n  time.sleep(0.5)\n  print(\"hello world\")\n","sub_path":"experiment/rabbit/h_pika_producer.py","file_name":"h_pika_producer.py","file_ext":"py","file_size_in_byte":23534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"570973090","text":"def fib(n):\n    count = 0\n    a, b = 0, 1\n    while n >= count:\n        print(a, end=' ')\n        a, b = b, b+a\n        count += 1\n    print()\n\nnumberList = []\n\nnumber = int(input(\"Enter the number for fibo rows: \"))\nif number >= 1 and number <= 100:\n    for i in range(number):\n        numbers = int(input(\"Enter the index of fibonacci: \"))\n        if numbers >= 1 and numbers <= 100:\n            numberList.append(numbers)\n        else:\n            print(\"out of index\")\n\n    for index, number in enumerate(numberList):\n        fib(numberList[index])\nelse:\n    print(\"out of index\")","sub_path":"Python/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
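The next record, flynt's node_transformer, is driven by a pair of regexes (MOD_KEY_PATTERN and MOD_KEY_NAME_PATTERN). A quick demonstration of how they carve up a dict-style %-format string; the capturing group is what makes re.split keep the %(...)s separators:

    import re

    MOD_KEY_PATTERN = re.compile(r"(%\([^)]+\)s)")
    MOD_KEY_NAME_PATTERN = re.compile(r"%\(([^)]+)\)s")

    fmt = "1. %(key1)s 2. %(key2)s"
    print(MOD_KEY_PATTERN.split(fmt))         # ['1. ', '%(key1)s', ' 2. ', '%(key2)s', '']
    print(MOD_KEY_NAME_PATTERN.findall(fmt))  # ['key1', 'key2']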
+{"seq_id":"549446006","text":"import ast\nfrom collections import deque\n\nfrom flynt.transform.format_call_transforms import matching_call, ast_string_node, joined_string, ast_formatted_value\nfrom flynt.exceptions import FlyntException\n\nimport re\n\nMOD_KEY_PATTERN = re.compile(r\"(%\\([^)]+\\)s)\")\nMOD_KEY_NAME_PATTERN = re.compile(r\"%\\(([^)]+)\\)s\")\nVAR_KEY_PATTERN = re.compile(\"%([.]?[0-9]*)[hlL]?([diouxXeEfFgGcrsa])\")  # specs at https://docs.python.org/3/library/stdtypes.html#string-formatting\nobsolete_specifiers = 'hlL'\n\n\ntranslate_conversion_types = {'i': 'd', 'u': 'd'}\nconversion_methods = {'r': '!r', 'a': '!a', 's': None}\n\ndef handle_from_mod_dict_name(node):\n    \"\"\"Convert a `BinOp` `%` formatted str with a name representing a Dict on the right to an f-string.\n\n    Takes an ast.BinOp representing `\"1. %(key1)s 2. %(key2)s\" % mydict`\n    and converts it to an ast.JoinedStr representing `f\"1. {mydict['key1']} 2. {mydict['key2']}\"`\n\n    Args:\n       node (ast.BinOp): The node to convert to an f-string\n\n    Returns ast.JoinedStr (f-string)\n    \"\"\"\n\n    format_str = node.left.s\n    matches = MOD_KEY_PATTERN.findall(format_str)\n    var_keys = []\n    for idx, m in enumerate(matches):\n        var_key = MOD_KEY_NAME_PATTERN.match(m)\n        if not var_key:\n            raise FlyntException(\"could not find dict key\")\n        var_keys.append(var_key[1])\n\n    # build result node\n    result_node = ast.JoinedStr()\n    result_node.values = []\n    var_keys.reverse()\n    blocks = MOD_KEY_PATTERN.split(format_str)\n    # loop through the blocks of the string to build up the JoinedStr.values\n    for block in blocks:\n        # if this block matches a %(arg)s pattern then inject f-string instead\n        if MOD_KEY_PATTERN.match(block):\n            fv = ast.FormattedValue(\n                value=ast.Subscript(\n                    value=node.right, slice=ast.Index(value=ast.Str(s=var_keys.pop()))\n                ),\n                conversion=-1,\n                format_spec=None,\n            )\n\n            result_node.values.append(fv)\n        else:\n            # no match means it's just a literal string\n            result_node.values.append(ast.Str(s=block))\n    return result_node\n\ndef handle_from_mod_tuple(node):\n    \"\"\"Convert a `BinOp` `%` formatted str with a tuple on the right to an f-string.\n\n    Takes an ast.BinOp representing `\"1. %s 2. %s\" % (a, b)`\n    and converts it to an ast.JoinedStr representing `f\"1. {a} 2. {b}\"`\n\n    Args:\n       node (ast.BinOp): The node to convert to an f-string\n\n    Returns ast.JoinedStr (f-string)\n    \"\"\"\n\n    format_str = node.left.s\n    matches = VAR_KEY_PATTERN.findall(format_str)\n\n    if len(node.right.elts) != len(matches):\n        raise FlyntException(\"string formatting length mismatch\")\n\n    str_vars = deque(node.right.elts)\n\n    # build result node\n    result_node = ast.JoinedStr()\n    result_node.values = []\n    blocks = deque(VAR_KEY_PATTERN.split(format_str))\n    result_node.values.append(ast_string_node(blocks.popleft()))\n\n    while len(blocks) > 0:\n\n        fmt_prefix = blocks.popleft()\n        fmt_spec = blocks.popleft()\n        for c in obsolete_specifiers:\n            fmt_spec = fmt_spec.replace(c, '')\n\n        if fmt_spec in conversion_methods:\n            if fmt_prefix:\n                raise FlyntException(\"Default text alignment has changed between percent fmt and fstrings. 
\"\n \"Proceeding would result in changed code behaviour.\")\n fv = ast_formatted_value(str_vars.popleft(),\n fmt_str=fmt_prefix,\n conversion=conversion_methods[fmt_spec])\n else:\n fmt_spec = translate_conversion_types.get(fmt_spec, fmt_spec)\n fv = ast_formatted_value(str_vars.popleft(),\n fmt_str=fmt_prefix+fmt_spec)\n\n result_node.values.append(fv)\n result_node.values.append(ast_string_node(blocks.popleft()))\n\n return result_node\n\n\ndef handle_from_mod_generic_name(node):\n \"\"\"Convert a `BinOp` `%` formatted str with a unknown name on the `node.right` to an f-string.\n\n When `node.right` is a Name since we don't know if it's a single var or a dict so we sniff the string.\n\n `\"val: %(key_name1)s val2: %(key_name2)s\" % some_dict`\n Sniffs the left string for Dict style usage and calls: `handle_from_mod_dict_name`\n\n `\"val: %s\" % some_var`\n Borrow the core logic by injecting the name into a ast.Tuple\n\n Args:\n node (ast.BinOp): The node to convert to a f-string\n\n Returns ast.JoinedStr (f-string)\n \"\"\"\n\n has_dict_str_format = MOD_KEY_PATTERN.findall(node.left.s)\n if has_dict_str_format:\n return handle_from_mod_dict_name(node)\n\n # if it's just a name then pretend it's tuple to use that code\n node.right = ast.Tuple(elts=[node.right])\n return handle_from_mod_tuple(node)\n\ndef fstringify_node(node):\n ft = FstringifyTransformer()\n result = ft.visit(node)\n\n return (\n result,\n dict(\n changed=ft.counter > 0,\n lineno=ft.lineno,\n col_offset=ft.col_offset,\n skip=True,\n ),\n )\n\ndef handle_from_mod(node):\n if isinstance(node.right, (ast.Name, ast.Attribute, ast.Str, ast.BinOp, ast.Subscript)):\n return handle_from_mod_generic_name(node)\n\n elif isinstance(node.right, ast.Tuple):\n return handle_from_mod_tuple(node)\n\n elif isinstance(node.right, ast.Dict):\n # print(\"~~~~ Dict mod strings don't make sense to f-strings\")\n return node\n\n raise RuntimeError(\"unexpected `node.right` class\")\n\nclass FstringifyTransformer(ast.NodeTransformer):\n def __init__(self):\n super().__init__()\n self.counter = 0\n self.lineno = -1\n self.col_offset = -1\n\n\n def visit_Call(self, node: ast.Call):\n \"\"\"Convert `ast.Call` to `ast.JoinedStr` f-string\n \"\"\"\n\n match = matching_call(node)\n\n # bail in these edge cases...\n if match:\n if any(isinstance(arg, ast.Starred) for arg in node.args):\n return node\n\n if match:\n self.counter += 1\n self.lineno = node.lineno\n self.col_offset = node.col_offset\n result_node = joined_string(node)\n return result_node\n\n return node\n\n def visit_BinOp(self, node):\n \"\"\"Convert `ast.BinOp` to `ast.JoinedStr` f-string\n\n Currently only if a string literal `ast.Str` is on the left side of the `%`\n and one of `ast.Tuple`, `ast.Name`, `ast.Dict` is on the right\n\n Args:\n node (ast.BinOp): The node to convert to a f-string\n\n Returns ast.JoinedStr (f-string)\n \"\"\"\n\n percent_stringify = (\n isinstance(node.left, ast.Str)\n and isinstance(node.op, ast.Mod)\n and isinstance(node.right, (ast.Tuple, ast.Name, ast.Attribute, ast.Str, ast.Subscript))\n # ignore ast.Dict on right\n )\n\n # bail in these edge cases...\n if percent_stringify:\n no_good = [\"}\", \"{\"]\n for ng in no_good:\n if ng in node.left.s:\n return node\n for ch in ast.walk(node.right):\n # no nested binops!\n if isinstance(ch, ast.BinOp):\n return node\n # f-string expression part cannot include a backslash\n elif isinstance(ch, ast.Str) and (\n any(\n map(\n lambda x: x in ch.s,\n (\"\\n\", \"\\t\", \"\\r\", \"'\", '\"', \"%s\", \"%%\"),\n 
)\n )\n or \"\\\\\" in ch.s\n ):\n return node\n\n if percent_stringify:\n self.counter += 1\n self.lineno = node.lineno\n self.col_offset = node.col_offset\n result_node = handle_from_mod(node)\n return result_node\n\n return node","sub_path":"src/flynt/transform/node_transformer.py","file_name":"node_transformer.py","file_ext":"py","file_size_in_byte":7991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"73623674","text":"import glob\nimport sys\n\nfrom PyQt5.QtCore import QThread, pyqtSignal\nfrom PyQt5.QtWidgets import QApplication, QMainWindow\n\nfrom base import SharedBase\nfrom ui import Ui_MainWindow\n\n\nclass MyMainWindow(QMainWindow, Ui_MainWindow):\n def __init__(self):\n super(MyMainWindow, self).__init__()\n self.setupUi(self)\n self.pushButton.clicked.connect(self.do)\n self.lineEdit.returnPressed.connect(self.do)\n\n def do(self):\n try:\n self.lineEdit.setDisabled(True)\n self.user_input_url = self.lineEdit.text()\n self.pushButton.setDisabled(True)\n self.checkBox.setDisabled(True)\n self.spinBox.setDisabled(True)\n self.label.setDisabled(True)\n self.base = SharedBase(self.user_input_url)\n self.site_name = self.base.get_site_name()\n if self.checkBox.isChecked():\n checkbox_value = self.spinBox.value()\n else:\n checkbox_value = False\n self.work = WorkingThread(self.site_name, self.user_input_url, checkbox_value)\n self.work.status_report_signal.connect(self.status_receive_signal)\n self.work.progress_report_signal.connect(self.progress_receive_signal)\n self.work.stop_signal.connect(self.stop_signal)\n self.work.start()\n except NameError as e:\n self.stop_signal('Website %s illegal or not supported' % e)\n\n def status_receive_signal(self, text):\n self.statusBar().showMessage(text)\n\n def progress_receive_signal(self, progress):\n self.progressBar.setProperty(\"value\", progress)\n\n def stop_signal(self, text=''):\n self.pushButton.setDisabled(False)\n self.lineEdit.setDisabled(False)\n self.checkBox.setDisabled(False)\n self.spinBox.setDisabled(False)\n self.label.setDisabled(False)\n self.statusBar().showMessage(text)\n\n\nclass WorkingThread(QThread):\n status_report_signal = pyqtSignal(str)\n progress_report_signal = pyqtSignal(float)\n stop_signal = pyqtSignal(str)\n\n def __init__(self, site_name, url, checkbox_value):\n super(WorkingThread, self).__init__()\n self.site_name = site_name\n self.user_input_url = url\n self.latest_limit = checkbox_value\n\n def run(self):\n if self.site_name == 'dm5':\n from sites import DM5 as SiteClass\n elif self.site_name == 'ck101':\n from sites import Ck101 as SiteClass\n elif self.site_name == 'dmzj':\n from sites import Dmzj as SiteClass\n elif self.site_name == 'ehentai':\n from sites import Ehentai as SiteClass\n try:\n self.website_object = SiteClass(self.user_input_url)\n self.comic_name = self.website_object.get_name()\n self.ref_box = self.website_object.get_parent_info()\n self.status_report_signal.emit('%s, total %d chapters detected.' % (self.comic_name, len(self.ref_box)))\n if self.latest_limit is not False:\n if self.latest_limit > len(self.ref_box):\n raise ValueError\n self.ref_box = self.ref_box[-self.latest_limit:]\n self.main_loop(self.ref_box)\n except ValueError as e:\n self.stop_signal.emit('Chapters selected out of range, maximum %s chapters' % len(self.ref_box))\n except ConnectionError as e:\n self.stop_signal.emit('%s, consider using a proxy or a VPN.' 
% e)\n\n    def main_loop(self, refer_box):\n        for ref_tuple in refer_box:\n            title, parent_link = ref_tuple\n            total_page = self.website_object.get_page_info(parent_link)\n            for page in range(1, total_page + 1):\n                vague_path = self.website_object.get_path(self.comic_name, title, page) + '*'\n                if glob.glob(vague_path):\n                    self.status_report_signal.emit('%s page %d already existed.' % (title, page))\n                else:\n                    try:\n                        link = self.website_object.get_image_link(parent_link, page)\n                        self.status_report_signal.emit('Downloading %s' % title)\n                        self.website_object.down(self.comic_name, parent_link, link, title, page)\n                        progress = page / self.website_object.get_page_info(parent_link)\n                        self.progress_report_signal.emit(progress * 100)\n                    except Exception:\n                        errlog = 'Error occurred when downloading %s, Page %d.' % (title, page)\n                        self.status_report_signal.emit(errlog)\n        self.stop_signal.emit('All Done!')\n\n\nif __name__ == '__main__':\n    app = QApplication(sys.argv)\n    window = MyMainWindow()\n    window.show()\n    sys.exit(app.exec_())\n","sub_path":"driveit-gui.py","file_name":"driveit-gui.py","file_ext":"py","file_size_in_byte":4797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"99613851","text":"from math import pow, floor, log\nfrom random import sample\nfrom typing import List, Union\n\nclass Node(object):\n    def __init__(self, value):\n        self.right = self.left = self.parent = None\n        self.value = value\n\nclass AVLNode(Node):\n    def __init__(self, value):\n        super().__init__(value)\n        self.height = 0\n\nclass Tree(object):\n    def __init__(self):\n        self.root = None\n\n    # region [Methods][Nodes | Modify]\n    # region [Nodes][Print | Height | Count | Paths]\n    # region [Print][Pre Order | In Order]\n    # region [Pre Order]\n    def print_pre_order(self):\n        print('PreOrder')\n        print(*map(lambda x: x.value, self.pre_order()))\n\n    def pre_order(self):\n        return self.__pre_order(self.root)\n\n    def __pre_order(self, node: Node):\n        if node:\n            yield node\n            yield from self.__pre_order(node.left)\n            yield from self.__pre_order(node.right)\n\n    # endregion\n    # region [In Order]\n    def print_in_order(self):\n        print('InOrder')\n        print(*map(lambda x: x.value, self.in_order()))\n\n    def in_order(self):\n        return self.__in_order(self.root)\n\n    def __in_order(self, node: Node):\n        if node:\n            yield from self.__in_order(node.left)\n            yield node\n            yield from self.__in_order(node.right)\n\n    # endregion\n    # region [Post Order]\n    def print_post_order(self):\n        print('PostOrder')\n        print(*map(lambda x: x.value, self.post_order()))\n\n    def post_order(self):\n        return self.__post_order(self.root)\n\n    def __post_order(self, node: Node):\n        if node:\n            yield from self.__post_order(node.left)\n            yield from self.__post_order(node.right)\n            yield node\n\n    # endregion\n    # endregion\n    # region [Height]\n    def find_height(self) -> int:\n        return self.__find_height(self.root) if self.root else 0\n\n    def __find_height(self, node: Node) -> int:\n        if not node: return 0\n        return max([self.__find_height(node.left), self.__find_height(node.right)])+1\n\n    # endregion\n    # region [Count]\n    def count(self):\n        return self.__count(self.root)\n\n    def __count(self, root):\n        if not root: return 0\n        return self.__count(root.left) + 1 + self.__count(root.right)\n\n    # endregion\n    # region [Paths]\n    # region [Find]\n    def find(self, value: int) -> Node:\n        return self.__find(self.root, value)\n\n    def __find(self, node: Node, value: int) -> Node:\n        if value == node.value:\n            return node\n        if value < node.value and node.left:\n            return 
self.__find(node.left, value)\n elif value > node.value and node.right:\n return self.__find(node.right, value)\n\n # endregion\n # region [Min]\n def find_min_path(self) -> list:\n if not self.root:\n return []\n\n lst = [self.root]\n node = self.root\n while node.left:\n lst.append(node.left)\n node = node.left\n return lst\n\n # endregion\n # region [Max]\n def find_max_path(self) -> list:\n if not self.root:\n return []\n\n lst = [self.root]\n node = self.root\n while node.right:\n lst.append(node.right)\n node = node.right\n return lst\n\n # endregion\n# endregion\n # endregion\n # region [Modify][Balance | Insert | Remove | Delete]\n # region [Balance][Rotation | Spine | Balance]\n # region [Rotation]\n def __rotate_right(self, top: Node):\n node = top.left\n top.left = node.right\n if node.right:\n node.right.parent = top\n node.parent = top.parent\n if not top.parent:\n self.root = node\n elif top == top.parent.right:\n top.parent.right = node\n else:\n top.parent.left = node\n node.right = top\n top.parent = node\n\n def __rotate_left(self, top: Node):\n node = top.right\n top.right = node.left\n\n if node.left:\n node.left.parent = top\n\n node.parent = top.parent\n if not top.parent:\n self.root = node\n elif top is top.parent.left:\n top.parent.left = node\n elif top is top.parent.right:\n top.parent.right = node\n node.left = top\n top.parent = node\n\n def __make_rotations(self, x):\n top = self.root\n for i in range(x):\n if top:\n self.__rotate_left(top)\n if top.parent:\n top = top.parent.right\n\n # endregion\n # region [Spine]\n def __create_spine(self):\n parent = self.root\n while parent:\n left = parent.left\n if left:\n self.__rotate_right(parent)\n parent = left\n else:\n parent = parent.right\n\n # endregion\n # region [Balance]\n def balance(self):\n root, n = self.root, self.count()\n m = int(pow(2, floor(log(n + 1, 2))) - 1)\n\n self.__create_spine()\n self.__make_rotations(n - m)\n while m > 1:\n m = m // 2\n self.__make_rotations(m)\n\n # endregion\n # endregion\n # region [Insert]\n def insert(self, value: int):\n if not self.root: self.root = Node(value)\n else:\n node = self.root\n parent = None\n while node:\n parent = node\n node = node.left if value < node.value else node.right\n\n if value < parent.value:\n parent.left = Node(value)\n parent.left.parent = parent\n else:\n parent.right = Node(value)\n parent.right.parent = parent\n\n # endregion\n # region [Remove]\n def remove(self, value):\n self.__remove_node(self.find(value))\n\n def __remove_node(self, node):\n if not node: return None\n\n node_parent = node.parent\n child_num: int = bool(node.left)+bool(node.right)\n if child_num == 0:\n if node_parent:\n if node_parent.left == node:\n node_parent.left = None\n else:\n node_parent.right = None\n else:\n self.root = None\n\n elif child_num == 1:\n child = node.left if node.left else node.right\n if node_parent:\n if node_parent.left == node:\n node_parent.left = child\n else:\n node_parent.right = child\n else:\n self.root = child\n child.parent = node_parent\n\n elif child_num == 2:\n new_node = node.right\n while new_node.left:\n new_node = new_node.left\n\n node.value = new_node.value\n self.__remove_node(new_node)\n\n# endregion\n # region [Delete]\n def delete(self):\n for node in self.post_order():\n self.__remove_node(node)\n # endregion\n # endregion\n # endregion\n\nclass BST(Tree):\n def __init__(self, data=None):\n super().__init__()\n if data: self.__construct(data)\n\n # region [Constructor]\n def __construct(self, data: List[int]):\n 
[self.insert(val) for val in data]\n\n # endregion\n\n\nclass AVL(BST):\n def __init__(self, data):\n super().__init__()\n if data: self.root = self.__construct(sorted(data))\n\n def __construct(self, data) -> Node:\n if data:\n root = Node(data.pop((len(data) - 1) // 2))\n root.left = self.__construct(data[:len(data) // 2])\n root.right = self.__construct(data[len(data) // 2:])\n if root.left: root.left.parent = root\n if root.right: root.right.parent = root\n return root\n\n\n# region [List Generators]\n\ndef ascending(n: int) -> List[int]:\n return list(range(1, n+1))\n\ndef descending(n: int) -> List[int]:\n return list(range(n, 0, -1))\n\ndef random(n: int) -> List[int]:\n return sample(list(range(1, n+1)), n)\n\n# endregion\n\nar = AVL(random(10))\nar.print_pre_order()","sub_path":"wzor.py","file_name":"wzor.py","file_ext":"py","file_size_in_byte":8075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"249966362","text":"rivers = {\n 'nile': 'egypt',\n 'yangtze': 'china',\n 'amazon': 'south america'\n}\n\n# Use a loop to print a senctence about each river.\nfor river in rivers.keys():\n print(f\"The {river.title()} runs through {rivers[river].title()}.\")\n\nprint(\"\\n\")\n\n# Use a loop to print the name of each river included in the dictionary.\nfor river in rivers.keys():\n print(f\"{river.title()}\", end=\"\\t\\t\")\n\nprint(\"\\n\")\n\n# Use a loop to print the name of each country included in the dictionary.\nfor country in rivers.values():\n print(f\"{country.title()}\", end=\"\\t\\t\")","sub_path":"PythonCrashCourse/Chapter6/6.5.py","file_name":"6.5.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"595076771","text":"import unittest\n\nfrom scipy import stats\nfrom sklearn import (\n linear_model,\n tree,\n pipeline,\n impute,\n preprocessing\n)\n\nfrom sklearn_cv_pandas import (\n RandomizedSearchCV,\n GridSearchCV\n)\nfrom tests import utils\n\n\nclass TestPandasCV(unittest.TestCase):\n def test_random_linear_sv_ratio_cl(self):\n model_type, is_cl, with_prep, cv_type = \"linear\", True, False, \"random\"\n cv = self._get_cv(model_type, is_cl, with_prep, cv_type)\n self._test_basic_flow_sv_ratio(cv, is_cl)\n\n def test_random_tree_sv_2dfs_rg(self):\n model_type, is_cl, with_prep, cv_type = \"tree\", False, False, \"random\"\n cv = self._get_cv(model_type, is_cl, with_prep, cv_type)\n self._test_basic_flow_sv_2dfs(cv, is_cl)\n\n def test_random_tree_with_prep_cv_cl(self):\n model_type, is_cl, with_prep, cv_type = \"tree\", True, True, \"random\"\n cv = self._get_cv(model_type, is_cl, with_prep, cv_type)\n self._test_basic_flow_cv(cv, is_cl, 3)\n\n def test_grid_linear_sv_ratio_cl(self):\n model_type, is_cl, with_prep, cv_type = \"linear\", True, False, \"grid\"\n cv = self._get_cv(model_type, is_cl, with_prep, cv_type)\n self._test_basic_flow_sv_ratio(cv, is_cl)\n\n def test_grid_tree_sv_2dfs_rg(self):\n model_type, is_cl, with_prep, cv_type = \"tree\", False, False, \"grid\"\n cv = self._get_cv(model_type, is_cl, with_prep, cv_type)\n self._test_basic_flow_sv_2dfs(cv, is_cl)\n\n def test_grid_tree_with_prep_cv_cl(self):\n model_type, is_cl, with_prep, cv_type = \"tree\", True, True, \"grid\"\n cv = self._get_cv(model_type, is_cl, with_prep, cv_type)\n self._test_basic_flow_cv(cv, is_cl, 3)\n\n def _get_cv(self, model_type, is_cl, with_prep, cv_type):\n estimator = self._get_estimator(model_type, is_cl, with_prep)\n metric = 
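The balance() routine in the tree record above is the Day-Stout-Warren algorithm: flatten the tree into a right spine, then rotate left in halving passes. Tracing the rotation counts it computes for a hypothetical n = 10 nodes:

    from math import floor, log, pow

    n = 10
    m = int(pow(2, floor(log(n + 1, 2))) - 1)  # largest 2^k - 1 <= n, here 7
    passes = [n - m]                           # first pass: n - m = 3 rotations
    while m > 1:
        m //= 2
        passes.append(m)
    print(passes)  # [3, 3, 1]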
\"roc_auc\" if is_cl else \"neg_root_mean_squared_error\"\n if cv_type == \"random\":\n params = self._get_params_random(model_type, is_cl, with_prep)\n return RandomizedSearchCV(estimator, params, scoring=metric)\n else:\n params = self._get_params_grid(model_type, is_cl, with_prep)\n return GridSearchCV(estimator, params, scoring=metric)\n\n def _get_estimator(self, model_type, is_cl, with_preprocessing):\n if model_type == \"linear\":\n ml_estimator = linear_model.LogisticRegression(solver=\"liblinear\") if is_cl else linear_model.Lasso()\n else:\n ml_estimator = tree.DecisionTreeClassifier() if is_cl else tree.DecisionTreeRegressor()\n return self._add_preprocessing(ml_estimator) if with_preprocessing else ml_estimator\n\n @staticmethod\n def _add_preprocessing(estimator):\n return pipeline.Pipeline(\n steps=[\n (\"mvi\", impute.SimpleImputer()),\n (\"std\", preprocessing.StandardScaler()),\n (\"ml\", estimator)\n ]\n )\n\n def _get_params_random(self, model_type, is_cl, with_preprocessing):\n if model_type == \"linear\":\n ml_params = dict(\n penalty=[\"l1\", \"l2\"],\n C=stats.loguniform(1e-5, 10)\n ) if is_cl else dict(alpha=stats.loguniform(1e-5, 10))\n else:\n ml_params = dict(max_depth=list(range(5, 16)))\n return self._convert_ml_params(ml_params) if with_preprocessing else ml_params\n\n def _get_params_grid(self, model_type, is_cl, with_preprocessing):\n if model_type == \"linear\":\n ml_params = dict(\n penalty=[\"l1\", \"l2\"],\n C=[1e-5, 1e-3]\n ) if is_cl else dict(alpha=[1e-5, 1e-3, 1e-1, 10])\n else:\n ml_params = dict(max_depth=[5, 8, 11, 14])\n return self._convert_ml_params(ml_params) if with_preprocessing else ml_params\n\n @staticmethod\n def _convert_ml_params(ml_params):\n return {\"{}__{}\".format(\"ml\", k): v for k, v in ml_params.items()}\n\n def _test_basic_flow_sv_ratio(self, cv, is_cl):\n df_training = utils.get_input_df(100)\n df_test = utils.get_input_df(10)\n target_column = \"target_cl\" if is_cl else \"target_rg\"\n feature_columns = [\"column{}\".format(i) for i in range(6)]\n model = cv.fit_sv_pandas(df_training, target_column, feature_columns, ratio_training=0.8)\n self._assert_prediction(model, df_test, is_cl)\n\n def _test_basic_flow_sv_2dfs(self, cv, is_cl):\n df_training = utils.get_input_df(100)\n df_validation = utils.get_input_df(100)\n df_test = utils.get_input_df(10)\n target_column = \"target_cl\" if is_cl else \"target_rg\"\n feature_columns = [\"column{}\".format(i) for i in range(6)]\n model = cv.fit_sv_pandas(df_training, target_column, feature_columns, df_validation)\n self._assert_prediction(model, df_test, is_cl)\n\n def _test_basic_flow_cv(self, cv, is_cl, n_fold):\n df_training = utils.get_input_df(100)\n df_test = utils.get_input_df(10)\n target_column = \"target_cl\" if is_cl else \"target_rg\"\n feature_columns = [\"column{}\".format(i) for i in range(6)]\n model = cv.fit_cv_pandas(df_training, target_column, feature_columns, n_fold=n_fold)\n self._assert_prediction(model, df_test, is_cl)\n\n def _assert_prediction(self, model, df_test, is_cl):\n pred_df = model.predict(df_test)\n expected_columns = [\"score\", \"id1\", \"id2\", \"target_cl\", \"target_rg\"]\n if is_cl:\n expected_columns.insert(1, \"predicted_class\")\n self.assertListEqual(list(pred_df.columns), expected_columns)\n self.assertEqual(len(pred_df), 
10)\n","sub_path":"0614/Lib/site-packages/tests/test_pandas_cv.py","file_name":"test_pandas_cv.py","file_ext":"py","file_size_in_byte":5616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"189683842","text":"import numpy as np\nimport torch.nn as nn\nimport torch\n\n\nclass SimpleMedian:\n \"\"\"\n k-nearest neighbor baseline to infer number of visits\n \"\"\"\n\n def __init__(self, mean=False):\n self.mean = mean\n\n def __call__(self, data):\n \"\"\"\n Get closes feature vector in node_features and use their label\n \"\"\"\n node_features = data.x\n assert len(node_features.shape) == 2\n # assert that only one batch\n # assert len(torch.unique(data.batch)) == 1\n\n if self.mean:\n avg_label = torch.mean(node_features[:, -1])\n else:\n avg_label = torch.median(node_features[:, -1])\n return avg_label\n","sub_path":"predict_visits/baselines/simple_median.py","file_name":"simple_median.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"361154157","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('search', views.search, name = 'search'),\n path('/', views.meeting, name='meeting'),\n path('meeting1', views.temp, name='temp'),\n path('ajax/get_meeting_images', views.get_meeting_images, name='Ajax request for getting images'),\n path('ajax/get_meeting_details', views.get_meeting_details, name='Ajax request for getting meeting details'),\n path('ajax/get_search_results', views.get_search_results, name='Ajax request for getting search results'),\n path('ajax/get_index_page', views.get_index_page, name='Ajax request for getting index page')\n]","sub_path":"second/meetings/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"568210097","text":"import codecs\nimport logging\nimport os\nimport tempfile\nfrom builtins import range, str, zip\nfrom collections import namedtuple\n\nfrom fonduer.candidates.models import Candidate\nfrom fonduer.meta import Meta, new_sessionmaker\nfrom fonduer.utils.udf import UDF, UDFRunner\nfrom fonduer.utils.utils import remove_files\nfrom fonduer.utils.utils_annotations import (\n array_tsv_escape,\n copy_postgres,\n get_sql_name,\n load_annotation_matrix,\n table_exists,\n)\n\n# Used to conform to existing annotation key API call\n# Note that this annotation matrix class cannot be replaced with snorkel one\n# since we do not have ORM-backed key objects but rather a simple python list.\n_TempKey = namedtuple(\"TempKey\", [\"id\", \"name\"])\n\n# Grab a pointer to the global vars\n_meta = Meta.init()\n\nlogger = logging.getLogger(__name__)\nsegment_dir = tempfile.gettempdir()\n\n\ndef _to_annotation_generator(fns):\n \"\"\"\"\n Generic method which takes a set of functions, and returns a generator that\n yields function.__name__, function result pairs.\n \"\"\"\n\n def fn_gen(c):\n for f in fns:\n yield f.__name__, f(c)\n\n return fn_gen\n\n\ndef _segment_filename(db_name, table_name, job_id, start=None, end=None):\n suffix = \"*\"\n if start is not None:\n suffix = str(start)\n if end is not None:\n suffix += \"-\" + str(end)\n return \"%s_%s_%s_%s.tsv\" % (db_name, table_name, job_id, suffix)\n\n\nclass AnnotatorUDF(UDF):\n def __init__(self, f, **kwargs):\n self.anno_generator = (\n _to_annotation_generator(f) if 
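The search spaces in the cross-validation tests above combine a fixed grid for penalties with stats.loguniform for C. The same idea with plain scikit-learn, rather than the sklearn_cv_pandas wrappers the tests exercise:

    from scipy import stats
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import RandomizedSearchCV

    params = dict(penalty=["l1", "l2"], C=stats.loguniform(1e-5, 10))
    search = RandomizedSearchCV(LogisticRegression(solver="liblinear"),
                                params, scoring="roc_auc", n_iter=5)
    # search.fit(X, y) would sample five hyperparameter draws and keep the best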
hasattr(f, \"__iter__\") else f\n )\n super(AnnotatorUDF, self).__init__(**kwargs)\n\n def apply(self, batch_range, table_name, split, cache, **kwargs):\n \"\"\"\n Applies a given function to a range of candidates\n\n Note: Accepts a id_range as argument, because of issues with putting\n Candidate subclasses into Queues (can't pickle...)\n \"\"\"\n start, end = batch_range\n file_name = _segment_filename(_meta.DBNAME, table_name, split, self.worker_id)\n segment_path = os.path.join(segment_dir, file_name)\n candidates = (\n self.session.query(Candidate)\n .filter(Candidate.split == split)\n .order_by(Candidate.id)\n .slice(start, end)\n )\n with codecs.open(segment_path, \"a+\", encoding=\"utf-8\") as writer:\n if not cache:\n for i, candidate in enumerate(candidates):\n # Runs the actual extraction function\n nonzero_kvs = [\n (k, v) for k, v in self.anno_generator(candidate) if v != 0\n ]\n if nonzero_kvs:\n keys, values = list(zip(*nonzero_kvs))\n else:\n keys = values = []\n row = [\n str(candidate.id),\n array_tsv_escape(keys),\n array_tsv_escape(values),\n ]\n writer.write(\"\\t\".join(row) + \"\\n\")\n else:\n nonzero_kv_dict = {}\n for id, k, v in self.anno_generator(list(candidates)):\n if id not in nonzero_kv_dict:\n nonzero_kv_dict[id] = []\n if v != 0:\n nonzero_kv_dict[id].append((k, v))\n for i, candidate in enumerate(candidates):\n nonzero_kvs = nonzero_kv_dict[candidate.id]\n if nonzero_kvs:\n keys, values = list(zip(*nonzero_kvs))\n else:\n keys = values = []\n row = [\n str(candidate.id),\n array_tsv_escape(keys),\n array_tsv_escape(values),\n ]\n writer.write(\"\\t\".join(row) + \"\\n\")\n # This return + yield combination results in a purely empty generator\n # function. Specifically, the yield turns the function into a generator,\n # and the return terminates the generator before yielding anything.\n return\n yield\n\n\nclass Annotator(UDFRunner):\n \"\"\"Abstract class for annotating candidates and persisting these\n annotations to DB.\n \"\"\"\n\n def __init__(self, candidate_type, annotation_type, f, batch_size=50, **kwargs):\n self.candidate_type = candidate_type\n if isinstance(candidate_type, type):\n candidate_type = candidate_type.__name__\n self.table_name = get_sql_name(candidate_type) + \"_\" + annotation_type\n self.key_table_name = self.table_name + \"_keys\"\n self.annotation_type = annotation_type\n self.batch_size = batch_size\n super(Annotator, self).__init__(AnnotatorUDF, f=f, **kwargs)\n\n def apply(\n self,\n split,\n key_group=0,\n replace_key_set=True,\n update_keys=False,\n update_values=True,\n storage=None,\n ignore_keys=[],\n **kwargs\n ):\n if update_keys:\n replace_key_set = False\n # Get the cids based on the split, and also the count\n Session = new_sessionmaker()\n session = Session()\n\n # NOTE: In the current UDFRunner implementation, we load all these into\n # memory and fill a multiprocessing JoinableQueue with them before\n # starting... so might as well load them here and pass in. 
Also, if we\n # try to pass in a query iterator instead, with AUTOCOMMIT on, we get a\n # TXN error...\n candidates = (\n session.query(Candidate)\n .filter(Candidate.type == self.candidate_type.__tablename__)\n .filter(Candidate.split == split)\n .all()\n )\n cids_count = len(candidates)\n if cids_count == 0:\n raise ValueError(\"No candidates in current split\")\n\n # Setting up job batches\n chunks = cids_count // self.batch_size\n batch_range = [\n (i * self.batch_size, (i + 1) * self.batch_size) for i in range(chunks)\n ]\n remainder = cids_count % self.batch_size\n if remainder:\n batch_range.append((chunks * self.batch_size, cids_count))\n\n old_table_name = None\n table_name = self.table_name\n # Run the Annotator\n with _meta.engine.connect() as con:\n table_already_exists = table_exists(con, table_name)\n if update_values and table_already_exists:\n # Now we extract under a temporary name for merging\n old_table_name = table_name\n table_name += \"_updates\"\n\n segment_file_blob = os.path.join(\n segment_dir, _segment_filename(_meta.DBNAME, self.table_name, split)\n )\n remove_files(segment_file_blob)\n cache = True if self.annotation_type == \"feature\" else False\n super(Annotator, self).apply(\n batch_range,\n table_name=self.table_name,\n split=split,\n cache=cache,\n **kwargs\n )\n\n # Insert and update keys\n if not table_already_exists or old_table_name:\n con.execute(\n \"CREATE TABLE %s(candidate_id integer PRIMARY KEY, \"\n \"keys text[] NOT NULL, values real[] NOT NULL)\" % table_name\n )\n copy_postgres(segment_file_blob, table_name, \"candidate_id, keys, values\")\n remove_files(segment_file_blob)\n\n # Replace the LIL table with COO if requested\n if storage == \"COO\":\n temp_coo_table = table_name + \"_COO\"\n con.execute(\n \"CREATE TABLE %s AS \"\n \"(SELECT candidate_id, UNNEST(keys) as key, \"\n \"UNNEST(values) as value from %s)\" % (temp_coo_table, table_name)\n )\n con.execute(\"DROP TABLE %s\" % table_name)\n con.execute(\n \"ALTER TABLE %s RENAME TO %s\" % (temp_coo_table, table_name)\n )\n con.execute(\n \"ALTER TABLE %s ADD PRIMARY KEY(candidate_id, key)\" % table_name\n )\n # Update old table\n if old_table_name:\n con.execute(\n \"INSERT INTO %s SELECT * FROM %s \"\n \"ON CONFLICT(candidate_id, key) \"\n \"DO UPDATE SET value=EXCLUDED.value\"\n % (old_table_name, table_name)\n )\n con.execute(\"DROP TABLE %s\" % table_name)\n else: # LIL\n # Update old table\n if old_table_name:\n con.execute(\n \"INSERT INTO %s AS old SELECT * FROM %s \"\n \"ON CONFLICT(candidate_id) \"\n \"DO UPDATE SET \"\n \"values=old.values || EXCLUDED.values,\"\n \"keys=old.keys || EXCLUDED.keys\" % (old_table_name, table_name)\n )\n con.execute(\"DROP TABLE %s\" % table_name)\n\n if old_table_name:\n table_name = old_table_name\n # Load the matrix\n key_table_name = self.key_table_name\n if key_group:\n key_table_name = self.key_table_name + \"_\" + get_sql_name(key_group)\n\n return load_annotation_matrix(\n con,\n candidates,\n split,\n table_name,\n key_table_name,\n replace_key_set,\n storage,\n update_keys,\n ignore_keys,\n )\n\n def clear(self, session, split, replace_key_set=False, **kwargs):\n \"\"\"\n Deletes the Annotations for the Candidates in the given split.\n\n If replace_key_set=True, deletes *all* Annotations (of this Annotation\n sub-class) and also deletes all AnnotationKeys (of this sub-class)\n \"\"\"\n with _meta.engine.connect() as con:\n if split is None:\n con.execute(\"DROP TABLE IF EXISTS %s\" % self.table_name)\n elif table_exists(con, 
self.table_name):\n con.execute(\n \"DELETE FROM %s WHERE candidate_id IN \"\n \"(SELECT id FROM candidate WHERE split=%d)\"\n % (self.table_name, split)\n )\n if replace_key_set:\n con.execute(\"DROP TABLE IF EXISTS %s\" % self.key_table_name)\n\n def apply_existing(self, split, key_group=0, **kwargs):\n \"\"\"Alias for apply that emphasizes we are using an existing AnnotatorKey set.\"\"\"\n return self.apply(split, key_group=key_group, replace_key_set=False, **kwargs)\n\n def load_matrix(self, split, ignore_keys=[]):\n Session = new_sessionmaker()\n session = Session()\n candidates = session.query(Candidate).filter(Candidate.split == split).all()\n with _meta.engine.connect() as con:\n return load_annotation_matrix(\n con,\n candidates,\n split,\n self.table_name,\n self.key_table_name,\n False,\n None,\n False,\n ignore_keys,\n )\n","sub_path":"fonduer/utils/annotator.py","file_name":"annotator.py","file_ext":"py","file_size_in_byte":11436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"113697110","text":"import os\nimport time\nfrom collections import defaultdict\nfrom functools import wraps\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nimport torch.utils.data\nfrom torchvision import transforms, datasets\n\nfrom utils.caltech import Caltech256, Caltech10\nfrom utils.constants import DATA_DIR, MODELS_DIR, BATCH_SIZE\nfrom utils.datasubset import MNIST56, FashionMNIST56, CIFAR10_56\nfrom utils.normalize import NormalizeFromDataset\n\n\ndef set_seed(seed: int):\n import random\n import numpy as np\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n\n\ndef timer_profile(func):\n \"\"\"\n For debug purposes only.\n \"\"\"\n func_duration = defaultdict(list)\n\n @wraps(func)\n def wrapped(*args, **kwargs):\n start = time.time()\n res = func(*args, **kwargs)\n elapsed = time.time() - start\n elapsed *= 1e3\n func_duration[func.__name__].append(elapsed)\n print(f\"{func.__name__} {elapsed: .3f} (mean: {np.mean(func_duration[func.__name__]): .3f}) ms\")\n return res\n\n return wrapped\n\n\ndef get_data_loader(dataset: str, train=True, batch_size=BATCH_SIZE) -> torch.utils.data.DataLoader:\n if dataset == \"MNIST56\":\n dataset = MNIST56(train=train)\n elif dataset == \"FashionMNIST56\":\n dataset = FashionMNIST56(train=train)\n elif dataset == \"CIFAR10_56\":\n dataset = CIFAR10_56(train=train)\n elif dataset == \"Caltech256\":\n dataset = Caltech256(train=train)\n elif dataset == \"Caltech10\":\n dataset = Caltech10(train=train)\n else:\n if dataset == \"MNIST\":\n dataset_class = datasets.MNIST\n elif dataset == \"FashionMNIST\":\n dataset_class = datasets.FashionMNIST\n elif dataset == \"CIFAR10\":\n dataset_class = datasets.CIFAR10\n else:\n raise NotImplementedError()\n transform = transforms.Compose([transforms.ToTensor(), NormalizeFromDataset(dataset_cls=dataset_class)])\n dataset = dataset_class(DATA_DIR, train=train, download=True, transform=transform)\n num_workers = int(os.environ.get('LOADER_WORKERS', 4))\n loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)\n return loader\n\n\ndef load_model_state(dataset_name: str, model_name: str):\n model_path = MODELS_DIR.joinpath(dataset_name, Path(model_name).with_suffix('.pt'))\n if not model_path.exists():\n return None\n return 
torch.load(model_path)\n","sub_path":"utils/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"607986866","text":"import matplotlib.pyplot as plt #For plotting\r\nfrom math import sin, pi #For generating input signals\r\n\r\n### Filter - 6 kHz -> 8 kHz Bandpass Filter\r\n### @param [in] input - input unfiltered signal\r\n### @param [out] output - output filtered signal\r\ndef filter(x):\r\n    y = [0]*48000\r\n    for n in range(4, len(x)):\r\n        y[n] = 0.0101*x[n] - 0.0202*x[n-2] + 0.0101*x[n-4] + 2.4354*y[n-1] - 3.1869*y[n-2] + 2.0889*y[n-3] - 0.7368*y[n-4]\r\n    return y\r\n\r\n\r\nfrequency = int(input(\"Please input the frequency: \"))\r\n\r\n### Create empty arrays\r\ninput = [0]*48000\r\noutput = [0]*48000\r\n\r\n### Fill array with xxxHz signal\r\nfor i in range(48000):\r\n    input[i] = sin(2 * pi * frequency * i / 48000) #+ sin(2 * pi * 70 * i / 48000)\r\n\r\n### Run the signal through the filter\r\noutput = filter(input)\r\n\r\n### Grab samples from input and output #1/100th of a second\r\noutput_section = output[0:480]\r\ninput_section = input[0:480]\r\n\r\n### Plot the signals for comparison\r\nplt.figure(1)\r\nplt.subplot(211)\r\nplt.ylabel('Magnitude')\r\nplt.xlabel('Samples')\r\nplt.title('Unfiltered Signal')\r\nplt.plot(input_section)\r\nplt.subplot(212)\r\nplt.ylabel('Magnitude')\r\nplt.xlabel('Samples')\r\nplt.title('Filtered Signal')\r\nplt.plot(output_section)\r\nplt.show()","sub_path":"filterExample.py","file_name":"filterExample.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
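The hand-rolled difference equation in filterExample.py above is a 4th-order IIR filter. Its coefficients can be cross-checked against scipy.signal.lfilter, where the feedback terms enter with flipped signs (assumes scipy is available; the 7 kHz test tone is an arbitrary choice inside the passband):

    import numpy as np
    from scipy.signal import lfilter

    b = [0.0101, 0.0, -0.0202, 0.0, 0.0101]      # feedforward, as in y[n] = 0.0101*x[n] - ...
    a = [1.0, -2.4354, 3.1869, -2.0889, 0.7368]  # feedback, negated per lfilter's convention
    n = np.arange(48000)
    x = np.sin(2 * np.pi * 7000 * n / 48000)
    y = lfilter(b, a, x)                         # should match filter(x) above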
+{"seq_id":"129159218","text":"import os\r\nimport random\r\nfrom PIL import Image\r\nimport tensorflow as tf\r\nimport numpy as np\r\n\r\n\r\ndata_dir = 'train_images/'\r\nLABEL_FILE = 'labels.txt'\r\n# Default image size\r\nIMAGE_SIZE = 32\r\nnum_classes = 10\r\n\r\n\r\ndef get_filepaths_and_labels(data_dir):\r\n    \"\"\"\r\n    Collect image file paths and labels.\r\n    :param data_dir:\r\n    :return: [filepaths], [labels_dict: key is the label, value is its index]\r\n    \"\"\"\r\n    if not os.path.exists(data_dir):\r\n        raise ValueError('cannot find the dir: ' + data_dir)\r\n\r\n    filepaths = []\r\n    labels_dict = {}\r\n\r\n    index = 0\r\n    for labeldir in os.listdir(data_dir):\r\n        namedir = os.path.join(data_dir, labeldir)\r\n        if os.path.isfile(namedir):\r\n            continue\r\n        for file in os.listdir(namedir):\r\n            file = os.path.join(namedir, file)\r\n\r\n            # Images smaller than 4 KB may be incomplete; skip them\r\n            # if os.path.getsize(file) / 1024 < 4:\r\n            #     continue\r\n            filepaths.append(file)\r\n            if labeldir not in labels_dict:\r\n                labels_dict[labeldir] = index\r\n                index = index + 1\r\n    print(labels_dict)\r\n    return filepaths, labels_dict\r\n\r\n\r\ndef write_label_file(labels_dict, label_file):\r\n    \"\"\"\r\n    Write each label and its index to a file.\r\n    :param labels_dict:\r\n    :param label_file:\r\n    :return:\r\n    \"\"\"\r\n    with tf.gfile.Open(label_file, 'w') as f:\r\n        for label in labels_dict:\r\n            num = labels_dict[label]\r\n            f.write('%d:%s\\n' % (num, label))\r\n\r\n\r\ndef get_images_labels(filepaths, labels_dict, batch_size):\r\n    \"\"\"\r\n    Load images and their labels.\r\n    :param filepaths\r\n    :param labels_dict\r\n    :param batch_size\r\n    :return [imgs], [labels]\r\n    \"\"\"\r\n    imgs = []\r\n    labels = []\r\n    batch_size = min(len(filepaths), batch_size)\r\n    print(\"Number of images:\", len(filepaths))\r\n    for j in range(batch_size):\r\n        img = Image.open(filepaths[j])\r\n        img = img.resize((IMAGE_SIZE, IMAGE_SIZE))\r\n        img = np.array(img)\r\n\r\n        # Use the directory name as the label\r\n        img_label = os.path.split(os.path.dirname(filepaths[j]))[1]\r\n        img_label = labels_dict[img_label]\r\n\r\n        imgs.append(img)\r\n        labels.append(img_label)\r\n    imgs = np.array(imgs)\r\n    return imgs, labels\r\n\r\n\r\ndef re_imgs_labes(imgs, labels):\r\n    batch_size = len(imgs)\r\n\r\n    # Reshape images to a consistent shape\r\n    imgs = imgs.reshape([batch_size, IMAGE_SIZE, IMAGE_SIZE, 3])\r\n    # reimgs = imgs * (1. / 255) - 0.5\r\n\r\n    # Convert labels to one-hot encoding\r\n    labels = tf.one_hot(labels, num_classes, 1, 0)\r\n    labels = tf.cast(labels, dtype=tf.int32)\r\n    labels = tf.reshape(labels, [batch_size, num_classes])\r\n\r\n    # Convert labels to a numpy array\r\n    sess = tf.Session()\r\n    with sess.as_default():\r\n        relabels = labels.eval()\r\n\r\n    return imgs, relabels\r\n\r\n\r\ndef read_images_labels(data_dir, batch_size=1000, shuffle=True):\r\n    # Get file paths and the label dictionary\r\n    data_paths, labels_dict = get_filepaths_and_labels(data_dir)\r\n\r\n    # Shuffle depending on where the images come from\r\n    if shuffle:\r\n        random.seed(0)\r\n        random.shuffle(data_paths)\r\n    filepath = data_paths\r\n    imgs, labels = get_images_labels(filepath, labels_dict, batch_size)\r\n    # print(imgs[0])\r\n    # print(labels[0])\r\n\r\n    reimgs, relabels = re_imgs_labes(imgs, labels)\r\n\r\n    # Write the label dictionary to a file\r\n    write_label_file(labels_dict, LABEL_FILE)\r\n    print('finish')\r\n    return reimgs, relabels\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    read_images_labels(data_dir)","sub_path":"深度学习课程代码/代码/图片分类识别/read_images_labels.py","file_name":"read_images_labels.py","file_ext":"py","file_size_in_byte":3505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"329364629","text":"from PySide.QtCore import *\nfrom PySide.QtWebKit import *\nfrom PySide.QtGui import *\nimport os\nimport sys\nimport webbrowser\n\nclass HelpWindow(QMainWindow):\n    def __init__(self, parent=None):\n        super(HelpWindow,self).__init__(parent)\n\n        self.setWindowTitle(\"Facepager 3.0 - Help\")\n        self.setMinimumWidth(600);\n        self.setMinimumHeight(600);\n        central = QWidget()\n        self.setCentralWidget(central)\n        vLayout = QVBoxLayout(central)\n        self.browser = QWebView(central)\n\n        if getattr(sys, 'frozen', False):\n            application_path = os.path.dirname(sys.executable)\n        elif __file__:\n            application_path = os.path.dirname(__file__)\n\n        #self.loadPage()\n\n        vLayout.addWidget(self.browser)\n        hLayout = QHBoxLayout()\n        vLayout.addLayout(hLayout)\n        hLayout.addStretch(5)\n        dismiss = QPushButton(central)\n        dismiss.setText(\"Close\")\n        dismiss.clicked.connect(self.hide)\n        hLayout.addWidget(dismiss)\n        #browser.setBackgroundRole(QPalette.Window)\n\n    def show(self):\n        super(HelpWindow,self).show()\n        self.loadPage()\n\n\n    def loadPage(self):\n        self.browser.load(QUrl(\"http://htmlpreview.github.io/?https://github.com/strohne/Facepager/blob/master/src/help/help.html\"))\n        self.browser.page().setLinkDelegationPolicy(QWebPage.DelegateExternalLinks)\n        self.browser.page().linkClicked.connect(self.linkClicked)\n\n\n    def linkClicked(self,url):\n        url = url.toString()\n        if url.startswith(\"http://htmlpreview.github.io/?https://github.com/strohne/Facepager/blob/master/src/help/help.html\"):\n            self.browser.load(url)\n        else:\n            webbrowser.open(url)\n","sub_path":"src/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"516457227","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nNoneType = type(None)\n\nfrom collections import Mapping\nfrom ..helpers import escape as 
strescape\nfrom ..compat import text_type, PY3\nfrom collections import deque\n\nfrom markupsafe import escape\n\n\nclass _TK_buffer(object):\n def __init__(self):\n self._buffer = buffer = []\n e = buffer.extend\n a = buffer.append\n\n def do_output(*objs):\n for obj in objs:\n if obj.__class__ is _TK_buffer:\n e(obj._buffer)\n else:\n a(text_type(obj))\n\n self.output = do_output\n def output_boolean_attr(name, value):\n t = type(value)\n if t in (bool, NoneType):\n if bool(value):\n do_output(' ' + name + '=\"' + name + '\"')\n\n # skip on false, None\n return\n\n do_output(' ' + name + '=\"')\n do_output(escape(value))\n do_output('\"')\n\n self.output_boolean_attr = output_boolean_attr\n\n\n def __call__(self, *a):\n self.output(*a)\n\n\n def __html__(self):\n return self\n\n\n def join(self):\n return ''.join(self._buffer)\n\n\n if PY3:\n __str__ = join\n\n else:\n __unicode__ = join\n def __str__(self):\n return self.join().encode('UTF-8')\n\nBuffer = _TK_buffer\n\ntry:\n from ._buffer import Buffer as _Buffer, _set_escape_method\n _set_escape_method(escape)\n Buffer = _Buffer\n del _Buffer\n del _set_escape_method\nexcept ImportError as e:\n pass\n\n\ndef output_attrs(values):\n if not values:\n return ''\n\n if not isinstance(values, Mapping):\n values = iter(values)\n else:\n values = values.items()\n\n rv = Buffer()\n for k, v in values:\n if v in (True, False, None):\n if v:\n v = k\n else:\n continue\n\n rv(' ')\n rv(k)\n rv('=\"')\n rv(escape(v))\n rv('\"')\n\n return rv\n\n\ndef import_defs(href):\n return {}\n\n\ndef bind(context, block=False):\n \"\"\"\n Given the context, returns a decorator wrapper;\n the binder replaces the wrapped func with the\n value from the context OR puts this function in\n the context with the name.\n \"\"\"\n\n if block:\n def decorate(func):\n name = func.__name__.replace('__TK__block__', '')\n if name not in context:\n context[name] = func\n return context[name]\n\n return decorate\n\n def decorate(func):\n name = func.__name__\n if name not in context:\n context[name] = func\n return context[name]\n\n return decorate\n\n\nclass ImportedTemplate(object):\n def __init__(self, name):\n self.__name = name\n\n def __repr__(self):\n return \"\" % self.name\n\n\nclass TonnikalaRuntime(object):\n bind = staticmethod(bind)\n Buffer = staticmethod(Buffer)\n output_attrs = staticmethod(output_attrs)\n escape = staticmethod(escape)\n\n def __init__(self):\n self.loader = None\n\n def load(self, href):\n return self.loader.load(href)\n\n def import_defs(self, context, href):\n modified_context = context.copy()\n self.loader.load(href).bind(modified_context)\n container = ImportedTemplate(href)\n\n for k, v in modified_context.items():\n # modified\n if k in context and context[k] is v:\n continue\n\n setattr(container, k, v)\n\n return container\n","sub_path":"tonnikala/runtime/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":3605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"501395499","text":"from __future__ import division # floating point division\nimport numpy as np\nimport utilities as utils\n\nclass Classifier:\n \"\"\"\n Generic classifier interface; returns random classification\n Assumes y in {0,1}, rather than {-1, 1}\n \"\"\"\n\n def __init__( self, parameters={} ):\n \"\"\" Params can contain any useful parameters for the algorithm \"\"\"\n self.params = {}\n\n def reset(self, parameters):\n \"\"\" Reset learner \"\"\"\n self.resetparams(parameters)\n\n def 
resetparams(self, parameters):\n \"\"\" Can pass parameters to reset with new parameters \"\"\"\n try:\n utils.update_dictionary_items(self.params,parameters)\n except AttributeError:\n # Variable self.params does not exist, so not updated\n # Create an empty set of params for future reference\n self.params = {}\n\n def getparams(self):\n return self.params\n\n def learn(self, Xtrain, ytrain):\n \"\"\" Learns using the traindata \"\"\"\n\n def predict(self, Xtest):\n probs = np.random.rand(Xtest.shape[0])\n ytest = utils.threshold_probs(probs)\n return ytest\n\nclass LinearRegressionClass(Classifier):\n \"\"\"\n Linear Regression with ridge regularization\n Simply solves (X.T X/t + lambda eye)^{-1} X.T y/t\n \"\"\"\n def __init__( self, parameters={} ):\n self.params = {'regwgt': 0.01}\n self.reset(parameters)\n\n def reset(self, parameters):\n self.resetparams(parameters)\n self.weights = None\n\n def learn(self, Xtrain, ytrain):\n \"\"\" Learns using the traindata \"\"\"\n # Ensure ytrain is {-1,1}\n yt = np.copy(ytrain)\n yt[yt == 0] = -1\n\n # Dividing by numsamples before adding ridge regularization\n # for additional stability; this also makes the\n # regularization parameter not dependent on numsamples\n # if want regularization disappear with more samples, must pass\n # such a regularization parameter lambda/t\n numsamples = Xtrain.shape[0]\n self.weights = np.dot(np.dot(np.linalg.pinv(np.add(np.dot(Xtrain.T,Xtrain)/numsamples,self.params['regwgt']*np.identity(Xtrain.shape[1]))), Xtrain.T),yt)/numsamples\n\n def predict(self, Xtest):\n ytest = np.dot(Xtest, self.weights)\n ytest[ytest > 0] = 1\n ytest[ytest < 0] = 0\n return ytest\n\nclass NaiveBayes(Classifier):\n \"\"\" Gaussian naive Bayes; \"\"\"\n\n def __init__(self, parameters={}):\n \"\"\" Params can contain any useful parameters for the algorithm \"\"\"\n # Assumes that a bias unit has been added to feature vector as the last feature\n # If usecolumnones is False, it should ignore this last feature\n self.params = {'usecolumnones': True}\n self.reset(parameters)\n\n def reset(self, parameters):\n self.resetparams(parameters)\n self.means = []\n self.stds = []\n self.numfeatures = 0\n self.numclasses = 0\n\n def learn(self, Xtrain, ytrain):\n \"\"\"\n In the first code block, you should set self.numclasses and\n self.numfeatures correctly based on the inputs and the given parameters\n (use the column of ones or not).\n\n In the second code block, you should compute the parameters for each\n feature. 
In this case, they're mean and std for Gaussian distribution.\n \"\"\"\n\n ### YOUR CODE HERE\n self.numclasses = 2\n self.numfeatures = 9\n\n # prior\n p_0 = 0\n p_1 = 0\n for i in range (len(ytrain)):\n if ytrain[i] == 1:\n p_1 += 1\n else:\n p_0 += 1\n self.p_0 = p_0/len(ytrain)\n self.p_1 = p_1/len(ytrain)\n # print(self.p_0,self.p_1)\n ### END YOUR CODE\n\n origin_shape = (self.numclasses, self.numfeatures)\n self.means = np.zeros(origin_shape)\n self.stds = np.zeros(origin_shape)\n\n ### YOUR CODE HERE\n self.mean = np.mean(Xtrain, axis=0)\n self.std = np.std(Xtrain, axis=0)\n\n self.class_mean = np.zeros((self.numclasses, self.numfeatures))\n self.class_std = np.zeros((self.numclasses, self.numfeatures))\n n = Xtrain.shape[0]\n\n # print(ytrain)\n class_0 = []\n class_1 = []\n\n for i in range (len(ytrain)):\n if ytrain[i] == 1:\n self.class_1 = class_1.append(Xtrain[i])\n # count += 1\n elif ytrain[i] == 0:\n self.class_0 = class_0.append(Xtrain[i])\n # print (count)\n \n for j in range (self.numfeatures):\n mean = []\n for i in range (p_0):\n mean.append(class_0[i][j])\n self.class_mean[0][j] = np.mean(mean)\n self.class_std[0][j] = np.std(mean)\n mean = []\n for i in range(p_1):\n mean.append(class_1[i][j])\n self.class_mean[1][j] = np.mean(mean)\n self.class_std[1][j] = np.std(mean)\n\n # print(self.class_mean[0],self.class_std[0])\n # print(self.class_mean[1],self.class_std[1])\n ### END YOUR CODE\n\n assert self.means.shape == origin_shape\n assert self.stds.shape == origin_shape\n\n def predict(self, Xtest):\n \"\"\"\n Use the parameters computed in self.learn to give predictions on new\n observations.\n \"\"\"\n ytest = np.zeros(Xtest.shape[0], dtype=int)\n \n ### YOUR CODE HERE\n y = np.ones((self.numclasses, Xtest.shape[0]))\n h =[]\n for i in range (Xtest.shape[0]):\n for j in range (self.numfeatures):\n if self.class_std[0][j] == 0:\n # print(\"00\")\n y[0][i] = y[0][i] * 1\n else:\n # print(\"0\")\n y[0][i] = y[0][i] * (1.0/np.sqrt(2*np.pi*(self.class_std[0][j]**2))) * np.exp(-1.0*np.square(Xtest[i][j]-self.class_mean[0][j])/(2*(self.class_std[0][j]**2)))\n if self.class_std[1][j] == 0:\n # print(\"10\")\n y[1][i] = y[1][i] * 1\n else:\n # print(\"1\")\n y[1][i] = y[1][i] * (1.0/np.sqrt(2*np.pi*(self.class_std[1][j]**2))) * np.exp(-1.0*np.square(Xtest[i][j]-self.class_mean[1][j])/(2*(self.class_std[1][j]**2)))\n y[0][i] = y[0][i] * self.p_0\n y[1][i] = y[1][i] * self.p_1\n # print(\"y\")\n # print(y)\n for i in range (Xtest.shape[0]):\n if y[1][i] >= y[0][i]:\n ytest[i] = 1\n else:\n ytest[i] = 0 \n # print(\"ytest\") \n # print (ytest)\n ### END YOUR CODE\n\n assert len(ytest) == Xtest.shape[0]\n return ytest\n\nclass LogitReg(Classifier):\n\n def __init__(self, parameters={}):\n # Default: no regularization\n self.params = {'regwgt': 0.0, 'regularizer': 'None'}\n self.reset(parameters)\n\n def reset(self, parameters):\n self.resetparams(parameters)\n self.weights = None\n if self.params['regularizer'] is 'l1':\n self.regularizer = (utils.l1, utils.dl1)\n elif self.params['regularizer'] is 'l2':\n self.regularizer = (utils.l2, utils.dl2)\n else:\n self.regularizer = (lambda w: 0, lambda w: np.zeros(w.shape,))\n \n def sigmoid(self, x):\n ''' sigmoid function '''\n y = 1.0/(1+np.exp(-1.0*x))\n\n return y\n\n def logit_cost(self, theta, X, y):\n \"\"\"\n Compute cost for logistic regression using theta as the parameters.\n \"\"\"\n\n cost = 0.0\n\n ### YOUR CODE HERE\n # print(\"--1\")\n p_1 = utils.sigmoid(np.dot(theta, X))\n # print (p_1)\n cost = y*np.log(p_1) + 
(1-y)*np.log(1-p_1) \n # + 0.5*self.params['regwgt']*np.dot(theta, theta)\n cost = cost[0]\n ### END YOUR CODE\n\n return cost\n\n def logit_cost_grad(self, theta, X, y):\n \"\"\"\n Compute gradients of the cost with respect to theta.\n \"\"\"\n\n grad = np.zeros(len(theta))\n\n ### YOUR CODE HERE\n # print (X.shape, y.shape)\n p_1 = utils.sigmoid(np.dot(X,theta))\n # print (p_1.shape, y.shape, X.shape)\n\n # regularizer\n # grad = p_1 - y + self.params['regwgt'] * theta\n grad = p_1 - y \n ### END YOUR CODE\n\n return grad\n\n def learn(self, Xtrain, ytrain):\n \"\"\"\n Learn the weights using the training data\n \"\"\"\n\n self.weights = np.zeros(Xtrain.shape[1],)\n\n ### YOUR CODE HERE\n epochs = 100\n stepsize = 0.01\n numsamples = Xtrain.shape[0]\n for i in range (epochs):\n # shuffle data points from 1, ..., numbsamples\n arr = np.arange(numsamples)\n np.random.shuffle(arr)\n for j in arr:\n gradient = np.dot(self.logit_cost_grad(self.weights, Xtrain[j], ytrain[j]), Xtrain[j])\n # print (gradient)\n stepsize = 0.01/(1+i) \n self.weights = self.weights-stepsize*gradient\n ### END YOUR CODE\n\n def predict(self, Xtest):\n \"\"\"\n Use the parameters computed in self.learn to give predictions on new\n observations.\n \"\"\"\n ytest = np.zeros(Xtest.shape[0], dtype=int)\n\n ### YOUR CODE HERE\n # print(\"22\")\n # print (Xtest.shape, self.weights.shape)\n h = utils.sigmoid(np.dot(Xtest, self.weights))\n for i in range (Xtest.shape[0]):\n if h[i] >= 0.5:\n ytest[i] = 1\n else:\n ytest[i] = 0\n\n ### END YOUR CODE\n\n assert len(ytest) == Xtest.shape[0]\n return ytest\n\nclass NeuralNet(Classifier):\n \"\"\" Implement a neural network with a single hidden layer. Cross entropy is\n used as the cost function.\n\n Parameters:\n nh -- number of hidden units\n transfer -- transfer function, in this case, sigmoid\n stepsize -- stepsize for gradient descent\n epochs -- learning epochs\n\n Note:\n 1) feedforword will be useful! Make sure it can run properly.\n 2) Implement the back-propagation algorithm with one layer in ``backprop`` without\n any other technique or trick or regularization. However, you can implement\n whatever you want outside ``backprob``.\n 3) Set the best params you find as the default params. 
The performance with\n the default params will affect the points you get.\n \"\"\"\n def __init__(self, parameters={}):\n self.params = {'nh': 16,\n 'transfer': 'sigmoid',\n 'stepsize': 0.01,\n 'epochs': 10}\n self.reset(parameters)\n\n def reset(self, parameters):\n self.resetparams(parameters)\n if self.params['transfer'] is 'sigmoid':\n self.transfer = utils.sigmoid\n self.dtransfer = utils.dsigmoid\n else:\n # For now, only allowing sigmoid transfer\n raise Exception('NeuralNet -> can only handle sigmoid transfer, must set option transfer to string sigmoid')\n # self.w_input = None\n # self.w_output = None\n\n def init(self, X, Y):\n std = 1.0/np.sqrt(X.shape[1])\n self.numfeatures = X.shape[1]\n self.w_input = std * np.random.randn(self.params['nh'], self.numfeatures)\n std = 1.0/np.sqrt(self.params['nh'])\n self.w_output = std * np.random.randn(1, self.params['nh'])\n # print(self.w_input.shape, self.w_output.shape)\n\n def feedforward(self, inputs):\n \"\"\"\n Returns the output of the current neural network for the given input\n \"\"\"\n # hidden activations\n # print(self.w_input.shape, inputs.shape)\n a_hidden = self.transfer(np.dot(self.w_input, inputs)) # f2\n\n # output activations\n # print(self.w_output.shape, a_hidden.shape)\n a_output = self.transfer(np.dot(self.w_output, a_hidden)) # f1\n\n return (a_hidden, a_output)\n\n def backprop(self, x, y):\n \"\"\"\n Return a tuple ``(nabla_input, nabla_output)`` representing the gradients\n for the cost function with respect to self.w_input and self.w_output.\n \"\"\"\n\n ### YOUR CODE HERE\n # h = np.zeros(self.params['nh'])\n # for i in range ():\n # print(x.shape)\n h, y_hat = self.feedforward(x)\n # print(h.shape, y_hat.shape)\n # print(\"-----\")\n # print (self.feedforward(x))\n # print(\"-----\")\n d_1 = y_hat - y #derivative of loss\n d_2 = np.zeros(self.params['nh']) #nh no of hidden node\n nabla_output = np.zeros((1,self.params['nh']))\n nabla_input = np.zeros((self.params['nh'], self.numfeatures))\n for i in range (self.params['nh']):\n nabla_output[0][i] = d_1 * h[i]\n \n for i in range (self.params['nh']):\n # print (h.shape, self.w_output.shape)\n d_2[i] = (self.w_output[0][i] * d_1) * h[i] * (1-h[i])\n nabla_input[i] = np.dot(d_2[i], x) \n # print(self.w_output.shape, nabla_output.shape)\n # print(nabla_input, nabla_output)\n ### END YOUR CODE\n\n assert nabla_input.shape == self.w_input.shape\n assert nabla_output.shape == self.w_output.shape\n return (nabla_input, nabla_output)\n\n # TODO: implement learn and predict functions\n def learn(self, Xtrain, ytrain):\n \"\"\"\n Learn the weights using the training data\n \"\"\"\n self.init(Xtrain, ytrain)\n stepsize = 0.01 #self.params['stepsize']\n epochs = self.params['epochs']\n # nabla_input, nabla_output = self.backprop(Xtrain, ytrain)\n for i in range (10):#(epochs):\n arr = np.arange(Xtrain.shape[0])\n np.random.shuffle(arr)\n for j in arr:\n # gradient_1 = np.dot(np.subtract(np.dot(Xtrain[arr[j]].T, self.weights), ytrain[arr[j]]), Xtrain[arr[j]])\n # print (\"----\")\n # print (Xtrain[j].shape)\n gradient_1, gradient_2 = self.backprop(Xtrain[j], ytrain[j])\n # print (gradient)\n self.w_output = self.w_output - stepsize*gradient_2\n self.w_input = self.w_input - stepsize*gradient_1\n # print((self.feedforward(Xtrain[j])[1] - ytrain[j]) ** 2)\n # print(self.w_output, self.w_input)\n\n\n def predict(self, Xtest):\n \"\"\"\n Use the parameters computed in self.learn to give predictions on new\n observations.\n \"\"\"\n # print('hello')\n # print(self.w_output, 
self.w_input)\n        ytest = np.zeros(Xtest.shape[0], dtype=int)\n\n        for i in range (Xtest.shape[0]):\n            h, y = self.feedforward(Xtest[i])\n            if y >= 0.5:\n                ytest[i] = 1\n            else:\n                ytest[i] = 0\n\n        assert len(ytest) == Xtest.shape[0]\n        return ytest \n\nclass KernelLogitReg(LogitReg):\n    \"\"\" Implement kernel logistic regression.\n\n    This class should be quite similar to class LogitReg except one more parameter\n    'kernel'. You should use this parameter to decide which kernel to use (None,\n    linear or hamming).\n\n    Note:\n    1) Please use 'linear' and 'hamming' as the input of the parameter\n    'kernel'. For example, you can create a logistic regression classifier with\n    linear kernel with \"KernelLogitReg({'kernel': 'linear'})\".\n    2) Please don't introduce any randomness when computing the kernel representation.\n    \"\"\"\n    def __init__(self, parameters={}):\n        # Default: no regularization\n        self.params = {'regwgt': 0.0, 'regularizer': 'None', 'kernel': 'None'}\n        self.reset(parameters)\n\n    def reset(self, parameters):\n        self.resetparams(parameters)\n\n    def init(self, Xtrain, ytrain):\n        self.numcenters = 10\n        self.centers = Xtrain[:self.numcenters]\n\n    def linear(self, x, c):\n        '''\n        linear kernel\n        '''\n        k = 0\n        for i in range (x.shape[0]):\n            k = k + x[i]*c[i]\n        return k\n\n    def hamming(self, x, c):\n        '''\n        Hamming distance kernel\n        '''\n        k = 0\n        for i in range (len(x)):\n            if x[i] != c[i]:\n                k = k + 1\n        return k\n\n    def logit_cost(self, theta, X, y):\n        \"\"\"\n        Compute cost for logistic regression using theta as the parameters.\n        \"\"\"\n\n        cost = 0.0\n\n        for i in range (X.shape[0]):\n            cost = cost + (y[i]-1)*np.dot(X[i], theta) + np.log(utils.sigmoid(np.dot(X[i], theta)))\n\n        cost = cost/X.shape[0]*(-1.0)\n\n        return cost\n\n    def logit_cost_grad(self, theta, X, y):\n        \"\"\"\n        Compute gradients of the cost with respect to theta.\n        \"\"\"\n\n        grad = np.zeros(len(theta))\n\n        grad = utils.sigmoid(np.dot(X, theta)) - y\n        # grad = grad * stepsize \n\n        return grad\n\n    def transform(self, Xtrain):\n        '''\n        transform the data to new representation\n        '''\n        Ktrain = np.zeros((Xtrain.shape[0], self.numcenters))\n\n        for i in range (Xtrain.shape[0]):\n            for j in range (self.numcenters):\n                if self.params['kernel'] == 'linear':\n                    Ktrain[i][j] = self.linear(Xtrain[i], self.centers[j])\n                elif self.params['kernel'] == 'hamming':\n                    Ktrain[i][j] = self.hamming(Xtrain[i], self.centers[j])\n        return Ktrain\n\n    def learn(self, Xtrain, ytrain):\n        \"\"\"\n        Learn the weights using the training data.\n\n        Ktrain is the kernel representation of Xtrain.\n        \"\"\"\n        Ktrain = None\n\n        ### YOUR CODE HERE\n        self.init(Xtrain, ytrain)\n        Ktrain = self.transform(Xtrain)\n        ### END YOUR CODE\n\n        self.weights = np.zeros(Ktrain.shape[1],)\n\n        ### YOUR CODE HERE\n        epochs = 100\n        stepsize = 0.01\n        numsamples = Xtrain.shape[0]\n        for i in range (epochs):\n            # shuffle data points from 1, ..., numsamples\n            arr = np.arange(numsamples)\n            np.random.shuffle(arr)\n            for j in arr:\n                gradient = np.dot(self.logit_cost_grad(self.weights, Ktrain[j], ytrain[j]), Ktrain[j])\n                # print (gradient)\n                self.weights = self.weights-stepsize*gradient\n        ### END YOUR CODE\n\n        self.transformed = Ktrain # Don't delete this line. It's for evaluation.\n\n    # TODO: implement necessary functions\n    def predict(self, Xtest):\n        \"\"\"\n        Use the parameters computed in self.learn to give predictions on new\n        observations.\n        \"\"\"\n        ytest = np.zeros(Xtest.shape[0], dtype=int)\n\n        ktest = self.transform(Xtest)\n        ytest = utils.sigmoid(np.dot(ktest, self.weights))\n        print(ktest)\n        print(self.weights)\n        for i in range (len(ytest)):\n            if ytest[i] >= 0.5:\n                ytest[i] = 1\n            else:\n                ytest[i] = 0\n\n        return ytest \n\n# ======================================================================\n\ndef test_lr():\n    print(\"Basic test for logistic regression...\")\n    clf = LogitReg()\n    theta = np.array([0.])\n    X = np.array([[1.]])\n    y = np.array([0])\n\n    try:\n        cost = clf.logit_cost(theta, X, y)\n    except:\n        raise AssertionError(\"Incorrect input format for logit_cost!\")\n    assert isinstance(cost, float), \"logit_cost should return a float!\"\n\n    try:\n        grad = clf.logit_cost_grad(theta, X, y)\n    except:\n        raise AssertionError(\"Incorrect input format for logit_cost_grad!\")\n    assert isinstance(grad, np.ndarray), \"logit_cost_grad should return a numpy array!\"\n\n    print(\"Test passed!\")\n    print(\"-\" * 50)\n\ndef test_nn():\n    print(\"Basic test for neural network...\")\n    clf = NeuralNet()\n    X = np.array([[1., 2.], [2., 1.]])\n    y = np.array([0, 1])\n    clf.learn(X, y)\n\n    assert isinstance(clf.w_input, np.ndarray), \"w_input should be a numpy array!\"\n    assert isinstance(clf.w_output, np.ndarray), \"w_output should be a numpy array!\"\n\n    try:\n        res = clf.feedforward(X[0, :])\n    except:\n        raise AssertionError(\"feedforward doesn't work!\")\n\n    try:\n        res = clf.backprop(X[0, :], y[0])\n    except:\n        raise AssertionError(\"backprop doesn't work!\")\n\n    print(\"Test passed!\")\n    print(\"-\" * 50)\n\ndef main():\n    test_lr()\n    test_nn()\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"Assignment3/Classification/classalgorithms.py","file_name":"classalgorithms.py","file_ext":"py","file_size_in_byte":20390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
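The transform() method in the classifier record above fills the linear-kernel matrix with two nested Python loops. A minimal vectorized sketch of the same quantity, assuming Xtrain and the chosen centers are plain numpy arrays as in that record (the function name linear_kernel_matrix is illustrative, not part of the assignment code):

import numpy as np

def linear_kernel_matrix(X, centers):
    # K[i, j] = dot(X[i], centers[j]): the value KernelLogitReg.linear()
    # computes one pair at a time, produced for all pairs in one product.
    return X @ centers.T

# With numcenters = 10 as above, linear_kernel_matrix(Xtrain, Xtrain[:10])
# should match transform() for the 'linear' kernel.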
+{"seq_id":"486399159","text":"from PyQt5.QtWidgets import QApplication, QPushButton, QMainWindow, QAction\nfrom PyQt5.QtGui import QIcon\nimport sys\nfrom PyQt5 import QtGui\nfrom PyQt5.QtCore import QRect\nfrom PyQt5 import QtCore\n\nclass Window(QMainWindow):\n    def __init__(self):\n        super().__init__()\n\n        self.title = 'Toolbar window'\n        self.top = 100\n        self.left = 100\n        self.width = 680\n        self.height = 500\n        self.setWindowIcon(QIcon('img\\\\icon.png'))\n        self.InitWindow()\n\n    def InitWindow(self):\n\n        exitAct = QAction(QIcon('img\\\\exit.png'),'Exit',self)\n        exitAct.setShortcut('Ctrl+Q')\n        exitAct.triggered.connect(self.closeApp)\n        openAct = QAction(QIcon('img\\\\open.png'),'Copy',self)\n        openAct.setShortcut('Ctrl+C')\n\n        pasteAct = QAction(QIcon('img\\\\paste.png'),'Paste',self)\n        pasteAct.setShortcut('Ctrl+V')\n\n        deleteAct = QAction(QIcon('img\\\\delete.png'), 'Delete',self)\n        deleteAct.setShortcut('Ctrl+D')\n\n        saveAct = QAction(QIcon('img\\\\icon.png'),'Save',self)\n        saveAct.setShortcut('Ctrl+S')\n\n        self.toolbar = self.addToolBar('Toolbar')\n        self.toolbar.addAction(exitAct)\n        self.toolbar.addAction(openAct)\n        self.toolbar.addAction(pasteAct)\n        self.toolbar.addAction(deleteAct)\n        self.toolbar.addAction(saveAct)\n\n        self.setWindowTitle(self.title)\n        self.setGeometry(self.top, self.left, self.width, self.height)\n        self.show()\n    def closeApp(self):\n        self.close()\n\nApp = QApplication(sys.argv)\nwindow = 
Window()\nsys.exit(App.exec())\n\n","sub_path":"PyQt/Toolbars.py","file_name":"Toolbars.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"269052237","text":"from mythic_payloadtype_container.MythicCommandBase import *\nimport json\n\n\nclass Spawntox64Arguments(TaskArguments):\n\n def __init__(self, command_line):\n super().__init__(command_line)\n self.args = {\n \"application\": CommandParameter(name=\"Path to Application\", type=ParameterType.String, required=True, default_value=\"C:\\\\Windows\\\\System32\\\\rundll32.exe\"),\n \"arguments\": CommandParameter(name=\"Arguments\", type=ParameterType.String, default_value=\"\", required=False)\n }\n\n def split_commandline(self):\n if self.command_line[0] == \"{\":\n raise Exception(\"split_commandline expected string, but got JSON object: \" + self.command_line)\n inQuotes = False\n curCommand = \"\"\n cmds = []\n for x in range(len(self.command_line)):\n c = self.command_line[x]\n if c == '\"' or c == \"'\":\n inQuotes = not inQuotes\n if (not inQuotes and c == ' '):\n cmds.append(curCommand)\n curCommand = \"\"\n else:\n curCommand += c\n \n if curCommand != \"\":\n cmds.append(curCommand)\n \n for x in range(len(cmds)):\n if cmds[x][0] == '\"' and cmds[x][-1] == '\"':\n cmds[x] = cmds[x][1:-1]\n elif cmds[x][0] == \"'\" and cmds[x][-1] == \"'\":\n cmds[x] = cmds[x][1:-1]\n\n return cmds\n\n async def parse_arguments(self):\n if len(self.command_line) == 0:\n raise Exception(\"spawnto_x64 requires a path to an executable to be passed on the command line.\\n\\tUsage: {}\".format(Spawntox64Command.help_cmd))\n if self.command_line[0] == \"{\":\n self.load_args_from_json_string(self.command_line)\n else:\n parts = self.split_commandline()\n self.add_arg(\"application\", parts[0])\n firstIndex = self.command_line.index(parts[0])\n cmdline = self.command_line[firstIndex+len(parts[0]):].strip()\n if cmdline[0] in ['\"', \"'\"]:\n cmdline = cmdline[1:].strip()\n self.add_arg(\"arguments\", cmdline)\n\n pass\n\n\nclass Spawntox64Command(CommandBase):\n cmd = \"spawnto_x64\"\n needs_admin = False\n help_cmd = \"spawnto_x64 [path] [args]\"\n description = \"Change the default binary used in post exploitation jobs to [path]. 
If [args] provided, the process is launched with those arguments.\"\n version = 2\n is_exit = False\n is_file_browse = False\n is_process_list = False\n is_download_file = False\n is_upload_file = False\n is_remove_file = False\n author = \"@djhohnstein\"\n argument_class = Spawntox64Arguments\n attackmapping = [\"T1055\"]\n\n async def create_tasking(self, task: MythicTask) -> MythicTask:\n args = task.args.get_arg(\"arguments\")\n task.display_params = task.args.get_arg(\"application\")\n if args:\n task.display_params += \" {}\".format(args)\n return task\n\n async def process_response(self, response: AgentResponse):\n pass","sub_path":"Payload_Type/apollo/mythic/agent_functions/spawnto_x64.py","file_name":"spawnto_x64.py","file_ext":"py","file_size_in_byte":3061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"449830347","text":"# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tf_agents.bandits.networks.global_and_arm_feature_network.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import\n\nfrom tf_agents.bandits.networks import global_and_arm_feature_network as gafn\nfrom tf_agents.bandits.specs import utils as bandit_spec_utils\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.utils import test_utils\n\n\nparameters = parameterized.named_parameters(\n {\n 'testcase_name': 'batch2feat4act3',\n 'batch_size': 2,\n 'feature_dim': 4,\n 'num_actions': 3\n }, {\n 'testcase_name': 'batch1feat7act9',\n 'batch_size': 1,\n 'feature_dim': 7,\n 'num_actions': 9\n })\n\n\nclass GlobalAndArmFeatureNetworkTest(parameterized.TestCase,\n test_utils.TestCase):\n\n @parameters\n def testCreateFeedForwardCommonTowerNetwork(self, batch_size, feature_dim,\n num_actions):\n obs_spec = bandit_spec_utils.create_per_arm_observation_spec(\n 7, feature_dim, num_actions)\n net = gafn.create_feed_forward_common_tower_network(obs_spec, (4, 3, 2),\n (6, 5, 4), (7, 6, 5))\n input_nest = tensor_spec.sample_spec_nest(\n obs_spec, outer_dims=(batch_size,))\n output, _ = net(input_nest)\n self.evaluate(tf.compat.v1.global_variables_initializer())\n output = self.evaluate(output)\n self.assertAllEqual(output.shape, (batch_size, num_actions))\n\n @parameters\n def testCreateFeedForwardDotProductNetwork(self, batch_size, feature_dim,\n num_actions):\n obs_spec = bandit_spec_utils.create_per_arm_observation_spec(\n 7, feature_dim, num_actions)\n net = gafn.create_feed_forward_dot_product_network(obs_spec, (4, 3, 4),\n (6, 5, 4))\n input_nest = tensor_spec.sample_spec_nest(\n obs_spec, outer_dims=(batch_size,))\n output, _ = net(input_nest)\n self.evaluate(tf.compat.v1.global_variables_initializer())\n output = self.evaluate(output)\n self.assertAllEqual(output.shape, (batch_size, num_actions))\n\n\nif 
__name__ == '__main__':\n tf.test.main()\n","sub_path":"tf_agents/bandits/networks/global_and_arm_feature_network_test.py","file_name":"global_and_arm_feature_network_test.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"124727145","text":"#!/usr/bin/env python\n# coding=utf-8\ntry:\n input = raw_input\nexcept NameError:\n pass\n\n\ndef prompt(text, default=None, _test=None):\n \"\"\"Ask a question via raw_input() and return their answer.\n\n param text: prompt text\n param default: default value if no answer is provided.\n \"\"\"\n\n text += ' [%s] ' % default if default else ' '\n while True:\n if _test is not None:\n print(text)\n resp = _test\n else:\n resp = input(text)\n if resp:\n return resp\n if default is not None:\n return default\n\n\ndef prompt_bool(text, default=False, yes_choices=None, no_choices=None,\n _test=None):\n \"\"\"Ask a yes/no question via raw_input() and return their answer.\n\n :param text: prompt text\n :param default: default value if no answer is provided.\n :param yes_choices: default 'y', 'yes', '1', 'on', 'true', 't'\n :param no_choices: default 'n', 'no', '0', 'off', 'false', 'f'\n \"\"\"\n\n yes_choices = yes_choices or ('y', 'yes', 't', 'true', 'on', '1')\n no_choices = no_choices or ('n', 'no', 'f', 'false', 'off', '0')\n\n default = yes_choices[0] if default else no_choices[0]\n while True:\n if _test is not None:\n print(text)\n resp = _test\n else:\n resp = prompt(text, default)\n if not resp:\n return default\n resp = str(resp).lower()\n if resp in yes_choices:\n return True\n if resp in no_choices:\n return False\n","sub_path":"voodoo/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"300094920","text":"import distributedQuery\nfrom queryIndex import queryIndex,filteringResults\nimport multiTierFeatureBuilder\nimport numpy as np\n\nimport os\nfrom resources import Resource\nimport scoreMerge\n\nclass provenanceFiltering:\n\n #modify these variables\n algorithmName=\"SystemName\"\n algorithmVersion=\"1.0\"\n\n scalableQuery=None\n useSocketServers = False\n useSocketServerFile = False\n MultiTier = False\n #image results will go here\n imageOutputDir=\"\"\n #Load Index at initialization\n #indexFileResource is a distributedQuery object\n def __init__(self, distributedQueryObject):\n self.scalableQuery=distributedQueryObject\n if os.path.exists('./serverList.txt') and self.useSocketServers and self.useSocketServerFile:\n print('setting server list!')\n self.scalableQuery.setServerList('./serverList.txt')\n #image results from the distributed query will go here\n self.imageOutputDir= self.scalableQuery.imageDirectory\n\n def showTopResults(self,results,k):\n import featureExtraction\n import math\n import matplotlib.pyplot as plt\n fe = featureExtraction.featureExtraction()\n images = list(results.scores.keys())\n numImages = min(k,len(images))\n dim = math.ceil(np.sqrt(numImages))\n fig = plt.figure()\n i = 0\n for im in images[:numImages]:\n image = fe.deserialize_image(self.scalableQuery.getWorldImage(im)._data)\n sub = fig.add_subplot(dim,dim,i+1)\n sub.imshow(image)\n i+=1\n\n pass\n #probeImage conatins Image Data\n def processImage (self, probeImage, numberOfResultsToRetrieve,rootpath=''):\n #get filename\n probeFilename = probeImage.key\n print('yert')\n #create score object\n resultScores =filteringResults()\n\n 
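filteringResults is imported from queryIndex, which is not part of this record; the surrounding code only relies on it exposing a scores mapping plus mergeScores and pairDownResults. A sketch of a score merge of that shape, where the name and the keep-the-best semantics are assumptions rather than the real queryIndex implementation:

def merge_scores_sketch(base_scores, extra_scores, ignore_ids=()):
    # Keep the best score seen for each image id across two result sets,
    # skipping ids the caller already accounted for (compare the ignoreIDs
    # keyword used with mergeScores in this file).
    merged = dict(base_scores)
    for image_id, score in extra_scores.items():
        if image_id in ignore_ids:
            continue
        merged[image_id] = max(merged.get(image_id, float("-inf")), score)
    return merged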
print('yeet')\n allQueries = []\n allQueries.append(probeImage)\n\n #this can be called as many times as needed\n #image files will be put in\n print('yoot')\n allResults = self.scalableQuery.queryImages(allQueries,numberOfResultsToRetrieve,rootpath=rootpath)\n print('yote')\n #Tier2\n maxScore = allResults[0].scores[list(allResults[0].scores.keys())[0]]\n\n print('gonna hit dat dere if statement')\n if self.MultiTier and maxScore > .03: #only do multitier search if the first query gets enough votes (3% of all features match)\n try:\n mainResult = allResults[0]\n tier2ImageResources = []\n for r in list(mainResult.scores):\n tier2ImageResources.append(self.scalableQuery.getWorldImage(r))\n fullTier2FeatureResource,tier2FeatureSets, featureIDList, featureObjectIDList, featureDictionary,queryOrResultList,featureSetMap,visDict = multiTierFeatureBuilder.getTier2Features(probeImage,tier2ImageResources,30)\n if fullTier2FeatureResource is not None:\n # allTier2Results = self.scalableQuery.queryFeatures([fullTier2FeatureResource['supplemental_information']['value']], 100,ignoreIDs=list(allResults[0].map))\n allTier2Results = self.scalableQuery.queryFeatures(tier2FeatureSets,75,ignoreIDs=list(allResults[0].map))\n print('found results for ',len(allTier2Results),' tier 2 objects')\n # allTier2Scores = multiTierFeatureBuilder.getObjectScores(allTier2Results[0],featureIDList,featureObjectIDList,featureDictionary,queryOrResultList,objectWise=True,ignoreIDs=list(allResults[0].map))\n allTier2Scores = allTier2Results\n finalTier2Ranks = filteringResults()\n for r in allTier2Scores:\n r.I = None\n r.D = None\n r.pairDownResults(2)\n print('merging tier 2 scores')\n for r in allTier2Scores:\n finalTier2Ranks.mergeScores(r,ignoreIDs=allResults[0].map)\n # scoreMerge.mergeScoreSet(allTier2Scores)\n allResults[0].mergeScores(finalTier2Ranks)\n else:\n allTier2Results = None\n except:\n print('failed tier 2 search')\n allTier2Results = None\n # print(allResults)\n outputJson = self.createOutput(probeFilename,allResults[0])\n\n return outputJson\n\n\n def createOutput(self,probeFilename, resultScores):\n return {'algorithm': self.createAlgOutput(), 'provenance_filtering': self.createFilteringOutput(probeFilename,resultScores)}\n\n def createAlgOutput(self,):\n return {'name': self.algorithmName.replace(\" \", \"\"), 'version': self.algorithmVersion.replace(\" \", \"\")}\n\n def createFilteringOutput(self, probeFilename,resultScores):\n return {'probe': probeFilename, 'matches':resultScores.scores,'meta':resultScores.visData}\n","sub_path":"provenance/provenanceFiltering/provenanceFiltering.py","file_name":"provenanceFiltering.py","file_ext":"py","file_size_in_byte":5091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"329732969","text":"import torch\nfrom datasets import MnistDataset\nimport argparse\nfrom models import NN_Model\nimport torch.optim as optim\n\n\n\"\"\"\nThis script is about a distillation problem. 
It trains two teachers with a simple NN for the MNIST dataset.\nTeacher 1 trains a model that goes from 0 to 4.\nTeacher 2 trains a model that goes from 5 to 9.\nThe aim of this script is to make some research from the paper 'Unifying Heterogeneous Classifiers With Distillation'\n\"\"\"\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--epochs', default=20)\nparser.add_argument('--lr', default=0.001)\nparser.add_argument('--train_file', default=\"/home/student/noel_aitor/mnist/data/MNIST/processed/training.pt\")\nparser.add_argument('--save_teachers', default=True)\nparser.add_argument('--teachers_path', default=\"/home/student/noel_aitor/mnist/teachers/\")\nparser.add_argument('--device', default=\"cuda\")\nargs = vars(parser.parse_args())\ndevice = args[\"device\"]\n\n\n# Teacher 1\nimages, targets = torch.load(args[\"train_file\"])\ntrainset0to4 = MnistDataset(images, targets)\nidx = trainset0to4.targets <= 4\ntrainset0to4.images = trainset0to4.images[idx]\ntrainset0to4.targets = trainset0to4.targets[idx]\ntrainloader = torch.utils.data.DataLoader(trainset0to4, batch_size=128, shuffle=True, num_workers=2)\n\n\nteacher0to4 = NN_Model(n_classes=5, dropout=0.2, hidden_dropout=0.5)\nteacher0to4.to(device)\n\nepochs = args[\"epochs\"]\nlearning_rate = args[\"lr\"]\ncriterion = torch.nn.CrossEntropyLoss()\noptimizer = optim.SGD(teacher0to4.parameters(), lr=learning_rate, momentum=0.9)\n\n# Run over epochs (1 epoch = visited all items in dataset)\nfor epoch in range(epochs):\n\n running_loss = 0.0\n total = 0\n\n # for i, data in enumerate(trainloader, 0):\n for data in trainloader:\n # Apply the learning rate decay\n if epoch % 100 == 0 and epoch != 0:\n learning_rate = learning_rate * 0.5\n optimizer = optim.SGD(teacher0to4.parameters(),\n lr=learning_rate, momentum=0.9)\n\n # get the inputs; data is a list of [inputs, labels]\n inputs, labels = data\n inputs = torch.flatten(inputs, start_dim=1).to(device)\n inputs = inputs.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = teacher0to4(inputs.float())\n target = labels.to(device).long()\n loss = criterion(outputs, target)\n loss.backward()\n optimizer.step()\n\n total += len(data)\n\n # print statistics\n running_loss += loss.item()\n # print every epoch\n print('[%d] loss: %.3f' % (epoch + 1, running_loss / total))\n\nprint('Finished teacher 1 training!')\n\n\ntrainset5to9 = MnistDataset(images, targets)\nidx = trainset5to9.targets >= 5\ntrainset5to9.images = trainset5to9.images[idx]\ntrainset5to9.targets = trainset5to9.targets[idx]\ntrainset5to9.targets = trainset5to9.targets - 5\ntrainloader = torch.utils.data.DataLoader(trainset5to9, batch_size=128, shuffle=True, num_workers=2)\n\nteacher5to9 = NN_Model(n_classes=5, dropout=0.2, hidden_dropout=0.5)\nteacher5to9.to(device)\n\nlearning_rate = 0.001\ncriterion = torch.nn.CrossEntropyLoss()\noptimizer = optim.SGD(teacher5to9.parameters(), lr=learning_rate, momentum=0.9)\n\n# Run over epochs (1 epoch = visited all items in dataset)\nfor epoch in range(epochs):\n\n running_loss = 0.0\n total = 0\n\n # for i, data in enumerate(trainloader, 0):\n for data in trainloader:\n # Apply the learning rate decay\n if epoch % 100 == 0 and epoch != 0:\n learning_rate = learning_rate * 0.5\n optimizer = optim.SGD(teacher5to9.parameters(),\n lr=learning_rate, momentum=0.9)\n\n # get the inputs; data is a list of [inputs, labels]\n inputs, labels = data\n inputs = torch.flatten(inputs, start_dim=1).to(device)\n inputs = 
inputs.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = teacher5to9(inputs.float())\n target = labels.to(device).long()\n loss = criterion(outputs, target)\n loss.backward()\n optimizer.step()\n\n total += len(data)\n\n # print statistics\n running_loss += loss.item()\n # print every epoch\n print('[%d] loss: %.3f' % (epoch + 1, running_loss / total))\n\nprint('Finished teacher 2 training!')\n\n\nif args[\"save_teachers\"]:\n torch.save(teacher0to4, args[\"teachers_path\"] + \"teacher0to4_NN.pt\")\n torch.save(teacher5to9, args[\"teachers_path\"] + \"teacher5to9_NN.pt\")\n print(f\"Teachers stored in {args['teachers_path']}\")\n","sub_path":"code/train_teachers_NN.py","file_name":"train_teachers_NN.py","file_ext":"py","file_size_in_byte":4652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"292897875","text":"from typing import Any\nfrom typing import Dict\nfrom typing import Text\n\nimport pytest\n\nfrom project.settings import HOMEWORKS\nfrom project.utils import import_by_path\n\n_BENZAK_HTTP_RESPONSE = \"\"\"HTTP/1.1 200 OK\nConnection: keep-alive\nContent-Length: 4053\nContent-Type: text/html; charset=utf-8\nDate: Sun, 26 Jan 2020 14:07:02 GMT\nServer: gunicorn/19.9.0\nVary: Cookie\nVia: 1.1 vegur\nX-Frame-Options: SAMEORIGIN\n\n\n\n\n\n\n
    \n    HTTP/9.1 666 NE OK\n\nConnection: keep-dead\n    Content-Length: 1488\nContent-Type: text/png; charset=utf-9\n    Date: Sun, 32 Jan 2020 14:88:99 BMT\nServer: gunicorn/1.2.3\n    Vary: Cookie\nVia: 1.1 vegur\n    X-Frame-Options: SAMEORIGIN\n1: 2\n0 1 2 : 3 4 5\nexit\n-2:-3\n
    \n\n\n\"\"\"\n\n\ndef find_modules_for_level(level: Text) -> Dict[Text, Any]:\n modules = {}\n\n for pyfile in HOMEWORKS.glob(f\"**/lesson13/{level}.py\"):\n student = pyfile.parts[-3]\n module = import_by_path(pyfile)\n modules[student] = module\n\n return modules\n\n\n@pytest.fixture\ndef benzak_http_response() -> str:\n return _BENZAK_HTTP_RESPONSE\n\n\n@pytest.fixture\ndef modules_level01() -> Dict[Text, Any]:\n return find_modules_for_level(\"level01\")\n\n\n@pytest.fixture\ndef modules_level02() -> Dict[Text, Any]:\n return find_modules_for_level(\"level02\")\n\n\n@pytest.fixture\ndef modules_level03() -> Dict[Text, Any]:\n return find_modules_for_level(\"level03\")\n\n\n@pytest.fixture\ndef modules_level04() -> Dict[Text, Any]:\n return find_modules_for_level(\"level04\")\n\n\n@pytest.fixture\ndef modules_level05() -> Dict[Text, Any]:\n return find_modules_for_level(\"level05\")\n","sub_path":"lessons/lesson13/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"194081249","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import Product\nfrom django.db.models import Q\nfrom django.shortcuts import render, get_object_or_404\nfrom django.contrib import messages\nfrom django.shortcuts import redirect\nfrom django.utils import timezone\nfrom .models import Product,OrderItem\nfrom math import ceil\nfrom django.contrib.auth.decorators import login_required\nfrom .decorators import allowed_users\n\n# Create your views here.\n\ndef index(request):\n if request.GET:\n query = request.GET.get('q')\n product = get_data_queryset(str(query)) # typeCasting query to sting\n else:\n product = Product.objects.all()\n\n cart = 0\n if request.user.username:\n cart = OrderItem.objects.filter(Customer_Id_id=request.user.id).count()\n params = { 'product': product, \"cart\": cart}\n\n return render(request, 'shop/index.html', params)\n\ndef view_product(request, pk):\n if request.method == \"POST\":\n product = Product.objects.get(pk=pk)\n else:\n product = Product.objects.get(pk=pk)\n params = {'product': product}\n return render(request, 'shop/product.html', params)\n\n@login_required(login_url=\"/Login/\")\ndef itemdetail(request):\n orderitems = ''\n if request.user.username:\n orderitems = OrderItem.objects.filter(Customer_Id_id=request.user.id).all()\n\n params = {'orderitems': orderitems}\n\n return render(request, \"Shop/item.html\", params)\n\n@login_required(login_url=\"/Login/\")\ndef add_to_cart(request, id):\n # login user object\n user = request.user\n # checking if data is exists or not\n if OrderItem.objects.filter(Customer_Id_id=user.id, item_id=id, ordered=False).exists():\n # update quantity\n order = OrderItem.objects.filter(Customer_Id_id=user.id, item_id=id).get();\n order.quantity += 1\n order.save()\n messages.info(request, \"This item is added to your cart\")\n else:\n # insert a data in a row\n OrderItem.objects.create(item_id=id, Customer_Id_id=user.id, ordered=False)\n\n # return HttpResponse('created')\n return redirect(\"/\")\n\n\ndef remove_from_cart(request, id):\n user = request.user\n\n if OrderItem.objects.filter(Customer_Id_id=user.id, item_id=id, ordered=False).exists():\n order = OrderItem.objects.filter(Customer_Id_id=user.id, item_id=id).get();\n order.quantity = 0\n order.save()\n if int(order.quantity) == 0:\n order.delete()\n messages.info(request, \"This item has been removed 
from your cart\")\n\n else:\n # OrderItem.objects.create(item_id = id ,Customer_Id_id = user.id,ordered=False)\n messages.info(request, \"Item is not in the cart\")\n return redirect(\"/\")\n\n\ndef get_data_queryset(query=None): #Searching #queryset= search garda aaaune\n\tqueryset = []\n\tqueries = query.split(\" \")\n\tfor q in queries:\n\t\tproduct = Product.objects.filter(\n\t\t\t\t Q(Product_Name__icontains=q) |\n\t\t\t\t Q(Product_Category__icontains=q)\n\t\t\t )\n\t\tfor product in product:\n\t\t\tqueryset.append(product)\n\n\treturn list(set(queryset))\n\n\ndef increase(request, id):\n item = OrderItem.objects.get(id=id)\n item.quantity += 1\n item.save()\n\n return redirect(\"itemdetail\")\n\ndef decrease(request, id):\n item = OrderItem.objects.get(id=id)\n item.quantity = int(item.quantity) - 1\n item.save()\n if int(item.quantity)==0:\n item.delete()\n\n\n return redirect(\"itemdetail\")\n\n@allowed_users(allowed_roles=['customer'])\ndef Checkout(request):\n orderitems = ''\n total=0\n totalamount=0\n if request.user.username:\n orderitems = OrderItem.objects.filter(Customer_Id_id=request.user.id).all()\n\n for i in orderitems:\n total = i.quantity * i.item.Product_Price\n totalamount = totalamount + total\n\n params = {'totalamount': totalamount}\n\n return render(request,\"Payment/Checkout.html\",params)","sub_path":"rbaclothing/Shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"334329644","text":"# configuration file\n#!/usr/bin/python\nimport ROOT\n\"\"\"\n\nThis is a configuration file containing \nthe code colors in hex, and translated \nin the ROOT format.\n\nThe colors has been inspired from :\nhttp://flatuicolors.com/\n\nyou can use the follwing command to \ntransform the HEX color format to ROOT\nformat : ROOT.TColor.GetColor(hexcolor)\n\n\"\"\"\n\nhexcolor = {\n \"turquoise\" :\"#1abc9c\",\n \"emerald\" :\"#2ecc71\",\n \"peter_river\" :\"#3498db\",#ggf\n \"amethyst\" :\"#9b59b6\",\n \"wet_asphalt\" :\"#34495e\",\n \"green_sea\" :\"#16a085\",\n \"nephritis\" :\"#27ae60\",\n \"belize_hole\" :\"#2980b9\",\n \"wisteria\" :\"#8e44ad\",\n \"midnight_blue\":\"#2c3e50\",#gg+gj\n \"sun_flower\" :\"#f1c40f\",#QCD (jj)\n \"carrot\" :\"#e67e22\",\n \"alizarin\" :\"#e74c3c\",\n \"clouds\" :\"#ecf0f1\",\n \"concrete\" :\"#95a5a6\",\n \"orange\" :\"#f39c12\",\n \"pumpkin\" :\"#d35400\",\n \"pomegranate\" :\"#c0392b\",# VBF signal\n \"silver\" :\"#bdc3c7\",\n \"asbestos\" :\"#7f8c8d\"\n}\nrgbcolor = {\n \"turquoise\" :[26 , 188, 156],\n \"emerland\" :[46 , 204, 113],\n \"peter-river\" :[52 , 152, 219],\n \"amethyst\" :[155, 89 , 182],\n \"wet-asphalt\" :[52 , 73 , 94 ],\n \"green-sea\" :[22 , 160, 133],\n \"nephritis\" :[39 , 174, 96 ],\n \"belize-hole\" :[41 , 128, 185],\n \"wisteria\" :[142, 68 , 173],\n \"midnight-blue\" :[44 , 62 , 80 ],\n \"sun-flower\" :[241, 196, 15 ],\n \"carrot\" :[230, 126, 34 ],\n \"alizarin\" :[231, 76 , 60 ],\n \"clouds\" :[236, 240, 241],\n \"concrete\" :[149, 165, 166],\n \"orange\" :[243, 156, 18 ],\n \"pumpkin\" :[211, 84 , 0 ],\n \"pomegranate\" :[192, 57 , 43 ],\n \"silver\" :[189, 195, 199],\n \"asbestos\" :[127, 140, 141]}\n\nusercolor = {}\ndef declar_color():\n with open('./.color-for-root.C','w') as g:\n ci = 1500\n g.write('{\\n')\n for c in rgbcolor:\n col = ROOT.TColor(ci,\n rgbcolor[c][0],\n rgbcolor[c][1],\n rgbcolor[c][2]) \n line = ('TColor *c_%s = new TColor(%i,%d,%d,%d);'\n % ( c, ci,\n 
rgbcolor[c][0]/255.,\n rgbcolor[c][1]/255.,\n rgbcolor[c][2]/255.)\n )\n ci = ci + 1;\n usercolor[c]=ci\n g.write(line + '\\n')\n g.write('}')\n \n#usercolor={\n# \"vbf_m125\" : 99,\n# \"ggf_m125\" : 215,\n# \"qcd\" : 91,\n# \"gamgam\" : 65,\n# \"gamJet\" : 51,\n# \"dy_toll_m50\" : 85,\n#}\n","sub_path":"colors.py","file_name":"colors.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"334113507","text":"#!/usr/bin/env python\n\n__author__ = \"Jose Antonio Navas Molina\"\n__copyright__ = \"Copyright 2013, The QIIME Scaling Project\"\n__credits__ = [\"Jose Antonio Navas Molina\"]\n__license__ = \"BSD\"\n__version__ = \"0.0.2-dev\"\n__maintainer__ = \"Jose Antonio Navas Molina\"\n__email__ = \"josenavasmolina@gmail.com\"\n__status__ = \"Development\"\n\nfrom matplotlib import use\nuse('Agg', warn=False)\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport imghdr\nfrom shutil import rmtree\nfrom unittest import TestCase, main\nfrom tempfile import mkdtemp\nfrom pyqi.core.exception import IncompetentDeveloperError\nfrom scaling.interfaces.optparse.output_handler import (write_summarized_results,\n write_matplotlib_figure,\n write_string_to_dir)\n\nclass OutputHandlerTests(TestCase):\n def setUp(self):\n self.output_dir = mkdtemp()\n self.data = {\n 'label' : [100, 200, 300, 400, 500],\n 'wall_time' : ([25, 50, 75, 100, 125],\n [1, 2, 3, 4, 5]),\n 'cpu_user' : ([23, 46, 70, 94, 123],\n [0.9, 2, 2.9, 4.1, 5]),\n 'cpu_kernel' : ([2, 4, 5, 6, 2],\n [0.1, 0.0, 0.001, 0.2, 0.02]),\n 'memory' : ([1048576, 2097152, 3145728, 4194304, 5242880],\n [0.0, 0.0, 0.0, 0.2, 0.0])\n }\n self.figure = plt.figure()\n # ax = self.figure.add_subplot(111)\n # ax.plot()\n\n def tearDown(self):\n rmtree(self.output_dir)\n\n def test_write_summarized_results(self):\n \"\"\"Correctly writes the bench results to a file\"\"\"\n # Can't write without a path\n self.assertRaises(IncompetentDeveloperError, write_summarized_results,\n 'a', self.data)\n write_summarized_results('foo', self.data, self.output_dir)\n fp = os.path.join(self.output_dir, 'foo.txt')\n with open(fp, 'U') as obs_f:\n obs = obs_f.read()\n self.assertEqual(obs, exp_write_summarized_results)\n\n def test_write_matplotlib_figure(self):\n \"\"\"Correctly writes a matplotlib figure to a file\"\"\"\n # Can't write without a path\n self.assertRaises(IncompetentDeveloperError, write_matplotlib_figure,\n 'a', self.figure)\n write_matplotlib_figure('foo', self.figure, self.output_dir)\n fp = os.path.join(self.output_dir, 'foo.png')\n self.assertEqual(imghdr.what(fp), 'png')\n\n def test_write_string_to_dir(self):\n \"\"\"Correctly writes a string in a directory\"\"\"\n # Can't write without a path\n self.assertRaises(IncompetentDeveloperError, write_string_to_dir,\n 'a', 'foo')\n write_string_to_dir('foo', 'bar', self.output_dir)\n fp = os.path.join(self.output_dir, 'foo.txt')\n with open(fp, 'U') as obs_f:\n obs = obs_f.read()\n\n self.assertEqual(obs, 'bar\\n')\n\n\nexp_write_summarized_results = \"\"\"#label\\twall_mean\\twall_std\\tuser_mean\\tuser_std\\tkernel_mean\\tkernel_std\\tmem_mean\\tmem_std\n100\\t25\\t1\\t23\\t0.9\\t2\\t0.1\\t1048576\\t0.0\n200\\t50\\t2\\t46\\t2\\t4\\t0.0\\t2097152\\t0.0\n300\\t75\\t3\\t70\\t2.9\\t5\\t0.001\\t3145728\\t0.0\n400\\t100\\t4\\t94\\t4.1\\t6\\t0.2\\t4194304\\t0.2\n500\\t125\\t5\\t123\\t5\\t2\\t0.02\\t5242880\\t0.0\n\"\"\"\n\nif __name__ == '__main__':\n 
main()","sub_path":"tests/test_interfaces/test_optparse/test_output_handler.py","file_name":"test_output_handler.py","file_ext":"py","file_size_in_byte":3375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"595133656","text":"#!/home/kjeong23/softwares/bin/python3.4\n# micelle COM position rms fluctuation calculation\n#\n#starting from reindexed COM trajectory. reading -> should apply 'smart pbc reading'.\n#consider 2 subsequent steps -> if displacement >= box/2, transpose the later step.\n#-> collect along the whole COMtraj:get average position. -> 2nd reading(apply smart reading again)\n#-> calculate deviation from ave -> get rmsf components, norm (sqrt (dx^2+dy^2+dz^2))\n#2 arguments: input(reindexed COM traj) output(table of site, xyz components of rms position fluct, norm)\n\nimport math\nimport sys\nimport numpy\n\ndef gro_xyzsplit(str): #own needed function for splitting of .gro format. V05.\n splitstr=[str[20:28],str[28:36],str[36:44]]\n for i in range(len(splitstr)):\n splitstr[i]=splitstr[i].replace(\" \",\"\")\n for i in range(0,3):\n splitstr[i]=float(splitstr[i])\n return splitstr\n\ndef smartcrd(crd1,crd2,box,nmic): #'smart pbc reading'\n newcrd=crd2\n for i in range(nmic):\n for j in range(3):\n if crd2[i][j]-crd1[i][j] >= (box[j]/2.0):\n newcrd[i][j]-=box[j]\n elif crd2[i][j]-crd1[i][j] <= (-box[j]/2.0):\n newcrd[i][j]+=box[j]\n return newcrd\n\n#main fxn\ndef main():\n \n #Load input files\n trjfile = open(sys.argv[1],'r') #reindexed com trajectory file\n outfile = open(sys.argv[2],'w') #output file for rms position fluctuation\n\n #start the loop of 'COM trajectory reading'(1st: getting ave)\n sindex,lindex=0,0\n crd1=numpy.empty((0,3),float)\n crd2=numpy.empty((0,3),float)\n for line in trjfile:\n if lindex!=0:\n if lindex==1:\n nmic=int(line)\n elif lindex>=2 and lindex<=1+nmic:\n split=gro_xyzsplit(line)\n if sindex==0: #initial step\n crd1=numpy.vstack((crd1,split))\n else:\n crd2=numpy.vstack((crd2,split))\n elif lindex==2+nmic:\n split=line.split()\n if sindex==0: #initial step\n box1=numpy.array([float(x) for x in split])\n else:\n box2=numpy.array([float(x) for x in split])\n\n if sindex==0:\n avecrd=crd1\n else:\n crd2=smartcrd(crd1,crd2,box2,nmic)\n avecrd+=crd2\n crd1=crd2\n crd2=numpy.empty((0,3),float)\n #initialization\n if (sindex%50)==0:\n print('loop 1 step {} complete'.format(sindex))\n sindex+=1\n lindex=-1\n lindex+=1\n avecrd/=sindex\n\n #Load input file again for 2nd loop, 2nd loop:getting STDEV\n trjfile = open(sys.argv[1],'r') \n sindex,lindex=0,0\n crd1=numpy.empty((0,3),float)\n crd2=numpy.empty((0,3),float)\n stdev=numpy.zeros((nmic,3),float)\n norm=numpy.zeros(nmic,float)\n\n for line in trjfile:\n if lindex!=0:\n if lindex==1:\n nmic=int(line)\n elif lindex>=2 and lindex<=1+nmic: \n split=gro_xyzsplit(line)\n if sindex==0: #initial step\n crd1=numpy.vstack((crd1,split)) \n else:\n crd2=numpy.vstack((crd2,split)) \n elif lindex==2+nmic:\n split=line.split()\n if sindex==0: #initial step\n box1=numpy.array([float(x) for x in split]) \n else:\n box2=numpy.array([float(x) for x in split]) \n \n if sindex==0:\n onedev=crd1-avecrd #deviation\n else: \n crd2=smartcrd(crd1,crd2,box2,nmic) \n onedev=crd2-avecrd\n crd1=crd2 \n crd2=numpy.empty((0,3),float)\n onedev=onedev*onedev\n stdev+=onedev #first adding (dev)^2 of 1 step \n #initialization\n if (sindex%50)==0: \n print('loop 2 step {} complete'.format(sindex)) \n sindex+=1\n lindex=-1\n lindex+=1\n norm=stdev[:,0]+stdev[:,1]+stdev[:,2]\n 
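The unwrapping rule in smartcrd() above is what makes the averaging valid across periodic boundaries. A tiny worked example with toy numbers (one micelle, x component only):

crd1 = [[0.1, 0.0, 0.0]]
crd2 = [[9.8, 0.0, 0.0]]
box = [10.0, 10.0, 10.0]
# smartcrd(crd1, crd2, box, 1) returns [[-0.2, 0.0, 0.0]]: the apparent jump
# of +9.7 is at least box/2 = 5.0, so 9.8 is shifted by -10.0 and the true
# displacement is -0.3 rather than +9.7.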
stdev/=(sindex-1) #variance\n norm/=(sindex-1) #variance\n\n stdev=numpy.sqrt(stdev)\n norm=numpy.sqrt(norm)\n\n #printing section\n for i in range(nmic):\n outfile.write('{:2} {:8.5f} {:8.5f} {:8.5f} {:8.5f}\\n'.format(i+1,stdev[i][0],stdev[i][1],stdev[i][2],norm[i])) \n\n trjfile.close()\n outfile.close()\n\nif __name__ == \"__main__\": main()\n\n","sub_path":"py_development/data_process/micelles/rmsf_v01.py","file_name":"rmsf_v01.py","file_ext":"py","file_size_in_byte":4607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"146673474","text":"# Definition for a binary tree node.\n\nfrom typing import List\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n def delNodes(self, root: TreeNode, to_delete: List[int]) -> List[TreeNode]:\n\n roots = set([root])\n to_delete = set(to_delete)\n\n from collections import deque\n queue = deque([root])\n\n while queue:\n n = queue.popleft()\n if n.left:\n if n.val in to_delete:\n roots.add(n.left)\n queue.append(n.left)\n if n.left.val in to_delete:\n n.left = None\n if n.right:\n if n.val in to_delete:\n roots.add(n.right)\n queue.append(n.right)\n if n.right.val in to_delete:\n n.right = None\n if n.val in to_delete:\n roots = roots - {n}\n\n return sorted(roots, key=lambda x: x.val)\n","sub_path":"leetcode/1110_delete_nodes_and_return_forest.py","file_name":"1110_delete_nodes_and_return_forest.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"195834577","text":"'''\nSecurityList class that loads security data\nand finds hedge ratio of portolfio, returns\ntime series, and other useful functions\n\n'''\n\nimport quandl\nimport numpy as np\nimport pandas as pd\nimport datetime\nimport statsmodels.tsa.vector_ar.vecm as jh\nimport matplotlib.pyplot as plt\nimport pickle\n\nquandl.ApiConfig.api_key = 'AfS6bPzj1CsRFyYxCcvz'\n\nclass SecurityList():\n\n def __init__(self,tickers):\n self.tickers = tickers\n self.data = pd.DataFrame(columns=self.tickers)\n self.volume = pd.DataFrame(columns=self.tickers)\n self.split = pd.DataFrame(columns=self.tickers)\n self.div = pd.DataFrame(columns=self.tickers)\n self.close = pd.DataFrame(columns=self.tickers)\n\n def importData(self,data):\n self.data = data\n\n def downloadQuandl(self,start,end):\n\n try:\n self.data,self.volume,self.split,self.div,self.close = pickle.load(open('WIKIdata.pickle','rb'))\n except FileNotFoundError:\n def convert_dt(elem):\n return pd.to_datetime(elem).date()\n for sec in self.tickers:\n print(\"downloading \"+sec)\n try:\n a = quandl.get('WIKI/'+sec, start_date=start,end_date=end)\n self.data[sec] = a['Adj. 
Close']\n self.volume[sec] = a['Volume']\n self.split[sec] = a['Split Ratio']\n self.div[sec] = a['Ex-Dividend']\n self.close[sec] = a['Close']\n f = np.vectorize(convert_dt)\n index = f(a.index)\n except:\n pass\n self.data = self.data.set_index(index)\n self.volume = self.volume.set_index(index)\n self.split = self.split.set_index(index)\n self.div = self.div.set_index(index)\n self.close = self.close.set_index(index)\n pickle.dump((self.data,self.volume,self.split,self.div,self.close),open('WIKIdata.pickle','wb'))\n self.data = self.data.dropna(axis='columns')\n self.volume = self.volume.dropna(axis='columns')\n self.split = self.split.dropna(axis='columns')\n self.div = self.div.dropna(axis='columns')\n self.close = self.close.dropna(axis='columns')\n print(self.data.columns)\n self.data = self.data[self.tickers]\n self.volume = self.volume[self.tickers]\n self.split = self.split[self.tickers]\n self.div = self.div[self.tickers]\n self.close = self.close[self.tickers]\n\n def genTimeSeries(self):\n\n '''\n Generate Time Series using johansen test\n '''\n eig = self.genHedgeRatio()\n ts = np.dot(self.data,eig)\n return ts\n\n def genHedgeRatio(self):\n\n matrix = self.genMatrix()\n results = jh.coint_johansen(matrix,0,1)\n return results.evec[:,0]\n\n def genMatrix(self):\n\n ts_row,ts_col = self.data.shape\n matrix = np.zeros((ts_row,ts_col))\n for i, sec in enumerate(self.data):\n matrix[:,i] = self.data[sec]\n return matrix\n\n def getVolume(self):\n return self.volume\n\n def getSplits(self):\n return self.split\n\n def getDiv(self):\n return self.div\n\n def getAdjFactors(self):\n temp = self.div.copy()\n temp[temp != 0] = 1\n close = self.close*temp\n adj_factors = self.div+close\n close[close == 0] = 1\n adj_factors /= close\n adj_factors[adj_factors == 0] = 1\n return adj_factors\n\n def adjSplits(self):\n split = self.split.product()\n eig = self.genHedgeRatio()\n adj = eig/split\n return adj\n\n def adjDividends(self):\n adj_fact = self.getAdjFactors()\n total_fact = adj_fact.product()\n return total_fact\n","sub_path":"securityList.py","file_name":"securityList.py","file_ext":"py","file_size_in_byte":3799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"159015664","text":"from ursina import *\nfrom ursina.prefabs.first_person_controller import FirstPersonController\n\napp = Ursina()\ngrass_texture = load_texture('minecraft/grass_block.png')\nstone_texture = load_texture('minecraft/stone_block.png')\nbrick_texture = load_texture('minecraft/brick_block.png')\ndirt_texture = load_texture('minecraft/dirt_block.png')\nsky_texture = load_texture('minecraft/skybox.png')\narm_texture = load_texture('minecraft/arm_texture.png')\npunch_sound = Audio('minecraft/punch_sound',loop = False, autoplay = False)\n\nblock_pick=1\nwindow.fps_counter.enabled = False\nwindow.exit_button.visible = False\ndef update():\n\tglobal block_pick\n\tif held_keys['left mouse'] or held_keys['right mouse']:\n\t\thand.animation()\n\telse:\n\t\thand.animation2()\n\n\n\tif held_keys['1']:block_pick = 1\n\tif held_keys['2']:block_pick = 2\n\tif held_keys['3']:block_pick = 3\n\tif held_keys['4']:block_pick = 4\n\nclass Voxel(Button):\n\tdef __init__(self, position= (0,0,0), texture =grass_texture):\n\t\tsuper().__init__(\n\t\t\tparent = scene,\n\t\t\tposition = position,\n\t\t\tmodel = 'minecraft/block',\n\t\t\torigin_y =0.5,\n\t\t\ttexture = texture,\n\t\t\tcolor = color.color(0,0,random.uniform(0.9,1)),\n\t\t\tscale=0.5)\n\n\tdef input(self,key):\n\t\tif 
self.hovered:\n\t\t\tif key == 'left mouse down':\n\t\t\t\tpunch_sound.play()\n\n\t\t\t\tif block_pick == 1:\n\t\t\t\t\tvoxel = Voxel(position = self.position + mouse.normal,texture=grass_texture)\n\t\t\t\tif block_pick == 2:\n\t\t\t\t\tvoxel = Voxel(position = self.position + mouse.normal,texture=stone_texture)\n\t\t\t\tif block_pick == 3:\n\t\t\t\t\tvoxel = Voxel(position = self.position + mouse.normal,texture=brick_texture)\n\t\t\t\tif block_pick == 4:\n\t\t\t\t\tvoxel = Voxel(position = self.position + mouse.normal,texture=dirt_texture)\n\n\t\t\tif key =='right mouse down':\n\t\t\t\tpunch_sound.play()\n\t\t\t\tdestroy(self)\n\t\t\tif key == 'q':\n\t\t\t\tquit()\n\nclass Sky(Entity):\n\tdef __init__(self):\n\t\tsuper().__init__(\n\t\t\tparent = scene,\n\t\t\tmodel = 'sphere',\n\t\t\ttexture = sky_texture,\n\t\t\tscale = 150,\n\t\t\tdouble_sided = True)\n\nclass Hand(Entity):\n\tdef __init__(self):\n\t\tsuper().__init__(\n\t\t\tparent = camera.ui,\n\t\t\tmodel = 'arm',\n\t\t\ttexture= arm_texture,\n\t\t\tscale = 0.2,\n\t\t\trotation = Vec3(150,-10,0),\n\t\t\tposition = Vec2(0.4,-0.6))\n\n\tdef animation(self):\n\t\tself.position = Vec2(0.3, -0.5)\n\n\tdef animation2(self):\n\t\tself.position = Vec2(0.4,-0.6)\n\n\n\nfor z in range(20):\n\tfor x in range(20):\n\t\tvoxel = Voxel(position = (x,0,z))\n\nsky = Sky()\nhand = Hand()\n\n\nplayer = FirstPersonController()\napp.run()","sub_path":"minicraft.py","file_name":"minicraft.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"96098430","text":"import sys\r\nimport os\r\nimport glob\r\nimport gui_cycle\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5.QtCore import *\r\nfrom pytdx.reader import TdxDailyBarReader, TdxFileNotFoundException\r\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\r\n\r\nclass circle(QMainWindow, gui_cycle.Ui_MainWindow):\r\n    def __init__(self):\r\n        super(self.__class__, self).__init__()\r\n        self.setupUi(self)\r\n        self.sys_init()\r\n\r\n    def sys_init(self):\r\n        self.figure = plt.figure()\r\n        self.canvas = FigureCanvas(self.figure)\r\n        self.verticalLayout.addWidget(self.canvas)\r\n\r\n        self.lineEdit.editingFinished.connect(self.editFinished)\r\n        self.dateEdit_start.dateChanged.connect(self.dateChanged)\r\n        self.dateEdit_end.dateChanged.connect(self.dateChanged)\r\n        self.verticalSlider.valueChanged.connect(self.sliderChanged)\r\n        self.spinBox.valueChanged.connect(self.spinChanged)\r\n        self.checkBoxRev.toggled.connect(self.reverseChanged)\r\n        self.checkBoxLog.toggled.connect(self.logChanged)\r\n\r\n        paths = []\r\n        with open(\"./_path\") as f:\r\n            for line in f.readlines():\r\n                if \"/\" in line:\r\n                    paths.append(line.strip('\r\n \t'))\r\n\r\n        # collect the list of .day data files\r\n        self.tickers = []\r\n        for path in paths:\r\n            if not path.endswith('/'):\r\n                path += \"/\"\r\n            for name in glob.glob(path + '*.day'):\r\n                self.tickers.append(name)\r\n        # auto-completion\r\n        items_list = [os.path.splitext(os.path.basename(t))[0] for t in self.tickers]\r\n        completer = QCompleter(items_list)\r\n        completer.activated.connect(self.completerActivated)\r\n        completer.setCaseSensitivity(Qt.CaseInsensitive)\r\n        completer.setFilterMode(Qt.MatchContains)\r\n        self.lineEdit.setCompleter(completer)\r\n\r\n        self.file = \"./sh000000.day\"\r\n        self.read_file()\r\n
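read_file() below leans on pytdx's TdxDailyBarReader, which, as used throughout this class, returns a pandas DataFrame of daily bars indexed by date. A standalone sketch of that read path, reusing the demo file name this class defaults to:

from pytdx.reader import TdxDailyBarReader

reader = TdxDailyBarReader()
df = reader.get_df("./sh000000.day")  # same default file as self.file above
close = df["close"]                   # date-indexed daily close prices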
read_file(self):\r\n        # Read the data\r\n        # print (\"read_file, {}\".format(self.file))\r\n        reader = TdxDailyBarReader()\r\n        self.df = reader.get_df(self.file)\r\n        # print (self.df.index)\r\n\r\n        # Update the UI\r\n        self.lineEdit.setText(os.path.splitext(os.path.basename(self.file))[0])\r\n        # Initialize the dates\r\n        start = self.df.index[0]\r\n        end = self.df.index[-1]\r\n        self.dateEdit_start.setDate(start)\r\n        self.dateEdit_start.setMinimumDate(start)\r\n        self.dateEdit_start.setMaximumDate(end)\r\n        self.dateEdit_end.setDate(end)\r\n        self.dateEdit_end.setMinimumDate(start)\r\n        self.dateEdit_end.setMaximumDate(end)\r\n        # print (\"read_file end, {}, {}\".format(start, end))\r\n        self.read_ticker(start, end)\r\n\r\n    def read_ticker(self, start, end):\r\n        # Read the closing prices for the given date range\r\n        # close = self.df['close']\r\n        close = self.df.loc[start:end, 'close'] # take the close values between start and end\r\n        self.c = np.array(close)\r\n        self.lenc = len(self.c)\r\n\r\n        # Helper values for the plot display\r\n        minmax = self.c.min()+self.c.max()\r\n        self.ticks = [self.c.min(), minmax * 0.5, self.c.max()]\r\n        self.tickslabel = [self.c.min(), '', self.c.max()]\r\n\r\n        self.spinBox.setValue(360)\r\n        self.checkBoxRev.setChecked(False)\r\n        self.checkBoxLog.setChecked(False)\r\n\r\n        self.theta = 1\r\n        self.direction = 1\r\n        self.log = False\r\n        self.canvasrefresh()\r\n\r\n    def canvasrefresh(self):\r\n        # Compute the cycle length, i.e. the number of bars per full turn of self.c\r\n        self.label.setText(\"Cycle:{:0.1f}\".format(self.lenc/self.theta))\r\n\r\n        # Draw the plot\r\n        self.figure.clear()\r\n        ax = self.figure.add_subplot(111, projection='polar')\r\n        ax.set_yticks(self.ticks, minor=False)\r\n        ax.set_yticklabels(self.tickslabel, minor=False)\r\n        ax.set_theta_direction(self.direction)\r\n        ax.grid('tight', ls='--')\r\n        r = np.linspace(0, self.theta * 2 * np.pi, self.lenc)\r\n\r\n        # Draw each turn in a different color\r\n        for i in range(10):\r\n            if (i > self.theta):\r\n                break\r\n            j = np.where((r >= i*2*np.pi) & (r < (i+1)*2*np.pi))\r\n            # print (\"i={}, j={}\".format(i,j[0]))\r\n            # ax.plot(r[j[0][0]: j[0][-1]], self.c[j[0][0]: j[0][-1]], alpha=0.5)\r\n            if (self.log):\r\n                ax.plot(r[j[0][0]: j[0][-1]], np.log(self.c[j[0][0]: j[0][-1]]), alpha=0.5)\r\n            else:\r\n                ax.plot(r[j[0][0]: j[0][-1]], self.c[j[0][0]: j[0][-1]], alpha=0.5)\r\n\r\n        # Show\r\n        # ax.plot(r, self.c, alpha=0.5)\r\n        self.canvas.draw()\r\n\r\n    def tickerChoosed(self):\r\n        file_back = self.file\r\n        try:\r\n            name = self.lineEdit.text()\r\n            for t in self.tickers:\r\n                if name == os.path.splitext(os.path.basename(t))[0]:\r\n                    self.file = t\r\n                    break\r\n        except:\r\n            self.file = file_back\r\n        finally:\r\n            self.read_file()\r\n\r\n    def completerActivated(self):\r\n        self.tickerChoosed()\r\n\r\n    def editFinished(self):\r\n        self.tickerChoosed()\r\n\r\n    def dateChanged(self):\r\n        start = self.dateEdit_start.date().toString(\"yyyy-MM-dd\")\r\n        end = self.dateEdit_end.date().toString(\"yyyy-MM-dd\")\r\n        self.read_ticker(start, end)\r\n\r\n    def sliderChanged(self):\r\n        value = self.verticalSlider.value()\r\n        self.spinBox.setValue(value)\r\n        self.theta = value / 360\r\n        self.canvasrefresh()\r\n\r\n    def spinChanged(self):\r\n        value = self.spinBox.value()\r\n        if (value != self.verticalSlider.value()):\r\n            self.verticalSlider.setValue(value)\r\n\r\n    def wheelEvent(self, event):\r\n        numDegrees = event.angleDelta().y()\r\n        value = self.verticalSlider.value()\r\n        if numDegrees > 0:\r\n            value += 10*self.direction\r\n        else:\r\n            value -= 10*self.direction\r\n        self.verticalSlider.setValue(value)\r\n\r\n    def reverseChanged(self):\r\n        if self.direction == 1:\r\n            self.direction = -1\r\n            self.verticalSlider.setInvertedAppearance(True)\r\n
            self.verticalSlider.setInvertedControls(True)\r\n        else:\r\n            self.direction = 1\r\n            self.verticalSlider.setInvertedAppearance(False)\r\n            self.verticalSlider.setInvertedControls(False)\r\n        self.canvasrefresh()\r\n\r\n    def logChanged(self):\r\n        self.log = self.checkBoxLog.checkState()\r\n        self.canvasrefresh()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    app = QApplication(sys.argv)\r\n    gui_action = circle()\r\n    gui_action.show()\r\n    sys.exit(app.exec_())","sub_path":"cycle.py","file_name":"cycle.py","file_ext":"py","file_size_in_byte":6704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"57302996","text":"import sys\nimport torch\n\ndef solve_quadratic(a, b, c):\n    \"\"\"\n    API to calculate the roots of quadratic equation ax^2 + bx + c = 0\n    Returns the value of roots or None if no roots exist\n    \"\"\"\n    discr = b * b - 4 * a * c\n    x = torch.empty((discr.shape[0],2),dtype=torch.float32).fill_(float(\"Inf\"))\n\n    mask = torch.eq(discr, 0)\n    if mask.any():\n        x[mask, 0] = - 0.5 * b[mask] / a[mask]\n        x[mask, 1] = - 0.5 * b[mask] / a[mask]\n    \n    mask = torch.gt(discr, 0)\n    if mask.any():\n        # Roots are (-b +/- sqrt(discr)) / (2a)\n        x[mask, 0] = 0.5 * (-b[mask] + torch.sqrt(discr[mask])) / a[mask]\n        x[mask, 1] = 0.5 * (-b[mask] - torch.sqrt(discr[mask])) / a[mask]\n\n    # Negative value represents that the intersection point is behind the ray origin\n    # Since we dont render objects behind the camera, we can set negative values to infinity\n    x[torch.lt(x, 0)] = float(\"Inf\")\n\n    return x\n\n\ndef ray_sphere_intersect(ray_origin, ray_dir, sphere_center, sphere_radius):\n    L = ray_origin - sphere_center\n    a = torch.sum(torch.mul(ray_dir, ray_dir),dim=1)\n    b = 2 * torch.sum(torch.mul(L, ray_dir),dim=1)\n    c = torch.sum(torch.mul(L,L), dim=1) - (sphere_radius * sphere_radius)\n\n    intersect_dist = solve_quadratic(a, b, c)\n\n    return intersect_dist\n\n\ndef ray_cube_intersect(ray_origin, ray_dir, min_bound, max_bound):\n    t_min_bound = (min_bound - ray_origin) / ray_dir\n    t_max_bound = (max_bound - ray_origin) / ray_dir\n\n    t_min_bound[torch.isinf(t_min_bound)] = float(\"Inf\")\n    t_max_bound[torch.isinf(t_max_bound)] = float(\"Inf\")\n\n    tmin = torch.min(t_min_bound, t_max_bound)\n    tmin[torch.isinf(tmin)] = -float(\"Inf\")\n\n    t_min_bound[torch.isinf(t_min_bound)] = -float(\"Inf\")\n    t_max_bound[torch.isinf(t_max_bound)] = -float(\"Inf\")\n\n    tmax = torch.max(t_min_bound, t_max_bound)\n    tmax[torch.isinf(tmax)] = float(\"Inf\")\n\n    intersect_dist = torch.empty((ray_origin.shape[0],2),dtype=torch.float32).fill_(float(\"Inf\"))\n    intersect_dist[:,0] = torch.max(tmin, dim=1).values\n    intersect_dist[:,1] = torch.min(tmax, dim=1).values\n\n    mask = torch.le(tmin, intersect_dist[:,1][:,None])\n    mask = mask[:,0] & mask[:,1] & mask[:,2]\n    intersect_dist[~mask] = float(\"Inf\")\n    # Negative value represents that the intersection point is behind the ray origin\n    # Since we dont render objects behind the camera, we can set negative values to infinity\n    intersect_dist[torch.lt(intersect_dist, 0)] = float(\"Inf\")\n    \n    return intersect_dist\n\n\ndef ray_vol_intersect(ray_origin, ray_dir, vol_params=None):\n    \"\"\"\n    Checks whether a ray intersects with the bounding volume or not and\n    returns the intersection distances\n\n    Params : \n    ray_origin -> 3D coordinates of ray origins (shape: [n_rays, 3])\n    ray_dir -> Unit length direction vectors corresponding to each ray (shape: [n_rays, 3])\n    vol_params -> Tuple defining bounding volume (first entry in tuple tells the type: sphere/cube)\n                  In case of sphere, vol_params 
-> (\"sphere\", center, radius)\n In case of cube, vol_params -> (\"cube\", min_bound, max_bound)\n\n Returns : intersection distances (shape: [n_rays, 2]).\n Each row contains intersection distance t0 and t1 corresponding to each ray\n t0 and t1 are the distance of the intersection points from the ray origin.\n t0 and t1 are None if ray does not intersect with the sphere\n If ray intersects at only one point, t0 and t1 will be same.\n t0 and t1 can be positive and negative. Negative value means that the intersection \n point is in direction opposite to ray direction.\n\n Intersection point can be calculated as = ray_origin + dist * ray_dir\n \"\"\"\n if vol_params is None:\n # If information about bounding volume is not provided, assume a unit sphere centered at origin\n vol_type = \"sphere\"\n origin = torch.Tensor([0.0, 0.0, 0.0])\n radius = 1.0\n vol_params = (vol_type, origin, radius)\n \n vol_type = vol_params[0]\n\n if vol_type == \"sphere\":\n return ray_sphere_intersect(ray_origin, ray_dir, vol_params[1], vol_params[2])\n elif vol_type == \"cube\":\n return ray_cube_intersect(ray_origin, ray_dir, vol_params[1], vol_params[2])\n else:\n sys.exit('Unknown bounding volume type. Please check bounding_volume.py')\n \n\n","sub_path":"src/data/bounding_volume.py","file_name":"bounding_volume.py","file_ext":"py","file_size_in_byte":4266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"545571628","text":"import os\nimport random\n\nfrom PIL import Image, ImageFilter\n\nfrom computer_text_generator import ComputerTextGenerator\ntry:\n from handwritten_text_generator import HandwrittenTextGenerator\nexcept ImportError as e:\n print('Missing modules for handwritten text generation.')\nfrom background_generator import BackgroundGenerator\nfrom distorsion_generator import DistorsionGenerator\nimport numpy as np\n\nclass FakeTextDataGenerator(object):\n @classmethod\n def generate_from_tuple(cls, t):\n \"\"\"\n Same as generate, but takes all parameters as one tuple\n \"\"\"\n\n cls.generate(*t)\n\n @classmethod\n def generate(cls, index, text, fonts, out_dir, height, extension, skewing_angle, random_skew, blur, random_blur, background_type, distorsion_type, distorsion_orientation, is_handwritten, name_format, width, alignment, text_color):\n ##########################\n # Create picture of text #\n ##########################\n images = ComputerTextGenerator.generate(text, fonts, text_color, height, width)\n\n #############################\n # Generate background image #\n #############################\n background_width = sum([ im.size[1] for im in images ])\n background = Image.fromarray(np.ones((height, background_width, 3), dtype='uint8') * 255, \"RGB\")\n\n print('# of images: {}'.format(len(images)))\n acc_width = np.random.randint(2, 13) # offset\n for idx, image in enumerate(images):\n random_angle = random.randint(0-skewing_angle, skewing_angle)\n rotated_img = image.rotate(skewing_angle if not random_skew else random_angle, expand=1)\n\n #############################\n # Apply distorsion to image #\n #############################\n if distorsion_type == 0:\n distorted_img = rotated_img # Mind = blown\n elif distorsion_type == 1:\n distorted_img = DistorsionGenerator.sin(\n rotated_img,\n vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),\n horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2)\n )\n elif distorsion_type == 2:\n distorted_img = DistorsionGenerator.cos(\n rotated_img,\n 
vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),\n                    horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2)\n                )\n            else:\n                distorted_img = DistorsionGenerator.random(\n                    rotated_img,\n                    vertical=(distorsion_orientation == 0 or distorsion_orientation == 2),\n                    horizontal=(distorsion_orientation == 1 or distorsion_orientation == 2)\n                )\n\n            ##################################\n            # Resize image to desired format #\n            ##################################\n            new_width = int(float(distorted_img.size[0] + 10) * (float(height) / float(distorted_img.size[1] + 10)))\n            resized_img = distorted_img.resize((new_width, height - 10), Image.ANTIALIAS)\n\n\n            #############################\n            # Place text with alignment #\n            #############################\n            new_text_width, _ = resized_img.size\n            background.paste(resized_img, (int(acc_width), np.random.randint(2, 10)))\n            acc_width += new_text_width\n        \n        background = BackgroundGenerator.applyMyBackground(height, background_width, np.array(background))\n\n        ##################################\n        #          Apply gaussian blur   #\n        ##################################\n\n        final_image = background.filter(\n            ImageFilter.GaussianBlur(\n                radius=(blur if not random_blur else random.randint(0, blur))\n            )\n        )\n\n        #####################################\n        # Generate name for resulting image #\n        #####################################\n        if name_format == 0:\n            image_name = '{}_{}.{}'.format(text, str(index), extension)\n        elif name_format == 1:\n            image_name = '{}_{}.{}'.format(str(index), text, extension)\n        elif name_format == 2:\n            image_name = '{}.{}'.format(str(index),extension)\n        else:\n            print('{} is not a valid name format. Using default.'.format(name_format))\n            image_name = '{}_{}.{}'.format(text, str(index), extension)\n\n        # Save the image\n        final_image.convert('RGB').save(os.path.join(out_dir, image_name))\n","sub_path":"TextRecognitionDataGenerator/data_generator.py","file_name":"data_generator.py","file_ext":"py","file_size_in_byte":4646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"84621563","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib\n\nfrom temperations import *\nfrom hearing import Pair\n\nfig = plt.figure()\n\nax1 = fig.add_subplot(111)\n\nGRAY = [.3, .3, .3]\nLIGHT_GRAY = [.7, .7, .7]\nBROWN = [.99, .3, 0]\nFLAT_TEMPERATION_COLOR = [.2, .5, 0]\nHARMONIC_TEMPERAION_COLOR = [0, .4, 1]\n\nharmonic_major_temperation = map(float, HARMONIC_INTERVALS)\nharmonic_temperation = map(float, CHROMATIC_TEMPERATION_BY_TONAL[DO])\n\n\n# All intervals of the tonal temperaments\nax1.eventplot(\n    [CHROMATIC_INTERVALS],\n    colors=[LIGHT_GRAY], lineoffsets=[6], linelengths=12,\n    orientation='vertical'\n)\n\n# Vertical lines for the equal-tempered and harmonically tempered scales\nax1.eventplot(\n    [FLAT_TEMPERATION, harmonic_major_temperation],\n    colors=[FLAT_TEMPERATION_COLOR, HARMONIC_TEMPERAION_COLOR],\n    lineoffsets=[6, 6],\n    linelengths=12,\n    linestyles=['-', '--'],\n    linewidths=[1.5, 2],\n    orientation='vertical'\n)\n\nfor j in MAJOR_GAMMA:\n    ax1.axvline(j, color=GRAY, linestyle=':')\n\n\nax1.eventplot(\n    [\n        temperation\n        for tonal, temperation in enumerate(CHROMATIC_TEMPERATION_BY_TONAL)\n    ],\n    colors=[LIGHT_GRAY],\n    lineoffsets=range(len(CHROMATIC_TEMPERATION_BY_TONAL)),\n    linelengths=.4,\n    linewidths=3,\n    orientation='vertical'\n)\n\nax1.eventplot(\n    [\n        [\n            interval for nota, interval in enumerate(temperation)\n            if nota in MAJOR_GAMMA\n        ]\n        for 
temperation in CHROMATIC_TEMPERATION_BY_TONAL\n ],\n colors=[\n BROWN if nota in MAJOR_GAMMA else LIGHT_GRAY\n for nota in NOTES13\n ],\n lineoffsets=NOTES13,\n linelengths=.4,\n linewidths=3,\n orientation='vertical'\n)\n\n\nax1.set_ybound(.95, 2.05)\nax1.set_yticks(harmonic_major_temperation)\nax1.set_yticklabels([INTERVALS[nota] for nota in MAJOR_GAMMA])\n\nax1.set_xbound(-.5, 12.5)\nax1.set_xticks(range(len(NOTES13)))\nax1.set_xticklabels([\n name if nota in MAJOR_GAMMA else ''\n for nota, name in enumerate(NOTES_NAMES)\n])\n\n\nmatplotlib.rcParams['font.size'] = 8.0\n\nplt.show()\n","sub_path":"plotv_temperations.py","file_name":"plotv_temperations.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"377103361","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 29 21:15:24 2018\n\n@author: rstyczynski\n\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n#\n# HELPER FUNCTIONS\n#\ndef changesPerSecond(dataset, column):\n if not 'timestamp_dt' in dataset:\n dataset['timestamp_dt'] = dataset['timestamp'].diff()\n\n columnDelta=column + '_dv'\n dataset[columnDelta] = dataset[column].diff()\n \n columnDelta=column + '_dvdt'\n dataset[columnDelta] = dataset[column + '_dv'] / dataset['timestamp_dt']\n\n#\n# \n#\ndf = pd.read_csv('/Users/rstyczynski/Documents/IKEA/11.Test/TESTS/TEST2505#5/ppseelm-lx41085/tmp/umc/TEST2505#5/2018-05-25/2018-05-25-140250_vmstat.log')\ndf['datetime'] = pd.to_datetime(df['datetime'])\ndf.index = df['datetime']\n\ndf2_ = pd.read_csv('/Users/rstyczynski/Documents/IKEA/11.Test/TESTS/TEST2505#5/ppseelm-lx41085/tmp/umc/TEST2505#5/2018-05-25/2018-05-25-140250_ifconfig.log') \ndf2 = df2_.loc[df2_['device'] == 'eth0']\ndf2['datetime'] = pd.to_datetime(df2['datetime'])\ndf2.index = df2['datetime']\n \nfig1, axes1 = plt.subplots(3,1, sharex=True)\nfor ax in axes1:\n ax.xaxis.grid(True, which='minor', linestyle='-', linewidth=0.25)\n\n \ncolumn1 = ' Interrupts'\ncnt=df[column1].count()\n\ndf[column1 + '_mean'] = df[column1].rolling(cnt/10).mean()\ndf[column1 + '_mean'].plot(ax=axes1[0], style='g-', grid=True)\n\ncolumn2 = 'ContextSwitches'\ncnt=df[column2].count()\ndf[column2 + '_mean'] = df[column2].rolling(cnt/10).mean()\n\ndf[column2 + '_mean'].plot(ax=axes1[1], style='b-', grid=True)\n\ndf[column1 + '_corr_' + column2] = df[column1].rolling(window=cnt/10).corr(other=df[column2]).rolling(cnt/10).mean()\ndf[column1 + '_corr_' + column2].plot(ax=axes1[2], style='r-', grid=True)\n\n#\n# \n#\ncolumn3=' RXbytes'\nchangesPerSecond(df2, column3)\ncnt=df2[column3].count()\n\nfig2, axes2 = plt.subplots(3, 1, sharex=True)\nfor ax in axes2:\n ax.xaxis.grid(True, which='minor', linestyle='-', linewidth=0.25)\n\ncorr = pd.DataFrame()\ncorr[column1] = df[column1].resample('5S').mean().ffill().rolling(cnt/10).mean()\ncorr[column3] = df2[column3 + '_dvdt'].resample('5S').mean().ffill().rolling(cnt/10).mean()\ncorr[column1 + '_corr_' + column3] = corr[column1].rolling(window=cnt/10).corr(other=corr[column3]).rolling(cnt/10).mean()\n\ncorr[column1].plot(ax=axes2[0], style='g-', grid=True)\ncorr[column3].plot(ax=axes2[1], style='b-', grid=True)\ncorr[column1 + '_corr_' + column3].plot(ax=axes2[2], style='r-', grid=True)\n\n\n","sub_path":"varia/plot/correlate.py","file_name":"correlate.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} 
+{"seq_id":"559796079","text":"import numpy as np\nfrom numba import njit, prange\n\n\ndef normalize_rows(v):\n return v / (1e-30 + np.linalg.norm(v, axis=1)[:, np.newaxis])\n\n\ndef train(matrix, n_clusters, eps=1e-9, max_iter=100):\n \"\"\"\n trains a k-means model\n Args:\n matrix: scipy.sparse.coo_matrix of shape (n,d). Each row represents a data point.\n n_clusters: The desired number of clusters.\n Threshold for convergence.\n max_iter: Maximum number of iterations.\n Returns:\n i: number of iterations required for convergence\n objective: value of the objective function after the last iteration\n labels: an array of size n containing the label assigned to each data point.\n centroids: an array of shape (n_clusters,d) containing the centroid of each cluster\n \"\"\"\n n, d = matrix.shape\n\n row = matrix.row\n col = matrix.col\n data = matrix.data.astype(float)\n\n # Initialize the centroids with a random subset of data points\n indices = np.arange(n)\n np.random.shuffle(indices)\n subset = indices[0:n_clusters]\n initial_centroids = np.empty((n_clusters, d))\n initialize(initial_centroids, subset, row, col, data)\n initial_centroids = normalize_rows(initial_centroids)\n\n return k_means_sparse(row, col, data, n, d, initial_centroids, n_clusters, eps, max_iter)\n\n\n@njit\ndef initialize(initial_centroids, subset, row, col, data):\n for i in range(data.size):\n for j, r in enumerate(subset):\n if row[i] == r:\n initial_centroids[j, col[i]] = data[i]\n break\n\n\n@njit(parallel=True)\ndef k_means_sparse(row, col, data, n, d, initial_centroids, n_clusters, eps, max_iter):\n nnz = data.size\n\n # Normalize the data matrix\n row_norm_sq = np.zeros((n))\n for i in range(nnz):\n row_norm_sq[row[i]] += data[i] ** 2\n for i in range(nnz):\n data[i] /= 1e-30 + np.sqrt(row_norm_sq[row[i]])\n\n # Initialize main variables\n centroids = initial_centroids\n labels = np.zeros((n), dtype=np.int32)\n objective = -1\n\n # Initialize auxiliary variables\n product = np.zeros((n, n_clusters))\n label_freq = np.zeros((n_clusters), dtype=np.int32)\n\n iter = 0\n while True:\n iter += 1\n old_objective = objective\n\n # Compute new labels\n product[:] = 0\n for c in prange(n_clusters):\n for i in range(nnz):\n product[row[i], c] += data[i] * centroids[c, col[i]]\n for i in range(n):\n labels[i] = np.argmax(product[i])\n\n # Compute new centroids\n centroids[:] = 0\n label_freq[:] = 0\n\n for i in range(nnz):\n label_freq[labels[row[i]]] += 1\n centroids[labels[row[i]], col[i]] += data[i]\n for c in range(n_clusters):\n if label_freq[c] > 0:\n for j in range(d):\n centroids[c, j] /= label_freq[c]\n\n # Normalize centroids\n for c in range(n_clusters):\n centroids[c] /= 1e-30 + np.linalg.norm(centroids[c])\n\n # Compute cosine similarity\n sum = 0\n for i in range(nnz):\n sum += data[i] * centroids[labels[row[i]], col[i]]\n objective = sum / (n * d)\n\n if np.abs(objective - old_objective) < eps or iter >= max_iter:\n break\n\n return objective, centroids, labels\n","sub_path":"k_means.py","file_name":"k_means.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"530332084","text":"from pwn import *\n\n\narch = \"i486\"\n\n\ndef run(level, payload):\n write(\"/tmp/docgil\", payload)\n print(\"payload: \")\n print(read(\"/tmp/docgil\"))\n io = process(\"cat /tmp/docgil | /opt/phoenix/\" + arch + \"/\"+level, shell=True)\n return io.recvall()\n\n\ndef run_with_arg(level, arg):\n write(\"/tmp/docgil\", arg)\n 
print(\"payload: \")\n print(read(\"/tmp/docgil\"))\n io = process(\"/opt/phoenix/\" + arch + \"/\"+level+\" \\\"`cat /tmp/docgil`\\\"\", shell=True)\n return io.recvall()\n\n\ndef run_with_args(level, args):\n index = 0\n args_string = \"\"\n for arg in args:\n filename = \"/tmp/docgil\" + str(index)\n write(filename, arg)\n print(\"payload\"+str(index)+\": \")\n print(read(filename))\n args_string += \" \\\"`cat \" + filename + \"`\\\"\"\n index += 1\n\n io = process(\"/opt/phoenix/\" + arch + \"/\" + level + args_string, shell=True)\n return io.recvall()\n\n\nwinner_function_adr = pack(0x0804889a, 32)\nputs_got_adr = pack(0x804c140, 32)\narg1 = b\"\"\n\n# name\narg1 += 8 * b\"A\"\n# meta data of next chunk\narg1 += 8 * b\"B\"\n# prio next chunk\narg1 += 4 * b\"C\"\narg1 += puts_got_adr\n\narg2 = b\"\"\narg2 += winner_function_adr\nr = run_with_args(\"heap-one\", [arg1, arg2])\nprint(r)","sub_path":"heap1.py","file_name":"heap1.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"411541084","text":"import os, sys\nsys.path.append('/rpicluster/config')\nfrom functions import *\n\n\nf = open(\"/rpicluster/network-manager/configured\",\"r\")\n\nmachines = get_nodes()\nnetwork = int(f.read(1))\nstream = os.popen(\"ip addr\", 'r')\nip_output = stream.read()\ninternet_ip = \"\"\naccess_point = \"\"\ninternet_name = \"\"\nconnection_name = \"\"\n\n\n\nif(network != 0):\n\n if(network == 1):\n internet_ip = str(get_ip(ip_output, \"wlan1\"))\n internet_name = \"wlan1\"\n access_point = str(get_ip(ip_output, \"wlan0\"))\n connection_name = \"Access Point\"\n\n elif(network == 2):\n internet_ip = str(get_ip(ip_output, \"wlan0\"))\n internet_name = \"wlan0\"\n access_point = str(get_ip(ip_output, \"eth0\"))\n connection_name = \"Switch\"\n\n # else if(network == 3):\n print(\"\\nCurrent network configuration: \" + network_type(network))\n print(\"\\nInternet on \" + internet_name + \"--> \" + internet_ip)\n if(internet_ip != \"None\"):\n print(\" |\")\n print(\" |\")\n print(\" --> \"+ connection_name + \"--> \" + access_point)\n for x in range(len(machines)):\n if ping_node(machines[x][1]) == 0:\n print(\" |\")\n print(\" --> \" + machines[x][0] + \" - \" + machines[x][1])\n else:\n print(\" |\")\n print(\" |\")\n print(\" --> \"+ connection_name + \"--> \" + access_point)\n for x in range(len(machines)):\n if ping_node(machines[x][1]) == 0:\n print(\" |\")\n print(\" --> \" + machines[x][0] + \" - \" + machines[x][1])\n\nelse:\n print(\"\\nNo rpicluster Network configured ! ! 
!\\n\")\n","sub_path":"stage2S/02-net-tweaks/files/status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"270144994","text":"from enemy import *\nimport random\nfrom bullet_enemy import EnemyBullet\nfrom BehaviorTree import BehaviorTree, LeafNode, SelectorNode, SequenceNode\n\n\nimport state_StageMain\n\n\nclass IdleState:\n @staticmethod\n def enter(bee):\n pass\n\n @staticmethod\n def exit(bee):\n pass\n\n @staticmethod\n def do(bee):\n bee.flying_frame = (bee.flying_frame + FRAMES_PER_FLYING_ACTION * FLYING_ACTION_PER_TIME\n * framework.frame_time) % FRAMES_PER_FLYING_ACTION\n\n @staticmethod\n def draw(bee):\n bee.image.clip_draw(int(bee.flying_frame) * 17, 0, 17, 17, bee.x, bee.y, 50, 50)\n\n\nclass ExplodeState:\n @staticmethod\n def enter(bee):\n bee.hit_sound.play()\n bee.explode_timer = TIME_PER_EXPLODE_ACTION\n state_StageMain.enemies.remove(bee)\n\n @staticmethod\n def exit(bee):\n gameworld.remove_object(bee)\n\n @staticmethod\n def do(bee):\n bee.explode_timer -= framework.frame_time\n bee.explode_frame = (bee.explode_frame + FRAMES_PER_EXPLODE_ACTION * EXPLODE_ACTION_PER_TIME\n * framework.frame_time) % FRAMES_PER_EXPLODE_ACTION\n if bee.explode_timer < 0:\n bee.cur_state.exit(bee)\n\n @staticmethod\n def draw(bee):\n bee.explode_images[int(bee.explode_frame)].draw(bee.x, bee.y, 75, 75)\n\n\nclass AttackState:\n @staticmethod\n def enter(bee):\n bee.attacking = True\n bee.attack_sound.play()\n\n @staticmethod\n def exit(bee):\n bee.attacking = False\n\n @staticmethod\n def do(bee):\n bee.attack_bt.run()\n\n @staticmethod\n def draw(bee):\n bee.image.clip_composite_draw(int(bee.flying_frame) * 17, 0, 17, 17,\n bee.dir + math.radians(-90), 'h', bee.x, bee.y, 50, 50)\n\n\nclass Butterfly:\n image = None\n explode_images = None\n\n def __init__(self, coord_pos):\n if Butterfly.image is None:\n Butterfly.image = load_image('Image/butterfly_sprite_34x17.png')\n\n if Butterfly.explode_images is None:\n Butterfly.explode_images = [load_image('Image/enemy_explosion0_39.png'),\n load_image('Image/enemy_explosion1_39.png'),\n load_image('Image/enemy_explosion2_39.png'),\n load_image('Image/enemy_explosion3_39.png'),\n load_image('Image/enemy_explosion4_39.png')]\n\n self.speed = 0\n self.dir = 0\n self.x, self.y = coord_pos\n self.target_pos = []\n\n self.explode_timer = 0\n\n self.flying_frame = 0\n self.explode_frame = 0\n\n self.cur_state = IdleState\n self.cur_state.enter(self)\n\n self.hit_sound = load_wav('Sound/ButterflyDie.wav')\n self.hit_sound.set_volume(256)\n self.attack_sound = load_wav('Sound/Attack.wav')\n\n self.attacking = False\n self.attack_positions = []\n self.attack_order = 0\n self.attack_bt = None\n self.build_behavior_tree()\n\n def is_attack_state(self):\n if self.cur_state == AttackState:\n return True\n else:\n return False\n\n def calculate_current_position(self):\n self.flying_frame = (self.flying_frame + FRAMES_PER_FLYING_ACTION * FLYING_ACTION_PER_TIME\n * framework.frame_time) % FRAMES_PER_FLYING_ACTION\n self.x += self.speed * math.cos(self.dir) * framework.frame_time\n self.y += self.speed * math.sin(self.dir) * framework.frame_time\n\n def get_next_position(self):\n self.target_pos = self.attack_positions[self.attack_order % 2]\n self.attack_order += 1\n if self.attack_order == 3:\n self.x, self.y = self.attack_positions[1]\n self.attack_order = 0\n self.cur_state.exit(self)\n self.cur_state = IdleState\n 
self.cur_state.enter(self)\n return BehaviorTree.FAIL\n\n self.dir = math.atan2(self.target_pos[1] - self.y, self.target_pos[0] - self.x)\n return BehaviorTree.SUCCESS\n\n def move_to_target(self):\n self.speed = ATTACK_SPEED_PPS\n self.calculate_current_position()\n distance = (self.target_pos[0] - self.x) ** 2 + (self.target_pos[1] - self.y) ** 2\n if distance < 10 ** 2:\n return BehaviorTree.SUCCESS\n else:\n return BehaviorTree.RUNNING\n\n def set_attack_position(self):\n self.attack_positions = [(self.target_pos[0], self.target_pos[1]), (self.x, self.y)]\n\n def attack(self, starship_pos):\n self.target_pos = starship_pos\n self.set_attack_position()\n\n self.cur_state.exit(self)\n self.cur_state = AttackState\n self.cur_state.enter(self)\n\n def get_bb(self):\n return self.x - 15, self.y - 15, self.x + 15, self.y + 15\n\n def shoot(self):\n bullet = EnemyBullet(self.x, self.y - 25)\n gameworld.add_object(bullet, 1)\n state_StageMain.enemy_bullets.append(bullet)\n\n def hit(self):\n if random.randint(0, 1) == 0:\n self.shoot()\n\n self.cur_state.exit(self)\n self.cur_state = ExplodeState\n self.cur_state.enter(self)\n\n def update(self):\n self.cur_state.do(self)\n\n def draw(self):\n self.cur_state.draw(self)\n\n def build_behavior_tree(self):\n attack_node = SequenceNode('Attack')\n get_next_position_node = LeafNode('Get Next Position', self.get_next_position)\n move_to_target_node = LeafNode('Move To Target', self.move_to_target)\n attack_node.add_children(get_next_position_node, move_to_target_node)\n self.attack_bt = BehaviorTree(attack_node)\n\n","sub_path":"Galaga/butterfly.py","file_name":"butterfly.py","file_ext":"py","file_size_in_byte":5729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"281909492","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/tbielawa/rhat/release-engine/re-client/src/reclient/utils.py\n# Compiled at: 2015-01-27 15:32:52\nimport os\nfrom subprocess import call\nimport tempfile, json, yaml, logging\nfrom reclient.colorize import colorize\nfrom prettytable import PrettyTable\nout = logging.getLogger('reclient')\n\ndef cooked_input(msg=''):\n \"\"\"We need this to test user prompt\"\"\"\n return raw_input(msg)\n\n\ndef user_prompt_yes_no(prompt_str=''):\n \"\"\"Simple re-useable prompt for action confirmation. Adds [y/n]\n suffix automatically.\n\n Returns True if Yes, False if No\n \"\"\"\n ret = None\n while ret is None:\n ans = cooked_input(prompt_str + '[y/n]: ')\n if ans == 'y' or ans == 'Y':\n ret = True\n elif ans == 'n' or ans == 'N':\n ret = False\n else:\n continue\n\n return ret\n\n\ndef serialize(blob, format):\n \"\"\"\n Serializes a structure.\n \"\"\"\n if format == 'json':\n return json.dumps(blob, indent=4)\n return yaml.safe_dump(blob)\n\n\ndef deserialize(blob, format):\n \"\"\"\n Retutns a deserialized structure.\n \"\"\"\n if format == 'json':\n return json.loads(blob)\n return yaml.safe_load(blob)\n\n\ndef save_playbook(blob, dest, format):\n \"\"\"Save the temporary playbook, `source` at `path`\"\"\"\n with open(dest, 'w') as (_dest):\n try:\n del blob['id']\n except KeyError:\n pass\n\n if format == 'json':\n json.dump(blob, _dest, indent=4)\n else:\n yaml.safe_dump(blob, _dest)\n\n\ndef temp_blob(data, format):\n \"\"\"data is either a string or a hash. 
Function will 'do the right\nthing' either way\n\nformat is the format to write with.\n\"\"\"\n out.debug('tmp_blob received [%s]: %s' % (type(data), str(data)))\n if type(data) in [unicode, str]:\n data = json.loads(data)\n elif type(data) == dict or type(data) == list:\n pass\n else:\n raise ValueError(\"This isn't something I can work with\")\n tmpfile = tempfile.NamedTemporaryFile(mode='w', suffix='.%s' % format, prefix='reclient-')\n if format == 'json':\n json.dump(data, tmpfile, indent=4)\n else:\n yaml.safe_dump(data, tmpfile)\n tmpfile.flush()\n return tmpfile\n\n\ndef edit_playbook(blob, format):\n \"\"\"Edit the playbook object 'blob'.\n\nIf 'blob' is an unserialized string, then it is serialized and dumped\n(with indenting) out to a temporary file.\n\nIf 'blob' is a serialized hash is is dumped out (with indenting) to a\ntemporary file.\n\nIf 'blob' is a file object (like you would get from 'temp_blob')\nit is flush()'d.\n\n'format' is either json or yaml.\n\nOnce all that is complete, an editor is opened pointing at the path to\nthe temporary file. After the editor is closed the original (or\ninstantiated) file handle is returned.\"\"\"\n VISUAL = os.environ.get('VISUAL', None)\n if VISUAL is None:\n EDITOR = os.environ.get('EDITOR', 'emacs')\n else:\n EDITOR = VISUAL\n callcmd = [\n EDITOR]\n tmpfile = blob\n if isinstance(blob, tempfile._TemporaryFileWrapper):\n blob.flush()\n else:\n tmpfile = temp_blob(blob, format)\n try:\n out.debug('Editing with EDITOR=%s' % EDITOR)\n if EDITOR == 'emacs':\n callcmd.extend(['-nw', tmpfile.name])\n else:\n callcmd.append(tmpfile.name)\n out.debug('Going to launch editor with args: %s' % str(callcmd))\n call(callcmd)\n except OSError:\n out.debug(\"First call to EDITOR failed. Trying 'vi' explicitly\")\n try:\n fallback_call = ['vi', tmpfile.name]\n call(fallback_call)\n except OSError:\n out.debug(\"Second call to EDITOR failed. Trying 'vim' explicitly\")\n try:\n fallback_back_call = [\n 'vim', tmpfile.name]\n call(fallback_back_call)\n except OSError:\n out.info('Could not launch any editors. 
Tried: %s, vi, and vim' % EDITOR)\n return False\n\n return tmpfile\n\n\ndef less_file(path):\n call(['less', '-X', path])\n\n\ndef read_dynamic_args():\n \"\"\"Prompt the user for dynamic arguments\n\nAn empty key name ends the prompt\"\"\"\n dynamic_args = {}\n while True:\n argname = cooked_input(colorize('Argument name: ', color='yellow'))\n if argname == '':\n break\n else:\n argvalue = cooked_input(colorize('Argument value: ', color='yellow'))\n try:\n argvalue = int(argvalue)\n except ValueError:\n pass\n\n dynamic_args[argname] = argvalue\n\n return dynamic_args\n\n\ndef dynamic_args_table(dargs):\n \"\"\"Build a nice table of collected dynamic args\"\"\"\n t = PrettyTable(['Arg Name', 'Value'])\n t.header_style = 'upper'\n if dargs == {}:\n return ''\n for k, v in dargs.iteritems():\n t.add_row([k, v])\n\n return t","sub_path":"pycfiles/re-client-0.0.6-5.tar/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"40823278","text":"#Trout, written by John Fish in July 2013.\n#Updated July 3, 2015\nimport os, sys, time\n\ncurPath = os.getcwd()\n\ndef writeFile(fileName):\n article = open(curPath+'/input/rawarticles/'+fileName, 'r+')\n articleHTML = open(curPath+'/output/writing/'+fileName+'.html', 'w')\n os.chdir(curPath)\n headerOne = open('input/headerOne', 'r+')\n headerTwo = open('input/headerTwo', 'r+')\n footer = open('input/footer', 'r+')\n article_type = article.readline()\n title = article.readline()\n featuretext = article.readline()\n articleHTML.write(headerOne.read()+title+headerTwo.read()+article.read()+footer.read())\n \n\ndef resetAll():\n writeHeaderToArticlePage()\n os.chdir(\"input/rawarticles\")\n articles = []\n for files in os.listdir(\".\"):\n articles.append(files)\n articles.sort(key=lambda x: os.path.getctime(x))\n articles.reverse()\n for article in articles:\n writeFile(article)\n writeFilesToArticlePage(article)\n os.chdir(\"input/rawarticles\")\n writeFooterToArticlePage()\n\ndef writeHeaderToArticlePage():\n articlePage = open('output/writing/index.html', 'w')\n headerArticles = open('input/headerArticles', 'r+')\n articlePage.write(headerArticles.read())\n\ndef writeFilesToArticlePage(fileName):\n os.chdir(curPath)\n article_file = \"{0}/input/rawarticles/{1}\".format(curPath, fileName)\n articlePage = open('output/writing/index.html', 'a')\n article = open('input/rawarticles/'+fileName, 'r+')\n modified_time = time.ctime(os.path.getctime(article_file)).split()\n user_time = str(modified_time[0]+\" \"+modified_time[1]+\" \"+modified_time[2]+\", \"+modified_time[4])\n articlePage.write('
<li>'+article.readline()+'<br>'+article.readline()+'<br>Created '+user_time+'<br><br></li>')\n    \ndef writeFooterToArticlePage():\n    os.chdir(curPath)\n    articlePage = open('output/writing/index.html', 'a')\n    footerArticles = open('input/footerArticles', 'r+')\n    articlePage.write(footerArticles.read())\n    \nresetAll()\n","sub_path":"trout.py","file_name":"trout.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"74486345","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom flask import Flask, render_template\nfrom routes import resource_routes as r\nfrom routes import img_routes as ir\nfrom routes import system_routes as s\nfrom routes import feature_routes as fe\nfrom routes import imgClassifier_routes\n\n# Create the Flask app object\napp = Flask(__name__)\n\n# Register the blueprint objects\napp.register_blueprint(r.re_bp)\napp.register_blueprint(ir.is_bp)\napp.register_blueprint(s.sy_bp)\napp.register_blueprint(fe.fe_bp)\napp.register_blueprint(imgClassifier_routes.mushroom_bp)\n\nclass Net(nn.Module):\n    def __init__(self):\n        super().__init__()\n        self.conv1 = nn.Conv2d(3, 6, 5) # kernel=5, padding=0, stride=1. 32-5+1=28\n        self.pool = nn.MaxPool2d(2, 2) # 14\n        self.conv2 = nn.Conv2d(6, 16, 5) # kernel=5, padding=0, stride=1. 14-5+1=10 => 5x5 after max pooling\n        self.fc1 = nn.Linear(16 * 5 * 5, 120)\n        self.fc2 = nn.Linear(120, 20)\n        self.fc3 = nn.Linear(20, 9)\n\n    def forward(self, x):\n        x = self.pool(F.relu(self.conv1(x)))\n        x = self.pool(F.relu(self.conv2(x)))\n        x = torch.flatten(x, 1) # flatten all dimensions except the batch dimension\n        x = F.relu(self.fc1(x))\n        x = F.relu(self.fc2(x))\n        x = self.fc3(x)\n        return x\n\n@app.route('/')\ndef root():\n    return render_template('resourceForm.html')\n\nif __name__ == '__main__':\n    app.run()\n\n\n","sub_path":"FinalProject_Project Mushroom/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"129578320","text":"import logging\nimport os\nimport sys\nfrom abc import ABCMeta, abstractmethod, abstractproperty\nfrom collections import defaultdict\nfrom importlib.util import find_spec\nfrom typing import Type\n\nfrom jsoncfg.value_mappers import require_string, require_array\nfrom peek_plugin_base.PluginCommonEntryHookABC import PluginCommonEntryHookABC\nfrom peek_plugin_base.PluginPackageFileConfig import PluginPackageFileConfig\nfrom peek_platform import PeekPlatformConfig\nfrom vortex.PayloadIO import PayloadIO\nfrom vortex.Tuple import removeTuplesForTupleNames, registeredTupleNames, \\\n    tupleForTupleName\n\nlogger = logging.getLogger(__name__)\n\n\nclass PluginLoaderABC(metaclass=ABCMeta):\n    _instance = None\n\n    def __new__(cls, *args, **kwargs):\n        assert cls._instance is None, \"PluginServerLoader is a singleton, don't construct it\"\n        cls._instance = object.__new__(cls)\n        return cls._instance\n\n    def __init__(self):\n        self._loadedPlugins = {}\n\n        self._vortexEndpointInstancesByPluginName = defaultdict(list)\n        self._vortexTupleNamesByPluginName = defaultdict(list)\n\n    @abstractproperty\n    def _entryHookFuncName(self) -> str:\n        \"\"\" Entry Hook Func Name.\n        Protected property\n        :return: EG \"peekServerEntryHook\"\n\n        \"\"\"\n\n    @abstractproperty\n    def _entryHookClassType(self):\n        \"\"\" Entry Hook Class Type\n        Protected property\n        :return: EG PluginServerEntryHookABC\n\n        \"\"\"\n\n    @abstractproperty\n    def _platformServiceNames(self) -> [str]:\n        \"\"\" Platform Service Name\n        Protected property\n        :return: one or more of \"server\", \"worker\", 
\"agent\", \"client\", \"storage\"\n\n \"\"\"\n\n def loadPlugin(self, pluginName):\n try:\n self.unloadPlugin(pluginName)\n\n # Make note of the initial registrations for this plugin\n endpointInstancesBefore = set(PayloadIO().endpoints)\n tupleNamesBefore = set(registeredTupleNames())\n\n modSpec = find_spec(pluginName)\n if not modSpec:\n raise Exception(\"Can not load Peek App package %s\", pluginName)\n\n PluginPackage = modSpec.loader.load_module()\n pluginRootDir = os.path.dirname(PluginPackage.__file__)\n\n # Load up the plugin package info\n pluginPackageJson = PluginPackageFileConfig(pluginRootDir)\n pluginVersion = pluginPackageJson.config.plugin.version(require_string)\n pluginRequiresService = pluginPackageJson.config.requiresServices(require_array)\n\n # Make sure the service is required\n # Storage and Server are loaded at the same time, hence the intersection\n if not set(pluginRequiresService) & set(self._platformServiceNames):\n logger.debug(\"%s does not require %s, Skipping load\",\n pluginName, self._platformServiceNames)\n return\n\n # Get the entry hook class from the package\n entryHookGetter = getattr(PluginPackage, str(self._entryHookFuncName))\n EntryHookClass = entryHookGetter() if entryHookGetter else None\n\n if not EntryHookClass:\n logger.warning(\n \"Skipping load for %s, %s.%s is missing or returned None\",\n pluginName, pluginName, self._entryHookFuncName)\n return\n\n if not issubclass(EntryHookClass, self._entryHookClassType):\n raise Exception(\"%s load error, Excpected %s, received %s\"\n % (pluginName, self._entryHookClassType, EntryHookClass))\n\n ### Perform the loading of the plugin\n self._loadPluginThrows(pluginName, EntryHookClass, pluginRootDir)\n\n # Make sure the version we have recorded is correct\n PeekPlatformConfig.config.setPluginVersion(pluginName, pluginVersion)\n\n # Make note of the final registrations for this plugin\n self._vortexEndpointInstancesByPluginName[pluginName] = list(\n set(PayloadIO().endpoints) - endpointInstancesBefore)\n\n self._vortexTupleNamesByPluginName[pluginName] = list(\n set(registeredTupleNames()) - tupleNamesBefore)\n\n self.sanityCheckServerPlugin(pluginName)\n\n except Exception as e:\n logger.error(\"Failed to load plugin %s\", pluginName)\n logger.exception(e)\n\n @abstractmethod\n def _loadPluginThrows(self, pluginName: str, EntryHookClass: Type[PluginCommonEntryHookABC],\n pluginRootDir: str) -> None:\n \"\"\" Load Plugin (May throw Exception)\n\n This method is called to perform the load of the module.\n\n :param pluginName: The name of the Peek App, eg \"plugin_noop\"\n :param PluginPackage: A reference to the main plugin package, eg \"import plugin_noop\"\n this parameter would be plugin_noop.\n :param pluginRootDir: The directory of the plugin package,\n EG dirname(plugin_noop.__file__)\n\n \"\"\"\n\n def unloadPlugin(self, pluginName: str):\n oldLoadedPlugin = self._loadedPlugins.get(pluginName)\n\n if not oldLoadedPlugin:\n return\n\n # Remove the registered endpoints\n for endpoint in self._vortexEndpointInstancesByPluginName[pluginName]:\n PayloadIO().remove(endpoint)\n del self._vortexEndpointInstancesByPluginName[pluginName]\n\n # Remove the registered tuples\n removeTuplesForTupleNames(self._vortexTupleNamesByPluginName[pluginName])\n del self._vortexTupleNamesByPluginName[pluginName]\n\n self._unloadPluginPackage(pluginName, oldLoadedPlugin)\n\n def listPlugins(self):\n def pluginTest(name):\n if not name.startswith(\"plugin_\"):\n return False\n return 
os.path.isdir(os.path.join(self._pluginPath, name))\n\n plugins = os.listdir(self._pluginPath)\n plugins = list(filter(pluginTest, plugins))\n return plugins\n\n def loadAllPlugins(self):\n for pluginName in PeekPlatformConfig.config.pluginsEnabled:\n self.loadPlugin(pluginName)\n\n def unloadAllPlugins(self):\n while self._loadedPlugins:\n self.unloadPlugin(list(self._loadedPlugins.keys())[0])\n\n def _unloadPluginPackage(self, pluginName, oldLoadedPlugin):\n\n # Stop and remove the Plugin\n del self._loadedPlugins[pluginName]\n\n try:\n oldLoadedPlugin.stop()\n oldLoadedPlugin.unload()\n\n except Exception as e:\n logger.error(\"An exception occured while unloading plugin %s,\"\n \" unloading continues\" % pluginName)\n logger.exception(e)\n\n # Unload the packages\n loadedSubmodules = [modName\n for modName in list(sys.modules.keys())\n if modName.startswith('%s.' % pluginName)]\n\n for modName in loadedSubmodules:\n del sys.modules[modName]\n\n if pluginName in sys.modules:\n del sys.modules[pluginName]\n\n # pypy doesn't have getrefcount\n if hasattr(sys, \"getrefcount\") and sys.getrefcount(oldLoadedPlugin) > 2:\n logger.warning(\"Old references to %s still exist, count = %s\",\n pluginName, sys.getrefcount(oldLoadedPlugin))\n\n def sanityCheckServerPlugin(self, pluginName):\n ''' Sanity Check Plugin\n\n This method ensures that all the things registed for this plugin are\n prefixed by it's pluginName, EG plugin_noop\n '''\n\n # All endpoint filters must have the 'plugin' : 'plugin_name' in them\n for endpoint in self._vortexEndpointInstancesByPluginName[pluginName]:\n filt = endpoint.filt\n if 'plugin' not in filt and filt['plugin'] != pluginName:\n raise Exception(\"Payload endpoint does not contan 'plugin':'%s'\\n%s\"\n % (pluginName, filt))\n\n # all tuple names must start with their pluginName\n for tupleName in self._vortexTupleNamesByPluginName[pluginName]:\n TupleCls = tupleForTupleName(tupleName)\n if not tupleName.startswith(pluginName):\n raise Exception(\"Tuple name does not start with '%s', %s (%s)\"\n % (pluginName, tupleName, TupleCls.__name__))\n\n def notifyOfPluginVersionUpdate(self, pluginName, pluginVersion):\n logger.info(\"Received PLUGIN update for %s version %s\", pluginName, pluginVersion)\n return self.loadPlugin(pluginName)\n","sub_path":"peek_platform/plugin/PluginLoaderABC.py","file_name":"PluginLoaderABC.py","file_ext":"py","file_size_in_byte":8561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"267871598","text":"#\n##\n##########################################################################\n# #\n# gauth :: config_parser #\n# #\n# (c) 2018 Vamegh Hedayati #\n# #\n# Vamegh Hedayati #\n# #\n# Please see Copying for License Information #\n# GNU/LGPL #\n##########################################################################\n##\n#\nimport file_handler\nimport getpass\nimport os\nimport pwd\nimport re\nimport sys\n\n\nclass Parse(object):\n def __init__(self, options=None, parser=None):\n self.options = options\n self.parser = parser\n self.handle = file_handler.FileHandler()\n self.config_data = None\n\n def read_config(self):\n if self.options.config:\n try:\n config_data = self.handle.read_file(config_file=self.options.config)\n self.config_data = config_data\n except (IOError, ValueError) as err:\n print(\"\\nConfig File Issue: %s :: Error : %s\\n\" % (self.options.config, err))\n self.parser.print_help()\n sys.exit(1)\n\n def combine_config(self):\n try:\n color_map = 
self.config_data['color_map']\n            if not os.path.isfile(color_map):\n                current_paths = os.path.dirname(os.path.realpath(__file__)).split('/')\n                current_paths.pop()\n                current_path = \"/\".join(current_paths)\n                color_map = os.path.join(current_path, color_map)\n            color_data = self.handle.read_file(config_file=color_map)\n            if color_data:\n                self.config_data.update(color_data)\n        except KeyError as err:\n            print(\"color map not supplied :: Error: %s :: skipping\" % err)\n\n    def scan_config(self):\n        if self.options.debug:\n            debug = self.options.debug\n            if debug == 1:\n                debug_name = 'critical'\n            elif debug == 2:\n                debug_name = 'error'\n            elif debug == 3:\n                debug_name = 'warning'\n            elif debug == 4:\n                debug_name = 'info'\n            elif debug == 5:\n                debug_name = 'debug'\n            else:\n                print(\"Invalid debug level set, using default\")\n                debug_name = None\n            if debug_name:\n                self.config_data['logging_config']['log_level'] = debug_name\n\n        ''' Add the user-id running this to config_data'''\n        pam_user = os.getenv('PAM_USER')\n        sys_user = getpass.getuser()\n        check_is_uid = re.compile(r\"^\\d+\")\n        if not pam_user:\n            self.config_data['user_name'] = sys_user\n            pam_user = sys_user\n\n        if check_is_uid.match(pam_user):\n            self.config_data['user_name'] = pwd.getpwuid(int(pam_user))[0]\n\n        '''add all of the command options to the config data as well'''\n        self.config_data['options'] = self.options\n\n    def return_config(self):\n        return self.config_data\n","sub_path":"build-tools/python/build_libs/config_parser.py","file_name":"config_parser.py","file_ext":"py","file_size_in_byte":3431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"545058045","text":"import numpy as np\nimport math\nimport scipy\nfrom scipy.stats import beta\nimport pdb, subprocess, random\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport os.path, time\n\nfrom scipy import optimize as scipyopt\n\nimport json\n\nimport os, sys\nhome_dir = os.getenv(\"HOME\")\nsys.path.insert(0, home_dir + '/phd-code/codes/')\n\nimport HM, HMgradopt, ROprob, NIPC\nimport utilities as utils\n\nimport algtest\n\ndef main():\n\n\n\n\n\n\ttest_obj = lambda x,u: algtest.TP3Dopt(x,u,delta=1)\n\tlb, ub = [-5,-5,-5], [5,5,5]\n\tx0 = [4,4,4] # gauss\n\tx0 = [3,2,0] # beta\n\n\t# HM notar optimum:\n\tcandHM = [ 5.00000272e+00, 4.99991138e+00, -3.76392771e-15]\n\t# WS optimum:\n\t#candWS = [ 4.99972135, 5.00010615, 3.12436706]\n\n\tcandWS = [ 5.00027226, 4.99918449, 4.0619249 ]\n\t# min mean\n\tcandMU = [ 5.0, 7.0, 0.0]\n\n\n\tname = 'notar'\n\tif name == 'gauss':\n\t\tmu, std = 3, 1\n\t\tDMT = lambda x: utils.DM_target_g(x, shift = 3, std=1)\n\t\tHMT = lambda x: utils.target_g(x, shift = 3, std=1, bInverse=True)\n\tif name == 'beta':\n\t\tmu, std = 10, 2\n\t\tDMT = lambda x: utils.DM_target_b(x, shift=mu, std=std)\n\t\tHMT = lambda x: utils.target_b(x, shift=mu, std=std, bInverse=True)\n\tif name == 'uni':\n\t\tDMT = lambda x: utils.DM_target_u(x, shift = 5, std=1.5)\n\t\tHMT = lambda x: utils.target_u(x, shift = 1, std=1, bInverse=True)\n\tif name == 'notar':\n\t\tDMT = lambda x: utils.DM_target_g(x, shift = -5, std=0.01)\n\t\tHMT = lambda x: utils.target_g(x, shift = -5, std=0.01, bInverse=True)\n\tif name == 'ws':\n\t\tDMT = lambda x: utils.DM_target_g(x, shift = -5, std=0.01)\n\t\tHMT = lambda x: utils.target_g(x, shift = -5, std=0.01, bInverse=True)\n\n\t# Density matching and horsetail matching objects with the same target and setup\n\tOptObj = HMgradopt.HorsetailMatchingOpt(test_obj, ualdim=1, uepdim=0, 
lb=lb, ub=ub, OptType = 'HM',\n\t\t\t\t\t\t\t\t\tn_sample=1*10**4, n_quad=1*10**3, log_file = 'DM_log_opt.txt',\n\t\t\t\t\t\t\t\t\tDMT = DMT,\n\t\t\t\t\t\t\t\t\tT1inv = HMT,\n\t\t\t\t\t\t\t\t\tpoly_order = 3, bLog = False,\n\t\t\t\t\t\t\t\t\tp = 2, trap_low = -25, trap_high = 75 )\n\n\n\t# Evaluate the optimal designs for comparisons\n\tOptObj.evaluator(candHM)\n\tqhtHM = [OptObj.fplot, OptObj.qplot, OptObj.tplot]\n\n\tOptObj.evaluator(candWS)\n\tqhtWS = [OptObj.fplot, OptObj.qplot, OptObj.tplot]\n\n\tOptObj.evaluator(candMU)\n\tqhtMU = [OptObj.fplot, OptObj.qplot, OptObj.tplot]\n\n\t# Deal with directory structure\n\tsubprocess.call('cd ' + home_dir + '/phd-code/HorsetailMatching/', shell=True)\n\n\t# before plotting, make latex compatible\n\tutils.mpl2tex()\n\n\tfig = plt.figure()\n\t#for ii in range(len(hm_plotlog)):\n\t#\tplt.plot(hm_plotlog[ii][0],hm_plotlog[ii][1],colorstr[ii], dashes=[3,3])\n\n\tplt.plot(qhtHM[0],qhtHM[1],'b')\n\tplt.plot(qhtWS[0],qhtWS[1],'r')\n\tplt.plot(qhtMU[0],qhtMU[1],'g')\n\t#plt.legend(loc='lower right')\n\tplt.xlabel('Quantity of Interest')\n\tplt.ylabel('CDF')\n\t#plt.xlim([-5,30])\n\t#plt.ylim([0,0.50])\n\tplt.tight_layout()\n\tutils.savefig('CDFs', bSaveData=True, bSaveBase=True)\n\t#fig.set_size_inches(8, 6, forward=True)\n\n\n\t#fig.set_size_inches(8, 6, forward=True)\n\tplt.show()\n\n\n\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"HorsetailMatching/algtest_test_candidates.py","file_name":"algtest_test_candidates.py","file_ext":"py","file_size_in_byte":3032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"99512861","text":"from __future__ import annotations\n\nimport os\nimport traceback\nfrom dataclasses import dataclass\nfrom typing import Union\n\nfrom goldfnd.lib.ses import SESMessageParam, SESRawMessageParam\nfrom goldfnd.models.Reply import Reply\nfrom goldfnd.models.User import User\n\n\n@dataclass\nclass MailerCacheStorage:\n    sender: str\n    prepared_message_param: SESMessageParam\n\n    def __init__(self):\n        self.sender = None\n        self.prepared_message_param = None\n\n\nclass Mailer(object):\n\n    def __init__(self, ses_client):\n        self.ses_client = ses_client\n        self.message_param: Union[None, Union[SESMessageParam, SESRawMessageParam]] = None\n        self.__cache_storage = MailerCacheStorage()\n\n    @staticmethod\n    def to_anchor_tag(string):\n        return f'<a href=\"{string}\">{string}</a>'\n\n    def prepare_message(self, reply: Reply):\n        subject = reply.mail['subject']\n        content = reply.mail['content']\n        name_placeholder = reply.mail['name_placeholder']\n        new_content = []\n\n        for text in content.split(os.linesep):\n            if text.startswith('http'):\n                text = self.to_anchor_tag(text)\n            new_content.append(text)\n\n        new_content = os.linesep.join(new_content)\n\n        self.message_param = SESMessageParam(subject, new_content, name_placeholder)\n        # self.message_param = SESRawMessageParam(subject, content, name_placeholder)\n        self.__cache_storage.prepared_message_param = self.message_param # Caching\n        return self\n\n    def send(self, user: User, sender_=None):\n        if not self.message_param:\n            print('Message param for AWS SES is required first!')\n            raise NotImplementedError\n        sender = sender_ or self.__cache_storage.sender\n        if not sender:\n            print('Sender is required')\n            raise NotImplementedError\n        self.__cache_storage.sender = sender # Caching\n        # self.message_param.set_source(sender).set_destination(user.mail).set_raw_message(user)\n        self.message_param.set_sender(sender).set_destination(user)\n        try:\n            return 
self.ses_client.send_email(**self.message_param.to_dict())\n # return self.ses_client.send_raw_email(**self.message_param.to_dict())\n except Exception as e:\n traceback.print_exc()\n raise e\n\n @property\n def cached_prepared_message_param(self):\n return self.__cache_storage.prepared_message_param\n","sub_path":"goldfnd/services/mailer.py","file_name":"mailer.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"150449620","text":"\"\"\" _\n |_|_\n _ | |\n _|_|_|_|_\n|_|_|_|_|_|_\n |_|_|_|_|_|\n | | |_|\n |_|_\n |_|\n\nAuthor: Souham Biswas\nWebsite: https://www.linkedin.com/in/souham/\n\"\"\"\n\nimport os\nfrom queue import Queue\n\nimport cv2\nimport numpy as np\n\nMODE = 'train' # choose between train and val\nBATCH_SIZE = 2\nSHUFFLE = True\nPRINT_LOSS_EVERY_N_STEPS = 50\n\nFREEZE_BACKBONE = False\nFREEZE_DECODER = False\n\nIM_DIM = 512\nSHADOW_GT_DIR = 'scratchspace/white-and_yellow-solid-lane-markings'\nFINAL_MODEL_DIR = 'final_model'\nFINAL_MODEL_NAME = 'model-v0'\n\n# BIN_POS_CE_COEFF = 3.\nFOCAL_TVERSKY_POWER = 1.5\nFOCAL_TVERSKY_FALSE_NEGATIVE_COEFF = .6\nHARD_NEGATIVE_MINING_COEFF = 3. # deprecated while using focal tvsersky loss\n\n# SAVE_FREQUENCY = 800 // BATCH_SIZE\nBATCHES_PER_ASYNC_QUEUE = 50\n\nCONFIDENCE_THRESHOLD = .5\nUPDATE_BATCHNORM_STATS = True\nSTART_TRAIN_STEP = 0\nNUM_TRAIN_STEPS = 1000000\nBASE_LEARN_RATE = 1e-4\nLEARN_RATE_EXPONENTIAL_DECAY_POWER = .068\n\n# WEIGHT_DECAY = 1e-5\n\nMODEL_SAVE_DIR_ROOT = 'scratchspace/model_dir'\nMODEL_NAME_PREFIX = 'flonet'\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\n\nSAMPLE_IMAGES_DIR = 'scratchspace/sample_0/images'\n\n\ndef split_to_backprop_and_update(upsampler_params):\n upsampler_backprop_vars = [v for v in upsampler_params if 'moving' not in v.name]\n upsampler_moving_stats_vars = [v for v in upsampler_params if 'moving' in v.name]\n return upsampler_backprop_vars, upsampler_moving_stats_vars\n\n\ndef set_gpu_id(id):\n os.environ['CUDA_VISIBLE_DEVICES'] = str(id)\n\n\ndef get_center_crop_ends(dim, center_size):\n if dim != center_size:\n start_idx = int((dim - center_size) // 2)\n end_idx = int(start_idx + center_size)\n else:\n start_idx = 0\n end_idx = dim\n return start_idx, end_idx\n\n\ndef get_random_crop_ends(dim, center_size):\n if dim != center_size:\n start_idx = int(np.random.randint(dim - center_size))\n end_idx = int(start_idx + center_size)\n else:\n start_idx = 0\n end_idx = dim\n return start_idx, end_idx\n\n\ndef get_rescaled_dims(w, h, min_dim_sz):\n if h > w:\n scale_factor = 1. * h / w\n new_h = int(min_dim_sz * scale_factor)\n new_w = min_dim_sz\n else:\n scale_factor = 1. * w / h\n new_h = min_dim_sz\n new_w = int(min_dim_sz * scale_factor)\n return new_w, new_h\n\n\ndef overlay_mask(im_in, conf_mask_in, color=(255, 0, 0), alpha=.5, thresh=.5):\n if conf_mask_in.max() > 1.:\n conf_mask = conf_mask_in / 255.\n else:\n conf_mask = conf_mask_in\n im = im_in.copy()\n f = conf_mask > thresh\n p = np.array(color) * np.tile(np.expand_dims(conf_mask[f], 0), [3, 1]).T\n im[f] = alpha * p + (1 - alpha) * im[f]\n im = im.astype(np.uint8)\n return im\n\n\ndef resize_aspect_ratio_preserved(im, min_dim_sz=720, interp=cv2.INTER_NEAREST):\n h, w = im.shape[0], im.shape[1]\n new_w, new_h = get_rescaled_dims(w, h, min_dim_sz=min_dim_sz)\n im_ret = cv2.resize(im, (new_w, new_h), interpolation=interp)\n return im_ret\n\n\ndef unit_vector(vector):\n \"\"\" Returns the unit vector of the vector. 
\"\"\"\n return vector / np.tile([np.linalg.norm(vector, axis=1)], [2, 1]).T\n\n\ndef angle_between(v1, v2):\n \"\"\"\n Returns the angle in radians between vectors 'v1' and 'v2'\n \"\"\"\n v1_u = unit_vector(v1)\n v2_u = unit_vector(v2)\n v = np.sum(v2_u * v1_u, axis=1)\n return np.arccos(np.clip(v, -1.0, 1.0))\n\n\ndef auto_canny(image_, sigma=0.33):\n image = cv2.cvtColor(image_, cv2.COLOR_BGR2GRAY).astype(np.uint8)\n v = np.median(image)\n lower = int(max(0, (1.0 - sigma) * v))\n upper = int(min(255, (1.0 + sigma) * v))\n edged = cv2.Canny(image, lower, upper)\n return edged\n\n\ndef nn_preprocess(im_in_): # re-implemented what tensorflow was doing internally for NASnet.\n im_in = im_in_ - im_in_.min()\n im_in = (im_in / im_in.max()) * 255.\n im_edged = auto_canny(im_in) / 255.\n im_edged[im_edged < 1.] = -1.\n im = im_in / 255.\n im = im - .5\n im = im * 2.\n h, w, _ = im.shape\n im_ = np.zeros([h, w, 4], dtype=np.float)\n im_[:, :, :3] = im\n im_[:, :, -1] = im_edged\n return im_\n\n\ndef nn_unpreprocess(im_in):\n im = im_in / 2.\n im = im + .5\n im = (im * 255).astype(np.uint8)\n return im\n\n\ndef input_infer_preprocess(im_bgr_uint8, side=IM_DIM):\n h, w, _ = im_bgr_uint8.shape\n if min(h, w) != side:\n im = resize_aspect_ratio_preserved(im_bgr_uint8, side, interp=cv2.INTER_LINEAR)\n else:\n im = im_bgr_uint8\n im = nn_preprocess(im)\n im = np.expand_dims(im, 0)\n return im\n\n\ndef force_makedir(dir):\n if not os.path.isdir(dir):\n print('Making folder at -', dir)\n os.makedirs(dir)\n\n\ndef topk_idx(v, k):\n return np.argpartition(v, -k)[-k:]\n\n\ndef bottomk_idx(v, k):\n return np.argpartition(v, k)[:k]\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"63362235","text":"import logging\nimport socket\nimport sys\n\nimport flask\nfrom flask import Flask, request\nfrom flask_cors import CORS\nfrom pyspark.sql import SparkSession\n\nfrom model_platform.src.operationalization.anomaly.profile_anomaly.online_score import \\\n load_profile_anomaly_model, \\\n get_profile_anomaly_score\n\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO)\napp = Flask(__name__)\nCORS(app)\n\nspark = SparkSession.builder.appName('MAAS').getOrCreate()\nsc = spark.sparkContext\nsc.setLogLevel(\"ERROR\")\nmodel_dict = dict()\nmodel_dict[\"windowsos\"] = load_profile_anomaly_model(spark=spark, data_source=\"windowsos\")\nmodel_dict[\"wgtraffic\"] = load_profile_anomaly_model(spark=spark, data_source=\"wgtraffic\")\nmodel_dict[\"msexchange\"] = load_profile_anomaly_model(spark=spark, data_source=\"msexchange\")\n\n\n@app.route('/')\ndef heart_beat():\n return flask.jsonify({\"status\": \"ok\"})\n\n\n@app.route('/apply', methods=['GET'])\ndef calculate_profile_outlier_score():\n input_json = request.args\n input_dict = dict(input_json)\n app.logger.info(\"input request : {input_dict}\".format(input_dict=input_dict))\n data_source = input_dict[\"data_source\"][0]\n if \"src_ip\" in input_dict:\n app.logger.info(\"calculating ip profile anomaly score for {data_source}\".format(data_source=data_source))\n score = get_profile_anomaly_score(spark=spark, model_dict=model_dict[data_source],\n input_req=input_dict, entity_type=\"ip\")\n elif \"user_name\" in input_dict:\n app.logger.info(\"calculating user profile anomaly score for {data_source}\".format(data_source=data_source))\n score = get_profile_anomaly_score(spark=spark, model_dict=model_dict[data_source],\n 
input_req=input_dict, entity_type=\"user\")\n else:\n score = None\n\n app.logger.info(\"response : {score}\".format(score=score))\n return flask.jsonify(score)\n\n\nif __name__ == '__main__':\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.bind(('localhost', 0))\n port = sock.getsockname()[1]\n sock.close()\n with open(\"endpoint.dat\", \"w\") as text_file:\n text_file.write(\"{\\\"url\\\" : \\\"http://0.0.0.0:%d\\\"}\" % port)\n app.run(threaded=True, host=\"0.0.0.0\", port=port)\n","sub_path":"models/model_platform/deployment/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"513040474","text":"#!/usr/bin/env python3\n\"\"\"\nThis CLI program is used to calculate credit, then compare it with the requirement\n\nAuthor: Mephis Pheies\nEmail: mephistommm@gmail.com\n\"\"\"\n\nimport re\n\nfrom collections import namedtuple\n\nCOURSECATEGORY = namedtuple(\"COURSECATEGORY\", \n (\"name\", \"basis_key\", \"requirement\", \"valid_value\", \"items\"))\n\ndef get_datas(filename):\n \"\"\"\n get data from filename, then parse it\n \"\"\"\n result = []\n space_re = r\"\\b +\\b\"\n with open(filename, \"r\") as f_descriptor:\n for data in f_descriptor.readlines():\n # replace spaces which are used as borders\n # so you should not write spaces inside a non-empty item!\n data = re.sub(space_re, \"\\t\", data).strip()\n if len(data) == 0 or data.startswith(\"#\"):\n continue\n\n # strip each item in the data list except the space item\n # filter out \"\"; they may exist at the end of the list\n data = list(\n filter(lambda x: x != \"\",\n map(lambda x: x.strip() if x != \" \" else x,\n data.split(\"\\t\"))))\n # data should contain 13 items; the items at the left end will not be missing,\n # so we should pad the right end with additional space items\n if len(data) < 13:\n spaces = [\" \" for i in range(13)]\n spaces[:len(data)] = data[:]\n data = spaces\n\n assert len(data) == 13\n result.append({\n \"course_code\": data[2],\n \"course_name\": data[3],\n \"course_property\": data[4],\n \"course_from\": data[10],\n \"credit\": float(data[6])})\n\n return result\n\ndef get_course_categories(filename):\n \"\"\"\n get the categories of courses, hence we could classify the courses\n \"\"\"\n result = []\n current_category = None\n type_line_re = r\"Type\\b.*\"\n with open(filename, \"r\") as f_descriptor:\n for data in f_descriptor.readlines():\n data = data.strip()\n if len(data) == 0 or data.startswith(\"#\"):\n continue\n\n if re.match(type_line_re, data, re.I):\n if current_category is not None:\n result.append(current_category)\n\n type_list = list(\n filter(lambda x: x != \"\",\n map(lambda x: x.strip(), \n data.split(\" \"))))\n\n if len(type_list) < 4:\n raise ValueError(\"invalid line:\\n\\t'{}'\".format(data))\n\n current_category = COURSECATEGORY(\n type_list[1], type_list[2], float(type_list[3]), [], [])\n continue\n\n current_category.valid_value.append(data)\n\n result.append(current_category)\n return result\n\ndef check_data_in_category(data, category):\n \"\"\"\n check whether data is in category:\n use re.match to compare every regexp in category.valid_value with data[category.basis_key]\n \"\"\"\n key = category.basis_key\n result = False\n for valid_re in category.valid_value:\n if re.match(valid_re, data[key], re.I):\n result = True\n break\n\n return result\n\ndef sum_of_credit(datas):\n \"\"\"\n calculate the sum of credits of datas;\n datas can be a list or any iterable\n \"\"\"\n return 
sum(map(lambda x: x[\"credit\"], datas))\n\ndef sum_of_require(categories):\n \"\"\"\n calculate the sum of requirement of categories\n \"\"\"\n return sum(map(lambda x: x.requirement, categories))\n\n\ndef main():\n \"\"\"\n * get and parse data\n \"\"\"\n course_categories = get_course_categories(\"courses_categorise.data\")\n student_credit_datas = get_datas(\"credit.data\")\n\n failed_to_classify_datas = []\n\n for data in student_credit_datas:\n for category in course_categories:\n if check_data_in_category(data, category):\n category.items.append(data)\n break\n else:\n failed_to_classify_datas.append(data)\n\n if len(failed_to_classify_datas) != 0:\n print(\"Some datas are failed to classify:\")\n for data in failed_to_classify_datas:\n print((\"{course_code} {course_name}\"\n \" {course_property} {course_from} {credit}\").format(**data))\n\n is_in_course_requirement = lambda x: re.match(\"课外\", x.name) is None\n is_in_course = lambda x: re.match(\"课外\", x[\"course_property\"]) is None\n total_requirement = sum_of_require(course_categories)\n total = sum_of_credit(student_credit_datas)\n in_course_requirement = sum_of_require(\n filter(is_in_course_requirement, course_categories))\n in_course = sum_of_credit(\n filter(is_in_course, student_credit_datas))\n out_course_requirement = sum_of_require(\n filter(lambda x: not is_in_course_requirement(x), course_categories))\n out_course = sum_of_credit(\n filter(lambda x: not is_in_course(x), student_credit_datas))\n\n print(\"total: {}/{} in_course: {}/{} out_course: {}/{}\".format(\n total, total_requirement,\n in_course, in_course_requirement,\n out_course, out_course_requirement))\n print(\"\")\n\n for category in course_categories:\n category_total = sum_of_credit(category.items)\n print(\"Type name:{} {}/{}\".format(\n category.name, category_total, category.requirement))\n for data in category.items:\n print((\"\\t{course_code} {course_name}\"\n \" {course_property} {course_from} {credit}\").format(**data))\n print(\"\")\n\ntry:\n main()\nexcept ValueError as err:\n print(err)\n","sub_path":"calculate.py","file_name":"calculate.py","file_ext":"py","file_size_in_byte":5635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"40946823","text":"from django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n url(r'^$', 'res1.views.home', name='home'),\n url(r'^registration/$', 'res1.views.registration', name='registration'),\n url(r'^addmore/$', 'res1.views.addmore', name='addmore'),\n url(r'^profn/$', 'res1.views.profn', name='profn'),\n url(r'^send/$', 'res1.views.send', name='send'),\n #url(r'^mail/$','res1.views.mail', name='mail'),\n \n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n)\n\n#from django.conf.urls import patterns, include, url\n#from django.conf import settings\n#from django.conf.urls.static import static\n\n# Uncomment the next two lines to enable the admin:\n#from django.contrib import admin\n#admin.autodiscover()\n\n#urlpatterns = patterns('',\n \n# url(r'^$','res1.views.home'),\n# url(r'^edn/$', 'res1.views.edn'),\n# url(r'^profn/$','res1.views.profn'),\n# Uncomment the admin/doc line below to enable admin documentation:\n# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n# url(r'^admin/', include(admin.site.urls)),\n 
\n#)\n","sub_path":"res/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"505719328","text":"#\n# Copyright 2016 The BigDL Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\nfrom torch import nn\nimport torch\nfrom unittest import TestCase\nimport pytest\nimport torchvision.transforms as transforms\nfrom bigdl.nano.pytorch import Trainer\nfrom bigdl.nano.pytorch import InferenceOptimizer\nimport torchmetrics\nimport torch\nimport torch.nn.functional as F\nfrom test.pytorch.utils._train_torch_lightning import create_data_loader\n\n\ndata_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\n\n\nclass Net(nn.Module):\n def __init__(self, l1=8, l2=16):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(3, 6, 5)\n self.pool = nn.MaxPool2d(2, 2)\n self.conv2 = nn.Conv2d(6, 16, 5)\n self.fc1 = nn.Linear(16 * 5 * 5, l1)\n self.fc2 = nn.Linear(l1, l2)\n self.fc3 = nn.Linear(l2, 10)\n\n def forward(self, x):\n x = self.pool(F.relu(self.conv1(x)))\n x = self.pool(F.relu(self.conv2(x)))\n x = x.reshape(-1, 16 * 5 * 5)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\n\nclass TestInferencePipeline(TestCase):\n num_workers = 0\n data_dir = os.path.join(os.path.dirname(__file__), \"data\")\n metric = torchmetrics.Accuracy(num_classes=10, top_k=1)\n max_epochs = 5\n\n model = Net()\n test_loader = create_data_loader(data_dir, 1, num_workers, data_transform, subset=10, shuffle=False)\n train_loader = create_data_loader(data_dir, 32, num_workers, data_transform, subset=10, shuffle=True)\n loss = nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n\n trainer = Trainer(max_epochs=max_epochs)\n model = Trainer.compile(model, loss, optimizer)\n trainer.fit(model, train_loader)\n \n def test_get_model_without_optimize(self):\n inference_opt = InferenceOptimizer()\n with pytest.raises(RuntimeError) as e:\n acc_model, option = inference_opt.get_best_model()\n error_msg = e.value.args[0]\n assert error_msg == \"There is no optimized model. 
You should call .optimize() \" \\\n \"before get_best_model()\"\n\n def test_pipeline_with_metric(self):\n inference_opt = InferenceOptimizer()\n inference_opt.optimize(model=self.model,\n training_data=self.train_loader,\n validation_data=self.test_loader,\n metric=self.metric,\n direction=\"max\",\n thread_num=1)\n\n acc_model, option = inference_opt.get_best_model()\n acc_model, option = inference_opt.get_best_model(accelerator=\"onnxruntime\")\n assert option == \"\" or \"onnxruntime\" in option\n acc_model, option = inference_opt.get_best_model(precision=\"int8\")\n assert option == \"\" or \"inc\" in option or \"int8\" in option\n acc_model, option = inference_opt.get_best_model(accuracy_criterion=0.1)\n acc_model(next(iter(self.train_loader))[0])\n\n def test_pipeline_without_metric(self):\n inference_opt = InferenceOptimizer()\n inference_opt.optimize(model=self.model,\n training_data=self.train_loader,\n thread_num=1)\n\n acc_model, option = inference_opt.get_best_model()\n acc_model, option = inference_opt.get_best_model(accelerator=\"onnxruntime\")\n assert option == \"\" or \"onnxruntime\" in option\n acc_model, option = inference_opt.get_best_model(precision=\"int8\")\n assert option == \"\" or \"inc\" in option or \"int8\" in option\n with pytest.raises(RuntimeError) as e:\n acc_model, option = inference_opt.get_best_model(accuracy_criterion=0.1)\n error_msg = e.value.args[0]\n assert error_msg == \"If you want to specify accuracy_criterion, you need \"\\\n \"to set metric and validation_data when call 'optimize'.\"\n\n def test_pipeline_with_excludes(self):\n inference_opt = InferenceOptimizer()\n inference_opt.optimize(model=self.model,\n training_data=self.train_loader,\n thread_num=1,\n excludes=[\"fp32_ipex\", \"original\"])\n\n # original is a special method that must be included in\n # the search\n assert \"original\" in inference_opt.optimized_model_dict\n assert \"jit_fp32_ipex\" in inference_opt.optimized_model_dict\n assert \"fp32_ipex\" not in inference_opt.optimized_model_dict\n\n def test_pipeline_with_includes(self):\n inference_opt = InferenceOptimizer()\n inference_opt.optimize(model=self.model,\n training_data=self.train_loader,\n thread_num=1,\n includes=[\"fp32_ipex\"])\n\n assert \"original\" in inference_opt.optimized_model_dict\n assert \"fp32_ipex\" in inference_opt.optimized_model_dict\n assert len(inference_opt.optimized_model_dict) == 2\n\n def test_summary(self):\n inference_opt = InferenceOptimizer()\n with pytest.raises(RuntimeError) as e:\n inference_opt.summary()\n error_msg = e.value.args[0]\n assert error_msg == \"There is no optimization result. 
You should call .optimize() \"\\\n \"before summary()\"\n inference_opt.optimize(model=self.model,\n training_data=self.train_loader,\n thread_num=1)\n inference_opt.summary()\n\n def test_wrong_data_loader(self):\n fake_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n transforms.Resize(64),\n ])\n fake_train_loader = create_data_loader(self.data_dir, 32, self.num_workers,\n fake_transform, subset=10, shuffle=True)\n inference_opt = InferenceOptimizer()\n with pytest.raises(RuntimeError) as e:\n inference_opt.optimize(model=self.model,\n training_data=fake_train_loader,\n thread_num=1)\n error_msg = e.value.args[0]\n assert error_msg == \"training_data is incompatible with your model input.\"\n\n def test_pipeline_with_custom_function_metric(self):\n inference_opt = InferenceOptimizer()\n\n def metric(pred, target):\n return self.metric(pred, target)\n\n inference_opt.optimize(model=self.model,\n training_data=self.train_loader,\n validation_data=self.test_loader,\n metric=metric,\n direction=\"max\",\n thread_num=1)\n \n def test_pipeline_with_torchmetrics_functional_metric(self):\n inference_opt = InferenceOptimizer()\n metric = torchmetrics.functional.accuracy\n inference_opt.optimize(model=self.model,\n training_data=self.train_loader,\n validation_data=self.test_loader,\n metric=metric,\n direction=\"max\",\n thread_num=1)\n\n def test_pipeline_with_custom_function_metric_without_data(self):\n inference_opt = InferenceOptimizer()\n\n def metric(pred, target):\n return self.metric(pred, target)\n\n with pytest.raises(RuntimeError):\n inference_opt.optimize(model=self.model,\n training_data=self.train_loader,\n validation_data=None,\n metric=metric,\n direction=\"max\",\n thread_num=1)\n\n def test_pipeline_with_wrong_custom_function_metric(self):\n inference_opt = InferenceOptimizer()\n\n def metric(x, y):\n return self.metric(x, y)\n\n with pytest.raises(RuntimeError):\n inference_opt.optimize(model=self.model,\n training_data=self.train_loader,\n validation_data=self.test_loader,\n metric=metric,\n direction=\"max\",\n thread_num=1)\n\n def test_pipeline_with_custom_function_metric_with_data_loader(self):\n inference_opt = InferenceOptimizer()\n import numpy as np\n def metric(model, data_loader):\n metrics = []\n for input_data, target in data_loader:\n pred = model(input_data)\n metric = self.metric(pred, target)\n metrics.append(metric)\n return np.mean(metrics)\n\n inference_opt.optimize(model=self.model,\n training_data=self.train_loader,\n validation_data=self.test_loader,\n metric=metric,\n direction=\"max\",\n thread_num=1)\n\n def test_get_model_with_wrong_method_name(self):\n inference_opt = InferenceOptimizer()\n inference_opt.optimize(model=self.model,\n training_data=self.train_loader,\n validation_data=self.test_loader,\n metric=self.metric,\n direction=\"max\",\n thread_num=1)\n\n with pytest.raises(RuntimeError):\n inference_opt.get_model(method_name=\"fp16_ipex\")\n\n def test_get_model_with_method_name(self):\n inference_opt = InferenceOptimizer()\n inference_opt.optimize(model=self.model,\n training_data=self.train_loader,\n validation_data=self.test_loader,\n metric=self.metric,\n direction=\"max\",\n thread_num=1)\n try:\n model = inference_opt.get_model(method_name=\"fp32_ipex\")\n from bigdl.nano.deps.ipex.ipex_inference_model import PytorchIPEXJITModel\n assert isinstance(model, PytorchIPEXJITModel)\n except:\n 
pass\n","sub_path":"python/nano/test/pytorch/tests/test_inference_pipeline_ipex.py","file_name":"test_inference_pipeline_ipex.py","file_ext":"py","file_size_in_byte":10973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"216993346","text":"# Author: Berin\n# Sketches repo: https://github.com/berinhard/sketches\n\nclass NoiseWave:\n \"\"\"\n Class to enable noise increment propagation through an array\n \"\"\"\n\n def __init__(self, array_size, init_value=0):\n self.array_size = array_size\n self.noises = [init_value] * array_size\n\n @property\n def max_index(self):\n return self.array_size - 1\n\n def increment(self, step):\n for i in range(self.max_index, -1, -1):\n if i == 0:\n self.noises[i] += step # only increments first value\n else:\n # other values should copy the step from the previous position\n self.noises[i] = self.noises[i-1]\n\n def index_noise(self, i):\n return noise(self.noises[i]) # get noise by position\n\n\nclass Diamond:\n\n def __init__(self, horizontal_position, offset_y, reversed=False):\n self.horizontal_position = horizontal_position\n self.offset_y = offset_y\n self.reversed = reversed\n self.max_lines = 40\n self.top_lines, self.bottom_lines = [], []\n self.alpha = 40\n\n\n def draw(self, noise_x, noise_y):\n y1, y2 = self.y_values\n\n if self.reversed:\n x = self.offset_x + 100 - (100 * noise_x)\n y = self.offset_y + (200 * noise_y)\n else:\n x = self.offset_x + (100 * noise_x)\n y = self.offset_y + 200 - 200 * noise_y\n\n self.add_line(self.x, y1, x, y)\n self.add_line(self.x, y2, x, y, is_top=False)\n self.draw_lines()\n\n @property\n def x(self):\n \"\"\"\n Translate the diamond respecting its position\n \"\"\"\n return (self.horizontal_position + 1) * 100\n\n @property\n def offset_x(self):\n return self.x - 50\n\n @property\n def y_values(self):\n \"\"\"\n Returns top and bottom of the diamond\n \"\"\"\n return self.offset_y, self.offset_y + 200\n\n def draw_lines(self):\n colours = [50, 120, 140]\n if self.reversed:\n colours = reverse(colours)\n\n stroke(colours[0], colours[1], colours[2], self.alpha)\n for x1, y1, x2, y2 in self.top_lines:\n line(x1, y1, x2, y2)\n\n colours = reverse(colours)\n stroke(colours[0], colours[1], colours[2], self.alpha)\n for x1, y1, x2, y2 in self.bottom_lines:\n line(x1, y1, x2, y2)\n\n def add_line(self, x1, y1, x2, y2, is_top=True):\n \"\"\"\n Keeps a maximum of max_lines of internal golden lines\n \"\"\"\n if is_top:\n lines = self.top_lines\n else:\n lines = self.bottom_lines\n\n if len(lines) > self.max_lines:\n lines.pop(0)\n lines.append((x1, y1, x2, y2))\n\n @property\n def oldest_point(self):\n return self.top_lines[0][2:]\n\n\n @property\n def newest_point(self):\n return self.top_lines[-1][2:]\n\ntop_lines = [\n Diamond(col, 20) for col in range(7)\n]\nbottom_lines = [\n Diamond(col, 240, reversed=True) for col in range(7)\n]\nnoise_x = NoiseWave(len(top_lines))\nnoise_y = NoiseWave(len(top_lines), init_value=8)\n\ndef setup():\n size(800, 460)\n frameRate(16)\n background(0)\n strokeWeight(2)\n\ndef draw():\n background(0)\n for i, diamonds in enumerate(zip(top_lines, bottom_lines)):\n diamonds[0].draw(noise_x.index_noise(i), noise_y.index_noise(i))\n diamonds[1].draw(noise_x.index_noise(i), noise_y.index_noise(i))\n\n noise_x.increment(0.1)\n 
noise_y.increment(0.02)\n","sub_path":"s_006/diamonds/diamonds.pyde","file_name":"diamonds.pyde","file_ext":"pyde","file_size_in_byte":3497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"32485794","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 24 15:43:16 2016\n\n@author: R Carthigesan\n\nModule implementing a 3-D optical ray tracer, which can be used to model the behaviour of some simple optical systems.\n\"\"\"\nimport numpy as np\nimport numpy.linalg as npl\n\n\nclass Ray:\n \"\"\"\n Class representing optical ray. Create instance at starting point point_init and direction dir_init using Ray(point_init, dir_init)\n \"\"\"\n \n def __init__(self, dir_init, point_init=np.array([0.0, 0.0, 0.0])):\n \"\"\"\n Initialises ray: if initial point not provided, starts at origin. Starting point and direction are given as NumPy arrays of size 3, representing Cartesian 3 vectors.\n \"\"\"\n if dir_init.size != 3:\n raise Exception(\"Direction not 3-vector\")\n elif point_init.size != 3:\n raise Exception(\"Point not 3-vector\")\n else:\n self.dir_init = dir_init.astype(np.float)\n self.dirlist = [self.dir_init]\n self.point_init = point_init.astype(np.float)\n self.pointlist = [self.point_init]\n \n def pcurrent(self):\n \"\"\"Returns current point of ray.\"\"\"\n return self.pointlist[-1]\n \n def dcurrent(self):\n \"\"\"Returns current direction of ray.\"\"\"\n return self.dirlist[-1]\n \n def append(self,d,p):\n \"\"\"Adds new direction d and point p to ray.\"\"\"\n if d.size != 3:\n raise Exception(\"New direction not 3-vector\")\n elif p.size != 3:\n raise Exception(\"New point not 3-vector\")\n else:\n self.dirlist.append(d.astype(np.float))\n self.pointlist.append(p.astype(np.float))\n \n def vertices(self):\n \"\"\"Returns list of all points along ray.\"\"\"\n return self.pointlist\n\n \nclass OpticalElement:\n \n def propagate_ray(self, ray):\n \"\"\"Propagate a ray through the optical element.\"\"\"\n raise NotImplementedError()\n\n \nclass SphericalRefraction(OpticalElement):\n \"\"\"\n Class representing a spherical refracting surface centred on the optical axis. 
z_0 is the intercept of the surface with the axis; curv, the curvature, is the reciprocal of the radius of curvature; n_1 and n_2 are the refractive indices either side of the surface; ap_rad is the aperture radius - the maximum extent of the surface from the optical axis.\n \"\"\"\n def __init__(self, z_0, curv, n_1, n_2, ap_rad):\n self.z_0 = z_0\n self.curv = curv\n self.n_1 = n_1\n self.n_2 = n_2\n self.ap_rad = ap_rad\n \n def intercept(self,ray):\n \"\"\"Calculates the first valid intercept of a ray with the spherical surface.\"\"\"\n P=ray.pcurrent()\n R=1.0/self.curv\n centrecurve = (np.array([0.0,0.0,self.z_0 + R])).astype(np.float) #centre of curvature\n k_hat=(ray.dcurrent())/(npl.norm(ray.dcurrent()))\n r = np.subtract(P, centrecurve)\n Q = np.add(P, np.dot(r, k_hat))\n if npl.norm(np.subtract(Q, centrecurve)) > abs(R):\n return None\n else:\n l_plus = -1 * (np.dot(r, k_hat)) + np.sqrt((np.dot(r, k_hat))**2 - ((npl.norm(r))**2 - R**2))\n l_minus = -1 * (np.dot(r, k_hat)) - np.sqrt((np.dot(r, k_hat))**2 - ((npl.norm(r))**2 - R**2))\n intersect_plus = np.add(P, l_plus * k_hat)\n intersect_minus = np.add(P, l_minus * k_hat)\n return intersect_plus, intersect_minus\n if R>0 and P[-1]<self.z_0:\n if np.sqrt((intersect_minus[0])**2 + (intersect_minus[1])**2) > abs(R):\n return None\n else:\n return intersect_minus\n elif R<0 and P[-1]>self.z_0:\n if np.sqrt((intersect_minus[0])**2 + (intersect_minus[1])**2) > abs(R):\n return None\n else:\n return intersect_minus\n elif R>0 and P[-1]>(-self.z_0):\n if np.sqrt((intersect_plus[0])**2 + (intersect_plus[1])**2) > abs(R):\n return None\n else:\n return intersect_plus\n elif R<0 and P[-1]<(-self.z_0):\n if np.sqrt((intersect_plus[0])**2 + (intersect_plus[1])**2) > abs(R):\n return None\n else:\n return intersect_plus\n elif np.sign(R) == np.sign(k_hat[-1]):\n return None\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ","sub_path":"raytracer_1642_30112016.py","file_name":"raytracer_1642_30112016.py","file_ext":"py","file_size_in_byte":4603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"117274790","text":"from PyQt5 import QtGui, QtCore, QtWidgets\r\nimport sys\r\nimport random\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5.QtCore import *\r\nfrom PyQt5.QtGui import *\r\nfrom functools import wraps\r\n\r\nclass UTILITY_GUI_HANDLER(QtCore.QObject):\r\n\r\n signalStatus = QtCore.pyqtSignal(str)\r\n\r\n def __init__(self, parent=None):\r\n super(self.__class__, self).__init__(parent)\r\n\r\n MainWindow = QtWidgets.QMainWindow()\r\n\r\n # Create a gui object.\r\n self.gui = Window()\r\n self.gui.setupUi(MainWindow)\r\n\r\n # Create a new worker thread.\r\n self.createWorkerThread()\r\n\r\n # Make any cross object connections.\r\n self._connectSignals()\r\n\r\n MainWindow.show()\r\n sys.exit(app.exec_())\r\n\r\n def _connectSignals(self):\r\n self.gui.pushButton2.clicked.connect(self.forceWorkerReset)\r\n self.signalStatus.connect(self.gui.updateStatus)\r\n self.parent().aboutToQuit.connect(self.forceWorkerQuit)\r\n\r\n\r\n def createWorkerThread(self):\r\n\r\n # Setup the worker object and the worker_thread.\r\n self.worker = Utility_tab()\r\n self.worker_thread = QtCore.QThread()\r\n self.worker.moveToThread(self.worker_thread)\r\n self.worker_thread.start()\r\n\r\n # Connect any worker signals\r\n self.worker.signalStatus.connect(self.gui.updateStatus)\r\n # self.gui.pushButton.clicked.connect(self.worker.startWork)\r\n self.gui.pushButton.clicked.connect(self.worker.myWork)\r\n\r\n\r\n def forceWorkerReset(self):\r\n if 
self.worker_thread.isRunning():\r\n print('Terminating thread.')\r\n self.worker_thread.terminate()\r\n\r\n print('Waiting for thread termination.')\r\n self.worker_thread.wait()\r\n\r\n self.signalStatus.emit('Idle.')\r\n\r\n print('building new working object.')\r\n self.createWorkerThread()\r\n\r\n\r\n def forceWorkerQuit(self):\r\n if self.worker_thread.isRunning():\r\n self.worker_thread.terminate()\r\n self.worker_thread.wait()\r\n\r\n\r\nclass Utility_tab(QtCore.QObject):\r\n\r\n signalStatus = QtCore.pyqtSignal(str)\r\n\r\n def __init__(self, parent=None):\r\n super(self.__class__, self).__init__(parent)\r\n\r\n @QtCore.pyqtSlot()\r\n def startWork(self):\r\n for ii in range(7):\r\n number = random.randint(0,5000**ii)\r\n self.signalStatus.emit('Iteration: {}, Factoring: {}'.format(ii, number))\r\n factors = self.primeFactors(number)\r\n print('Number: ', number, 'Factors: ', factors)\r\n self.signalStatus.emit('Idle.')\r\n\r\n def primeFactors(self, n):\r\n i = 2\r\n factors = []\r\n while i * i <= n:\r\n if n % i:\r\n i += 1\r\n else:\r\n n //= i\r\n factors.append(i)\r\n if n > 1:\r\n factors.append(n)\r\n return factors\r\n\r\n def a_decorator(func):\r\n @wraps(func)\r\n def wrapper(*args, **kwargs):\r\n \"\"\"A wrapper function\"\"\"\r\n\r\n # Extend some capabilities of func\r\n return func.__name__\r\n return wrapper\r\n\r\n @QtCore.pyqtSlot()\r\n def myWork(self):\r\n self.signalStatus.emit('This solution')\r\n while True:\r\n print('I\\'m here')\r\n\r\nclass Window(QMainWindow):\r\n\r\n def setupUi(self, MainWindow):\r\n MainWindow.setObjectName(\"MainWindow\")\r\n MainWindow.resize(800, 600)\r\n self.centralwidget = QtWidgets.QWidget(MainWindow)\r\n self.centralwidget.setObjectName(\"centralwidget\")\r\n self.label = QtWidgets.QLabel(self.centralwidget)\r\n self.label.setGeometry(QtCore.QRect(250, 100, 150, 80))\r\n self.label.setObjectName(\"label\")\r\n self.widget = QtWidgets.QWidget(self.centralwidget)\r\n self.widget.setGeometry(QtCore.QRect(30, 50, 300, 200))\r\n self.widget.setObjectName(\"widget\")\r\n self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.widget)\r\n self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)\r\n self.verticalLayout_2.setObjectName(\"verticalLayout_2\")\r\n self.verticalLayout = QtWidgets.QVBoxLayout()\r\n self.verticalLayout.setObjectName(\"verticalLayout\")\r\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout()\r\n self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\")\r\n self.Address = QtWidgets.QLabel(self.widget)\r\n self.Address.setObjectName(\"Address\")\r\n self.horizontalLayout_2.addWidget(self.Address)\r\n self.lineEdit = QtWidgets.QLineEdit(self.widget)\r\n self.lineEdit.setObjectName(\"lineEdit\")\r\n self.horizontalLayout_2.addWidget(self.lineEdit)\r\n self.verticalLayout.addLayout(self.horizontalLayout_2)\r\n self.horizontalLayout_3 = QtWidgets.QHBoxLayout()\r\n self.horizontalLayout_3.setObjectName(\"horizontalLayout_3\")\r\n self.Age = QtWidgets.QLabel(self.widget)\r\n self.Age.setObjectName(\"Age\")\r\n self.horizontalLayout_3.addWidget(self.Age)\r\n self.lineEdit_2 = QtWidgets.QLineEdit(self.widget)\r\n self.lineEdit_2.setObjectName(\"lineEdit_2\")\r\n self.horizontalLayout_3.addWidget(self.lineEdit_2)\r\n self.verticalLayout.addLayout(self.horizontalLayout_3)\r\n self.verticalLayout_2.addLayout(self.verticalLayout)\r\n self.pushButton = QtWidgets.QPushButton(self.widget)\r\n self.pushButton.setObjectName(\"pb_run\")\r\n self.verticalLayout_2.addWidget(self.pushButton)\r\n 
MainWindow.setCentralWidget(self.centralwidget)\r\n self.pushButton2 = QtWidgets.QPushButton(self.widget)\r\n self.pushButton2.setObjectName(\"pb_cancel\")\r\n self.verticalLayout_2.addWidget(self.pushButton2)\r\n MainWindow.setCentralWidget(self.centralwidget)\r\n self.menubar = QtWidgets.QMenuBar(MainWindow)\r\n self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))\r\n self.menubar.setObjectName(\"menubar\")\r\n MainWindow.setMenuBar(self.menubar)\r\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\r\n self.statusbar.setObjectName(\"statusbar\")\r\n MainWindow.setStatusBar(self.statusbar)\r\n\r\n self.retranslateUi(MainWindow)\r\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\r\n\r\n def retranslateUi(self, MainWindow):\r\n _translate = QtCore.QCoreApplication.translate\r\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\r\n self.label.setText(_translate(\"MainWindow\", \"Output\"))\r\n self.Address.setText(_translate(\"MainWindow\", \"Address\"))\r\n self.Age.setText(_translate(\"MainWindow\", \"Age\"))\r\n self.pushButton.setText(_translate(\"MainWindow\", \"Run\"))\r\n self.pushButton2.setText(_translate(\"MainWindow\", \"Cancel\"))\r\n\r\n @QtCore.pyqtSlot(str)\r\n def updateStatus(self, status):\r\n # self.label.setText(self.lineEdit.text()+' '+self.lineEdit_2.text())\r\n self.label.setText(status)\r\n\r\n\r\n # def __init__(self):\r\n # QWidget.__init__(self)\r\n # self.button_start = QtWidgets.QPushButton('Start', self)\r\n # self.button_cancel = QtWidgets.QPushButton('Cancel', self)\r\n # self.label_status = QtWidgets.QLabel('', self)\r\n #\r\n # layout = QtWidgets.QVBoxLayout(self)\r\n # layout.addWidget(self.button_start)\r\n # layout.addWidget(self.button_cancel)\r\n # layout.addWidget(self.label_status)\r\n #\r\n # self.setFixedSize(400, 200)\r\n #\r\n # @QtCore.pyqtSlot(str)\r\n # def updateStatus(self, status):\r\n # self.label_status.setText(status)\r\n\r\n\r\nif __name__=='__main__':\r\n app = QApplication(sys.argv)\r\n example = UTILITY_GUI_HANDLER(app)\r\n","sub_path":"qthread.py","file_name":"qthread.py","file_ext":"py","file_size_in_byte":7615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"424931269","text":"import requests\nimport logging\nimport time\nimport pytz\nimport datetime\nfrom tzlocal import get_localzone\n\n\ndef is_valid(url):\n logging.info(\"Check url: {}\".format(url))\n start = time.time()\n request = requests.get(url)\n if request.status_code != 200:\n logging.error(\"... url is invalid and will be skipped! Status code {}\".format(request.status_code))\n return False\n logging.info(\"... 
success [{:.4f}s]\".format(time.time() - start))\n return True\n\n\ndef bot_send_photo(bot, chat_id, string):\n logging.info(\"Sending photo to chat ...\")\n bot.send_photo(\n chat_id=chat_id,\n photo=string,\n disable_notification=True)\n \n \ndef bot_send_document(bot, chat_id, string):\n logging.info(\"Sending Document / GIF to chat ...\")\n from telegram import Document\n bot.send_document(\n chat_id=chat_id,\n document=Document(string),\n disable_notification=True)\n \n \ndef bot_send_text(bot, chat_id, msg):\n logging.info(\"Sending Text msg to chat ...\")\n bot.send_message(\n chat_id=chat_id,\n text=msg,\n disable_notification=True) # silent msg\n\n\ndef get_midnight_time_of_timezone(time_zone='Europe/Berlin'):\n today = datetime.date.today()\n zone = get_localzone().zone\n midnight = datetime.datetime.combine(today, datetime.datetime.min.time())\n pytz_timezone = pytz.timezone(time_zone)\n midnight_europe = pytz_timezone.localize(midnight)\n midnight_europe_as_timezone = midnight_europe.astimezone(pytz.timezone(zone))\n logging.info(\"Daily Runtime {}: {}\".format(zone, midnight_europe_as_timezone))\n return midnight_europe_as_timezone.time()\n\n\ndef get_current_datetime_of_timezone(time_zone='Europe/Berlin'):\n pytz_timezone = pytz.timezone(time_zone)\n return datetime.datetime.now(pytz_timezone)\n","sub_path":"core/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"651982700","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport scipy.optimize\nimport seaborn as sns\nimport bootcamp_utils\nrc={'lines.linewidth': 2, 'axes.labelsize': 18, 'axes.titlesize': 18}\nsns.set(rc=rc)\n\n\n# Exercise 5.3: Beak depth and lengths\n# Load the tidy data from Exercise 4\ngrant_data = pd.read_csv('data/my_grant_complete.csv', comment='#')\n\n# Linear model to perform regression on\ndef beak_model(depth, m, d0):\n \"\"\"Linear model for beak length as a function of depth.\"\"\"\n return d0 + m * depth\n\n# Get the beak depths and lengths of fortis and scandens in every year\nbeaks = []\nbeaks_model = []\nyears = [1973, 1975, 1987, 1991, 2012]\n\n# Initial Guess\nm = 1.0\ndepth0 = 0.0\nguess = np.array([m, depth0])\n\n# Iterate through getting the beak depth and length and performing regression\nfor _, yr in enumerate(years):\n # Get the depth and length for each year\n fortis = grant_data.loc[(grant_data['year']==yr) &\n (grant_data['species']=='fortis'),\n ['beak depth (mm)', 'beak length (mm)']]\n scandens = grant_data.loc[(grant_data['year']==yr) &\n (grant_data['species']=='scandens'),\n ['beak depth (mm)', 'beak length (mm)']]\n beaks.append((fortis, scandens))\n\n # Get the regression statistics\n p_f, _ = scipy.optimize.curve_fit(beak_model, fortis['beak depth (mm)'],\n fortis['beak length (mm)'], p0=guess)\n p_s, _ = scipy.optimize.curve_fit(beak_model, scandens['beak depth (mm)'],\n scandens['beak length (mm)'], p0=guess)\n beaks_model.append((p_f, p_s))\n\n# # Unpack the beak stuff\n# fortis_beak_73, scandens_beak_73 = beaks[0]\n# fortis_beak_75, scandens_beak_75 = beaks[1]\n# fortis_beak_87, scandens_beak_87 = beaks[2]\n# fortis_beak_91, scandens_beak_91 = beaks[3]\n# fortis_beak_12, scandens_beak_12 = beaks[4]\n# p_f_73, p_s_73 = beaks_model[0]\n# p_f_75, p_s_75 = beaks_model[1]\n# p_f_87, p_s_87 = beaks_model[2]\n# p_f_91, p_s_91 = beaks_model[3]\n# p_f_12, p_s_12 = beaks_model[4]\n\n# Plot all of the depth vs length for 
all years\nshow_plot = False\nfor i, _ in enumerate(beaks):\n # Unpack the beak data\n fortis, scandens = beaks[i]\n p_f, p_s = beaks_model[i]\n\n # Get the bound for the regressions\n depth = np.linspace(7, 13, 100)\n length_f = beak_model(depth, *tuple(p_f))\n length_s = beak_model(depth, *tuple(p_s))\n\n # Plot the data and regressions on the same plot\n plt.plot(fortis['beak depth (mm)'], fortis['beak length (mm)'], 'b.')\n plt.plot(scandens['beak depth (mm)'], scandens['beak length (mm)'], 'r.')\n plt.plot(depth, length_f, 'b-')\n plt.plot(depth, length_s, 'r-')\n plt.xlabel('beak depth (mm)')\n plt.ylabel('beak length (mm)')\n plt.legend(('Geospiza fortis', 'Geospiza scandens'), loc='lower right')\n plt.title('Beak Data ' + str(years[i]))\n if show_plot:\n plt.show()\n plt.figure()\n\n # Print the results\n print(\"\"\"In {0:d}:\n Scandens: m = {1:.2f}, d0 = {2:.2f}\n Fortis: m = {3:.2f}, d0 = {4:.2f}\"\"\".format(years[i], p_s[0], p_s[1],\n p_f[0], p_f[1]))\n","sub_path":"Exercise_5.py","file_name":"Exercise_5.py","file_ext":"py","file_size_in_byte":3225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"468822078","text":"from django.db import models\nfrom api.mentor.models import Mentor\n\n\n# Create your models here.\nclass School(models.Model):\n class Meta:\n ordering = ['-id']\n name = models.CharField(max_length=40, null=False)\n profile_picture_url = models.CharField(max_length=100, null=True)\n page_description = models.TextField(blank=True) # for showing on the school page\n director = models.ForeignKey(\n Mentor, related_name=\"schools_directed\", on_delete=models.SET_NULL, null=True)\n mentors = models.ManyToManyField(Mentor, related_name=\"schools_mentored\")\n\n def __str__(self):\n return \"{} - {}\".format(self.id, self.name)\n","sub_path":"backend/api/school/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"644086626","text":"# One source symbol - one encoded symbol.\nimport datetime\nimport uuid\n\nfrom physical_layer.connection import Connection\nfrom transport_layer import repeated_timer\n\n\nclass LogicalConnection:\n @staticmethod\n def reserved_names():\n return ['все', 'all_connect', 'notify_disconnect']\n\n @staticmethod\n def system_names():\n return ['all_connect', 'notify_disconnect']\n\n @staticmethod\n def all_id():\n return '11111111-1111-1111-1111-111111111111'\n\n @staticmethod\n def all_connect_id():\n return '22222222-2222-2222-2222-222222222222'\n\n @staticmethod\n def division_polynomial():\n return 0b10011\n\n @staticmethod\n def max_count_of_try():\n return 3\n\n @staticmethod\n def timeout_sec():\n return 9\n\n @staticmethod\n def t():\n return str(datetime.datetime.now()) + ' '\n\n @staticmethod\n def division(rest):\n divider = LogicalConnection.division_polynomial()\n while int.bit_length(rest) >= int.bit_length(divider):\n sub = divider\n while not (int.bit_length(rest) == int.bit_length(sub)):\n sub <<= 1\n rest ^= sub\n return rest\n\n @staticmethod\n def validate(info_in_message):\n return type(info_in_message) == list \\\n and len(info_in_message) == 4 \\\n and info_in_message[0] != '' \\\n and info_in_message[1] != '' \\\n and (info_in_message[2]).isdigit() \\\n and info_in_message[3] != ''\n\n # Message wrapping.\n @staticmethod\n def wrap(recipient, msg, sender, counter):\n return sender + '\\0' + recipient + '\\0' + str(counter) + '\\0' + 
msg\n\n # Cyclic encoding.\n @staticmethod\n def make_cyclic_code(vector):\n return (vector << 4) ^ LogicalConnection.division(vector << 4)\n\n # Check for an error (the remainder of the division).\n @staticmethod\n def find_error(rest):\n guess_error = 0b1 # Assumed error.\n rest_error = 0\n while rest_error != rest:\n rest_error = LogicalConnection.division(guess_error)\n guess_error <<= 1\n return guess_error >> 1\n\n def __init__(self):\n self.debug = True\n self.id = str(uuid.uuid4())\n self.username = None\n # Callback for receiving a message.\n # Two arguments: sender and message\n self.on_received = None\n # Callback for receiving a broadcast message.\n # Two arguments: sender and message\n self.on_broadcast_received = None\n # Callback notifying that a connection has been established\n # No arguments\n self.on_connection_established = None\n # Callback for receiving the list of users\n # Argument - a dictionary id - nickname\n self.update_users = None\n # Callback for reporting a timeout\n # No arguments\n self.on_timed_out = None\n # Callback for reporting a broken connection\n # The argument is the name of whoever disconnected\n self.on_disconnect = None\n # Callback for an error event at the physical layer\n # No arguments\n self.on_wire_corrupted = None\n # Callback for the case when a directed message arrived at two recipients,\n # in other words, there is a name conflict in the system\n # Arguments: the message and the conflicting name\n self.on_conflict = None\n # Callback for the case when a directed message did not reach the addressee\n # Arguments: the message and the name\n self.recipient_not_found = None\n # Callback for the case when a broadcast did not reach everyone\n # Arguments: the message\n self.broadcast_failed = None\n # Physical connections\n self.input_connection = None\n self.output_connection = None\n # Maintaining the logical connection.\n self.logical_connect_send = None\n self.logical_connect_receive = None\n self.count_of_try_connect = None\n self.is_connect = None\n self.last_time = None\n self.idle = None\n # ------------------------------\n\n def connect(self,\n username, # user\n inport_name, # input port name\n inport_baudrate, # input data transfer rate\n outport_name, # output port name\n outport_baudrate): # output data transfer rate\n self.username = username\n # Physical connections\n self.input_connection = Connection()\n self.input_connection.on_received = lambda msg: self.receive(msg)\n self.input_connection.on_exception = lambda: self.on_wire_corrupted()\n self.input_connection.connect(inport_name, baudrate=inport_baudrate)\n self.output_connection = Connection()\n self.output_connection.on_exception = lambda: self.on_wire_corrupted()\n self.output_connection.connect(outport_name, baudrate=outport_baudrate)\n\n # Maintaining the logical connection.\n self.logical_connect_receive = \\\n repeated_timer.RepeatedTimer(3, self.support_logical_connection)\n self.count_of_try_connect = 0\n self.last_time = None\n self.idle = False\n\n def disconnect(self):\n self.is_connect = False\n self.idle = True\n if self.logical_connect_send is not None:\n self.logical_connect_send.stop()\n if self.logical_connect_receive is not None:\n self.logical_connect_receive.stop()\n if self.input_connection.is_connected():\n self.input_connection.disconnect()\n if self.output_connection.is_connected():\n self.output_connection.disconnect()\n\n def notify_disconnect(self):\n self.send('notify_disconnect', '_')\n\n def update_users_table(self, raw_users, check_conflict):\n parsed_users = 
dict(item.split(':') for item in raw_users.split(','))\n if check_conflict:\n cnt = 0\n for user_id, name in parsed_users.items():\n if name == self.username:\n cnt += 1\n if cnt > 1:\n self.on_conflict(self.username)\n return False\n self.update_users(parsed_users)\n return True\n\n # Maintaining the logical connection.\n def support_logical_connection(self):\n if self.idle:\n self.last_time = None\n return\n self.send(LogicalConnection.all_connect_id(), self.id + ':' + self.username)\n if self.last_time is not None\\\n and (datetime.datetime.utcnow() - self.last_time).total_seconds() > LogicalConnection.timeout_sec():\n self.on_timed_out()\n\n # Sending a message.\n def send(self, recipient, message, **kwargs):\n # Wrap the message.\n message = LogicalConnection.wrap(recipient,\n message,\n kwargs.get('sender', self.id),\n kwargs.get('counter', 0))\n encoded_message = '' # Encoded message.\n\n if recipient != LogicalConnection.all_connect_id() or self.debug:\n print(LogicalConnection.t() + ' Sending message: ' + str(message.split('\\0')))\n\n # Encoding each symbol with the cyclic code [11,15]\n for i in message:\n encoded_message += chr(LogicalConnection.make_cyclic_code(ord(i)))\n self.output_connection.write(encoded_message.encode('utf-8'))\n\n # Receiving a message.\n def receive(self, message):\n # If the connection is idle, don't bother and just forward it on\n if self.idle:\n self.output_connection.write(message)\n return\n\n message = message.decode('utf-8')\n decoded_message = '' # Decoded message.\n for i in message:\n rest = LogicalConnection.division(ord(i)) # Check for an error.\n if rest != 0:\n i = chr(ord(i) ^ self.find_error(rest)) # Attempt to fix the error.\n decoded_message += chr(ord(i) >> 4)\n self.parse_message(decoded_message)\n\n # Interpreting the message.\n def parse_message(self, message): # 0-sender, 1-recipient, 2-counter, 3-text\n info_in_message = message.split('\\0')\n\n if LogicalConnection.validate(info_in_message):\n\n if info_in_message[1] != LogicalConnection.all_connect_id() or self.debug:\n print(LogicalConnection.t() + ' Received message: ' + str(info_in_message))\n\n # If the message is addressed to us.\n if info_in_message[1] == self.id and info_in_message[0] != self.id:\n self.on_received(info_in_message[0], info_in_message[3])\n info_in_message[2] = int(info_in_message[2]) + 1\n if info_in_message[1] == LogicalConnection.all_id() and info_in_message[0] != self.id:\n self.on_broadcast_received(info_in_message[0], info_in_message[3])\n info_in_message[2] = int(info_in_message[2]) + 1\n\n # If we are the sender.\n if info_in_message[0] == self.id:\n if info_in_message[1] == LogicalConnection.all_connect_id():\n # Check for a conflict. 
If there is one, don't connect\n if not self.idle and self.update_users_table(info_in_message[3], not self.last_time):\n if not self.last_time:\n self.on_connection_established()\n self.last_time = datetime.datetime.utcnow()\n elif info_in_message[1] == LogicalConnection.all_id(): # If it was sent to everyone.\n if int(info_in_message[2]) < 2: # If not everyone received it.\n self.broadcast_failed(info_in_message[3])\n else: # If it was sent directly.\n if info_in_message[2] == '0' and info_in_message[1] != self.id: # If the addressee did not receive it\n self.recipient_not_found(info_in_message[3], info_in_message[1])\n else:\n # Announce ourselves\n if info_in_message[1] == LogicalConnection.all_connect_id():\n info_in_message[3] += ',' + self.id + ':' + self.username\n # Forward it on\n self.send(info_in_message[1], info_in_message[3],\n sender=info_in_message[0], counter=info_in_message[2])\n\n if info_in_message[1] == 'notify_disconnect':\n self.on_disconnect(info_in_message[0])\n\n else: # If the message is invalid\n pass\n\n\n","sub_path":"transport_layer/LogicalConnection.py","file_name":"LogicalConnection.py","file_ext":"py","file_size_in_byte":12180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"254986512","text":"import tensorflow as tf\nfrom tensorflow.keras import optimizers, datasets, layers, Sequential, metrics\n\n\ndef preprocess(x, y):\n x = tf.cast(x, dtype=tf.float32)/255.0\n y = tf.cast(y, dtype=tf.int32)\n return x, y\n\n\n(x,y), (x_test, y_test) = datasets.mnist.load_data()\n\nbatchsz = 128\ndb = tf.data.Dataset.from_tensor_slices((x, y))\ndb = db.map(preprocess).shuffle(60000).batch(batchsz)\n\ndb_test = tf.data.Dataset.from_tensor_slices((x_test, y_test)).map(preprocess).batch(batchsz)\n\nnetwork = Sequential([layers.Dense(256, activation=tf.nn.relu),\n layers.Dense(128, activation=tf.nn.relu),\n layers.Dense(64, activation=tf.nn.relu),\n layers.Dense(32, activation=tf.nn.relu),\n layers.Dense(10)])\nnetwork.build(input_shape=[None, 28*28])\nnetwork.summary()\n\noptimizer = optimizers.Adam(learning_rate=1e-2)\nacc_metrics = metrics.Accuracy()\nloss_metrics = metrics.Mean()\n\nfor step, (x, y) in enumerate(db):\n with tf.GradientTape() as tape:\n x = tf.reshape(x, [-1, 28*28])\n out = network(x)\n y_onehot = tf.one_hot(y, depth=10)\n pred = tf.nn.softmax(out)\n loss = tf.reduce_mean(tf.losses.categorical_crossentropy(y_onehot, pred))\n loss_metrics.update_state(loss)\n grads = tape.gradient(loss, network.trainable_variables)\n optimizer.apply_gradients(zip(grads, network.trainable_variables))\n if step % 100 == 0:\n print(step, \"loss:\", loss) # result of the 100th step\n # compute the average over 100 steps?\n print(step, \"loss:\", loss_metrics.result())\n loss_metrics.reset_states()\n if step % 300 == 0:\n total, total_correct = 0, 0\n acc_metrics.reset_states()\n for step, (x, y) in enumerate(db_test):\n x = tf.reshape(x, [-1, 28*28])\n out = network(x)\n pred = tf.nn.softmax(out)\n pred = tf.argmax(pred, axis=1)\n pred = tf.cast(pred, dtype=tf.int32)\n result = tf.cast(tf.equal(pred, y), dtype=tf.int32)\n total_correct += tf.reduce_sum(result)\n total += result.shape[0]\n # acc_metrics.reset_states()\n acc_metrics.update_state(y, pred) # average accuracy of step 0 and step 300\n print(step, \"Evaluate Acc:\", total_correct/total, acc_metrics.result().numpy())\n\n","sub_path":"HighRiseAPI/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"544069246","text":"import 
torch\nimport torch.nn as nn\nimport torch.nn.modules.conv\nimport torch.utils.data\n\n\ndef binarize_model(model: nn.Module, drop_layers=(nn.ReLU, nn.PReLU), keep_data=True) -> nn.Module:\n \"\"\"\n :param model: net model\n :param drop_layers: remove these layers from the input model\n :param keep_data: keep original parameters data (True)\n or re-sample (False) as two Gaussian peaks near 0.5 and -0.5\n :return: model with linear and conv layers wrapped in BinaryDecorator\n \"\"\"\n if isinstance(model, BinaryDecorator):\n print(\"Layer is already binarized.\")\n return model\n for name, child in list(model.named_children()):\n if isinstance(child, drop_layers):\n delattr(model, name)\n continue\n child_new = binarize_model(model=child, drop_layers=drop_layers, keep_data=keep_data)\n if child_new is not child:\n setattr(model, name, child_new)\n if isinstance(model, (nn.modules.conv._ConvNd, nn.Linear)):\n if hasattr(model, 'bias'):\n delattr(model, 'bias')\n model.register_parameter(name='bias', param=None)\n model = BinaryDecorator(model, as_two_peaks=not keep_data)\n return model\n\n\ndef compile_inference(model: nn.Module):\n for name, child in list(model.named_children()):\n compile_inference(child)\n if isinstance(model, BinaryDecorator):\n model.compile_inference()\n\n\nclass BinaryFunc(torch.autograd.Function):\n\n @staticmethod\n def forward(ctx, tensor):\n ctx.save_for_backward(tensor)\n tensor = tensor > 0\n tensor = 2 * tensor.type(torch.FloatTensor) - 1\n return tensor\n\n @staticmethod\n def backward(ctx, grad_output):\n input, = ctx.saved_tensors\n grad_output[input.ge(1)] = 0\n grad_output[input.le(-1)] = 0\n return grad_output\n\n\nclass BinaryDecorator(nn.Module):\n def __init__(self, layer: nn.Module, as_two_peaks=False):\n super().__init__()\n for param in layer.parameters():\n if as_two_peaks:\n data_peaks = 0.5 + 0.1 * torch.randn(param.data.shape)\n data_peaks[torch.rand(data_peaks.shape) > 0.5] *= -1\n if param.data.is_cuda:\n data_peaks = data_peaks.cuda()\n param.data = data_peaks\n param.is_binary = True\n self.layer = layer\n self.is_inference = False\n\n def compile_inference(self):\n for param in self.layer.parameters():\n param.data.sign_()\n self.is_inference = True\n\n def forward(self, x):\n x = BinaryFunc.apply(x)\n if self.is_inference:\n x = self.layer(x)\n else:\n weight_full = self.layer.weight.data.clone()\n self.layer.weight.data.sign_()\n x = self.layer(x)\n self.layer.weight.data = weight_full\n return x\n\n def __repr__(self):\n tag = \"[Binary]\"\n if self.is_inference:\n tag += '[Compiled]'\n return tag + repr(self.layer)\n\n\nclass ScaleLayer(nn.Module):\n\n def __init__(self, size: int, init_value=1e-3):\n super().__init__()\n self.scale = nn.Parameter(torch.FloatTensor(size).fill_(init_value))\n\n def forward(self, x):\n return self.scale * x\n\n def __repr__(self):\n return self.__class__.__name__ + f\"(size={self.scale.numel()})\"\n","sub_path":"layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"620078672","text":"import time, random, string, os, time\r\nimport data_structures as dt\r\n\r\n\r\nclass Carro:\r\n def __init__ (self, placa):\r\n self.placa = placa\r\n self.enParqueo = False\r\n self.esp = None\r\n\r\nclass Persona:\r\n def __init__ (self, ced, nickname, password, nombre, apellido, edad, email=None, direccion=None, tel=None):\r\n self.ced = ced\r\n self.nickname = nickname\r\n self.password = 
password\r\n self.nombre = nombre\r\n self.apellido = apellido\r\n self.edad = edad\r\n self.email = email\r\n self.direccion = direccion\r\n self.tel = tel\r\n\r\nclass Usuario(Persona):\r\n def __init__(self, ced, nickname, password, nombre, apellido, edad, placa, puntos, email, direccion, tel, index):\r\n self.carro = Carro(placa)\r\n super().__init__(ced, nickname, password, nombre, apellido, edad, email, direccion, tel)\r\n self.puntos = int(puntos)\r\n self.index = index\r\n\r\nclass Espacio:\r\n def __init__ (self,cod,tiempoI,carro = None,libre = True):\r\n self.cod = cod\r\n self.tiempoInicio = tiempoI\r\n self.libre = libre\r\n self.carro = carro\r\n\r\nclass Empleado:\r\n def __init__(self,nickname,password,nombre):\r\n self.nickname = nickname\r\n self.password = password\r\n self.nombre = nombre\r\n\r\nclass EasyParking:\r\n def __init__(self):\r\n self.usuariosRoute = \"usuarios.ep\"\r\n self.parqueaderosRoute = \"parqueaderos\"\r\n self.empleadosRoute = \"empleados.ep\"\r\n\r\n self.usuarios = dt.HashMap(1000)\r\n self.puntos = dt.BinaryDataHeap(1000)\r\n self.nicknames = dt.StringHashMap(1000,15)\r\n self.placas = dt.StringHashMap(1000,10)\r\n self.addUsuarios()\r\n\r\n self.parqueaderos = []\r\n self.addParqueaderos()\r\n\r\n self.empleados = dt.StringHashMap(1000,15)\r\n self.addEmpleados()\r\n\r\n def addParqueaderos(self):\r\n n = len(os.listdir(self.parqueaderosRoute))\r\n p=0\r\n while p 1: f.write('\\n')\r\n f.writelines(line)\r\n\r\n return 3\r\n \r\n def checkLoginEmpleado(self,nickname,password):\r\n e = self.empleados.get(nickname)\r\n if e is None: return \"-2\"\r\n elif e.password == password: return e\r\n return \"-1\"\r\n\r\n def checkInfoEmpleado(self,info):\r\n if self.empleados.get(int(info[0])) is not None: return 0\r\n elif self.empNicknames.get(info[1]): return 1\r\n return 3\r\n\r\n def buscarUsuario(self,ced):\r\n return self.usuarios.get(ced)\r\n \r\n def updateFile(self,ced):\r\n u = self.usuarios.get(ced)\r\n\r\n if u is None: return\r\n\r\n info = [u.ced,u.nickname,u.password,u.nombre,u.apellido,u.edad,u.carro.placa,str(u.puntos),\"\",\"\",\"\"]\r\n if u.email is not None: info[8] = u.email\r\n if u.direccion is not None: info[9] = u.direccion\r\n if u.tel is not None: info[10] = u.tel\r\n\r\n line = '*'.join(info)+'\\n'\r\n\r\n with open(self.usuariosRoute,\"r\") as f:\r\n data = f.readlines()\r\n\r\n data[u.index] = line\r\n\r\n with open(self.usuariosRoute,\"w\") as f:\r\n f.writelines(''.join(data))\r\n \r\n def vaciarParqueadero(self,inP):\r\n with open(self.parqueaderosRoute+\"/p\"+str(inP),\"r\") as f:\r\n data = f.readlines()[2:]\r\n\r\n for l in data:\r\n ced = int(l.split(\"*\")[1])\r\n self.usuarios.get(ced).carro.enParqueo = False\r\n self.usuarios.get(ced).carro.esp = None\r\n\r\n p = self.parqueaderos[inP]\r\n p.espaciosTree.makeEmpty()\r\n p.espacios = [None]*p.totales\r\n p.ocupados = 0\r\n data = [p.nombre,p.cod,p.direccion,p.tel,p.gerente,str(p.totales),str(p.ocupados)]\r\n data = \"*\".join(data) + \"\\n\"\r\n data += \"0\"*p.totales + \"\\n\"\r\n with open(self.parqueaderosRoute+\"/p\"+str(inP),\"w\") as f:\r\n f.write(data)\r\n\r\n def restaurarParqueadero(self,inP):\r\n self.parqueaderos[inP].totales = 50\r\n self.vaciarParqueadero(inP)\r\n\r\n\r\nclass Parqueadero:\r\n\r\n def __init__(self,nombre,cod,direccion,tel,gerente,totales,ocupados):\r\n self.espaciosTree = dt.AvlTree()\r\n self.espacios = [None]*totales\r\n self.totales = totales\r\n self.ocupados = ocupados\r\n\r\n self.nombre = nombre\r\n self.cod = cod\r\n 
self.direccion = direccion\r\n self.tel = tel\r\n self.gerente = gerente\r\n\r\n self.parqueaderosRoute = \"parqueaderos\"\r\n\r\n def parqueo(self,user,inP,e,verified):\r\n if user is not None and user.carro is not None and not user.carro.enParqueo:\r\n self.espacios[e] = Espacio(e,int(time.time()), user.carro,False)\r\n self.espaciosTree.root = self.espaciosTree.insert(e,self.espaciosTree.root)\r\n user.carro.enParqueo = True\r\n user.carro.esp = (self.cod,inP,e)\r\n if not verified:\r\n with open(self.parqueaderosRoute+\"/p\"+str(inP),\"r\") as f:\r\n data = f.readlines()\r\n data[1]=data[1][0:e]+\"1\"+data[1][e+1:]\r\n #data.append(str(int(time.time()))+\"*\"+user.ced+\"\\n\")\r\n data.insert(data[1].count(\"1\",0,e)+2,str(int(time.time()))+\"*\"+user.ced+\"\\n\")\r\n with open(self.parqueaderosRoute+\"/p\"+str(inP),\"w\") as f: \r\n f.write(\"\".join(data))\r\n\r\n self.ocupados += 1\r\n\r\n return True\r\n\r\n return False\r\n\r\n def desparqueo(self,user,inP):\r\n if user is not None and user.carro is not None and user.carro.enParqueo:\r\n inE = user.carro.esp[2]\r\n self.espacios[inE] = None\r\n self.espaciosTree.remove(inE,self.espaciosTree.root)\r\n user.carro.enParqueo = False\r\n user.carro.esp = None\r\n with open(self.parqueaderosRoute+\"/p\"+str(inP),\"r\") as f:\r\n data = f.readlines()\r\n \r\n data[1]=data[1][0:inE]+\"0\"+data[1][inE+1:]\r\n data.pop(data[1].count(\"1\",0,inE)+2)\r\n with open(self.parqueaderosRoute+\"/p\"+str(inP),\"w\") as f:\r\n f.write(\"\".join(data)) \r\n\r\n self.ocupados -= 1 \r\n\r\n def agregarEspacios(self,inP):\r\n self.espacios = self.espacios + [None]*10\r\n self.totales+=10\r\n\r\n with open(self.parqueaderosRoute+\"/p\"+str(inP),\"r\") as f:\r\n data = f.readlines()\r\n \r\n data[1] = data[1].rstrip('\\n')+\"0\"*10+'\\n'\r\n\r\n with open(self.parqueaderosRoute+\"/p\"+str(inP),\"w\") as f:\r\n f.write(''.join(data))\r\n\r\n\r\ndef main():\r\n ep = EasyParking()\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n \r\n","sub_path":"EasyParking.py","file_name":"EasyParking.py","file_ext":"py","file_size_in_byte":12960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"419827223","text":"class Solution(object):\n def numIslands(self, grid):\n \"\"\"\n :type grid: List[List[str]]\n :rtype: int\n \"\"\"\n rows = len(grid)\n cols= len(grid[0])\n queue = []\n neighbors = [(0,1),(1,0),(0,-1),(-1,0)]\n visited = [[False for col in range(cols) ] for row in range(rows) ]\n #print(visited)\n islands = 0\n\n for i in range(rows):\n for j in range(cols):\n if grid[i][j] == 1 and not visited[i][j]:\n queue.append((i,j))\n islands += 1\n\n while len(queue) > 0:\n current_i, current_j = queue.pop(0)\n\n if self.isValid(current_i, current_j, grid) and not visited[current_i][current_j]:\n visited[current_i][current_j] = True\n\n for neighbour_i, neighbour_j in neighbors:\n queue.append((current_i + neighbour_i, current_j + neighbour_j))\n\n\n return islands\n\n def isValid(self, row ,col, grid):\n\n if row < 0 or row >= len(grid) or col < 0 or col >= len(grid[0]) or grid[row][col] == 0:\n return False\n return True\n\nsol = Solution()\nprint(sol.numIslands([\n [1,1,0,0,0],\n [1,1,0,0,0],\n [0,0,1,0,0],\n [0,0,0,1,1]\n]))","sub_path":"leetcode/islands.py","file_name":"islands.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"520063453","text":"#Including Libraries\nimport socket\nimport mouse\nimport 
threading\n\n# Declaring global variables\ndata=b'0' #For reading data and using in functions \npresscheck=False #An easy alternate of stoping thread\n\n\nhostname = socket.gethostname() # Getting the hostname by socket.gethostname() method\nHOST = socket.gethostbyname(hostname) # Getting the IP address using socket.gethostbyname() method\nPORT = 50000 # Assigning a free port number for the connection\n\ns=socket.socket(socket.AF_INET, socket.SOCK_STREAM) #Creating a TCP connection socket\ns.bind((HOST, PORT)) #Assigning our socket IP & Port number\ns.listen()\n\n#Functions for doing different tasks\ndef close():\n global presscheck\n presscheck=False\n\n\ndef left():\n global presscheck\n presscheck=True\n while True:\n mouse.move(-5, 0, absolute=False, duration=0.01)\n if presscheck==False:\n break\n\ndef right():\n global presscheck\n presscheck=True\n while True:\n mouse.move(5, 0, absolute=False, duration=0.01)\n if presscheck==False:\n break\n\ndef up():\n global presscheck\n presscheck=True\n while True:\n mouse.move(0, -5, absolute=False, duration=0.01)\n if presscheck==False:\n break\n\ndef down():\n global presscheck\n presscheck=True\n while True:\n mouse.move(0, 5, absolute=False, duration=0.01)\n if presscheck==False:\n break\n\ndef left_click():\n global presscheck\n presscheck=False\n mouse.click('left')\n\ndef right_click():\n global presscheck\n presscheck=False\n mouse.click('right')\n\ndef no_recognition():\n\n print(\"Invalid Instruction Command\")\n\n#This function reads global variable data which is being recieved as a instruction \n#from the client request and calls appropiate function\ndef commandline(): \n switcher = { \n b'close': close, \n b'left': left,\n b'right': right,\n b'up': up,\n b'down': down,\n b'left_click': left_click,\n b'right_click': right_click,\n } \n global data\n func=switcher.get(data,no_recognition)\n return func()\n\ndef server_mouse():\n close_command=b'close' #close command\n last_command='' \n global data\n global presscheck\n #Waiting for commands and assigining work\n while True:\n conn, addr = s.accept()\n with conn:\n print('Connected by', addr)\n while True:\n data = conn.recv(1024)\n if data:\n if presscheck==True:\n presscheck=False\n else:\n th = threading.Thread(target=commandline)\n th.start()\n last_command=data\n\n if not data:\n conn.close()\n break\n #conn.sendall(data)\n if last_command ==close_command:\n break\n\n# Main code\nprint(\"Server started!\")\nprint(\"Host name: \",hostname)\nprint(\"Host ip : \",HOST)\nthread = threading.Thread(target=server_mouse)\nthread.start()\n","sub_path":"pycontrol/pycontrol/pycontrol.py","file_name":"pycontrol.py","file_ext":"py","file_size_in_byte":3094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"205810137","text":"import requests\nimport json\nimport urllib.parse\n\nmain_user_id = 'MAIN_USER_ID'\nsession_id = 'SESSION_ID'\n\nmy_headers = {\n 'X-IG-App-ID': '936619743392459',\n 'Cookie': 'sessionid=' + session_id\n}\ncount = 0\n\n\ndef list_friendships(id, max=\"\"):\n params = {\n 'count': 100,\n 'search_surface': 'follow_list_page',\n 'max_id': str(max)\n }\n r = requests.get('https://i.instagram.com/api/v1/friendships/' +\n id + '/followers/', params=params, headers=my_headers)\n r = r.json()\n\n for x in r['users']:\n pk = x['pk']\n full_name = x['full_name']\n username = x['username']\n is_private = x['is_private']\n\n print('%s,%s,%s,%s' % (pk, full_name, username, is_private))\n global count\n count += 1\n\n if 
'next_max_id' in r:\n list_friendships(id, r['next_max_id'])\n\n\nlist_friendships(main_user_id)\nprint(main_user_id, \"count:\", count)\n","sub_path":"list_friendships.py","file_name":"list_friendships.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"402935185","text":"import random\r\nfrom time import sleep\r\n\r\n\r\npossibleWeapons = ['pistol', 'stick', 'sword']\r\n\r\ndamage = 0 #This variable is for the fighting function \r\n\r\ndef NewDamage(damageTaken):\r\n global damage\r\n damage = damageTaken\r\n print('From function ' + str(damage))\r\n\r\ndef CheckForWep(Player):\r\n if Player.inventory['Weapon'] in possibleWeapons:\r\n print('Weapon has been found!')\r\n NewDamage(30)\r\n else:\r\n print('No weapon has been found!')\r\n NewDamage(20)\r\n\r\n \r\nclass Player:\r\n inventory = {'gold': 10, 'Weapon': 'stick'}\r\n\r\n def __init__(self, health=100):\r\n self.health = health\r\n\r\n\r\nclass Enemy:\r\n inventory = {}\r\n\r\n def __init__(self, health=100):\r\n self.health = health\r\n\r\n'''\r\nthug1 = Enemy()\r\nplayer1 = Player()\r\n'''\r\n\r\n\r\ndef fightTest(player, enemy):\r\n blockFate = ['Block successful', 'You have been hit']\r\n strike = ['You hit the enemy', 'The enemy has hit you']\r\n CheckForWep(player)\r\n print('You start fighting')\r\n \r\n while True:\r\n fate = random.choice(strike)\r\n print(\"Enter 'attack' to hit the enemy or 'block' to deflect the enemy\")\r\n attack = input()\r\n if attack == 'attack':\r\n print('You try to attack the enemy')\r\n print(fate)\r\n if fate == strike[0]:\r\n enemy.health -= damage\r\n print('Damage done ' + str(damage))\r\n sleep(1)\r\n print('Their health ' + str(enemy.health))\r\n elif fate == strike[1]:\r\n player.health -= damage\r\n print('Damage done ' + str(damage))\r\n sleep(1)\r\n print('Current health ' + str(player.health))\r\n elif attack == 'block':\r\n print('You attempt to block the enemies attack')\r\n fate1 = random.choice(blockFate)\r\n print(fate1)\r\n if fate1 == blockFate[0]:\r\n print('No damage done to you')\r\n elif fate1 == blockFate[1]:\r\n player.health -= damage\r\n print('Current health ' + str(player.health))\r\n if player.health < 0:\r\n print('You die')\r\n break\r\n print('Game over, thank you for playing!')\r\n if enemy.health < 0:\r\n print('Enemy dies')\r\n \r\n \r\n\r\n\r\n","sub_path":"newcombatsystem.py","file_name":"newcombatsystem.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"616537691","text":"\r\nfrom rocketcea.biprop_utils.rho_isp_plot_obj import RhoIspPlot\r\nfrom rocketcea.biprop_utils.veh_stage_obj import ReferenceStage\r\n\r\n#rp = RhoIspPlot(bipropL=[('LOX','LH2'),('N2O4','MMH')], nsteps_sg=119, nsteps_isp=119)\r\nrp = RhoIspPlot()\r\n\r\n\r\nstg_obj = ReferenceStage( WtPayload=10000.0 )\r\n\r\nrp.add_rho_isp_contours(label_frac_pos=0.2)\r\n\r\nrp.add_stage_param_contours( stg_obj, set_param='DeltaV', param_value=5000.0,\r\n plot_param_valD={'GLOW':[20000, 19000, 18000, 17000, 16000], \r\n 'MassFrac':[.65,.7,.75,.8,.85],\r\n 'CubicFt':[75,100,200,300,400]},\r\n label_frac_posD={'GLOW':0.9, 'CubicFt':.4},\r\n plot_paramL=['GLOW','CubicFt','MassFrac'], num_ticks=6)\r\n \r\nrp.savefig('rho_veh_1.png', 
dpi=120)\r\nrp.show()\r\n\r\n","sub_path":"lib/python2.7/site-packages/rocketcea/examples/rho_veh_1.py","file_name":"rho_veh_1.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"176565431","text":"# Copyright 2017 Regents of the University of Colorado. All Rights Reserved.\n# Released under the MIT license.\n# This software was developed at the University of Colorado's Laboratory for Atmospheric and Space Physics.\n# Verify current version before use at: https://github.com/MAVENSDC/Pydivide\n\nimport os\nimport datetime\nimport numpy as np\n\nimport pytplot\n\n\ndef read_sts_file(sts_file=None, read_only=False, prefix='', suffix=''):\n \"\"\"\n Read in a given filename in situ file into a dictionary object\n Optional keywords maybe used to downselect instruments returned\n and the time windows.\n\n Input:\n filename: str/list of str\n The file names and full paths of STS files to be read and parsed.\n read_only: boolean\n If True, just reads data into dict and returns the dict.\n If False, loads data into dict and loads data in the dict into tplot variables.\n prefix: str\n The tplot variable names will be given this prefix. By default,\n no prefix is added.\n suffix: str\n The tplot variable names will be given this suffix. By default,\n no suffix is added.\n Output:\n Either a dictionary (data structure) containing up to all of the columns included\n in a STS data file, or tplot variable names.\n \"\"\"\n\n # List of headers present in MAG STS file\n headers = ['year', 'doy', 'hour', 'min', 'sec', 'msec', 'dday', 'outboard_b_j2000_x',\n 'outboard_b_j2000_y', 'outboard_b_j2000_z', 'outboard_b_j2000_range', 'sc_posn_x', 'sc_posn_y',\n 'sc_posn_z', 'outboard_bd_payload_x', 'outboard_bd_payload_y', 'outboard_bd_payload_z',\n 'outboard_bd_payload_range']\n\n # Create a dictionary and list in which we'll store STS variable data and variable names, respectively\n sts_dict = {}\n stored_variables = []\n\n # Code assumes a list of STS files\n if isinstance(sts_file, str):\n sts_file = [sts_file]\n elif isinstance(sts_file, list):\n sts_file = sts_file\n else:\n print(\"Invalid filenames input.\")\n return stored_variables\n\n for s_file in sts_file:\n with open(s_file, 'r') as f:\n lines = f.readlines()\n\n # In STS files, the beginning of the data starts after the last time 'END_OBJECT' is found\n end_objects = [l for l, line in enumerate(lines) if 'END_OBJECT' in line]\n end_headers = end_objects[-1]\n data = lines[end_headers+1:]\n data = [d.strip().split() for d in data] # Remove extra spaces, then split on whitespaces\n\n # Create the STS dictionary\n for h, head in enumerate(headers):\n data_column = [d[h] for d in data[:10]]\n if head not in sts_dict:\n sts_dict[head] = data_column\n else:\n sts_dict[head].extend(data_column)\n\n # We need to create datetime objects from the sts_dict's year, doy, hour, min, sec, and msec data\n year = sts_dict['year']\n doy = sts_dict['doy']\n hour = sts_dict['hour']\n min = sts_dict['min']\n sec = sts_dict['sec']\n msec = sts_dict['msec']\n\n # First get year, month, and day\n dates = [datetime.datetime.strptime('{}+{}'.format(yr, dy), '%Y+%j') for yr, dy in zip(year, doy)]\n # Then add in the sts_dict's hour, min, sec, and msec data\n dtimes = [d.replace(hour=int(hr), minute=int(mn), second=int(s), microsecond=int(ms), tzinfo=datetime.timezone.utc)\n for d, hr, mn, s, ms in zip(dates, hour, min, sec, msec)]\n sts_dict['time_unix'] = dtimes\n\n # These keys 
are no longer necessary, nix them\n remove_time_keys = ['year', 'doy', 'hour', 'min', 'sec', 'msec']\n for key in remove_time_keys:\n try:\n sts_dict.pop(key)\n except KeyError:\n print('Key {} was not found'.format(key))\n\n # Don't create tplot vars if that's not what's desired\n if read_only:\n return sts_dict\n\n for key in sts_dict.keys():\n # create variable name\n obs_specific = prefix + key + suffix\n # if all values are NaN, continue\n if all(v is None for v in sts_dict[key]):\n continue\n # store data in tplot variable\n if key != 'time_unix':\n try:\n pytplot.store_data(\n obs_specific, data={'x': sts_dict['time_unix'], 'y': [np.float(val) for val in sts_dict[key]]})\n except ValueError:\n continue\n if obs_specific not in stored_variables:\n stored_variables.append(obs_specific)\n\n return stored_variables\n","sub_path":"pytplot/importers/sts_to_tplot.py","file_name":"sts_to_tplot.py","file_ext":"py","file_size_in_byte":4598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"271785053","text":"from __future__ import division, print_function\n\nimport MDAnalysis\nimport numpy as np\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt\n\nfrom IPython import embed\n\nimport cPickle as pickle\n\nuniv = MDAnalysis.Universe('top.tpr', 'prot.gro')\nuniv.add_TopologyAttr('tempfactor')\n\n#rg = univ.residues[1:-1]\nrg = univ.residues\nag = rg.atoms\ncharges = np.abs(rg.atoms.charges)\n\nwidth = 0.01\nbb = np.arange(0,1+width,width)\nhist, bb = np.histogram(charges, bins=bb)\n\nplt.bar(bb[:-1], hist, width=width, align='edge')\nymax = 116\nxcoords = [0.15, 0.20, 0.25, 0.30, 0.35]\nfor xc in xcoords:\n plt.axvline(x=xc, ymax=ymax, linestyle='--', color='k')\nplt.ylim(0,ymax)\nplt.show()\n\nthresh = float(raw_input(\"choose charge threshold: \"))\n\ncharge_dict = {}\n\nfor res in rg:\n print(\"Residue: {}\".format(res.resname))\n chrge = 0\n charge_dict[res.resname] = {}\n #ag = res.atoms.select_atoms('not name H*')\n for atm in res.atoms:\n \n # Hydrophilic: -1; Hydrophobic: 0\n hydrophil = -1 if np.abs(atm.charge) > thresh else 1\n \n atm.tempfactor = hydrophil\n charge_dict[res.resname][atm.name] = hydrophil\n print(\" atm: {} hv: {} charge: {}\".format(atm.name, hydrophil, atm.charge))\n chrge += atm.charge\n \n print(\"charge: {}\".format(chrge))\n res.atoms.write(\"{}.pdb\".format(res.resname))\n\nwith open('charge_assign.pkl', 'w') as f:\n pickle.dump(charge_dict, f)","sub_path":"scratch/plot_charge_dist.py","file_name":"plot_charge_dist.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"436614945","text":"# auto create graph from given excel size(20 * 3)\r\n\r\nfrom py2neo import Graph, Node, Relationship, NodeMatcher\r\nimport xlrd\r\nimport numpy\r\n\r\n# data preparation\r\npath = 'E:/2. NaRi/1. My NaRi Projects/1. 
neo4j/alert2(1).bak.xlsx'\r\nworkbook = xlrd.open_workbook(path)\r\nData_sheet = workbook.sheets()[0] # 通过索引获取\r\nrowNum = Data_sheet.nrows # sheet行数\r\ncolNum = Data_sheet.ncols # sheet列数\r\n\r\n# 获取所有单元格的内容\r\nlist = []\r\nfor i in range(rowNum):\r\n rowlist = []\r\n for j in range(colNum):\r\n rowlist.append(Data_sheet.cell_value(i, j))\r\n list.append(rowlist)\r\narr = numpy.delete(list, 0, axis = 0) # delete first row\r\n\r\n\r\n\r\nfor i in range(19): # from row1 to row19\r\n # connection to db s6000-demo\r\n graph = Graph(\r\n \"http://localhost:7474\",\r\n username=\"neo4j\",\r\n password=\"1\"\r\n )\r\n tx = graph.begin()\r\n m = NodeMatcher(graph)\r\n\r\n # definition\r\n attacker_ip = arr[i][0]\r\n victim_ip = arr[i][1]\r\n log_index = i\r\n attack_type = arr[i][2]\r\n relation_attack_log = \"attack_related\"\r\n relation_victim_log = \"victim_related\"\r\n\r\n # check existing node\r\n check_result = graph.nodes.match(\"attacker\", IP = attacker_ip).first()\r\n if check_result == None:\r\n # create attacker node\r\n a = Node(\"attacker\", IP = attacker_ip)\r\n tx.create(a)\r\n print('check_result == None')\r\n else:\r\n _a = m.match(\"attacker\", IP = attacker_ip).first()\r\n a = graph.nodes[_a.identity]\r\n print('check_result != None')\r\n\r\n check_result = graph.nodes.match(\"victim\", IP = victim_ip).first()\r\n if check_result == None:\r\n # create victim node\r\n v = Node(\"victim\", IP = victim_ip)\r\n tx.create(v)\r\n print('check_result == None')\r\n else:\r\n _v = m.match(\"victim\", IP = victim_ip).first()\r\n v = graph.nodes[_v.identity]\r\n print('check_result != None')\r\n\r\n # create log node and relationship\r\n l = Node(\"warninglog\", LogIndex=log_index, ATTACKTYPE=attack_type)\r\n al = Relationship(a, relation_attack_log, l)\r\n tx.create(al)\r\n\r\n vl = Relationship(v, relation_victim_log, l)\r\n tx.create(vl)\r\n tx.commit() # this line of codes should be as ending code","sub_path":"s6000-demo.v2.py","file_name":"s6000-demo.v2.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"405006037","text":"import tensorflow as tf\nimport numpy as np\n\nfrom utils import utilities\nimport model\n\n\nclass LogisticRegression(model.Model):\n\n \"\"\"Simple Logistic Regression using TensorFlow.\n The interface of the class is sklearn-like.\n \"\"\"\n\n def __init__(self, main_dir='lr/', model_name='lr', loss_func='cross_entropy', dataset='mnist',\n learning_rate=0.01, verbose=0, num_epochs=10, batch_size=10):\n\n \"\"\"\n :param verbose: Level of verbosity. 
0 - silent, 1 - print accuracy.\n \"\"\"\n model.Model.__init__(self, model_name, main_dir)\n\n self._initialize_training_parameters(loss_func, learning_rate, num_epochs, batch_size,\n dataset, None, None)\n\n self.verbose = verbose\n\n # Computational graph nodes\n self.input_data = None\n self.input_labels = None\n\n self.W_ = None\n self.b_ = None\n\n self.model_output = None\n\n self.accuracy = None\n\n def build_model(self, n_features, n_classes):\n\n \"\"\" Creates the computational graph.\n :param n_features: number of features\n :param n_classes: number of classes\n :return: self\n \"\"\"\n\n self._create_placeholders(n_features, n_classes)\n self._create_variables(n_features, n_classes)\n\n self.model_output = tf.nn.softmax(tf.matmul(self.input_data, self.W_) + self.b_)\n\n self._create_cost_function_node(self.loss_func, self.model_output, self.input_labels)\n self.train_step = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(self.cost)\n self._create_test_node()\n\n def _create_placeholders(self, n_features, n_classes):\n\n \"\"\" Create the TensorFlow placeholders for the model.\n :param n_features: number of features\n :param n_classes: number of classes\n :return: self\n \"\"\"\n\n self.input_data = tf.placeholder(\"float\", [None, n_features], name='x-input')\n self.input_labels = tf.placeholder(\"float\", [None, n_classes], name='y-input')\n\n def _create_variables(self, n_features, n_classes):\n\n \"\"\" Create the TensorFlow variables for the model.\n :param n_features: number of features\n :param n_classes: number of classes\n :return: self\n \"\"\"\n\n self.W_ = tf.Variable(tf.zeros([n_features, n_classes]), name='weights')\n self.b_ = tf.Variable(tf.zeros([n_classes]), name='biases')\n\n def _create_test_node(self):\n\n \"\"\"\n :return:\n \"\"\"\n\n with tf.name_scope(\"test\"):\n correct_prediction = tf.equal(tf.argmax(self.model_output, 1), tf.argmax(self.input_labels, 1))\n self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n _ = tf.scalar_summary('accuracy', self.accuracy)\n\n def fit(self, train_set, train_labels, validation_set=None, validation_labels=None, restore_previous_model=False):\n\n \"\"\" Fit the model to the data.\n :param train_set: Training data. shape(n_samples, n_features).\n :param train_labels: Labels for the data. shape(n_samples, n_classes).\n :param validation_set: optional, default None. Validation data. shape(n_validation_samples, n_features).\n :param validation_labels: optional, default None. Labels for the validation data. 
shape(n_validation_samples, n_classes).\n :param restore_previous_model:\n if true, a previous trained model\n with the same name of this model is restored from disk to continue training.\n :return: self\n \"\"\"\n\n with tf.Session() as self.tf_session:\n self._initialize_tf_utilities_and_ops(restore_previous_model)\n self._train_model(train_set, train_labels, validation_set, validation_labels)\n self.tf_saver.save(self.tf_session, self.models_dir + self.model_name)\n\n def _train_model(self, train_set, train_labels, validation_set, validation_labels):\n\n \"\"\" Train the model.\n :param train_set: training set\n :param train_labels: training labels\n :param validation_set: validation set\n :param validation_labels: validation labels\n :return: self\n \"\"\"\n\n for i in range(self.num_epochs):\n\n shuff = zip(train_set, train_labels)\n np.random.shuffle(shuff)\n\n batches = [_ for _ in utilities.gen_batches(zip(train_set, train_labels), self.batch_size)]\n\n for batch in batches:\n x_batch, y_batch = zip(*batch)\n self.tf_session.run(self.train_step, feed_dict={self.input_data: x_batch, self.input_labels: y_batch})\n\n if validation_set is not None:\n self._run_validation_error_and_summaries(i, validation_set, validation_labels)\n\n def _run_validation_error_and_summaries(self, epoch, validation_set, validation_labels):\n\n \"\"\" Run the summaries and error computation on the validation set.\n :param epoch: current epoch\n :param validation_set: validation set\n :param validation_labels: validation labels\n :return: self\n \"\"\"\n\n feed = {self.input_data: validation_set, self.input_labels: validation_labels}\n result = self.tf_session.run([self.tf_merged_summaries, self.accuracy], feed_dict=feed)\n summary_str = result[0]\n acc = result[1]\n\n self.tf_summary_writer.add_summary(summary_str, epoch)\n\n if self.verbose == 1:\n print(\"Accuracy at step %s: %s\" % (epoch, acc))\n\n def predict(self, test_set, test_labels):\n\n \"\"\" Compute the accuracy over the test set.\n :param test_set: Testing data. shape(n_test_samples, n_features).\n :param test_labels: Labels for the test data. 
shape(n_test_samples, n_classes).\n :return: accuracy\n \"\"\"\n\n with tf.Session() as self.tf_session:\n self.tf_saver.restore(self.tf_session, self.models_dir + self.model_name)\n return self.accuracy.eval({self.input_data: test_set, self.input_labels: test_labels})\n","sub_path":"GH_buggy_examples/DLT_20d1b59/models/logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":6050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"340511101","text":"from setuptools import setup, find_packages\n\nwith open(\"README.md\") as f:\n long_description = f.read()\n\nsetup(\n name=\"bsplash\",\n version=\"0.01\",\n description=\"Utilities for creating bootsplash themes.\",\n long_description=long_description,\n author=\"Seiya Nuta\",\n author_email=\"nuta@seiya.me\",\n url=\"http://github.com/ntsy/bsplash\",\n scripts=[\"bsplash-create\", \"bsplash-install\", \"bsplash-enable\", \"bsplash-disable\", \"bsplash-fix-grub-config\"],\n install_requires=[\"pillow\"],\n classifiers = [\n \"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: POSIX :: BSD :: FreeBSD\",\n \"Topic :: Utilities\"\n ]\n)\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"360084943","text":"class Solution:\n def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:\n n1, n2 = len(nums1), len(nums2)\n if n1 > n2:\n return self.findMedianSortedArrays(nums2, nums1)\n\n k = (n1 + n2 + 1) // 2\n\n start, end = 0, n1\n while start < end:\n m1 = start + (end - start) // 2\n m2 = k - m1\n if nums1[m1] < nums2[m2 - 1]:\n start = m1 + 1\n else:\n end = m1\n\n m1 = end\n m2 = k - end\n\n c1 = max(float('-inf') if m1 <= 0 else nums1[m1 - 1],\n float('-inf') if m2 <= 0 else nums2[m2 - 1])\n\n if (n1 + n2) % 2 == 1:\n return c1\n c2 = min(float('inf') if m1 >= n1 else nums1[m1],\n float('inf') if m2 >= n2 else nums2[m2])\n\n return (c1 + c2) / 2\n\nclass Solution:\n def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:\n m, n = len(nums1), len(nums2)\n left_idx, right_idx = (m + n + 1) // 2, (m + n + 2) // 2\n left_val = self.find_kth(nums1, 0, nums2, 0, left_idx)\n right_val = self.find_kth(nums1, 0, nums2, 0, right_idx)\n return (left_val + right_val) / 2\n\n def find_kth(self, nums1, idx1, nums2, idx2, k):\n if idx1 >= len(nums1):\n return nums2[idx2 + k - 1]\n if idx2 >= len(nums2):\n return nums1[idx1 + k - 1]\n if k == 1:\n return min(nums1[idx1], nums2[idx2])\n val1 = idx1 + k // 2 - 1 if idx1 < len(nums1) else float('-inf')\n val2 = idx2 + k // 2 - 1 if idx2 < len(nums2) else float('-inf')\n if val1 < val2:\n return self.find_kth(nums1, idx1 + k // 2, nums2, idx2, k - k // 2)\n else:\n return self.find_kth(nums1, idx1, nums2, idx2 + k // 2, k - k // 2)\n\n\nclass Solution:\n def findMedianSortedArrays(self, nums1: 'List[int]', nums2: 'List[int]') -> 'float':\n size = len(nums1) + len(nums2)\n smaller = self.find_kth(nums1, nums2, (size + 1) // 2)\n bigger = self.find_kth(nums1, nums2, (size + 2) // 2)\n return (smaller + bigger) / 2\n\n def find_kth(self, nums1, nums2, k):\n if len(nums1) == 0:\n return nums2[k - 1]\n if len(nums2) == 0:\n return nums1[k - 1]\n if k == 1:\n return min(nums1[0], nums2[0])\n v1 = nums1[k // 2 - 1] if k // 2 - 1 < len(nums1) else float('inf')\n 
v2 = nums2[k // 2 - 1] if k // 2 - 1 < len(nums2) else float('inf')\n if v1 < v2:\n return self.find_kth(nums1[k // 2: ], nums2, k - k // 2)\n else:\n return self.find_kth(nums1, nums2[k // 2: ], k - k // 2)\n","sub_path":"python/4. Median of Two Sorted Arrays.py","file_name":"4. Median of Two Sorted Arrays.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"521505280","text":"import csv\nimport numpy as np\n\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom backend.db_management import load_unlabeled_sentences\nfrom backend.extraction_pipeline import path_quote_detection_weights, path_author_attribution_weights, \\\n author_prediction_poly_degree, quote_detection_poly_degree\nfrom backend.helpers import change_confidence\nfrom backend.ml.author_prediction import evaluate_author_prediction_test\nfrom backend.ml.helpers import save_model\nfrom backend.ml.quote_detection import train_quote_detection, evaluate_unlabeled_sentences\nfrom backend.xml_parsing.helpers import load_nlp\n\n\ndef form_sentence(nlp, tokens):\n \"\"\" \"\"\"\n sentence = ''.join(tokens)\n doc = nlp(sentence)\n return doc\n\n\nclass Command(BaseCommand):\n help = 'Trains the model with all fully annotated articles.'\n\n def add_arguments(self, parser):\n parser.add_argument('--epochs', type=int, help='Max number of epochs to train for. Default: 500', default=500)\n\n parser.add_argument('--qd_loss', help='The loss to use for quote detection. Default: log',\n choices=['log', 'hinge'], default='log')\n parser.add_argument('--qd_penalty', help='The penalty to use for quote detection. Default: l2',\n choices=['l1', 'l2'], default='l2')\n parser.add_argument('--qd_reg', type=float, help='Reg to use for quote detection. Default: 0.01',\n default=0.01)\n\n parser.add_argument('--ap_loss', help='The loss to use for author prediction. Default: hinge',\n choices=['log', 'hinge'], default='hinge')\n parser.add_argument('--ap_penalty', help='The penalty to use for author prediction. Default: l1',\n choices=['l1', 'l2'], default='l1')\n parser.add_argument('--ap_reg', type=float, help='Reg to use for author prediction. 
Default: 0.01',\n default=0.01)\n\n def handle(self, *args, **options):\n max_epochs = options['epochs']\n qd_loss = options['qd_loss']\n qd_penalty = options['qd_penalty']\n qd_alpha = options['qd_reg']\n\n ap_loss = options['ap_loss']\n ap_penalty = options['ap_penalty']\n ap_alpha = options['ap_reg']\n\n try:\n print('\\nLoading language model...\\n')\n nlp = load_nlp()\n with open('data/cue_verbs.csv', 'r') as f:\n reader = csv.reader(f)\n cue_verbs = set(list(reader)[0])\n\n print('Training quote detection...')\n qd_ed = quote_detection_poly_degree\n qd_trained_model = train_quote_detection(qd_loss, qd_penalty, qd_alpha, max_epochs, nlp, cue_verbs, qd_ed)\n save_model(qd_trained_model, path_quote_detection_weights)\n print(f'Saved trained model at {path_quote_detection_weights}\\n')\n\n print(\"Training author prediction...\")\n ap_ed = author_prediction_poly_degree\n ap_trained_model, _, _, _, _, _ =\\\n evaluate_author_prediction_test(ap_loss, ap_penalty, ap_alpha, max_epochs, nlp, cue_verbs, ap_ed)\n save_model(ap_trained_model, path_author_attribution_weights)\n print(f'Saved trained model at {path_quote_detection_weights}\\n')\n\n print('Evaluating all unlabeled quotes...')\n proba = qd_loss == 'log'\n articles, sentences, in_quotes = load_unlabeled_sentences(nlp)\n max_hinge_value = 0.00001\n confidences = []\n predictions = []\n for article, article_sentences, article_in_quotes in zip(articles, sentences, in_quotes):\n probabilities = evaluate_unlabeled_sentences(qd_trained_model, article_sentences, cue_verbs,\n article_in_quotes, proba=proba)\n if proba:\n # Map the probability that a sentence is a quote to a confidence:\n # * probability is 0.5: model has no clue, confidence 0\n # * probability is 0 or 1: model knows, confidence 1\n confidence = [2 * abs(0.5 - prob) for prob in probabilities]\n confidences.append(confidence)\n prediction = [round(prob) for prob in probabilities]\n predictions.append(prediction)\n else:\n # When using hinge loss, the confidence is the distance to the seperating hyperplane\n # Take the log to reduce the effect of very large values\n confidence = [np.log(abs(prob)) for prob in probabilities]\n confidences.append(confidence)\n prediction = [int(prob > 0) for prob in probabilities]\n predictions.append(prediction)\n max_hinge_value = max(max_hinge_value, max(confidence))\n\n for article, confidence, prediction in zip(articles, confidences, predictions):\n if not proba:\n confidence = [conf/max_hinge_value for conf in confidence]\n # For sentences in the article that are fully labeled, the confidence is 1\n new_confidences = [max(label, conf) for label, conf in zip(article.labeled['labeled'], confidence)]\n change_confidence(article.id, new_confidences, prediction)\n\n print('Done\\n')\n\n except IOError:\n raise\n raise CommandError('IO Error.')\n","sub_path":"activelearning/backend/management/commands/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"368296255","text":"from random import randint\r\n\r\nlist = []\r\n\r\n# Input range of list\r\nn = int(input(\"Range: \"))\r\n\r\n# random each item in list\r\nfor i in range(n):\r\n\tlist.append(randint(1, 100))\r\n\r\nprint(\"\\nBefore: {}\\n\".format(list))\r\n\r\ndef selection_sort(list):\r\n\tfor i in range(len(list)):\r\n\t\tsmallest = i\r\n\t\tfor j in range(i+1, len(list)):\r\n\t\t\tif list[j] < list[smallest]:\r\n\t\t\t\tsmallest = j\r\n\t\tlist[smallest], list[i] 
= list[i], list[smallest]\r\n\treturn list\r\n\r\ndef insertion_sort(list):\r\n\tfor i in range(1, len(list)):\r\n\t\tj = i\r\n\t\twhile j > 0 and list[j-1] > list[j]:\r\n\t\t\tlist[j-1], list[j] = list[j], list[j-1]\r\n\t\t\tj -= 1\r\n\treturn list\r\n\r\ndef bubble_sort(list):\r\n\tflag = True\r\n\tlength = len(list)\r\n\twhile flag:\r\n\t\tflag = False\r\n\t\tfor i in range(1, length):\r\n\t\t\tif list[i-1] > list[i]:\r\n\t\t\t\tflag = True\r\n\t\t\t\tlist[i-1], list[i] = list[i], list[i-1]\r\n\t\tlength -= 1\r\n\treturn list\r\n\r\ndef merge(left, right):\r\n result = []\r\n i, j = 0, 0\r\n while i < len(left) and j < len(right):\r\n if left[i] <= right[j]:\r\n result.append(left[i])\r\n i += 1\r\n else:\r\n result.append(right[j])\r\n j += 1\r\n result += left[i:]\r\n result += right[j:]\r\n return result\r\n\r\ndef merge_sort(list):\r\n if len(list) < 2:\r\n return list\r\n mid = len(list) // 2\r\n left = merge_sort(list[:mid])\r\n right = merge_sort(list[mid:])\r\n return merge(left, right)\r\n\r\ndef quicksort(list):\r\n\tless = []\r\n\tequal = []\r\n\tgreater = []\r\n\tif len(list) > 1:\r\n\t\tpivot = list[0]\r\n\t\tfor item in list:\r\n\t\t\tif item < pivot:\r\n\t\t\t\tless.append(item)\r\n\t\t\telif item == pivot:\r\n\t\t\t\tequal.append(item)\r\n\t\t\telif item > pivot:\r\n\t\t\t\tgreater.append(item)\r\n\t\treturn quicksort(less) + equal + quicksort(greater)\r\n\telse:\r\n\t\treturn list\r\n\r\n\r\nprint(\"Selection Sort -> Enter 1\")\r\nprint(\"Insertion Sort -> Enter 2\")\r\nprint(\"Bubble Sort -> Enter 3\")\r\nprint(\"Merge Sort -> Enter 4\")\r\nprint(\"Quick Sort -> Enter 5\\n\")\r\n\r\ntry:\r\n\tinput = int(input(\"> \"))\r\nexcept ValueError:\r\n\tprint(\"Please enter a number...\")\r\nfinally:\r\n\tif input == 1:\r\n\t\tprint(selection_sort(list))\r\n\telif input == 2:\r\n\t\tprint(insertion_sort(list))\r\n\telif input == 3:\r\n\t\tprint(bubble_sort(list))\r\n\telif input == 4:\r\n\t\tprint(merge_sort(list))\r\n\telif input == 5:\r\n\t\tprint(quicksort(list))\r\n","sub_path":"lab2/bai1.py","file_name":"bai1.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"512560627","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3351)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/pynestml/codegeneration/pynestml_2_nest_type_converter.py\n# Compiled at: 2020-03-05 05:49:41\n# Size of source mod 2**32: 2667 bytes\nfrom pynestml.symbols.type_symbol import TypeSymbol\nfrom pynestml.symbols.real_type_symbol import RealTypeSymbol\nfrom pynestml.symbols.boolean_type_symbol import BooleanTypeSymbol\nfrom pynestml.symbols.integer_type_symbol import IntegerTypeSymbol\nfrom pynestml.symbols.string_type_symbol import StringTypeSymbol\nfrom pynestml.symbols.void_type_symbol import VoidTypeSymbol\nfrom pynestml.symbols.unit_type_symbol import UnitTypeSymbol\nfrom pynestml.symbols.nest_time_type_symbol import NESTTimeTypeSymbol\nfrom pynestml.symbols.error_type_symbol import ErrorTypeSymbol\n\nclass PyNestml2NestTypeConverter(object):\n __doc__ = '\\n This class contains a single operation as used to convert nestml types to nest centerpieces.\\n '\n\n @classmethod\n def convert(cls, type_symbol):\n \"\"\"\n Converts the name of the type symbol to a corresponding nest representation.\n :param type_symbol: a single type symbol\n :type type_symbol: TypeSymbol\n :return: the corresponding string 
representation.\n :rtype: str\n \"\"\"\n assert isinstance(type_symbol, TypeSymbol)\n if type_symbol.is_buffer:\n return 'nest::RingBuffer'\n if isinstance(type_symbol, RealTypeSymbol):\n return 'double'\n if isinstance(type_symbol, BooleanTypeSymbol):\n return 'bool'\n if isinstance(type_symbol, IntegerTypeSymbol):\n return 'long'\n if isinstance(type_symbol, StringTypeSymbol):\n return 'std::string'\n if isinstance(type_symbol, VoidTypeSymbol):\n return 'void'\n if isinstance(type_symbol, UnitTypeSymbol):\n return 'double'\n if isinstance(type_symbol, NESTTimeTypeSymbol):\n return 'nest::Time'\n if isinstance(type_symbol, ErrorTypeSymbol):\n return 'ERROR'\n raise Exception('Unknown NEST type')","sub_path":"pycfiles/NESTML-3.1-py3.5/pynestml_2_nest_type_converter.cpython-35.py","file_name":"pynestml_2_nest_type_converter.cpython-35.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"522257870","text":"#------------------------------------------------------------------------------\n# Copyright (c) 2011, Enthought, Inc.\n# All rights reserved.\n#------------------------------------------------------------------------------\nimport wx\n\nfrom .wx_window import WXWindow\n\nfrom ...components.dialog import AbstractTkDialog\n\n\nDIALOG_RETCODE_MAP = {\n wx.ID_OK: 'accepted',\n wx.ID_CANCEL: 'rejected'\n}\n\n\nclass WXDialogSizer(wx.PySizer):\n \"\"\" A custom wx Sizer for use in the WXDialog. This sizers expands\n its child to fit the allowable space, regardless of the settings on\n the child settings. This is similar to how central widgets behave \n in a WXWindow. \n\n There can only be one widget in this sizer at a time and it should\n be added via the .Add(...) method. Old items will be removed \n automatically (but not destroyed).\n\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(WXDialogSizer, self).__init__(*args, **kwargs)\n self._widget = None\n\n def Add(self, widget):\n \"\"\" Adds the given widget to the sizer, removing the old widget\n if present. The old widget is not destroyed.\n\n \"\"\"\n self.Clear(deleteWindows=False)\n self._widget = widget\n return super(WXDialogSizer, self).Add(widget)\n\n def CalcMin(self):\n \"\"\" Returns the minimum size for the children this sizer is \n managing. 
Since the size of the Dialog is managed externally,\n this always returns (-1, -1).\n\n \"\"\"\n return (-1, -1)\n \n def RecalcSizes(self):\n \"\"\" Resizes the child to fit the available space of the scroll\n area.\n\n \"\"\"\n widget = self._widget\n if widget:\n widget.SetSize(self.GetSize())\n\n\nclass WXDialog(WXWindow, AbstractTkDialog):\n \"\"\" A wxPython implementation of a Dialog.\n\n WXDialog uses a wx.Dialog to create a simple top-level dialog.\n\n \"\"\"\n #---------------------------------------------------------------------------\n # Setup methods\n #---------------------------------------------------------------------------\n def create(self, parent):\n \"\"\" Creates the underlying wx.Dialog control.\n\n \"\"\"\n style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER\n self.widget = wx.Dialog(parent, style=style)\n self.widget.SetSizer(WXDialogSizer())\n\n #--------------------------------------------------------------------------\n # Implementation\n #--------------------------------------------------------------------------\n def accept(self):\n \"\"\" Close the dialog and set the result to 'accepted'.\n\n \"\"\"\n self.widget.EndModal(wx.ID_OK)\n\n def reject(self):\n \"\"\" Close the dialog and set the result to 'rejected'.\n\n \"\"\"\n self.widget.EndModal(wx.ID_CANCEL)\n\n #--------------------------------------------------------------------------\n # Widget Update Methods\n #--------------------------------------------------------------------------\n def set_central_widget(self, central_widget):\n \"\"\" Sets the central widget in the window with the given value.\n\n \"\"\"\n # It's possible for the central widget component to be None.\n # This must be allowed since the central widget may be generated\n # by an Include component, in which case it will not exist \n # during initialization. 
However, we must have a central widget\n # for the Dialog, and so we just fill it with a dummy widget.\n central_widget = self.shell_obj.central_widget\n if central_widget is None:\n child_widget = wx.Panel(self.widget)\n else:\n child_widget = central_widget.toolkit_widget\n self.widget.GetSizer().Add(child_widget)\n\n #--------------------------------------------------------------------------\n # Overrides\n #--------------------------------------------------------------------------\n def set_visible(self, visible):\n \"\"\" Overridden from the parent class to properly launch and close \n the dialog.\n\n \"\"\"\n widget = self.widget\n shell = self.shell_obj\n if visible:\n shell._active = True\n shell.opened()\n # wx cannot distinguish between app modal and \n # window modal, so we only get one kind.\n retcode = widget.ShowModal()\n else:\n retcode = wx.ID_CANCEL\n self._handle_retcode(retcode)\n \n #--------------------------------------------------------------------------\n # Auxiliary Methods \n #--------------------------------------------------------------------------\n def _handle_retcode(self, retcode):\n \"\"\" Destroys the dialog, fires events, and set status attributes.\n\n \"\"\"\n shell = self.shell_obj\n result = DIALOG_RETCODE_MAP[retcode]\n shell._result = result\n shell._active = False\n shell.closed(result)\n # Explicitly Destroy the dialog or the wxApp won't properly exit.\n # We can't simply destroy the shell object since the user may\n # still need something from it.\n widget = self.widget\n if widget:\n widget.Destroy() \n\n","sub_path":"enaml/backends/wx/wx_dialog.py","file_name":"wx_dialog.py","file_ext":"py","file_size_in_byte":5191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"456372720","text":"import numpy as np\n\ndef main():\n \"\"\"\n X = np.array([[1.0, 2.0], [7.0, 2.0]])\n b = np.array([2.0, 2.0])\n X = np.vstack([X, b])\n inv = np.linalg.inv(X)\n trans = np.transpose(X)\n product = X @ trans\n print(inv)\n \"\"\"\n a = np.array([2,5,4,6,5,5,3,3,3])\n a = np.sort(a)\n unique, count = np.unique(a,return_counts=True)\n answer= unique[np.argmax(count)]\n print()\n\nif __name__ == '__main__':\n main()","sub_path":"matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"339654041","text":"\nimport socket # 导入 socket 模块\nimport time\ns = socket.socket() # 创建 socket 对象\ns.connect(('127.0.0.1', 8712))\nprint(s.recv(1024).decode(encoding='utf8'))\nwhile True:\n s.send(\"我是901\".encode('utf8'))\n time.sleep(10)\n# print(s.recv(1024).decode(encoding='utf8'))\n","sub_path":"test/client1.py","file_name":"client1.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"41042700","text":"import simpy\nimport random\nimport numpy\nimport matplotlib.pyplot as plt\n\ndef proceso(nombre,env,tiempo,espacio,RAM):\n global totalT \n global tiempos\n global desviacion\n\n #Se crea el proceso\n yield env.timeout(tiempo)\n #Tiempo que le toma al proceso llegar\n tiempoInicial = env.now\n #Se establece la memoria que se utilizara y la cantidad de instrucciones\n memoria = random.randint(1,10)\n instrucciones = random.randint(1, 10)\n print ('%s proceso inicia en tiempo %f necesita %d de memoria y tiene %e instrucciones' % (nombre,tiempoInicial,memoria, instrucciones))\n\n #Se define ir a la cola\n with 
RAM.get(instrucciones) as turno:\n print(nombre, \"tiemp: \", env.now)\n yield turno\n #Si tiene mas de dos instrucciones\n while instrucciones>5:\n with espacio.request() as simular:\n yield simular\n instrucciones = instrucciones-6\n yield env.timeout(1)\n print(nombre, \"tiempo: \", env.now)\n io = random.randint(1,2)\n if(io == 2):\n yield env.timeout(1)\n #Si se tienen menos de tres \n if instrucciones<6:\n yield env.timeout(1)\n RAM.put(memoria)\n\n #Espacio a usar del cpu\n with espacio.request() as turno:\n yield turno \n yield env.timeout(instrucciones)\n print ('%s proceso termina a las %f' % (nombre, env.now))\n TOTAL = env.now - tiempoInicial\n tiempos.append(TOTAL)\n print ('%s se tardo %f' % (nombre, TOTAL))\n totalT = totalT + TOTAL\n\n\ndesviacion1=list()\npromedios1=list()\ndesviacion2=list()\npromedios2=list()\ndesviacion3=list()\npromedios3=list()\nCantidadProcesos=[25,50,100,150,200]\nintervalos=[10,5,1]\nprint(\"Con intervalos de 10\")\nfor k in CantidadProcesos:\n env = simpy.Environment() #ambiente de simulación\n espacio = simpy.Resource(env,capacity = 1)#Cantidad de CPU\n RAM = simpy.Container(env,capacity= 100, init=100) #Cantidad de RAM\n random.seed(10) # fijar el inicio de random\n tiempos = list()\n totalT = 0\n procesos=k\n for i in range(k): #numero de procesos \n env.process(proceso('proceso %d'%i,env,random.expovariate(1.0/10),espacio,RAM))\n\n env.run() #correr la simulación en tiempo infinito\n promedios1.append(totalT/k)\n desviacion1.append(numpy.std(tiempos))\n print (\"tiempo promedio para\", k ,\"procesos es: \", totalT/k)\nprint(\"Los promedios son: \",promedios1)\nprint(\"Las desviaciones estandar son: \",desviacion1)\n\nprint(\"Con intervalos de 5\")\nfor k in CantidadProcesos:\n env = simpy.Environment() #ambiente de simulación\n espacio = simpy.Resource(env,capacity = 1)#Cantidad de CPU\n RAM = simpy.Container(env,capacity= 100, init=100) #Cantidad de RAM\n random.seed(10) # fijar el inicio de random\n tiempos = list()\n totalT = 0\n procesos=k\n for i in range(k): #numero de procesos \n env.process(proceso('proceso %d'%i,env,random.expovariate(1.0/5),espacio,RAM))\n\n env.run() #correr la simulación en tiempo infinito\n promedios2.append(totalT/k)\n desviacion2.append(numpy.std(tiempos))\n print (\"tiempo promedio para\", k ,\"procesos es: \", totalT/k)\nprint(\"Los promedios son: \",promedios2)\nprint(\"Las desviaciones estandar son: \",desviacion2)\n\nprint(\"Con intervalos de 1\")\nfor k in CantidadProcesos:\n env = simpy.Environment() #ambiente de simulación\n espacio = simpy.Resource(env,capacity = 1)#Cantidad de CPU\n RAM = simpy.Container(env,capacity= 100, init=100) #Cantidad de RAM\n random.seed(10) # fijar el inicio de random\n tiempos = list()\n totalT = 0\n procesos=k\n for i in range(k): #numero de procesos \n env.process(proceso('proceso %d'%i,env,random.expovariate(1.0),espacio,RAM))\n\n env.run() #correr la simulación en tiempo infinito\n promedios3.append(totalT/k)\n desviacion3.append(numpy.std(tiempos))\n print (\"tiempo promedio para\", k ,\"procesos es: \", totalT/k)\nprint(\"Los promedios son: \",promedios1)\nprint(\"Las desviaciones estandar son: \",desviacion1)\nplt.plot(CantidadProcesos,promedios1,\"ro\",color=\"green\")\nplt.plot(CantidadProcesos,promedios2,\"ro\",color=\"red\")\nplt.plot(CantidadProcesos,promedios3,\"ro\",color=\"blue\")\nplt.title(\"Promedios por cantidad de procesos\")\nplt.xlabel(\"Cantidad de procesos\")\nplt.ylabel(\"Promedio\")\nplt.legend((\"Promedios con invervalos de 10\",\"Promedios con 
invervalos de 5\",\"Promedios con invervalos de 1\"),loc=\"upper left\")\nplt.show()","sub_path":"HT5Cii.py","file_name":"HT5Cii.py","file_ext":"py","file_size_in_byte":4469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"446680647","text":"\r\n\r\nclass Coord:\r\n \"défition des coordonnées dans R3\"\r\n def __init__(self,x,y,z,genre): #méthode spéciale de construction\r\n self.x = x\r\n self.y = y\r\n self.z = z\r\n self.genre = genre\r\n\r\n def affiche(self):#méthode d'affichage d'une instance de la classe Coord\r\n return (self.x,self.y,self.z,self.genre) # remplacer return par print\r\n\r\n def __sub__(self,other): # définition de la soustraction de deux points\r\n if self.genre == \"point\" and other.genre == \"point\":\r\n return Coord(self.x-other.x,self.y-other.y,self.z-other.z,genre=\"vecteur\")\r\n\r\n def produit_vectoriel(self,other):\r\n x = self.y*other.z-self.z*other.y\r\n y = self.z*other.x-self.x*other.z\r\n z = self.x*other.y-self.y*other.x\r\n return Coord(x,y,z,genre=\"vecteur\")\r\n\r\n #definition de la methode pour les vecteurs colinéaires\r\n def colinéarité (self,other):\r\n vect=self.produit_vectoriel(other)\r\n return (vect.x==0) and (vect.y==0) and (vect.z==0)\r\n\r\n\r\n #definition de la methode pour les vecteurs alignés\r\n def pts_alignés(self,other1,other2):\r\n v1=other1-self\r\n v2=other2-self\r\n return v1.colinéarité(v2)\r\n \r\n #definition d'un plan par un point et un vecteur normal\r\n def plan(self,other):\r\n if self.genre==\"point\" and other.genre==\"vecteur\" :\r\n a=other.x\r\n b=other.y\r\n c=other.z\r\n d=self.x*other.x+self.y*other.y+self.z*other.z\r\n p1={\"x\":a,\"y\":b,\"z\":c, \"constante\":d}\r\n return p1\r\n #définition d'un plan par un point et deux vecteurs colinéaires\r\n def plan2 (self, other1, other2):\r\n if self.genre==\"point\" and other1.genre==\"vecteur\" and other2.genre==\"vecteur\" and not (other1.colinéarité(other2)):\r\n n=other1.produit_vectoriel(other2)\r\n return self.plan(n)\r\n def plan3(self, other1, other2):\r\n if self.genre==\"point\" and other1.genre==\"point\" and other2.genre==\"point\" and not (self.pts_alignés(other1,other2)):\r\n v1=other1-self\r\n v2=other2-self\r\n return self.plan2(v1,v2)\r\n\r\ndef affiche_equation(P1):\r\n a=P1['x']\r\n b=P1['y']\r\n c=P1['z']\r\n d=P1['constante']\r\n\r\n\r\n\r\n\r\n\r\ndef coli(u,v):\r\n vect=u.produit_vectoriel(v)\r\n return (vect.x==0) and (vect.y==0) and (vect.z==0) \r\n\r\n\r\nA = Coord(1,2,-3,'point')\r\nB = Coord(-2,2,0,\"point\")\r\nC = Coord(1,-2,4,\"point\")\r\n\r\nAB = B - A\r\nAC = C - A\r\n\r\nu = AB.produit_vectoriel(AC)\r\n\r\n \r\n","sub_path":"Python/P2/TP08.py","file_name":"TP08.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"507137566","text":"import os\nimport cv2\n\ndata_dir = \"/home/lichen/deepfashion/dataset/categary/2/\"\n\nfor img_list in os.listdir(data_dir):\n img_list = data_dir + img_list\n for img in os.listdir(img_list):\n image_dir = img_list +\"/\" + img\n image = cv2.imread(image_dir, cv2.IMREAD_GRAYSCALE)\n image = cv2.resize(image, (160, 160), interpolation=cv2.INTER_CUBIC)\n cv2.imwrite(image_dir,image) \n print(img)\n","sub_path":"tools/categary/data_resize.py","file_name":"data_resize.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"104025220","text":"import sys, 
warnings\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mcmc import mcmc, model\n\nwarnings.filterwarnings(\"ignore\")\n\nplt.rc('text', usetex=True, fontsize=24)\n\ndef radius(data):\n E_pot = data[:,6]\n min_pot = np.argmin(E_pot)\n #print \"min_pot\", min_pot\n x = data[:,0] - data[min_pot, 0]\n y = data[:,1] - data[min_pot, 1]\n z = data[:,2] - data[min_pot, 2]\n r = np.sqrt(x**2 + y**2 +z**2)\n r = np.sort(r)\n return r[1:]\n\nif (len(sys.argv) != 2):\n sys.exit('Usage: python fit.py n_bodies')\n\n#data\n\nn_bodies = int(sys.argv[1])\n\ndata_final = np.loadtxt('./state_final_{}.dat'.format(n_bodies))\n\n#x\nr_final = radius(data_final)\nlog_r_final = np.log10(r_final)\n\nh, c = np.histogram(log_r_final, bins=10)\nlog_r_center = 0.5 * (c[1:]+c[:-1])\n\n#y\nlog_rho = np.log10(h)-2.0*log_r_center #log(rho) = log(m)-2*log(r)\n\n\n#plot data\n\nplt.figure(figsize=(12,8))\n\nplt.plot(log_r_center, log_rho, label='$\\mathrm{data}$')\n\nplt.xlabel(r'$\\log{(r)}$')\nplt.ylabel(r'$\\log{(\\rho (r))}$')\nplt.legend(loc='lower left')\n\nplt.savefig('./mcmc_plots/density_profile_data.png')\n\n\n#fit\n#$\\rho(r) = \\frac{\\rho_0}{(\\frac{r}{r_c})^\\alpha (1+\\frac{r}{r_c})^\\beta}$\n\nlog_rho_0_0 = 4\nlog_r_c_0 = -1.0\nalpha_0 = 1\nbeta_0 = 2\n\nfit_0 = model(log_r_center, log_rho_0_0, log_r_c_0, alpha_0, beta_0)\n\n\n# plot first guess\n\nplt.figure(figsize=(12,8))\n\nplt.plot(log_r_center, log_rho, label='$\\mathrm{data}$')\nplt.plot(log_r_center, fit_0, label='$\\mathrm{first\\;guess}$')\n\nplt.xlabel(r'$\\log{(r)}$')\nplt.ylabel(r'$\\log{(\\rho (r))}$')\nplt.legend(loc='lower left')\n\nplt.savefig('./mcmc_plots/density_profile_firstguess.png')\n\n\n#fit\n\nlog_rho_0, log_r_c, alpha, beta, log_rho_0_std, log_r_c_std, alpha_std, beta_std = mcmc(log_r_center, log_rho)\n\nfit = model(log_r_center, log_rho_0, log_r_c, alpha, beta)\n\n\n#plot fit\n\nplt.figure(figsize=(12,8))\n\nplt.plot(log_r_center, log_rho, label='$\\mathrm{data}$')\nplt.plot(log_r_center, fit, label='$\\mathrm{fit}$')\n\nplt.xlabel(r'$\\log{(r)}$')\nplt.ylabel(r'$\\log{(\\rho (r))}$')\nplt.legend(loc='lower left')\n\nplt.savefig('./mcmc_plots/density_profile_fit.png')\n\n\n#best values\n\nprint('log(rho_0) = {} +/- {}'.format(log_rho_0, log_rho_0_std))\nprint('log(r_c) = {} +/- {}'.format(log_r_c, log_r_c_std))\nprint('alpha = {} +/- {}'.format(alpha, alpha_std))\nprint('beta = {} +/- {}'.format(beta, beta_std))\n","sub_path":"fit.py","file_name":"fit.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"571918633","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nDBF readonly file component specification module.\n\"\"\"\n\nfrom ...object import object_spc\nfrom ...editor import property_editor_id\n\n__version__ = (0, 0, 0, 1)\n\nCOMPONENT_TYPE = 'iqDBFReadOnlyFile'\n\n\nDBFREADONLYFILE_SPC = {\n 'name': 'default',\n 'type': COMPONENT_TYPE,\n 'description': '',\n 'activate': True,\n\n '_children_': [],\n\n 'dbf_filename': None,\n\n '__package__': u'Data',\n '__icon__': 'fatcow/database_table',\n '__parent__': object_spc.OBJECT_SPC,\n '__doc__': None,\n '__content__': (),\n '__edit__': {\n 'dbf_filename': property_editor_id.FILE_EDITOR,\n },\n '__help__': {\n 'dbf_filename': u'DBF filename',\n },\n}\n\nSPC = 
DBFREADONLYFILE_SPC\n","sub_path":"iq/components/dbf_readonly_file/spc.py","file_name":"spc.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"307022107","text":"import json\n\nfrom django.http import HttpResponse\nfrom django.views.generic import View\nfrom django.core import serializers\nfrom braces.views import PermissionRequiredMixin\n\nfrom questionnaire.models import QuestionGroup\n\n\nclass SubsectionQuestions(PermissionRequiredMixin, View):\n permission_required = 'auth.can_view_questionnaire'\n\n def get(self, request, *args, **kwargs):\n subsection_id = kwargs['subsection_id']\n question_group = QuestionGroup.objects.select_related('question').filter(subsection_id=subsection_id,\n grid=False)\n question_group_list = map(lambda qg: list(qg.question.all()), list(question_group))\n questions = []\n for qg in question_group_list:\n for q in qg:\n question_json = serializers.serialize(\"json\", [q])\n options_json = serializers.serialize(\"json\", q.options.all())\n question_dict = json.loads(question_json)[0]\n question_dict['options'] = json.loads(options_json)\n questions.append(question_dict)\n\n data = {}\n data['questions'] = questions\n\n return HttpResponse(json.dumps(data), content_type=\"application/json\")\n","sub_path":"questionnaire/views/subsection_questions.py","file_name":"subsection_questions.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"201615755","text":"import tensorflow as tf\nimport numpy as np\nimport random\n\ndef myfunc(_x):\n w = 1.3 # 기울기\n b = 2.6 # y 절편. 점(0, 2.6)\n # x 절편은 점(-2, 0)이 됨.\n _y = w * _x + b\n noise = random.random() * 0.01\n return _y + noise\n\n# random.random() -- 0.0 ~ 1.0\nNUM_DATA = 100\nXVALUE = 3 # x값의 범위\n# type: python list\nxlist = [random.random() * XVALUE for i in range(NUM_DATA)]\nylist = [myfunc(x) for x in xlist]\nprint(xlist)\nprint(ylist)\n\n# type: numpy ndarray\nxlist = np.array(xlist)\nylist = np.array(ylist)\nprint(xlist.shape) # shape == (10,)\nprint(ylist.shape) # shape == (10,)\nxlist = xlist.reshape((NUM_DATA, 1)) # shape == (10,1)\nylist = ylist.reshape((NUM_DATA, 1)) # shape == (10,1)\nprint(xlist.shape)\nprint(ylist.shape)\n\nX = tf.placeholder(tf.float32, [None, 1], name='inputPlace')\ny = tf.placeholder(tf.float32, [None, 1])\nW = tf.Variable(tf.random_normal([1,1], -1, 1), name='weight')\nb = tf.Variable(tf.random_normal([1], -1, 1), name='bias')\nO = tf.matmul(X, W) + b\nO_ = tf.nn.sigmoid(O)\ncalc_error = tf.reduce_mean(tf.square(O - y))\n#optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)\noptimizer = tf.train.AdamOptimizer(learning_rate=0.1)\ntraining = optimizer.minimize(calc_error)\n\nprint('X', X.name)\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\nBATCH_SIZE = int(NUM_DATA / 20) # 한번의 training에 넣는 데이터의 개수.\nfor i in range(1000):\n # random sampling from 0,1,2,...,N-1\n index_selected = random.sample(range(NUM_DATA), BATCH_SIZE)\n batch_x = [xlist[i] for i in index_selected]\n batch_y = [ylist[i] for i in index_selected]\n\n res_training, error_val = sess.run([training, calc_error],\n feed_dict={X: batch_x, y:batch_y})\n #print('RES_OPT', res_opt)\n \n if error_val < 0.00001:\n break\n if i % 10 == 0:\n see_loss = sess.run([calc_error],\n feed_dict={X: xlist, y: ylist})\n see_o, see_w, see_b = sess.run([O, W, b], feed_dict={X: xlist, y: ylist})\n print('[%03d]' % i, end=' 
')\n print('LOSS', see_loss, end=' ')\n print('W', see_w, 'bias', see_b)\n","sub_path":"Academy/deepLearning/SIMPLE/nn_with_batch.py","file_name":"nn_with_batch.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"538656861","text":"#Prime\n\n# main function\ndef main():\n\n # Local variable\n number = 0\n\n # Get number\n number = int(input('Enter an integer: '))\n\n # Display information regarding whether the number is prime\n if is_prime(number):\n print('The number you entered is a prime number.')\n else:\n print('The number you entered is not a prime number.')\n\n#The is_prime function receives a number as an argument,\n# and returns True if number is prime, False otherwise\ndef is_prime(number):\n #Local Variables\n half = int(number / 2)\n status = number >= 2 # numbers below 2 are not prime\n\n for count in range(2, half + 1):\n if number % count == 0:\n status = False\n\n return status\n\n# Call the main function\nmain()","sub_path":"prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"501910811","text":"from kivy.uix.boxlayout import BoxLayout\nfrom kivy.properties import ObjectProperty\nfrom kivy.uix.label import Label\nfrom kivy.graphics import Color, Rectangle\nimport communication\n\n##\n# @brief Bottom bar widget used to display debug messages.\n# \n# The bottom bar widget receives updates from several widgets in\n# form of a simple string that is displayed. Furthermore, the current\n# connection status is displayed in the form of a colored label.\nclass BottomBar(BoxLayout):\n\n ##\n # @brief Reference to label displaying debug messages.\n message_label = ObjectProperty(None)\n\n ##\n # @brief Reference to label displaying connection status.\n connection_label = ObjectProperty(None)\n\n def __init__(self, **kwargs):\n super(BottomBar, self).__init__(**kwargs)\n\n ##\n # @brief Update current text and display new received string.\n #\n # @param[in] instance: the object updating text.\n # @param[in] value: the new string to be shown.\n def update_text(self, instance, value):\n self.message_label.text = value\n\n ##\n # @brief Callback called upon change in connection state.\n #\n # Update connection label based on new connection state.\n #\n # @param[in] instance: the object updating the connection state.\n # @param[in] value: the new connection state.\n def connection_event(self, instance, value):\n if (value == communication.CONNECTION_STATE_FOUND):\n self.connection_label.update_color(1, 1, 0)\n self.connection_label.color = (0, 0, 0, 1)\n elif (value == communication.CONNECTION_STATE_CONNECTED):\n self.connection_label.update_color(0, 0.5, 0)\n self.connection_label.color = (1, 1, 1, 1)\n else:\n self.connection_label.update_color(0.3, 0.3, 0.3)\n self.connection_label.color = (1, 1, 1, 1)\n \n##\n# @brief Label with colored background.\n# \n# This is a label that allows to update the background color.\nclass ColoredLabel(Label):\n\n ##\n # @brief Update background color of the label.\n #\n # @param[in] r: red color.\n # @param[in] g: green color.\n # @param[in] b: blue color.\n def update_color(self, r, g, b):\n self.canvas.before.clear()\n with self.canvas.before:\n Color(r,g,b,1)\n self.rect = Rectangle(pos=self.pos, size=self.size)\n self.bind(pos=self.update_rect,\n size=self.update_rect)\n \n ##\n # @brief Update rectangle of the label.\n #\n # This is required so that, when 
resizing the window, the\n # color of the label continues to fill the background.\n #\n def update_rect(self, *kwargs):\n self.rect.pos = self.pos\n self.rect.size = self.size","sub_path":"08_LIS3DH/bottom_bar.py","file_name":"bottom_bar.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"36185546","text":"from django.test import TestCase\n\nfrom modules.todo import service as todo_sv\nfrom modules.todo.models.todo import Todo\n\n\nclass TodoServiceTests(TestCase):\n\n def test_create(self):\n todo_sv.create(text='hoge', is_done=False)\n todo_list = Todo.objects.all()\n self.assertEqual(len(todo_list), 1)\n\n def test_get_all(self):\n todo_sv.create(text='hoge', is_done=False)\n todo_list = todo_sv.get_all()\n self.assertEqual(len(todo_list), 1)\n\n def test_get_by_id(self):\n todo = todo_sv.create(text='hoge', is_done=False)\n todo_id = todo_sv.get_by_id(todo.id).id\n self.assertEqual(todo.id, todo_id)\n","sub_path":"modules/todo/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"643139792","text":"# -*- coding: utf-8 -*-\n\nfrom collections import OrderedDict\nfrom collections import namedtuple\n\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.core.framework import graph_pb2\nfrom tensorflow.python.framework import graph_util\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.client import session\nfrom tensorflow.python.ops import array_ops\n\nfrom intel_quantization.quantize_graph.quantize_graph_common import QuantizeGraphHelper as helper\n\nimport logging\nimport tensorflow as tf\nimport numpy as np\nimport os\n\n\nclass QuantizeGraphBase(object):\n \"\"\"\n This is the base class for quantize graph.\n \"\"\"\n\n def __init__(self, output_node_names):\n self.output_node_names = output_node_names\n self.transformers = OrderedDict()\n\n def register_transformer(self, node_name, entry):\n if node_name not in self.transformers:\n self.transformers[node_name] = []\n\n self.transformers[node_name].append(entry)\n\n def do_transform(self):\n \"\"\"\n This is the virtual interface need to be implemented by derived class\n :return:\n \"\"\"\n pass\n\n def remove_dead_nodes(self, input_graph, output_names):\n \"\"\"Removes nodes that are no longer needed for inference from the graph.\"\"\"\n return graph_util.extract_sub_graph(input_graph, output_names)\n\n def get_supported_fusion_node(self):\n return self.transformers.keys()\n\n\nclass QuantizeNodeBase(object):\n \"\"\"This is the base class for nodes fusion\n\n\n Arguments:\n object {[type]} -- [description]\n \"\"\"\n node_details = namedtuple('node_details', ['node', 'input_node', 'output'])\n\n def __init__(self,\n input_graph,\n output_node_names,\n per_channel,\n start_node_name,\n enable_s8=True):\n if isinstance(input_graph, graph_pb2.GraphDef):\n self.input_graph = input_graph\n else:\n self.input_graph = graph_pb2.GraphDef()\n with gfile.Open(input_graph, 'rb') as f:\n self.input_graph.ParseFromString(f.read())\n\n self._parse_graph()\n\n self.output_node_names = output_node_names\n self.output_node_maps = {}\n self.output_graph = graph_pb2.GraphDef()\n self.quantized_node_dict = {}\n self.intel_cpu_eightbitize = True\n self.per_channel = per_channel\n self.start_node_name = start_node_name\n self.enable_s8 = False if 
tf.__version__ < '2.1.0' else enable_s8\n\n def apply_the_transform(self):\n \"\"\"\n This is the virtual interface to be implemented by derived class\n :return:\n \"\"\"\n pass\n\n def get_longest_fuse(self):\n pass\n\n def _is_match(self, patterns):\n \"\"\"Detect the rule matched nodes collections.\n\n Returns:\n [List] -- [the matched rule]\n [String] -- [the list contains the matched node name]\n \"\"\"\n matched_node_name = []\n\n for k, v in enumerate(self.op_list):\n if v in set(fusion[0] for fusion in patterns):\n cur_node = self.node_name_mapping[list(\n self.node_name_mapping.keys())[k]].node\n if cur_node.name != self.start_node_name:\n continue\n\n if (v in (\"MatMul\",) or (\n v in (\"Conv2D\", \"DepthwiseConv2dNative\") and not self.enable_s8)) and not self._find_relu_node(\n cur_node):\n continue\n\n for sub_rule in patterns:\n if v != sub_rule[0]:\n continue\n\n sub_rule_len = len(sub_rule)\n logging.debug(\"Try to apply rule: {}\".format(sub_rule))\n\n cur_node_name = list(self.node_name_mapping.keys())[k]\n matched_node_name.append(cur_node_name)\n\n while sub_rule_len > 1:\n if not self.node_name_mapping[cur_node_name].output:\n logging.debug(\"Failed to match {}\".format(sub_rule))\n break\n\n next_node_name = self.node_name_mapping[\n cur_node_name].output[0]\n\n next_node_op = self.node_name_mapping[\n next_node_name].node.op\n is_shared_output = True if len(\n self.node_name_mapping[cur_node_name].output\n ) > 1 else False\n if not is_shared_output and next_node_op == sub_rule[\n 1 - sub_rule_len]:\n matched_node_name.append(next_node_name)\n sub_rule_len -= 1\n cur_node_name = next_node_name\n else:\n matched_node_name.clear()\n logging.debug(\"Failed to match {}\".format(sub_rule))\n break\n\n if sub_rule_len == 1:\n logging.debug(\"match {} on nodes {} \".format(\n sub_rule, matched_node_name))\n return sub_rule, matched_node_name\n\n return None, None\n\n def _need_to_check(self, node_type):\n op_list = (\"ConcatV2\", \"Conv2D\", \"DepthwiseConv2D\", \"QuantizeV2\",\n \"DepthwiseConv2dNative\", \"MaxPool\", \"Requantize\", \"AvgPool\",\n \"Pad\", \"CropAndResize\", \"Dequantize\", \"Mean\")\n return any([node_type.find(i) != -1 for i in op_list])\n\n def _find_relu_node(self, node):\n if node.op in (\"Relu\", \"Relu6\") or node.op.find(\"AndRelu\") != -1:\n return True\n elif (node.op.find(\"QuantizedConv\") != -1 or\n node.op.find(\"QuantizedDepthwiseConv\") != -1\n ) and node.op.find(\"Relu\") == -1:\n return False\n elif self._need_to_check(node.op):\n input_node = self.node_name_mapping[helper.node_name_from_input(\n node.input[0])]\n return self._find_relu_node(input_node.node)\n else:\n return False\n\n def _add_output_node(self, node_name, node):\n if node_name not in self.output_node_maps:\n self.output_node_maps[node_name] = node\n else:\n raise ValueError(\"Duplicate Node Found {} {} {}\".format(\n node_name, node.op, self.output_node_maps[node_name].op))\n\n def _reset_output_node_maps(self):\n self.output_node_maps = {}\n\n def write_graph(self, out_graph_def, out_graph_file):\n \"\"\"Write output graphDef to file.\n\n :param out_graph_def: output graphDef.\n :param out_graph_file: path to output graph file.\n :return: None.\n \"\"\"\n if not isinstance(out_graph_def, tf.compat.v1.GraphDef):\n raise ValueError(\n 'out_graph_def is not instance of TensorFlow GraphDef.')\n if out_graph_file and not os.path.exists(\n os.path.dirname(out_graph_file)):\n raise ValueError('\"output_graph\" directory does not exist.')\n f = gfile.GFile(out_graph_file, 
'wb')\n f.write(out_graph_def.SerializeToString())\n\n def _get_op_list(self):\n self.op_list = []\n for _, v in enumerate(self.node_name_mapping):\n self.op_list.append(self.node_name_mapping[v].node.op)\n\n def _get_node_input(self, node_name):\n \"\"\"\n Return control_input name, non-control_input node name\n \"\"\"\n\n return [\n i for i in self.node_name_mapping[node_name].node.input\n if i[0] == '^'\n ], [\n i for i in self.node_name_mapping[node_name].node.input\n if i[0] != '^'\n ]\n\n def _intel_cpu_add_dequantize_result_node(self,\n quantized_output_name,\n original_node_name,\n dtype=dtypes.quint8,\n min_tensor_index=1):\n min_max_inputs = [\n \"%s:%s\" % (quantized_output_name, min_tensor_index),\n \"%s:%s\" % (quantized_output_name, min_tensor_index + 1)\n ]\n dequantize_name = original_node_name\n\n dequantize_node = helper.create_node(\n \"Dequantize\", dequantize_name,\n [quantized_output_name, min_max_inputs[0], min_max_inputs[1]])\n helper.set_attr_dtype(dequantize_node, \"T\", dtype)\n helper.set_attr_string(dequantize_node, \"mode\",\n b\"SCALED\" if self.per_channel else b\"MIN_FIRST\")\n self.add_output_graph_node(dequantize_node)\n\n def eightbitize_single_input_tensor_node(self, original_node,\n add_op_function):\n quantized_op_name = original_node.name + \"_eightbit_quantized\"\n quantized_op_type = \"Quantized\" + original_node.op\n all_input_names = self._add_eightbit_prologue_nodes(original_node.name)\n quantized_op_node = helper.create_node(quantized_op_type,\n quantized_op_name,\n all_input_names)\n add_op_function(original_node, quantized_op_node)\n self.add_output_graph_node(quantized_op_node)\n self._intel_cpu_add_dequantize_result_node(quantized_op_name,\n original_node.name)\n\n def _add_eightbit_prologue_nodes(self, original_node):\n namespace_prefix = original_node + \"_eightbit\"\n reshape_dims_name, reduction_dims_name = self._add_common_quantization_nodes(\n namespace_prefix,\n self.node_name_mapping[original_node].node.input[0])\n input_names = []\n min_max_names = []\n for each_input_name in self.node_name_mapping[original_node].node.input:\n if each_input_name[0] == '^':\n continue\n input_node_name = helper.node_name_from_input(each_input_name)\n if self.intel_cpu_eightbitize and input_node_name in self.output_node_maps:\n dtype = dtypes.DType(\n self.output_node_maps[input_node_name].attr[\"T\"].type\n ) if self.output_node_maps[\n input_node_name].op == \"Dequantize\" else dtypes.quint8\n else:\n dtype = dtypes.quint8 if self._find_relu_node(\n self.node_name_mapping[original_node].node\n ) else dtypes.qint8\n\n quantize_input_name, min_input_name, max_input_name = (\n self._eightbitize_input_to_node(namespace_prefix,\n each_input_name,\n reshape_dims_name,\n reduction_dims_name,\n dtype=dtype))\n input_names.append(quantize_input_name)\n min_max_names.append(min_input_name)\n min_max_names.append(max_input_name)\n all_input_names = []\n all_input_names.extend(input_names)\n all_input_names.extend(min_max_names)\n\n for original_input_name in self.node_name_mapping[\n original_node].node.input:\n if original_input_name[0] == '^':\n all_input_names.append(original_input_name)\n return all_input_names\n\n def _add_common_quantization_nodes(self,\n namespace_prefix,\n control_input_names=None):\n \"\"\"Builds constant nodes needed for quantization of inputs.\"\"\"\n reshape_dims_name = namespace_prefix + \"_reshape_dims\"\n reduction_dims_name = namespace_prefix + \"_reduction_dims\"\n\n reshape_dims_node = 
helper.create_constant_node(reshape_dims_name, -1,\n dtypes.int32, [1])\n if control_input_names:\n reshape_dims_node.input.append(\"^\" + control_input_names)\n\n self.add_output_graph_node(reshape_dims_node)\n reduction_dims_node = helper.create_constant_node(\n reduction_dims_name, 0, dtypes.int32, [1])\n if control_input_names:\n reduction_dims_node.input.append(\"^\" + control_input_names)\n self.add_output_graph_node(reduction_dims_node)\n return reshape_dims_name, reduction_dims_name\n\n def add_output_graph_node(self, output_node):\n \"\"\"Inserts one node into the new graph.\"\"\"\n self.output_graph.node.extend([output_node])\n self._add_output_node(output_node.name, output_node)\n\n def _parse_graph(self, input_graph=None):\n \"\"\"\n Parse the graph and get the input node and output node name details.\n \"\"\"\n logging.debug(\"start parsing graph\")\n self.node_name_mapping = OrderedDict()\n\n graph = self.input_graph if input_graph is None else input_graph\n for node in graph.node:\n each_node = self.node_details(node=node, input_node=[], output=[])\n\n if node.name in self.node_name_mapping:\n raise ValueError(\n \"Duplicate Node Found when _parse_graph, the node name is {}\"\n .format(node.name))\n\n self.node_name_mapping[node.name] = each_node\n\n for node in graph.node:\n for input in node.input:\n self.node_name_mapping[helper.node_name_from_input(\n input)].output.append(node.name)\n\n def remove_redundant_quantization(self, old_graph):\n old_nodes_map = self.create_nodes_map(old_graph)\n self.output_graph = graph_pb2.GraphDef()\n inputs_to_rename = {}\n # We go through all the nodes, looking for any that match the patterns we\n # know how to optimize away.\n for node in old_graph.node:\n # We always start with a Quantize node, and examine its inputs to see if\n # they are in a form that can be removed.\n if node.op not in [\"Quantize\", \"QuantizeV2\"]:\n continue\n\n dequantize_node_name = helper.node_name_from_input(node.input[0])\n if dequantize_node_name not in old_nodes_map:\n raise ValueError(\"Input node name '\" + dequantize_node_name +\n \"' not found in node '\" + node.name + \"'\")\n dequantize_node = old_nodes_map[dequantize_node_name]\n # Do we have a Dequantize feeding in, with the same type as the Quantize?\n if dequantize_node.op != \"Dequantize\":\n continue\n\n if node.attr[\"T\"] != dequantize_node.attr[\"T\"]:\n continue\n\n # Now look at the other inputs, and ensure they're Min/Max nodes.\n min_node_name = helper.node_name_from_input(node.input[1])\n max_node_name = helper.node_name_from_input(node.input[2])\n min_node = old_nodes_map[min_node_name]\n max_node = old_nodes_map[max_node_name]\n is_min_right_type = (min_node.op in [\"Min\", \"Dequantize\"])\n is_max_right_type = (max_node.op in [\"Max\", \"Dequantize\"])\n if not is_min_right_type or not is_max_right_type:\n print(\"Didn't find expected types on inputs : %s, %s.\" %\n (min_node.op, max_node.op))\n continue\n min_node_input_name = helper.node_name_from_input(min_node.input[0])\n max_node_input_name = helper.node_name_from_input(max_node.input[0])\n # There are two different patterns for Min nodes we can recognize, one\n # where the input comes directly from the same one as the Max, and\n # another where we run it through another Min first, so check for both.\n is_same_input = False\n if min_node_input_name == max_node_input_name:\n is_same_input = True\n else:\n first_min_node_input = old_nodes_map[min_node_input_name]\n if first_min_node_input.op == \"Concat\":\n second_min_node_name 
= helper.node_name_from_input(\n first_min_node_input.input[1])\n second_min_node = old_nodes_map[second_min_node_name]\n if second_min_node.op == \"Min\":\n second_min_node_input_name = helper.node_name_from_input(\n second_min_node.input[0])\n is_same_input = (\n second_min_node_input_name == max_node_input_name)\n if not is_same_input:\n print(\"Different min/max inputs: \" + min_node_input_name)\n continue\n # We recognize this pattern, so mark the graph edges to be rewired to\n # route around it entirely, since we know it's a no-op.\n dequantize_source_name = helper.node_name_from_input(\n dequantize_node.input[0])\n node_tensor_name = helper.ensure_tensor_name_has_port(node.name)\n min_tensor_name = node.name + \":1\"\n max_tensor_name = node.name + \":2\"\n\n inputs_to_rename[node_tensor_name] = dequantize_source_name\n inputs_to_rename[min_tensor_name] = dequantize_node.input[1]\n inputs_to_rename[max_tensor_name] = dequantize_node.input[2]\n # Finally we apply all the rewiring we've marked to the graph.\n for node in old_graph.node:\n for index, input_full_name in enumerate(node.input):\n input_name = helper.ensure_tensor_name_has_port(input_full_name)\n if input_name in inputs_to_rename:\n node.input[index] = inputs_to_rename[input_name]\n self.add_output_graph_node(node)\n return self.output_graph\n\n def apply_final_node_renames(self):\n \"\"\"Applies node renames in self.final_node_renames to self.output_graph.\"\"\"\n old_graph = self.output_graph\n self.output_graph = graph_pb2.GraphDef()\n for node in old_graph.node:\n node.name = self.final_node_renames.get(node.name, node.name)\n for index, input_name in enumerate(node.input):\n node_name = helper.node_name_from_input(input_name)\n input_full_name = helper.ensure_tensor_name_has_port(input_name)\n if node_name in self.final_node_renames:\n node.input[index] = \"%s%s\" % (\n self.final_node_renames[node_name],\n input_full_name[len(node_name):])\n self.add_output_graph_node(node)\n return self.output_graph\n\n def create_nodes_map(self, graph):\n \"\"\"Builds a mapping of node names to their defs from the graph.\"\"\"\n nodes_map = {}\n for node in graph.node:\n if node.name not in nodes_map.keys():\n nodes_map[node.name] = node\n else:\n raise ValueError(\"Duplicate node names detected.\")\n\n return nodes_map\n\n def _add_quantize_down_nodes(self,\n original_node,\n quantized_output_name,\n requantize_type=dtypes.quint8,\n is_relu6=False):\n quantized_outputs = [\n quantized_output_name, quantized_output_name + \":1\",\n quantized_output_name + \":2\"\n ]\n # Add a RequantizationRange node for finding the min and max values.\n requant_range_node = helper.create_node(\n \"RequantizationRangePerChannel\"\n if self.per_channel else \"RequantizationRange\",\n original_node.name + \"_eightbit_requant_range\", quantized_outputs)\n\n if self.per_channel:\n helper.set_attr_dtype(requant_range_node, \"T\", dtypes.qint32)\n if is_relu6:\n helper.set_attr_float(requant_range_node, \"clip_value_max\", 6.0)\n else:\n helper.set_attr_float(requant_range_node, \"clip_value_max\",\n 1e30)\n else:\n helper.set_attr_dtype(requant_range_node, \"Tinput\", dtypes.qint32)\n\n self.add_output_graph_node(requant_range_node)\n min_max_inputs = [\n requant_range_node.name + \":0\", requant_range_node.name + \":1\"\n ]\n requantize_node = helper.create_node(\n \"RequantizePerChannel\" if self.per_channel else \"Requantize\",\n original_node.name + \"_eightbit_requantize\",\n quantized_outputs + min_max_inputs)\n if self.per_channel:\n 
helper.set_attr_dtype(requantize_node, \"T\", dtypes.qint32)\n else:\n helper.set_attr_dtype(requantize_node, \"Tinput\", dtypes.qint32)\n\n helper.set_attr_dtype(requantize_node, \"out_type\", requantize_type)\n self.add_output_graph_node(requantize_node)\n return requantize_node.name\n\n def add_dequantize_result_node(self,\n quantized_output_name,\n original_node_name,\n min_tensor_index=1):\n min_max_inputs = [\n \"%s:%s\" % (quantized_output_name, min_tensor_index),\n \"%s:%s\" % (quantized_output_name, (min_tensor_index + 1))\n ]\n dequantize_name = original_node_name\n\n dequantize_node = helper.create_node(\n \"Dequantize\", dequantize_name,\n [quantized_output_name, min_max_inputs[0], min_max_inputs[1]])\n helper.set_attr_dtype(dequantize_node, \"T\", dtypes.quint8)\n helper.set_attr_string(\n dequantize_node, \"mode\",\n b\"SCALED\" if self.intel_cpu_eightbitize else b\"MIN_FIRST\")\n self.add_output_graph_node(dequantize_node)\n\n def _eightbitize_input_to_node(self,\n namespace_prefix,\n original_input_name,\n reshape_dims_name,\n reduction_dims_name,\n dtype=dtypes.quint8):\n \"\"\"Takes one float input to an op, and converts it to quantized form.\"\"\"\n unique_input_name = helper.unique_node_name_from_input(\n original_input_name)\n if unique_input_name in self.quantized_node_dict:\n quantized_tuple = self.quantized_node_dict[unique_input_name]\n return quantized_tuple[0], quantized_tuple[1], quantized_tuple[2]\n\n reshape_input_name = namespace_prefix + \"_reshape_\" + unique_input_name\n min_input_name = namespace_prefix + \"_min_\" + unique_input_name\n max_input_name = namespace_prefix + \"_max_\" + unique_input_name\n quantize_input_name = namespace_prefix + \"_quantize_\" + unique_input_name\n reshape_input_node = helper.create_node(\n \"Reshape\", reshape_input_name,\n [original_input_name, reshape_dims_name])\n helper.set_attr_dtype(reshape_input_node, \"T\", dtypes.float32)\n self.add_output_graph_node(reshape_input_node)\n min_input_node = helper.create_node(\n \"Min\", min_input_name, [reshape_input_name, reduction_dims_name])\n helper.set_attr_dtype(min_input_node, \"T\", dtypes.float32)\n helper.set_attr_dtype(min_input_node, \"Tidx\", dtypes.int32)\n helper.set_attr_bool(min_input_node, \"keep_dims\", False)\n self.add_output_graph_node(min_input_node)\n max_input_node = helper.create_node(\n \"Max\", max_input_name, [reshape_input_name, reduction_dims_name])\n helper.set_attr_dtype(max_input_node, \"T\", dtypes.float32)\n helper.set_attr_dtype(max_input_node, \"Tidx\", dtypes.int32)\n helper.set_attr_bool(max_input_node, \"keep_dims\", False)\n self.add_output_graph_node(max_input_node)\n quantize_input_node = helper.create_node(\n \"QuantizeV2\", quantize_input_name,\n [original_input_name, min_input_name, max_input_name])\n\n helper.set_attr_dtype(quantize_input_node, \"T\", dtype)\n\n helper.set_attr_string(quantize_input_node, \"mode\", b\"SCALED\")\n helper.set_attr_string(quantize_input_node, \"round_mode\",\n b\"HALF_TO_EVEN\")\n # if FLAGS.model_name in [\"wide_deep_large_ds\"]:\n # set_attr_string(quantize_input_node, \"mode\", b\"MIN_FIRST\")\n # else:\n # set_attr_string(quantize_input_node, \"mode\",\n # b\"SCALED\" if self.intel_cpu_eightbitize else b\"MIN_FIRST\")\n # set_attr_string(quantize_input_node, \"round_mode\",\n # b\"HALF_TO_EVEN\" if self.intel_cpu_eightbitize\n # else b\"HALF_AWAY_FROM_ZERO\")\n self.add_output_graph_node(quantize_input_node)\n min_output_name = quantize_input_name + \":1\"\n max_output_name = quantize_input_name + 
\":2\"\n self.quantized_node_dict[unique_input_name] = (quantize_input_name,\n min_output_name,\n max_output_name)\n return quantize_input_name, min_output_name, max_output_name\n\n def _intel_cpu_quantize_weight_eightbit(self,\n parent,\n input_node,\n per_channel,\n quantization_mode=b\"SCALED\"):\n base_name = input_node.name + \"_\"\n qint8_const_name = base_name + \"qint8_const\"\n min_name = base_name + \"min\"\n max_name = base_name + \"max\"\n float_tensor = tensor_util.MakeNdarray(input_node.attr[\"value\"].tensor)\n epsilon = 1e-4 # Needs to be set empirically if accuracy is not satisfactory\n if parent in (\"Conv2D\", \"MatMul\"):\n if per_channel:\n ranges = np.abs(float_tensor).max(axis=(0, 1, 2))\n min_value = -ranges\n max_value = ranges\n # nudging min-max values outside epsilon radius around zero\n ranges[ranges < epsilon] = epsilon\n min_value[np.abs(min_value) < epsilon] = -epsilon\n max_value[np.abs(max_value) < epsilon] = epsilon\n qint8_tensor = (float_tensor * 127.0 / ranges).astype(np.int8)\n else:\n min_value = np.min(float_tensor.flatten())\n max_value = np.max(float_tensor.flatten())\n # Same processing of min-max as in quantize_weight_eightbit\n # function.\n if min_value > 0.0:\n min_value = 0.0\n if min_value == max_value:\n if abs(min_value) < 0.000001:\n max_value = min_value + 1.0\n elif min_value > 0:\n max_value = 2 * min_value\n else:\n max_value = min_value / 2.0\n\n sess = session.Session()\n with sess.as_default():\n quantize_op = array_ops.quantize_v2(\n float_tensor,\n min_value,\n max_value,\n dtypes.qint8,\n mode=quantization_mode,\n round_mode=\"HALF_TO_EVEN\")\n qint8_tensor = quantize_op[0].eval()\n # Updated min-max values should be passed to the next feeding node.\n min_value = quantize_op[1].eval()\n max_value = quantize_op[2].eval()\n elif parent == \"DepthwiseConv2dNative\":\n # get the max values based on dim 0 and 1 for depthwise conv\n # since, the output channel will be dim 2 * dim 3\n ranges = np.abs(float_tensor).max(axis=(0, 1))\n ranges = ranges.flatten()\n min_value = -ranges\n max_value = ranges\n # nudging min-max values outside epsilon radius around zero\n ranges[ranges < epsilon] = epsilon\n min_value[np.abs(min_value) < epsilon] = -epsilon\n max_value[np.abs(max_value) < epsilon] = epsilon\n # Since output channel will be 1 dim which is dim 2 * dim 3\n # When divide by range, qint8_tensor needs to be 3 dim\n # where, 3rd dim should be same dim of ranges\n a, b, c, d = float_tensor.shape\n qint8_tensor = (float_tensor.reshape(a, b, c * d) * 127.0 /\n ranges).astype(np.int8)\n # get the shape back to 4 dim\n qint8_tensor = qint8_tensor.reshape(a, b, c, d)\n shape = tensor_util.TensorShapeProtoToList(\n input_node.attr[\"value\"].tensor.tensor_shape)\n qint8_const_node = helper.create_constant_node(qint8_const_name,\n qint8_tensor,\n dtypes.qint8,\n shape=shape)\n\n min_node = helper.create_constant_node(min_name, min_value,\n dtypes.float32)\n\n max_node = helper.create_constant_node(max_name, max_value,\n dtypes.float32)\n\n dequantize_node = helper.create_node(\n \"Dequantize\", input_node.name,\n [qint8_const_name, min_name, max_name])\n\n helper.set_attr_dtype(dequantize_node, \"T\", dtypes.qint8)\n helper.set_attr_string(dequantize_node, \"mode\", b\"SCALED\")\n self.add_output_graph_node(qint8_const_node)\n self.add_output_graph_node(min_node)\n self.add_output_graph_node(max_node)\n 
self.add_output_graph_node(dequantize_node)\n","sub_path":"api/intel_quantization/quantize_graph/quantize_graph_base.py","file_name":"quantize_graph_base.py","file_ext":"py","file_size_in_byte":29931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"634046594","text":"import time\nimport os\nimport sys\nimport termios\nimport tty\nimport threading\nimport json\nimport serial\nimport serial.tools.list_ports\n\nfrom pymycobot.mycobot import MyCobot\n\n\nport: str\nmc: MyCobot\nsp: int = 80\n\n\ndef setup():\n print(\"\")\n global port, mc\n plist = list(serial.tools.list_ports.comports())\n idx = 1\n for port in plist:\n print(\"{} : {}\".format(idx, port))\n idx += 1\n\n _in = input(\"\\nPlease input 1 - {} to choice:\".format(idx - 1))\n port = str(plist[int(_in) - 1]).split(\" - \")[0].strip()\n print(port)\n print(\"\")\n\n baud = 115200\n _baud = input(\"Please input baud(default:115200):\")\n try:\n baud = int(_baud)\n except Exception:\n pass\n print(baud)\n print(\"\")\n\n DEBUG = False\n f = input(\"Wether DEBUG mode[Y/n]:\")\n if f in [\"y\", \"Y\", \"yes\", \"Yes\"]:\n DEBUG = True\n # mc = MyCobot(port, debug=True)\n mc = MyCobot(port, baud, debug=DEBUG)\n\n\nclass Raw(object):\n \"\"\"Set raw input mode for device\"\"\"\n\n def __init__(self, stream):\n self.stream = stream\n self.fd = self.stream.fileno()\n\n def __enter__(self):\n self.original_stty = termios.tcgetattr(self.stream)\n tty.setcbreak(self.stream)\n\n def __exit__(self, type, value, traceback):\n termios.tcsetattr(self.stream, termios.TCSANOW, self.original_stty)\n\n\nclass Helper(object):\n def __init__(self) -> None:\n self.w, self.h = os.get_terminal_size()\n\n def echo(self, msg):\n print(\"\\r{}\".format(\" \" * self.w), end=\"\")\n print(\"\\r{}\".format(msg), end=\"\")\n\n\nclass TeachingTest(Helper):\n def __init__(self, mycobot) -> None:\n super().__init__()\n self.mc = mycobot\n self.recording = False\n self.playing = False\n self.record_list = []\n self.record_t = None\n self.play_t = None\n\n def record(self):\n self.record_list = []\n self.recording = True\n\n def _record():\n start_t = time.time()\n\n while self.recording:\n angles = self.mc.get_angles()\n if angles:\n self.record_list.append(angles)\n time.sleep(0.1)\n print(\"\\r {}\".format(time.time() - start_t), end=\"\")\n\n self.echo(\"Start recording.\")\n self.record_t = threading.Thread(target=_record, daemon=True)\n self.record_t.start()\n\n def stop_record(self):\n if self.recording:\n self.recording = False\n self.record_t.join()\n self.echo(\"Stop record\")\n\n def play(self):\n self.echo(\"Start play\")\n for angles in self.record_list:\n # print(angles)\n self.mc.send_angles(angles, 80)\n time.sleep(0.1)\n self.echo(\"Finish play\")\n\n def loop_play(self):\n self.playing = True\n\n def _loop():\n len_ = len(self.record_list)\n i = 0\n while self.playing:\n idx_ = i % len_\n i += 1\n self.mc.send_angles(self.record_list[idx_], 80)\n time.sleep(0.1)\n\n self.echo(\"Start loop play.\")\n self.play_t = threading.Thread(target=_loop, daemon=True)\n self.play_t.start()\n\n def stop_loop_play(self):\n if self.playing:\n self.playing = False\n self.play_t.join()\n self.echo(\"Stop loop play.\")\n\n def save_to_local(self):\n if not self.record_list:\n self.echo(\"No data should save.\")\n return\n\n with open(os.path.dirname(__file__) + \"/record.txt\", \"w\") as f:\n json.dump(self.record_list, f, indent=2)\n self.echo(\"save dir: {}\".format(os.path.dirname(__file__)))\n\n def 
load_from_local(self):\n\n with open(os.path.dirname(__file__) + \"/record.txt\", \"r\") as f:\n try:\n data = json.load(f)\n self.record_list = data\n self.echo(\"Load data success.\")\n except Exception:\n self.echo(\"Error: invalid data.\")\n\n def print_menu(self):\n print(\n \"\"\"\\\n \\r q: quit\n \\r r: start record\n \\r c: stop record\n \\r p: play once\n \\r P: loop play / stop loop play\n \\r s: save to local\n \\r l: load from local\n \\r f: release mycobot\n \\r----------------------------------\n \"\"\"\n )\n\n def start(self):\n self.print_menu()\n\n while not False:\n with Raw(sys.stdin):\n key = sys.stdin.read(1)\n if key == \"q\":\n break\n elif key == \"r\": # recorder\n self.record()\n elif key == \"c\": # stop recorder\n self.stop_record()\n elif key == \"p\": # play\n self.play()\n elif key == \"P\": # loop play\n if not self.playing:\n self.loop_play()\n else:\n self.stop_loop_play()\n elif key == \"s\": # save to local\n self.save_to_local()\n elif key == \"l\": # load from local\n self.load_from_local()\n elif key == \"f\": # free move\n self.mc.release_all_servos()\n self.echo(\"Released\")\n else:\n print(key)\n continue\n\n\nif __name__ == \"__main__\":\n setup()\n recorder = TeachingTest(mc)\n recorder.start()\n","sub_path":"demo/drag_trial_teaching.py","file_name":"drag_trial_teaching.py","file_ext":"py","file_size_in_byte":5538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"357401403","text":"#!/usr/bin/env python\n\nimport sys\n\nwith open(sys.argv[1], 'r') as my_file:\n contents = my_file.read()\n \nwords = contents.split('\\n')\n\nletters = [\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\",\"I\",\"J\",\"K\",\"L\",\"M\",\"N\",\"O\",\"P\",\"Q\",\"R\",\"S\",\"T\",\"U\",\"V\",\"W\",\"X\",\"Y\",\"Z\"]\n\nrank = dict(zip(letters, [i for i in range(1,27)]))\n\nlength = int(words[0])\n\nfor w in range(1, length+1):\n current = words[w]\n if current == \"\":\n continue\n s = \"\"\n si = current[0]\n s += si\n if len(current) > 1:\n for c in current[1:]:\n if rank[c] >= rank[s[0]]:\n s = c + s\n else:\n s += c\n s = \"Case #\" + str(w) + \": \" + s\n print(s) \n \n\n\n\n\n","sub_path":"problem-1a/thelastword.py","file_name":"thelastword.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"523656192","text":"from functools import partial\r\nimport logging\r\nimport sys\r\n\r\nimport pyqtgraph as pq\r\nfrom PySide2.QtWidgets import QApplication, QMainWindow\r\n\r\nfrom pattern import SLM, AnnularMask, Field, Objective\r\nfrom pattern.dialog import Ui_Dialog\r\n\r\n__all__ = [\"Dialog\"]\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\ndef connect_signals_to_callbacks(signals, callbacks):\r\n for signal in signals:\r\n for callback in callbacks:\r\n signal.connect(callback)\r\n\r\n\r\nclass Dialog(QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n self.ui = Ui_Dialog()\r\n\r\n self.setup_ui()\r\n\r\n ##\r\n\r\n ##\r\n\r\n def regenerate(self):\r\n wavelength = self.ui.wavelength_spinbox.value()\r\n mag = self.ui.system_magnification_spinbox.value()\r\n\r\n print(dir(self))\r\n # attempt to initizlie uninit components\r\n init_funcs = {\r\n \"slm\": self._update_slm,\r\n \"mask\": self._update_mask,\r\n \"objective\": self._update_objective,\r\n }\r\n for name, func in init_funcs.items():\r\n if not hasattr(self, f\"_{name}\"):\r\n logger.debug(f'implicit update \"{name}\"')\r\n func()\r\n\r\n # 
create field\r\n field = Field(self._slm, self._mask, self._objective, wavelength, mag)\r\n\r\n field = Bessel(3.824, 2.689)(field)\r\n results = field.simulate()\r\n\r\n image = pq.ImageItem(results[\"ideal\"])\r\n self.ui.ideal.addItem(image)\r\n\r\n # complete update, disable\r\n self.ui.regenerate.setEnabled(False)\r\n\r\n ##\r\n\r\n def setup_ui(self):\r\n # generate layout from the ui file\r\n self.ui.setupUi(self)\r\n\r\n self._setup_slm_parameters()\r\n self._setup_mask_parameters()\r\n self._setup_objective_parameters()\r\n self._setup_system_parameters()\r\n\r\n self._setup_binarize_parameters()\r\n\r\n self._setup_bessel_parameters()\r\n self._setup_linear_bessel_parameters()\r\n self._setup_tiling_parameters()\r\n\r\n self.ui.regenerate.clicked.connect(self.regenerate)\r\n\r\n def _setup_slm_parameters(self):\r\n # populate screen size options\r\n for size in (\"QXGA\", \"SXGA\"):\r\n self.ui.screensize_combobox.addItem(size)\r\n\r\n signals = [\r\n self.ui.screensize_combobox.currentIndexChanged,\r\n self.ui.pixel_size_spinbox.valueChanged,\r\n self.ui.focal_length_spinbox.valueChanged,\r\n ]\r\n callbacks = [self._update_slm, self._requires_regenerate]\r\n connect_signals_to_callbacks(signals, callbacks)\r\n\r\n def _setup_mask_parameters(self):\r\n signals = [\r\n self.ui.mask_od_spinbox.valueChanged,\r\n self.ui.mask_id_spinbox.valueChanged,\r\n ]\r\n callbacks = [self._update_mask, self._requires_regenerate]\r\n connect_signals_to_callbacks(signals, callbacks)\r\n\r\n def _setup_objective_parameters(self):\r\n signals = [\r\n self.ui.objective_magnification_spinbox.valueChanged,\r\n self.ui.objective_na_spinbox.valueChanged,\r\n self.ui.tube_lens_spinbox.valueChanged,\r\n ]\r\n callbacks = [self._update_objective, self._requires_regenerate]\r\n connect_signals_to_callbacks(signals, callbacks)\r\n\r\n def _setup_system_parameters(self):\r\n signals = [\r\n self.ui.wavelength_spinbox.valueChanged,\r\n self.ui.system_magnification_spinbox.valueChanged,\r\n self.ui.dither_steps_spinbox.valueChanged,\r\n self.ui.dither_interval_spinbox.valueChanged,\r\n ]\r\n callbacks = [self._update_system, self._requires_regenerate]\r\n connect_signals_to_callbacks(signals, callbacks)\r\n\r\n self.ui.dither_steps_spinbox.valueChanged.connect(self._toggle_dithering)\r\n\r\n def _setup_binarize_parameters(self):\r\n pass\r\n\r\n def _setup_bessel_parameters(self):\r\n pass\r\n\r\n def _setup_linear_bessel_parameters(self):\r\n self.ui.bessel_parameters.toggled.connect(self._toggle_bessel_array)\r\n self.ui.same_as_mask.toggled.connect(self._toggle_same_as_mask)\r\n\r\n self.ui.fill_screen_checkbox.toggled.connect(self._toggle_fill_screen)\r\n self.ui.auto_spacing.toggled.connect(self._toggle_auto_spacing)\r\n\r\n def _setup_tiling_parameters(self):\r\n pass\r\n\r\n ##\r\n\r\n def _requires_regenerate(self):\r\n self.ui.regenerate.setEnabled(True)\r\n\r\n ##\r\n\r\n def _update_slm(self):\r\n logger.debug(\"update slm\")\r\n\r\n size = {\"QXGA\": (1536, 2048), \"SXGA\": (1024, 1280)}[\r\n self.ui.screensize_combobox.currentText()\r\n ]\r\n self._slm = SLM(\r\n size,\r\n (self.ui.pixel_size_spinbox.value(),) * 2,\r\n self.ui.focal_length_spinbox.value(),\r\n )\r\n\r\n def _update_mask(self):\r\n logger.debug(\"update mask\")\r\n\r\n # clear na\r\n self.ui.mask_od_na.setText(\"-\")\r\n self.ui.mask_id_na.setText(\"-\")\r\n\r\n d_out = self.ui.mask_od_spinbox.value()\r\n d_in = self.ui.mask_id_spinbox.value()\r\n self._mask = AnnularMask(d_out, d_in)\r\n\r\n def _update_objective(self):\r\n 
logger.debug(\"update objective\")\r\n\r\n mag = self.ui.objective_magnification_spinbox.value()\r\n na = self.ui.objective_na_spinbox.value()\r\n tl = self.ui.tube_lens_spinbox.value()\r\n self._objective = Objective(mag, na, tl)\r\n\r\n def _update_system(self):\r\n pass\r\n\r\n ##\r\n\r\n def _toggle_dithering(self, n_steps):\r\n self.ui.dither_interval_spinbox.setEnabled(n_steps > 1)\r\n\r\n def _toggle_bessel_array(self, active):\r\n if not active:\r\n self.ui.linear_bessel_array_parameters.setChecked(False)\r\n\r\n def _toggle_same_as_mask(self, active):\r\n self.ui.bessel_od_spinbox.setEnabled(not active)\r\n self.ui.bessel_id_spinbox.setEnabled(not active)\r\n\r\n def _toggle_fill_screen(self, active):\r\n self.ui.fill_screen_spinbox.setEnabled(active)\r\n\r\n def _toggle_auto_spacing(self, active):\r\n self.ui.spacing_spinbox.setEnabled(not active)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import coloredlogs\r\n\r\n logging.getLogger(\"matplotlib\").setLevel(logging.ERROR)\r\n coloredlogs.install(\r\n level=\"DEBUG\", fmt=\"%(asctime)s %(levelname)s %(message)s\", datefmt=\"%H:%M:%S\"\r\n )\r\n\r\n app = QApplication(sys.argv)\r\n\r\n window = Dialog()\r\n window.show()\r\n\r\n sys.exit(app.exec_())\r\n","sub_path":"pattern/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":6406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"247143021","text":"\"\"\"Crie um programa que leia o nome e o preço de vários produtos.\r\nO programa deverá perguntar se o usuário vai continuar ou não. No final, mostre:\r\nA) qual é o total gasto na compra.\r\nB) quantos produtos custam mais de R$1000.\r\nC) qual é o nome do produto mais barato. \"\"\"\r\nsoma = mil = cont = menor = 0\r\nbarato = ' '\r\nwhile True:\r\n item = str(input('Nome do produto: ')).strip()\r\n preço = float(input('Preço: R$ '))\r\n soma += preço\r\n cont += 1\r\n if cont == 1 or preço < menor:\r\n menor = preço\r\n barato = item\r\n if preço > 1000:\r\n mil += 1\r\n continuar = ' '\r\n while continuar not in \"SN\":\r\n continuar = str(input('Deseja continuar? [S/N] ')).strip().upper()[0]\r\n if continuar == 'N':\r\n break\r\nprint(f'O valor total da compra foi de R${soma:.2f}. {mil} produtos custaram mais de R$1000,00.'\r\n f'O produto mais barato foi: {barato} custando R$ {menor:.2f}')\r\n\r\n","sub_path":"ex070.py","file_name":"ex070.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"332501878","text":"# Copyright 2018 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"tensorflow_io.experimental.IODataset\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom tensorflow_io.core.python.ops import io_dataset\nfrom tensorflow_io.core.python.experimental import libsvm_dataset_ops\nfrom tensorflow_io.core.python.experimental import image_dataset_ops\n\nclass IODataset(io_dataset.IODataset):\n \"\"\"IODataset\"\"\"\n\n #=============================================================================\n # Factory Methods\n #=============================================================================\n\n @classmethod\n def from_libsvm(cls,\n filename,\n num_features,\n dtype=None,\n label_dtype=None,\n compression_type='',\n **kwargs):\n \"\"\"Creates an `IODataset` from a libsvm file.\n\n Args:\n filename: A `tf.string` tensor containing one or more filenames.\n num_features: The number of features.\n dtype(Optional): The type of the output feature tensor.\n Default to tf.float32.\n label_dtype(Optional): The type of the output label tensor.\n Default to tf.int64.\n compression_type: (Optional.) 
A `tf.string` scalar evaluating to one of\n `\"\"` (no compression), `\"ZLIB\"`, or `\"GZIP\"`.\n name: A name prefix for the IOTensor (optional).\n\n Returns:\n A `IODataset`.\n\n \"\"\"\n with tf.name_scope(kwargs.get(\"name\", \"IOFromLibSVM\")):\n return libsvm_dataset_ops.LibSVMIODataset(\n filename, num_features,\n dtype=dtype, label_dtype=label_dtype,\n compression_type=compression_type,\n internal=True, **kwargs)\n\n @classmethod\n def from_tiff(cls,\n filename,\n **kwargs):\n \"\"\"Creates an `IODataset` from a TIFF file.\n\n Args:\n filename: A string, the filename of a TIFF file.\n name: A name prefix for the IOTensor (optional).\n\n Returns:\n A `IODataset`.\n\n \"\"\"\n with tf.name_scope(kwargs.get(\"name\", \"IOFromTIFF\")):\n return image_dataset_ops.TIFFIODataset(\n filename, internal=True)\n","sub_path":"tensorflow_io/core/python/experimental/io_dataset_ops.py","file_name":"io_dataset_ops.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"431493681","text":"import numpy as np\n\ndef f(i):\n alpha = float(display['alpha'])\n \n o = np.zeros(len(i))\n o[0] = o[0]\n \n for j, s in enumerate(i[1:]):\n o[j+1] = alpha * i[j+1] + (1-alpha) * o[j]\n \n return o","sub_path":"server/PyWeaver/lib/core/Signal Analysis/Filter/Exponential smoothing/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"175857264","text":"from contextlib import contextmanager\n\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, String, Integer, create_engine\n\nBase = declarative_base()\n\n\nclass SQLAlchemyStorage(object):\n def __init__(self, sqlalchemy_connection_string):\n self.engine = create_engine(sqlalchemy_connection_string)\n Base.metadata.create_all(self.engine)\n\n def wipe_database(self):\n Base.metadata.drop_all(self.engine)\n Base.metadata.create_all(self.engine)\n\n # taken from: http://docs.sqlalchemy.org/en/latest/orm/session_basics.html\n @contextmanager\n def session_scope(self):\n \"\"\"Provide a transactional scope around a series of operations.\"\"\"\n from sqlalchemy.orm import sessionmaker\n session = sessionmaker(bind=self.engine)()\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()\n\n\nclass Record(Base):\n __tablename__ = 'records'\n\n id = Column(Integer, primary_key=True, unique=True, autoincrement=True)\n message = Column(String)\n","sub_path":"src/pydemo/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"195668835","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n tests._test_msui.test_tableview\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n This module provides pytest functions to tests msui.tableview\n\n This file is part of MSS.\n\n :copyright: Copyright 2017 Joern Ungermann\n :copyright: Copyright 2017-2023 by the MSS team, see AUTHORS.\n :license: APACHE-2.0, see LICENSE for details.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" 
BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport mock\nimport os\nimport pytest\nimport sys\n\nfrom PyQt5 import QtWidgets, QtCore, QtTest\nfrom mslib.msui import flighttrack as ft\nfrom mslib.msui.performance_settings import DEFAULT_PERFORMANCE\nimport mslib.msui.tableview as tv\n\n\nclass Test_TableView(object):\n def setup_method(self):\n self.application = QtWidgets.QApplication(sys.argv)\n\n # Create an initital flight track.\n initial_waypoints = [ft.Waypoint(flightlevel=0, location=\"EDMO\", comments=\"take off OP\"),\n ft.Waypoint(48.10, 10.27, 200),\n ft.Waypoint(52.32, 09.21, 200),\n ft.Waypoint(52.55, 09.99, 200),\n ft.Waypoint(flightlevel=0, location=\"Hamburg\", comments=\"landing HH\")]\n\n waypoints_model = ft.WaypointsTableModel(\"\")\n waypoints_model.insertRows(\n 0, rows=len(initial_waypoints), waypoints=initial_waypoints)\n\n self.window = tv.MSUITableViewWindow(model=waypoints_model)\n self.window.show()\n\n QtWidgets.QApplication.processEvents()\n QtTest.QTest.qWaitForWindowExposed(self.window)\n QtWidgets.QApplication.processEvents()\n\n def teardown_method(self):\n self.window.hide()\n QtWidgets.QApplication.processEvents()\n self.application.quit()\n QtWidgets.QApplication.processEvents()\n\n def test_open_hex(self):\n \"\"\"\n Tests opening the hexagon dock widget.\n \"\"\"\n self.window.cbTools.currentIndexChanged.emit(1)\n QtWidgets.QApplication.processEvents()\n assert len(self.window.docks) == 2\n assert self.window.docks[0] is not None\n assert self.window.docks[1] is None\n\n def test_open_perf_settings(self):\n \"\"\"\n Tests opening the performance settings dock widget.\n \"\"\"\n self.window.cbTools.currentIndexChanged.emit(2)\n QtWidgets.QApplication.processEvents()\n assert len(self.window.docks) == 2\n assert self.window.docks[0] is None\n assert self.window.docks[1] is not None\n\n @mock.patch(\"PyQt5.QtWidgets.QMessageBox.question\",\n return_value=QtWidgets.QMessageBox.Yes)\n def test_insertremove_hexagon(self, mockbox):\n \"\"\"\n Test inserting and removing hexagons in TableView using the Hexagon dockwidget\n \"\"\"\n self.window.cbTools.currentIndexChanged.emit(1)\n QtWidgets.QApplication.processEvents()\n assert len(self.window.waypoints_model.waypoints) == 5\n QtTest.QTest.mouseClick(self.window.docks[0].widget().pbAddHexagon, QtCore.Qt.LeftButton)\n QtWidgets.QApplication.processEvents()\n assert len(self.window.waypoints_model.waypoints) == 12\n assert mockbox.call_count == 0\n QtTest.QTest.mouseClick(self.window.docks[0].widget().pbRemoveHexagon, QtCore.Qt.LeftButton)\n QtWidgets.QApplication.processEvents()\n assert mockbox.call_count == 1\n assert len(self.window.waypoints_model.waypoints) == 5\n\n @mock.patch(\"PyQt5.QtWidgets.QMessageBox.critical\")\n @mock.patch(\"mslib.msui.performance_settings.get_open_filename\",\n return_value=os.path.join(\n os.path.dirname(__file__), \"..\", \"data\", \"performance_simple.json\"))\n def test_performance(self, mockopen, mockcrit):\n \"\"\"\n Check effect of performance settings on TableView\n \"\"\"\n self.window.cbTools.currentIndexChanged.emit(2)\n QtWidgets.QApplication.processEvents()\n\n self.window.waypoints_model.performance_settings = DEFAULT_PERFORMANCE\n self.window.waypoints_model.update_distances(0)\n self.window.waypoints_model.dataChanged.emit(\n self.window.waypoints_model.index(0, 0), self.window.waypoints_model.index(0, 0))\n 
self.window.resizeColumns()\n assert self.window.waypoints_model.columnCount() == 15\n visible = dict(DEFAULT_PERFORMANCE)\n visible[\"visible\"] = True\n self.window.waypoints_model.performance_settings = visible\n self.window.waypoints_model.update_distances(0)\n self.window.waypoints_model.dataChanged.emit(\n self.window.waypoints_model.index(0, 0), self.window.waypoints_model.index(0, 0))\n self.window.resizeColumns()\n assert self.window.waypoints_model.columnCount() == 15\n # todo this does not check that actually something happens\n QtTest.QTest.mouseClick(self.window.docks[1].widget().pbLoadPerformance, QtCore.Qt.LeftButton)\n QtWidgets.QApplication.processEvents()\n assert mockopen.call_count == 1\n assert mockcrit.call_count == 0\n\n def test_insert_point(self):\n \"\"\"\n Check insertion of points\n \"\"\"\n item = self.window.tableWayPoints.visualRect(\n self.window.waypoints_model.index(2, 0))\n QtTest.QTest.mouseClick(\n self.window.tableWayPoints.viewport(),\n QtCore.Qt.LeftButton, QtCore.Qt.NoModifier, item.center())\n assert len(self.window.waypoints_model.waypoints) == 5\n wps = list(self.window.waypoints_model.waypoints)\n QtTest.QTest.mouseClick(self.window.btAddWayPointToFlightTrack, QtCore.Qt.LeftButton)\n QtWidgets.QApplication.processEvents()\n wps2 = self.window.waypoints_model.waypoints\n assert len(self.window.waypoints_model.waypoints) == 6\n assert all(_x == _y for _x, _y in zip(wps[:3], wps2[:3])), (wps, wps2)\n assert all(_x == _y for _x, _y in zip(wps[3:], wps2[4:])), (wps, wps2)\n\n def test_clone_point(self):\n \"\"\"\n Check cloning of points\n \"\"\"\n item = self.window.tableWayPoints.visualRect(\n self.window.waypoints_model.index(2, 0))\n QtTest.QTest.mouseClick(\n self.window.tableWayPoints.viewport(),\n QtCore.Qt.LeftButton, QtCore.Qt.NoModifier, item.center())\n assert len(self.window.waypoints_model.waypoints) == 5\n wps = list(self.window.waypoints_model.waypoints)\n QtTest.QTest.mouseClick(self.window.btCloneWaypoint, QtCore.Qt.LeftButton)\n QtWidgets.QApplication.processEvents()\n wps2 = self.window.waypoints_model.waypoints\n assert len(self.window.waypoints_model.waypoints) == 6\n assert all(_x == _y for _x, _y in zip(wps[:3], wps2[:3])), (wps, wps2)\n assert all(_x == _y for _x, _y in zip(wps[3:], wps2[4:])), (wps, wps2)\n\n @mock.patch(\"PyQt5.QtWidgets.QMessageBox.question\",\n return_value=QtWidgets.QMessageBox.Yes)\n def test_remove_point(self, mockbox):\n \"\"\"\n Check insertion of points\n \"\"\"\n item = self.window.tableWayPoints.visualRect(\n self.window.waypoints_model.index(1, 0))\n QtTest.QTest.mouseClick(\n self.window.tableWayPoints.viewport(),\n QtCore.Qt.LeftButton, QtCore.Qt.NoModifier, item.center())\n assert len(self.window.waypoints_model.waypoints) == 5\n wps = list(self.window.waypoints_model.waypoints)\n QtTest.QTest.mouseClick(self.window.btDeleteWayPoint, QtCore.Qt.LeftButton)\n QtWidgets.QApplication.processEvents()\n wps2 = self.window.waypoints_model.waypoints\n assert mockbox.call_count == 1\n assert len(self.window.waypoints_model.waypoints) == 4\n assert all([_x == _y for _x, _y in zip(wps[:1], wps2[:1])])\n assert all([_x == _y for _x, _y in zip(wps[2:], wps2[1:])])\n\n def test_reverse_points(self):\n \"\"\"\n Check insertion of points\n \"\"\"\n wps = list(self.window.waypoints_model.waypoints)\n QtTest.QTest.mouseClick(self.window.btInvertDirection, QtCore.Qt.LeftButton)\n QtWidgets.QApplication.processEvents()\n wps2 = self.window.waypoints_model.waypoints\n assert all([_x == _y for _x, _y in 
zip(wps[::-1], wps2)])\n\n def test_drag_point(self):\n \"\"\"\n Check insertion of points\n \"\"\"\n\n pytest.skip(\"drag/drop testing does not seem to work o qt5.\")\n\n assert len(self.window.waypoints_model.waypoints) == 5\n wps_before = list(self.window.waypoints_model.waypoints)\n item1 = self.window.tableWayPoints.visualRect(\n self.window.waypoints_model.index(2, 0))\n item2 = self.window.tableWayPoints.visualRect(\n self.window.waypoints_model.index(3, 0))\n QtTest.QTest.mousePress(\n self.window.tableWayPoints.viewport(),\n QtCore.Qt.LeftButton, QtCore.Qt.NoModifier, item1.center())\n QtWidgets.QApplication.processEvents()\n QtTest.QTest.mouseMove(\n self.window.tableWayPoints.viewport(),\n item2.center())\n QtWidgets.QApplication.processEvents()\n QtTest.QTest.mouseRelease(\n self.window.tableWayPoints.viewport(),\n QtCore.Qt.LeftButton, QtCore.Qt.NoModifier, item2.center())\n QtWidgets.QApplication.processEvents()\n assert len(self.window.waypoints_model.waypoints) == 5\n wps_after = list(self.window.waypoints_model.waypoints)\n assert wps_before != wps_after, (wps_before, wps_after)\n\n @mock.patch(\"PyQt5.QtWidgets.QMessageBox\")\n def test_roundtrip(self, mockbox):\n \"\"\"\n Test connecting the last and first point\n Test connecting the first point to itself\n \"\"\"\n count = len(self.window.waypoints_model.waypoints)\n\n # Test if the last waypoint connects to the first\n self.window.update_roundtrip_enabled()\n assert self.window.is_roundtrip_possible()\n self.window.make_roundtrip()\n assert len(self.window.waypoints_model.waypoints) == count + 1\n first = self.window.waypoints_model.waypoints[0]\n dupe = self.window.waypoints_model.waypoints[-1]\n assert first.lat == dupe.lat and first.lon == dupe.lon\n\n # Check if roundtrip is disabled if the last and first point are equal\n self.window.update_roundtrip_enabled()\n assert not self.window.is_roundtrip_possible()\n assert not self.window.btRoundtrip.isEnabled()\n self.window.make_roundtrip()\n assert len(self.window.waypoints_model.waypoints) == count + 1\n\n # Remove connection\n self.window.waypoints_model.removeRows(count, 1)\n assert len(self.window.waypoints_model.waypoints) == count\n assert mockbox.critical.call_count == 0\n","sub_path":"tests/_test_msui/test_tableview.py","file_name":"test_tableview.py","file_ext":"py","file_size_in_byte":11269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"160203276","text":"import sys, os, errno\nimport socket as sk\nimport struct\nfrom inspect import currentframe, getframeinfo\nimport binascii\nimport codecs\nimport time\n\n#Return values\nOK = 0\nERROR = 1\nIN_USE = 2\nNO_SUCH_PLAYER = 3\n\n#Primitives part\nREGISTER = 0\nCONN_INIT = 1\nDATA = 2\nCONN_CLOSE = 3\nRESULT = 4\n\ndef primToStr(prim):\n if prim == REGISTER:\n return \"REGISTER\"\n elif prim == CONN_INIT:\n return \"CONN_INIT\"\n elif prim == DATA:\n return \"DATA\"\n elif prim == CONN_CLOSE:\n return \"CONN_CLOSE\"\n elif prim == RESULT:\n return \"RESULT\"\n else:\n return \"UNKNOWN\"\n\ndef errToStr(err):\n if err == OK:\n return \"OK\"\n elif err == ERROR:\n return \"ERROR\"\n elif err == IN_USE:\n return \"IN_USE\"\n elif err == NO_SUCH_PLAYER:\n return \"NO_SUCH_PLAYER\"\n else:\n return \"UNKNOWN\"\n\ndef checkValue(text, result, expected, exception, toStr):\n if result != expected:\n raise exception(text + \"Expected: \" + toStr(expected) + \", but received: \" + toStr(result))\n return\n\nclass AppException(Exception):\n pass\n\nclass 
RegisterException(Exception):\n pass\n\nclass ConnInitException(Exception):\n pass\n\nclass DataException(Exception):\n pass\n\nclass ConnCloseException(Exception):\n pass\n\nclass App(object):\n\n def __init__(self, sock, name):\n self.sock = sock\n self.name = name\n self.remoteName = None\n\n def registerClient(self):\n try:\n self.__sendRegister()\n self.__receiveResultCode(RESULT, 2, REGISTER, OK, RegisterException)\n except RegisterException as err:\n print(\"Registration failed: \" + err.args[0])\n self.sock.close()\n return False\n except OSError as err:\n errno, strerror = err.args\n print(\"I/O error({0}): {1}\\n\".format(errno, strerror))\n self.sock.close()\n return False\n return True\n\n def __sendRegister(self):\n msg = b\"\"\n msg += bytes([REGISTER, len(self.name)])\n msg += codecs.encode(self.name, \"utf-8\")\n self.sock.send(msg)\n\n def __sendConnInit(self, remoteName):\n msg = b\"\"\n msg += bytes([CONN_INIT, len(remoteName)])\n msg += codecs.encode(remoteName, \"utf-8\")\n self.sock.send(msg)\n\n def __sendData(self, data):\n msg = b\"\"\n msg += bytes([DATA, len(data)])\n msg += codecs.encode(data, \"utf-8\")\n self.sock.send(msg)\n\n def __sendConnClose(self):\n msg = b\"\"\n msg += bytes([CONN_CLOSE, 0])\n self.sock.send(msg)\n\n def __receiveResultCode(self, exPrim, exLength, exResPrim, exErrCode, exception):\n prim = self.sock.recv(1)[0]\n checkValue(\"Incorrect receive primitive.\", prim, exPrim, exception, primToStr)\n\n length = self.sock.recv(1)[0]\n checkValue(\"Incorrect length.\", length, exLength, exception, str)\n\n resPrim = self.sock.recv(1)[0]\n checkValue(\"Incorrect result primitive.\", resPrim, exResPrim, exception, primToStr)\n\n errCode = self.sock.recv(1)[0]\n checkValue(\"Incorrect error code.\", errCode, exErrCode, exception, errToStr)\n\n print(\"[OK] Received response for \" + primToStr(resPrim) + \", error code: \" + errToStr(errCode))\n\n def connInit(self, remoteName):\n self.remoteName = remoteName\n try:\n self.__sendConnInit(remoteName)\n self.__receiveResultCode(RESULT, 2, CONN_INIT, OK, ConnInitException)\n except ConnInitException as err:\n print(\"ConnInit failed: \" + err.args[0])\n return False\n except OSError as err:\n errno, strerror = err.args\n print(\"I/O error({0}): {1}\\n\".format(errno, strerror))\n return False\n return True\n\n def sendData(self, data):\n try:\n self.__sendData(data)\n self.__receiveResultCode(RESULT, 2, DATA, OK, DataException)\n except DataException as err:\n print(\"Data failed: \" + err.args[0])\n return False\n except OSError as err:\n errno, strerror = err.args\n print(\"I/O error({0}): {1}\\n\".format(errno, strerror))\n return False\n return True\n\n def connClose(self):\n try:\n self.__sendConnClose()\n self.__receiveResultCode(RESULT, 2, CONN_CLOSE, OK, ConnCloseException)\n except ConnCloseException as err:\n print(\"ConnClose failed: \" + err.args[0])\n return False\n except OSError as err:\n errno, strerror = err.args\n print(\"I/O error({0}): {1}\\n\".format(errno, strerror))\n return False\n return True\n\n def game(self):\n try:\n self.remoteName = self.__receiveConInit()\n print(\"Received CONN_INIT from \" + self.remoteName)\n while True:\n if not self.__handleMessage():\n break\n\n except (ConnInitException, ConnCloseException) as err:\n print(\"Loop failed: \" + err.args[0])\n return False\n except OSError as err:\n errno, strerror = err.args\n print(\"I/O error({0}): {1}\\n\".format(errno, strerror))\n return False\n return True\n\n def __receiveConInit(self):\n prim = 
self.sock.recv(1)[0]\n checkValue(\"Incorrect primitive.\", prim, CONN_INIT, ConnInitException, primToStr)\n\n length = self.sock.recv(1)[0]\n remoteName = self.sock.recv(length).decode(\"utf-8\")\n return remoteName\n\n def __handleMessage(self):\n prim = self.sock.recv(1)[0]\n length = self.sock.recv(1)[0]\n if prim == DATA:\n data = self.sock.recv(length).decode(\"utf-8\")\n print(\"Received data: \" + data)\n elif prim == CONN_CLOSE:\n print(\"Game ended\")\n return False\n return True\n\ndef main(servAddr, servPort):\n sock = establishConnection(servAddr, servPort)\n if sock is None:\n print(\"Connection could not be established\\n\")\n return 1\n\n name = input(\"Please enter your login: \")\n\n app = App(sock, name)\n if not app.registerClient():\n return 1\n\n if not app.game():\n return 1\n\n return 0\n\ndef establishConnection(addr, port):\n sock = None\n try:\n sock = sk.socket(sk.AF_INET, sk.SOCK_STREAM, 0)\n sock.connect((addr, port))\n except IOError as err:\n errno, strerror = err.args\n sys.stderr.write(\"I/O error({0}): {1}\\n\".format(errno, strerror))\n if sock is not None:\n sock.close()\n return None\n\n return sock\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 3:\n sys.stderr.write(\"Please specify server's address and port\\n\")\n sys.exit(1)\n servAddr = sys.argv[1]\n servPort = sys.argv[2]\n sys.exit(main(servAddr, int(servPort)))\n","sub_path":"server/test/clientReceiver.py","file_name":"clientReceiver.py","file_ext":"py","file_size_in_byte":6950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"115085384","text":"from django.utils import timezone\n\nfrom .settings import *\n\nnow = timezone.localtime()\n\nLOGGER_NAME = 'deflix_logger'\nLOG_PATH = os.path.join(BASE_DIR, 'log')\n\n# Logging\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format': '{levelname} {asctime} {module} {user}: {message}',\n 'style': '{',\n },\n 'simple': {\n 'format': '{levelname} {message}',\n 'style': '{',\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple'\n },\n 'fileInfo': {\n 'level': 'INFO',\n 'class': 'logging.FileHandler',\n 'filename': f'{LOG_PATH}/{now:%Y%m%d}_info.log',\n 'formatter': 'verbose'\n },\n 'fileWarn': {\n 'level': 'WARNING',\n 'class': 'logging.FileHandler',\n 'filename': f'{LOG_PATH}/{now:%Y%m%d}_warning.log',\n 'formatter': 'verbose'\n }\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': 'INFO',\n 'propagate': True,\n },\n 'django.request': {\n 'handlers': ['fileInfo'],\n 'level': 'INFO',\n 'propagate': True,\n },\n 'django.server': {\n 'handlers': ['fileWarn'],\n 'level': 'WARNING',\n 'propagate': True,\n },\n }\n}\n","sub_path":"deflix/deflix/deflix/settings_local.py","file_name":"settings_local.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
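A note on the framing code in clientReceiver.py above: every handler reads protocol frames with bare sock.recv(n), but on a TCP stream recv() may legally return fewer bytes than requested, so a short read would desynchronize the [primitive, length, payload] parsing. Below is a minimal sketch of a read-exactly helper the receive paths could use instead; the name recv_exact is illustrative and not part of the original module.

import socket

def recv_exact(sock: socket.socket, n: int) -> bytes:
    """Read exactly n bytes from a stream socket, or raise if the peer closes early."""
    buf = b""
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            # recv() returning b"" means the remote side closed the connection
            raise ConnectionError("socket closed with {0} bytes still expected".format(n - len(buf)))
        buf += chunk
    return buf

# Example: reading one [primitive, length, payload] frame as defined above
# prim = recv_exact(sock, 1)[0]
# length = recv_exact(sock, 1)[0]
# payload = recv_exact(sock, length).decode("utf-8")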